diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 97acea0a2b..4b8c07e69d 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -41,9 +41,6 @@ jobs: matrix: python-version: [3.7, 3.8, 3.9, "3.10"] os: [MacOS, Ubuntu, Windows] - include: - - python-version: 3.6 - os: Ubuntu steps: - uses: actions/checkout@v1 diff --git a/news/4995.vendor.rst b/news/4995.vendor.rst new file mode 100644 index 0000000000..d7a1843518 --- /dev/null +++ b/news/4995.vendor.rst @@ -0,0 +1,3 @@ +Updated the vendored version of ``pip`` from ``21.2.2`` to ``22.0.4``, which fixes a number of bugs, including +several reports of pipenv hanging indefinitely during locking when using certain package constraints. +This also drops support for Python 3.6, as it is EOL and support for it was removed in pip 22.x. diff --git a/pipenv/patched/notpip/_vendor/msgpack/COPYING b/pipenv/patched/notpip/COPYING similarity index 100% rename from pipenv/patched/notpip/_vendor/msgpack/COPYING rename to pipenv/patched/notpip/COPYING diff --git a/pipenv/patched/notpip/_vendor/webencodings/LICENSE b/pipenv/patched/notpip/LICENSE similarity index 100% rename from pipenv/patched/notpip/_vendor/webencodings/LICENSE rename to pipenv/patched/notpip/LICENSE diff --git a/pipenv/patched/notpip/_vendor/packaging/LICENSE.APACHE b/pipenv/patched/notpip/LICENSE.APACHE similarity index 100% rename from pipenv/patched/notpip/_vendor/packaging/LICENSE.APACHE rename to pipenv/patched/notpip/LICENSE.APACHE diff --git a/pipenv/patched/notpip/_vendor/packaging/LICENSE.BSD b/pipenv/patched/notpip/LICENSE.BSD similarity index 100% rename from pipenv/patched/notpip/_vendor/packaging/LICENSE.BSD rename to pipenv/patched/notpip/LICENSE.BSD diff --git a/pipenv/patched/notpip/_vendor/idna/LICENSE.md b/pipenv/patched/notpip/LICENSE.md similarity index 100% rename from pipenv/patched/notpip/_vendor/idna/LICENSE.md rename to pipenv/patched/notpip/LICENSE.md diff --git a/pipenv/patched/notpip/_vendor/urllib3/LICENSE.txt b/pipenv/patched/notpip/LICENSE.txt similarity index 100% rename from pipenv/patched/notpip/_vendor/urllib3/LICENSE.txt rename to pipenv/patched/notpip/LICENSE.txt diff --git a/pipenv/patched/notpip/__init__.py b/pipenv/patched/notpip/__init__.py index 9b0b9d2995..d19d72ceea 100644 --- a/pipenv/patched/notpip/__init__.py +++ b/pipenv/patched/notpip/__init__.py @@ -1,6 +1,6 @@ from typing import List, Optional -__version__ = "21.2.2" +__version__ = "22.0.4" def main(args: Optional[List[str]] = None) -> int: diff --git a/pipenv/patched/notpip/_internal/build_env.py b/pipenv/patched/notpip/_internal/build_env.py index d8c66b3ff5..28dffd3604 100644 --- a/pipenv/patched/notpip/_internal/build_env.py +++ b/pipenv/patched/notpip/_internal/build_env.py @@ -31,15 +31,13 @@ class _Prefix: - - def __init__(self, path): - # type: (str) -> None + def __init__(self, path: str) -> None: self.path = path self.setup = False self.bin_dir = get_paths( - 'nt' if os.name == 'nt' else 'posix_prefix', - vars={'base': path, 'platbase': path} - )['scripts'] + "nt" if os.name == "nt" else "posix_prefix", + vars={"base": path, "platbase": path}, + )["scripts"] self.lib_dirs = get_prefixed_libs(path) @@ -70,22 +68,18 @@ def _create_standalone_pip() -> Iterator[str]: class BuildEnvironment: - """Creates and manages an isolated environment to install build deps - """ + """Creates and manages an isolated environment to install build deps""" - def __init__(self): - # type: () -> None - temp_dir = TempDirectory( - kind=tempdir_kinds.BUILD_ENV,
globally_managed=True - ) + def __init__(self) -> None: + temp_dir = TempDirectory(kind=tempdir_kinds.BUILD_ENV, globally_managed=True) self._prefixes = OrderedDict( (name, _Prefix(os.path.join(temp_dir.path, name))) - for name in ('normal', 'overlay') + for name in ("normal", "overlay") ) - self._bin_dirs = [] # type: List[str] - self._lib_dirs = [] # type: List[str] + self._bin_dirs: List[str] = [] + self._lib_dirs: List[str] = [] for prefix in reversed(list(self._prefixes.values())): self._bin_dirs.append(prefix.bin_dir) self._lib_dirs.extend(prefix.lib_dirs) @@ -96,12 +90,15 @@ def __init__(self): system_sites = { os.path.normcase(site) for site in (get_purelib(), get_platlib()) } - self._site_dir = os.path.join(temp_dir.path, 'site') + self._site_dir = os.path.join(temp_dir.path, "site") if not os.path.exists(self._site_dir): os.mkdir(self._site_dir) - with open(os.path.join(self._site_dir, 'sitecustomize.py'), 'w') as fp: - fp.write(textwrap.dedent( - ''' + with open( + os.path.join(self._site_dir, "sitecustomize.py"), "w", encoding="utf-8" + ) as fp: + fp.write( + textwrap.dedent( + """ import os, site, sys # First, drop system-sites related paths. @@ -124,47 +121,49 @@ def __init__(self): for path in {lib_dirs!r}: assert not path in sys.path site.addsitedir(path) - ''' - ).format(system_sites=system_sites, lib_dirs=self._lib_dirs)) + """ + ).format(system_sites=system_sites, lib_dirs=self._lib_dirs) + ) - def __enter__(self): - # type: () -> None + def __enter__(self) -> None: self._save_env = { name: os.environ.get(name, None) - for name in ('PATH', 'PYTHONNOUSERSITE', 'PYTHONPATH') + for name in ("PATH", "PYTHONNOUSERSITE", "PYTHONPATH") } path = self._bin_dirs[:] - old_path = self._save_env['PATH'] + old_path = self._save_env["PATH"] if old_path: path.extend(old_path.split(os.pathsep)) pythonpath = [self._site_dir] - os.environ.update({ - 'PATH': os.pathsep.join(path), - 'PYTHONNOUSERSITE': '1', - 'PYTHONPATH': os.pathsep.join(pythonpath), - }) + os.environ.update( + { + "PATH": os.pathsep.join(path), + "PYTHONNOUSERSITE": "1", + "PYTHONPATH": os.pathsep.join(pythonpath), + } + ) def __exit__( self, - exc_type, # type: Optional[Type[BaseException]] - exc_val, # type: Optional[BaseException] - exc_tb # type: Optional[TracebackType] - ): - # type: (...) -> None + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: for varname, old_value in self._save_env.items(): if old_value is None: os.environ.pop(varname, None) else: os.environ[varname] = old_value - def check_requirements(self, reqs): - # type: (Iterable[str]) -> Tuple[Set[Tuple[str, str]], Set[str]] + def check_requirements( + self, reqs: Iterable[str] + ) -> Tuple[Set[Tuple[str, str]], Set[str]]: """Return 2 sets: - - conflicting requirements: set of (installed, wanted) reqs tuples - - missing requirements: set of reqs + - conflicting requirements: set of (installed, wanted) reqs tuples + - missing requirements: set of reqs """ missing = set() conflicting = set() @@ -187,32 +186,25 @@ def check_requirements(self, reqs): def install_requirements( self, - finder, # type: PackageFinder - requirements, # type: Iterable[str] - prefix_as_string, # type: str - message # type: str - ): - # type: (...) 
-> None + finder: "PackageFinder", + requirements: Iterable[str], + prefix_as_string: str, + *, + kind: str, + ) -> None: prefix = self._prefixes[prefix_as_string] assert not prefix.setup prefix.setup = True if not requirements: return with contextlib.ExitStack() as ctx: - # TODO: Remove this block when dropping 3.6 support. Python 3.6 - # lacks importlib.resources and pep517 has issues loading files in - # a zip, so we fallback to the "old" method by adding the current - # pip directory to the child process's sys.path. - if sys.version_info < (3, 7): - pip_runnable = os.path.dirname(pip_location) - else: - pip_runnable = ctx.enter_context(_create_standalone_pip()) + pip_runnable = ctx.enter_context(_create_standalone_pip()) self._install_requirements( pip_runnable, finder, requirements, prefix, - message, + kind=kind, ) @staticmethod @@ -221,75 +213,85 @@ def _install_requirements( finder: "PackageFinder", requirements: Iterable[str], prefix: _Prefix, - message: str, + *, + kind: str, ) -> None: sys_executable = os.environ.get('PIP_PYTHON_PATH', sys.executable) - args = [ - sys_executable, pip_runnable, 'install', - '--ignore-installed', '--no-user', '--prefix', prefix.path, - '--no-warn-script-location', - ] # type: List[str] + args: List[str] = [ + sys_executable, + pip_runnable, + "install", + "--ignore-installed", + "--no-user", + "--prefix", + prefix.path, + "--no-warn-script-location", + ] if logger.getEffectiveLevel() <= logging.DEBUG: - args.append('-v') - for format_control in ('no_binary', 'only_binary'): + args.append("-v") + for format_control in ("no_binary", "only_binary"): formats = getattr(finder.format_control, format_control) - args.extend(('--' + format_control.replace('_', '-'), - ','.join(sorted(formats or {':none:'})))) + args.extend( + ( + "--" + format_control.replace("_", "-"), + ",".join(sorted(formats or {":none:"})), + ) + ) index_urls = finder.index_urls if index_urls: - args.extend(['-i', index_urls[0]]) + args.extend(["-i", index_urls[0]]) for extra_index in index_urls[1:]: - args.extend(['--extra-index-url', extra_index]) + args.extend(["--extra-index-url", extra_index]) else: - args.append('--no-index') + args.append("--no-index") for link in finder.find_links: - args.extend(['--find-links', link]) + args.extend(["--find-links", link]) for host in finder.trusted_hosts: - args.extend(['--trusted-host', host]) + args.extend(["--trusted-host", host]) if finder.allow_all_prereleases: - args.append('--pre') + args.append("--pre") if finder.prefer_binary: - args.append('--prefer-binary') - args.append('--') + args.append("--prefer-binary") + args.append("--") args.extend(requirements) extra_environ = {"_PIP_STANDALONE_CERT": where()} - with open_spinner(message) as spinner: - call_subprocess(args, spinner=spinner, extra_environ=extra_environ) + with open_spinner(f"Installing {kind}") as spinner: + call_subprocess( + args, + command_desc=f"pip subprocess to install {kind}", + spinner=spinner, + extra_environ=extra_environ, + ) class NoOpBuildEnvironment(BuildEnvironment): - """A no-op drop-in replacement for BuildEnvironment - """ + """A no-op drop-in replacement for BuildEnvironment""" - def __init__(self): - # type: () -> None + def __init__(self) -> None: pass - def __enter__(self): - # type: () -> None + def __enter__(self) -> None: pass def __exit__( self, - exc_type, # type: Optional[Type[BaseException]] - exc_val, # type: Optional[BaseException] - exc_tb # type: Optional[TracebackType] - ): - # type: (...) 
-> None + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: pass - def cleanup(self): - # type: () -> None + def cleanup(self) -> None: pass def install_requirements( self, - finder, # type: PackageFinder - requirements, # type: Iterable[str] - prefix_as_string, # type: str - message # type: str - ): - # type: (...) -> None + finder: "PackageFinder", + requirements: Iterable[str], + prefix_as_string: str, + *, + kind: str, + ) -> None: raise NotImplementedError() diff --git a/pipenv/patched/notpip/_internal/cache.py b/pipenv/patched/notpip/_internal/cache.py index 5e53d9865c..d2c0c519e6 100644 --- a/pipenv/patched/notpip/_internal/cache.py +++ b/pipenv/patched/notpip/_internal/cache.py @@ -20,8 +20,7 @@ logger = logging.getLogger(__name__) -def _hash_dict(d): - # type: (Dict[str, str]) -> str +def _hash_dict(d: Dict[str, str]) -> str: """Return a stable sha224 of a dictionary.""" s = json.dumps(d, sort_keys=True, separators=(",", ":"), ensure_ascii=True) return hashlib.sha224(s.encode("ascii")).hexdigest() @@ -31,15 +30,16 @@ class Cache: """An abstract class - provides cache directories for data from links - :param cache_dir: The root of the cache. - :param format_control: An object of FormatControl class to limit - binaries being read from the cache. - :param allowed_formats: which formats of files the cache should store. - ('binary' and 'source' are the only allowed values) + :param cache_dir: The root of the cache. + :param format_control: An object of FormatControl class to limit + binaries being read from the cache. + :param allowed_formats: which formats of files the cache should store. + ('binary' and 'source' are the only allowed values) """ - def __init__(self, cache_dir, format_control, allowed_formats): - # type: (str, FormatControl, Set[str]) -> None + def __init__( + self, cache_dir: str, format_control: FormatControl, allowed_formats: Set[str] + ) -> None: super().__init__() assert not cache_dir or os.path.isabs(cache_dir) self.cache_dir = cache_dir or None @@ -49,10 +49,8 @@ def __init__(self, cache_dir, format_control, allowed_formats): _valid_formats = {"source", "binary"} assert self.allowed_formats.union(_valid_formats) == _valid_formats - def _get_cache_path_parts(self, link): - # type: (Link) -> List[str] - """Get parts of part that must be os.path.joined with cache_dir - """ + def _get_cache_path_parts(self, link: Link) -> List[str]: + """Get parts of part that must be os.path.joined with cache_dir""" # We want to generate an url to use as our cache key, we don't want to # just re-use the URL because it might have other items in the fragment @@ -84,19 +82,12 @@ def _get_cache_path_parts(self, link): return parts - def _get_candidates(self, link, canonical_package_name): - # type: (Link, str) -> List[Any] - can_not_cache = ( - not self.cache_dir or - not canonical_package_name or - not link - ) + def _get_candidates(self, link: Link, canonical_package_name: str) -> List[Any]: + can_not_cache = not self.cache_dir or not canonical_package_name or not link if can_not_cache: return [] - formats = self.format_control.get_allowed_formats( - canonical_package_name - ) + formats = self.format_control.get_allowed_formats(canonical_package_name) if not self.allowed_formats.intersection(formats): return [] @@ -107,19 +98,16 @@ def _get_candidates(self, link, canonical_package_name): candidates.append((candidate, path)) return candidates - def get_path_for_link(self, link): - # type: (Link) -> str - 
"""Return a directory to store cached items in for link. - """ + def get_path_for_link(self, link: Link) -> str: + """Return a directory to store cached items in for link.""" raise NotImplementedError() def get( self, - link, # type: Link - package_name, # type: Optional[str] - supported_tags, # type: List[Tag] - ): - # type: (...) -> Link + link: Link, + package_name: Optional[str], + supported_tags: List[Tag], + ) -> Link: """Returns a link to a cached item if it exists, otherwise returns the passed link. """ @@ -127,15 +115,12 @@ def get( class SimpleWheelCache(Cache): - """A cache of wheels for future installs. - """ + """A cache of wheels for future installs.""" - def __init__(self, cache_dir, format_control): - # type: (str, FormatControl) -> None + def __init__(self, cache_dir: str, format_control: FormatControl) -> None: super().__init__(cache_dir, format_control, {"binary"}) - def get_path_for_link(self, link): - # type: (Link) -> str + def get_path_for_link(self, link: Link) -> str: """Return a directory to store cached wheels for link Because there are M wheels for any one sdist, we provide a directory @@ -157,20 +142,17 @@ def get_path_for_link(self, link): def get( self, - link, # type: Link - package_name, # type: Optional[str] - supported_tags, # type: List[Tag] - ): - # type: (...) -> Link + link: Link, + package_name: Optional[str], + supported_tags: List[Tag], + ) -> Link: candidates = [] if not package_name: return link canonical_package_name = canonicalize_name(package_name) - for wheel_name, wheel_dir in self._get_candidates( - link, canonical_package_name - ): + for wheel_name, wheel_dir in self._get_candidates(link, canonical_package_name): try: wheel = Wheel(wheel_name) except InvalidWheelFilename: @@ -179,7 +161,9 @@ def get( logger.debug( "Ignoring cached wheel %s for %s as it " "does not match the expected distribution name %s.", - wheel_name, link, package_name, + wheel_name, + link, + package_name, ) continue if not wheel.supported(supported_tags): @@ -201,11 +185,9 @@ def get( class EphemWheelCache(SimpleWheelCache): - """A SimpleWheelCache that creates it's own temporary cache directory - """ + """A SimpleWheelCache that creates it's own temporary cache directory""" - def __init__(self, format_control): - # type: (FormatControl) -> None + def __init__(self, format_control: FormatControl) -> None: self._temp_dir = TempDirectory( kind=tempdir_kinds.EPHEM_WHEEL_CACHE, globally_managed=True, @@ -217,8 +199,8 @@ def __init__(self, format_control): class CacheEntry: def __init__( self, - link, # type: Link - persistent, # type: bool + link: Link, + persistent: bool, ): self.link = link self.persistent = persistent @@ -231,27 +213,23 @@ class WheelCache(Cache): when a certain link is not found in the simple wheel cache first. 
""" - def __init__(self, cache_dir, format_control): - # type: (str, FormatControl) -> None - super().__init__(cache_dir, format_control, {'binary'}) + def __init__(self, cache_dir: str, format_control: FormatControl) -> None: + super().__init__(cache_dir, format_control, {"binary"}) self._wheel_cache = SimpleWheelCache(cache_dir, format_control) self._ephem_cache = EphemWheelCache(format_control) - def get_path_for_link(self, link): - # type: (Link) -> str + def get_path_for_link(self, link: Link) -> str: return self._wheel_cache.get_path_for_link(link) - def get_ephem_path_for_link(self, link): - # type: (Link) -> str + def get_ephem_path_for_link(self, link: Link) -> str: return self._ephem_cache.get_path_for_link(link) def get( self, - link, # type: Link - package_name, # type: Optional[str] - supported_tags, # type: List[Tag] - ): - # type: (...) -> Link + link: Link, + package_name: Optional[str], + supported_tags: List[Tag], + ) -> Link: cache_entry = self.get_cache_entry(link, package_name, supported_tags) if cache_entry is None: return link @@ -259,11 +237,10 @@ def get( def get_cache_entry( self, - link, # type: Link - package_name, # type: Optional[str] - supported_tags, # type: List[Tag] - ): - # type: (...) -> Optional[CacheEntry] + link: Link, + package_name: Optional[str], + supported_tags: List[Tag], + ) -> Optional[CacheEntry]: """Returns a CacheEntry with a link to a cached item if it exists or None. The cache entry indicates if the item was found in the persistent or ephemeral cache. diff --git a/pipenv/patched/notpip/_internal/cli/autocompletion.py b/pipenv/patched/notpip/_internal/cli/autocompletion.py index 2648f29fec..82c95652f1 100644 --- a/pipenv/patched/notpip/_internal/cli/autocompletion.py +++ b/pipenv/patched/notpip/_internal/cli/autocompletion.py @@ -59,6 +59,14 @@ def autocomplete() -> None: print(dist) sys.exit(1) + should_list_installables = ( + not current.startswith("-") and subcommand_name == "install" + ) + if should_list_installables: + for path in auto_complete_paths(current, "path"): + print(path) + sys.exit(1) + subcommand = create_command(subcommand_name) for opt in subcommand.parser.option_list_all: @@ -138,7 +146,7 @@ def auto_complete_paths(current: str, completion_type: str) -> Iterable[str]: starting with ``current``. 
:param current: The word to be completed - :param completion_type: path completion type(`file`, `path` or `dir`)i + :param completion_type: path completion type(``file``, ``path`` or ``dir``) :return: A generator of regular files and/or directories """ directory, filename = os.path.split(current) diff --git a/pipenv/patched/notpip/_internal/cli/base_command.py b/pipenv/patched/notpip/_internal/cli/base_command.py index b4b034321b..6a71bc2125 100644 --- a/pipenv/patched/notpip/_internal/cli/base_command.py +++ b/pipenv/patched/notpip/_internal/cli/base_command.py @@ -1,5 +1,6 @@ """Base Command class, and related routines""" +import functools import logging import logging.config import optparse @@ -7,7 +8,9 @@ import sys import traceback from optparse import Values -from typing import Any, List, Optional, Tuple +from typing import Any, Callable, List, Optional, Tuple + +from pipenv.patched.notpip._vendor.rich import traceback as rich_traceback from pipenv.patched.notpip._internal.cli import cmdoptions from pipenv.patched.notpip._internal.cli.command_context import CommandContextMixIn @@ -21,12 +24,12 @@ from pipenv.patched.notpip._internal.exceptions import ( BadCommand, CommandError, + DiagnosticPipError, InstallationError, NetworkConnectionError, PreviousBuildDirError, UninstallationError, ) -from pipenv.patched.notpip._internal.utils.deprecation import deprecated from pipenv.patched.notpip._internal.utils.filesystem import check_path_owner from pipenv.patched.notpip._internal.utils.logging import BrokenStdoutLoggingError, setup_logging from pipenv.patched.notpip._internal.utils.misc import get_prog, normalize_path @@ -85,10 +88,10 @@ def handle_pip_version_check(self, options: Values) -> None: # are present. assert not hasattr(options, "no_index") - def run(self, options: Values, args: List[Any]) -> int: + def run(self, options: Values, args: List[str]) -> int: raise NotImplementedError - def parse_args(self, args: List[str]) -> Tuple[Any, Any]: + def parse_args(self, args: List[str]) -> Tuple[Values, List[str]]: # factored out for testability return self.parser.parse_args(args) @@ -148,20 +151,6 @@ def _main(self, args: List[str]) -> int: ) options.cache_dir = None - if getattr(options, "build_dir", None): - deprecated( - reason=( - "The -b/--build/--build-dir/--build-directory " - "option is deprecated and has no effect anymore." - ), - replacement=( - "use the TMPDIR/TEMP/TMP environment variable, " - "possibly combined with --no-clean" - ), - gone_in="21.3", - issue=8333, - ) - if "2020-resolver" in options.features_enabled: logger.warning( "--use-feature=2020-resolver no longer has any effect, " @@ -169,46 +158,66 @@ def _main(self, args: List[str]) -> int: "This will become an error in pip 21.0." 
) + def intercepts_unhandled_exc( + run_func: Callable[..., int] + ) -> Callable[..., int]: + @functools.wraps(run_func) + def exc_logging_wrapper(*args: Any) -> int: + try: + status = run_func(*args) + assert isinstance(status, int) + return status + except DiagnosticPipError as exc: + logger.error("[present-diagnostic] %s", exc) + logger.debug("Exception information:", exc_info=True) + + return ERROR + except PreviousBuildDirError as exc: + logger.critical(str(exc)) + logger.debug("Exception information:", exc_info=True) + + return PREVIOUS_BUILD_DIR_ERROR + except ( + InstallationError, + UninstallationError, + BadCommand, + NetworkConnectionError, + ) as exc: + logger.critical(str(exc)) + logger.debug("Exception information:", exc_info=True) + + return ERROR + except CommandError as exc: + logger.critical("%s", exc) + logger.debug("Exception information:", exc_info=True) + + return ERROR + except BrokenStdoutLoggingError: + # Bypass our logger and write any remaining messages to + # stderr because stdout no longer works. + print("ERROR: Pipe to stdout was broken", file=sys.stderr) + if level_number <= logging.DEBUG: + traceback.print_exc(file=sys.stderr) + + return ERROR + except KeyboardInterrupt: + logger.critical("Operation cancelled by user") + logger.debug("Exception information:", exc_info=True) + + return ERROR + except BaseException: + logger.critical("Exception:", exc_info=True) + + return UNKNOWN_ERROR + + return exc_logging_wrapper + try: - status = self.run(options, args) - assert isinstance(status, int) - return status - except PreviousBuildDirError as exc: - logger.critical(str(exc)) - logger.debug("Exception information:", exc_info=True) - - return PREVIOUS_BUILD_DIR_ERROR - except ( - InstallationError, - UninstallationError, - BadCommand, - NetworkConnectionError, - ) as exc: - logger.critical(str(exc)) - logger.debug("Exception information:", exc_info=True) - - return ERROR - except CommandError as exc: - logger.critical("%s", exc) - logger.debug("Exception information:", exc_info=True) - - return ERROR - except BrokenStdoutLoggingError: - # Bypass our logger and write any remaining messages to stderr - # because stdout no longer works. - print("ERROR: Pipe to stdout was broken", file=sys.stderr) - if level_number <= logging.DEBUG: - traceback.print_exc(file=sys.stderr) - - return ERROR - except KeyboardInterrupt: - logger.critical("Operation cancelled by user") - logger.debug("Exception information:", exc_info=True) - - return ERROR - except BaseException: - logger.critical("Exception:", exc_info=True) - - return UNKNOWN_ERROR + if not options.debug_mode: + run = intercepts_unhandled_exc(self.run) + else: + run = self.run + rich_traceback.install(show_locals=True) + return run(options, args) finally: self.handle_pip_version_check(options) diff --git a/pipenv/patched/notpip/_internal/cli/cmdoptions.py b/pipenv/patched/notpip/_internal/cli/cmdoptions.py index 1b72229caa..603f14aa2d 100644 --- a/pipenv/patched/notpip/_internal/cli/cmdoptions.py +++ b/pipenv/patched/notpip/_internal/cli/cmdoptions.py @@ -10,9 +10,9 @@ # The following comment should be removed at some point in the future. 
# mypy: strict-optional=False +import logging import os import textwrap -import warnings from functools import partial from optparse import SUPPRESS_HELP, Option, OptionGroup, OptionParser, Values from textwrap import dedent @@ -30,6 +30,8 @@ from pipenv.patched.notpip._internal.utils.hashes import STRONG_HASHES from pipenv.patched.notpip._internal.utils.misc import strtobool +logger = logging.getLogger(__name__) + def raise_option_error(parser: OptionParser, option: Option, msg: str) -> None: """ @@ -76,10 +78,9 @@ def getname(n: str) -> Optional[Any]: if any(map(getname, names)): control = options.format_control control.disallow_binaries() - warnings.warn( + logger.warning( "Disabling all use of wheels due to the use of --build-option " "/ --global-option / --install-option.", - stacklevel=2, ) @@ -151,6 +152,18 @@ class PipOption(Option): help="Show help.", ) +debug_mode: Callable[..., Option] = partial( + Option, + "--debug", + dest="debug_mode", + action="store_true", + default=False, + help=( + "Let unhandled exceptions propagate outside the main subroutine, " + "instead of logging them to stderr." + ), +) + isolated_mode: Callable[..., Option] = partial( Option, "--isolated", @@ -165,13 +178,15 @@ class PipOption(Option): require_virtualenv: Callable[..., Option] = partial( Option, - # Run only if inside a virtualenv, bail if not. "--require-virtualenv", "--require-venv", dest="require_venv", action="store_true", default=False, - help=SUPPRESS_HELP, + help=( + "Allow pip to only run in a virtual environment; " + "exit with an error otherwise." + ), ) verbose: Callable[..., Option] = partial( @@ -719,18 +734,6 @@ def _handle_no_cache_dir( help="Don't install package dependencies.", ) -build_dir: Callable[..., Option] = partial( - PipOption, - "-b", - "--build", - "--build-dir", - "--build-directory", - dest="build_dir", - type="path", - metavar="dir", - help=SUPPRESS_HELP, -) - ignore_requires_python: Callable[..., Option] = partial( Option, "--ignore-requires-python", @@ -961,7 +964,12 @@ def check_list_path_option(options: Values) -> None: metavar="feature", action="append", default=[], - choices=["legacy-resolver"], + choices=[ + "legacy-resolver", + "out-of-tree-build", + "backtrack-on-build-failures", + "html5lib", + ], help=("Enable deprecated functionality, that will be removed in the future."), ) @@ -974,6 +982,7 @@ def check_list_path_option(options: Values) -> None: "name": "General Options", "options": [ help_, + debug_mode, isolated_mode, require_virtualenv, verbose, diff --git a/pipenv/patched/notpip/_internal/cli/progress_bars.py b/pipenv/patched/notpip/_internal/cli/progress_bars.py index 582747ed8f..692a1b4c5c 100644 --- a/pipenv/patched/notpip/_internal/cli/progress_bars.py +++ b/pipenv/patched/notpip/_internal/cli/progress_bars.py @@ -1,10 +1,23 @@ +import functools import itertools import sys from signal import SIGINT, default_int_handler, signal -from typing import Any +from typing import Any, Callable, Iterator, Optional, Tuple from pipenv.patched.notpip._vendor.progress.bar import Bar, FillingCirclesBar, IncrementalBar from pipenv.patched.notpip._vendor.progress.spinner import Spinner +from pipenv.patched.notpip._vendor.rich.progress import ( + BarColumn, + DownloadColumn, + FileSizeColumn, + Progress, + ProgressColumn, + SpinnerColumn, + TextColumn, + TimeElapsedColumn, + TimeRemainingColumn, + TransferSpeedColumn, +) from pipenv.patched.notpip._internal.utils.compat import WINDOWS from pipenv.patched.notpip._internal.utils.logging import get_indentation @@ 
-17,6 +30,8 @@ except Exception: colorama = None +DownloadProgressRenderer = Callable[[Iterator[bytes]], Iterator[bytes]] + def _select_progress_class(preferred: Bar, fallback: Bar) -> Bar: encoding = getattr(preferred.file, "encoding", None) @@ -243,8 +258,64 @@ def update(self) -> None: } -def DownloadProgressProvider(progress_bar, max=None): # type: ignore +def _legacy_progress_bar( + progress_bar: str, max: Optional[int] +) -> DownloadProgressRenderer: if max is None or max == 0: - return BAR_TYPES[progress_bar][1]().iter + return BAR_TYPES[progress_bar][1]().iter # type: ignore else: return BAR_TYPES[progress_bar][0](max=max).iter + + +# +# Modern replacement, for our legacy progress bars. +# +def _rich_progress_bar( + iterable: Iterator[bytes], + *, + bar_type: str, + size: int, +) -> Iterator[bytes]: + assert bar_type == "on", "This should only be used in the default mode." + + if not size: + total = float("inf") + columns: Tuple[ProgressColumn, ...] = ( + TextColumn("[progress.description]{task.description}"), + SpinnerColumn("line", speed=1.5), + FileSizeColumn(), + TransferSpeedColumn(), + TimeElapsedColumn(), + ) + else: + total = size + columns = ( + TextColumn("[progress.description]{task.description}"), + BarColumn(), + DownloadColumn(), + TransferSpeedColumn(), + TextColumn("eta"), + TimeRemainingColumn(), + ) + + progress = Progress(*columns, refresh_per_second=30) + task_id = progress.add_task(" " * (get_indentation() + 2), total=total) + with progress: + for chunk in iterable: + yield chunk + progress.update(task_id, advance=len(chunk)) + + +def get_download_progress_renderer( + *, bar_type: str, size: Optional[int] = None +) -> DownloadProgressRenderer: + """Get an object that can be used to render the download progress. + + Returns a callable, that takes an iterable to "wrap". + """ + if bar_type == "on": + return functools.partial(_rich_progress_bar, bar_type=bar_type, size=size) + elif bar_type == "off": + return iter # no-op, when passed an iterator + else: + return _legacy_progress_bar(bar_type, size) diff --git a/pipenv/patched/notpip/_internal/cli/req_command.py b/pipenv/patched/notpip/_internal/cli/req_command.py index 17c2209ff6..91664b3f15 100644 --- a/pipenv/patched/notpip/_internal/cli/req_command.py +++ b/pipenv/patched/notpip/_internal/cli/req_command.py @@ -34,6 +34,7 @@ from pipenv.patched.notpip._internal.req.req_tracker import RequirementTracker from pipenv.patched.notpip._internal.resolution.base import BaseResolver from pipenv.patched.notpip._internal.self_outdated_check import pip_self_version_check +from pipenv.patched.notpip._internal.utils.deprecation import deprecated from pipenv.patched.notpip._internal.utils.temp_dir import ( TempDirectory, TempDirectoryTypeRegistry, @@ -172,9 +173,10 @@ def warn_if_run_as_root() -> None: # checks: https://mypy.readthedocs.io/en/stable/common_issues.html if sys.platform == "win32" or sys.platform == "cygwin": return - if sys.platform == "darwin" or sys.platform == "linux": - if os.getuid() != 0: - return + + if os.getuid() != 0: + return + logger.warning( "Running pip as the 'root' user can result in broken permissions and " "conflicting behaviour with the system package manager. 
" @@ -225,6 +227,31 @@ def determine_resolver_variant(options: Values) -> str: return "2020-resolver" + @staticmethod + def determine_build_failure_suppression(options: Values) -> bool: + """Determines whether build failures should be suppressed and backtracked on.""" + if "backtrack-on-build-failures" not in options.deprecated_features_enabled: + return False + + if "legacy-resolver" in options.deprecated_features_enabled: + raise CommandError("Cannot backtrack with legacy resolver.") + + deprecated( + reason=( + "Backtracking on build failures can mask issues related to how " + "a package generates metadata or builds a wheel. This flag will " + "be removed in pip 22.2." + ), + gone_in=None, + replacement=( + "avoiding known-bad versions by explicitly telling pip to ignore them " + "(either directly as requirements, or via a constraints file)" + ), + feature_flag=None, + issue=10655, + ) + return True + @classmethod def make_requirement_preparer( cls, @@ -235,6 +262,7 @@ def make_requirement_preparer( finder: PackageFinder, use_user_site: bool, download_dir: Optional[str] = None, + verbosity: int = 0, ) -> RequirementPreparer: """ Create a RequirementPreparer instance for the given parameters. @@ -260,6 +288,27 @@ def make_requirement_preparer( "fast-deps has no effect when used with the legacy resolver." ) + in_tree_build = "out-of-tree-build" not in options.deprecated_features_enabled + if "in-tree-build" in options.features_enabled: + deprecated( + reason="In-tree builds are now the default.", + replacement="to remove the --use-feature=in-tree-build flag", + gone_in="22.1", + ) + if "out-of-tree-build" in options.deprecated_features_enabled: + deprecated( + reason="Out-of-tree builds are deprecated.", + replacement=None, + gone_in="22.1", + ) + + if options.progress_bar not in {"on", "off"}: + deprecated( + reason="Custom progress bar styles are deprecated", + replacement="to use the default progress bar style.", + gone_in="22.1", + ) + return RequirementPreparer( build_dir=temp_build_dir_path, src_dir=options.src_dir, @@ -272,7 +321,8 @@ def make_requirement_preparer( require_hashes=options.require_hashes, use_user_site=use_user_site, lazy_wheel=lazy_wheel, - in_tree_build="in-tree-build" in options.features_enabled, + verbosity=verbosity, + in_tree_build=in_tree_build, ) @classmethod @@ -298,6 +348,7 @@ def make_resolver( isolated=options.isolated_mode, use_pep517=use_pep517, ) + suppress_build_failures = cls.determine_build_failure_suppression(options) resolver_variant = cls.determine_resolver_variant(options) # The long import name and duplicated invocation is needed to convince # Mypy into correctly typechecking. 
Otherwise it would complain the @@ -317,6 +368,7 @@ def make_resolver( force_reinstall=force_reinstall, upgrade_strategy=upgrade_strategy, py_version_info=py_version_info, + suppress_build_failures=suppress_build_failures, ) import pipenv.patched.notpip._internal.resolution.legacy.resolver @@ -450,4 +502,5 @@ def _build_package_finder( link_collector=link_collector, selection_prefs=selection_prefs, target_python=target_python, + use_deprecated_html5lib="html5lib" in options.deprecated_features_enabled, ) diff --git a/pipenv/patched/notpip/_internal/commands/__init__.py b/pipenv/patched/notpip/_internal/commands/__init__.py index 24ad0b49b8..c1b22ee4b1 100644 --- a/pipenv/patched/notpip/_internal/commands/__init__.py +++ b/pipenv/patched/notpip/_internal/commands/__init__.py @@ -3,87 +3,102 @@ """ import importlib -from collections import OrderedDict, namedtuple +from collections import namedtuple from typing import Any, Dict, Optional from pipenv.patched.notpip._internal.cli.base_command import Command -CommandInfo = namedtuple('CommandInfo', 'module_path, class_name, summary') +CommandInfo = namedtuple("CommandInfo", "module_path, class_name, summary") -# The ordering matters for help display. -# Also, even though the module path starts with the same -# "pipenv.patched.notpip._internal.commands" prefix in each case, we include the full path -# because it makes testing easier (specifically when modifying commands_dict -# in test setup / teardown by adding info for a FakeCommand class defined -# in a test-related module). -# Finally, we need to pass an iterable of pairs here rather than a dict -# so that the ordering won't be lost when using Python 2.7. -commands_dict: Dict[str, CommandInfo] = OrderedDict([ - ('install', CommandInfo( - 'pipenv.patched.notpip._internal.commands.install', 'InstallCommand', - 'Install packages.', - )), - ('download', CommandInfo( - 'pipenv.patched.notpip._internal.commands.download', 'DownloadCommand', - 'Download packages.', - )), - ('uninstall', CommandInfo( - 'pipenv.patched.notpip._internal.commands.uninstall', 'UninstallCommand', - 'Uninstall packages.', - )), - ('freeze', CommandInfo( - 'pipenv.patched.notpip._internal.commands.freeze', 'FreezeCommand', - 'Output installed packages in requirements format.', - )), - ('list', CommandInfo( - 'pipenv.patched.notpip._internal.commands.list', 'ListCommand', - 'List installed packages.', - )), - ('show', CommandInfo( - 'pipenv.patched.notpip._internal.commands.show', 'ShowCommand', - 'Show information about installed packages.', - )), - ('check', CommandInfo( - 'pipenv.patched.notpip._internal.commands.check', 'CheckCommand', - 'Verify installed packages have compatible dependencies.', - )), - ('config', CommandInfo( - 'pipenv.patched.notpip._internal.commands.configuration', 'ConfigurationCommand', - 'Manage local and global configuration.', - )), - ('search', CommandInfo( - 'pipenv.patched.notpip._internal.commands.search', 'SearchCommand', - 'Search PyPI for packages.', - )), - ('cache', CommandInfo( - 'pipenv.patched.notpip._internal.commands.cache', 'CacheCommand', +# This dictionary does a bunch of heavy lifting for help output: +# - Enables avoiding additional (costly) imports for presenting `--help`. +# - The ordering matters for help display. +# +# Even though the module path starts with the same "pipenv.patched.notpip._internal.commands" +# prefix, the full path makes testing easier (specifically when modifying +# `commands_dict` in test setup / teardown). 
+commands_dict: Dict[str, CommandInfo] = { + "install": CommandInfo( + "pipenv.patched.notpip._internal.commands.install", + "InstallCommand", + "Install packages.", + ), + "download": CommandInfo( + "pipenv.patched.notpip._internal.commands.download", + "DownloadCommand", + "Download packages.", + ), + "uninstall": CommandInfo( + "pipenv.patched.notpip._internal.commands.uninstall", + "UninstallCommand", + "Uninstall packages.", + ), + "freeze": CommandInfo( + "pipenv.patched.notpip._internal.commands.freeze", + "FreezeCommand", + "Output installed packages in requirements format.", + ), + "list": CommandInfo( + "pipenv.patched.notpip._internal.commands.list", + "ListCommand", + "List installed packages.", + ), + "show": CommandInfo( + "pipenv.patched.notpip._internal.commands.show", + "ShowCommand", + "Show information about installed packages.", + ), + "check": CommandInfo( + "pipenv.patched.notpip._internal.commands.check", + "CheckCommand", + "Verify installed packages have compatible dependencies.", + ), + "config": CommandInfo( + "pipenv.patched.notpip._internal.commands.configuration", + "ConfigurationCommand", + "Manage local and global configuration.", + ), + "search": CommandInfo( + "pipenv.patched.notpip._internal.commands.search", + "SearchCommand", + "Search PyPI for packages.", + ), + "cache": CommandInfo( + "pipenv.patched.notpip._internal.commands.cache", + "CacheCommand", "Inspect and manage pip's wheel cache.", - )), - ('index', CommandInfo( - 'pipenv.patched.notpip._internal.commands.index', 'IndexCommand', + ), + "index": CommandInfo( + "pipenv.patched.notpip._internal.commands.index", + "IndexCommand", "Inspect information available from package indexes.", - )), - ('wheel', CommandInfo( - 'pipenv.patched.notpip._internal.commands.wheel', 'WheelCommand', - 'Build wheels from your requirements.', - )), - ('hash', CommandInfo( - 'pipenv.patched.notpip._internal.commands.hash', 'HashCommand', - 'Compute hashes of package archives.', - )), - ('completion', CommandInfo( - 'pipenv.patched.notpip._internal.commands.completion', 'CompletionCommand', - 'A helper command used for command completion.', - )), - ('debug', CommandInfo( - 'pipenv.patched.notpip._internal.commands.debug', 'DebugCommand', - 'Show information useful for debugging.', - )), - ('help', CommandInfo( - 'pipenv.patched.notpip._internal.commands.help', 'HelpCommand', - 'Show help for commands.', - )), -]) + ), + "wheel": CommandInfo( + "pipenv.patched.notpip._internal.commands.wheel", + "WheelCommand", + "Build wheels from your requirements.", + ), + "hash": CommandInfo( + "pipenv.patched.notpip._internal.commands.hash", + "HashCommand", + "Compute hashes of package archives.", + ), + "completion": CommandInfo( + "pipenv.patched.notpip._internal.commands.completion", + "CompletionCommand", + "A helper command used for command completion.", + ), + "debug": CommandInfo( + "pipenv.patched.notpip._internal.commands.debug", + "DebugCommand", + "Show information useful for debugging.", + ), + "help": CommandInfo( + "pipenv.patched.notpip._internal.commands.help", + "HelpCommand", + "Show help for commands.", + ), +} def create_command(name: str, **kwargs: Any) -> Command: diff --git a/pipenv/patched/notpip/_internal/commands/cache.py b/pipenv/patched/notpip/_internal/commands/cache.py index 2668996a84..9ce71763b4 100644 --- a/pipenv/patched/notpip/_internal/commands/cache.py +++ b/pipenv/patched/notpip/_internal/commands/cache.py @@ -39,17 +39,17 @@ class CacheCommand(Command): def add_options(self) -> None: 
self.cmd_opts.add_option( - '--format', - action='store', - dest='list_format', + "--format", + action="store", + dest="list_format", default="human", - choices=('human', 'abspath'), - help="Select the output format among: human (default) or abspath" + choices=("human", "abspath"), + help="Select the output format among: human (default) or abspath", ) self.parser.insert_option_group(0, self.cmd_opts) - def run(self, options: Values, args: List[Any]) -> int: + def run(self, options: Values, args: List[str]) -> int: handlers = { "dir": self.get_cache_dir, "info": self.get_cache_info, @@ -59,8 +59,7 @@ def run(self, options: Values, args: List[Any]) -> int: } if not options.cache_dir: - logger.error("pip cache commands can not " - "function since cache is disabled.") + logger.error("pip cache commands can not function since cache is disabled.") return ERROR # Determine action @@ -84,69 +83,73 @@ def run(self, options: Values, args: List[Any]) -> int: def get_cache_dir(self, options: Values, args: List[Any]) -> None: if args: - raise CommandError('Too many arguments') + raise CommandError("Too many arguments") logger.info(options.cache_dir) def get_cache_info(self, options: Values, args: List[Any]) -> None: if args: - raise CommandError('Too many arguments') + raise CommandError("Too many arguments") num_http_files = len(self._find_http_files(options)) - num_packages = len(self._find_wheels(options, '*')) + num_packages = len(self._find_wheels(options, "*")) - http_cache_location = self._cache_dir(options, 'http') - wheels_cache_location = self._cache_dir(options, 'wheels') + http_cache_location = self._cache_dir(options, "http") + wheels_cache_location = self._cache_dir(options, "wheels") http_cache_size = filesystem.format_directory_size(http_cache_location) - wheels_cache_size = filesystem.format_directory_size( - wheels_cache_location + wheels_cache_size = filesystem.format_directory_size(wheels_cache_location) + + message = ( + textwrap.dedent( + """ + Package index page cache location: {http_cache_location} + Package index page cache size: {http_cache_size} + Number of HTTP files: {num_http_files} + Wheels location: {wheels_cache_location} + Wheels size: {wheels_cache_size} + Number of wheels: {package_count} + """ + ) + .format( + http_cache_location=http_cache_location, + http_cache_size=http_cache_size, + num_http_files=num_http_files, + wheels_cache_location=wheels_cache_location, + package_count=num_packages, + wheels_cache_size=wheels_cache_size, + ) + .strip() ) - message = textwrap.dedent(""" - Package index page cache location: {http_cache_location} - Package index page cache size: {http_cache_size} - Number of HTTP files: {num_http_files} - Wheels location: {wheels_cache_location} - Wheels size: {wheels_cache_size} - Number of wheels: {package_count} - """).format( - http_cache_location=http_cache_location, - http_cache_size=http_cache_size, - num_http_files=num_http_files, - wheels_cache_location=wheels_cache_location, - package_count=num_packages, - wheels_cache_size=wheels_cache_size, - ).strip() - logger.info(message) def list_cache_items(self, options: Values, args: List[Any]) -> None: if len(args) > 1: - raise CommandError('Too many arguments') + raise CommandError("Too many arguments") if args: pattern = args[0] else: - pattern = '*' + pattern = "*" files = self._find_wheels(options, pattern) - if options.list_format == 'human': + if options.list_format == "human": self.format_for_human(files) else: self.format_for_abspath(files) def format_for_human(self, files: 
List[str]) -> None: if not files: - logger.info('Nothing cached.') + logger.info("Nothing cached.") return results = [] for filename in files: wheel = os.path.basename(filename) size = filesystem.format_file_size(filename) - results.append(f' - {wheel} ({size})') - logger.info('Cache contents:\n') - logger.info('\n'.join(sorted(results))) + results.append(f" - {wheel} ({size})") + logger.info("Cache contents:\n") + logger.info("\n".join(sorted(results))) def format_for_abspath(self, files: List[str]) -> None: if not files: @@ -156,23 +159,27 @@ def format_for_abspath(self, files: List[str]) -> None: for filename in files: results.append(filename) - logger.info('\n'.join(sorted(results))) + logger.info("\n".join(sorted(results))) def remove_cache_items(self, options: Values, args: List[Any]) -> None: if len(args) > 1: - raise CommandError('Too many arguments') + raise CommandError("Too many arguments") if not args: - raise CommandError('Please provide a pattern') + raise CommandError("Please provide a pattern") files = self._find_wheels(options, args[0]) - # Only fetch http files if no specific pattern given - if args[0] == '*': + no_matching_msg = "No matching packages" + if args[0] == "*": + # Only fetch http files if no specific pattern given files += self._find_http_files(options) + else: + # Add the pattern to the log message + no_matching_msg += ' for pattern "{}"'.format(args[0]) if not files: - raise CommandError('No matching packages') + logger.warning(no_matching_msg) for filename in files: os.unlink(filename) @@ -181,19 +188,19 @@ def remove_cache_items(self, options: Values, args: List[Any]) -> None: def purge_cache(self, options: Values, args: List[Any]) -> None: if args: - raise CommandError('Too many arguments') + raise CommandError("Too many arguments") - return self.remove_cache_items(options, ['*']) + return self.remove_cache_items(options, ["*"]) def _cache_dir(self, options: Values, subdir: str) -> str: return os.path.join(options.cache_dir, subdir) def _find_http_files(self, options: Values) -> List[str]: - http_dir = self._cache_dir(options, 'http') - return filesystem.find_files(http_dir, '*') + http_dir = self._cache_dir(options, "http") + return filesystem.find_files(http_dir, "*") def _find_wheels(self, options: Values, pattern: str) -> List[str]: - wheel_dir = self._cache_dir(options, 'wheels') + wheel_dir = self._cache_dir(options, "wheels") # The wheel filename format, as specified in PEP 427, is: # {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl diff --git a/pipenv/patched/notpip/_internal/commands/check.py b/pipenv/patched/notpip/_internal/commands/check.py index 219a02c5c9..ae57ac22d7 100644 --- a/pipenv/patched/notpip/_internal/commands/check.py +++ b/pipenv/patched/notpip/_internal/commands/check.py @@ -1,6 +1,6 @@ import logging from optparse import Values -from typing import Any, List +from typing import List from pipenv.patched.notpip._internal.cli.base_command import Command from pipenv.patched.notpip._internal.cli.status_codes import ERROR, SUCCESS @@ -19,7 +19,7 @@ class CheckCommand(Command): usage = """ %prog [options]""" - def run(self, options: Values, args: List[Any]) -> int: + def run(self, options: Values, args: List[str]) -> int: package_set, parsing_probs = create_package_set_from_installed() missing, conflicting = check_package_set(package_set) @@ -29,7 +29,9 @@ def run(self, options: Values, args: List[Any]) -> int: for dependency in missing[project_name]: write_output( "%s %s requires %s, which is not installed.", - 
project_name, version, dependency[0], + project_name, + version, + dependency[0], ) for project_name in conflicting: @@ -37,7 +39,11 @@ def run(self, options: Values, args: List[Any]) -> int: for dep_name, dep_version, req in conflicting[project_name]: write_output( "%s %s has requirement %s, but you have %s %s.", - project_name, version, req, dep_name, dep_version, + project_name, + version, + req, + dep_name, + dep_version, ) if missing or conflicting or parsing_probs: diff --git a/pipenv/patched/notpip/_internal/commands/completion.py b/pipenv/patched/notpip/_internal/commands/completion.py index 7a63054638..dac731e530 100644 --- a/pipenv/patched/notpip/_internal/commands/completion.py +++ b/pipenv/patched/notpip/_internal/commands/completion.py @@ -12,7 +12,7 @@ """ COMPLETION_SCRIPTS = { - 'bash': """ + "bash": """ _pip_completion() {{ COMPREPLY=( $( COMP_WORDS="${{COMP_WORDS[*]}}" \\ @@ -21,7 +21,7 @@ }} complete -o default -F _pip_completion {prog} """, - 'zsh': """ + "zsh": """ function _pip_completion {{ local words cword read -Ac words @@ -32,7 +32,7 @@ }} compctl -K _pip_completion {prog} """, - 'fish': """ + "fish": """ function __fish_complete_pip set -lx COMP_WORDS (commandline -o) "" set -lx COMP_CWORD ( \\ @@ -53,39 +53,44 @@ class CompletionCommand(Command): def add_options(self) -> None: self.cmd_opts.add_option( - '--bash', '-b', - action='store_const', - const='bash', - dest='shell', - help='Emit completion code for bash') + "--bash", + "-b", + action="store_const", + const="bash", + dest="shell", + help="Emit completion code for bash", + ) self.cmd_opts.add_option( - '--zsh', '-z', - action='store_const', - const='zsh', - dest='shell', - help='Emit completion code for zsh') + "--zsh", + "-z", + action="store_const", + const="zsh", + dest="shell", + help="Emit completion code for zsh", + ) self.cmd_opts.add_option( - '--fish', '-f', - action='store_const', - const='fish', - dest='shell', - help='Emit completion code for fish') + "--fish", + "-f", + action="store_const", + const="fish", + dest="shell", + help="Emit completion code for fish", + ) self.parser.insert_option_group(0, self.cmd_opts) def run(self, options: Values, args: List[str]) -> int: """Prints the completion code of the given shell""" shells = COMPLETION_SCRIPTS.keys() - shell_options = ['--' + shell for shell in sorted(shells)] + shell_options = ["--" + shell for shell in sorted(shells)] if options.shell in shells: script = textwrap.dedent( - COMPLETION_SCRIPTS.get(options.shell, '').format( - prog=get_prog()) + COMPLETION_SCRIPTS.get(options.shell, "").format(prog=get_prog()) ) print(BASE_COMPLETION.format(script=script, shell=options.shell)) return SUCCESS else: sys.stderr.write( - 'ERROR: You must pass {}\n' .format(' or '.join(shell_options)) + "ERROR: You must pass {}\n".format(" or ".join(shell_options)) ) return SUCCESS diff --git a/pipenv/patched/notpip/_internal/commands/configuration.py b/pipenv/patched/notpip/_internal/commands/configuration.py index 658074f2c2..0f15e54073 100644 --- a/pipenv/patched/notpip/_internal/commands/configuration.py +++ b/pipenv/patched/notpip/_internal/commands/configuration.py @@ -34,7 +34,7 @@ class ConfigurationCommand(Command): If none of --user, --global and --site are passed, a virtual environment configuration file is used if one is active and the file - exists. Otherwise, all modifications happen on the to the user file by + exists. Otherwise, all modifications happen to the user file by default. 
""" @@ -51,38 +51,38 @@ class ConfigurationCommand(Command): def add_options(self) -> None: self.cmd_opts.add_option( - '--editor', - dest='editor', - action='store', + "--editor", + dest="editor", + action="store", default=None, help=( - 'Editor to use to edit the file. Uses VISUAL or EDITOR ' - 'environment variables if not provided.' - ) + "Editor to use to edit the file. Uses VISUAL or EDITOR " + "environment variables if not provided." + ), ) self.cmd_opts.add_option( - '--global', - dest='global_file', - action='store_true', + "--global", + dest="global_file", + action="store_true", default=False, - help='Use the system-wide configuration file only' + help="Use the system-wide configuration file only", ) self.cmd_opts.add_option( - '--user', - dest='user_file', - action='store_true', + "--user", + dest="user_file", + action="store_true", default=False, - help='Use the user configuration file only' + help="Use the user configuration file only", ) self.cmd_opts.add_option( - '--site', - dest='site_file', - action='store_true', + "--site", + dest="site_file", + action="store_true", default=False, - help='Use the current environment configuration file only' + help="Use the current environment configuration file only", ) self.parser.insert_option_group(0, self.cmd_opts) @@ -133,11 +133,15 @@ def run(self, options: Values, args: List[str]) -> int: return SUCCESS def _determine_file(self, options: Values, need_value: bool) -> Optional[Kind]: - file_options = [key for key, value in ( - (kinds.USER, options.user_file), - (kinds.GLOBAL, options.global_file), - (kinds.SITE, options.site_file), - ) if value] + file_options = [ + key + for key, value in ( + (kinds.USER, options.user_file), + (kinds.GLOBAL, options.global_file), + (kinds.SITE, options.site_file), + ) + if value + ] if not file_options: if not need_value: @@ -194,24 +198,22 @@ def list_config_values(self, options: Values, args: List[str]) -> None: for fname in files: with indent_log(): file_exists = os.path.exists(fname) - write_output("%s, exists: %r", - fname, file_exists) + write_output("%s, exists: %r", fname, file_exists) if file_exists: self.print_config_file_values(variant) def print_config_file_values(self, variant: Kind) -> None: """Get key-value pairs from the file of a variant""" - for name, value in self.configuration.\ - get_values_in_config(variant).items(): + for name, value in self.configuration.get_values_in_config(variant).items(): with indent_log(): write_output("%s: %s", name, value) def print_env_var_values(self) -> None: """Get key-values pairs present as environment variables""" - write_output("%s:", 'env_var') + write_output("%s:", "env_var") with indent_log(): for key, value in sorted(self.configuration.get_environ_vars()): - env_var = f'PIP_{key.upper()}' + env_var = f"PIP_{key.upper()}" write_output("%s=%r", env_var, value) def open_in_editor(self, options: Values, args: List[str]) -> None: @@ -225,16 +227,14 @@ def open_in_editor(self, options: Values, args: List[str]) -> None: subprocess.check_call([editor, fname]) except subprocess.CalledProcessError as e: raise PipError( - "Editor Subprocess exited with exit code {}" - .format(e.returncode) + "Editor Subprocess exited with exit code {}".format(e.returncode) ) def _get_n_args(self, args: List[str], example: str, n: int) -> Any: - """Helper to make sure the command got the right number of arguments - """ + """Helper to make sure the command got the right number of arguments""" if len(args) != n: msg = ( - 'Got unexpected number of arguments, expected 
{}. ' + "Got unexpected number of arguments, expected {}. " '(example: "{} config {}")' ).format(n, get_prog(), example) raise PipError(msg) diff --git a/pipenv/patched/notpip/_internal/commands/debug.py b/pipenv/patched/notpip/_internal/commands/debug.py index 97035956d4..20e9e1fd5e 100644 --- a/pipenv/patched/notpip/_internal/commands/debug.py +++ b/pipenv/patched/notpip/_internal/commands/debug.py @@ -24,52 +24,46 @@ def show_value(name: str, value: Any) -> None: - logger.info('%s: %s', name, value) + logger.info("%s: %s", name, value) def show_sys_implementation() -> None: - logger.info('sys.implementation:') + logger.info("sys.implementation:") implementation_name = sys.implementation.name with indent_log(): - show_value('name', implementation_name) + show_value("name", implementation_name) def create_vendor_txt_map() -> Dict[str, str]: vendor_txt_path = os.path.join( - os.path.dirname(pip_location), - '_vendor', - 'vendor.txt' + os.path.dirname(pip_location), "_vendor", "vendor.txt" ) with open(vendor_txt_path) as f: # Purge non version specifying lines. # Also, remove any space prefix or suffixes (including comments). - lines = [line.strip().split(' ', 1)[0] - for line in f.readlines() if '==' in line] + lines = [ + line.strip().split(" ", 1)[0] for line in f.readlines() if "==" in line + ] # Transform into "module" -> version dict. - return dict(line.split('==', 1) for line in lines) # type: ignore + return dict(line.split("==", 1) for line in lines) # type: ignore def get_module_from_module_name(module_name: str) -> ModuleType: # Module name can be uppercase in vendor.txt for some reason... module_name = module_name.lower() # PATCH: setuptools is actually only pkg_resources. - if module_name == 'setuptools': - module_name = 'pkg_resources' - - __import__( - f'pipenv.patched.notpip._vendor.{module_name}', - globals(), - locals(), - level=0 - ) + if module_name == "setuptools": + module_name = "pkg_resources" + + __import__(f"pipenv.patched.notpip._vendor.{module_name}", globals(), locals(), level=0) return getattr(pipenv.patched.notpip._vendor, module_name) def get_vendor_version_from_module(module_name: str) -> Optional[str]: module = get_module_from_module_name(module_name) - version = getattr(module, '__version__', None) + version = getattr(module, "__version__", None) if not version: # Try to find version in debundled module info. @@ -86,20 +80,24 @@ def show_actual_vendor_versions(vendor_txt_versions: Dict[str, str]) -> None: a conflict or if the actual version could not be imported. 
""" for module_name, expected_version in vendor_txt_versions.items(): - extra_message = '' + extra_message = "" actual_version = get_vendor_version_from_module(module_name) if not actual_version: - extra_message = ' (Unable to locate actual module version, using'\ - ' vendor.txt specified version)' + extra_message = ( + " (Unable to locate actual module version, using" + " vendor.txt specified version)" + ) actual_version = expected_version elif parse_version(actual_version) != parse_version(expected_version): - extra_message = ' (CONFLICT: vendor.txt suggests version should'\ - ' be {})'.format(expected_version) - logger.info('%s==%s%s', module_name, actual_version, extra_message) + extra_message = ( + " (CONFLICT: vendor.txt suggests version should" + " be {})".format(expected_version) + ) + logger.info("%s==%s%s", module_name, actual_version, extra_message) def show_vendor_versions() -> None: - logger.info('vendored library versions:') + logger.info("vendored library versions:") vendor_txt_versions = create_vendor_txt_map() with indent_log(): @@ -114,11 +112,11 @@ def show_tags(options: Values) -> None: # Display the target options that were explicitly provided. formatted_target = target_python.format_given() - suffix = '' + suffix = "" if formatted_target: - suffix = f' (target: {formatted_target})' + suffix = f" (target: {formatted_target})" - msg = 'Compatible tags: {}{}'.format(len(tags), suffix) + msg = "Compatible tags: {}{}".format(len(tags), suffix) logger.info(msg) if options.verbose < 1 and len(tags) > tag_limit: @@ -133,8 +131,7 @@ def show_tags(options: Values) -> None: if tags_limited: msg = ( - '...\n' - '[First {tag_limit} tags shown. Pass --verbose to show all.]' + "...\n[First {tag_limit} tags shown. Pass --verbose to show all.]" ).format(tag_limit=tag_limit) logger.info(msg) @@ -142,20 +139,20 @@ def show_tags(options: Values) -> None: def ca_bundle_info(config: Configuration) -> str: levels = set() for key, _ in config.items(): - levels.add(key.split('.')[0]) + levels.add(key.split(".")[0]) if not levels: return "Not specified" - levels_that_override_global = ['install', 'wheel', 'download'] + levels_that_override_global = ["install", "wheel", "download"] global_overriding_level = [ level for level in levels if level in levels_that_override_global ] if not global_overriding_level: - return 'global' + return "global" - if 'global' in levels: - levels.remove('global') + if "global" in levels: + levels.remove("global") return ", ".join(levels) @@ -180,20 +177,21 @@ def run(self, options: Values, args: List[str]) -> int: "details, since the output and options of this command may " "change without notice." 
) - show_value('pip version', get_pip_version()) - show_value('sys.version', sys.version) - show_value('sys.executable', sys.executable) - show_value('sys.getdefaultencoding', sys.getdefaultencoding()) - show_value('sys.getfilesystemencoding', sys.getfilesystemencoding()) + show_value("pip version", get_pip_version()) + show_value("sys.version", sys.version) + show_value("sys.executable", sys.executable) + show_value("sys.getdefaultencoding", sys.getdefaultencoding()) + show_value("sys.getfilesystemencoding", sys.getfilesystemencoding()) show_value( - 'locale.getpreferredencoding', locale.getpreferredencoding(), + "locale.getpreferredencoding", + locale.getpreferredencoding(), ) - show_value('sys.platform', sys.platform) + show_value("sys.platform", sys.platform) show_sys_implementation() show_value("'cert' config value", ca_bundle_info(self.parser.config)) - show_value("REQUESTS_CA_BUNDLE", os.environ.get('REQUESTS_CA_BUNDLE')) - show_value("CURL_CA_BUNDLE", os.environ.get('CURL_CA_BUNDLE')) + show_value("REQUESTS_CA_BUNDLE", os.environ.get("REQUESTS_CA_BUNDLE")) + show_value("CURL_CA_BUNDLE", os.environ.get("CURL_CA_BUNDLE")) show_value("pipenv.patched.notpip._vendor.certifi.where()", where()) show_value("pipenv.patched.notpip._vendor.DEBUNDLED", pipenv.patched.notpip._vendor.DEBUNDLED) diff --git a/pipenv/patched/notpip/_internal/commands/download.py b/pipenv/patched/notpip/_internal/commands/download.py index 1b3c18a04c..2025bc784f 100644 --- a/pipenv/patched/notpip/_internal/commands/download.py +++ b/pipenv/patched/notpip/_internal/commands/download.py @@ -37,7 +37,6 @@ class DownloadCommand(RequirementCommand): def add_options(self) -> None: self.cmd_opts.add_option(cmdoptions.constraints()) self.cmd_opts.add_option(cmdoptions.requirements()) - self.cmd_opts.add_option(cmdoptions.build_dir()) self.cmd_opts.add_option(cmdoptions.no_deps()) self.cmd_opts.add_option(cmdoptions.global_options()) self.cmd_opts.add_option(cmdoptions.no_binary()) @@ -53,11 +52,14 @@ def add_options(self) -> None: self.cmd_opts.add_option(cmdoptions.ignore_requires_python()) self.cmd_opts.add_option( - '-d', '--dest', '--destination-dir', '--destination-directory', - dest='download_dir', - metavar='dir', + "-d", + "--dest", + "--destination-dir", + "--destination-directory", + dest="download_dir", + metavar="dir", default=os.curdir, - help=("Download packages into ."), + help="Download packages into .", ) cmdoptions.add_target_python_options(self.cmd_opts) @@ -111,6 +113,7 @@ def run(self, options: Values, args: List[str]) -> int: finder=finder, download_dir=options.download_dir, use_user_site=False, + verbosity=self.verbosity, ) resolver = self.make_resolver( @@ -123,9 +126,7 @@ def run(self, options: Values, args: List[str]) -> int: self.trace_basic_info(finder) - requirement_set = resolver.resolve( - reqs, check_supported_wheels=True - ) + requirement_set = resolver.resolve(reqs, check_supported_wheels=True) downloaded: List[str] = [] for req in requirement_set.requirements.values(): @@ -134,6 +135,6 @@ def run(self, options: Values, args: List[str]) -> int: preparer.save_linked_requirement(req) downloaded.append(req.name) if downloaded: - write_output('Successfully downloaded %s', ' '.join(downloaded)) + write_output("Successfully downloaded %s", " ".join(downloaded)) return SUCCESS diff --git a/pipenv/patched/notpip/_internal/commands/freeze.py b/pipenv/patched/notpip/_internal/commands/freeze.py index 2344639ff2..90e0afdd2c 100644 --- a/pipenv/patched/notpip/_internal/commands/freeze.py +++ 
b/pipenv/patched/notpip/_internal/commands/freeze.py @@ -8,7 +8,7 @@ from pipenv.patched.notpip._internal.operations.freeze import freeze from pipenv.patched.notpip._internal.utils.compat import stdlib_pkgs -DEV_PKGS = {'pip', 'setuptools', 'distribute', 'wheel'} +DEV_PKGS = {"pip", "setuptools", "distribute", "wheel"} class FreezeCommand(Command): @@ -24,39 +24,52 @@ class FreezeCommand(Command): def add_options(self) -> None: self.cmd_opts.add_option( - '-r', '--requirement', - dest='requirements', - action='append', + "-r", + "--requirement", + dest="requirements", + action="append", default=[], - metavar='file', - help="Use the order in the given requirements file and its " - "comments when generating output. This option can be " - "used multiple times.") + metavar="file", + help=( + "Use the order in the given requirements file and its " + "comments when generating output. This option can be " + "used multiple times." + ), + ) self.cmd_opts.add_option( - '-l', '--local', - dest='local', - action='store_true', + "-l", + "--local", + dest="local", + action="store_true", default=False, - help='If in a virtualenv that has global access, do not output ' - 'globally-installed packages.') + help=( + "If in a virtualenv that has global access, do not output " + "globally-installed packages." + ), + ) self.cmd_opts.add_option( - '--user', - dest='user', - action='store_true', + "--user", + dest="user", + action="store_true", default=False, - help='Only output packages installed in user-site.') + help="Only output packages installed in user-site.", + ) self.cmd_opts.add_option(cmdoptions.list_path()) self.cmd_opts.add_option( - '--all', - dest='freeze_all', - action='store_true', - help='Do not skip these packages in the output:' - ' {}'.format(', '.join(DEV_PKGS))) + "--all", + dest="freeze_all", + action="store_true", + help=( + "Do not skip these packages in the output:" + " {}".format(", ".join(DEV_PKGS)) + ), + ) self.cmd_opts.add_option( - '--exclude-editable', - dest='exclude_editable', - action='store_true', - help='Exclude editable package from output.') + "--exclude-editable", + dest="exclude_editable", + action="store_true", + help="Exclude editable package from output.", + ) self.cmd_opts.add_option(cmdoptions.list_exclude()) self.parser.insert_option_group(0, self.cmd_opts) @@ -80,5 +93,5 @@ def run(self, options: Values, args: List[str]) -> int: skip=skip, exclude_editable=options.exclude_editable, ): - sys.stdout.write(line + '\n') + sys.stdout.write(line + "\n") return SUCCESS diff --git a/pipenv/patched/notpip/_internal/commands/hash.py b/pipenv/patched/notpip/_internal/commands/hash.py index 3955eb3e81..e763e65543 100644 --- a/pipenv/patched/notpip/_internal/commands/hash.py +++ b/pipenv/patched/notpip/_internal/commands/hash.py @@ -20,18 +20,21 @@ class HashCommand(Command): installs. """ - usage = '%prog [options] ...' + usage = "%prog [options] ..." 
ignore_require_venv = True def add_options(self) -> None: self.cmd_opts.add_option( - '-a', '--algorithm', - dest='algorithm', + "-a", + "--algorithm", + dest="algorithm", choices=STRONG_HASHES, - action='store', + action="store", default=FAVORITE_HASH, - help='The hash algorithm to use: one of {}'.format( - ', '.join(STRONG_HASHES))) + help="The hash algorithm to use: one of {}".format( + ", ".join(STRONG_HASHES) + ), + ) self.parser.insert_option_group(0, self.cmd_opts) def run(self, options: Values, args: List[str]) -> int: @@ -41,14 +44,15 @@ def run(self, options: Values, args: List[str]) -> int: algorithm = options.algorithm for path in args: - write_output('%s:\n--hash=%s:%s', - path, algorithm, _hash_of_file(path, algorithm)) + write_output( + "%s:\n--hash=%s:%s", path, algorithm, _hash_of_file(path, algorithm) + ) return SUCCESS def _hash_of_file(path: str, algorithm: str) -> str: """Return the hash digest of a file.""" - with open(path, 'rb') as archive: + with open(path, "rb") as archive: hash = hashlib.new(algorithm) for chunk in read_chunks(archive): hash.update(chunk) diff --git a/pipenv/patched/notpip/_internal/commands/help.py b/pipenv/patched/notpip/_internal/commands/help.py index 5b56dc9a02..184126dac5 100644 --- a/pipenv/patched/notpip/_internal/commands/help.py +++ b/pipenv/patched/notpip/_internal/commands/help.py @@ -33,7 +33,7 @@ def run(self, options: Values, args: List[str]) -> int: if guess: msg.append(f'maybe you meant "{guess}"') - raise CommandError(' - '.join(msg)) + raise CommandError(" - ".join(msg)) command = create_command(cmd_name) command.parser.print_help() diff --git a/pipenv/patched/notpip/_internal/commands/index.py b/pipenv/patched/notpip/_internal/commands/index.py index 1674177299..78982fd03e 100644 --- a/pipenv/patched/notpip/_internal/commands/index.py +++ b/pipenv/patched/notpip/_internal/commands/index.py @@ -44,7 +44,7 @@ def add_options(self) -> None: self.parser.insert_option_group(0, index_opts) self.parser.insert_option_group(0, self.cmd_opts) - def run(self, options: Values, args: List[Any]) -> int: + def run(self, options: Values, args: List[str]) -> int: handlers = { "versions": self.get_available_package_versions, } @@ -97,11 +97,12 @@ def _build_package_finder( link_collector=link_collector, selection_prefs=selection_prefs, target_python=target_python, + use_deprecated_html5lib="html5lib" in options.deprecated_features_enabled, ) def get_available_package_versions(self, options: Values, args: List[Any]) -> None: if len(args) != 1: - raise CommandError('You need to specify exactly one argument') + raise CommandError("You need to specify exactly one argument") target_python = cmdoptions.make_target_python(options) query = args[0] @@ -115,25 +116,24 @@ def get_available_package_versions(self, options: Values, args: List[Any]) -> No ) versions: Iterable[Union[LegacyVersion, Version]] = ( - candidate.version - for candidate in finder.find_all_candidates(query) + candidate.version for candidate in finder.find_all_candidates(query) ) if not options.pre: # Remove prereleases - versions = (version for version in versions - if not version.is_prerelease) + versions = ( + version for version in versions if not version.is_prerelease + ) versions = set(versions) if not versions: raise DistributionNotFound( - 'No matching distribution found for {}'.format(query)) + "No matching distribution found for {}".format(query) + ) - formatted_versions = [str(ver) for ver in sorted( - versions, reverse=True)] + formatted_versions = [str(ver) for ver in 
sorted(versions, reverse=True)] latest = formatted_versions[0] - write_output('{} ({})'.format(query, latest)) - write_output('Available versions: {}'.format( - ', '.join(formatted_versions))) + write_output("{} ({})".format(query, latest)) + write_output("Available versions: {}".format(", ".join(formatted_versions))) print_dist_installation_info(query, latest) diff --git a/pipenv/patched/notpip/_internal/commands/install.py b/pipenv/patched/notpip/_internal/commands/install.py index d4e2296af2..ec8a695a80 100644 --- a/pipenv/patched/notpip/_internal/commands/install.py +++ b/pipenv/patched/notpip/_internal/commands/install.py @@ -86,87 +86,103 @@ def add_options(self) -> None: self.cmd_opts.add_option(cmdoptions.editable()) self.cmd_opts.add_option( - '-t', '--target', - dest='target_dir', - metavar='dir', + "-t", + "--target", + dest="target_dir", + metavar="dir", default=None, - help='Install packages into . ' - 'By default this will not replace existing files/folders in ' - '. Use --upgrade to replace existing packages in ' - 'with new versions.' + help=( + "Install packages into . " + "By default this will not replace existing files/folders in " + ". Use --upgrade to replace existing packages in " + "with new versions." + ), ) cmdoptions.add_target_python_options(self.cmd_opts) self.cmd_opts.add_option( - '--user', - dest='use_user_site', - action='store_true', - help="Install to the Python user install directory for your " - "platform. Typically ~/.local/, or %APPDATA%\\Python on " - "Windows. (See the Python documentation for site.USER_BASE " - "for full details.)") + "--user", + dest="use_user_site", + action="store_true", + help=( + "Install to the Python user install directory for your " + "platform. Typically ~/.local/, or %APPDATA%\\Python on " + "Windows. (See the Python documentation for site.USER_BASE " + "for full details.)" + ), + ) self.cmd_opts.add_option( - '--no-user', - dest='use_user_site', - action='store_false', - help=SUPPRESS_HELP) + "--no-user", + dest="use_user_site", + action="store_false", + help=SUPPRESS_HELP, + ) self.cmd_opts.add_option( - '--root', - dest='root_path', - metavar='dir', + "--root", + dest="root_path", + metavar="dir", default=None, - help="Install everything relative to this alternate root " - "directory.") + help="Install everything relative to this alternate root directory.", + ) self.cmd_opts.add_option( - '--prefix', - dest='prefix_path', - metavar='dir', + "--prefix", + dest="prefix_path", + metavar="dir", default=None, - help="Installation prefix where lib, bin and other top-level " - "folders are placed") - - self.cmd_opts.add_option(cmdoptions.build_dir()) + help=( + "Installation prefix where lib, bin and other top-level " + "folders are placed" + ), + ) self.cmd_opts.add_option(cmdoptions.src()) self.cmd_opts.add_option( - '-U', '--upgrade', - dest='upgrade', - action='store_true', - help='Upgrade all specified packages to the newest available ' - 'version. The handling of dependencies depends on the ' - 'upgrade-strategy used.' + "-U", + "--upgrade", + dest="upgrade", + action="store_true", + help=( + "Upgrade all specified packages to the newest available " + "version. The handling of dependencies depends on the " + "upgrade-strategy used." + ), ) self.cmd_opts.add_option( - '--upgrade-strategy', - dest='upgrade_strategy', - default='only-if-needed', - choices=['only-if-needed', 'eager'], - help='Determines how dependency upgrading should be handled ' - '[default: %default]. 
' - '"eager" - dependencies are upgraded regardless of ' - 'whether the currently installed version satisfies the ' - 'requirements of the upgraded package(s). ' - '"only-if-needed" - are upgraded only when they do not ' - 'satisfy the requirements of the upgraded package(s).' + "--upgrade-strategy", + dest="upgrade_strategy", + default="only-if-needed", + choices=["only-if-needed", "eager"], + help=( + "Determines how dependency upgrading should be handled " + "[default: %default]. " + '"eager" - dependencies are upgraded regardless of ' + "whether the currently installed version satisfies the " + "requirements of the upgraded package(s). " + '"only-if-needed" - are upgraded only when they do not ' + "satisfy the requirements of the upgraded package(s)." + ), ) self.cmd_opts.add_option( - '--force-reinstall', - dest='force_reinstall', - action='store_true', - help='Reinstall all packages even if they are already ' - 'up-to-date.') + "--force-reinstall", + dest="force_reinstall", + action="store_true", + help="Reinstall all packages even if they are already up-to-date.", + ) self.cmd_opts.add_option( - '-I', '--ignore-installed', - dest='ignore_installed', - action='store_true', - help='Ignore the installed packages, overwriting them. ' - 'This can break your system if the existing package ' - 'is of a different version or was installed ' - 'with a different package manager!' + "-I", + "--ignore-installed", + dest="ignore_installed", + action="store_true", + help=( + "Ignore the installed packages, overwriting them. " + "This can break your system if the existing package " + "is of a different version or was installed " + "with a different package manager!" + ), ) self.cmd_opts.add_option(cmdoptions.ignore_requires_python()) @@ -249,11 +265,14 @@ def run(self, options: Values, args: List[str]) -> int: if options.target_dir: options.ignore_installed = True options.target_dir = os.path.abspath(options.target_dir) - if (os.path.exists(options.target_dir) and not - os.path.isdir(options.target_dir)): + if ( + # fmt: off + os.path.exists(options.target_dir) and + not os.path.isdir(options.target_dir) + # fmt: on + ): raise CommandError( - "Target path exists but is not a directory, will not " - "continue." + "Target path exists but is not a directory, will not continue." ) # Create a target directory for using with the target option @@ -285,9 +304,13 @@ def run(self, options: Values, args: List[str]) -> int: try: reqs = self.get_requirements(args, options, finder, session) - reject_location_related_install_options( - reqs, options.install_options - ) + # Only when installing is it permitted to use PEP 660. + # In other circumstances (pip wheel, pip download) we generate + # regular (i.e. non editable) metadata and wheels. + for req in reqs: + req.permit_editable_wheels = True + + reject_location_related_install_options(reqs, options.install_options) preparer = self.make_requirement_preparer( temp_build_dir=directory, @@ -296,6 +319,7 @@ def run(self, options: Values, args: List[str]) -> int: session=session, finder=finder, use_user_site=options.use_user_site, + verbosity=self.verbosity, ) resolver = self.make_resolver( preparer=preparer, @@ -324,19 +348,14 @@ def run(self, options: Values, args: List[str]) -> int: # If we're not replacing an already installed pip, # we're not modifying it. 
modifying_pip = pip_req.satisfied_by is None - protect_pip_from_modification_on_windows( - modifying_pip=modifying_pip - ) + protect_pip_from_modification_on_windows(modifying_pip=modifying_pip) - check_binary_allowed = get_check_binary_allowed( - finder.format_control - ) + check_binary_allowed = get_check_binary_allowed(finder.format_control) reqs_to_build = [ - r for r in requirement_set.requirements.values() - if should_build_for_install_command( - r, check_binary_allowed - ) + r + for r in requirement_set.requirements.values() + if should_build_for_install_command(r, check_binary_allowed) ] _, build_failures = build( @@ -347,36 +366,32 @@ def run(self, options: Values, args: List[str]) -> int: global_options=[], ) - # If we're using PEP 517, we cannot do a direct install + # If we're using PEP 517, we cannot do a legacy setup.py install # so we fail here. pep517_build_failure_names: List[str] = [ - r.name # type: ignore - for r in build_failures if r.use_pep517 + r.name for r in build_failures if r.use_pep517 # type: ignore ] if pep517_build_failure_names: raise InstallationError( - "Could not build wheels for {} which use" - " PEP 517 and cannot be installed directly".format( + "Could not build wheels for {}, which is required to " + "install pyproject.toml-based projects".format( ", ".join(pep517_build_failure_names) ) ) # For now, we just warn about failures building legacy - # requirements, as we'll fall through to a direct - # install for those. + # requirements, as we'll fall through to a setup.py install for + # those. for r in build_failures: if not r.use_pep517: r.legacy_install_reason = 8368 - to_install = resolver.get_installation_order( - requirement_set - ) + to_install = resolver.get_installation_order(requirement_set) # Check for conflicts in the package set we're installing. conflicts: Optional[ConflictDetails] = None should_warn_about_conflicts = ( - not options.ignore_dependencies and - options.warn_about_conflicts + not options.ignore_dependencies and options.warn_about_conflicts ) if should_warn_about_conflicts: conflicts = self._determine_conflicts(to_install) @@ -408,7 +423,7 @@ def run(self, options: Values, args: List[str]) -> int: ) env = get_environment(lib_locations) - installed.sort(key=operator.attrgetter('name')) + installed.sort(key=operator.attrgetter("name")) items = [] for result in installed: item = result.name @@ -426,16 +441,19 @@ def run(self, options: Values, args: List[str]) -> int: resolver_variant=self.determine_resolver_variant(options), ) - installed_desc = ' '.join(items) + installed_desc = " ".join(items) if installed_desc: write_output( - 'Successfully installed %s', installed_desc, + "Successfully installed %s", + installed_desc, ) except OSError as error: - show_traceback = (self.verbosity >= 1) + show_traceback = self.verbosity >= 1 message = create_os_error_message( - error, show_traceback, options.use_user_site, + error, + show_traceback, + options.use_user_site, ) logger.error(message, exc_info=show_traceback) # noqa @@ -461,7 +479,7 @@ def _handle_target_dir( # Checking both purelib and platlib directories for installed # packages to be moved to target directory - scheme = get_scheme('', home=target_temp_dir.path) + scheme = get_scheme("", home=target_temp_dir.path) purelib_dir = scheme.purelib platlib_dir = scheme.platlib data_dir = scheme.data @@ -483,18 +501,18 @@ def _handle_target_dir( if os.path.exists(target_item_dir): if not upgrade: logger.warning( - 'Target directory %s already exists. 
Specify ' - '--upgrade to force replacement.', - target_item_dir + "Target directory %s already exists. Specify " + "--upgrade to force replacement.", + target_item_dir, ) continue if os.path.islink(target_item_dir): logger.warning( - 'Target directory %s already exists and is ' - 'a link. pip will not automatically replace ' - 'links, please remove if replacement is ' - 'desired.', - target_item_dir + "Target directory %s already exists and is " + "a link. pip will not automatically replace " + "links, please remove if replacement is " + "desired.", + target_item_dir, ) continue if os.path.isdir(target_item_dir): @@ -502,10 +520,7 @@ def _handle_target_dir( else: os.remove(target_item_dir) - shutil.move( - os.path.join(lib_dir, item), - target_item_dir - ) + shutil.move(os.path.join(lib_dir, item), target_item_dir) def _determine_conflicts( self, to_install: List[InstallRequirement] @@ -567,7 +582,7 @@ def _warn_about_conflicts( requirement=req, dep_name=dep_name, dep_version=dep_version, - you=("you" if resolver_variant == "2020-resolver" else "you'll") + you=("you" if resolver_variant == "2020-resolver" else "you'll"), ) parts.append(message) @@ -575,14 +590,14 @@ def _warn_about_conflicts( def get_lib_location_guesses( - user: bool = False, - home: Optional[str] = None, - root: Optional[str] = None, - isolated: bool = False, - prefix: Optional[str] = None + user: bool = False, + home: Optional[str] = None, + root: Optional[str] = None, + isolated: bool = False, + prefix: Optional[str] = None, ) -> List[str]: scheme = get_scheme( - '', + "", user=user, home=home, root=root, @@ -594,8 +609,8 @@ def get_lib_location_guesses( def site_packages_writable(root: Optional[str], isolated: bool) -> bool: return all( - test_writable_dir(d) for d in set( - get_lib_location_guesses(root=root, isolated=isolated)) + test_writable_dir(d) + for d in set(get_lib_location_guesses(root=root, isolated=isolated)) ) @@ -653,8 +668,10 @@ def decide_user_install( logger.debug("Non-user install because site-packages writeable") return False - logger.info("Defaulting to user installation because normal site-packages " - "is not writeable") + logger.info( + "Defaulting to user installation because normal site-packages " + "is not writeable" + ) return True @@ -664,6 +681,7 @@ def reject_location_related_install_options( """If any location-changing --install-option arguments were passed for requirements or on the command-line, then show a deprecation warning. """ + def format_options(option_names: Iterable[str]) -> List[str]: return ["--{}".format(name.replace("_", "-")) for name in option_names] @@ -683,9 +701,7 @@ def format_options(option_names: Iterable[str]) -> List[str]: location_options = parse_distutils_args(options) if location_options: offenders.append( - "{!r} from command line".format( - format_options(location_options.keys()) - ) + "{!r} from command line".format(format_options(location_options.keys())) ) if not offenders: @@ -694,9 +710,7 @@ def format_options(option_names: Iterable[str]) -> List[str]: raise CommandError( "Location-changing options found in --install-option: {}." 
" This is unsupported, use pip-level options like --user," - " --prefix, --root, and --target instead.".format( - "; ".join(offenders) - ) + " --prefix, --root, and --target instead.".format("; ".join(offenders)) ) @@ -727,18 +741,25 @@ def create_os_error_message( permissions_part = "Check the permissions" if not running_under_virtualenv() and not using_user_site: - parts.extend([ - user_option_part, " or ", - permissions_part.lower(), - ]) + parts.extend( + [ + user_option_part, + " or ", + permissions_part.lower(), + ] + ) else: parts.append(permissions_part) parts.append(".\n") # Suggest the user to enable Long Paths if path length is # more than 260 - if (WINDOWS and error.errno == errno.ENOENT and error.filename and - len(error.filename) > 260): + if ( + WINDOWS + and error.errno == errno.ENOENT + and error.filename + and len(error.filename) > 260 + ): parts.append( "HINT: This error might have occurred since " "this system does not have Windows Long Path " diff --git a/pipenv/patched/notpip/_internal/commands/list.py b/pipenv/patched/notpip/_internal/commands/list.py index 61d5ad6d72..6ec466bd19 100644 --- a/pipenv/patched/notpip/_internal/commands/list.py +++ b/pipenv/patched/notpip/_internal/commands/list.py @@ -14,8 +14,8 @@ from pipenv.patched.notpip._internal.metadata import BaseDistribution, get_environment from pipenv.patched.notpip._internal.models.selection_prefs import SelectionPreferences from pipenv.patched.notpip._internal.network.session import PipSession -from pipenv.patched.notpip._internal.utils.misc import stdlib_pkgs, tabulate, write_output -from pipenv.patched.notpip._internal.utils.parallel import map_multithread +from pipenv.patched.notpip._internal.utils.compat import stdlib_pkgs +from pipenv.patched.notpip._internal.utils.misc import tabulate, write_output if TYPE_CHECKING: from pipenv.patched.notpip._internal.metadata.base import DistributionVersion @@ -26,6 +26,7 @@ class _DistWithLatestInfo(BaseDistribution): These will be populated during ``get_outdated()``. This is dirty but makes the rest of the code much cleaner. """ + latest_version: DistributionVersion latest_filetype: str @@ -48,77 +49,85 @@ class ListCommand(IndexGroupCommand): def add_options(self) -> None: self.cmd_opts.add_option( - '-o', '--outdated', - action='store_true', + "-o", + "--outdated", + action="store_true", default=False, - help='List outdated packages') + help="List outdated packages", + ) self.cmd_opts.add_option( - '-u', '--uptodate', - action='store_true', + "-u", + "--uptodate", + action="store_true", default=False, - help='List uptodate packages') + help="List uptodate packages", + ) self.cmd_opts.add_option( - '-e', '--editable', - action='store_true', + "-e", + "--editable", + action="store_true", default=False, - help='List editable projects.') + help="List editable projects.", + ) self.cmd_opts.add_option( - '-l', '--local', - action='store_true', + "-l", + "--local", + action="store_true", default=False, - help=('If in a virtualenv that has global access, do not list ' - 'globally-installed packages.'), + help=( + "If in a virtualenv that has global access, do not list " + "globally-installed packages." 
+ ), ) self.cmd_opts.add_option( - '--user', - dest='user', - action='store_true', + "--user", + dest="user", + action="store_true", default=False, - help='Only output packages installed in user-site.') + help="Only output packages installed in user-site.", + ) self.cmd_opts.add_option(cmdoptions.list_path()) self.cmd_opts.add_option( - '--pre', - action='store_true', + "--pre", + action="store_true", default=False, - help=("Include pre-release and development versions. By default, " - "pip only finds stable versions."), + help=( + "Include pre-release and development versions. By default, " + "pip only finds stable versions." + ), ) self.cmd_opts.add_option( - '--format', - action='store', - dest='list_format', + "--format", + action="store", + dest="list_format", default="columns", - choices=('columns', 'freeze', 'json'), - help="Select the output format among: columns (default), freeze, " - "or json", + choices=("columns", "freeze", "json"), + help="Select the output format among: columns (default), freeze, or json", ) self.cmd_opts.add_option( - '--not-required', - action='store_true', - dest='not_required', - help="List packages that are not dependencies of " - "installed packages.", + "--not-required", + action="store_true", + dest="not_required", + help="List packages that are not dependencies of installed packages.", ) self.cmd_opts.add_option( - '--exclude-editable', - action='store_false', - dest='include_editable', - help='Exclude editable package from output.', + "--exclude-editable", + action="store_false", + dest="include_editable", + help="Exclude editable package from output.", ) self.cmd_opts.add_option( - '--include-editable', - action='store_true', - dest='include_editable', - help='Include editable package from output.', + "--include-editable", + action="store_true", + dest="include_editable", + help="Include editable package from output.", default=True, ) self.cmd_opts.add_option(cmdoptions.list_exclude()) - index_opts = cmdoptions.make_option_group( - cmdoptions.index_group, self.parser - ) + index_opts = cmdoptions.make_option_group(cmdoptions.index_group, self.parser) self.parser.insert_option_group(0, index_opts) self.parser.insert_option_group(0, self.cmd_opts) @@ -140,12 +149,12 @@ def _build_package_finder( return PackageFinder.create( link_collector=link_collector, selection_prefs=selection_prefs, + use_deprecated_html5lib="html5lib" in options.deprecated_features_enabled, ) def run(self, options: Values, args: List[str]) -> int: if options.outdated and options.uptodate: - raise CommandError( - "Options --outdated and --uptodate cannot be combined.") + raise CommandError("Options --outdated and --uptodate cannot be combined.") cmdoptions.check_list_path_option(options) @@ -183,7 +192,8 @@ def get_outdated( self, packages: "_ProcessedDists", options: Values ) -> "_ProcessedDists": return [ - dist for dist in self.iter_packages_latest_infos(packages, options) + dist + for dist in self.iter_packages_latest_infos(packages, options) if dist.latest_version > dist.version ] @@ -191,7 +201,8 @@ def get_uptodate( self, packages: "_ProcessedDists", options: Values ) -> "_ProcessedDists": return [ - dist for dist in self.iter_packages_latest_infos(packages, options) + dist + for dist in self.iter_packages_latest_infos(packages, options) if dist.latest_version == dist.version ] @@ -216,13 +227,16 @@ def iter_packages_latest_infos( finder = self._build_package_finder(options, session) def latest_info( - dist: "_DistWithLatestInfo" + dist: "_DistWithLatestInfo", ) -> 
Optional["_DistWithLatestInfo"]: all_candidates = finder.find_all_candidates(dist.canonical_name) if not options.pre: # Remove prereleases - all_candidates = [candidate for candidate in all_candidates - if not candidate.version.is_prerelease] + all_candidates = [ + candidate + for candidate in all_candidates + if not candidate.version.is_prerelease + ] evaluator = finder.make_candidate_evaluator( project_name=dist.canonical_name, @@ -233,14 +247,14 @@ def latest_info( remote_version = best_candidate.version if best_candidate.link.is_wheel: - typ = 'wheel' + typ = "wheel" else: - typ = 'sdist' + typ = "sdist" dist.latest_version = remote_version dist.latest_filetype = typ return dist - for dist in map_multithread(latest_info, packages): + for dist in map(latest_info, packages): if dist is not None: yield dist @@ -251,17 +265,18 @@ def output_package_listing( packages, key=lambda dist: dist.canonical_name, ) - if options.list_format == 'columns' and packages: + if options.list_format == "columns" and packages: data, header = format_for_columns(packages, options) self.output_package_listing_columns(data, header) - elif options.list_format == 'freeze': + elif options.list_format == "freeze": for dist in packages: if options.verbose >= 1: - write_output("%s==%s (%s)", dist.raw_name, - dist.version, dist.location) + write_output( + "%s==%s (%s)", dist.raw_name, dist.version, dist.location + ) else: write_output("%s==%s", dist.raw_name, dist.version) - elif options.list_format == 'json': + elif options.list_format == "json": write_output(format_for_json(packages, options)) def output_package_listing_columns( @@ -275,7 +290,7 @@ def output_package_listing_columns( # Create and add a separator. if len(data) > 0: - pkg_strings.insert(1, " ".join(map(lambda x: '-' * x, sizes))) + pkg_strings.insert(1, " ".join(map(lambda x: "-" * x, sizes))) for val in pkg_strings: write_output(val) @@ -288,19 +303,22 @@ def format_for_columns( Convert the package data into something usable by output_package_listing_columns. """ + header = ["Package", "Version"] + running_outdated = options.outdated - # Adjust the header for the `pip list --outdated` case. 
if running_outdated: - header = ["Package", "Version", "Latest", "Type"] - else: - header = ["Package", "Version"] + header.extend(["Latest", "Type"]) - data = [] - if options.verbose >= 1 or any(x.editable for x in pkgs): + has_editables = any(x.editable for x in pkgs) + if has_editables: + header.append("Editable project location") + + if options.verbose >= 1: header.append("Location") if options.verbose >= 1: header.append("Installer") + data = [] for proj in pkgs: # if we're working on the 'outdated' list, separate out the # latest_version and type @@ -310,7 +328,10 @@ def format_for_columns( row.append(str(proj.latest_version)) row.append(proj.latest_filetype) - if options.verbose >= 1 or proj.editable: + if has_editables: + row.append(proj.editable_project_location or "") + + if options.verbose >= 1: row.append(proj.location or "") if options.verbose >= 1: row.append(proj.installer) @@ -324,14 +345,17 @@ def format_for_json(packages: "_ProcessedDists", options: Values) -> str: data = [] for dist in packages: info = { - 'name': dist.raw_name, - 'version': str(dist.version), + "name": dist.raw_name, + "version": str(dist.version), } if options.verbose >= 1: - info['location'] = dist.location or "" - info['installer'] = dist.installer + info["location"] = dist.location or "" + info["installer"] = dist.installer if options.outdated: - info['latest_version'] = str(dist.latest_version) - info['latest_filetype'] = dist.latest_filetype + info["latest_version"] = str(dist.latest_version) + info["latest_filetype"] = dist.latest_filetype + editable_project_location = dist.editable_project_location + if editable_project_location: + info["editable_project_location"] = editable_project_location data.append(info) return json.dumps(data) diff --git a/pipenv/patched/notpip/_internal/commands/search.py b/pipenv/patched/notpip/_internal/commands/search.py index 29a0c57ec5..0b5ed0d3fd 100644 --- a/pipenv/patched/notpip/_internal/commands/search.py +++ b/pipenv/patched/notpip/_internal/commands/search.py @@ -27,6 +27,7 @@ class TransformedHit(TypedDict): summary: str versions: List[str] + logger = logging.getLogger(__name__) @@ -39,17 +40,19 @@ class SearchCommand(Command, SessionCommandMixin): def add_options(self) -> None: self.cmd_opts.add_option( - '-i', '--index', - dest='index', - metavar='URL', + "-i", + "--index", + dest="index", + metavar="URL", default=PyPI.pypi_url, - help='Base URL of Python Package Index (default %default)') + help="Base URL of Python Package Index (default %default)", + ) self.parser.insert_option_group(0, self.cmd_opts) def run(self, options: Values, args: List[str]) -> int: if not args: - raise CommandError('Missing required argument (search query).') + raise CommandError("Missing required argument (search query).") query = args pypi_hits = self.search(query, options) hits = transform_hits(pypi_hits) @@ -71,7 +74,7 @@ def search(self, query: List[str], options: Values) -> List[Dict[str, str]]: transport = PipXmlrpcTransport(index_url, session) pypi = xmlrpc.client.ServerProxy(index_url, transport) try: - hits = pypi.search({'name': query, 'summary': query}, 'or') + hits = pypi.search({"name": query, "summary": query}, "or") except xmlrpc.client.Fault as fault: message = "XMLRPC request failed [code: {code}]\n{string}".format( code=fault.faultCode, @@ -90,22 +93,22 @@ def transform_hits(hits: List[Dict[str, str]]) -> List["TransformedHit"]: """ packages: Dict[str, "TransformedHit"] = OrderedDict() for hit in hits: - name = hit['name'] - summary = hit['summary'] - version = 
hit['version'] + name = hit["name"] + summary = hit["summary"] + version = hit["version"] if name not in packages.keys(): packages[name] = { - 'name': name, - 'summary': summary, - 'versions': [version], + "name": name, + "summary": summary, + "versions": [version], } else: - packages[name]['versions'].append(version) + packages[name]["versions"].append(version) # if this is the highest version, replace summary and score - if version == highest_version(packages[name]['versions']): - packages[name]['summary'] = summary + if version == highest_version(packages[name]["versions"]): + packages[name]["summary"] = summary return list(packages.values()) @@ -116,14 +119,17 @@ def print_dist_installation_info(name: str, latest: str) -> None: if dist is not None: with indent_log(): if dist.version == latest: - write_output('INSTALLED: %s (latest)', dist.version) + write_output("INSTALLED: %s (latest)", dist.version) else: - write_output('INSTALLED: %s', dist.version) + write_output("INSTALLED: %s", dist.version) if parse_version(latest).pre: - write_output('LATEST: %s (pre-release; install' - ' with "pip install --pre")', latest) + write_output( + "LATEST: %s (pre-release; install" + " with `pip install --pre`)", + latest, + ) else: - write_output('LATEST: %s', latest) + write_output("LATEST: %s", latest) def print_results( @@ -134,25 +140,29 @@ def print_results( if not hits: return if name_column_width is None: - name_column_width = max([ - len(hit['name']) + len(highest_version(hit.get('versions', ['-']))) - for hit in hits - ]) + 4 + name_column_width = ( + max( + [ + len(hit["name"]) + len(highest_version(hit.get("versions", ["-"]))) + for hit in hits + ] + ) + + 4 + ) for hit in hits: - name = hit['name'] - summary = hit['summary'] or '' - latest = highest_version(hit.get('versions', ['-'])) + name = hit["name"] + summary = hit["summary"] or "" + latest = highest_version(hit.get("versions", ["-"])) if terminal_width is not None: target_width = terminal_width - name_column_width - 5 if target_width > 10: # wrap and indent summary to fit terminal summary_lines = textwrap.wrap(summary, target_width) - summary = ('\n' + ' ' * (name_column_width + 3)).join( - summary_lines) + summary = ("\n" + " " * (name_column_width + 3)).join(summary_lines) - name_latest = f'{name} ({latest})' - line = f'{name_latest:{name_column_width}} - {summary}' + name_latest = f"{name} ({latest})" + line = f"{name_latest:{name_column_width}} - {summary}" try: write_output(line) print_dist_installation_info(name, latest) diff --git a/pipenv/patched/notpip/_internal/commands/show.py b/pipenv/patched/notpip/_internal/commands/show.py index e80677ba6b..ed99966e2f 100644 --- a/pipenv/patched/notpip/_internal/commands/show.py +++ b/pipenv/patched/notpip/_internal/commands/show.py @@ -1,8 +1,6 @@ -import csv import logging -import pathlib from optparse import Values -from typing import Iterator, List, NamedTuple, Optional, Tuple +from typing import Iterator, List, NamedTuple, Optional from pipenv.patched.notpip._vendor.packaging.utils import canonicalize_name @@ -27,23 +25,26 @@ class ShowCommand(Command): def add_options(self) -> None: self.cmd_opts.add_option( - '-f', '--files', - dest='files', - action='store_true', + "-f", + "--files", + dest="files", + action="store_true", default=False, - help='Show the full list of installed files for each package.') + help="Show the full list of installed files for each package.", + ) self.parser.insert_option_group(0, self.cmd_opts) def run(self, options: Values, args: List[str]) -> int: 
if not args: - logger.warning('ERROR: Please provide a package name or names.') + logger.warning("ERROR: Please provide a package name or names.") return ERROR query = args results = search_packages_info(query) if not print_results( - results, list_files=options.files, verbose=options.verbose): + results, list_files=options.files, verbose=options.verbose + ): return ERROR return SUCCESS @@ -66,33 +67,6 @@ class _PackageInfo(NamedTuple): files: Optional[List[str]] -def _covert_legacy_entry(entry: Tuple[str, ...], info: Tuple[str, ...]) -> str: - """Convert a legacy installed-files.txt path into modern RECORD path. - - The legacy format stores paths relative to the info directory, while the - modern format stores paths relative to the package root, e.g. the - site-packages directory. - - :param entry: Path parts of the installed-files.txt entry. - :param info: Path parts of the egg-info directory relative to package root. - :returns: The converted entry. - - For best compatibility with symlinks, this does not use ``abspath()`` or - ``Path.resolve()``, but tries to work with path parts: - - 1. While ``entry`` starts with ``..``, remove the equal amounts of parts - from ``info``; if ``info`` is empty, start appending ``..`` instead. - 2. Join the two directly. - """ - while entry and entry[0] == "..": - if not info or info[-1] == "..": - info += ("..",) - else: - info = info[:-1] - entry = entry[1:] - return str(pathlib.Path(*info, *entry)) - - def search_packages_info(query: List[str]) -> Iterator[_PackageInfo]: """ Gather details from installed distributions. Print distribution name, @@ -102,53 +76,20 @@ def search_packages_info(query: List[str]) -> Iterator[_PackageInfo]: """ env = get_default_environment() - installed = { - dist.canonical_name: dist - for dist in env.iter_distributions() - } + installed = {dist.canonical_name: dist for dist in env.iter_distributions()} query_names = [canonicalize_name(name) for name in query] missing = sorted( [name for name, pkg in zip(query, query_names) if pkg not in installed] ) if missing: - logger.warning('Package(s) not found: %s', ', '.join(missing)) + logger.warning("Package(s) not found: %s", ", ".join(missing)) - def _get_requiring_packages(current_dist: BaseDistribution) -> List[str]: - return [ + def _get_requiring_packages(current_dist: BaseDistribution) -> Iterator[str]: + return ( dist.metadata["Name"] or "UNKNOWN" for dist in installed.values() - if current_dist.canonical_name in { - canonicalize_name(d.name) for d in dist.iter_dependencies() - } - ] - - def _files_from_record(dist: BaseDistribution) -> Optional[Iterator[str]]: - try: - text = dist.read_text('RECORD') - except FileNotFoundError: - return None - # This extra Path-str cast normalizes entries. - return (str(pathlib.Path(row[0])) for row in csv.reader(text.splitlines())) - - def _files_from_legacy(dist: BaseDistribution) -> Optional[Iterator[str]]: - try: - text = dist.read_text('installed-files.txt') - except FileNotFoundError: - return None - paths = (p for p in text.splitlines(keepends=False) if p) - root = dist.location - info = dist.info_directory - if root is None or info is None: - return paths - try: - info_rel = pathlib.Path(info).relative_to(root) - except ValueError: # info is not relative to root. - return paths - if not info_rel.parts: # info *is* root. 
- return paths - return ( - _covert_legacy_entry(pathlib.Path(p).parts, info_rel.parts) - for p in paths + if current_dist.canonical_name + in {canonicalize_name(d.name) for d in dist.iter_dependencies()} ) for query_name in query_names: @@ -157,13 +98,16 @@ def _files_from_legacy(dist: BaseDistribution) -> Optional[Iterator[str]]: except KeyError: continue + requires = sorted((req.name for req in dist.iter_dependencies()), key=str.lower) + required_by = sorted(_get_requiring_packages(dist), key=str.lower) + try: - entry_points_text = dist.read_text('entry_points.txt') + entry_points_text = dist.read_text("entry_points.txt") entry_points = entry_points_text.splitlines(keepends=False) except FileNotFoundError: entry_points = [] - files_iter = _files_from_record(dist) or _files_from_legacy(dist) + files_iter = dist.iter_declared_entries() if files_iter is None: files: Optional[List[str]] = None else: @@ -175,8 +119,8 @@ def _files_from_legacy(dist: BaseDistribution) -> Optional[Iterator[str]]: name=dist.raw_name, version=str(dist.version), location=dist.location or "", - requires=[req.name for req in dist.iter_dependencies()], - required_by=_get_requiring_packages(dist), + requires=requires, + required_by=required_by, installer=dist.installer, metadata_version=dist.metadata_version or "", classifiers=metadata.get_all("Classifier", []), @@ -212,8 +156,8 @@ def print_results( write_output("Author-email: %s", dist.author_email) write_output("License: %s", dist.license) write_output("Location: %s", dist.location) - write_output("Requires: %s", ', '.join(dist.requires)) - write_output("Required-by: %s", ', '.join(dist.required_by)) + write_output("Requires: %s", ", ".join(dist.requires)) + write_output("Required-by: %s", ", ".join(dist.required_by)) if verbose: write_output("Metadata-Version: %s", dist.metadata_version) diff --git a/pipenv/patched/notpip/_internal/commands/uninstall.py b/pipenv/patched/notpip/_internal/commands/uninstall.py index ebc380c31a..2b67216fcd 100644 --- a/pipenv/patched/notpip/_internal/commands/uninstall.py +++ b/pipenv/patched/notpip/_internal/commands/uninstall.py @@ -35,19 +35,24 @@ class UninstallCommand(Command, SessionCommandMixin): def add_options(self) -> None: self.cmd_opts.add_option( - '-r', '--requirement', - dest='requirements', - action='append', + "-r", + "--requirement", + dest="requirements", + action="append", default=[], - metavar='file', - help='Uninstall all the packages listed in the given requirements ' - 'file. This option can be used multiple times.', + metavar="file", + help=( + "Uninstall all the packages listed in the given requirements " + "file. This option can be used multiple times." 
+ ), ) self.cmd_opts.add_option( - '-y', '--yes', - dest='yes', - action='store_true', - help="Don't ask for confirmation of uninstall deletions.") + "-y", + "--yes", + dest="yes", + action="store_true", + help="Don't ask for confirmation of uninstall deletions.", + ) self.parser.insert_option_group(0, self.cmd_opts) @@ -57,7 +62,8 @@ def run(self, options: Values, args: List[str]) -> int: reqs_to_uninstall = {} for name in args: req = install_req_from_line( - name, isolated=options.isolated_mode, + name, + isolated=options.isolated_mode, ) if req.name: reqs_to_uninstall[canonicalize_name(req.name)] = req @@ -70,18 +76,16 @@ def run(self, options: Values, args: List[str]) -> int: ) for filename in options.requirements: for parsed_req in parse_requirements( - filename, - options=options, - session=session): + filename, options=options, session=session + ): req = install_req_from_parsed_requirement( - parsed_req, - isolated=options.isolated_mode + parsed_req, isolated=options.isolated_mode ) if req.name: reqs_to_uninstall[canonicalize_name(req.name)] = req if not reqs_to_uninstall: raise InstallationError( - f'You must give at least one requirement to {self.name} (see ' + f"You must give at least one requirement to {self.name} (see " f'"pip help {self.name}")' ) @@ -91,7 +95,8 @@ def run(self, options: Values, args: List[str]) -> int: for req in reqs_to_uninstall.values(): uninstall_pathset = req.uninstall( - auto_confirm=options.yes, verbose=self.verbosity > 0, + auto_confirm=options.yes, + verbose=self.verbosity > 0, ) if uninstall_pathset: uninstall_pathset.commit() diff --git a/pipenv/patched/notpip/_internal/commands/wheel.py b/pipenv/patched/notpip/_internal/commands/wheel.py index ffb3d700c9..3962a371ce 100644 --- a/pipenv/patched/notpip/_internal/commands/wheel.py +++ b/pipenv/patched/notpip/_internal/commands/wheel.py @@ -43,12 +43,15 @@ class WheelCommand(RequirementCommand): def add_options(self) -> None: self.cmd_opts.add_option( - '-w', '--wheel-dir', - dest='wheel_dir', - metavar='dir', + "-w", + "--wheel-dir", + dest="wheel_dir", + metavar="dir", default=os.curdir, - help=("Build wheels into , where the default is the " - "current working directory."), + help=( + "Build wheels into , where the default is the " + "current working directory." + ), ) self.cmd_opts.add_option(cmdoptions.no_binary()) self.cmd_opts.add_option(cmdoptions.only_binary()) @@ -62,13 +65,12 @@ def add_options(self) -> None: self.cmd_opts.add_option(cmdoptions.src()) self.cmd_opts.add_option(cmdoptions.ignore_requires_python()) self.cmd_opts.add_option(cmdoptions.no_deps()) - self.cmd_opts.add_option(cmdoptions.build_dir()) self.cmd_opts.add_option(cmdoptions.progress_bar()) self.cmd_opts.add_option( - '--no-verify', - dest='no_verify', - action='store_true', + "--no-verify", + dest="no_verify", + action="store_true", default=False, help="Don't verify if built wheel is valid.", ) @@ -77,11 +79,13 @@ def add_options(self) -> None: self.cmd_opts.add_option(cmdoptions.global_options()) self.cmd_opts.add_option( - '--pre', - action='store_true', + "--pre", + action="store_true", default=False, - help=("Include pre-release and development versions. By default, " - "pip only finds stable versions."), + help=( + "Include pre-release and development versions. By default, " + "pip only finds stable versions." 
+ ), ) self.cmd_opts.add_option(cmdoptions.require_hashes()) @@ -124,6 +128,7 @@ def run(self, options: Values, args: List[str]) -> int: finder=finder, download_dir=options.wheel_dir, use_user_site=False, + verbosity=self.verbosity, ) resolver = self.make_resolver( @@ -137,9 +142,7 @@ def run(self, options: Values, args: List[str]) -> int: self.trace_basic_info(finder) - requirement_set = resolver.resolve( - reqs, check_supported_wheels=True - ) + requirement_set = resolver.resolve(reqs, check_supported_wheels=True) reqs_to_build: List[InstallRequirement] = [] for req in requirement_set.requirements.values(): @@ -165,12 +168,11 @@ def run(self, options: Values, args: List[str]) -> int: except OSError as e: logger.warning( "Building wheel for %s failed: %s", - req.name, e, + req.name, + e, ) build_failures.append(req) if len(build_failures) != 0: - raise CommandError( - "Failed to build one or more wheels" - ) + raise CommandError("Failed to build one or more wheels") return SUCCESS diff --git a/pipenv/patched/notpip/_internal/configuration.py b/pipenv/patched/notpip/_internal/configuration.py index fc6e4db7b8..2a40f65415 100644 --- a/pipenv/patched/notpip/_internal/configuration.py +++ b/pipenv/patched/notpip/_internal/configuration.py @@ -13,7 +13,6 @@ import configparser import locale -import logging import os import sys from typing import Any, Dict, Iterable, List, NewType, Optional, Tuple @@ -24,41 +23,39 @@ ) from pipenv.patched.notpip._internal.utils import appdirs from pipenv.patched.notpip._internal.utils.compat import WINDOWS +from pipenv.patched.notpip._internal.utils.logging import getLogger from pipenv.patched.notpip._internal.utils.misc import ensure_dir, enum RawConfigParser = configparser.RawConfigParser # Shorthand Kind = NewType("Kind", str) -CONFIG_BASENAME = 'pip.ini' if WINDOWS else 'pip.conf' +CONFIG_BASENAME = "pip.ini" if WINDOWS else "pip.conf" ENV_NAMES_IGNORED = "version", "help" # The kinds of configurations there are. kinds = enum( - USER="user", # User Specific - GLOBAL="global", # System Wide - SITE="site", # [Virtual] Environment Specific - ENV="env", # from PIP_CONFIG_FILE + USER="user", # User Specific + GLOBAL="global", # System Wide + SITE="site", # [Virtual] Environment Specific + ENV="env", # from PIP_CONFIG_FILE ENV_VAR="env-var", # from Environment Variables ) OVERRIDE_ORDER = kinds.GLOBAL, kinds.USER, kinds.SITE, kinds.ENV, kinds.ENV_VAR VALID_LOAD_ONLY = kinds.USER, kinds.GLOBAL, kinds.SITE -logger = logging.getLogger(__name__) +logger = getLogger(__name__) # NOTE: Maybe use the optionx attribute to normalize keynames. -def _normalize_name(name): - # type: (str) -> str - """Make a name consistent regardless of source (environment or file) - """ - name = name.lower().replace('_', '-') - if name.startswith('--'): +def _normalize_name(name: str) -> str: + """Make a name consistent regardless of source (environment or file)""" + name = name.lower().replace("_", "-") + if name.startswith("--"): name = name[2:] # only prefer long opts return name -def _disassemble_key(name): - # type: (str) -> List[str] +def _disassemble_key(name: str) -> List[str]: if "." not in name: error_message = ( "Key does not contain dot separated section and key. 
" @@ -68,22 +65,18 @@ def _disassemble_key(name): return name.split(".", 1) -def get_configuration_files(): - # type: () -> Dict[Kind, List[str]] +def get_configuration_files() -> Dict[Kind, List[str]]: global_config_files = [ - os.path.join(path, CONFIG_BASENAME) - for path in appdirs.site_config_dirs('pip') + os.path.join(path, CONFIG_BASENAME) for path in appdirs.site_config_dirs("pip") ] site_config_file = os.path.join(sys.prefix, CONFIG_BASENAME) legacy_config_file = os.path.join( - os.path.expanduser('~'), - 'pip' if WINDOWS else '.pip', + os.path.expanduser("~"), + "pip" if WINDOWS else ".pip", CONFIG_BASENAME, ) - new_config_file = os.path.join( - appdirs.user_config_dir("pip"), CONFIG_BASENAME - ) + new_config_file = os.path.join(appdirs.user_config_dir("pip"), CONFIG_BASENAME) return { kinds.GLOBAL: global_config_files, kinds.SITE: [site_config_file], @@ -105,8 +98,7 @@ class Configuration: and the data stored is also nice. """ - def __init__(self, isolated, load_only=None): - # type: (bool, Optional[Kind]) -> None + def __init__(self, isolated: bool, load_only: Optional[Kind] = None) -> None: super().__init__() if load_only is not None and load_only not in VALID_LOAD_ONLY: @@ -119,54 +111,44 @@ def __init__(self, isolated, load_only=None): self.load_only = load_only # Because we keep track of where we got the data from - self._parsers = { + self._parsers: Dict[Kind, List[Tuple[str, RawConfigParser]]] = { variant: [] for variant in OVERRIDE_ORDER - } # type: Dict[Kind, List[Tuple[str, RawConfigParser]]] - self._config = { + } + self._config: Dict[Kind, Dict[str, Any]] = { variant: {} for variant in OVERRIDE_ORDER - } # type: Dict[Kind, Dict[str, Any]] - self._modified_parsers = [] # type: List[Tuple[str, RawConfigParser]] + } + self._modified_parsers: List[Tuple[str, RawConfigParser]] = [] - def load(self): - # type: () -> None - """Loads configuration from configuration files and environment - """ + def load(self) -> None: + """Loads configuration from configuration files and environment""" self._load_config_files() if not self.isolated: self._load_environment_vars() - def get_file_to_edit(self): - # type: () -> Optional[str] - """Returns the file with highest priority in configuration - """ - assert self.load_only is not None, \ - "Need to be specified a file to be editing" + def get_file_to_edit(self) -> Optional[str]: + """Returns the file with highest priority in configuration""" + assert self.load_only is not None, "Need to be specified a file to be editing" try: return self._get_parser_to_modify()[0] except IndexError: return None - def items(self): - # type: () -> Iterable[Tuple[str, Any]] + def items(self) -> Iterable[Tuple[str, Any]]: """Returns key-value pairs like dict.items() representing the loaded configuration """ return self._dictionary.items() - def get_value(self, key): - # type: (str) -> Any - """Get a value from the configuration. - """ + def get_value(self, key: str) -> Any: + """Get a value from the configuration.""" try: return self._dictionary[key] except KeyError: raise ConfigurationError(f"No such key - {key}") - def set_value(self, key, value): - # type: (str, Any) -> None - """Modify a value in the configuration. 
- """ + def set_value(self, key: str, value: Any) -> None: + """Modify a value in the configuration.""" self._ensure_have_load_only() assert self.load_only @@ -183,8 +165,7 @@ def set_value(self, key, value): self._config[self.load_only][key] = value self._mark_as_modified(fname, parser) - def unset_value(self, key): - # type: (str) -> None + def unset_value(self, key: str) -> None: """Unset a value in the configuration.""" self._ensure_have_load_only() @@ -196,8 +177,9 @@ def unset_value(self, key): if parser is not None: section, name = _disassemble_key(key) - if not (parser.has_section(section) - and parser.remove_option(section, name)): + if not ( + parser.has_section(section) and parser.remove_option(section, name) + ): # The option was not removed. raise ConfigurationError( "Fatal Internal error [id=1]. Please report as a bug." @@ -210,10 +192,8 @@ def unset_value(self, key): del self._config[self.load_only][key] - def save(self): - # type: () -> None - """Save the current in-memory state. - """ + def save(self) -> None: + """Save the current in-memory state.""" self._ensure_have_load_only() for fname, parser in self._modified_parsers: @@ -229,17 +209,14 @@ def save(self): # Private routines # - def _ensure_have_load_only(self): - # type: () -> None + def _ensure_have_load_only(self) -> None: if self.load_only is None: raise ConfigurationError("Needed a specific file to be modifying.") logger.debug("Will be working with %s variant only", self.load_only) @property - def _dictionary(self): - # type: () -> Dict[str, Any] - """A dictionary representing the loaded configuration. - """ + def _dictionary(self) -> Dict[str, Any]: + """A dictionary representing the loaded configuration.""" # NOTE: Dictionaries are not populated if not loaded. So, conditionals # are not needed here. retval = {} @@ -249,10 +226,8 @@ def _dictionary(self): return retval - def _load_config_files(self): - # type: () -> None - """Loads configuration from configuration files - """ + def _load_config_files(self) -> None: + """Loads configuration from configuration files""" config_files = dict(self.iter_config_files()) if config_files[kinds.ENV][0:1] == [os.devnull]: logger.debug( @@ -266,9 +241,7 @@ def _load_config_files(self): # If there's specific variant set in `load_only`, load only # that variant, not the others. if self.load_only is not None and variant != self.load_only: - logger.debug( - "Skipping file '%s' (variant: %s)", fname, variant - ) + logger.debug("Skipping file '%s' (variant: %s)", fname, variant) continue parser = self._load_file(variant, fname) @@ -276,9 +249,8 @@ def _load_config_files(self): # Keeping track of the parsers used self._parsers[variant].append((fname, parser)) - def _load_file(self, variant, fname): - # type: (Kind, str) -> RawConfigParser - logger.debug("For variant '%s', will try loading '%s'", variant, fname) + def _load_file(self, variant: Kind, fname: str) -> RawConfigParser: + logger.verbose("For variant '%s', will try loading '%s'", variant, fname) parser = self._construct_parser(fname) for section in parser.sections(): @@ -287,22 +259,20 @@ def _load_file(self, variant, fname): return parser - def _construct_parser(self, fname): - # type: (str) -> RawConfigParser + def _construct_parser(self, fname: str) -> RawConfigParser: parser = configparser.RawConfigParser() # If there is no such file, don't bother reading it but create the # parser anyway, to hold the data. # Doing this is useful when modifying and saving files, where we don't # need to construct a parser. 
if os.path.exists(fname): + locale_encoding = locale.getpreferredencoding(False) try: - parser.read(fname) + parser.read(fname, encoding=locale_encoding) except UnicodeDecodeError: # See https://github.com/pypa/pip/issues/4963 raise ConfigurationFileCouldNotBeLoaded( - reason="contains invalid {} characters".format( - locale.getpreferredencoding(False) - ), + reason=f"contains invalid {locale_encoding} characters", fname=fname, ) except configparser.Error as error: @@ -310,16 +280,15 @@ def _construct_parser(self, fname): raise ConfigurationFileCouldNotBeLoaded(error=error) return parser - def _load_environment_vars(self): - # type: () -> None - """Loads configuration from environment variables - """ + def _load_environment_vars(self) -> None: + """Loads configuration from environment variables""" self._config[kinds.ENV_VAR].update( self._normalized_keys(":env:", self.get_environ_vars()) ) - def _normalized_keys(self, section, items): - # type: (str, Iterable[Tuple[str, Any]]) -> Dict[str, Any] + def _normalized_keys( + self, section: str, items: Iterable[Tuple[str, Any]] + ) -> Dict[str, Any]: """Normalizes items to construct a dictionary with normalized keys. This routine is where the names become keys and are made the same @@ -331,8 +300,7 @@ def _normalized_keys(self, section, items): normalized[key] = val return normalized - def get_environ_vars(self): - # type: () -> Iterable[Tuple[str, str]] + def get_environ_vars(self) -> Iterable[Tuple[str, str]]: """Returns a generator with all environmental vars with prefix PIP_""" for key, val in os.environ.items(): if key.startswith("PIP_"): @@ -341,8 +309,7 @@ def get_environ_vars(self): yield name, val # XXX: This is patched in the tests. - def iter_config_files(self): - # type: () -> Iterable[Tuple[Kind, List[str]]] + def iter_config_files(self) -> Iterable[Tuple[Kind, List[str]]]: """Yields variant and configuration files associated with it. This should be treated like items of a dictionary. @@ -350,7 +317,7 @@ def iter_config_files(self): # SMELL: Move the conditions out of this function # environment variables have the lowest priority - config_file = os.environ.get('PIP_CONFIG_FILE', None) + config_file = os.environ.get("PIP_CONFIG_FILE", None) if config_file is not None: yield kinds.ENV, [config_file] else: @@ -372,13 +339,11 @@ def iter_config_files(self): # finally virtualenv configuration first trumping others yield kinds.SITE, config_files[kinds.SITE] - def get_values_in_config(self, variant): - # type: (Kind) -> Dict[str, Any] + def get_values_in_config(self, variant: Kind) -> Dict[str, Any]: """Get values present in a config file""" return self._config[variant] - def _get_parser_to_modify(self): - # type: () -> Tuple[str, RawConfigParser] + def _get_parser_to_modify(self) -> Tuple[str, RawConfigParser]: # Determine which parser to modify assert self.load_only parsers = self._parsers[self.load_only] @@ -392,12 +357,10 @@ def _get_parser_to_modify(self): return parsers[-1] # XXX: This is patched in the tests. 
- def _mark_as_modified(self, fname, parser): - # type: (str, RawConfigParser) -> None + def _mark_as_modified(self, fname: str, parser: RawConfigParser) -> None: file_parser_tuple = (fname, parser) if file_parser_tuple not in self._modified_parsers: self._modified_parsers.append(file_parser_tuple) - def __repr__(self): - # type: () -> str + def __repr__(self) -> str: return f"{self.__class__.__name__}({self._dictionary!r})" diff --git a/pipenv/patched/notpip/_internal/distributions/base.py b/pipenv/patched/notpip/_internal/distributions/base.py index fabc9b81a3..3dbc7f2363 100644 --- a/pipenv/patched/notpip/_internal/distributions/base.py +++ b/pipenv/patched/notpip/_internal/distributions/base.py @@ -1,9 +1,7 @@ import abc -from typing import Optional - -from pipenv.patched.notpip._vendor.pkg_resources import Distribution from pipenv.patched.notpip._internal.index.package_finder import PackageFinder +from pipenv.patched.notpip._internal.metadata.base import BaseDistribution from pipenv.patched.notpip._internal.req import InstallRequirement @@ -28,7 +26,7 @@ def __init__(self, req: InstallRequirement) -> None: self.req = req @abc.abstractmethod - def get_pkg_resources_distribution(self) -> Optional[Distribution]: + def get_metadata_distribution(self) -> BaseDistribution: raise NotImplementedError() @abc.abstractmethod diff --git a/pipenv/patched/notpip/_internal/distributions/installed.py b/pipenv/patched/notpip/_internal/distributions/installed.py index 127602b7bc..b7184c6368 100644 --- a/pipenv/patched/notpip/_internal/distributions/installed.py +++ b/pipenv/patched/notpip/_internal/distributions/installed.py @@ -1,9 +1,6 @@ -from typing import Optional - -from pipenv.patched.notpip._vendor.pkg_resources import Distribution - from pipenv.patched.notpip._internal.distributions.base import AbstractDistribution from pipenv.patched.notpip._internal.index.package_finder import PackageFinder +from pipenv.patched.notpip._internal.metadata import BaseDistribution class InstalledDistribution(AbstractDistribution): @@ -13,7 +10,8 @@ class InstalledDistribution(AbstractDistribution): been computed. """ - def get_pkg_resources_distribution(self) -> Optional[Distribution]: + def get_metadata_distribution(self) -> BaseDistribution: + assert self.req.satisfied_by is not None, "not actually installed" return self.req.satisfied_by def prepare_distribution_metadata( diff --git a/pipenv/patched/notpip/_internal/distributions/sdist.py b/pipenv/patched/notpip/_internal/distributions/sdist.py index 910fe48c75..d2add10ba4 100644 --- a/pipenv/patched/notpip/_internal/distributions/sdist.py +++ b/pipenv/patched/notpip/_internal/distributions/sdist.py @@ -1,12 +1,11 @@ import logging -from typing import Set, Tuple - -from pipenv.patched.notpip._vendor.pkg_resources import Distribution +from typing import Iterable, Set, Tuple from pipenv.patched.notpip._internal.build_env import BuildEnvironment from pipenv.patched.notpip._internal.distributions.base import AbstractDistribution from pipenv.patched.notpip._internal.exceptions import InstallationError from pipenv.patched.notpip._internal.index.package_finder import PackageFinder +from pipenv.patched.notpip._internal.metadata import BaseDistribution from pipenv.patched.notpip._internal.utils.subprocess import runner_with_spinner_message logger = logging.getLogger(__name__) @@ -19,7 +18,7 @@ class SourceDistribution(AbstractDistribution): generated, either using PEP 517 or using the legacy `setup.py egg_info`. 
""" - def get_pkg_resources_distribution(self) -> Distribution: + def get_metadata_distribution(self) -> BaseDistribution: return self.req.get_dist() def prepare_distribution_metadata( @@ -31,28 +30,23 @@ def prepare_distribution_metadata( # Set up the build isolation, if this requirement should be isolated should_isolate = self.req.use_pep517 and build_isolation if should_isolate: - self._setup_isolation(finder) + # Setup an isolated environment and install the build backend static + # requirements in it. + self._prepare_build_backend(finder) + # Check that if the requirement is editable, it either supports PEP 660 or + # has a setup.py or a setup.cfg. This cannot be done earlier because we need + # to setup the build backend to verify it supports build_editable, nor can + # it be done later, because we want to avoid installing build requirements + # needlessly. Doing it here also works around setuptools generating + # UNKNOWN.egg-info when running get_requires_for_build_wheel on a directory + # without setup.py nor setup.cfg. + self.req.isolated_editable_sanity_check() + # Install the dynamic build requirements. + self._install_build_reqs(finder) self.req.prepare_metadata() - def _setup_isolation(self, finder: PackageFinder) -> None: - def _raise_conflicts( - conflicting_with: str, conflicting_reqs: Set[Tuple[str, str]] - ) -> None: - format_string = ( - "Some build dependencies for {requirement} " - "conflict with {conflicting_with}: {description}." - ) - error_message = format_string.format( - requirement=self.req, - conflicting_with=conflicting_with, - description=", ".join( - f"{installed} is incompatible with {wanted}" - for installed, wanted in sorted(conflicting) - ), - ) - raise InstallationError(error_message) - + def _prepare_build_backend(self, finder: PackageFinder) -> None: # Isolate in a BuildEnvironment and install the build-time # requirements. pyproject_requires = self.req.pyproject_requires @@ -60,13 +54,13 @@ def _raise_conflicts( self.req.build_env = BuildEnvironment() self.req.build_env.install_requirements( - finder, pyproject_requires, "overlay", "Installing build dependencies" + finder, pyproject_requires, "overlay", kind="build dependencies" ) conflicting, missing = self.req.build_env.check_requirements( self.req.requirements_to_check ) if conflicting: - _raise_conflicts("PEP 517/518 supported requirements", conflicting) + self._raise_conflicts("PEP 517/518 supported requirements", conflicting) if missing: logger.warning( "Missing build requirements in pyproject.toml for %s.", @@ -77,19 +71,57 @@ def _raise_conflicts( "pip cannot fall back to setuptools without %s.", " and ".join(map(repr, sorted(missing))), ) - # Install any extra build dependencies that the backend requests. - # This must be done in a second pass, as the pyproject.toml - # dependencies must be installed before we can call the backend. 
+ + def _get_build_requires_wheel(self) -> Iterable[str]: with self.req.build_env: runner = runner_with_spinner_message("Getting requirements to build wheel") backend = self.req.pep517_backend assert backend is not None with backend.subprocess_runner(runner): - reqs = backend.get_requires_for_build_wheel() + return backend.get_requires_for_build_wheel() - conflicting, missing = self.req.build_env.check_requirements(reqs) + def _get_build_requires_editable(self) -> Iterable[str]: + with self.req.build_env: + runner = runner_with_spinner_message( + "Getting requirements to build editable" + ) + backend = self.req.pep517_backend + assert backend is not None + with backend.subprocess_runner(runner): + return backend.get_requires_for_build_editable() + + def _install_build_reqs(self, finder: PackageFinder) -> None: + # Install any extra build dependencies that the backend requests. + # This must be done in a second pass, as the pyproject.toml + # dependencies must be installed before we can call the backend. + if ( + self.req.editable + and self.req.permit_editable_wheels + and self.req.supports_pyproject_editable() + ): + build_reqs = self._get_build_requires_editable() + else: + build_reqs = self._get_build_requires_wheel() + conflicting, missing = self.req.build_env.check_requirements(build_reqs) if conflicting: - _raise_conflicts("the backend dependencies", conflicting) + self._raise_conflicts("the backend dependencies", conflicting) self.req.build_env.install_requirements( - finder, missing, "normal", "Installing backend dependencies" + finder, missing, "normal", kind="backend dependencies" + ) + + def _raise_conflicts( + self, conflicting_with: str, conflicting_reqs: Set[Tuple[str, str]] + ) -> None: + format_string = ( + "Some build dependencies for {requirement} " + "conflict with {conflicting_with}: {description}." + ) + error_message = format_string.format( + requirement=self.req, + conflicting_with=conflicting_with, + description=", ".join( + f"{installed} is incompatible with {wanted}" + for installed, wanted in sorted(conflicting_reqs) + ), ) + raise InstallationError(error_message) diff --git a/pipenv/patched/notpip/_internal/distributions/wheel.py b/pipenv/patched/notpip/_internal/distributions/wheel.py index 91542844c7..7b5e17a2a0 100644 --- a/pipenv/patched/notpip/_internal/distributions/wheel.py +++ b/pipenv/patched/notpip/_internal/distributions/wheel.py @@ -1,10 +1,12 @@ -from zipfile import ZipFile - -from pipenv.patched.notpip._vendor.pkg_resources import Distribution +from pipenv.patched.notpip._vendor.packaging.utils import canonicalize_name from pipenv.patched.notpip._internal.distributions.base import AbstractDistribution from pipenv.patched.notpip._internal.index.package_finder import PackageFinder -from pipenv.patched.notpip._internal.utils.wheel import pkg_resources_distribution_for_wheel +from pipenv.patched.notpip._internal.metadata import ( + BaseDistribution, + FilesystemWheel, + get_wheel_distribution, +) class WheelDistribution(AbstractDistribution): @@ -13,20 +15,15 @@ class WheelDistribution(AbstractDistribution): This does not need any preparation as wheels can be directly unpacked. """ - def get_pkg_resources_distribution(self) -> Distribution: + def get_metadata_distribution(self) -> BaseDistribution: """Loads the metadata from the wheel file into memory and returns a Distribution that uses it, not relying on the wheel file or requirement. """ - # Set as part of preparation during download. - assert self.req.local_file_path - # Wheels are never unnamed. 
- assert self.req.name - -        with ZipFile(self.req.local_file_path, allowZip64=True) as z: -            return pkg_resources_distribution_for_wheel( -                z, self.req.name, self.req.local_file_path - ) +        assert self.req.local_file_path, "Set as part of preparation during download" +        assert self.req.name, "Wheels are never unnamed" +        wheel = FilesystemWheel(self.req.local_file_path) +        return get_wheel_distribution(wheel, canonicalize_name(self.req.name)) def prepare_distribution_metadata( self, finder: PackageFinder, build_isolation: bool diff --git a/pipenv/patched/notpip/_internal/exceptions.py b/pipenv/patched/notpip/_internal/exceptions.py index 79fc0edbe3..c488d66b5d 100644 --- a/pipenv/patched/notpip/_internal/exceptions.py +++ b/pipenv/patched/notpip/_internal/exceptions.py @@ -1,22 +1,174 @@ -"""Exceptions used throughout package""" +"""Exceptions used throughout package. + +This module MUST NOT try to import from anything within `pipenv.patched.notpip._internal` to +operate. This is expected to be importable from any/all files within the +subpackage and, thus, should not depend on them. +""" import configparser +import re from itertools import chain, groupby, repeat -from typing import TYPE_CHECKING, Dict, List, Optional +from typing import TYPE_CHECKING, Dict, List, Optional, Union -from pipenv.patched.notpip._vendor.pkg_resources import Distribution from pipenv.patched.notpip._vendor.requests.models import Request, Response +from pipenv.patched.notpip._vendor.rich.console import Console, ConsoleOptions, RenderResult +from pipenv.patched.notpip._vendor.rich.markup import escape +from pipenv.patched.notpip._vendor.rich.text import Text if TYPE_CHECKING: from hashlib import _Hash +    from typing import Literal +    from pipenv.patched.notpip._internal.metadata import BaseDistribution from pipenv.patched.notpip._internal.req.req_install import InstallRequirement +# +# Scaffolding +# +def _is_kebab_case(s: str) -> bool: +    return re.match(r"^[a-z]+(-[a-z]+)*$", s) is not None + + +def _prefix_with_indent( +    s: Union[Text, str], +    console: Console, +    *, +    prefix: str, +    indent: str, +) -> Text: +    if isinstance(s, Text): +        text = s +    else: +        text = console.render_str(s) + +    return console.render_str(prefix, overflow="ignore") + console.render_str( +        f"\n{indent}", overflow="ignore" +    ).join(text.split(allow_blank=True)) + + class PipError(Exception): -    """Base pip exception""" +    """The base pip error.""" + + +class DiagnosticPipError(PipError): +    """An error that presents diagnostic information to the user. +    This contains a bunch of logic to enable pretty presentation of our error +    messages. Each error gets a unique reference. Each error can also include +    additional context, a hint and/or a note -- which are presented with the +    main error message in a consistent style. + +    This is adapted from the error output styling in `sphinx-theme-builder`. +    """ +    reference: str + +    def __init__( +        self, +        *, +        kind: 'Literal["error", "warning"]' = "error", +        reference: Optional[str] = None, +        message: Union[str, Text], +        context: Optional[Union[str, Text]], +        hint_stmt: Optional[Union[str, Text]], +        note_stmt: Optional[Union[str, Text]] = None, +        link: Optional[str] = None, +    ) -> None: +        # Ensure a proper reference is provided. +        if reference is None: +            assert hasattr(self, "reference"), "error reference not provided!" +            reference = self.reference +        assert _is_kebab_case(reference), "error reference must be kebab-case!" 
+ +        self.kind = kind +        self.reference = reference + +        self.message = message +        self.context = context + +        self.note_stmt = note_stmt +        self.hint_stmt = hint_stmt + +        self.link = link + +        super().__init__(f"<{self.__class__.__name__}: {self.reference}>") + +    def __repr__(self) -> str: +        return ( +            f"<{self.__class__.__name__}(" +            f"reference={self.reference!r}, " +            f"message={self.message!r}, " +            f"context={self.context!r}, " +            f"note_stmt={self.note_stmt!r}, " +            f"hint_stmt={self.hint_stmt!r}" +            ")>" +        ) + +    def __rich_console__( +        self, +        console: Console, +        options: ConsoleOptions, +    ) -> RenderResult: +        colour = "red" if self.kind == "error" else "yellow" + +        yield f"[{colour} bold]{self.kind}[/]: [bold]{self.reference}[/]" +        yield "" + +        if not options.ascii_only: +            # Present the main message, with relevant context indented. +            if self.context is not None: +                yield _prefix_with_indent( +                    self.message, +                    console, +                    prefix=f"[{colour}]×[/] ", +                    indent=f"[{colour}]│[/] ", +                ) +                yield _prefix_with_indent( +                    self.context, +                    console, +                    prefix=f"[{colour}]╰─>[/] ", +                    indent=f"[{colour}] [/] ", +                ) +            else: +                yield _prefix_with_indent( +                    self.message, +                    console, +                    prefix="[red]×[/] ", +                    indent=" ", +                ) +        else: +            yield self.message +            if self.context is not None: +                yield "" +                yield self.context + +        if self.note_stmt is not None or self.hint_stmt is not None: +            yield "" + +        if self.note_stmt is not None: +            yield _prefix_with_indent( +                self.note_stmt, +                console, +                prefix="[magenta bold]note[/]: ", +                indent=" ", +            ) +        if self.hint_stmt is not None: +            yield _prefix_with_indent( +                self.hint_stmt, +                console, +                prefix="[cyan bold]hint[/]: ", +                indent=" ", +            ) + +        if self.link is not None: +            yield "" +            yield f"Link: {self.link}" + + +# +# Actual Errors +# class ConfigurationError(PipError): """General exception in configuration""" @@ -29,17 +181,54 @@ class UninstallationError(PipError): """General exception during uninstallation""" +class MissingPyProjectBuildRequires(DiagnosticPipError): +    """Raised when pyproject.toml has `build-system`, but no `build-system.requires`.""" + +    reference = "missing-pyproject-build-system-requires" + +    def __init__(self, *, package: str) -> None: +        super().__init__( +            message=f"Can not process {escape(package)}", +            context=Text( +                "This package has an invalid pyproject.toml file.\n" +                "The [build-system] table is missing the mandatory `requires` key." +            ), +            note_stmt="This is an issue with the package mentioned above, not pip.", +            hint_stmt=Text("See PEP 518 for the detailed specification."), +        ) + + +class InvalidPyProjectBuildRequires(DiagnosticPipError): +    """Raised when pyproject.toml has an invalid `build-system.requires`.""" + +    reference = "invalid-pyproject-build-system-requires" + +    def __init__(self, *, package: str, reason: str) -> None: +        super().__init__( +            message=f"Can not process {escape(package)}", +            context=Text( +                "This package has an invalid `build-system.requires` key in " +                f"pyproject.toml.\n{reason}" +            ), +            note_stmt="This is an issue with the package mentioned above, not pip.", +            hint_stmt=Text("See PEP 518 for the detailed specification."), +        ) + + class NoneMetadataError(PipError): -    """ -    Raised when accessing "METADATA" or "PKG-INFO" metadata for a -    pipenv.patched.notpip._vendor.pkg_resources.Distribution object and -    `dist.has_metadata('METADATA')` returns True but -    `dist.get_metadata('METADATA')` returns None (and similarly for -    "PKG-INFO"). +    """Raised when accessing a Distribution's "METADATA" or "PKG-INFO". 
+ + This signifies an inconsistency, when the Distribution claims to have + the metadata file (if not, raise ``FileNotFoundError`` instead), but is + not actually able to produce its content. This may be due to permission + errors. """ - def __init__(self, dist, metadata_name): - # type: (Distribution, str) -> None + def __init__( + self, + dist: "BaseDistribution", + metadata_name: str, + ) -> None: """ :param dist: A Distribution object. :param metadata_name: The name of the metadata being accessed @@ -48,28 +237,24 @@ def __init__(self, dist, metadata_name): self.dist = dist self.metadata_name = metadata_name - def __str__(self): - # type: () -> str + def __str__(self) -> str: # Use `dist` in the error message because its stringification # includes more information, like the version and location. - return ( - 'None {} metadata found for distribution: {}'.format( - self.metadata_name, self.dist, - ) + return "None {} metadata found for distribution: {}".format( + self.metadata_name, + self.dist, ) class UserInstallationInvalid(InstallationError): """A --user install is requested on an environment without user site.""" - def __str__(self): - # type: () -> str + def __str__(self) -> str: return "User base directory is not specified" class InvalidSchemeCombination(InstallationError): - def __str__(self): - # type: () -> str + def __str__(self) -> str: before = ", ".join(str(a) for a in self.args[:-1]) return f"Cannot set {before} and {self.args[-1]} together" @@ -102,8 +287,9 @@ class PreviousBuildDirError(PipError): class NetworkConnectionError(PipError): """HTTP connection error""" - def __init__(self, error_msg, response=None, request=None): - # type: (str, Response, Request) -> None + def __init__( + self, error_msg: str, response: Response = None, request: Request = None + ) -> None: """ Initialize NetworkConnectionError with `request` and `response` objects. @@ -111,13 +297,15 @@ def __init__(self, error_msg, response=None, request=None): self.response = response self.request = request self.error_msg = error_msg - if (self.response is not None and not self.request and - hasattr(response, 'request')): + if ( + self.response is not None + and not self.request + and hasattr(response, "request") + ): self.request = self.response.request super().__init__(error_msg, response, request) - def __str__(self): - # type: () -> str + def __str__(self) -> str: return str(self.error_msg) @@ -129,6 +317,17 @@ class UnsupportedWheel(InstallationError): """Unsupported wheel.""" +class InvalidWheel(InstallationError): + """Invalid (e.g. corrupt) wheel.""" + + def __init__(self, location: str, name: str): + self.location = location + self.name = name + + def __str__(self) -> str: + return f"Wheel '{self.name}' located at {self.location} is invalid." + + class MetadataInconsistent(InstallationError): """Built metadata contains inconsistent information. @@ -136,15 +335,16 @@ class MetadataInconsistent(InstallationError): that do not match the information previously obtained from sdist filename or user-supplied ``#egg=`` value. 
""" - def __init__(self, ireq, field, f_val, m_val): - # type: (InstallRequirement, str, str, str) -> None + + def __init__( + self, ireq: "InstallRequirement", field: str, f_val: str, m_val: str + ) -> None: self.ireq = ireq self.field = field self.f_val = f_val self.m_val = m_val - def __str__(self): - # type: () -> str + def __str__(self) -> str: template = ( "Requested {} has inconsistent {}: " "filename has {!r}, but metadata has {!r}" @@ -152,51 +352,102 @@ def __str__(self): return template.format(self.ireq, self.field, self.f_val, self.m_val) -class InstallationSubprocessError(InstallationError): - """A subprocess call failed during installation.""" - def __init__(self, returncode, description): - # type: (int, str) -> None - self.returncode = returncode - self.description = description +class LegacyInstallFailure(DiagnosticPipError): + """Error occurred while executing `setup.py install`""" - def __str__(self): - # type: () -> str - return ( - "Command errored out with exit status {}: {} " - "Check the logs for full command output." - ).format(self.returncode, self.description) + reference = "legacy-install-failure" + + def __init__(self, package_details: str) -> None: + super().__init__( + message="Encountered error while trying to install package.", + context=package_details, + hint_stmt="See above for output from the failure.", + note_stmt="This is an issue with the package mentioned above, not pip.", + ) + + +class InstallationSubprocessError(DiagnosticPipError, InstallationError): + """A subprocess call failed.""" + + reference = "subprocess-exited-with-error" + + def __init__( + self, + *, + command_description: str, + exit_code: int, + output_lines: Optional[List[str]], + ) -> None: + if output_lines is None: + output_prompt = Text("See above for output.") + else: + output_prompt = ( + Text.from_markup(f"[red][{len(output_lines)} lines of output][/]\n") + + Text("".join(output_lines)) + + Text.from_markup(R"[red]\[end of output][/]") + ) + + super().__init__( + message=( + f"[green]{escape(command_description)}[/] did not run successfully.\n" + f"exit code: {exit_code}" + ), + context=output_prompt, + hint_stmt=None, + note_stmt=( + "This error originates from a subprocess, and is likely not a " + "problem with pip." 
+ ), + ) + + self.command_description = command_description + self.exit_code = exit_code + + def __str__(self) -> str: + return f"{self.command_description} exited with {self.exit_code}" + + +class MetadataGenerationFailed(InstallationSubprocessError, InstallationError): + reference = "metadata-generation-failed" + + def __init__( + self, + *, + package_details: str, + ) -> None: + super(InstallationSubprocessError, self).__init__( + message="Encountered error while generating package metadata.", + context=escape(package_details), + hint_stmt="See above for details.", + note_stmt="This is an issue with the package mentioned above, not pip.", + ) + + def __str__(self) -> str: + return "metadata generation failed" class HashErrors(InstallationError): """Multiple HashError instances rolled into one for reporting""" - def __init__(self): - # type: () -> None - self.errors = [] # type: List[HashError] + def __init__(self) -> None: + self.errors: List["HashError"] = [] - def append(self, error): - # type: (HashError) -> None + def append(self, error: "HashError") -> None: self.errors.append(error) - def __str__(self): - # type: () -> str + def __str__(self) -> str: lines = [] self.errors.sort(key=lambda e: e.order) for cls, errors_of_cls in groupby(self.errors, lambda e: e.__class__): lines.append(cls.head) lines.extend(e.body() for e in errors_of_cls) if lines: - return '\n'.join(lines) - return '' + return "\n".join(lines) + return "" - def __nonzero__(self): - # type: () -> bool + def __bool__(self) -> bool: return bool(self.errors) - def __bool__(self): - # type: () -> bool - return self.__nonzero__() - class HashError(InstallationError): """ @@ -214,12 +465,12 @@ class HashError(InstallationError): typically available earlier. """ - req = None # type: Optional[InstallRequirement] - head = '' - order = -1 # type: int - def body(self): - # type: () -> str + req: Optional["InstallRequirement"] = None + head = "" + order: int = -1 + + def body(self) -> str: """Return a summary of me for display under the heading. This default implementation simply prints a description of the @@ -229,21 +480,19 @@ def body(self): its link already populated by the resolver's _populate_link(). """ - return f' {self._requirement_name()}' + return f" {self._requirement_name()}" - def __str__(self): - # type: () -> str - return f'{self.head}\n{self.body()}' + def __str__(self) -> str: + return f"{self.head}\n{self.body()}" - def _requirement_name(self): - # type: () -> str + def _requirement_name(self) -> str: """Return a description of the requirement that triggered me. 
This default implementation returns long description of the req, with line numbers """ - return str(self.req) if self.req else 'unknown package' + return str(self.req) if self.req else "unknown package" class VcsHashUnsupported(HashError): @@ -251,8 +500,10 @@ class VcsHashUnsupported(HashError): we don't have a method for hashing those.""" order = 0 - head = ("Can't verify hashes for these requirements because we don't " - "have a way to hash version control repositories:") + head = ( + "Can't verify hashes for these requirements because we don't " + "have a way to hash version control repositories:" + ) class DirectoryUrlHashUnsupported(HashError): @@ -260,32 +511,34 @@ class DirectoryUrlHashUnsupported(HashError): we don't have a method for hashing those.""" order = 1 - head = ("Can't verify hashes for these file:// requirements because they " - "point to directories:") + head = ( + "Can't verify hashes for these file:// requirements because they " + "point to directories:" + ) class HashMissing(HashError): """A hash was needed for a requirement but is absent.""" order = 2 - head = ('Hashes are required in --require-hashes mode, but they are ' - 'missing from some requirements. Here is a list of those ' - 'requirements along with the hashes their downloaded archives ' - 'actually had. Add lines like these to your requirements files to ' - 'prevent tampering. (If you did not enable --require-hashes ' - 'manually, note that it turns on automatically when any package ' - 'has a hash.)') - - def __init__(self, gotten_hash): - # type: (str) -> None + head = ( + "Hashes are required in --require-hashes mode, but they are " + "missing from some requirements. Here is a list of those " + "requirements along with the hashes their downloaded archives " + "actually had. Add lines like these to your requirements files to " + "prevent tampering. (If you did not enable --require-hashes " + "manually, note that it turns on automatically when any package " + "has a hash.)" + ) + + def __init__(self, gotten_hash: str) -> None: """ :param gotten_hash: The hash of the (possibly malicious) archive we just downloaded """ self.gotten_hash = gotten_hash - def body(self): - # type: () -> str + def body(self) -> str: # Dodge circular import. from pipenv.patched.notpip._internal.utils.hashes import FAVORITE_HASH @@ -294,13 +547,16 @@ def body(self): # In the case of URL-based requirements, display the original URL # seen in the requirements file rather than the package name, # so the output can be directly copied into the requirements file. - package = (self.req.original_link if self.req.original_link - # In case someone feeds something downright stupid - # to InstallRequirement's constructor. - else getattr(self.req, 'req', None)) - return ' {} --hash={}:{}'.format(package or 'unknown package', - FAVORITE_HASH, - self.gotten_hash) + package = ( + self.req.original_link + if self.req.original_link + # In case someone feeds something downright stupid + # to InstallRequirement's constructor. + else getattr(self.req, "req", None) + ) + return " {} --hash={}:{}".format( + package or "unknown package", FAVORITE_HASH, self.gotten_hash + ) class HashUnpinned(HashError): @@ -308,8 +564,10 @@ class HashUnpinned(HashError): version.""" order = 3 - head = ('In --require-hashes mode, all requirements must have their ' - 'versions pinned with ==. These do not:') + head = ( + "In --require-hashes mode, all requirements must have their " + "versions pinned with ==. 
These do not:" + ) class HashMismatch(HashError): @@ -321,14 +579,16 @@ class HashMismatch(HashError): improve its error message. """ - order = 4 - head = ('THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS ' - 'FILE. If you have updated the package versions, please update ' - 'the hashes. Otherwise, examine the package contents carefully; ' - 'someone may have tampered with them.') - def __init__(self, allowed, gots): - # type: (Dict[str, List[str]], Dict[str, _Hash]) -> None + order = 4 + head = ( + "THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS " + "FILE. If you have updated the package versions, please update " + "the hashes. Otherwise, examine the package contents carefully; " + "someone may have tampered with them." + ) + + def __init__(self, allowed: Dict[str, List[str]], gots: Dict[str, "_Hash"]) -> None: """ :param allowed: A dict of algorithm names pointing to lists of allowed hex digests @@ -338,13 +598,10 @@ def __init__(self, allowed, gots): self.allowed = allowed self.gots = gots - def body(self): - # type: () -> str - return ' {}:\n{}'.format(self._requirement_name(), - self._hash_comparison()) + def body(self) -> str: + return " {}:\n{}".format(self._requirement_name(), self._hash_comparison()) - def _hash_comparison(self): - # type: () -> str + def _hash_comparison(self) -> str: """ Return a comparison of actual and expected hash values. @@ -355,20 +612,22 @@ def _hash_comparison(self): Got bcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdef """ - def hash_then_or(hash_name): - # type: (str) -> chain[str] + + def hash_then_or(hash_name: str) -> "chain[str]": # For now, all the decent hashes have 6-char names, so we can get # away with hard-coding space literals. - return chain([hash_name], repeat(' or')) + return chain([hash_name], repeat(" or")) - lines = [] # type: List[str] + lines: List[str] = [] for hash_name, expecteds in self.allowed.items(): prefix = hash_then_or(hash_name) - lines.extend((' Expected {} {}'.format(next(prefix), e)) - for e in expecteds) - lines.append(' Got {}\n'.format( - self.gots[hash_name].hexdigest())) - return '\n'.join(lines) + lines.extend( + (" Expected {} {}".format(next(prefix), e)) for e in expecteds + ) + lines.append( + " Got {}\n".format(self.gots[hash_name].hexdigest()) + ) + return "\n".join(lines) class UnsupportedPythonVersion(InstallationError): @@ -377,18 +636,20 @@ class UnsupportedPythonVersion(InstallationError): class ConfigurationFileCouldNotBeLoaded(ConfigurationError): - """When there are errors while loading a configuration file - """ - - def __init__(self, reason="could not be loaded", fname=None, error=None): - # type: (str, Optional[str], Optional[configparser.Error]) -> None + """When there are errors while loading a configuration file""" + + def __init__( + self, + reason: str = "could not be loaded", + fname: Optional[str] = None, + error: Optional[configparser.Error] = None, + ) -> None: super().__init__(error) self.reason = reason self.fname = fname self.error = error - def __str__(self): - # type: () -> str + def __str__(self) -> str: if self.fname is not None: message_part = f" in {self.fname}." 
else: diff --git a/pipenv/patched/notpip/_internal/index/collector.py b/pipenv/patched/notpip/_internal/index/collector.py index 98ac9d2e34..e9c2e3800b 100644 --- a/pipenv/patched/notpip/_internal/index/collector.py +++ b/pipenv/patched/notpip/_internal/index/collector.py @@ -5,7 +5,6 @@ import cgi import collections import functools -import html import itertools import logging import os @@ -13,15 +12,19 @@ import urllib.parse import urllib.request import xml.etree.ElementTree +from html.parser import HTMLParser from optparse import Values from typing import ( + TYPE_CHECKING, Callable, + Dict, Iterable, List, MutableMapping, NamedTuple, Optional, Sequence, + Tuple, Union, ) @@ -40,6 +43,11 @@ from .sources import CandidatesFromPage, LinkSource, build_source +if TYPE_CHECKING: + from typing import Protocol +else: + Protocol = object + logger = logging.getLogger(__name__) HTMLElement = xml.etree.ElementTree.Element @@ -52,7 +60,7 @@ def _match_vcs_scheme(url: str) -> Optional[str]: Returns the matched VCS scheme, or None if there's no match. """ for scheme in vcs.schemes: - if url.lower().startswith(scheme) and url[len(scheme)] in '+:': + if url.lower().startswith(scheme) and url[len(scheme)] in "+:": return scheme return None @@ -85,7 +93,7 @@ def _ensure_html_response(url: str, session: PipSession) -> None: `_NotHTML` if the content type is not text/html. """ scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url) - if scheme not in {'http', 'https'}: + if scheme not in {"http", "https"}: raise _NotHTTP() resp = session.head(url, allow_redirects=True) @@ -110,7 +118,7 @@ def _get_html_response(url: str, session: PipSession) -> Response: if is_archive_file(Link(url).filename): _ensure_html_response(url, session=session) - logger.debug('Getting page %s', redact_auth_from_url(url)) + logger.debug("Getting page %s", redact_auth_from_url(url)) resp = session.get( url, @@ -145,12 +153,11 @@ def _get_html_response(url: str, session: PipSession) -> Response: def _get_encoding_from_headers(headers: ResponseHeaders) -> Optional[str]: - """Determine if we have any encoding information in our headers. - """ + """Determine if we have any encoding information in our headers.""" if headers and "Content-Type" in headers: content_type, params = cgi.parse_header(headers["Content-Type"]) if "charset" in params: - return params['charset'] + return params["charset"] return None @@ -165,6 +172,8 @@ def _determine_base_url(document: HTMLElement, page_url: str) -> str: :param document: An HTML document representation. The current implementation expects the result of ``html5lib.parse()``. :param page_url: The URL of the HTML document. + + TODO: Remove when `html5lib` is dropped. """ for base in document.findall(".//base"): href = base.get("href") @@ -195,7 +204,7 @@ def _clean_file_url_path(part: str) -> str: # percent-encoded: / -_reserved_chars_re = re.compile('(@|%2F)', re.IGNORECASE) +_reserved_chars_re = re.compile("(@|%2F)", re.IGNORECASE) def _clean_url_path(path: str, is_local_path: bool) -> str: @@ -212,12 +221,12 @@ def _clean_url_path(path: str, is_local_path: bool) -> str: parts = _reserved_chars_re.split(path) cleaned_parts = [] - for to_clean, reserved in pairwise(itertools.chain(parts, [''])): + for to_clean, reserved in pairwise(itertools.chain(parts, [""])): cleaned_parts.append(clean_func(to_clean)) # Normalize %xx escapes (e.g. 
%2f -> %2F) cleaned_parts.append(reserved.upper()) - return ''.join(cleaned_parts) + return "".join(cleaned_parts) def _clean_link(url: str) -> str: @@ -236,24 +245,20 @@ def _clean_link(url: str) -> str: def _create_link_from_element( - anchor: HTMLElement, + element_attribs: Dict[str, Optional[str]], page_url: str, base_url: str, ) -> Optional[Link]: """ - Convert an anchor element in a simple repository page to a Link. + Convert an anchor element's attributes in a simple repository page to a Link. """ - href = anchor.get("href") + href = element_attribs.get("href") if not href: return None url = _clean_link(urllib.parse.urljoin(base_url, href)) - pyrequire = anchor.get('data-requires-python') - pyrequire = html.unescape(pyrequire) if pyrequire else None - - yanked_reason = anchor.get('data-yanked') - if yanked_reason: - yanked_reason = html.unescape(yanked_reason) + pyrequire = element_attribs.get("data-requires-python") + yanked_reason = element_attribs.get("data-yanked") link = Link( url, @@ -271,16 +276,20 @@ def __init__(self, page: "HTMLPage") -> None: self.page = page def __eq__(self, other: object) -> bool: - return (isinstance(other, type(self)) and - self.page.url == other.page.url) + return isinstance(other, type(self)) and self.page.url == other.page.url def __hash__(self) -> int: return hash(self.page.url) -def with_cached_html_pages( - fn: Callable[["HTMLPage"], Iterable[Link]], -) -> Callable[["HTMLPage"], List[Link]]: +class ParseLinks(Protocol): + def __call__( + self, page: "HTMLPage", use_deprecated_html5lib: bool + ) -> Iterable[Link]: + ... + + +def with_cached_html_pages(fn: ParseLinks) -> ParseLinks: """ Given a function that parses an Iterable[Link] from an HTMLPage, cache the function's result (keyed by CacheablePageContent), unless the HTMLPage @@ -288,22 +297,25 @@ def with_cached_html_pages( """ @functools.lru_cache(maxsize=None) - def wrapper(cacheable_page: CacheablePageContent) -> List[Link]: - return list(fn(cacheable_page.page)) + def wrapper( + cacheable_page: CacheablePageContent, use_deprecated_html5lib: bool + ) -> List[Link]: + return list(fn(cacheable_page.page, use_deprecated_html5lib)) @functools.wraps(fn) - def wrapper_wrapper(page: "HTMLPage") -> List[Link]: + def wrapper_wrapper(page: "HTMLPage", use_deprecated_html5lib: bool) -> List[Link]: if page.cache_link_parsing: - return wrapper(CacheablePageContent(page)) - return list(fn(page)) + return wrapper(CacheablePageContent(page), use_deprecated_html5lib) + return list(fn(page, use_deprecated_html5lib)) return wrapper_wrapper -@with_cached_html_pages -def parse_links(page: "HTMLPage") -> Iterable[Link]: +def _parse_links_html5lib(page: "HTMLPage") -> Iterable[Link]: """ Parse an HTML document, and yield its anchor elements as Link objects. + + TODO: Remove when `html5lib` is dropped. """ document = html5lib.parse( page.content, @@ -314,6 +326,33 @@ def parse_links(page: "HTMLPage") -> Iterable[Link]: url = page.url base_url = _determine_base_url(document, url) for anchor in document.findall(".//a"): + link = _create_link_from_element( + anchor.attrib, + page_url=url, + base_url=base_url, + ) + if link is None: + continue + yield link + + +@with_cached_html_pages +def parse_links(page: "HTMLPage", use_deprecated_html5lib: bool) -> Iterable[Link]: + """ + Parse an HTML document, and yield its anchor elements as Link objects. 
+ """ + + if use_deprecated_html5lib: + yield from _parse_links_html5lib(page) + return + + parser = HTMLLinkParser(page.url) + encoding = page.encoding or "utf-8" + parser.feed(page.content.decode(encoding)) + + url = page.url + base_url = parser.base_url or url + for anchor in parser.anchors: link = _create_link_from_element( anchor, page_url=url, @@ -350,10 +389,38 @@ def __str__(self) -> str: return redact_auth_from_url(self.url) +class HTMLLinkParser(HTMLParser): + """ + HTMLParser that keeps the first base HREF and a list of all anchor + elements' attributes. + """ + + def __init__(self, url: str) -> None: + super().__init__(convert_charrefs=True) + + self.url: str = url + self.base_url: Optional[str] = None + self.anchors: List[Dict[str, Optional[str]]] = [] + + def handle_starttag(self, tag: str, attrs: List[Tuple[str, Optional[str]]]) -> None: + if tag == "base" and self.base_url is None: + href = self.get_href(attrs) + if href is not None: + self.base_url = href + elif tag == "a": + self.anchors.append(dict(attrs)) + + def get_href(self, attrs: List[Tuple[str, Optional[str]]]) -> Optional[str]: + for name, value in attrs: + if name == "href": + return value + return None + + def _handle_get_page_fail( link: Link, reason: Union[str, Exception], - meth: Optional[Callable[..., None]] = None + meth: Optional[Callable[..., None]] = None, ) -> None: if meth is None: meth = logger.debug @@ -366,7 +433,8 @@ def _make_html_page(response: Response, cache_link_parsing: bool = True) -> HTML response.content, encoding=encoding, url=response.url, - cache_link_parsing=cache_link_parsing) + cache_link_parsing=cache_link_parsing, + ) def _get_html_page( @@ -377,37 +445,43 @@ def _get_html_page( "_get_html_page() missing 1 required keyword argument: 'session'" ) - url = link.url.split('#', 1)[0] + url = link.url.split("#", 1)[0] # Check for VCS schemes that do not support lookup as web pages. vcs_scheme = _match_vcs_scheme(url) if vcs_scheme: - logger.warning('Cannot look at %s URL %s because it does not support ' - 'lookup as web pages.', vcs_scheme, link) + logger.warning( + "Cannot look at %s URL %s because it does not support lookup as web pages.", + vcs_scheme, + link, + ) return None # Tack index.html onto file:// URLs that point to directories scheme, _, path, _, _, _ = urllib.parse.urlparse(url) - if (scheme == 'file' and os.path.isdir(urllib.request.url2pathname(path))): + if scheme == "file" and os.path.isdir(urllib.request.url2pathname(path)): # add trailing slash if not present so urljoin doesn't trim # final segment - if not url.endswith('/'): - url += '/' - url = urllib.parse.urljoin(url, 'index.html') - logger.debug(' file: URL is directory, getting %s', url) + if not url.endswith("/"): + url += "/" + url = urllib.parse.urljoin(url, "index.html") + logger.debug(" file: URL is directory, getting %s", url) try: resp = _get_html_response(url, session=session) except _NotHTTP: logger.warning( - 'Skipping page %s because it looks like an archive, and cannot ' - 'be checked by a HTTP HEAD request.', link, + "Skipping page %s because it looks like an archive, and cannot " + "be checked by a HTTP HEAD request.", + link, ) except _NotHTML as exc: logger.warning( - 'Skipping page %s because the %s request got Content-Type: %s.' - 'The only supported Content-Type is text/html', - link, exc.request_desc, exc.content_type, + "Skipping page %s because the %s request got Content-Type: %s." 
+ "The only supported Content-Type is text/html", + link, + exc.request_desc, + exc.content_type, ) except NetworkConnectionError as exc: _handle_get_page_fail(link, exc) @@ -422,8 +496,7 @@ def _get_html_page( except requests.Timeout: _handle_get_page_fail(link, "timed out") else: - return _make_html_page(resp, - cache_link_parsing=link.cache_link_parsing) + return _make_html_page(resp, cache_link_parsing=link.cache_link_parsing) return None @@ -451,9 +524,10 @@ def __init__( @classmethod def create( - cls, session: PipSession, + cls, + session: PipSession, options: Values, - suppress_no_index: bool = False + suppress_no_index: bool = False, ) -> "LinkCollector": """ :param session: The Session to use to make requests. @@ -463,8 +537,8 @@ def create( index_urls = [options.index_url] + options.extra_index_urls if options.no_index and not suppress_no_index: logger.debug( - 'Ignoring indexes: %s', - ','.join(redact_auth_from_url(url) for url in index_urls), + "Ignoring indexes: %s", + ",".join(redact_auth_from_url(url) for url in index_urls), ) index_urls = [] @@ -472,10 +546,12 @@ def create( find_links = options.find_links or [] search_scope = SearchScope.create( - find_links=find_links, index_urls=index_urls, + find_links=find_links, + index_urls=index_urls, ) link_collector = LinkCollector( - session=session, search_scope=search_scope, + session=session, + search_scope=search_scope, ) return link_collector diff --git a/pipenv/patched/notpip/_internal/index/package_finder.py b/pipenv/patched/notpip/_internal/index/package_finder.py index ab48628c87..46f3d48036 100644 --- a/pipenv/patched/notpip/_internal/index/package_finder.py +++ b/pipenv/patched/notpip/_internal/index/package_finder.py @@ -37,17 +37,14 @@ from pipenv.patched.notpip._internal.utils.misc import build_netloc from pipenv.patched.notpip._internal.utils.packaging import check_requires_python from pipenv.patched.notpip._internal.utils.unpacking import SUPPORTED_EXTENSIONS -from pipenv.patched.notpip._internal.utils.urls import url_to_path -__all__ = ['FormatControl', 'BestCandidateResult', 'PackageFinder'] +__all__ = ["FormatControl", "BestCandidateResult", "PackageFinder"] logger = getLogger(__name__) BuildTag = Union[Tuple[()], Tuple[int, str]] -CandidateSortingKey = ( - Tuple[int, int, int, _BaseVersion, Optional[int], BuildTag] -) +CandidateSortingKey = Tuple[int, int, int, _BaseVersion, Optional[int], BuildTag] def _check_link_requires_python( @@ -66,27 +63,32 @@ def _check_link_requires_python( """ try: is_compatible = check_requires_python( - link.requires_python, version_info=version_info, + link.requires_python, + version_info=version_info, ) except specifiers.InvalidSpecifier: logger.debug( "Ignoring invalid Requires-Python (%r) for link: %s", - link.requires_python, link, + link.requires_python, + link, ) else: if not is_compatible: - version = '.'.join(map(str, version_info)) + version = ".".join(map(str, version_info)) if not ignore_requires_python: logger.verbose( - 'Link requires a different Python (%s not in: %r): %s', - version, link.requires_python, link, + "Link requires a different Python (%s not in: %r): %s", + version, + link.requires_python, + link, ) return False logger.debug( - 'Ignoring failed Requires-Python check (%s not in: %r) ' - 'for link: %s', - version, link.requires_python, link, + "Ignoring failed Requires-Python check (%s not in: %r) for link: %s", + version, + link.requires_python, + link, ) return True @@ -98,7 +100,7 @@ class LinkEvaluator: Responsible for evaluating links for a 
particular project. """ - _py_version_re = re.compile(r'-py([123]\.?[0-9]?)$') + _py_version_re = re.compile(r"-py([123]\.?[0-9]?)$") # Don't include an allow_yanked default value to make sure each call # site considers whether yanked releases are allowed. This also causes @@ -143,9 +145,9 @@ def __init__( self._ignore_requires_python = ignore_requires_python self._formats = formats self._target_python = target_python - self._ignore_compatibility = ignore_compatibility self.project_name = project_name + self._ignore_compatibility = ignore_compatibility def evaluate_link(self, link: Link) -> Tuple[bool, Optional[str]]: """ @@ -158,8 +160,8 @@ def evaluate_link(self, link: Link) -> Tuple[bool, Optional[str]]: """ version = None if link.is_yanked and not self._allow_yanked: - reason = link.yanked_reason or '' - return (False, f'yanked for reason: {reason}') + reason = link.yanked_reason or "" + return (False, f"yanked for reason: {reason}") if link.egg_fragment: egg_info = link.egg_fragment @@ -167,23 +169,21 @@ def evaluate_link(self, link: Link) -> Tuple[bool, Optional[str]]: else: egg_info, ext = link.splitext() if not ext: - return (False, 'not a file') + return (False, "not a file") if ext not in SUPPORTED_EXTENSIONS: - return (False, f'unsupported archive format: {ext}') + return (False, f"unsupported archive format: {ext}") if "binary" not in self._formats and ext == WHEEL_EXTENSION and not self._ignore_compatibility: - reason = 'No binaries permitted for {}'.format( - self.project_name) + reason = "No binaries permitted for {}".format(self.project_name) return (False, reason) if "macosx10" in link.path and ext == '.zip' and not self._ignore_compatibility: - return (False, 'macosx10 one') + return (False, "macosx10 one") if ext == WHEEL_EXTENSION: try: wheel = Wheel(link.filename) except InvalidWheelFilename: - return (False, 'invalid wheel filename') + return (False, "invalid wheel filename") if canonicalize_name(wheel.name) != self._canonical_name: - reason = 'wrong project name (not {})'.format( - self.project_name) + reason = "wrong project name (not {})".format(self.project_name) return (False, reason) supported_tags = self._target_python.get_tags() @@ -194,7 +194,7 @@ def evaluate_link(self, link: Link) -> Tuple[bool, Optional[str]]: reason = ( "none of the wheel's tags ({}) are compatible " "(run pip debug --verbose to show compatible tags)".format( - ', '.join(file_tags) + ", ".join(file_tags) ) ) return (False, reason) @@ -203,26 +203,28 @@ def evaluate_link(self, link: Link) -> Tuple[bool, Optional[str]]: # This should be up by the self.ok_binary check, but see issue 2700. 
if "source" not in self._formats and ext != WHEEL_EXTENSION: - reason = f'No sources permitted for {self.project_name}' + reason = f"No sources permitted for {self.project_name}" return (False, reason) if not version: version = _extract_version_from_fragment( - egg_info, self._canonical_name, + egg_info, + self._canonical_name, ) if not version: - reason = f'Missing project version for {self.project_name}' + reason = f"Missing project version for {self.project_name}" return (False, reason) match = self._py_version_re.search(version) if match: - version = version[:match.start()] + version = version[: match.start()] py_version = match.group(1) if py_version != self._target_python.py_version: - return (False, 'Python version is incorrect') + return (False, "Python version is incorrect") supports_python = _check_link_requires_python( - link, version_info=self._target_python.py_version_info, + link, + version_info=self._target_python.py_version_info, ignore_requires_python=self._ignore_requires_python, ) if not supports_python and not self._ignore_compatibility: @@ -230,7 +232,7 @@ def evaluate_link(self, link: Link) -> Tuple[bool, Optional[str]]: # _log_skipped_link(). return (False, None) - logger.debug('Found link %s, version: %s', link, version) + logger.debug("Found link %s, version: %s", link, version) return (True, version) @@ -257,8 +259,8 @@ def filter_unallowed_hashes( """ if not hashes: logger.debug( - 'Given no hashes to check %s links for project %r: ' - 'discarding no candidates', + "Given no hashes to check %s links for project %r: " + "discarding no candidates", len(candidates), project_name, ) @@ -288,22 +290,22 @@ def filter_unallowed_hashes( filtered = list(candidates) if len(filtered) == len(candidates): - discard_message = 'discarding no candidates' + discard_message = "discarding no candidates" else: - discard_message = 'discarding {} non-matches:\n {}'.format( + discard_message = "discarding {} non-matches:\n {}".format( len(non_matches), - '\n '.join(str(candidate.link) for candidate in non_matches) + "\n ".join(str(candidate.link) for candidate in non_matches), ) logger.debug( - 'Checked %s links for project %r against %s hashes ' - '(%s matches, %s no digest): %s', + "Checked %s links for project %r against %s hashes " + "(%s matches, %s no digest): %s", len(candidates), project_name, hashes.digest_count, match_count, len(matches_or_no_digest) - match_count, - discard_message + discard_message, ) return filtered @@ -360,13 +362,11 @@ def __init__( self.best_candidate = best_candidate def iter_all(self) -> Iterable[InstallationCandidate]: - """Iterate through all candidates. - """ + """Iterate through all candidates.""" return iter(self._candidates) def iter_applicable(self) -> Iterable[InstallationCandidate]: - """Iterate through the applicable candidates. - """ + """Iterate through the applicable candidates.""" return iter(self._applicable_candidates) @@ -450,7 +450,8 @@ def get_applicable_candidates( allow_prereleases = self._allow_all_prereleases or None specifier = self._specifier versions = { - str(v) for v in specifier.filter( + str(v) + for v in specifier.filter( # We turn the version object into a str here because otherwise # when we're debundled but setuptools isn't, Python will see # packaging.version.Version and @@ -464,9 +465,7 @@ def get_applicable_candidates( } # Again, converting version to str to deal with debundling. 
- applicable_candidates = [ - c for c in candidates if str(c.version) in versions - ] + applicable_candidates = [c for c in candidates if str(c.version) in versions] filtered_applicable_candidates = filter_unallowed_hashes( candidates=applicable_candidates, @@ -518,9 +517,11 @@ def _sort_key( # can raise InvalidWheelFilename wheel = Wheel(link.filename) try: - pri = -(wheel.find_most_preferred_tag( - valid_tags, self._wheel_tag_preferences - )) + pri = -( + wheel.find_most_preferred_tag( + valid_tags, self._wheel_tag_preferences + ) + ) except ValueError: if not ignore_compatibility: raise UnsupportedWheel( @@ -528,10 +529,11 @@ def _sort_key( "can't be sorted.".format(wheel.filename) ) pri = -(support_num) + if self._prefer_binary: binary_preference = 1 if wheel.build_tag is not None: - match = re.match(r'^(\d+)(.*)$', wheel.build_tag) + match = re.match(r"^(\d+)(.*)$", wheel.build_tag) build_tag_groups = match.groups() build_tag = (int(build_tag_groups[0]), build_tag_groups[1]) else: # sdist @@ -539,8 +541,12 @@ def _sort_key( has_allowed_hash = int(link.is_hash_allowed(self._hashes)) yank_value = -1 * int(link.is_yanked) # -1 for yanked. return ( - has_allowed_hash, yank_value, binary_preference, candidate.version, - pri, build_tag, + has_allowed_hash, + yank_value, + binary_preference, + candidate.version, + pri, + build_tag, ) def sort_best_candidate( @@ -586,6 +592,7 @@ def __init__( link_collector: LinkCollector, target_python: TargetPython, allow_yanked: bool, + use_deprecated_html5lib: bool, format_control: Optional[FormatControl] = None, candidate_prefs: Optional[CandidatePreferences] = None, ignore_requires_python: Optional[bool] = None, @@ -612,6 +619,7 @@ def __init__( self._link_collector = link_collector self._target_python = target_python self._ignore_compatibility = ignore_compatibility + self._use_deprecated_html5lib = use_deprecated_html5lib self.format_control = format_control @@ -628,6 +636,8 @@ def create( link_collector: LinkCollector, selection_prefs: SelectionPreferences, target_python: Optional[TargetPython] = None, + *, + use_deprecated_html5lib: bool, ) -> "PackageFinder": """Create a PackageFinder. @@ -652,6 +662,7 @@ def create( allow_yanked=selection_prefs.allow_yanked, format_control=selection_prefs.format_control, ignore_requires_python=selection_prefs.ignore_requires_python, + use_deprecated_html5lib=use_deprecated_html5lib, ) @property @@ -727,7 +738,7 @@ def _log_skipped_link(self, link: Link, reason: str) -> None: if link not in self._logged_links: # Put the link at the end so the reason is more visible and because # the link string is usually very long. 
- logger.debug('Skipping link: %s: %s', reason, link) + logger.debug("Skipping link: %s: %s", reason, link) self._logged_links.add(link) def get_install_candidate( @@ -767,13 +778,14 @@ def process_project_url( self, project_url: Link, link_evaluator: LinkEvaluator ) -> List[InstallationCandidate]: logger.debug( - 'Fetching project page and analyzing links: %s', project_url, + "Fetching project page and analyzing links: %s", + project_url, ) html_page = self._link_collector.fetch_page(project_url) if html_page is None: return [] - page_links = list(parse_links(html_page)) + page_links = list(parse_links(html_page, self._use_deprecated_html5lib)) with indent_log(): package_links = self.evaluate_links( @@ -823,7 +835,14 @@ def find_all_candidates(self, project_name: str) -> List[InstallationCandidate]: ) if logger.isEnabledFor(logging.DEBUG) and file_candidates: - paths = [url_to_path(c.link.url) for c in file_candidates] + paths = [] + for candidate in file_candidates: + assert candidate.link.url # we need to have a URL + try: + paths.append(candidate.link.file_path) + except Exception: + paths.append(candidate.link.url) # it's not a local file + logger.debug("Local files found: %s", ", ".join(paths)) # This is an intentional priority ordering @@ -835,8 +854,7 @@ def make_candidate_evaluator( specifier: Optional[specifiers.BaseSpecifier] = None, hashes: Optional[Hashes] = None, ) -> CandidateEvaluator: - """Create a CandidateEvaluator object to use. - """ + """Create a CandidateEvaluator object to use.""" candidate_prefs = self._candidate_prefs return CandidateEvaluator.create( project_name=project_name, @@ -881,54 +899,60 @@ def find_requirement( """ hashes = req.hashes(trust_internet=False) best_candidate_result = self.find_best_candidate( - req.name, specifier=req.specifier, hashes=hashes, + req.name, + specifier=req.specifier, + hashes=hashes, ) best_candidate = best_candidate_result.best_candidate installed_version: Optional[_BaseVersion] = None if req.satisfied_by is not None: - installed_version = parse_version(req.satisfied_by.version) + installed_version = req.satisfied_by.version def _format_versions(cand_iter: Iterable[InstallationCandidate]) -> str: # This repeated parse_version and str() conversion is needed to # handle different vendoring sources from pip and pkg_resources. # If we stop using the pkg_resources provided specifier and start # using our own, we can drop the cast to str(). 
- return ", ".join(sorted( - {str(c.version) for c in cand_iter}, - key=parse_version, - )) or "none" + return ( + ", ".join( + sorted( + {str(c.version) for c in cand_iter}, + key=parse_version, + ) + ) + or "none" + ) if installed_version is None and best_candidate is None: logger.critical( - 'Could not find a version that satisfies the requirement %s ' - '(from versions: %s)', + "Could not find a version that satisfies the requirement %s " + "(from versions: %s)", req, _format_versions(best_candidate_result.iter_all()), ) raise DistributionNotFound( - 'No matching distribution found for {}'.format( - req) + "No matching distribution found for {}".format(req) ) best_installed = False if installed_version and ( - best_candidate is None or - best_candidate.version <= installed_version): + best_candidate is None or best_candidate.version <= installed_version + ): best_installed = True if not upgrade and installed_version is not None: if best_installed: logger.debug( - 'Existing installed version (%s) is most up-to-date and ' - 'satisfies requirement', + "Existing installed version (%s) is most up-to-date and " + "satisfies requirement", installed_version, ) else: logger.debug( - 'Existing installed version (%s) satisfies requirement ' - '(most up-to-date version is %s)', + "Existing installed version (%s) satisfies requirement " + "(most up-to-date version is %s)", installed_version, best_candidate.version, ) @@ -937,15 +961,14 @@ def _format_versions(cand_iter: Iterable[InstallationCandidate]) -> str: if best_installed: # We have an existing version, and its the best version logger.debug( - 'Installed version (%s) is most up-to-date (past versions: ' - '%s)', + "Installed version (%s) is most up-to-date (past versions: %s)", installed_version, _format_versions(best_candidate_result.iter_applicable()), ) raise BestVersionAlreadyInstalled logger.debug( - 'Using version %s (newest of versions: %s)', + "Using version %s (newest of versions: %s)", best_candidate.version, _format_versions(best_candidate_result.iter_applicable()), ) diff --git a/pipenv/patched/notpip/_internal/locations/__init__.py b/pipenv/patched/notpip/_internal/locations/__init__.py index 83c6920412..093c325026 100644 --- a/pipenv/patched/notpip/_internal/locations/__init__.py +++ b/pipenv/patched/notpip/_internal/locations/__init__.py @@ -4,11 +4,12 @@ import pathlib import sys import sysconfig -from typing import Dict, Iterator, List, Optional, Tuple +from typing import Any, Dict, Iterator, List, Optional, Tuple from pipenv.patched.notpip._internal.models.scheme import SCHEME_KEYS, Scheme from pipenv.patched.notpip._internal.utils.compat import WINDOWS from pipenv.patched.notpip._internal.utils.deprecation import deprecated +from pipenv.patched.notpip._internal.utils.virtualenv import running_under_virtualenv from . import _distutils, _sysconfig from .base import ( @@ -37,14 +38,54 @@ logger = logging.getLogger(__name__) -if os.environ.get("_PIP_LOCATIONS_NO_WARN_ON_MISMATCH"): - _MISMATCH_LEVEL = logging.DEBUG -else: + +_PLATLIBDIR: str = getattr(sys, "platlibdir", "lib") + +_USE_SYSCONFIG_DEFAULT = sys.version_info >= (3, 10) + + +def _should_use_sysconfig() -> bool: + """This function determines the value of _USE_SYSCONFIG. + + By default, pip uses sysconfig on Python 3.10+. 
+ But Python distributors can override this decision by setting: + sysconfig._PIP_USE_SYSCONFIG = True / False + Rationale in https://github.com/pypa/pip/issues/10647 + + This is a function for testability, but should be constant during any one + run. + """ + return bool(getattr(sysconfig, "_PIP_USE_SYSCONFIG", _USE_SYSCONFIG_DEFAULT)) + + +_USE_SYSCONFIG = _should_use_sysconfig() + +# Be noisy about incompatibilities if this platforms "should" be using +# sysconfig, but is explicitly opting out and using distutils instead. +if _USE_SYSCONFIG_DEFAULT and not _USE_SYSCONFIG: _MISMATCH_LEVEL = logging.WARNING +else: + _MISMATCH_LEVEL = logging.DEBUG + + +def _looks_like_bpo_44860() -> bool: + """The resolution to bpo-44860 will change this incorrect platlib. + + See . + """ + from distutils.command.install import INSTALL_SCHEMES # type: ignore + + try: + unix_user_platlib = INSTALL_SCHEMES["unix_user"]["platlib"] + except KeyError: + return False + return unix_user_platlib == "$usersite" def _looks_like_red_hat_patched_platlib_purelib(scheme: Dict[str, str]) -> bool: platlib = scheme["platlib"] + if "/$platlibdir/" in platlib: + platlib = platlib.replace("/$platlibdir/", f"/{_PLATLIBDIR}/") if "/lib64/" not in platlib: return False unpatched = platlib.replace("/lib64/", "/lib/") @@ -52,7 +93,7 @@ def _looks_like_red_hat_patched_platlib_purelib(scheme: Dict[str, str]) -> bool: @functools.lru_cache(maxsize=None) -def _looks_like_red_hat_patched() -> bool: +def _looks_like_red_hat_lib() -> bool: """Red Hat patches platlib in unix_prefix and unix_home, but not purelib. This is the only way I can see to tell a Red Hat-patched Python. @@ -67,13 +108,67 @@ def _looks_like_red_hat_patched() -> bool: @functools.lru_cache(maxsize=None) -def _looks_like_debian_patched() -> bool: +def _looks_like_debian_scheme() -> bool: """Debian adds two additional schemes.""" from distutils.command.install import INSTALL_SCHEMES # type: ignore return "deb_system" in INSTALL_SCHEMES and "unix_local" in INSTALL_SCHEMES +@functools.lru_cache(maxsize=None) +def _looks_like_red_hat_scheme() -> bool: + """Red Hat patches ``sys.prefix`` and ``sys.exec_prefix``. + + Red Hat's ``00251-change-user-install-location.patch`` changes the install + command's ``prefix`` and ``exec_prefix`` to append ``"/local"``. This is + (fortunately?) done quite unconditionally, so we create a default command + object without any configuration to detect this. + """ + from distutils.command.install import install + from distutils.dist import Distribution + + cmd: Any = install(Distribution()) + cmd.finalize_options() + return ( + cmd.exec_prefix == f"{os.path.normpath(sys.exec_prefix)}/local" + and cmd.prefix == f"{os.path.normpath(sys.prefix)}/local" + ) + + +@functools.lru_cache(maxsize=None) +def _looks_like_slackware_scheme() -> bool: + """Slackware patches sysconfig but fails to patch distutils and site. + + Slackware changes sysconfig's user scheme to use ``"lib64"`` for the lib + path, but does not do the same to the site module. + """ + if user_site is None: # User-site not available. + return False + try: + paths = sysconfig.get_paths(scheme="posix_user", expand=False) + except KeyError: # User-site not available. + return False + return "/lib64/" in paths["purelib"] and "/lib64/" not in user_site + + +@functools.lru_cache(maxsize=None) +def _looks_like_msys2_mingw_scheme() -> bool: + """MSYS2 patches distutils and sysconfig to use a UNIX-like scheme. + + However, MSYS2 incorrectly patches sysconfig ``nt`` scheme. 
The fix is + likely going to be included in their 3.10 release, so we ignore the warning. + See msys2/MINGW-packages#9319. + + MSYS2 MINGW's patch uses lowercase ``"lib"`` instead of the usual uppercase, + and is missing the final ``"site-packages"``. + """ + paths = sysconfig.get_paths("nt", expand=False) + return all( + "Lib" not in p and "lib" in p and not p.endswith("site-packages") + for p in (paths[key] for key in ("platlib", "purelib")) + ) + + def _fix_abiflags(parts: Tuple[str]) -> Iterator[str]: ldversion = sysconfig.get_config_var("LDVERSION") abiflags: str = getattr(sys, "abiflags", None) @@ -90,15 +185,6 @@ def _fix_abiflags(parts: Tuple[str]) -> Iterator[str]: yield part -def _default_base(*, user: bool) -> str: - if user: - base = sysconfig.get_config_var("userbase") - else: - base = sysconfig.get_config_var("base") - assert base is not None - return base - - @functools.lru_cache(maxsize=None) def _warn_mismatched(old: pathlib.Path, new: pathlib.Path, *, key: str) -> None: issue_url = "https://github.com/pypa/pip/issues/10151" @@ -144,7 +230,7 @@ def get_scheme( isolated: bool = False, prefix: Optional[str] = None, ) -> Scheme: - old = _distutils.get_scheme( + new = _sysconfig.get_scheme( dist_name, user=user, home=home, @@ -152,7 +238,10 @@ def get_scheme( isolated=isolated, prefix=prefix, ) - new = _sysconfig.get_scheme( + if _USE_SYSCONFIG: + return new + + old = _distutils.get_scheme( dist_name, user=user, home=home, @@ -161,11 +250,9 @@ def get_scheme( prefix=prefix, ) - base = prefix or home or _default_base(user=user) warning_contexts = [] for k in SCHEME_KEYS: - # Extra join because distutils can return relative paths. - old_v = pathlib.Path(base, getattr(old, k)) + old_v = pathlib.Path(getattr(old, k)) new_v = pathlib.Path(getattr(new, k)) if old_v == new_v: @@ -201,19 +288,45 @@ def get_scheme( # On Red Hat and derived Linux distributions, distutils is patched to # use "lib64" instead of "lib" for platlib. - if k == "platlib" and _looks_like_red_hat_patched(): + if k == "platlib" and _looks_like_red_hat_lib(): + continue + + # On Python 3.9+, sysconfig's posix_user scheme sets platlib against + # sys.platlibdir, but distutils's unix_user incorrectly coninutes + # using the same $usersite for both platlib and purelib. This creates a + # mismatch when sys.platlibdir is not "lib". + skip_bpo_44860 = ( + user + and k == "platlib" + and not WINDOWS + and sys.version_info >= (3, 9) + and _PLATLIBDIR != "lib" + and _looks_like_bpo_44860() + ) + if skip_bpo_44860: + continue + + # Slackware incorrectly patches posix_user to use lib64 instead of lib, + # but not usersite to match the location. + skip_slackware_user_scheme = ( + user + and k in ("platlib", "purelib") + and not WINDOWS + and _looks_like_slackware_scheme() + ) + if skip_slackware_user_scheme: continue # Both Debian and Red Hat patch Python to place the system site under # /usr/local instead of /usr. Debian also places lib in dist-packages # instead of site-packages, but the /usr/local check should cover it. 
skip_linux_system_special_case = ( - not (user or home or prefix) + not (user or home or prefix or running_under_virtualenv()) and old_v.parts[1:3] == ("usr", "local") and len(new_v.parts) > 1 and new_v.parts[1] == "usr" and (len(new_v.parts) < 3 or new_v.parts[2] != "local") - and (_looks_like_red_hat_patched() or _looks_like_debian_patched()) + and (_looks_like_red_hat_scheme() or _looks_like_debian_scheme()) ) if skip_linux_system_special_case: continue @@ -229,6 +342,26 @@ def get_scheme( if skip_sysconfig_abiflag_bug: continue + # MSYS2 MINGW's sysconfig patch does not include the "site-packages" + # part of the path. This is incorrect and will be fixed in MSYS. + skip_msys2_mingw_bug = ( + WINDOWS and k in ("platlib", "purelib") and _looks_like_msys2_mingw_scheme() + ) + if skip_msys2_mingw_bug: + continue + + # CPython's POSIX install script invokes pip (via ensurepip) against the + # interpreter located in the source tree, not the install site. This + # triggers special logic in sysconfig that's not present in distutils. + # https://github.com/python/cpython/blob/8c21941ddaf/Lib/sysconfig.py#L178-L194 + skip_cpython_build = ( + sysconfig.is_python_build(check_home=True) + and not WINDOWS + and k in ("headers", "include", "platinclude") + ) + if skip_cpython_build: + continue + warning_contexts.append((old_v, new_v, f"scheme.{k}")) if not warning_contexts: @@ -248,10 +381,12 @@ def get_scheme( ) if any(default_old[k] != getattr(old, k) for k in SCHEME_KEYS): deprecated( - "Configuring installation scheme with distutils config files " - "is deprecated and will no longer work in the near future. If you " - "are using a Homebrew or Linuxbrew Python, please see discussion " - "at https://github.com/Homebrew/homebrew-core/issues/76621", + reason=( + "Configuring installation scheme with distutils config files " + "is deprecated and will no longer work in the near future. If you " + "are using a Homebrew or Linuxbrew Python, please see discussion " + "at https://github.com/Homebrew/homebrew-core/issues/76621" + ), replacement=None, gone_in=None, ) @@ -266,8 +401,11 @@ def get_scheme( def get_bin_prefix() -> str: - old = _distutils.get_bin_prefix() new = _sysconfig.get_bin_prefix() + if _USE_SYSCONFIG: + return new + + old = _distutils.get_bin_prefix() if _warn_if_mismatch(pathlib.Path(old), pathlib.Path(new), key="bin_prefix"): _log_context() return old @@ -277,10 +415,32 @@ def get_bin_user() -> str: return _sysconfig.get_scheme("", user=True).scripts +def _looks_like_deb_system_dist_packages(value: str) -> bool: + """Check if the value is Debian's APT-controlled dist-packages. + + Debian's ``distutils.sysconfig.get_python_lib()`` implementation returns the + default package path controlled by APT, but does not patch ``sysconfig`` to + do the same. This is similar to the bug worked around in ``get_scheme()``, + but here the default is ``deb_system`` instead of ``unix_local``. Ultimately + we can't do anything about this Debian bug, and this detection allows us to + skip the warning when needed. 
+ """ + if not _looks_like_debian_scheme(): + return False + if value == "/usr/lib/python3/dist-packages": + return True + return False + + def get_purelib() -> str: """Return the default pure-Python lib location.""" - old = _distutils.get_purelib() new = _sysconfig.get_purelib() + if _USE_SYSCONFIG: + return new + + old = _distutils.get_purelib() + if _looks_like_deb_system_dist_packages(old): + return old if _warn_if_mismatch(pathlib.Path(old), pathlib.Path(new), key="purelib"): _log_context() return old @@ -288,17 +448,59 @@ def get_purelib() -> str: def get_platlib() -> str: """Return the default platform-shared lib location.""" - old = _distutils.get_platlib() new = _sysconfig.get_platlib() + if _USE_SYSCONFIG: + return new + + old = _distutils.get_platlib() + if _looks_like_deb_system_dist_packages(old): + return old if _warn_if_mismatch(pathlib.Path(old), pathlib.Path(new), key="platlib"): _log_context() return old +def _deduplicated(v1: str, v2: str) -> List[str]: + """Deduplicate values from a list.""" + if v1 == v2: + return [v1] + return [v1, v2] + + +def _looks_like_apple_library(path: str) -> bool: + """Apple patches sysconfig to *always* look under */Library/Python*.""" + if sys.platform[:6] != "darwin": + return False + return path == f"/Library/Python/{get_major_minor_version()}/site-packages" + + def get_prefixed_libs(prefix: str) -> List[str]: """Return the lib locations under ``prefix``.""" - old_pure, old_plat = _distutils.get_prefixed_libs(prefix) new_pure, new_plat = _sysconfig.get_prefixed_libs(prefix) + if _USE_SYSCONFIG: + return _deduplicated(new_pure, new_plat) + + old_pure, old_plat = _distutils.get_prefixed_libs(prefix) + old_lib_paths = _deduplicated(old_pure, old_plat) + + # Apple's Python (shipped with Xcode and Command Line Tools) hard-code + # platlib and purelib to '/Library/Python/X.Y/site-packages'. This will + # cause serious build isolation bugs when Apple starts shipping 3.10 because + # pip will install build backends to the wrong location. This tells users + # who is at fault so Apple may notice it and fix the issue in time. + if all(_looks_like_apple_library(p) for p in old_lib_paths): + deprecated( + reason=( + "Python distributed by Apple's Command Line Tools incorrectly " + "patches sysconfig to always point to '/Library/Python'. This " + "will cause build isolation to operate incorrectly on Python " + "3.10 or later. Please help report this to Apple so they can " + "fix this. 
https://developer.apple.com/bug-reporting/" + ), + replacement=None, + gone_in=None, + ) + return old_lib_paths warned = [ _warn_if_mismatch( @@ -315,6 +517,4 @@ def get_prefixed_libs(prefix: str) -> List[str]: if any(warned): _log_context(prefix=prefix) - if old_pure == old_plat: - return [old_pure] - return [old_pure, old_plat] + return old_lib_paths diff --git a/pipenv/patched/notpip/_internal/locations/_distutils.py b/pipenv/patched/notpip/_internal/locations/_distutils.py index 6be0409462..f5481ea0c0 100644 --- a/pipenv/patched/notpip/_internal/locations/_distutils.py +++ b/pipenv/patched/notpip/_internal/locations/_distutils.py @@ -81,8 +81,14 @@ def distutils_scheme( scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib)) if running_under_virtualenv(): + if home: + prefix = home + elif user: + prefix = i.install_userbase # type: ignore + else: + prefix = i.prefix scheme["headers"] = os.path.join( - i.prefix, + prefix, "include", "site", f"python{get_major_minor_version()}", @@ -91,10 +97,7 @@ def distutils_scheme( if root is not None: path_no_drive = os.path.splitdrive(os.path.abspath(scheme["headers"]))[1] - scheme["headers"] = os.path.join( - root, - path_no_drive[1:], - ) + scheme["headers"] = os.path.join(root, path_no_drive[1:]) return scheme @@ -135,17 +138,20 @@ def get_scheme( def get_bin_prefix() -> str: + # XXX: In old virtualenv versions, sys.prefix can contain '..' components, + # so we need to call normpath to eliminate them. + prefix = os.path.normpath(sys.prefix) if WINDOWS: - bin_py = os.path.join(sys.prefix, "Scripts") + bin_py = os.path.join(prefix, "Scripts") # buildout uses 'bin' on Windows too? if not os.path.exists(bin_py): - bin_py = os.path.join(sys.prefix, "bin") + bin_py = os.path.join(prefix, "bin") return bin_py # Forcing to use /usr/local/bin for standard macOS framework installs # Also log to ~/Library/Logs/ for use with the Console.app log viewer - if sys.platform[:6] == "darwin" and sys.prefix[:16] == "/System/Library/": + if sys.platform[:6] == "darwin" and prefix[:16] == "/System/Library/": return "/usr/local/bin" - return os.path.join(sys.prefix, "bin") + return os.path.join(prefix, "bin") def get_purelib() -> str: diff --git a/pipenv/patched/notpip/_internal/locations/_sysconfig.py b/pipenv/patched/notpip/_internal/locations/_sysconfig.py index 6d82ac3b3d..1becc169b5 100644 --- a/pipenv/patched/notpip/_internal/locations/_sysconfig.py +++ b/pipenv/patched/notpip/_internal/locations/_sysconfig.py @@ -15,8 +15,8 @@ # Notes on _infer_* functions. -# Unfortunately ``_get_default_scheme()`` is private, so there's no way to -# ask things like "what is the '_prefix' scheme on this platform". These +# Unfortunately ``get_default_scheme()`` didn't exist before 3.10, so there's no +# way to ask things like "what is the '_prefix' scheme on this platform". These # functions try to answer that with some heuristics while accounting for ad-hoc # platforms not covered by CPython's default sysconfig implementation. If the # ad-hoc implementation does not fully implement sysconfig, we'll fall back to @@ -24,7 +24,33 @@ _AVAILABLE_SCHEMES = set(sysconfig.get_scheme_names()) -_HAS_PREFERRED_SCHEME_API = sys.version_info >= (3, 10) +_PREFERRED_SCHEME_API = getattr(sysconfig, "get_preferred_scheme", None) + + +def _should_use_osx_framework_prefix() -> bool: + """Check for Apple's ``osx_framework_library`` scheme. + + Python distributed by Apple's Command Line Tools has this special scheme + that's used when: + + * This is a framework build. 
+ * We are installing into the system prefix. + + This does not account for ``pip install --prefix`` (also means we're not + installing to the system prefix), which should use ``posix_prefix``, but + logic here means ``_infer_prefix()`` outputs ``osx_framework_library``. But + since ``prefix`` is not available for ``sysconfig.get_default_scheme()``, + which is the stdlib replacement for ``_infer_prefix()``, presumably Apple + wouldn't be able to magically switch between ``osx_framework_library`` and + ``posix_prefix``. ``_infer_prefix()`` returning ``osx_framework_library`` + means its behavior is consistent whether we use the stdlib implementation + or our own, and we deal with this special case in ``get_scheme()`` instead. + """ + return ( + "osx_framework_library" in _AVAILABLE_SCHEMES + and not running_under_virtualenv() + and is_osx_framework() + ) def _infer_prefix() -> str: @@ -41,10 +67,9 @@ def _infer_prefix() -> str: If none of the above works, fall back to ``posix_prefix``. """ - if _HAS_PREFERRED_SCHEME_API: - return sysconfig.get_preferred_scheme("prefix") # type: ignore - os_framework_global = is_osx_framework() and not running_under_virtualenv() - if os_framework_global and "osx_framework_library" in _AVAILABLE_SCHEMES: + if _PREFERRED_SCHEME_API: + return _PREFERRED_SCHEME_API("prefix") + if _should_use_osx_framework_prefix(): return "osx_framework_library" implementation_suffixed = f"{sys.implementation.name}_{os.name}" if implementation_suffixed in _AVAILABLE_SCHEMES: @@ -61,8 +86,8 @@ def _infer_prefix() -> str: def _infer_user() -> str: """Try to find a user scheme for the current platform.""" - if _HAS_PREFERRED_SCHEME_API: - return sysconfig.get_preferred_scheme("user") # type: ignore + if _PREFERRED_SCHEME_API: + return _PREFERRED_SCHEME_API("user") if is_osx_framework() and not running_under_virtualenv(): suffixed = "osx_framework_user" else: @@ -76,8 +101,8 @@ def _infer_user() -> str: def _infer_home() -> str: """Try to find a home for the current platform.""" - if _HAS_PREFERRED_SCHEME_API: - return sysconfig.get_preferred_scheme("home") # type: ignore + if _PREFERRED_SCHEME_API: + return _PREFERRED_SCHEME_API("home") suffixed = f"{os.name}_home" if suffixed in _AVAILABLE_SCHEMES: return suffixed @@ -130,6 +155,12 @@ def get_scheme( else: scheme_name = _infer_prefix() + # Special case: When installing into a custom prefix, use posix_prefix + # instead of osx_framework_library. See _should_use_osx_framework_prefix() + # docstring for details. + if prefix is not None and scheme_name == "osx_framework_library": + scheme_name = "posix_prefix" + if home is not None: variables = {k: home for k in _HOME_KEYS} elif prefix is not None: diff --git a/pipenv/patched/notpip/_internal/main.py b/pipenv/patched/notpip/_internal/main.py index d57e93c9de..3390484ce4 100644 --- a/pipenv/patched/notpip/_internal/main.py +++ b/pipenv/patched/notpip/_internal/main.py @@ -1,8 +1,7 @@ from typing import List, Optional -def main(args=None): - # type: (Optional[List[str]]) -> int +def main(args: Optional[List[str]] = None) -> int: """This is preserved for old console scripts that may still be referencing it. 
diff --git a/pipenv/patched/notpip/_internal/metadata/__init__.py b/pipenv/patched/notpip/_internal/metadata/__init__.py index e3429d2551..cc037c14f0 100644 --- a/pipenv/patched/notpip/_internal/metadata/__init__.py +++ b/pipenv/patched/notpip/_internal/metadata/__init__.py @@ -1,10 +1,13 @@ from typing import List, Optional -from .base import BaseDistribution, BaseEnvironment +from .base import BaseDistribution, BaseEnvironment, FilesystemWheel, MemoryWheel, Wheel __all__ = [ "BaseDistribution", "BaseEnvironment", + "FilesystemWheel", + "MemoryWheel", + "Wheel", "get_default_environment", "get_environment", "get_wheel_distribution", @@ -35,7 +38,18 @@ def get_environment(paths: Optional[List[str]]) -> BaseEnvironment: return Environment.from_paths(paths) -def get_wheel_distribution(wheel_path: str, canonical_name: str) -> BaseDistribution: +def get_directory_distribution(directory: str) -> BaseDistribution: + """Get the distribution metadata representation in the specified directory. + + This returns a Distribution instance from the chosen backend based on + the given on-disk ``.dist-info`` directory. + """ + from .pkg_resources import Distribution + + return Distribution.from_directory(directory) + + +def get_wheel_distribution(wheel: Wheel, canonical_name: str) -> BaseDistribution: """Get the representation of the specified wheel's distribution metadata. This returns a Distribution instance from the chosen backend based on @@ -45,4 +59,4 @@ def get_wheel_distribution(wheel_path: str, canonical_name: str) -> BaseDistribu """ from .pkg_resources import Distribution - return Distribution.from_wheel(wheel_path, canonical_name) + return Distribution.from_wheel(wheel, canonical_name) diff --git a/pipenv/patched/notpip/_internal/metadata/base.py b/pipenv/patched/notpip/_internal/metadata/base.py index af3f811c13..1ff7c95113 100644 --- a/pipenv/patched/notpip/_internal/metadata/base.py +++ b/pipenv/patched/notpip/_internal/metadata/base.py @@ -1,8 +1,12 @@ +import csv import email.message import json import logging +import pathlib import re +import zipfile from typing import ( + IO, TYPE_CHECKING, Collection, Container, @@ -10,28 +14,39 @@ Iterator, List, Optional, + Tuple, Union, ) from pipenv.patched.notpip._vendor.packaging.requirements import Requirement +from pipenv.patched.notpip._vendor.packaging.specifiers import InvalidSpecifier, SpecifierSet +from pipenv.patched.notpip._vendor.packaging.utils import NormalizedName from pipenv.patched.notpip._vendor.packaging.version import LegacyVersion, Version +from pipenv.patched.notpip._internal.exceptions import NoneMetadataError +from pipenv.patched.notpip._internal.locations import site_packages, user_site from pipenv.patched.notpip._internal.models.direct_url import ( DIRECT_URL_METADATA_NAME, DirectUrl, DirectUrlValidationError, ) -from pipenv.patched.notpip._internal.utils.misc import stdlib_pkgs # TODO: Move definition here. +from pipenv.patched.notpip._internal.utils.compat import stdlib_pkgs # TODO: Move definition here. 
+from pipenv.patched.notpip._internal.utils.egg_link import ( + egg_link_path_from_location, + egg_link_path_from_sys_path, +) +from pipenv.patched.notpip._internal.utils.misc import is_local, normalize_path +from pipenv.patched.notpip._internal.utils.urls import url_to_path if TYPE_CHECKING: from typing import Protocol - - from pipenv.patched.notpip._vendor.packaging.utils import NormalizedName else: Protocol = object DistributionVersion = Union[LegacyVersion, Version] +InfoPath = Union[str, pathlib.PurePosixPath] + logger = logging.getLogger(__name__) @@ -49,7 +64,43 @@ def group(self) -> str: raise NotImplementedError() +def _convert_installed_files_path( + entry: Tuple[str, ...], + info: Tuple[str, ...], +) -> str: + """Convert a legacy installed-files.txt path into modern RECORD path. + + The legacy format stores paths relative to the info directory, while the + modern format stores paths relative to the package root, e.g. the + site-packages directory. + + :param entry: Path parts of the installed-files.txt entry. + :param info: Path parts of the egg-info directory relative to package root. + :returns: The converted entry. + + For best compatibility with symlinks, this does not use ``abspath()`` or + ``Path.resolve()``, but tries to work with path parts: + + 1. While ``entry`` starts with ``..``, remove the equal amounts of parts + from ``info``; if ``info`` is empty, start appending ``..`` instead. + 2. Join the two directly. + """ + while entry and entry[0] == "..": + if not info or info[-1] == "..": + info += ("..",) + else: + info = info[:-1] + entry = entry[1:] + return str(pathlib.Path(*info, *entry)) + + class BaseDistribution(Protocol): + def __repr__(self) -> str: + return f"{self.raw_name} {self.version} ({self.location})" + + def __str__(self) -> str: + return f"{self.raw_name} {self.version}" + @property def location(self) -> Optional[str]: """Where the distribution is loaded from. @@ -65,8 +116,50 @@ def location(self) -> Optional[str]: raise NotImplementedError() @property - def info_directory(self) -> Optional[str]: - """Location of the .[egg|dist]-info directory. + def editable_project_location(self) -> Optional[str]: + """The project location for editable distributions. + + This is the directory where pyproject.toml or setup.py is located. + None if the distribution is not installed in editable mode. + """ + # TODO: this property is relatively costly to compute, memoize it ? + direct_url = self.direct_url + if direct_url: + if direct_url.is_local_editable(): + return url_to_path(direct_url.url) + else: + # Search for an .egg-link file by walking sys.path, as it was + # done before by dist_is_editable(). + egg_link_path = egg_link_path_from_sys_path(self.raw_name) + if egg_link_path: + # TODO: get project location from second line of egg_link file + # (https://github.com/pypa/pip/issues/10243) + return self.location + return None + + @property + def installed_location(self) -> Optional[str]: + """The distribution's "installed" location. + + This should generally be a ``site-packages`` directory. This is + usually ``dist.location``, except for legacy develop-installed packages, + where ``dist.location`` is the source code location, and this is where + the ``.egg-link`` file is. + + The returned location is normalized (in particular, with symlinks removed). 
+ """ + egg_link = egg_link_path_from_location(self.raw_name) + if egg_link: + location = egg_link + elif self.location: + location = self.location + else: + return None + return normalize_path(location) + + @property + def info_location(self) -> Optional[str]: + """Location of the .[egg|dist]-info directory or file. Similarly to ``location``, a string value is not necessarily a filesystem path. ``None`` means the distribution is created in-memory. @@ -81,13 +174,80 @@ def info_directory(self) -> Optional[str]: raise NotImplementedError() @property - def canonical_name(self) -> "NormalizedName": + def installed_by_distutils(self) -> bool: + """Whether this distribution is installed with legacy distutils format. + + A distribution installed with "raw" distutils not patched by setuptools + uses one single file at ``info_location`` to store metadata. We need to + treat this specially on uninstallation. + """ + info_location = self.info_location + if not info_location: + return False + return pathlib.Path(info_location).is_file() + + @property + def installed_as_egg(self) -> bool: + """Whether this distribution is installed as an egg. + + This usually indicates the distribution was installed by (older versions + of) easy_install. + """ + location = self.location + if not location: + return False + return location.endswith(".egg") + + @property + def installed_with_setuptools_egg_info(self) -> bool: + """Whether this distribution is installed with the ``.egg-info`` format. + + This usually indicates the distribution was installed with setuptools + with an old pip version or with ``single-version-externally-managed``. + + Note that this ensure the metadata store is a directory. distutils can + also installs an ``.egg-info``, but as a file, not a directory. This + property is *False* for that case. Also see ``installed_by_distutils``. + """ + info_location = self.info_location + if not info_location: + return False + if not info_location.endswith(".egg-info"): + return False + return pathlib.Path(info_location).is_dir() + + @property + def installed_with_dist_info(self) -> bool: + """Whether this distribution is installed with the "modern format". + + This indicates a "modern" installation, e.g. storing metadata in the + ``.dist-info`` directory. This applies to installations made by + setuptools (but through pip, not directly), or anything using the + standardized build backend interface (PEP 517). + """ + info_location = self.info_location + if not info_location: + return False + if not info_location.endswith(".dist-info"): + return False + return pathlib.Path(info_location).is_dir() + + @property + def canonical_name(self) -> NormalizedName: raise NotImplementedError() @property def version(self) -> DistributionVersion: raise NotImplementedError() + @property + def setuptools_filename(self) -> str: + """Convert a project name to its setuptools-compatible filename. + + This is a copy of ``pkg_resources.to_filename()`` for compatibility. + """ + return self.raw_name.replace("-", "_") + @property def direct_url(self) -> Optional[DirectUrl]: """Obtain a DirectUrl from this distribution. @@ -116,29 +276,62 @@ def direct_url(self) -> Optional[DirectUrl]: @property def installer(self) -> str: - raise NotImplementedError() + try: + installer_text = self.read_text("INSTALLER") + except (OSError, ValueError, NoneMetadataError): + return "" # Fail silently if the installer file cannot be read. 
+ for line in installer_text.splitlines(): + cleaned_line = line.strip() + if cleaned_line: + return cleaned_line + return "" @property def editable(self) -> bool: - raise NotImplementedError() + return bool(self.editable_project_location) @property def local(self) -> bool: - raise NotImplementedError() + """If distribution is installed in the current virtual environment. + + Always True if we're not in a virtualenv. + """ + if self.installed_location is None: + return False + return is_local(self.installed_location) @property def in_usersite(self) -> bool: - raise NotImplementedError() + if self.installed_location is None or user_site is None: + return False + return self.installed_location.startswith(normalize_path(user_site)) @property def in_site_packages(self) -> bool: + if self.installed_location is None or site_packages is None: + return False + return self.installed_location.startswith(normalize_path(site_packages)) + + def is_file(self, path: InfoPath) -> bool: + """Check whether an entry in the info directory is a file.""" + raise NotImplementedError() + + def iterdir(self, path: InfoPath) -> Iterator[pathlib.PurePosixPath]: + """Iterate through a directory in the info directory. + + Each item yielded would be a path relative to the info directory. + + :raise FileNotFoundError: If ``name`` does not exist in the directory. + :raise NotADirectoryError: If ``name`` does not point to a directory. + """ raise NotImplementedError() - def read_text(self, name: str) -> str: - """Read a file in the .dist-info (or .egg-info) directory. + def read_text(self, path: InfoPath) -> str: + """Read a file in the info directory. - Should raise ``FileNotFoundError`` if ``name`` does not exist in the - metadata directory. + :raise FileNotFoundError: If ``name`` does not exist in the directory. + :raise NoneMetadataError: If ``name`` exists in the info directory, but + cannot be read. """ raise NotImplementedError() @@ -147,7 +340,13 @@ def iter_entry_points(self) -> Iterable[BaseEntryPoint]: @property def metadata(self) -> email.message.Message: - """Metadata of distribution parsed from e.g. METADATA or PKG-INFO.""" + """Metadata of distribution parsed from e.g. METADATA or PKG-INFO. + + This should return an empty message if the metadata file is unavailable. + + :raises NoneMetadataError: If the metadata file is available, but does + not contain valid metadata. + """ raise NotImplementedError() @property @@ -159,12 +358,89 @@ def metadata_version(self) -> Optional[str]: def raw_name(self) -> str: """Value of "Name:" in distribution metadata.""" # The metadata should NEVER be missing the Name: key, but if it somehow - # does not, fall back to the known canonical name. + # does, fall back to the known canonical name. return self.metadata.get("Name", self.canonical_name) + @property + def requires_python(self) -> SpecifierSet: + """Value of "Requires-Python:" in distribution metadata. + + If the key does not exist or contains an invalid value, an empty + SpecifierSet should be returned. + """ + value = self.metadata.get("Requires-Python") + if value is None: + return SpecifierSet() + try: + # Convert to str to satisfy the type checker; this can be a Header object. + spec = SpecifierSet(str(value)) + except InvalidSpecifier as e: + message = "Package %r has an invalid Requires-Python: %s" + logger.warning(message, self.raw_name, e) + return SpecifierSet() + return spec + def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]: + """Dependencies of this distribution. 
+ + For modern .dist-info distributions, this is the collection of + "Requires-Dist:" entries in distribution metadata. + """ raise NotImplementedError() + def iter_provided_extras(self) -> Iterable[str]: + """Extras provided by this distribution. + + For modern .dist-info distributions, this is the collection of + "Provides-Extra:" entries in distribution metadata. + """ + raise NotImplementedError() + + def _iter_declared_entries_from_record(self) -> Optional[Iterator[str]]: + try: + text = self.read_text("RECORD") + except FileNotFoundError: + return None + # This extra Path-str cast normalizes entries. + return (str(pathlib.Path(row[0])) for row in csv.reader(text.splitlines())) + + def _iter_declared_entries_from_legacy(self) -> Optional[Iterator[str]]: + try: + text = self.read_text("installed-files.txt") + except FileNotFoundError: + return None + paths = (p for p in text.splitlines(keepends=False) if p) + root = self.location + info = self.info_location + if root is None or info is None: + return paths + try: + info_rel = pathlib.Path(info).relative_to(root) + except ValueError: # info is not relative to root. + return paths + if not info_rel.parts: # info *is* root. + return paths + return ( + _convert_installed_files_path(pathlib.Path(p).parts, info_rel.parts) + for p in paths + ) + + def iter_declared_entries(self) -> Optional[Iterator[str]]: + """Iterate through file entires declared in this distribution. + + For modern .dist-info distributions, this is the files listed in the + ``RECORD`` metadata file. For legacy setuptools distributions, this + comes from ``installed-files.txt``, with entries normalized to be + compatible with the format used by ``RECORD``. + + :return: An iterator for listed entries, or None if the distribution + contains neither ``RECORD`` nor ``installed-files.txt``. + """ + return ( + self._iter_declared_entries_from_record() + or self._iter_declared_entries_from_legacy() + ) + class BaseEnvironment: """An environment containing distributions to introspect.""" @@ -178,7 +454,11 @@ def from_paths(cls, paths: Optional[List[str]]) -> "BaseEnvironment": raise NotImplementedError() def get_distribution(self, name: str) -> Optional["BaseDistribution"]: - """Given a requirement name, return the installed distributions.""" + """Given a requirement name, return the installed distributions. + + The name may not be normalized. The implementation must canonicalize + it for lookup. 
+ """ raise NotImplementedError() def _iter_distributions(self) -> Iterator["BaseDistribution"]: @@ -240,3 +520,27 @@ def iter_installed_distributions( if user_only: it = (d for d in it if d.in_usersite) return (d for d in it if d.canonical_name not in skip) + + +class Wheel(Protocol): + location: str + + def as_zipfile(self) -> zipfile.ZipFile: + raise NotImplementedError() + + +class FilesystemWheel(Wheel): + def __init__(self, location: str) -> None: + self.location = location + + def as_zipfile(self) -> zipfile.ZipFile: + return zipfile.ZipFile(self.location, allowZip64=True) + + +class MemoryWheel(Wheel): + def __init__(self, location: str, stream: IO[bytes]) -> None: + self.location = location + self.stream = stream + + def as_zipfile(self) -> zipfile.ZipFile: + return zipfile.ZipFile(self.stream, allowZip64=True) diff --git a/pipenv/patched/notpip/_internal/metadata/pkg_resources.py b/pipenv/patched/notpip/_internal/metadata/pkg_resources.py index 179a0831d2..b56713d64f 100644 --- a/pipenv/patched/notpip/_internal/metadata/pkg_resources.py +++ b/pipenv/patched/notpip/_internal/metadata/pkg_resources.py @@ -1,29 +1,28 @@ import email.message +import email.parser import logging +import os +import pathlib import zipfile -from typing import ( - TYPE_CHECKING, - Collection, - Iterable, - Iterator, - List, - NamedTuple, - Optional, -) +from typing import Collection, Iterable, Iterator, List, Mapping, NamedTuple, Optional from pipenv.patched.notpip._vendor import pkg_resources from pipenv.patched.notpip._vendor.packaging.requirements import Requirement -from pipenv.patched.notpip._vendor.packaging.utils import canonicalize_name +from pipenv.patched.notpip._vendor.packaging.utils import NormalizedName, canonicalize_name from pipenv.patched.notpip._vendor.packaging.version import parse as parse_version -from pipenv.patched.notpip._internal.utils import misc # TODO: Move definition here. -from pipenv.patched.notpip._internal.utils.packaging import get_installer, get_metadata -from pipenv.patched.notpip._internal.utils.wheel import pkg_resources_distribution_for_wheel - -from .base import BaseDistribution, BaseEntryPoint, BaseEnvironment, DistributionVersion - -if TYPE_CHECKING: - from pipenv.patched.notpip._vendor.packaging.utils import NormalizedName +from pipenv.patched.notpip._internal.exceptions import InvalidWheel, NoneMetadataError, UnsupportedWheel +from pipenv.patched.notpip._internal.utils.misc import display_path +from pipenv.patched.notpip._internal.utils.wheel import parse_wheel, read_wheel_metadata_file + +from .base import ( + BaseDistribution, + BaseEntryPoint, + BaseEnvironment, + DistributionVersion, + InfoPath, + Wheel, +) logger = logging.getLogger(__name__) @@ -34,14 +33,91 @@ class EntryPoint(NamedTuple): group: str +class WheelMetadata: + """IMetadataProvider that reads metadata files from a dictionary. + + This also maps metadata decoding exceptions to our internal exception type. + """ + + def __init__(self, metadata: Mapping[str, bytes], wheel_name: str) -> None: + self._metadata = metadata + self._wheel_name = wheel_name + + def has_metadata(self, name: str) -> bool: + return name in self._metadata + + def get_metadata(self, name: str) -> str: + try: + return self._metadata[name].decode() + except UnicodeDecodeError as e: + # Augment the default error with the origin of the file. 
+ raise UnsupportedWheel( + f"Error decoding metadata for {self._wheel_name}: {e} in {name} file" + ) + + def get_metadata_lines(self, name: str) -> Iterable[str]: + return pkg_resources.yield_lines(self.get_metadata(name)) + + def metadata_isdir(self, name: str) -> bool: + return False + + def metadata_listdir(self, name: str) -> List[str]: + return [] + + def run_script(self, script_name: str, namespace: str) -> None: + pass + + class Distribution(BaseDistribution): def __init__(self, dist: pkg_resources.Distribution) -> None: self._dist = dist @classmethod - def from_wheel(cls, path: str, name: str) -> "Distribution": - with zipfile.ZipFile(path, allowZip64=True) as zf: - dist = pkg_resources_distribution_for_wheel(zf, name, path) + def from_directory(cls, directory: str) -> "Distribution": + dist_dir = directory.rstrip(os.sep) + + # Build a PathMetadata object, from path to metadata. :wink: + base_dir, dist_dir_name = os.path.split(dist_dir) + metadata = pkg_resources.PathMetadata(base_dir, dist_dir) + + # Determine the correct Distribution object type. + if dist_dir.endswith(".egg-info"): + dist_cls = pkg_resources.Distribution + dist_name = os.path.splitext(dist_dir_name)[0] + else: + assert dist_dir.endswith(".dist-info") + dist_cls = pkg_resources.DistInfoDistribution + dist_name = os.path.splitext(dist_dir_name)[0].split("-")[0] + + dist = dist_cls(base_dir, project_name=dist_name, metadata=metadata) + return cls(dist) + + @classmethod + def from_wheel(cls, wheel: Wheel, name: str) -> "Distribution": + """Load the distribution from a given wheel. + + :raises InvalidWheel: Whenever loading of the wheel causes a + :py:exc:`zipfile.BadZipFile` exception to be thrown. + :raises UnsupportedWheel: If the wheel is a valid zip, but malformed + internally. + """ + try: + with wheel.as_zipfile() as zf: + info_dir, _ = parse_wheel(zf, name) + metadata_text = { + path.split("/", 1)[-1]: read_wheel_metadata_file(zf, path) + for path in zf.namelist() + if path.startswith(f"{info_dir}/") + } + except zipfile.BadZipFile as e: + raise InvalidWheel(wheel.location, name) from e + except UnsupportedWheel as e: + raise UnsupportedWheel(f"{name} has an invalid wheel, {e}") + dist = pkg_resources.DistInfoDistribution( + location=wheel.location, + metadata=WheelMetadata(metadata_text, wheel.location), + project_name=name, + ) return cls(dist) @property @@ -49,41 +125,47 @@ def location(self) -> Optional[str]: return self._dist.location @property - def info_directory(self) -> Optional[str]: + def info_location(self) -> Optional[str]: return self._dist.egg_info @property - def canonical_name(self) -> "NormalizedName": + def installed_by_distutils(self) -> bool: + # A distutils-installed distribution is provided by FileMetadata. This + # provider has a "path" attribute not present anywhere else. Not the + # best introspection logic, but pip has been doing this for a long time. 
+ try: + return bool(self._dist._provider.path) + except AttributeError: + return False + + @property + def canonical_name(self) -> NormalizedName: return canonicalize_name(self._dist.project_name) @property def version(self) -> DistributionVersion: return parse_version(self._dist.version) - @property - def installer(self) -> str: - return get_installer(self._dist) - - @property - def editable(self) -> bool: - return misc.dist_is_editable(self._dist) + def is_file(self, path: InfoPath) -> bool: + return self._dist.has_metadata(str(path)) - @property - def local(self) -> bool: - return misc.dist_is_local(self._dist) - - @property - def in_usersite(self) -> bool: - return misc.dist_in_usersite(self._dist) - - @property - def in_site_packages(self) -> bool: - return misc.dist_in_site_packages(self._dist) + def iterdir(self, path: InfoPath) -> Iterator[pathlib.PurePosixPath]: + name = str(path) + if not self._dist.has_metadata(name): + raise FileNotFoundError(name) + if not self._dist.isdir(name): + raise NotADirectoryError(name) + for child in self._dist.metadata_listdir(name): + yield pathlib.PurePosixPath(path, child) - def read_text(self, name: str) -> str: + def read_text(self, path: InfoPath) -> str: + name = str(path) if not self._dist.has_metadata(name): raise FileNotFoundError(name) - return self._dist.get_metadata(name) + content = self._dist.get_metadata(name) + if content is None: + raise NoneMetadataError(self, name) + return content def iter_entry_points(self) -> Iterable[BaseEntryPoint]: for group, entries in self._dist.get_entry_map().items(): @@ -93,13 +175,35 @@ def iter_entry_points(self) -> Iterable[BaseEntryPoint]: @property def metadata(self) -> email.message.Message: - return get_metadata(self._dist) + """ + :raises NoneMetadataError: if the distribution reports `has_metadata()` + True but `get_metadata()` returns None. + """ + if isinstance(self._dist, pkg_resources.DistInfoDistribution): + metadata_name = "METADATA" + else: + metadata_name = "PKG-INFO" + try: + metadata = self.read_text(metadata_name) + except FileNotFoundError: + if self.location: + displaying_path = display_path(self.location) + else: + displaying_path = repr(self.location) + logger.warning("No metadata found in %s", displaying_path) + metadata = "" + feed_parser = email.parser.FeedParser() + feed_parser.feed(metadata) + return feed_parser.close() def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]: if extras: # pkg_resources raises on invalid extras, so we sanitize. extras = frozenset(extras).intersection(self._dist.extras) return self._dist.requires(extras) + def iter_provided_extras(self) -> Iterable[str]: + return self._dist.extras + class Environment(BaseEnvironment): def __init__(self, ws: pkg_resources.WorkingSet) -> None: @@ -126,7 +230,6 @@ def _search_distribution(self, name: str) -> Optional[BaseDistribution]: return None def get_distribution(self, name: str) -> Optional[BaseDistribution]: - # Search the distribution by looking through the working set. dist = self._search_distribution(name) if dist: diff --git a/pipenv/patched/notpip/_internal/models/candidate.py b/pipenv/patched/notpip/_internal/models/candidate.py index ea8227cf40..36ee0c6d3d 100644 --- a/pipenv/patched/notpip/_internal/models/candidate.py +++ b/pipenv/patched/notpip/_internal/models/candidate.py @@ -5,8 +5,7 @@ class InstallationCandidate(KeyBasedCompareMixin): - """Represents a potential "candidate" for installation. 
- """ + """Represents a potential "candidate" for installation.""" __slots__ = ["name", "version", "link"] @@ -17,15 +16,19 @@ def __init__(self, name: str, version: str, link: Link) -> None: super().__init__( key=(self.name, self.version, self.link), - defining_class=InstallationCandidate + defining_class=InstallationCandidate, ) def __repr__(self) -> str: return "".format( - self.name, self.version, self.link, + self.name, + self.version, + self.link, ) def __str__(self) -> str: - return '{!r} candidate (version {} at {})'.format( - self.name, self.version, self.link, + return "{!r} candidate (version {} at {})".format( + self.name, + self.version, + self.link, ) diff --git a/pipenv/patched/notpip/_internal/models/direct_url.py b/pipenv/patched/notpip/_internal/models/direct_url.py index 3f9b6993e3..92060d45db 100644 --- a/pipenv/patched/notpip/_internal/models/direct_url.py +++ b/pipenv/patched/notpip/_internal/models/direct_url.py @@ -137,9 +137,7 @@ def __init__( def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["DirInfo"]: if d is None: return None - return cls( - editable=_get_required(d, bool, "editable", default=False) - ) + return cls(editable=_get_required(d, bool, "editable", default=False)) def _to_dict(self) -> Dict[str, Any]: return _filter_none(editable=self.editable or None) @@ -149,7 +147,6 @@ def _to_dict(self) -> Dict[str, Any]: class DirectUrl: - def __init__( self, url: str, @@ -165,9 +162,9 @@ def _remove_auth_from_netloc(self, netloc: str) -> str: return netloc user_pass, netloc_no_user_pass = netloc.split("@", 1) if ( - isinstance(self.info, VcsInfo) and - self.info.vcs == "git" and - user_pass == "git" + isinstance(self.info, VcsInfo) + and self.info.vcs == "git" + and user_pass == "git" ): return netloc if ENV_VAR_RE.match(user_pass): @@ -218,3 +215,6 @@ def from_json(cls, s: str) -> "DirectUrl": def to_json(self) -> str: return json.dumps(self.to_dict(), sort_keys=True) + + def is_local_editable(self) -> bool: + return isinstance(self.info, DirInfo) and self.info.editable diff --git a/pipenv/patched/notpip/_internal/models/format_control.py b/pipenv/patched/notpip/_internal/models/format_control.py index 40eae29554..a7de83ba6b 100644 --- a/pipenv/patched/notpip/_internal/models/format_control.py +++ b/pipenv/patched/notpip/_internal/models/format_control.py @@ -6,15 +6,14 @@ class FormatControl: - """Helper for managing formats from which a package can be installed. - """ + """Helper for managing formats from which a package can be installed.""" __slots__ = ["no_binary", "only_binary"] def __init__( self, no_binary: Optional[Set[str]] = None, - only_binary: Optional[Set[str]] = None + only_binary: Optional[Set[str]] = None, ) -> None: if no_binary is None: no_binary = set() @@ -31,35 +30,30 @@ def __eq__(self, other: object) -> bool: if self.__slots__ != other.__slots__: return False - return all( - getattr(self, k) == getattr(other, k) - for k in self.__slots__ - ) + return all(getattr(self, k) == getattr(other, k) for k in self.__slots__) def __repr__(self) -> str: return "{}({}, {})".format( - self.__class__.__name__, - self.no_binary, - self.only_binary + self.__class__.__name__, self.no_binary, self.only_binary ) @staticmethod def handle_mutual_excludes(value: str, target: Set[str], other: Set[str]) -> None: - if value.startswith('-'): + if value.startswith("-"): raise CommandError( "--no-binary / --only-binary option requires 1 argument." 
) - new = value.split(',') - while ':all:' in new: + new = value.split(",") + while ":all:" in new: other.clear() target.clear() - target.add(':all:') - del new[:new.index(':all:') + 1] + target.add(":all:") + del new[: new.index(":all:") + 1] # Without a none, we want to discard everything as :all: covers it - if ':none:' not in new: + if ":none:" not in new: return for name in new: - if name == ':none:': + if name == ":none:": target.clear() continue name = canonicalize_name(name) @@ -69,16 +63,18 @@ def handle_mutual_excludes(value: str, target: Set[str], other: Set[str]) -> Non def get_allowed_formats(self, canonical_name: str) -> FrozenSet[str]: result = {"binary", "source"} if canonical_name in self.only_binary: - result.discard('source') + result.discard("source") elif canonical_name in self.no_binary: - result.discard('binary') - elif ':all:' in self.only_binary: - result.discard('source') - elif ':all:' in self.no_binary: - result.discard('binary') + result.discard("binary") + elif ":all:" in self.only_binary: + result.discard("source") + elif ":all:" in self.no_binary: + result.discard("binary") return frozenset(result) def disallow_binaries(self) -> None: self.handle_mutual_excludes( - ':all:', self.no_binary, self.only_binary, + ":all:", + self.no_binary, + self.only_binary, ) diff --git a/pipenv/patched/notpip/_internal/models/index.py b/pipenv/patched/notpip/_internal/models/index.py index 1874a5b60d..b94c32511f 100644 --- a/pipenv/patched/notpip/_internal/models/index.py +++ b/pipenv/patched/notpip/_internal/models/index.py @@ -2,18 +2,16 @@ class PackageIndex: - """Represents a Package Index and provides easier access to endpoints - """ + """Represents a Package Index and provides easier access to endpoints""" - __slots__ = ['url', 'netloc', 'simple_url', 'pypi_url', - 'file_storage_domain'] + __slots__ = ["url", "netloc", "simple_url", "pypi_url", "file_storage_domain"] def __init__(self, url: str, file_storage_domain: str) -> None: super().__init__() self.url = url self.netloc = urllib.parse.urlsplit(url).netloc - self.simple_url = self._url_for_path('simple') - self.pypi_url = self._url_for_path('pypi') + self.simple_url = self._url_for_path("simple") + self.pypi_url = self._url_for_path("pypi") # This is part of a temporary hack used to block installs of PyPI # packages which depend on external urls only necessary until PyPI can @@ -24,9 +22,7 @@ def _url_for_path(self, path: str) -> str: return urllib.parse.urljoin(self.url, path) -PyPI = PackageIndex( - 'https://pypi.org/', file_storage_domain='files.pythonhosted.org' -) +PyPI = PackageIndex("https://pypi.org/", file_storage_domain="files.pythonhosted.org") TestPyPI = PackageIndex( - 'https://test.pypi.org/', file_storage_domain='test-files.pythonhosted.org' + "https://test.pypi.org/", file_storage_domain="test-files.pythonhosted.org" ) diff --git a/pipenv/patched/notpip/_internal/models/link.py b/pipenv/patched/notpip/_internal/models/link.py index 8baa9ac06a..334a38c30a 100644 --- a/pipenv/patched/notpip/_internal/models/link.py +++ b/pipenv/patched/notpip/_internal/models/link.py @@ -26,8 +26,7 @@ class Link(KeyBasedCompareMixin): - """Represents a parsed link from a Package Index's simple URL - """ + """Represents a parsed link from a Package Index's simple URL""" __slots__ = [ "_parsed_url", @@ -68,7 +67,7 @@ def __init__( """ # url can be a UNC windows share - if url.startswith('\\\\'): + if url.startswith("\\\\"): url = path_to_url(url) self._parsed_url = urllib.parse.urlsplit(url) @@ -86,17 +85,18 @@ def 
__init__( def __str__(self) -> str: if self.requires_python: - rp = f' (requires-python:{self.requires_python})' + rp = f" (requires-python:{self.requires_python})" else: - rp = '' + rp = "" if self.comes_from: - return '{} (from {}){}'.format( - redact_auth_from_url(self._url), self.comes_from, rp) + return "{} (from {}){}".format( + redact_auth_from_url(self._url), self.comes_from, rp + ) else: return redact_auth_from_url(str(self._url)) def __repr__(self) -> str: - return f'<Link {self}>' + return f"<Link {self}>" @property def url(self) -> str: @@ -104,7 +104,7 @@ def url(self) -> str: @property def filename(self) -> str: - path = self.path.rstrip('/') + path = self.path.rstrip("/") name = posixpath.basename(path) if not name: # Make sure we don't leak auth information if the netloc @@ -113,7 +113,7 @@ def filename(self) -> str: return netloc name = urllib.parse.unquote(name) - assert name, f'URL {self._url!r} produced no filename' + assert name, f"URL {self._url!r} produced no filename" return name @property @@ -136,7 +136,7 @@ def path(self) -> str: return urllib.parse.unquote(self._parsed_url.path) def splitext(self) -> Tuple[str, str]: - return splitext(posixpath.basename(self.path.rstrip('/'))) + return splitext(posixpath.basename(self.path.rstrip("/"))) @property def ext(self) -> str: @@ -145,9 +145,9 @@ def ext(self) -> str: @property def url_without_fragment(self) -> str: scheme, netloc, path, query, fragment = self._parsed_url - return urllib.parse.urlunsplit((scheme, netloc, path, query, '')) + return urllib.parse.urlunsplit((scheme, netloc, path, query, "")) - _egg_fragment_re = re.compile(r'[#&]egg=([^&]*)') + _egg_fragment_re = re.compile(r"[#&]egg=([^&]*)") @property def egg_fragment(self) -> Optional[str]: @@ -156,7 +156,7 @@ def egg_fragment(self) -> Optional[str]: return None return match.group(1) - _subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)') + _subdirectory_fragment_re = re.compile(r"[#&]subdirectory=([^&]*)") @property def subdirectory_fragment(self) -> Optional[str]: @@ -166,7 +166,7 @@ def subdirectory_fragment(self) -> Optional[str]: return match.group(1) _hash_re = re.compile( - r'({choices})=([a-f0-9]+)'.format(choices="|".join(_SUPPORTED_HASHES)) + r"({choices})=([a-f0-9]+)".format(choices="|".join(_SUPPORTED_HASHES)) ) @property @@ -185,11 +185,11 @@ def hash_name(self) -> Optional[str]: @property def show_url(self) -> str: - return posixpath.basename(self._url.split('#', 1)[0].split('?', 1)[0]) + return posixpath.basename(self._url.split("#", 1)[0].split("?", 1)[0]) @property def is_file(self) -> bool: - return self.scheme == 'file' + return self.scheme == "file" def is_existing_dir(self) -> bool: return self.is_file and os.path.isdir(self.file_path) @@ -256,33 +256,33 @@ class _CleanResult(NamedTuple): subdirectory: str hashes: Dict[str, str] - @classmethod - def from_link(cls, link: Link) -> "_CleanResult": - parsed = link._parsed_url - netloc = parsed.netloc.rsplit("@", 1)[-1] - # According to RFC 8089, an empty host in file: means localhost. - if parsed.scheme == "file" and not netloc: - netloc = "localhost" - fragment = urllib.parse.parse_qs(parsed.fragment) - if "egg" in fragment: - logger.debug("Ignoring egg= fragment in %s", link) - try: - # If there are multiple subdirectory values, use the first one. - # This matches the behavior of Link.subdirectory_fragment. - subdirectory = fragment["subdirectory"][0] - except (IndexError, KeyError): - subdirectory = "" - # If there are multiple hash values under the same algorithm, use the - # first one.
This matches the behavior of Link.hash_value. - hashes = {k: fragment[k][0] for k in _SUPPORTED_HASHES if k in fragment} - return cls( - parsed=parsed._replace(netloc=netloc, query="", fragment=""), - query=urllib.parse.parse_qs(parsed.query), - subdirectory=subdirectory, - hashes=hashes, - ) + +def _clean_link(link: Link) -> _CleanResult: + parsed = link._parsed_url + netloc = parsed.netloc.rsplit("@", 1)[-1] + # According to RFC 8089, an empty host in file: means localhost. + if parsed.scheme == "file" and not netloc: + netloc = "localhost" + fragment = urllib.parse.parse_qs(parsed.fragment) + if "egg" in fragment: + logger.debug("Ignoring egg= fragment in %s", link) + try: + # If there are multiple subdirectory values, use the first one. + # This matches the behavior of Link.subdirectory_fragment. + subdirectory = fragment["subdirectory"][0] + except (IndexError, KeyError): + subdirectory = "" + # If there are multiple hash values under the same algorithm, use the + # first one. This matches the behavior of Link.hash_value. + hashes = {k: fragment[k][0] for k in _SUPPORTED_HASHES if k in fragment} + return _CleanResult( + parsed=parsed._replace(netloc=netloc, query="", fragment=""), + query=urllib.parse.parse_qs(parsed.query), + subdirectory=subdirectory, + hashes=hashes, + ) @functools.lru_cache(maxsize=None) def links_equivalent(link1: Link, link2: Link) -> bool: - return _CleanResult.from_link(link1) == _CleanResult.from_link(link2) + return _clean_link(link1) == _clean_link(link2) diff --git a/pipenv/patched/notpip/_internal/models/scheme.py b/pipenv/patched/notpip/_internal/models/scheme.py index 9a8dafba30..f51190ac60 100644 --- a/pipenv/patched/notpip/_internal/models/scheme.py +++ b/pipenv/patched/notpip/_internal/models/scheme.py @@ -6,7 +6,7 @@ """ -SCHEME_KEYS = ['platlib', 'purelib', 'headers', 'scripts', 'data'] +SCHEME_KEYS = ["platlib", "purelib", "headers", "scripts", "data"] class Scheme: diff --git a/pipenv/patched/notpip/_internal/models/search_scope.py b/pipenv/patched/notpip/_internal/models/search_scope.py index 4b70040732..3ace085b14 100644 --- a/pipenv/patched/notpip/_internal/models/search_scope.py +++ b/pipenv/patched/notpip/_internal/models/search_scope.py @@ -38,7 +38,7 @@ def create( # blindly normalize anything starting with a ~... built_find_links: List[str] = [] for link in find_links: - if link.startswith('~'): + if link.startswith("~"): new_link = normalize_path(link) if os.path.exists(new_link): link = new_link @@ -49,11 +49,11 @@ def create( if not has_tls(): for link in itertools.chain(index_urls, built_find_links): parsed = urllib.parse.urlparse(link) - if parsed.scheme == 'https': + if parsed.scheme == "https": logger.warning( - 'pip is configured with locations that require ' - 'TLS/SSL, however the ssl module in Python is not ' - 'available.' + "pip is configured with locations that require " + "TLS/SSL, however the ssl module in Python is not " + "available." 
) break @@ -88,20 +88,23 @@ def get_formatted_locations(self) -> str: # exceptions for malformed URLs if not purl.scheme and not purl.netloc: logger.warning( - 'The index url "%s" seems invalid, ' - 'please provide a scheme.', redacted_index_url) + 'The index url "%s" seems invalid, please provide a scheme.', + redacted_index_url, + ) redacted_index_urls.append(redacted_index_url) - lines.append('Looking in indexes: {}'.format( - ', '.join(redacted_index_urls))) + lines.append( + "Looking in indexes: {}".format(", ".join(redacted_index_urls)) + ) if self.find_links: lines.append( - 'Looking in links: {}'.format(', '.join( - redact_auth_from_url(url) for url in self.find_links)) + "Looking in links: {}".format( + ", ".join(redact_auth_from_url(url) for url in self.find_links) + ) ) - return '\n'.join(lines) + return "\n".join(lines) def get_index_urls_locations(self, project_name: str) -> List[str]: """Returns the locations found via self.index_urls @@ -112,15 +115,15 @@ def get_index_urls_locations(self, project_name: str) -> List[str]: def mkurl_pypi_url(url: str) -> str: loc = posixpath.join( - url, - urllib.parse.quote(canonicalize_name(project_name))) + url, urllib.parse.quote(canonicalize_name(project_name)) + ) # For maximum compatibility with easy_install, ensure the path # ends in a trailing slash. Although this isn't in the spec # (and PyPI can handle it without the slash) some other index # implementations might break if they relied on easy_install's # behavior. - if not loc.endswith('/'): - loc = loc + '/' + if not loc.endswith("/"): + loc = loc + "/" return loc return [mkurl_pypi_url(url) for url in self.index_urls] diff --git a/pipenv/patched/notpip/_internal/models/selection_prefs.py b/pipenv/patched/notpip/_internal/models/selection_prefs.py index a6b6388126..7e93637bc9 100644 --- a/pipenv/patched/notpip/_internal/models/selection_prefs.py +++ b/pipenv/patched/notpip/_internal/models/selection_prefs.py @@ -9,8 +9,13 @@ class SelectionPreferences: and installing files. """ - __slots__ = ['allow_yanked', 'allow_all_prereleases', 'format_control', - 'prefer_binary', 'ignore_requires_python'] + __slots__ = [ + "allow_yanked", + "allow_all_prereleases", + "format_control", + "prefer_binary", + "ignore_requires_python", + ] # Don't include an allow_yanked default value to make sure each call # site considers whether yanked releases are allowed. 
This also causes diff --git a/pipenv/patched/notpip/_internal/models/target_python.py b/pipenv/patched/notpip/_internal/models/target_python.py index 9bd6fba562..0eafa97fa8 100644 --- a/pipenv/patched/notpip/_internal/models/target_python.py +++ b/pipenv/patched/notpip/_internal/models/target_python.py @@ -53,7 +53,7 @@ def __init__( else: py_version_info = normalize_version_info(py_version_info) - py_version = '.'.join(map(str, py_version_info[:2])) + py_version = ".".join(map(str, py_version_info[:2])) self.abis = abis self.implementation = implementation @@ -70,19 +70,18 @@ def format_given(self) -> str: """ display_version = None if self._given_py_version_info is not None: - display_version = '.'.join( + display_version = ".".join( str(part) for part in self._given_py_version_info ) key_values = [ - ('platforms', self.platforms), - ('version_info', display_version), - ('abis', self.abis), - ('implementation', self.implementation), + ("platforms", self.platforms), + ("version_info", display_version), + ("abis", self.abis), + ("implementation", self.implementation), ] - return ' '.join( - f'{key}={value!r}' for key, value in key_values - if value is not None + return " ".join( + f"{key}={value!r}" for key, value in key_values if value is not None ) def get_tags(self) -> List[Tag]: diff --git a/pipenv/patched/notpip/_internal/models/wheel.py b/pipenv/patched/notpip/_internal/models/wheel.py index ed690340f9..9a5fccda2f 100644 --- a/pipenv/patched/notpip/_internal/models/wheel.py +++ b/pipenv/patched/notpip/_internal/models/wheel.py @@ -16,7 +16,7 @@ class Wheel: r"""^(?P<namever>(?P<name>.+?)-(?P<ver>.*?)) ((-(?P<build>\d[^-]*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?) \.whl|\.dist-info)$""", - re.VERBOSE + re.VERBOSE, ) def __init__(self, filename: str) -> None: @@ -25,23 +25,20 @@ def __init__(self, filename: str) -> None: """ wheel_info = self.wheel_file_re.match(filename) if not wheel_info: - raise InvalidWheelFilename( - f"{filename} is not a valid wheel filename."
- ) + raise InvalidWheelFilename(f"{filename} is not a valid wheel filename.") self.filename = filename - self.name = wheel_info.group('name').replace('_', '-') + self.name = wheel_info.group("name").replace("_", "-") # we'll assume "_" means "-" due to wheel naming scheme # (https://github.com/pypa/pip/issues/1150) - self.version = wheel_info.group('ver').replace('_', '-') - self.build_tag = wheel_info.group('build') - self.pyversions = wheel_info.group('pyver').split('.') - self.abis = wheel_info.group('abi').split('.') - self.plats = wheel_info.group('plat').split('.') + self.version = wheel_info.group("ver").replace("_", "-") + self.build_tag = wheel_info.group("build") + self.pyversions = wheel_info.group("pyver").split(".") + self.abis = wheel_info.group("abi").split(".") + self.plats = wheel_info.group("plat").split(".") # All the tag combinations from this file self.file_tags = { - Tag(x, y, z) for x in self.pyversions - for y in self.abis for z in self.plats + Tag(x, y, z) for x in self.pyversions for y in self.abis for z in self.plats } def get_formatted_file_tags(self) -> List[str]: diff --git a/pipenv/patched/notpip/_internal/network/auth.py b/pipenv/patched/notpip/_internal/network/auth.py index 102ce5be61..66ce543b52 100644 --- a/pipenv/patched/notpip/_internal/network/auth.py +++ b/pipenv/patched/notpip/_internal/network/auth.py @@ -28,13 +28,13 @@ try: import keyring except ImportError: - keyring = None + keyring = None # type: ignore[assignment] except Exception as exc: logger.warning( "Keyring is skipped due to an exception: %s", str(exc), ) - keyring = None + keyring = None # type: ignore[assignment] def get_keyring_auth(url: Optional[str], username: Optional[str]) -> Optional[AuthInfo]: @@ -66,7 +66,7 @@ def get_keyring_auth(url: Optional[str], username: Optional[str]) -> Optional[Au "Keyring is skipped due to an exception: %s", str(exc), ) - keyring = None + keyring = None # type: ignore[assignment] return None @@ -179,9 +179,16 @@ def _get_url_and_credentials( # Try to get credentials from original url username, password = self._get_new_credentials(original_url) - # If credentials not found, use any stored credentials for this netloc - if username is None and password is None: - username, password = self.passwords.get(netloc, (None, None)) + # If credentials not found, use any stored credentials for this netloc. + # Do this if either the username or the password is missing. + # This accounts for the situation in which the user has specified + # the username in the index url, but the password comes from keyring. + if (username is None or password is None) and netloc in self.passwords: + un, pw = self.passwords[netloc] + # It is possible that the cached credentials are for a different username, + # in which case the cache should be ignored. 
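A minimal standalone sketch of the credential rule this auth.py hunk implements, assuming a plain dict cache keyed by netloc; merge_credentials and its parameters are illustrative names for this note, not pip's API:

from typing import Dict, Optional, Tuple

def merge_credentials(
    username: Optional[str],
    password: Optional[str],
    netloc: str,
    cache: Dict[str, Tuple[str, str]],
) -> Tuple[Optional[str], Optional[str]]:
    # Fall back to the cache when either half is missing, but never let a
    # cached entry for a *different* username override an explicit one.
    if (username is None or password is None) and netloc in cache:
        cached_user, cached_pass = cache[netloc]
        if username is None or username == cached_user:
            return cached_user, cached_pass
    return username, password

# A username given in the index URL keeps its cached (e.g. keyring) password:
assert merge_credentials(
    "bob", None, "pypi.example.org", {"pypi.example.org": ("bob", "s3cret")}
) == ("bob", "s3cret")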
+ if username is None or username == un: + username, password = un, pw if username is not None or password is not None: # Convert the username and password if they're None, so that diff --git a/pipenv/patched/notpip/_internal/network/cache.py b/pipenv/patched/notpip/_internal/network/cache.py index 0ec6f96747..99b84b7d8d 100644 --- a/pipenv/patched/notpip/_internal/network/cache.py +++ b/pipenv/patched/notpip/_internal/network/cache.py @@ -53,7 +53,7 @@ def get(self, key: str) -> Optional[bytes]: with open(path, "rb") as f: return f.read() - def set(self, key: str, value: bytes) -> None: + def set(self, key: str, value: bytes, expires: Optional[int] = None) -> None: path = self._get_cache_path(key) with suppressed_cache_errors(): ensure_dir(os.path.dirname(path)) diff --git a/pipenv/patched/notpip/_internal/network/download.py b/pipenv/patched/notpip/_internal/network/download.py index fe229686d3..7175398b46 100644 --- a/pipenv/patched/notpip/_internal/network/download.py +++ b/pipenv/patched/notpip/_internal/network/download.py @@ -8,7 +8,7 @@ from pipenv.patched.notpip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response -from pipenv.patched.notpip._internal.cli.progress_bars import DownloadProgressProvider +from pipenv.patched.notpip._internal.cli.progress_bars import get_download_progress_renderer from pipenv.patched.notpip._internal.exceptions import NetworkConnectionError from pipenv.patched.notpip._internal.models.index import PyPI from pipenv.patched.notpip._internal.models.link import Link @@ -65,7 +65,8 @@ def _prepare_download( if not show_progress: return chunks - return DownloadProgressProvider(progress_bar, max=total_length)(chunks) + renderer = get_download_progress_renderer(bar_type=progress_bar, size=total_length) + return renderer(chunks) def sanitize_content_filename(filename: str) -> str: diff --git a/pipenv/patched/notpip/_internal/network/lazy_wheel.py b/pipenv/patched/notpip/_internal/network/lazy_wheel.py index 17565c7e33..42545850c5 100644 --- a/pipenv/patched/notpip/_internal/network/lazy_wheel.py +++ b/pipenv/patched/notpip/_internal/network/lazy_wheel.py @@ -8,33 +8,33 @@ from typing import Any, Dict, Iterator, List, Optional, Tuple from zipfile import BadZipfile, ZipFile -from pipenv.patched.notpip._vendor.pkg_resources import Distribution +from pipenv.patched.notpip._vendor.packaging.utils import canonicalize_name from pipenv.patched.notpip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response +from pipenv.patched.notpip._internal.metadata import BaseDistribution, MemoryWheel, get_wheel_distribution from pipenv.patched.notpip._internal.network.session import PipSession from pipenv.patched.notpip._internal.network.utils import HEADERS, raise_for_status, response_chunks -from pipenv.patched.notpip._internal.utils.wheel import pkg_resources_distribution_for_wheel class HTTPRangeRequestUnsupported(Exception): pass -def dist_from_wheel_url(name: str, url: str, session: PipSession) -> Distribution: - """Return a pkg_resources.Distribution from the given wheel URL. +def dist_from_wheel_url(name: str, url: str, session: PipSession) -> BaseDistribution: + """Return a distribution object from the given wheel URL. This uses HTTP range requests to only fetch the potion of the wheel containing metadata, just enough for the object to be constructed. If such requests are not supported, HTTPRangeRequestUnsupported is raised. 
""" - with LazyZipOverHTTP(url, session) as wheel: + with LazyZipOverHTTP(url, session) as zf: # For read-only ZIP files, ZipFile only needs methods read, # seek, seekable and tell, not the whole IO protocol. - zip_file = ZipFile(wheel) # type: ignore + wheel = MemoryWheel(zf.name, zf) # type: ignore # After context manager exit, wheel.name # is an invalid file by intention. - return pkg_resources_distribution_for_wheel(zip_file, name, wheel.name) + return get_wheel_distribution(wheel, canonicalize_name(name)) class LazyZipOverHTTP: diff --git a/pipenv/patched/notpip/_internal/network/session.py b/pipenv/patched/notpip/_internal/network/session.py index cf54d62291..88f79b0b38 100644 --- a/pipenv/patched/notpip/_internal/network/session.py +++ b/pipenv/patched/notpip/_internal/network/session.py @@ -2,17 +2,8 @@ network request configuration and behavior. """ -# When mypy runs on Windows the call to distro.linux_distribution() is skipped -# resulting in the failure: -# -# error: unused 'type: ignore' comment -# -# If the upstream module adds typing, this comment should be removed. See -# https://github.com/nir0s/distro/pull/269 -# -# mypy: warn-unused-ignores=False - import email.utils +import io import ipaddress import json import logging @@ -128,9 +119,8 @@ def user_agent() -> str: if sys.platform.startswith("linux"): from pipenv.patched.notpip._vendor import distro - # https://github.com/nir0s/distro/pull/269 - linux_distribution = distro.linux_distribution() # type: ignore - distro_infos = dict( + linux_distribution = distro.name(), distro.version(), distro.codename() + distro_infos: Dict[str, Any] = dict( filter( lambda x: x[1], zip(["name", "version", "id"], linux_distribution), @@ -218,8 +208,11 @@ def send( try: stats = os.stat(pathname) except OSError as exc: + # format the exception raised as a io.BytesIO object, + # to return a better error message: resp.status_code = 404 - resp.raw = exc + resp.reason = type(exc).__name__ + resp.raw = io.BytesIO(f"{resp.reason}: {exc}".encode("utf8")) else: modified = email.utils.formatdate(stats.st_mtime, usegmt=True) content_type = mimetypes.guess_type(pathname)[0] or "text/plain" @@ -369,8 +362,15 @@ def add_trusted_host( if host_port not in self.pip_trusted_origins: self.pip_trusted_origins.append(host_port) + self.mount( + build_url_from_netloc(host, scheme="http") + "/", self._trusted_host_adapter + ) self.mount(build_url_from_netloc(host) + "/", self._trusted_host_adapter) if not host_port[1]: + self.mount( + build_url_from_netloc(host, scheme="http") + ":", + self._trusted_host_adapter, + ) # Mount wildcard ports for the same host. 
self.mount(build_url_from_netloc(host) + ":", self._trusted_host_adapter) diff --git a/pipenv/patched/notpip/_internal/operations/build/metadata.py b/pipenv/patched/notpip/_internal/operations/build/metadata.py index ced231b483..b1ecfb8c77 100644 --- a/pipenv/patched/notpip/_internal/operations/build/metadata.py +++ b/pipenv/patched/notpip/_internal/operations/build/metadata.py @@ -6,19 +6,22 @@ from pipenv.patched.notpip._vendor.pep517.wrappers import Pep517HookCaller from pipenv.patched.notpip._internal.build_env import BuildEnvironment +from pipenv.patched.notpip._internal.exceptions import ( + InstallationSubprocessError, + MetadataGenerationFailed, +) from pipenv.patched.notpip._internal.utils.subprocess import runner_with_spinner_message from pipenv.patched.notpip._internal.utils.temp_dir import TempDirectory -def generate_metadata(build_env, backend): - # type: (BuildEnvironment, Pep517HookCaller) -> str +def generate_metadata( + build_env: BuildEnvironment, backend: Pep517HookCaller, details: str +) -> str: """Generate metadata using mechanisms described in PEP 517. Returns the generated metadata directory. """ - metadata_tmpdir = TempDirectory( - kind="modern-metadata", globally_managed=True - ) + metadata_tmpdir = TempDirectory(kind="modern-metadata", globally_managed=True) metadata_dir = metadata_tmpdir.path @@ -26,10 +29,11 @@ def generate_metadata(build_env, backend): # Note that Pep517HookCaller implements a fallback for # prepare_metadata_for_build_wheel, so we don't have to # consider the possibility that this hook doesn't exist. - runner = runner_with_spinner_message("Preparing wheel metadata") + runner = runner_with_spinner_message("Preparing metadata (pyproject.toml)") with backend.subprocess_runner(runner): - distinfo_dir = backend.prepare_metadata_for_build_wheel( - metadata_dir - ) + try: + distinfo_dir = backend.prepare_metadata_for_build_wheel(metadata_dir) + except InstallationSubprocessError as error: + raise MetadataGenerationFailed(package_details=details) from error return os.path.join(metadata_dir, distinfo_dir) diff --git a/pipenv/patched/notpip/_internal/operations/build/metadata_editable.py b/pipenv/patched/notpip/_internal/operations/build/metadata_editable.py new file mode 100644 index 0000000000..cdd3fd5350 --- /dev/null +++ b/pipenv/patched/notpip/_internal/operations/build/metadata_editable.py @@ -0,0 +1,41 @@ +"""Metadata generation logic for source distributions. +""" + +import os + +from pipenv.patched.notpip._vendor.pep517.wrappers import Pep517HookCaller + +from pipenv.patched.notpip._internal.build_env import BuildEnvironment +from pipenv.patched.notpip._internal.exceptions import ( + InstallationSubprocessError, + MetadataGenerationFailed, +) +from pipenv.patched.notpip._internal.utils.subprocess import runner_with_spinner_message +from pipenv.patched.notpip._internal.utils.temp_dir import TempDirectory + + +def generate_editable_metadata( + build_env: BuildEnvironment, backend: Pep517HookCaller, details: str +) -> str: + """Generate metadata using mechanisms described in PEP 660. + + Returns the generated metadata directory. + """ + metadata_tmpdir = TempDirectory(kind="modern-metadata", globally_managed=True) + + metadata_dir = metadata_tmpdir.path + + with build_env: + # Note that Pep517HookCaller implements a fallback for + # prepare_metadata_for_build_wheel/editable, so we don't have to + # consider the possibility that this hook doesn't exist. 
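Both metadata modules drive the build backend through Pep517HookCaller. As a hedged sketch of what that amounts to outside pip, assuming a source tree at ./pkg whose pyproject.toml names setuptools.build_meta and an environment where that backend is already installed, the standalone pep517 package exposes the same hook:

import tempfile

from pep517.wrappers import Pep517HookCaller

hooks = Pep517HookCaller("./pkg", build_backend="setuptools.build_meta")
with tempfile.TemporaryDirectory() as metadata_dir:
    # Pep517HookCaller falls back to building a wheel and extracting its
    # metadata when the backend lacks prepare_metadata_for_build_wheel.
    distinfo = hooks.prepare_metadata_for_build_wheel(metadata_dir)
    print(distinfo)  # e.g. "pkg-1.0.dist-info"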
+ runner = runner_with_spinner_message( + "Preparing editable metadata (pyproject.toml)" + ) + with backend.subprocess_runner(runner): + try: + distinfo_dir = backend.prepare_metadata_for_build_editable(metadata_dir) + except InstallationSubprocessError as error: + raise MetadataGenerationFailed(package_details=details) from error + + return os.path.join(metadata_dir, distinfo_dir) diff --git a/pipenv/patched/notpip/_internal/operations/build/metadata_legacy.py b/pipenv/patched/notpip/_internal/operations/build/metadata_legacy.py index 5443b86669..fa345a530c 100644 --- a/pipenv/patched/notpip/_internal/operations/build/metadata_legacy.py +++ b/pipenv/patched/notpip/_internal/operations/build/metadata_legacy.py @@ -5,7 +5,12 @@ import os from pipenv.patched.notpip._internal.build_env import BuildEnvironment -from pipenv.patched.notpip._internal.exceptions import InstallationError +from pipenv.patched.notpip._internal.cli.spinners import open_spinner +from pipenv.patched.notpip._internal.exceptions import ( + InstallationError, + InstallationSubprocessError, + MetadataGenerationFailed, +) from pipenv.patched.notpip._internal.utils.setuptools_build import make_setuptools_egg_info_args from pipenv.patched.notpip._internal.utils.subprocess import call_subprocess from pipenv.patched.notpip._internal.utils.temp_dir import TempDirectory @@ -13,49 +18,39 @@ logger = logging.getLogger(__name__) -def _find_egg_info(directory): - # type: (str) -> str - """Find an .egg-info subdirectory in `directory`. - """ - filenames = [ - f for f in os.listdir(directory) if f.endswith(".egg-info") - ] +def _find_egg_info(directory: str) -> str: + """Find an .egg-info subdirectory in `directory`.""" + filenames = [f for f in os.listdir(directory) if f.endswith(".egg-info")] if not filenames: - raise InstallationError( - f"No .egg-info directory found in {directory}" - ) + raise InstallationError(f"No .egg-info directory found in {directory}") if len(filenames) > 1: raise InstallationError( - "More than one .egg-info directory found in {}".format( - directory - ) + "More than one .egg-info directory found in {}".format(directory) ) return os.path.join(directory, filenames[0]) def generate_metadata( - build_env, # type: BuildEnvironment - setup_py_path, # type: str - source_dir, # type: str - isolated, # type: bool - details, # type: str -): - # type: (...) -> str + build_env: BuildEnvironment, + setup_py_path: str, + source_dir: str, + isolated: bool, + details: str, +) -> str: """Generate metadata using setup.py-based defacto mechanisms. Returns the generated metadata directory. """ logger.debug( - 'Running setup.py (path:%s) egg_info for package %s', - setup_py_path, details, + "Running setup.py (path:%s) egg_info for package %s", + setup_py_path, + details, ) - egg_info_dir = TempDirectory( - kind="pip-egg-info", globally_managed=True - ).path + egg_info_dir = TempDirectory(kind="pip-egg-info", globally_managed=True).path args = make_setuptools_egg_info_args( setup_py_path, @@ -64,11 +59,16 @@ def generate_metadata( ) with build_env: - call_subprocess( - args, - cwd=source_dir, - command_desc='python setup.py egg_info', - ) + with open_spinner("Preparing metadata (setup.py)") as spinner: + try: + call_subprocess( + args, + cwd=source_dir, + command_desc="python setup.py egg_info", + spinner=spinner, + ) + except InstallationSubprocessError as error: + raise MetadataGenerationFailed(package_details=details) from error # Return the .egg-info directory. 
return _find_egg_info(egg_info_dir) diff --git a/pipenv/patched/notpip/_internal/operations/build/wheel.py b/pipenv/patched/notpip/_internal/operations/build/wheel.py index d8ea255ff2..9e36925ff0 100644 --- a/pipenv/patched/notpip/_internal/operations/build/wheel.py +++ b/pipenv/patched/notpip/_internal/operations/build/wheel.py @@ -10,22 +10,21 @@ def build_wheel_pep517( - name, # type: str - backend, # type: Pep517HookCaller - metadata_directory, # type: str - tempd, # type: str -): - # type: (...) -> Optional[str] + name: str, + backend: Pep517HookCaller, + metadata_directory: str, + tempd: str, +) -> Optional[str]: """Build one InstallRequirement using the PEP 517 build process. Returns path to wheel if successfully built. Otherwise, returns None. """ assert metadata_directory is not None try: - logger.debug('Destination directory: %s', tempd) + logger.debug("Destination directory: %s", tempd) runner = runner_with_spinner_message( - f'Building wheel for {name} (PEP 517)' + f"Building wheel for {name} (pyproject.toml)" ) with backend.subprocess_runner(runner): wheel_name = backend.build_wheel( @@ -33,6 +32,6 @@ def build_wheel_pep517( metadata_directory=metadata_directory, ) except Exception: - logger.error('Failed building wheel for %s', name) + logger.error("Failed building wheel for %s", name) return None return os.path.join(tempd, wheel_name) diff --git a/pipenv/patched/notpip/_internal/operations/build/wheel_editable.py b/pipenv/patched/notpip/_internal/operations/build/wheel_editable.py new file mode 100644 index 0000000000..7826533405 --- /dev/null +++ b/pipenv/patched/notpip/_internal/operations/build/wheel_editable.py @@ -0,0 +1,46 @@ +import logging +import os +from typing import Optional + +from pipenv.patched.notpip._vendor.pep517.wrappers import HookMissing, Pep517HookCaller + +from pipenv.patched.notpip._internal.utils.subprocess import runner_with_spinner_message + +logger = logging.getLogger(__name__) + + +def build_wheel_editable( + name: str, + backend: Pep517HookCaller, + metadata_directory: str, + tempd: str, +) -> Optional[str]: + """Build one InstallRequirement using the PEP 660 build process. + + Returns path to wheel if successfully built. Otherwise, returns None. 
+ """ + assert metadata_directory is not None + try: + logger.debug("Destination directory: %s", tempd) + + runner = runner_with_spinner_message( + f"Building editable for {name} (pyproject.toml)" + ) + with backend.subprocess_runner(runner): + try: + wheel_name = backend.build_editable( + tempd, + metadata_directory=metadata_directory, + ) + except HookMissing as e: + logger.error( + "Cannot build editable %s because the build " + "backend does not have the %s hook", + name, + e, + ) + return None + except Exception: + logger.error("Failed building editable for %s", name) + return None + return os.path.join(tempd, wheel_name) diff --git a/pipenv/patched/notpip/_internal/operations/build/wheel_legacy.py b/pipenv/patched/notpip/_internal/operations/build/wheel_legacy.py index f631c78ae8..ec5b836717 100644 --- a/pipenv/patched/notpip/_internal/operations/build/wheel_legacy.py +++ b/pipenv/patched/notpip/_internal/operations/build/wheel_legacy.py @@ -4,59 +4,51 @@ from pipenv.patched.notpip._internal.cli.spinners import open_spinner from pipenv.patched.notpip._internal.utils.setuptools_build import make_setuptools_bdist_wheel_args -from pipenv.patched.notpip._internal.utils.subprocess import ( - LOG_DIVIDER, - call_subprocess, - format_command_args, -) +from pipenv.patched.notpip._internal.utils.subprocess import call_subprocess, format_command_args logger = logging.getLogger(__name__) def format_command_result( - command_args, # type: List[str] - command_output, # type: str -): - # type: (...) -> str + command_args: List[str], + command_output: str, +) -> str: """Format command information for logging.""" command_desc = format_command_args(command_args) - text = f'Command arguments: {command_desc}\n' + text = f"Command arguments: {command_desc}\n" if not command_output: - text += 'Command output: None' + text += "Command output: None" elif logger.getEffectiveLevel() > logging.DEBUG: - text += 'Command output: [use --verbose to show]' + text += "Command output: [use --verbose to show]" else: - if not command_output.endswith('\n'): - command_output += '\n' - text += f'Command output:\n{command_output}{LOG_DIVIDER}' + if not command_output.endswith("\n"): + command_output += "\n" + text += f"Command output:\n{command_output}" return text def get_legacy_build_wheel_path( - names, # type: List[str] - temp_dir, # type: str - name, # type: str - command_args, # type: List[str] - command_output, # type: str -): - # type: (...) -> Optional[str] + names: List[str], + temp_dir: str, + name: str, + command_args: List[str], + command_output: str, +) -> Optional[str]: """Return the path to the wheel in the temporary build directory.""" # Sort for determinism. 
names = sorted(names) if not names: - msg = ( - 'Legacy build of wheel for {!r} created no files.\n' - ).format(name) + msg = ("Legacy build of wheel for {!r} created no files.\n").format(name) msg += format_command_result(command_args, command_output) logger.warning(msg) return None if len(names) > 1: msg = ( - 'Legacy build of wheel for {!r} created more than one file.\n' - 'Filenames (choosing first): {}\n' + "Legacy build of wheel for {!r} created more than one file.\n" + "Filenames (choosing first): {}\n" ).format(name, names) msg += format_command_result(command_args, command_output) logger.warning(msg) @@ -65,14 +57,13 @@ def get_legacy_build_wheel_path( def build_wheel_legacy( - name, # type: str - setup_py_path, # type: str - source_dir, # type: str - global_options, # type: List[str] - build_options, # type: List[str] - tempd, # type: str -): - # type: (...) -> Optional[str] + name: str, + setup_py_path: str, + source_dir: str, + global_options: List[str], + build_options: List[str], + tempd: str, +) -> Optional[str]: """Build one unpacked package using the "legacy" build process. Returns path to wheel if successfully built. Otherwise, returns None. @@ -84,19 +75,20 @@ def build_wheel_legacy( destination_dir=tempd, ) - spin_message = f'Building wheel for {name} (setup.py)' + spin_message = f"Building wheel for {name} (setup.py)" with open_spinner(spin_message) as spinner: - logger.debug('Destination directory: %s', tempd) + logger.debug("Destination directory: %s", tempd) try: output = call_subprocess( wheel_args, + command_desc="python setup.py bdist_wheel", cwd=source_dir, spinner=spinner, ) except Exception: spinner.finish("error") - logger.error('Failed building wheel for %s', name) + logger.error("Failed building wheel for %s", name) return None names = os.listdir(tempd) diff --git a/pipenv/patched/notpip/_internal/operations/check.py b/pipenv/patched/notpip/_internal/operations/check.py index a65cb08f46..f26bdc4512 100644 --- a/pipenv/patched/notpip/_internal/operations/check.py +++ b/pipenv/patched/notpip/_internal/operations/check.py @@ -2,19 +2,16 @@ """ import logging -from typing import TYPE_CHECKING, Callable, Dict, List, NamedTuple, Optional, Set, Tuple +from typing import Callable, Dict, List, NamedTuple, Optional, Set, Tuple from pipenv.patched.notpip._vendor.packaging.requirements import Requirement -from pipenv.patched.notpip._vendor.packaging.utils import canonicalize_name +from pipenv.patched.notpip._vendor.packaging.utils import NormalizedName, canonicalize_name from pipenv.patched.notpip._internal.distributions import make_distribution_for_install_requirement from pipenv.patched.notpip._internal.metadata import get_default_environment from pipenv.patched.notpip._internal.metadata.base import DistributionVersion from pipenv.patched.notpip._internal.req.req_install import InstallRequirement -if TYPE_CHECKING: - from pipenv.patched.notpip._vendor.packaging.utils import NormalizedName - logger = logging.getLogger(__name__) @@ -24,12 +21,12 @@ class PackageDetails(NamedTuple): # Shorthands -PackageSet = Dict['NormalizedName', PackageDetails] -Missing = Tuple['NormalizedName', Requirement] -Conflicting = Tuple['NormalizedName', DistributionVersion, Requirement] +PackageSet = Dict[NormalizedName, PackageDetails] +Missing = Tuple[NormalizedName, Requirement] +Conflicting = Tuple[NormalizedName, DistributionVersion, Requirement] -MissingDict = Dict['NormalizedName', List[Missing]] -ConflictingDict = Dict['NormalizedName', List[Conflicting]] +MissingDict = 
Dict[NormalizedName, List[Missing]] +ConflictingDict = Dict[NormalizedName, List[Conflicting]] CheckResult = Tuple[MissingDict, ConflictingDict] ConflictDetails = Tuple[PackageSet, CheckResult] @@ -51,8 +48,9 @@ def create_package_set_from_installed() -> Tuple[PackageSet, bool]: return package_set, problems -def check_package_set(package_set, should_ignore=None): - # type: (PackageSet, Optional[Callable[[str], bool]]) -> CheckResult +def check_package_set( + package_set: PackageSet, should_ignore: Optional[Callable[[str], bool]] = None +) -> CheckResult: """Check if a package set is consistent If should_ignore is passed, it should be a callable that takes a @@ -64,8 +62,8 @@ def check_package_set(package_set, should_ignore=None): for package_name, package_detail in package_set.items(): # Info about dependencies of package_name - missing_deps = set() # type: Set[Missing] - conflicting_deps = set() # type: Set[Conflicting] + missing_deps: Set[Missing] = set() + conflicting_deps: Set[Conflicting] = set() if should_ignore and should_ignore(package_name): continue @@ -95,8 +93,7 @@ def check_package_set(package_set, should_ignore=None): return missing, conflicting -def check_install_conflicts(to_install): - # type: (List[InstallRequirement]) -> ConflictDetails +def check_install_conflicts(to_install: List[InstallRequirement]) -> ConflictDetails: """For checking if the dependency graph would be consistent after \ installing given requirements """ @@ -112,33 +109,32 @@ def check_install_conflicts(to_install): package_set, check_package_set( package_set, should_ignore=lambda name: name not in whitelist - ) + ), ) -def _simulate_installation_of(to_install, package_set): - # type: (List[InstallRequirement], PackageSet) -> Set[NormalizedName] - """Computes the version of packages after installing to_install. 
- """ +def _simulate_installation_of( + to_install: List[InstallRequirement], package_set: PackageSet +) -> Set[NormalizedName]: + """Computes the version of packages after installing to_install.""" # Keep track of packages that were installed installed = set() # Modify it as installing requirement_set would (assuming no errors) for inst_req in to_install: abstract_dist = make_distribution_for_install_requirement(inst_req) - dist = abstract_dist.get_pkg_resources_distribution() - - assert dist is not None - name = canonicalize_name(dist.project_name) - package_set[name] = PackageDetails(dist.parsed_version, dist.requires()) + dist = abstract_dist.get_metadata_distribution() + name = dist.canonical_name + package_set[name] = PackageDetails(dist.version, list(dist.iter_dependencies())) installed.add(name) return installed -def _create_whitelist(would_be_installed, package_set): - # type: (Set[NormalizedName], PackageSet) -> Set[NormalizedName] +def _create_whitelist( + would_be_installed: Set[NormalizedName], package_set: PackageSet +) -> Set[NormalizedName]: packages_affected = set(would_be_installed) for package_name in package_set: diff --git a/pipenv/patched/notpip/_internal/operations/freeze.py b/pipenv/patched/notpip/_internal/operations/freeze.py index 103655363d..8c4471beab 100644 --- a/pipenv/patched/notpip/_internal/operations/freeze.py +++ b/pipenv/patched/notpip/_internal/operations/freeze.py @@ -1,19 +1,8 @@ import collections import logging import os -from typing import ( - Container, - Dict, - Iterable, - Iterator, - List, - NamedTuple, - Optional, - Set, - Union, -) +from typing import Container, Dict, Iterable, Iterator, List, NamedTuple, Optional, Set -from pipenv.patched.notpip._vendor.packaging.requirements import Requirement from pipenv.patched.notpip._vendor.packaging.utils import canonicalize_name from pipenv.patched.notpip._vendor.packaging.version import Version @@ -30,22 +19,20 @@ class _EditableInfo(NamedTuple): - requirement: Optional[str] - editable: bool + requirement: str comments: List[str] def freeze( - requirement=None, # type: Optional[List[str]] - local_only=False, # type: bool - user_only=False, # type: bool - paths=None, # type: Optional[List[str]] - isolated=False, # type: bool - exclude_editable=False, # type: bool - skip=() # type: Container[str] -): - # type: (...) -> Iterator[str] - installations = {} # type: Dict[str, FrozenRequirement] + requirement: Optional[List[str]] = None, + local_only: bool = False, + user_only: bool = False, + paths: Optional[List[str]] = None, + isolated: bool = False, + exclude_editable: bool = False, + skip: Container[str] = (), +) -> Iterator[str]: + installations: Dict[str, FrozenRequirement] = {} dists = get_environment(paths).iter_installed_distributions( local_only=local_only, @@ -63,42 +50,50 @@ def freeze( # should only be emitted once, even if the same option is in multiple # requirements files, so we need to keep track of what has been emitted # so that we don't emit it again if it's seen again - emitted_options = set() # type: Set[str] + emitted_options: Set[str] = set() # keep track of which files a requirement is in so that we can # give an accurate warning if a requirement appears multiple times. 
- req_files = collections.defaultdict(list) # type: Dict[str, List[str]] + req_files: Dict[str, List[str]] = collections.defaultdict(list) for req_file_path in requirement: with open(req_file_path) as req_file: for line in req_file: - if (not line.strip() or - line.strip().startswith('#') or - line.startswith(( - '-r', '--requirement', - '-f', '--find-links', - '-i', '--index-url', - '--pre', - '--trusted-host', - '--process-dependency-links', - '--extra-index-url', - '--use-feature'))): + if ( + not line.strip() + or line.strip().startswith("#") + or line.startswith( + ( + "-r", + "--requirement", + "-f", + "--find-links", + "-i", + "--index-url", + "--pre", + "--trusted-host", + "--process-dependency-links", + "--extra-index-url", + "--use-feature", + ) + ) + ): line = line.rstrip() if line not in emitted_options: emitted_options.add(line) yield line continue - if line.startswith('-e') or line.startswith('--editable'): - if line.startswith('-e'): + if line.startswith("-e") or line.startswith("--editable"): + if line.startswith("-e"): line = line[2:].strip() else: - line = line[len('--editable'):].strip().lstrip('=') + line = line[len("--editable") :].strip().lstrip("=") line_req = install_req_from_editable( line, isolated=isolated, ) else: line_req = install_req_from_line( - COMMENT_RE.sub('', line).strip(), + COMMENT_RE.sub("", line).strip(), isolated=isolated, ) @@ -106,15 +101,15 @@ def freeze( logger.info( "Skipping line in requirement file [%s] because " "it's not clear what it would install: %s", - req_file_path, line.strip(), + req_file_path, + line.strip(), ) logger.info( " (add #egg=PackageName to the URL to avoid" " this warning)" ) else: - line_req_canonical_name = canonicalize_name( - line_req.name) + line_req_canonical_name = canonicalize_name(line_req.name) if line_req_canonical_name not in installations: # either it's not installed, or it is installed # but has been processed already @@ -123,14 +118,13 @@ def freeze( "Requirement file [%s] contains %s, but " "package %r is not installed", req_file_path, - COMMENT_RE.sub('', line).strip(), - line_req.name + COMMENT_RE.sub("", line).strip(), + line_req.name, ) else: req_files[line_req.name].append(req_file_path) else: - yield str(installations[ - line_req_canonical_name]).rstrip() + yield str(installations[line_req_canonical_name]).rstrip() del installations[line_req_canonical_name] req_files[line_req.name].append(req_file_path) @@ -138,15 +132,14 @@ def freeze( # single requirements file or in different requirements files). for name, files in req_files.items(): if len(files) > 1: - logger.warning("Requirement %s included multiple times [%s]", - name, ', '.join(sorted(set(files)))) + logger.warning( + "Requirement %s included multiple times [%s]", + name, + ", ".join(sorted(set(files))), + ) - yield( - '## The following requirements were added by ' - 'pip freeze:' - ) - for installation in sorted( - installations.values(), key=lambda x: x.name.lower()): + yield ("## The following requirements were added by pip freeze:") + for installation in sorted(installations.values(), key=lambda x: x.name.lower()): if installation.canonical_name not in skip: yield str(installation).rstrip() @@ -159,21 +152,12 @@ def _format_as_name_version(dist: BaseDistribution) -> str: def _get_editable_info(dist: BaseDistribution) -> _EditableInfo: """ - Compute and return values (req, editable, comments) for use in + Compute and return values (req, comments) for use in FrozenRequirement.from_dist(). 
""" - if not dist.editable: - return _EditableInfo(requirement=None, editable=False, comments=[]) - if dist.location is None: - display = _format_as_name_version(dist) - logger.warning("Editable requirement not found on disk: %s", display) - return _EditableInfo( - requirement=None, - editable=True, - comments=[f"# Editable install not found ({display})"], - ) - - location = os.path.normcase(os.path.abspath(dist.location)) + editable_project_location = dist.editable_project_location + assert editable_project_location + location = os.path.normcase(os.path.abspath(editable_project_location)) from pipenv.patched.notpip._internal.vcs import RemoteNotFoundError, RemoteNotValidError, vcs @@ -182,13 +166,13 @@ def _get_editable_info(dist: BaseDistribution) -> _EditableInfo: if vcs_backend is None: display = _format_as_name_version(dist) logger.debug( - 'No VCS found for editable requirement "%s" in: %r', display, + 'No VCS found for editable requirement "%s" in: %r', + display, location, ) return _EditableInfo( requirement=location, - editable=True, - comments=[f'# Editable install with no version control ({display})'], + comments=[f"# Editable install with no version control ({display})"], ) vcs_name = type(vcs_backend).__name__ @@ -199,50 +183,47 @@ def _get_editable_info(dist: BaseDistribution) -> _EditableInfo: display = _format_as_name_version(dist) return _EditableInfo( requirement=location, - editable=True, - comments=[f'# Editable {vcs_name} install with no remote ({display})'], + comments=[f"# Editable {vcs_name} install with no remote ({display})"], ) except RemoteNotValidError as ex: display = _format_as_name_version(dist) return _EditableInfo( requirement=location, - editable=True, comments=[ f"# Editable {vcs_name} install ({display}) with either a deleted " f"local remote or invalid URI:", f"# '{ex.url}'", ], ) - except BadCommand: logger.warning( - 'cannot determine version of editable source in %s ' - '(%s command not found in path)', + "cannot determine version of editable source in %s " + "(%s command not found in path)", location, vcs_backend.name, ) - return _EditableInfo(requirement=None, editable=True, comments=[]) - + return _EditableInfo(requirement=location, comments=[]) except InstallationError as exc: - logger.warning( - "Error when trying to get requirement for VCS system %s, " - "falling back to uneditable format", exc - ) + logger.warning("Error when trying to get requirement for VCS system %s", exc) else: - return _EditableInfo(requirement=req, editable=True, comments=[]) + return _EditableInfo(requirement=req, comments=[]) - logger.warning('Could not determine repository location of %s', location) + logger.warning("Could not determine repository location of %s", location) return _EditableInfo( - requirement=None, - editable=False, - comments=['## !! Could not determine repository location'], + requirement=location, + comments=["## !! Could not determine repository location"], ) class FrozenRequirement: - def __init__(self, name, req, editable, comments=()): - # type: (str, Union[str, Requirement], bool, Iterable[str]) -> None + def __init__( + self, + name: str, + req: str, + editable: bool, + comments: Iterable[str] = (), + ) -> None: self.name = name self.canonical_name = canonicalize_name(name) self.req = req @@ -251,27 +232,23 @@ def __init__(self, name, req, editable, comments=()): @classmethod def from_dist(cls, dist: BaseDistribution) -> "FrozenRequirement": - # TODO `get_requirement_info` is taking care of editable requirements. 
- # TODO This should be refactored when we will add detection of - # editable that provide .dist-info metadata. - req, editable, comments = _get_editable_info(dist) - if req is None and not editable: - # if PEP 610 metadata is present, attempt to use it + editable = dist.editable + if editable: + req, comments = _get_editable_info(dist) + else: + comments = [] direct_url = dist.direct_url if direct_url: - req = direct_url_as_pep440_direct_reference( - direct_url, dist.raw_name - ) - comments = [] - if req is None: - # name==version requirement - req = _format_as_name_version(dist) + # if PEP 610 metadata is present, use it + req = direct_url_as_pep440_direct_reference(direct_url, dist.raw_name) + else: + # name==version requirement + req = _format_as_name_version(dist) return cls(dist.raw_name, req, editable, comments=comments) - def __str__(self): - # type: () -> str + def __str__(self) -> str: req = self.req if self.editable: - req = f'-e {req}' - return '\n'.join(list(self.comments) + [str(req)]) + '\n' + req = f"-e {req}" + return "\n".join(list(self.comments) + [str(req)]) + "\n" diff --git a/pipenv/patched/notpip/_internal/operations/install/editable_legacy.py b/pipenv/patched/notpip/_internal/operations/install/editable_legacy.py index 2c627458ef..7cd114f70d 100644 --- a/pipenv/patched/notpip/_internal/operations/install/editable_legacy.py +++ b/pipenv/patched/notpip/_internal/operations/install/editable_legacy.py @@ -12,22 +12,21 @@ def install_editable( - install_options, # type: List[str] - global_options, # type: Sequence[str] - prefix, # type: Optional[str] - home, # type: Optional[str] - use_user_site, # type: bool - name, # type: str - setup_py_path, # type: str - isolated, # type: bool - build_env, # type: BuildEnvironment - unpacked_source_directory, # type: str -): - # type: (...) -> None + install_options: List[str], + global_options: Sequence[str], + prefix: Optional[str], + home: Optional[str], + use_user_site: bool, + name: str, + setup_py_path: str, + isolated: bool, + build_env: BuildEnvironment, + unpacked_source_directory: str, +) -> None: """Install a package in editable mode. Most arguments are pass-through to setuptools. 
""" - logger.info('Running setup.py develop for %s', name) + logger.info("Running setup.py develop for %s", name) args = make_setuptools_develop_args( setup_py_path, @@ -43,5 +42,6 @@ def install_editable( with build_env: call_subprocess( args, + command_desc="python setup.py develop", cwd=unpacked_source_directory, ) diff --git a/pipenv/patched/notpip/_internal/operations/install/legacy.py b/pipenv/patched/notpip/_internal/operations/install/legacy.py index e7dcc42f08..74df467db0 100644 --- a/pipenv/patched/notpip/_internal/operations/install/legacy.py +++ b/pipenv/patched/notpip/_internal/operations/install/legacy.py @@ -3,14 +3,12 @@ import logging import os -import sys from distutils.util import change_root from typing import List, Optional, Sequence from pipenv.patched.notpip._internal.build_env import BuildEnvironment -from pipenv.patched.notpip._internal.exceptions import InstallationError +from pipenv.patched.notpip._internal.exceptions import InstallationError, LegacyInstallFailure from pipenv.patched.notpip._internal.models.scheme import Scheme -from pipenv.patched.notpip._internal.utils.logging import indent_log from pipenv.patched.notpip._internal.utils.misc import ensure_dir from pipenv.patched.notpip._internal.utils.setuptools_build import make_setuptools_install_args from pipenv.patched.notpip._internal.utils.subprocess import runner_with_spinner_message @@ -19,19 +17,12 @@ logger = logging.getLogger(__name__) -class LegacyInstallFailure(Exception): - def __init__(self): - # type: () -> None - self.parent = sys.exc_info() - - def write_installed_files_from_setuptools_record( record_lines: List[str], root: Optional[str], req_description: str, ) -> None: - def prepend_root(path): - # type: (str) -> str + def prepend_root(path: str) -> str: if root is None or not os.path.isabs(path): return path else: @@ -39,7 +30,7 @@ def prepend_root(path): for line in record_lines: directory = os.path.dirname(line) - if directory.endswith('.egg-info'): + if directory.endswith(".egg-info"): egg_info_dir = prepend_root(directory) break else: @@ -55,39 +46,36 @@ def prepend_root(path): filename = line.strip() if os.path.isdir(filename): filename += os.path.sep - new_lines.append( - os.path.relpath(prepend_root(filename), egg_info_dir) - ) + new_lines.append(os.path.relpath(prepend_root(filename), egg_info_dir)) new_lines.sort() ensure_dir(egg_info_dir) - inst_files_path = os.path.join(egg_info_dir, 'installed-files.txt') - with open(inst_files_path, 'w') as f: - f.write('\n'.join(new_lines) + '\n') + inst_files_path = os.path.join(egg_info_dir, "installed-files.txt") + with open(inst_files_path, "w") as f: + f.write("\n".join(new_lines) + "\n") def install( - install_options, # type: List[str] - global_options, # type: Sequence[str] - root, # type: Optional[str] - home, # type: Optional[str] - prefix, # type: Optional[str] - use_user_site, # type: bool - pycompile, # type: bool - scheme, # type: Scheme - setup_py_path, # type: str - isolated, # type: bool - req_name, # type: str - build_env, # type: BuildEnvironment - unpacked_source_directory, # type: str - req_description, # type: str -): - # type: (...) 
-> bool + install_options: List[str], + global_options: Sequence[str], + root: Optional[str], + home: Optional[str], + prefix: Optional[str], + use_user_site: bool, + pycompile: bool, + scheme: Scheme, + setup_py_path: str, + isolated: bool, + req_name: str, + build_env: BuildEnvironment, + unpacked_source_directory: str, + req_description: str, +) -> bool: header_dir = scheme.headers with TempDirectory(kind="record") as temp_dir: try: - record_filename = os.path.join(temp_dir.path, 'install-record.txt') + record_filename = os.path.join(temp_dir.path, "install-record.txt") install_args = make_setuptools_install_args( setup_py_path, global_options=global_options, @@ -105,20 +93,20 @@ def install( runner = runner_with_spinner_message( f"Running setup.py install for {req_name}" ) - with indent_log(), build_env: + with build_env: runner( cmd=install_args, cwd=unpacked_source_directory, ) if not os.path.exists(record_filename): - logger.debug('Record file %s not found', record_filename) + logger.debug("Record file %s not found", record_filename) # Signal to the caller that we didn't install the new package return False - except Exception: + except Exception as e: # Signal to the caller that we didn't install the new package - raise LegacyInstallFailure + raise LegacyInstallFailure(package_details=req_name) from e # At this point, we have successfully installed the requirement. diff --git a/pipenv/patched/notpip/_internal/operations/install/wheel.py b/pipenv/patched/notpip/_internal/operations/install/wheel.py index d26ef15164..41a3b32daf 100644 --- a/pipenv/patched/notpip/_internal/operations/install/wheel.py +++ b/pipenv/patched/notpip/_internal/operations/install/wheel.py @@ -38,11 +38,14 @@ from pipenv.patched.notpip._vendor.distlib.scripts import ScriptMaker from pipenv.patched.notpip._vendor.distlib.util import get_export_entry from pipenv.patched.notpip._vendor.packaging.utils import canonicalize_name -from pipenv.patched.notpip._vendor.six import ensure_str, ensure_text, reraise from pipenv.patched.notpip._internal.exceptions import InstallationError from pipenv.patched.notpip._internal.locations import get_major_minor_version -from pipenv.patched.notpip._internal.metadata import BaseDistribution, get_wheel_distribution +from pipenv.patched.notpip._internal.metadata import ( + BaseDistribution, + FilesystemWheel, + get_wheel_distribution, +) from pipenv.patched.notpip._internal.models.direct_url import DIRECT_URL_METADATA_NAME, DirectUrl from pipenv.patched.notpip._internal.models.scheme import SCHEME_KEYS, Scheme from pipenv.patched.notpip._internal.utils.filesystem import adjacent_tmp_file, replace @@ -59,62 +62,55 @@ from typing import Protocol class File(Protocol): - src_record_path = None # type: RecordPath - dest_path = None # type: str - changed = None # type: bool + src_record_path: "RecordPath" + dest_path: str + changed: bool - def save(self): - # type: () -> None + def save(self) -> None: pass logger = logging.getLogger(__name__) -RecordPath = NewType('RecordPath', str) +RecordPath = NewType("RecordPath", str) InstalledCSVRow = Tuple[RecordPath, str, Union[int, str]] -def rehash(path, blocksize=1 << 20): - # type: (str, int) -> Tuple[str, str] +def rehash(path: str, blocksize: int = 1 << 20) -> Tuple[str, str]: """Return (encoded_digest, length) for path using hashlib.sha256()""" h, length = hash_file(path, blocksize) - digest = 'sha256=' + urlsafe_b64encode( - h.digest() - ).decode('latin1').rstrip('=') + digest = "sha256=" + 
urlsafe_b64encode(h.digest()).decode("latin1").rstrip("=") return (digest, str(length)) -def csv_io_kwargs(mode): - # type: (str) -> Dict[str, Any] +def csv_io_kwargs(mode: str) -> Dict[str, Any]: """Return keyword arguments to properly open a CSV file in the given mode. """ - return {'mode': mode, 'newline': '', 'encoding': 'utf-8'} + return {"mode": mode, "newline": "", "encoding": "utf-8"} -def fix_script(path): - # type: (str) -> bool +def fix_script(path: str) -> bool: """Replace #!python with #!/path/to/python Return True if file was changed. """ # XXX RECORD hashes will need to be updated assert os.path.isfile(path) - with open(path, 'rb') as script: + with open(path, "rb") as script: firstline = script.readline() - if not firstline.startswith(b'#!python'): + if not firstline.startswith(b"#!python"): return False exename = sys.executable.encode(sys.getfilesystemencoding()) - firstline = b'#!' + exename + os.linesep.encode("ascii") + firstline = b"#!" + exename + os.linesep.encode("ascii") rest = script.read() - with open(path, 'wb') as script: + with open(path, "wb") as script: script.write(firstline) script.write(rest) return True -def wheel_root_is_purelib(metadata): - # type: (Message) -> bool +def wheel_root_is_purelib(metadata: Message) -> bool: return metadata.get("Root-Is-Purelib", "").lower() == "true" @@ -129,8 +125,7 @@ def get_entrypoints(dist: BaseDistribution) -> Tuple[Dict[str, str], Dict[str, s return console_scripts, gui_scripts -def message_about_scripts_not_on_PATH(scripts): - # type: (Sequence[str]) -> Optional[str] +def message_about_scripts_not_on_PATH(scripts: Sequence[str]) -> Optional[str]: """Determine if any scripts are not on PATH and format a warning. Returns a warning message if one or more scripts are not on PATH, otherwise None. @@ -139,7 +134,7 @@ def message_about_scripts_not_on_PATH(scripts): return None # Group scripts by the path they were installed in - grouped_by_dir = collections.defaultdict(set) # type: Dict[str, Set[str]] + grouped_by_dir: Dict[str, Set[str]] = collections.defaultdict(set) for destfile in scripts: parent_dir = os.path.dirname(destfile) script_name = os.path.basename(destfile) @@ -147,23 +142,24 @@ def message_about_scripts_not_on_PATH(scripts): # We don't want to warn for directories that are on PATH. not_warn_dirs = [ - os.path.normcase(i).rstrip(os.sep) for i in - os.environ.get("PATH", "").split(os.pathsep) + os.path.normcase(i).rstrip(os.sep) + for i in os.environ.get("PATH", "").split(os.pathsep) ] # If an executable sits with sys.executable, we don't warn for it. # This covers the case of venv invocations without activating the venv. not_warn_dirs.append(os.path.normcase(os.path.dirname(sys.executable))) - warn_for = { - parent_dir: scripts for parent_dir, scripts in grouped_by_dir.items() + warn_for: Dict[str, Set[str]] = { + parent_dir: scripts + for parent_dir, scripts in grouped_by_dir.items() if os.path.normcase(parent_dir) not in not_warn_dirs - } # type: Dict[str, Set[str]] + } if not warn_for: return None # Format a message msg_lines = [] for parent_dir, dir_scripts in warn_for.items(): - sorted_scripts = sorted(dir_scripts) # type: List[str] + sorted_scripts: List[str] = sorted(dir_scripts) if len(sorted_scripts) == 1: start_text = "script {} is".format(sorted_scripts[0]) else: @@ -172,8 +168,9 @@ def message_about_scripts_not_on_PATH(scripts): ) msg_lines.append( - "The {} installed in '{}' which is not on PATH." 
- .format(start_text, parent_dir) + "The {} installed in '{}' which is not on PATH.".format( + start_text, parent_dir + ) ) last_line_fmt = ( @@ -200,8 +197,9 @@ def message_about_scripts_not_on_PATH(scripts): return "\n".join(msg_lines) -def _normalized_outrows(outrows): - # type: (Iterable[InstalledCSVRow]) -> List[Tuple[str, str, str]] +def _normalized_outrows( + outrows: Iterable[InstalledCSVRow], +) -> List[Tuple[str, str, str]]: """Normalize the given rows of a RECORD file. Items in each row are converted into str. Rows are then sorted to make @@ -221,69 +219,60 @@ def _normalized_outrows(outrows): # For additional background, see-- # https://github.com/pypa/pip/issues/5868 return sorted( - (ensure_str(record_path, encoding='utf-8'), hash_, str(size)) - for record_path, hash_, size in outrows + (record_path, hash_, str(size)) for record_path, hash_, size in outrows ) -def _record_to_fs_path(record_path): - # type: (RecordPath) -> str +def _record_to_fs_path(record_path: RecordPath) -> str: return record_path -def _fs_to_record_path(path, relative_to=None): - # type: (str, Optional[str]) -> RecordPath +def _fs_to_record_path(path: str, relative_to: Optional[str] = None) -> RecordPath: if relative_to is not None: # On Windows, do not handle relative paths if they belong to different # logical disks - if os.path.splitdrive(path)[0].lower() == \ - os.path.splitdrive(relative_to)[0].lower(): + if ( + os.path.splitdrive(path)[0].lower() + == os.path.splitdrive(relative_to)[0].lower() + ): path = os.path.relpath(path, relative_to) - path = path.replace(os.path.sep, '/') - return cast('RecordPath', path) - - -def _parse_record_path(record_column): - # type: (str) -> RecordPath - p = ensure_text(record_column, encoding='utf-8') - return cast('RecordPath', p) + path = path.replace(os.path.sep, "/") + return cast("RecordPath", path) def get_csv_rows_for_installed( - old_csv_rows, # type: List[List[str]] - installed, # type: Dict[RecordPath, RecordPath] - changed, # type: Set[RecordPath] - generated, # type: List[str] - lib_dir, # type: str -): - # type: (...) -> List[InstalledCSVRow] + old_csv_rows: List[List[str]], + installed: Dict[RecordPath, RecordPath], + changed: Set[RecordPath], + generated: List[str], + lib_dir: str, +) -> List[InstalledCSVRow]: """ :param installed: A map from archive RECORD path to installation RECORD path. 
""" - installed_rows = [] # type: List[InstalledCSVRow] + installed_rows: List[InstalledCSVRow] = [] for row in old_csv_rows: if len(row) > 3: - logger.warning('RECORD line has more than three elements: %s', row) - old_record_path = _parse_record_path(row[0]) + logger.warning("RECORD line has more than three elements: %s", row) + old_record_path = cast("RecordPath", row[0]) new_record_path = installed.pop(old_record_path, old_record_path) if new_record_path in changed: digest, length = rehash(_record_to_fs_path(new_record_path)) else: - digest = row[1] if len(row) > 1 else '' - length = row[2] if len(row) > 2 else '' + digest = row[1] if len(row) > 1 else "" + length = row[2] if len(row) > 2 else "" installed_rows.append((new_record_path, digest, length)) for f in generated: path = _fs_to_record_path(f, lib_dir) digest, length = rehash(f) installed_rows.append((path, digest, length)) for installed_record_path in installed.values(): - installed_rows.append((installed_record_path, '', '')) + installed_rows.append((installed_record_path, "", "")) return installed_rows -def get_console_script_specs(console): - # type: (Dict[str, str]) -> List[str] +def get_console_script_specs(console: Dict[str, str]) -> List[str]: """ Given the mapping from entrypoint name to callable, return the relevant console script specs. @@ -326,62 +315,57 @@ def get_console_script_specs(console): # DEFAULT # - The default behavior is to install pip, pipX, pipX.Y, easy_install # and easy_install-X.Y. - pip_script = console.pop('pip', None) + pip_script = console.pop("pip", None) if pip_script: if "ENSUREPIP_OPTIONS" not in os.environ: - scripts_to_generate.append('pip = ' + pip_script) + scripts_to_generate.append("pip = " + pip_script) if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall": scripts_to_generate.append( - 'pip{} = {}'.format(sys.version_info[0], pip_script) + "pip{} = {}".format(sys.version_info[0], pip_script) ) - scripts_to_generate.append( - f'pip{get_major_minor_version()} = {pip_script}' - ) + scripts_to_generate.append(f"pip{get_major_minor_version()} = {pip_script}") # Delete any other versioned pip entry points - pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)] + pip_ep = [k for k in console if re.match(r"pip(\d(\.\d)?)?$", k)] for k in pip_ep: del console[k] - easy_install_script = console.pop('easy_install', None) + easy_install_script = console.pop("easy_install", None) if easy_install_script: if "ENSUREPIP_OPTIONS" not in os.environ: - scripts_to_generate.append( - 'easy_install = ' + easy_install_script - ) + scripts_to_generate.append("easy_install = " + easy_install_script) scripts_to_generate.append( - 'easy_install-{} = {}'.format( + "easy_install-{} = {}".format( get_major_minor_version(), easy_install_script ) ) # Delete any other versioned easy_install entry points easy_install_ep = [ - k for k in console if re.match(r'easy_install(-\d\.\d)?$', k) + k for k in console if re.match(r"easy_install(-\d\.\d)?$", k) ] for k in easy_install_ep: del console[k] # Generate the console entry points specified in the wheel - scripts_to_generate.extend(starmap('{} = {}'.format, console.items())) + scripts_to_generate.extend(starmap("{} = {}".format, console.items())) return scripts_to_generate class ZipBackedFile: - def __init__(self, src_record_path, dest_path, zip_file): - # type: (RecordPath, str, ZipFile) -> None + def __init__( + self, src_record_path: RecordPath, dest_path: str, zip_file: ZipFile + ) -> None: self.src_record_path = src_record_path self.dest_path = 
dest_path self._zip_file = zip_file self.changed = False - def _getinfo(self): - # type: () -> ZipInfo + def _getinfo(self) -> ZipInfo: return self._zip_file.getinfo(self.src_record_path) - def save(self): - # type: () -> None + def save(self) -> None: # directory creation is lazy and after file filtering # to ensure we don't install empty dirs; empty dirs can't be # uninstalled. @@ -410,22 +394,19 @@ def save(self): class ScriptFile: - def __init__(self, file): - # type: (File) -> None + def __init__(self, file: "File") -> None: self._file = file self.src_record_path = self._file.src_record_path self.dest_path = self._file.dest_path self.changed = False - def save(self): - # type: () -> None + def save(self) -> None: self._file.save() self.changed = fix_script(self.dest_path) class MissingCallableSuffix(InstallationError): - def __init__(self, entry_point): - # type: (str) -> None + def __init__(self, entry_point: str) -> None: super().__init__( "Invalid script entry point: {} - A callable " "suffix is required. Cf https://packaging.python.org/" @@ -434,31 +415,28 @@ def __init__(self, entry_point): ) -def _raise_for_invalid_entrypoint(specification): - # type: (str) -> None +def _raise_for_invalid_entrypoint(specification: str) -> None: entry = get_export_entry(specification) if entry is not None and entry.suffix is None: raise MissingCallableSuffix(str(entry)) class PipScriptMaker(ScriptMaker): - def make(self, specification, options=None): - # type: (str, Dict[str, Any]) -> List[str] + def make(self, specification: str, options: Dict[str, Any] = None) -> List[str]: _raise_for_invalid_entrypoint(specification) return super().make(specification, options) def _install_wheel( - name, # type: str - wheel_zip, # type: ZipFile - wheel_path, # type: str - scheme, # type: Scheme - pycompile=True, # type: bool - warn_script_location=True, # type: bool - direct_url=None, # type: Optional[DirectUrl] - requested=False, # type: bool -): - # type: (...) -> None + name: str, + wheel_zip: ZipFile, + wheel_path: str, + scheme: Scheme, + pycompile: bool = True, + warn_script_location: bool = True, + direct_url: Optional[DirectUrl] = None, + requested: bool = False, +) -> None: """Install a wheel. :param name: Name of the project to install @@ -485,33 +463,23 @@ def _install_wheel( # installed = files copied from the wheel to the destination # changed = files changed while installing (scripts #! line typically) # generated = files newly generated during the install (script wrappers) - installed = {} # type: Dict[RecordPath, RecordPath] - changed = set() # type: Set[RecordPath] - generated = [] # type: List[str] + installed: Dict[RecordPath, RecordPath] = {} + changed: Set[RecordPath] = set() + generated: List[str] = [] - def record_installed(srcfile, destfile, modified=False): - # type: (RecordPath, str, bool) -> None + def record_installed( + srcfile: RecordPath, destfile: str, modified: bool = False + ) -> None: """Map archive RECORD paths to installation RECORD paths.""" newpath = _fs_to_record_path(destfile, lib_dir) installed[srcfile] = newpath if modified: changed.add(_fs_to_record_path(destfile)) - def all_paths(): - # type: () -> Iterable[RecordPath] - names = wheel_zip.namelist() - # If a flag is set, names may be unicode in Python 2. We convert to - # text explicitly so these are valid for lookup in RECORD. 
- decoded_names = map(ensure_text, names) - for name in decoded_names: - yield cast("RecordPath", name) - - def is_dir_path(path): - # type: (RecordPath) -> bool + def is_dir_path(path: RecordPath) -> bool: return path.endswith("/") - def assert_no_path_traversal(dest_dir_path, target_path): - # type: (str, str) -> None + def assert_no_path_traversal(dest_dir_path: str, target_path: str) -> None: if not is_within_directory(dest_dir_path, target_path): message = ( "The wheel {!r} has a file {!r} trying to install" @@ -521,10 +489,10 @@ def assert_no_path_traversal(dest_dir_path, target_path): message.format(wheel_path, target_path, dest_dir_path) ) - def root_scheme_file_maker(zip_file, dest): - # type: (ZipFile, str) -> Callable[[RecordPath], File] - def make_root_scheme_file(record_path): - # type: (RecordPath) -> File + def root_scheme_file_maker( + zip_file: ZipFile, dest: str + ) -> Callable[[RecordPath], "File"]: + def make_root_scheme_file(record_path: RecordPath) -> "File": normed_path = os.path.normpath(record_path) dest_path = os.path.join(dest, normed_path) assert_no_path_traversal(dest, dest_path) @@ -532,17 +500,12 @@ def make_root_scheme_file(record_path): return make_root_scheme_file - def data_scheme_file_maker(zip_file, scheme): - # type: (ZipFile, Scheme) -> Callable[[RecordPath], File] - scheme_paths = {} - for key in SCHEME_KEYS: - encoded_key = ensure_text(key) - scheme_paths[encoded_key] = ensure_text( - getattr(scheme, key), encoding=sys.getfilesystemencoding() - ) + def data_scheme_file_maker( + zip_file: ZipFile, scheme: Scheme + ) -> Callable[[RecordPath], "File"]: + scheme_paths = {key: getattr(scheme, key) for key in SCHEME_KEYS} - def make_data_scheme_file(record_path): - # type: (RecordPath) -> File + def make_data_scheme_file(record_path: RecordPath) -> "File": normed_path = os.path.normpath(record_path) try: _, scheme_key, dest_subpath = normed_path.split(os.path.sep, 2) @@ -561,9 +524,7 @@ def make_data_scheme_file(record_path): "Unknown scheme key used in {}: {} (for file {!r}). 
.data" " directory contents should be in subdirectories named" " with a valid scheme key ({})" - ).format( - wheel_path, scheme_key, record_path, valid_scheme_keys - ) + ).format(wheel_path, scheme_key, record_path, valid_scheme_keys) raise InstallationError(message) dest_path = os.path.join(scheme_path, dest_subpath) @@ -572,30 +533,19 @@ def make_data_scheme_file(record_path): return make_data_scheme_file - def is_data_scheme_path(path): - # type: (RecordPath) -> bool + def is_data_scheme_path(path: RecordPath) -> bool: return path.split("/", 1)[0].endswith(".data") - paths = all_paths() + paths = cast(List[RecordPath], wheel_zip.namelist()) file_paths = filterfalse(is_dir_path, paths) - root_scheme_paths, data_scheme_paths = partition( - is_data_scheme_path, file_paths - ) + root_scheme_paths, data_scheme_paths = partition(is_data_scheme_path, file_paths) - make_root_scheme_file = root_scheme_file_maker( - wheel_zip, - ensure_text(lib_dir, encoding=sys.getfilesystemencoding()), - ) - files = map(make_root_scheme_file, root_scheme_paths) + make_root_scheme_file = root_scheme_file_maker(wheel_zip, lib_dir) + files: Iterator[File] = map(make_root_scheme_file, root_scheme_paths) - def is_script_scheme_path(path): - # type: (RecordPath) -> bool + def is_script_scheme_path(path: RecordPath) -> bool: parts = path.split("/", 2) - return ( - len(parts) > 2 and - parts[0].endswith(".data") and - parts[1] == "scripts" - ) + return len(parts) > 2 and parts[0].endswith(".data") and parts[1] == "scripts" other_scheme_paths, script_scheme_paths = partition( is_script_scheme_path, data_scheme_paths @@ -606,30 +556,32 @@ def is_script_scheme_path(path): files = chain(files, other_scheme_files) # Get the defined entry points - distribution = get_wheel_distribution(wheel_path, canonicalize_name(name)) + distribution = get_wheel_distribution( + FilesystemWheel(wheel_path), + canonicalize_name(name), + ) console, gui = get_entrypoints(distribution) - def is_entrypoint_wrapper(file): - # type: (File) -> bool + def is_entrypoint_wrapper(file: "File") -> bool: # EP, EP.exe and EP-script.py are scripts generated for # entry point EP by setuptools path = file.dest_path name = os.path.basename(path) - if name.lower().endswith('.exe'): + if name.lower().endswith(".exe"): matchname = name[:-4] - elif name.lower().endswith('-script.py'): + elif name.lower().endswith("-script.py"): matchname = name[:-10] elif name.lower().endswith(".pya"): matchname = name[:-4] else: matchname = name # Ignore setuptools-generated scripts - return (matchname in console or matchname in gui) + return matchname in console or matchname in gui - script_scheme_files = map(make_data_scheme_file, script_scheme_paths) - script_scheme_files = filterfalse( - is_entrypoint_wrapper, script_scheme_files + script_scheme_files: Iterator[File] = map( + make_data_scheme_file, script_scheme_paths ) + script_scheme_files = filterfalse(is_entrypoint_wrapper, script_scheme_files) script_scheme_files = map(ScriptFile, script_scheme_files) files = chain(files, script_scheme_files) @@ -637,8 +589,7 @@ def is_entrypoint_wrapper(file): file.save() record_installed(file.src_record_path, file.dest_path, file.changed) - def pyc_source_file_paths(): - # type: () -> Iterator[str] + def pyc_source_file_paths() -> Iterator[str]: # We de-duplicate installation paths, since there can be overlap (e.g. # file in .data maps to same location as file in wheel root). 
# Sorting installation paths makes it easier to reproduce and debug @@ -647,30 +598,21 @@ def pyc_source_file_paths(): full_installed_path = os.path.join(lib_dir, installed_path) if not os.path.isfile(full_installed_path): continue - if not full_installed_path.endswith('.py'): + if not full_installed_path.endswith(".py"): continue yield full_installed_path - def pyc_output_path(path): - # type: (str) -> str - """Return the path the pyc file would have been written to. - """ + def pyc_output_path(path: str) -> str: + """Return the path the pyc file would have been written to.""" return importlib.util.cache_from_source(path) # Compile all of the pyc files for the installed files if pycompile: with captured_stdout() as stdout: with warnings.catch_warnings(): - warnings.filterwarnings('ignore') + warnings.filterwarnings("ignore") for path in pyc_source_file_paths(): - # Python 2's `compileall.compile_file` requires a str in - # error cases, so we must convert to the native type. - path_arg = ensure_str( - path, encoding=sys.getfilesystemencoding() - ) - success = compileall.compile_file( - path_arg, force=True, quiet=True - ) + success = compileall.compile_file(path, force=True, quiet=True) if success: pyc_path = pyc_output_path(path) assert os.path.exists(pyc_path) @@ -689,7 +631,7 @@ def pyc_output_path(path): # Ensure we don't generate any variants for scripts because this is almost # never what somebody wants. # See https://bitbucket.org/pypa/distlib/issue/35/ - maker.variants = {''} + maker.variants = {""} # This is required because otherwise distlib creates scripts that are not # executable. @@ -699,14 +641,12 @@ def pyc_output_path(path): # Generate the console and GUI entry points specified in the wheel scripts_to_generate = get_console_script_specs(console) - gui_scripts_to_generate = list(starmap('{} = {}'.format, gui.items())) + gui_scripts_to_generate = list(starmap("{} = {}".format, gui.items())) generated_console_scripts = maker.make_multiple(scripts_to_generate) generated.extend(generated_console_scripts) - generated.extend( - maker.make_multiple(gui_scripts_to_generate, {'gui': True}) - ) + generated.extend(maker.make_multiple(gui_scripts_to_generate, {"gui": True})) if warn_script_location: msg = message_about_scripts_not_on_PATH(generated_console_scripts) @@ -716,8 +656,7 @@ def pyc_output_path(path): generated_file_mode = 0o666 & ~current_umask() @contextlib.contextmanager - def _generate_file(path, **kwargs): - # type: (str, **Any) -> Iterator[BinaryIO] + def _generate_file(path: str, **kwargs: Any) -> Iterator[BinaryIO]: with adjacent_tmp_file(path, **kwargs) as f: yield f os.chmod(f.name, generated_file_mode) @@ -726,9 +665,9 @@ def _generate_file(path, **kwargs): dest_info_dir = os.path.join(lib_dir, info_dir) # Record pip as the installer - installer_path = os.path.join(dest_info_dir, 'INSTALLER') + installer_path = os.path.join(dest_info_dir, "INSTALLER") with _generate_file(installer_path) as installer_file: - installer_file.write(b'pip\n') + installer_file.write(b"pip\n") generated.append(installer_path) # Record the PEP 610 direct URL reference @@ -740,12 +679,12 @@ def _generate_file(path, **kwargs): # Record the REQUESTED file if requested: - requested_path = os.path.join(dest_info_dir, 'REQUESTED') + requested_path = os.path.join(dest_info_dir, "REQUESTED") with open(requested_path, "wb"): pass generated.append(requested_path) - record_text = distribution.read_text('RECORD') + record_text = distribution.read_text("RECORD") record_rows = 
list(csv.reader(record_text.splitlines())) rows = get_csv_rows_for_installed( @@ -753,42 +692,38 @@ def _generate_file(path, **kwargs): installed=installed, changed=changed, generated=generated, - lib_dir=lib_dir) + lib_dir=lib_dir, + ) # Record details of all files installed - record_path = os.path.join(dest_info_dir, 'RECORD') + record_path = os.path.join(dest_info_dir, "RECORD") - with _generate_file(record_path, **csv_io_kwargs('w')) as record_file: - # The type mypy infers for record_file is different for Python 3 - # (typing.IO[Any]) and Python 2 (typing.BinaryIO). We explicitly - # cast to typing.IO[str] as a workaround. - writer = csv.writer(cast('IO[str]', record_file)) + with _generate_file(record_path, **csv_io_kwargs("w")) as record_file: + # Explicitly cast to typing.IO[str] as a workaround for the mypy error: + # "writer" has incompatible type "BinaryIO"; expected "_Writer" + writer = csv.writer(cast("IO[str]", record_file)) writer.writerows(_normalized_outrows(rows)) @contextlib.contextmanager -def req_error_context(req_description): - # type: (str) -> Iterator[None] +def req_error_context(req_description: str) -> Iterator[None]: try: yield except InstallationError as e: message = "For req: {}. {}".format(req_description, e.args[0]) - reraise( - InstallationError, InstallationError(message), sys.exc_info()[2] - ) + raise InstallationError(message) from e def install_wheel( - name, # type: str - wheel_path, # type: str - scheme, # type: Scheme - req_description, # type: str - pycompile=True, # type: bool - warn_script_location=True, # type: bool - direct_url=None, # type: Optional[DirectUrl] - requested=False, # type: bool -): - # type: (...) -> None + name: str, + wheel_path: str, + scheme: Scheme, + req_description: str, + pycompile: bool = True, + warn_script_location: bool = True, + direct_url: Optional[DirectUrl] = None, + requested: bool = False, +) -> None: with ZipFile(wheel_path, allowZip64=True) as z: with req_error_context(req_description): _install_wheel( diff --git a/pipenv/patched/notpip/_internal/operations/prepare.py b/pipenv/patched/notpip/_internal/operations/prepare.py index 0c256d6b05..58a6f3f2d6 100644 --- a/pipenv/patched/notpip/_internal/operations/prepare.py +++ b/pipenv/patched/notpip/_internal/operations/prepare.py @@ -8,10 +8,9 @@ import mimetypes import os import shutil -from typing import Dict, Iterable, List, Optional, Tuple +from typing import Dict, Iterable, List, Optional from pipenv.patched.notpip._vendor.packaging.utils import canonicalize_name -from pipenv.patched.notpip._vendor.pkg_resources import Distribution from pipenv.patched.notpip._internal.distributions import make_distribution_for_install_requirement from pipenv.patched.notpip._internal.distributions.installed import InstalledDistribution @@ -25,6 +24,7 @@ VcsHashUnsupported, ) from pipenv.patched.notpip._internal.index.package_finder import PackageFinder +from pipenv.patched.notpip._internal.metadata import BaseDistribution from pipenv.patched.notpip._internal.models.link import Link from pipenv.patched.notpip._internal.models.wheel import Wheel from pipenv.patched.notpip._internal.network.download import BatchDownloader, Downloader @@ -35,7 +35,6 @@ from pipenv.patched.notpip._internal.network.session import PipSession from pipenv.patched.notpip._internal.req.req_install import InstallRequirement from pipenv.patched.notpip._internal.req.req_tracker import RequirementTracker -from pipenv.patched.notpip._internal.utils.deprecation import deprecated from 
pipenv.patched.notpip._internal.utils.filesystem import copy2_fixed from pipenv.patched.notpip._internal.utils.hashes import Hashes, MissingHashes from pipenv.patched.notpip._internal.utils.logging import indent_log @@ -48,30 +47,26 @@ def _get_prepared_distribution( - req, # type: InstallRequirement - req_tracker, # type: RequirementTracker - finder, # type: PackageFinder - build_isolation, # type: bool -): - # type: (...) -> Distribution + req: InstallRequirement, + req_tracker: RequirementTracker, + finder: PackageFinder, + build_isolation: bool, +) -> BaseDistribution: """Prepare a distribution for installation.""" abstract_dist = make_distribution_for_install_requirement(req) with req_tracker.track(req): abstract_dist.prepare_distribution_metadata(finder, build_isolation) - return abstract_dist.get_pkg_resources_distribution() + return abstract_dist.get_metadata_distribution() -def unpack_vcs_link(link, location): - # type: (Link, str) -> None +def unpack_vcs_link(link: Link, location: str, verbosity: int) -> None: vcs_backend = vcs.get_backend_for_scheme(link.scheme) assert vcs_backend is not None - vcs_backend.unpack(location, url=hide_url(link.url)) + vcs_backend.unpack(location, url=hide_url(link.url), verbosity=verbosity) class File: - - def __init__(self, path, content_type): - # type: (str, Optional[str]) -> None + def __init__(self, path: str, content_type: Optional[str]) -> None: self.path = path if content_type is None: self.content_type = mimetypes.guess_type(path)[0] @@ -80,19 +75,16 @@ def __init__(self, path, content_type): def get_http_url( - link, # type: Link - download, # type: Downloader - download_dir=None, # type: Optional[str] - hashes=None, # type: Optional[Hashes] -): - # type: (...) -> File + link: Link, + download: Downloader, + download_dir: Optional[str] = None, + hashes: Optional[Hashes] = None, +) -> File: temp_dir = TempDirectory(kind="unpack", globally_managed=True) # If a download dir is specified, is the file already downloaded there? already_downloaded_path = None if download_dir: - already_downloaded_path = _check_download_dir( - link, download_dir, hashes - ) + already_downloaded_path = _check_download_dir(link, download_dir, hashes) if already_downloaded_path: from_path = already_downloaded_path @@ -106,8 +98,7 @@ def get_http_url( return File(from_path, content_type) -def _copy2_ignoring_special_files(src, dest): - # type: (str, str) -> None +def _copy2_ignoring_special_files(src: str, dest: str) -> None: """Copying special files is not supported, but as a convenience to users we skip errors copying them. This supports tools that may create e.g. socket files in the project source directory. @@ -127,21 +118,19 @@ def _copy2_ignoring_special_files(src, dest): ) -def _copy_source_tree(source, target): - # type: (str, str) -> None +def _copy_source_tree(source: str, target: str) -> None: target_abspath = os.path.abspath(target) target_basename = os.path.basename(target_abspath) target_dirname = os.path.dirname(target_abspath) - def ignore(d, names): - # type: (str, List[str]) -> List[str] - skipped = [] # type: List[str] + def ignore(d: str, names: List[str]) -> List[str]: + skipped: List[str] = [] if d == source: # Pulling in those directories can potentially be very slow, # exclude the following directories if they appear in the top # level dir (and only it). 
# See discussion at https://github.com/pypa/pip/pull/6770 - skipped += ['.tox', '.nox'] + skipped += [".tox", ".nox"] if os.path.abspath(d) == target_dirname: # Prevent an infinite recursion if the target is in source. # This can happen when TMPDIR is set to ${PWD}/... @@ -159,19 +148,13 @@ def ignore(d, names): def get_file_url( - link, # type: Link - download_dir=None, # type: Optional[str] - hashes=None # type: Optional[Hashes] -): - # type: (...) -> File - """Get file and optionally check its hash. - """ + link: Link, download_dir: Optional[str] = None, hashes: Optional[Hashes] = None +) -> File: + """Get file and optionally check its hash.""" # If a download dir is specified, is the file already there and valid? already_downloaded_path = None if download_dir: - already_downloaded_path = _check_download_dir( - link, download_dir, hashes - ) + already_downloaded_path = _check_download_dir(link, download_dir, hashes) if already_downloaded_path: from_path = already_downloaded_path @@ -189,13 +172,13 @@ def get_file_url( def unpack_url( - link, # type: Link - location, # type: str - download, # type: Downloader - download_dir=None, # type: Optional[str] - hashes=None, # type: Optional[Hashes] -): - # type: (...) -> Optional[File] + link: Link, + location: str, + download: Downloader, + verbosity: int, + download_dir: Optional[str] = None, + hashes: Optional[Hashes] = None, +) -> Optional[File]: """Unpack link into location, downloading if required. :param hashes: A Hashes object, one of whose embedded hashes must match, @@ -205,7 +188,7 @@ def unpack_url( """ # non-editable vcs urls if link.is_vcs: - unpack_vcs_link(link, location) + unpack_vcs_link(link, location, verbosity=verbosity) return None # Once out-of-tree-builds are no longer supported, could potentially @@ -214,17 +197,9 @@ def unpack_url( # # As further cleanup, _copy_source_tree and accompanying tests can # be removed. + # + # TODO when use-deprecated=out-of-tree-build is removed if link.is_existing_dir(): - deprecated( - "A future pip version will change local packages to be built " - "in-place without first copying to a temporary directory. " - "We recommend you use --use-feature=in-tree-build to test " - "your packages with this new behavior before it becomes the " - "default.\n", - replacement=None, - gone_in="21.3", - issue=7555 - ) if os.path.isdir(location): rmtree(location) _copy_source_tree(link.file_path, location) @@ -251,10 +226,11 @@ def unpack_url( return file -def _check_download_dir(link, download_dir, hashes): - # type: (Link, str, Optional[Hashes]) -> Optional[str] - """ Check download_dir for previously downloaded file with correct hash - If a correct file is found return its path else None +def _check_download_dir( + link: Link, download_dir: str, hashes: Optional[Hashes] +) -> Optional[str]: + """Check download_dir for previously downloaded file with correct hash + If a correct file is found return its path else None """ download_path = os.path.join(download_dir, link.filename) @@ -262,15 +238,14 @@ def _check_download_dir(link, download_dir, hashes): return None # If already downloaded, does its hash match? - logger.info('File was already downloaded %s', download_path) + logger.info("File was already downloaded %s", download_path) if hashes: try: hashes.check_against_path(download_path) except HashMismatch: logger.warning( - 'Previously-downloaded file %s has bad hash. ' - 'Re-downloading.', - download_path + "Previously-downloaded file %s has bad hash. 
Re-downloading.", + download_path, ) os.unlink(download_path) return None @@ -278,25 +253,24 @@ def _check_download_dir(link, download_dir, hashes): class RequirementPreparer: - """Prepares a Requirement - """ + """Prepares a Requirement""" def __init__( self, - build_dir, # type: str - download_dir, # type: Optional[str] - src_dir, # type: str - build_isolation, # type: bool - req_tracker, # type: RequirementTracker - session, # type: PipSession - progress_bar, # type: str - finder, # type: PackageFinder - require_hashes, # type: bool - use_user_site, # type: bool - lazy_wheel, # type: bool - in_tree_build, # type: bool - ): - # type: (...) -> None + build_dir: str, + download_dir: Optional[str], + src_dir: str, + build_isolation: bool, + req_tracker: RequirementTracker, + session: PipSession, + progress_bar: str, + finder: PackageFinder, + require_hashes: bool, + use_user_site: bool, + lazy_wheel: bool, + verbosity: int, + in_tree_build: bool, + ) -> None: super().__init__() self.src_dir = src_dir @@ -323,17 +297,19 @@ def __init__( # Should wheels be downloaded lazily? self.use_lazy_wheel = lazy_wheel + # How verbose should underlying tooling be? + self.verbosity = verbosity + # Should in-tree builds be used for local paths? self.in_tree_build = in_tree_build - # Memoized downloaded files, as mapping of url: (path, mime type) - self._downloaded = {} # type: Dict[str, Tuple[str, str]] + # Memoized downloaded files, as mapping of url: path. + self._downloaded: Dict[str, str] = {} # Previous "header" printed for a link-based InstallRequirement self._previous_requirement_header = ("", "") - def _log_preparing_link(self, req): - # type: (InstallRequirement) -> None + def _log_preparing_link(self, req: InstallRequirement) -> None: """Provide context for the requirement being prepared.""" if req.link.is_file and not req.original_link_is_in_wheel_cache: message = "Processing %s" @@ -350,8 +326,9 @@ def _log_preparing_link(self, req): with indent_log(): logger.info("Using cached %s", req.link.filename) - def _ensure_link_req_src_dir(self, req, parallel_builds): - # type: (InstallRequirement, bool) -> None + def _ensure_link_req_src_dir( + self, req: InstallRequirement, parallel_builds: bool + ) -> None: """Ensure source_dir of a linked InstallRequirement.""" # Since source_dir is only set for editable requirements. if req.link.is_wheel: @@ -376,6 +353,7 @@ def _ensure_link_req_src_dir(self, req, parallel_builds): # installation. # FIXME: this won't upgrade when there's an existing # package unpacked in `req.source_dir` + # TODO: this check is now probably dead code if is_installable_dir(req.source_dir): raise PreviousBuildDirError( "pip can't proceed with requirements '{}' due to a" @@ -385,8 +363,7 @@ def _ensure_link_req_src_dir(self, req, parallel_builds): "Please delete it and try again.".format(req, req.source_dir) ) - def _get_linked_req_hashes(self, req): - # type: (InstallRequirement) -> Hashes + def _get_linked_req_hashes(self, req: InstallRequirement) -> Hashes: # By the time this is called, the requirement's link should have # been checked so we can tell what kind of requirements req is # and raise some more informative errors than otherwise. @@ -418,18 +395,19 @@ def _get_linked_req_hashes(self, req): # showing the user what the hash should be. 
return req.hashes(trust_internet=False) or MissingHashes() - def _fetch_metadata_using_lazy_wheel(self, link): - # type: (Link) -> Optional[Distribution] + def _fetch_metadata_using_lazy_wheel( + self, + link: Link, + ) -> Optional[BaseDistribution]: """Fetch metadata using lazy wheel, if possible.""" if not self.use_lazy_wheel: return None if self.require_hashes: - logger.debug('Lazy wheel is not used as hash checking is required') + logger.debug("Lazy wheel is not used as hash checking is required") return None if link.is_file or not link.is_wheel: logger.debug( - 'Lazy wheel is not used as ' - '%r does not points to a remote wheel', + "Lazy wheel is not used as %r does not points to a remote wheel", link, ) return None @@ -437,22 +415,22 @@ def _fetch_metadata_using_lazy_wheel(self, link): wheel = Wheel(link.filename) name = canonicalize_name(wheel.name) logger.info( - 'Obtaining dependency information from %s %s', - name, wheel.version, + "Obtaining dependency information from %s %s", + name, + wheel.version, ) - url = link.url.split('#', 1)[0] + url = link.url.split("#", 1)[0] try: return dist_from_wheel_url(name, url, self._session) except HTTPRangeRequestUnsupported: - logger.debug('%s does not support range requests', url) + logger.debug("%s does not support range requests", url) return None def _complete_partial_requirements( self, - partially_downloaded_reqs, # type: Iterable[InstallRequirement] - parallel_builds=False, # type: bool - ): - # type: (...) -> None + partially_downloaded_reqs: Iterable[InstallRequirement], + parallel_builds: bool = False, + ) -> None: """Download any requirements which were only fetched by metadata.""" # Download to a temporary directory. These will be copied over as # needed for downstream 'download', 'wheel', and 'install' commands. @@ -461,7 +439,7 @@ def _complete_partial_requirements( # Map each link to the requirement that owns it. This allows us to set # `req.local_file_path` on the appropriate requirement after passing # all the links at once into BatchDownloader. 
- links_to_fully_download = {} # type: Dict[Link, InstallRequirement] + links_to_fully_download: Dict[Link, InstallRequirement] = {} for req in partially_downloaded_reqs: assert req.link links_to_fully_download[req.link] = req @@ -480,8 +458,9 @@ def _complete_partial_requirements( for req in partially_downloaded_reqs: self._prepare_linked_requirement(req, parallel_builds) - def prepare_linked_requirement(self, req, parallel_builds=False): - # type: (InstallRequirement, bool) -> Distribution + def prepare_linked_requirement( + self, req: InstallRequirement, parallel_builds: bool = False + ) -> BaseDistribution: """Prepare a requirement to be obtained from req.link.""" assert req.link link = req.link @@ -496,7 +475,7 @@ def prepare_linked_requirement(self, req, parallel_builds=False): if file_path is not None: # The file is already available, so mark it as downloaded - self._downloaded[req.link.url] = file_path, None + self._downloaded[req.link.url] = file_path else: # The file is not available, attempt to fetch only metadata wheel_dist = self._fetch_metadata_using_lazy_wheel(link) @@ -507,8 +486,9 @@ def prepare_linked_requirement(self, req, parallel_builds=False): # None of the optimizations worked, fully prepare the requirement return self._prepare_linked_requirement(req, parallel_builds) - def prepare_linked_requirements_more(self, reqs, parallel_builds=False): - # type: (Iterable[InstallRequirement], bool) -> None + def prepare_linked_requirements_more( + self, reqs: Iterable[InstallRequirement], parallel_builds: bool = False + ) -> None: """Prepare linked requirements more, if needed.""" reqs = [req for req in reqs if req.needs_more_preparation] for req in reqs: @@ -517,12 +497,12 @@ def prepare_linked_requirements_more(self, reqs, parallel_builds=False): hashes = self._get_linked_req_hashes(req) file_path = _check_download_dir(req.link, self.download_dir, hashes) if file_path is not None: - self._downloaded[req.link.url] = file_path, None + self._downloaded[req.link.url] = file_path req.needs_more_preparation = False # Prepare requirements we found were already downloaded for some # reason. The other downloads will be completed separately. - partially_downloaded_reqs = [] # type: List[InstallRequirement] + partially_downloaded_reqs: List[InstallRequirement] = [] for req in reqs: if req.needs_more_preparation: partially_downloaded_reqs.append(req) @@ -532,11 +512,13 @@ def prepare_linked_requirements_more(self, reqs, parallel_builds=False): # TODO: separate this part out from RequirementPreparer when the v1 # resolver can be removed! 
self._complete_partial_requirements( - partially_downloaded_reqs, parallel_builds=parallel_builds, + partially_downloaded_reqs, + parallel_builds=parallel_builds, ) - def _prepare_linked_requirement(self, req, parallel_builds): - # type: (InstallRequirement, bool) -> Distribution + def _prepare_linked_requirement( + self, req: InstallRequirement, parallel_builds: bool + ) -> BaseDistribution: assert req.link link = req.link @@ -548,19 +530,23 @@ def _prepare_linked_requirement(self, req, parallel_builds): elif link.url not in self._downloaded: try: local_file = unpack_url( - link, req.source_dir, self._download, - self.download_dir, hashes + link, + req.source_dir, + self._download, + self.verbosity, + self.download_dir, + hashes, ) except NetworkConnectionError as exc: raise InstallationError( - 'Could not install requirement {} because of HTTP ' - 'error {} for URL {}'.format(req, exc, link) + "Could not install requirement {} because of HTTP " + "error {} for URL {}".format(req, exc, link) ) else: - file_path, content_type = self._downloaded[link.url] + file_path = self._downloaded[link.url] if hashes: hashes.check_against_path(file_path) - local_file = File(file_path, content_type) + local_file = File(file_path, content_type=None) # For use in later processing, # preserve the file path on the requirement. @@ -568,12 +554,14 @@ def _prepare_linked_requirement(self, req, parallel_builds): req.local_file_path = local_file.path dist = _get_prepared_distribution( - req, self.req_tracker, self.finder, self.build_isolation, + req, + self.req_tracker, + self.finder, + self.build_isolation, ) return dist - def save_linked_requirement(self, req): - # type: (InstallRequirement) -> None + def save_linked_requirement(self, req: InstallRequirement) -> None: assert self.download_dir is not None assert req.link is not None link = req.link @@ -584,8 +572,9 @@ def save_linked_requirement(self, req): if link.is_existing_dir(): logger.debug( - 'Not copying link to destination directory ' - 'since it is a directory: %s', link, + "Not copying link to destination directory " + "since it is a directory: %s", + link, ) return if req.local_file_path is None: @@ -596,31 +585,32 @@ def save_linked_requirement(self, req): if not os.path.exists(download_location): shutil.copy(req.local_file_path, download_location) download_path = display_path(download_location) - logger.info('Saved %s', download_path) + logger.info("Saved %s", download_path) def prepare_editable_requirement( self, - req, # type: InstallRequirement - ): - # type: (...) 
-> Distribution - """Prepare an editable requirement - """ + req: InstallRequirement, + ) -> BaseDistribution: + """Prepare an editable requirement.""" assert req.editable, "cannot prepare a non-editable req as editable" - logger.info('Obtaining %s', req) + logger.info("Obtaining %s", req) with indent_log(): if self.require_hashes: raise InstallationError( - 'The editable requirement {} cannot be installed when ' - 'requiring hashes, because there is no single file to ' - 'hash.'.format(req) + "The editable requirement {} cannot be installed when " + "requiring hashes, because there is no single file to " + "hash.".format(req) ) req.ensure_has_source_dir(self.src_dir) req.update_editable() dist = _get_prepared_distribution( - req, self.req_tracker, self.finder, self.build_isolation, + req, + self.req_tracker, + self.finder, + self.build_isolation, ) req.check_if_exists(self.use_user_site) @@ -629,27 +619,24 @@ def prepare_editable_requirement( def prepare_installed_requirement( self, - req, # type: InstallRequirement - skip_reason # type: str - ): - # type: (...) -> Distribution - """Prepare an already-installed requirement - """ + req: InstallRequirement, + skip_reason: str, + ) -> BaseDistribution: + """Prepare an already-installed requirement.""" assert req.satisfied_by, "req should have been satisfied but isn't" assert skip_reason is not None, ( "did not get skip reason skipped but req.satisfied_by " "is set to {}".format(req.satisfied_by) ) logger.info( - 'Requirement %s: %s (%s)', - skip_reason, req, req.satisfied_by.version + "Requirement %s: %s (%s)", skip_reason, req, req.satisfied_by.version ) with indent_log(): if self.require_hashes: logger.debug( - 'Since it is already installed, we are trusting this ' - 'package without checking its hash. To ensure a ' - 'completely repeatable environment, install into an ' - 'empty virtualenv.' + "Since it is already installed, we are trusting this " + "package without checking its hash. To ensure a " + "completely repeatable environment, install into an " + "empty virtualenv." 
) - return InstalledDistribution(req).get_pkg_resources_distribution() + return InstalledDistribution(req).get_metadata_distribution() diff --git a/pipenv/patched/notpip/_internal/pyproject.py b/pipenv/patched/notpip/_internal/pyproject.py index 6e4dc3e638..c9544e181e 100644 --- a/pipenv/patched/notpip/_internal/pyproject.py +++ b/pipenv/patched/notpip/_internal/pyproject.py @@ -5,34 +5,29 @@ from pipenv.patched.notpip._vendor import tomli from pipenv.patched.notpip._vendor.packaging.requirements import InvalidRequirement, Requirement -from pipenv.patched.notpip._internal.exceptions import InstallationError +from pipenv.patched.notpip._internal.exceptions import ( + InstallationError, + InvalidPyProjectBuildRequires, + MissingPyProjectBuildRequires, +) -def _is_list_of_str(obj): - # type: (Any) -> bool - return ( - isinstance(obj, list) and - all(isinstance(item, str) for item in obj) - ) +def _is_list_of_str(obj: Any) -> bool: + return isinstance(obj, list) and all(isinstance(item, str) for item in obj) -def make_pyproject_path(unpacked_source_directory): - # type: (str) -> str - return os.path.join(unpacked_source_directory, 'pyproject.toml') +def make_pyproject_path(unpacked_source_directory: str) -> str: + return os.path.join(unpacked_source_directory, "pyproject.toml") -BuildSystemDetails = namedtuple('BuildSystemDetails', [ - 'requires', 'backend', 'check', 'backend_path' -]) +BuildSystemDetails = namedtuple( + "BuildSystemDetails", ["requires", "backend", "check", "backend_path"] +) def load_pyproject_toml( - use_pep517, # type: Optional[bool] - pyproject_toml, # type: str - setup_py, # type: str - req_name # type: str -): - # type: (...) -> Optional[BuildSystemDetails] + use_pep517: Optional[bool], pyproject_toml: str, setup_py: str, req_name: str +) -> Optional[BuildSystemDetails]: """Load the pyproject.toml file. Parameters: @@ -57,9 +52,15 @@ def load_pyproject_toml( has_pyproject = os.path.isfile(pyproject_toml) has_setup = os.path.isfile(setup_py) + if not has_pyproject and not has_setup: + raise InstallationError( + f"{req_name} does not appear to be a Python project: " + f"neither 'setup.py' nor 'pyproject.toml' found." + ) + if has_pyproject: with open(pyproject_toml, encoding="utf-8") as f: - pp_toml = tomli.load(f) + pp_toml = tomli.loads(f.read()) build_system = pp_toml.get("build-system") else: build_system = None @@ -82,9 +83,7 @@ def load_pyproject_toml( raise InstallationError( "Disabling PEP 517 processing is invalid: " "project specifies a build backend of {} " - "in pyproject.toml".format( - build_system["build-backend"] - ) + "in pyproject.toml".format(build_system["build-backend"]) ) use_pep517 = True @@ -124,46 +123,32 @@ def load_pyproject_toml( # Ensure that the build-system section in pyproject.toml conforms # to PEP 518. 
- error_template = ( - "{package} has a pyproject.toml file that does not comply " - "with PEP 518: {reason}" - ) # Specifying the build-system table but not the requires key is invalid if "requires" not in build_system: - raise InstallationError( - error_template.format(package=req_name, reason=( - "it has a 'build-system' table but not " - "'build-system.requires' which is mandatory in the table" - )) - ) + raise MissingPyProjectBuildRequires(package=req_name) # Error out if requires is not a list of strings requires = build_system["requires"] if not _is_list_of_str(requires): - raise InstallationError(error_template.format( + raise InvalidPyProjectBuildRequires( package=req_name, - reason="'build-system.requires' is not a list of strings.", - )) + reason="It is not a list of strings.", + ) # Each requirement must be valid as per PEP 508 for requirement in requires: try: Requirement(requirement) - except InvalidRequirement: - raise InstallationError( - error_template.format( - package=req_name, - reason=( - "'build-system.requires' contains an invalid " - "requirement: {!r}".format(requirement) - ), - ) - ) + except InvalidRequirement as error: + raise InvalidPyProjectBuildRequires( + package=req_name, + reason=f"It contains an invalid requirement: {requirement!r}", + ) from error backend = build_system.get("build-backend") backend_path = build_system.get("backend-path", []) - check = [] # type: List[str] + check: List[str] = [] if backend is None: # If the user didn't specify a backend, we assume they want to use # the setuptools backend. But we can't be sure they have included diff --git a/pipenv/patched/notpip/_internal/req/__init__.py b/pipenv/patched/notpip/_internal/req/__init__.py index dcc9bcaf44..0f68f7e3c6 100644 --- a/pipenv/patched/notpip/_internal/req/__init__.py +++ b/pipenv/patched/notpip/_internal/req/__init__.py @@ -9,8 +9,10 @@ from .req_set import RequirementSet __all__ = [ - "RequirementSet", "InstallRequirement", - "parse_requirements", "install_given_reqs", + "RequirementSet", + "InstallRequirement", + "parse_requirements", + "install_given_reqs", ] logger = logging.getLogger(__name__) @@ -52,8 +54,8 @@ def install_given_reqs( if to_install: logger.info( - 'Installing collected packages: %s', - ', '.join(to_install.keys()), + "Installing collected packages: %s", + ", ".join(to_install.keys()), ) installed = [] @@ -61,11 +63,9 @@ def install_given_reqs( with indent_log(): for req_name, requirement in to_install.items(): if requirement.should_reinstall: - logger.info('Attempting uninstall: %s', req_name) + logger.info("Attempting uninstall: %s", req_name) with indent_log(): - uninstalled_pathset = requirement.uninstall( - auto_confirm=True - ) + uninstalled_pathset = requirement.uninstall(auto_confirm=True) else: uninstalled_pathset = None diff --git a/pipenv/patched/notpip/_internal/req/constructors.py b/pipenv/patched/notpip/_internal/req/constructors.py index 16739d9c43..cef8ed1313 100644 --- a/pipenv/patched/notpip/_internal/req/constructors.py +++ b/pipenv/patched/notpip/_internal/req/constructors.py @@ -16,23 +16,23 @@ from pipenv.patched.notpip._vendor.packaging.markers import Marker from pipenv.patched.notpip._vendor.packaging.requirements import InvalidRequirement, Requirement from pipenv.patched.notpip._vendor.packaging.specifiers import Specifier -from pipenv.patched.notpip._vendor.pkg_resources import RequirementParseError, parse_requirements from pipenv.patched.notpip._internal.exceptions import InstallationError from 
pipenv.patched.notpip._internal.models.index import PyPI, TestPyPI from pipenv.patched.notpip._internal.models.link import Link from pipenv.patched.notpip._internal.models.wheel import Wheel -from pipenv.patched.notpip._internal.pyproject import make_pyproject_path from pipenv.patched.notpip._internal.req.req_file import ParsedRequirement from pipenv.patched.notpip._internal.req.req_install import InstallRequirement from pipenv.patched.notpip._internal.utils.filetypes import is_archive_file from pipenv.patched.notpip._internal.utils.misc import is_installable_dir +from pipenv.patched.notpip._internal.utils.packaging import get_requirement from pipenv.patched.notpip._internal.utils.urls import path_to_url from pipenv.patched.notpip._internal.vcs import is_url, vcs __all__ = [ - "install_req_from_editable", "install_req_from_line", - "parse_editable" + "install_req_from_editable", + "install_req_from_line", + "parse_editable", ] logger = logging.getLogger(__name__) @@ -40,7 +40,7 @@ def _strip_extras(path: str) -> Tuple[str, Optional[str]]: - m = re.match(r'^(.+)(\[[^\]]+\])$', path) + m = re.match(r"^(.+)(\[[^\]]+\])$", path) extras = None if m: path_no_extras = m.group(1) @@ -54,7 +54,7 @@ def _strip_extras(path: str) -> Tuple[str, Optional[str]]: def convert_extras(extras: Optional[str]) -> Set[str]: if not extras: return set() - return Requirement("placeholder" + extras.lower()).extras + return get_requirement("placeholder" + extras.lower()).extras def parse_editable(editable_req: str) -> Tuple[Optional[str], str, Set[str]]: @@ -74,39 +74,23 @@ def parse_editable(editable_req: str) -> Tuple[Optional[str], str, Set[str]]: url_no_extras, extras = _strip_extras(url) if os.path.isdir(url_no_extras): - setup_py = os.path.join(url_no_extras, 'setup.py') - setup_cfg = os.path.join(url_no_extras, 'setup.cfg') - if not os.path.exists(setup_py) and not os.path.exists(setup_cfg): - msg = ( - 'File "setup.py" or "setup.cfg" not found. Directory cannot be ' - 'installed in editable mode: {}' - .format(os.path.abspath(url_no_extras)) - ) - pyproject_path = make_pyproject_path(url_no_extras) - if os.path.isfile(pyproject_path): - msg += ( - '\n(A "pyproject.toml" file was found, but editable ' - 'mode currently requires a setuptools-based build.)' - ) - raise InstallationError(msg) - # Treating it as code that has already been checked out url_no_extras = path_to_url(url_no_extras) - if url_no_extras.lower().startswith('file:'): + if url_no_extras.lower().startswith("file:"): package_name = Link(url_no_extras).egg_fragment if extras: return ( package_name, url_no_extras, - Requirement("placeholder" + extras.lower()).extras, + get_requirement("placeholder" + extras.lower()).extras, ) else: return package_name, url_no_extras, set() for version_control in vcs: - if url.lower().startswith(f'{version_control}:'): - url = f'{version_control}+{url}' + if url.lower().startswith(f"{version_control}:"): + url = f"{version_control}+{url}" break link = Link(url) @@ -114,9 +98,9 @@ def parse_editable(editable_req: str) -> Tuple[Optional[str], str, Set[str]]: if not link.is_vcs: backends = ", ".join(vcs.all_schemes) raise InstallationError( - f'{editable_req} is not a valid editable requirement. ' - f'It should either be a path to a local project or a VCS URL ' - f'(beginning with {backends}).' + f"{editable_req} is not a valid editable requirement. " + f"It should either be a path to a local project or a VCS URL " + f"(beginning with {backends})." 
) package_name = link.egg_fragment @@ -128,43 +112,66 @@ def parse_editable(editable_req: str) -> Tuple[Optional[str], str, Set[str]]: return package_name, url, set() +def check_first_requirement_in_file(filename: str) -> None: + """Check if file is parsable as a requirements file. + + This is heavily based on ``pkg_resources.parse_requirements``, but + simplified to just check the first meaningful line. + + :raises InvalidRequirement: If the first meaningful line cannot be parsed + as an requirement. + """ + with open(filename, encoding="utf-8", errors="ignore") as f: + # Create a steppable iterator, so we can handle \-continuations. + lines = ( + line + for line in (line.strip() for line in f) + if line and not line.startswith("#") # Skip blank lines/comments. + ) + + for line in lines: + # Drop comments -- a hash without a space may be in a URL. + if " #" in line: + line = line[: line.find(" #")] + # If there is a line continuation, drop it, and append the next line. + if line.endswith("\\"): + line = line[:-2].strip() + next(lines, "") + Requirement(line) + return + + def deduce_helpful_msg(req: str) -> str: """Returns helpful msg in case requirements file does not exist, or cannot be parsed. :params req: Requirements file path """ - msg = "" - if os.path.exists(req): - msg = " The path does exist. " - # Try to parse and check if it is a requirements file. - try: - with open(req) as fp: - # parse first line only - next(parse_requirements(fp.read())) - msg += ( - "The argument you provided " - "({}) appears to be a" - " requirements file. If that is the" - " case, use the '-r' flag to install" - " the packages specified within it." - ).format(req) - except RequirementParseError: - logger.debug( - "Cannot parse '%s' as requirements file", req, exc_info=True - ) + if not os.path.exists(req): + return f" File '{req}' does not exist." + msg = " The path does exist. " + # Try to parse and check if it is a requirements file. + try: + check_first_requirement_in_file(req) + except InvalidRequirement: + logger.debug("Cannot parse '%s' as requirements file", req) else: - msg += f" File '{req}' does not exist." + msg += ( + f"The argument you provided " + f"({req}) appears to be a" + f" requirements file. If that is the" + f" case, use the '-r' flag to install" + f" the packages specified within it." + ) return msg class RequirementParts: def __init__( - self, - requirement: Optional[Requirement], - link: Optional[Link], - markers: Optional[Marker], - extras: Set[str], + self, + requirement: Optional[Requirement], + link: Optional[Link], + markers: Optional[Marker], + extras: Set[str], ): self.requirement = requirement self.link = link @@ -199,6 +206,7 @@ def install_req_from_editable( options: Optional[Dict[str, Any]] = None, constraint: bool = False, user_supplied: bool = False, + permit_editable_wheels: bool = False, ) -> InstallRequirement: parts = parse_req_from_editable(editable_req) @@ -208,6 +216,7 @@ def install_req_from_editable( comes_from=comes_from, user_supplied=user_supplied, editable=True, + permit_editable_wheels=permit_editable_wheels, link=parts.link, constraint=constraint, use_pep517=use_pep517, @@ -250,6 +259,8 @@ def _get_url_from_path(path: str, name: str) -> Optional[str]: if _looks_like_path(name) and os.path.isdir(path): if is_installable_dir(path): return path_to_url(path) + # TODO: The is_installable_dir test here might not be necessary + # now that it is done in load_pyproject_toml too. raise InstallationError( f"Directory {name!r} is not installable. 
Neither 'setup.py' " "nor 'pyproject.toml' found." @@ -258,24 +269,23 @@ def _get_url_from_path(path: str, name: str) -> Optional[str]: return None if os.path.isfile(path): return path_to_url(path) - urlreq_parts = name.split('@', 1) + urlreq_parts = name.split("@", 1) if len(urlreq_parts) >= 2 and not _looks_like_path(urlreq_parts[0]): # If the path contains '@' and the part before it does not look # like a path, try to treat it as a PEP 440 URL req instead. return None logger.warning( - 'Requirement %r looks like a filename, but the ' - 'file does not exist', - name + "Requirement %r looks like a filename, but the file does not exist", + name, ) return path_to_url(path) def parse_req_from_line(name: str, line_source: Optional[str]) -> RequirementParts: if is_url(name): - marker_sep = '; ' + marker_sep = "; " else: - marker_sep = ';' + marker_sep = ";" if marker_sep in name: name, markers_as_string = name.split(marker_sep, 1) markers_as_string = markers_as_string.strip() @@ -302,9 +312,8 @@ def parse_req_from_line(name: str, line_source: Optional[str]) -> RequirementPar # it's a local file, dir, or url if link: # Handle relative file URLs - if link.scheme == 'file' and re.search(r'\.\./', link.url): - link = Link( - path_to_url(os.path.normpath(os.path.abspath(link.path)))) + if link.scheme == "file" and re.search(r"\.\./", link.url): + link = Link(path_to_url(os.path.normpath(os.path.abspath(link.path)))) # wheel file if link.is_wheel: wheel = Wheel(link.filename) # can raise InvalidWheelFilename @@ -323,25 +332,24 @@ def parse_req_from_line(name: str, line_source: Optional[str]) -> RequirementPar def with_source(text: str) -> str: if not line_source: return text - return f'{text} (from {line_source})' + return f"{text} (from {line_source})" def _parse_req_string(req_as_string: str) -> Requirement: try: - req = Requirement(req_as_string) + req = get_requirement(req_as_string) except InvalidRequirement: if os.path.sep in req_as_string: add_msg = "It looks like a path." add_msg += deduce_helpful_msg(req_as_string) - elif ('=' in req_as_string and - not any(op in req_as_string for op in operators)): + elif "=" in req_as_string and not any( + op in req_as_string for op in operators + ): add_msg = "= is not a valid operator. Did you mean == ?" else: - add_msg = '' - msg = with_source( - f'Invalid requirement: {req_as_string!r}' - ) + add_msg = "" + msg = with_source(f"Invalid requirement: {req_as_string!r}") if add_msg: - msg += f'\nHint: {add_msg}' + msg += f"\nHint: {add_msg}" raise InstallationError(msg) else: # Deprecate extras after specifiers: "name>=1.0[extras]" @@ -350,7 +358,7 @@ def _parse_req_string(req_as_string: str) -> Requirement: # RequirementParts for spec in req.specifier: spec_str = str(spec) - if spec_str.endswith(']'): + if spec_str.endswith("]"): msg = f"Extras after version '{spec_str}'." 
raise InstallationError(msg) return req @@ -382,8 +390,12 @@ def install_req_from_line( parts = parse_req_from_line(name, line_source) return InstallRequirement( - parts.requirement, comes_from, link=parts.link, markers=parts.markers, - use_pep517=use_pep517, isolated=isolated, + parts.requirement, + comes_from, + link=parts.link, + markers=parts.markers, + use_pep517=use_pep517, + isolated=isolated, install_options=options.get("install_options", []) if options else [], global_options=options.get("global_options", []) if options else [], hash_options=options.get("hashes", {}) if options else {}, @@ -401,7 +413,7 @@ def install_req_from_req_string( user_supplied: bool = False, ) -> InstallRequirement: try: - req = Requirement(req_string) + req = get_requirement(req_string) except InvalidRequirement: raise InstallationError(f"Invalid requirement: '{req_string}'") @@ -409,8 +421,12 @@ def install_req_from_req_string( PyPI.file_storage_domain, TestPyPI.file_storage_domain, ] - if (req.url and comes_from and comes_from.link and - comes_from.link.netloc in domains_not_allowed): + if ( + req.url + and comes_from + and comes_from.link + and comes_from.link.netloc in domains_not_allowed + ): # Explicitly disallow pypi packages that depend on external urls raise InstallationError( "Packages installed from PyPI cannot depend on packages " diff --git a/pipenv/patched/notpip/_internal/req/req_file.py b/pipenv/patched/notpip/_internal/req/req_file.py index 2ea94f64c9..83ce8b8f4a 100644 --- a/pipenv/patched/notpip/_internal/req/req_file.py +++ b/pipenv/patched/notpip/_internal/req/req_file.py @@ -8,7 +8,17 @@ import shlex import urllib.parse from optparse import Values -from typing import TYPE_CHECKING, Any, Callable, Dict, Iterator, List, Optional, Tuple +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterable, + Iterator, + List, + Optional, + Tuple, +) from pipenv.patched.notpip._internal.cli import cmdoptions from pipenv.patched.notpip._internal.exceptions import InstallationError, RequirementsFileParseError @@ -25,20 +35,20 @@ from pipenv.patched.notpip._internal.index.package_finder import PackageFinder -__all__ = ['parse_requirements'] +__all__ = ["parse_requirements"] -ReqFileLines = Iterator[Tuple[int, str]] +ReqFileLines = Iterable[Tuple[int, str]] LineParser = Callable[[str], Tuple[str, Values]] -SCHEME_RE = re.compile(r'^(http|https|file):', re.I) -COMMENT_RE = re.compile(r'(^|\s+)#.*$') +SCHEME_RE = re.compile(r"^(http|https|file):", re.I) +COMMENT_RE = re.compile(r"(^|\s+)#.*$") # Matches environment variable-style values in '${MY_VARIABLE_1}' with the # variable name consisting of only uppercase letters, digits or the '_' # (underscore). This follows the POSIX standard defined in IEEE Std 1003.1, # 2013 Edition. 
-ENV_VAR_RE = re.compile(r'(?P<var>\$\{(?P<name>[A-Z0-9_]+)\})')
+ENV_VAR_RE = re.compile(r"(?P<var>\$\{(?P<name>[A-Z0-9_]+)\})")

 SUPPORTED_OPTIONS: List[Callable[..., optparse.Option]] = [
     cmdoptions.index_url,
@@ -134,10 +144,7 @@ def parse_requirements(

     for parsed_line in parser.parse(filename, constraint):
         parsed_req = handle_line(
-            parsed_line,
-            options=options,
-            finder=finder,
-            session=session
+            parsed_line, options=options, finder=finder, session=session
         )
         if parsed_req is not None:
             yield parsed_req
@@ -161,8 +168,10 @@ def handle_requirement_line(
 ) -> ParsedRequirement:

     # preserve for the nested code path
-    line_comes_from = '{} {} (line {})'.format(
-        '-c' if line.constraint else '-r', line.filename, line.lineno,
+    line_comes_from = "{} {} (line {})".format(
+        "-c" if line.constraint else "-r",
+        line.filename,
+        line.lineno,
     )

     assert line.is_requirement
@@ -187,7 +196,7 @@ def handle_requirement_line(
             if dest in line.opts.__dict__ and line.opts.__dict__[dest]:
                 req_options[dest] = line.opts.__dict__[dest]

-        line_source = f'line {line.lineno} of {line.filename}'
+        line_source = f"line {line.lineno} of {line.filename}"
         return ParsedRequirement(
             requirement=line.requirement,
             is_editable=line.is_editable,
@@ -213,8 +222,7 @@ def handle_option_line(
             options.require_hashes = opts.require_hashes
         if opts.features_enabled:
             options.features_enabled.extend(
-                f for f in opts.features_enabled
-                if f not in options.features_enabled
+                f for f in opts.features_enabled if f not in options.features_enabled
             )

     # set finder options
@@ -256,7 +264,7 @@ def handle_option_line(

     if session:
         for host in opts.trusted_hosts or []:
-            source = f'line {lineno} of {filename}'
+            source = f"line {lineno} of {filename}"
             session.add_trusted_host(host, source=source)
@@ -314,17 +322,15 @@ def __init__(
         self._line_parser = line_parser

     def parse(self, filename: str, constraint: bool) -> Iterator[ParsedLine]:
-        """Parse a given file, yielding parsed lines.
-        """
+        """Parse a given file, yielding parsed lines."""
         yield from self._parse_and_recurse(filename, constraint)

     def _parse_and_recurse(
         self, filename: str, constraint: bool
     ) -> Iterator[ParsedLine]:
         for line in self._parse_file(filename, constraint):
-            if (
-                not line.is_requirement and
-                (line.opts.requirements or line.opts.constraints)
+            if not line.is_requirement and (
+                line.opts.requirements or line.opts.constraints
             ):
                 # parse a nested requirements file
                 if line.opts.requirements:
@@ -342,7 +348,8 @@ def _parse_and_recurse(
                 elif not SCHEME_RE.search(req_path):
                     # do a join so relative paths work
                     req_path = os.path.join(
-                        os.path.dirname(filename), req_path,
+                        os.path.dirname(filename),
+                        req_path,
                     )

                 yield from self._parse_and_recurse(req_path, nested_constraint)
@@ -359,7 +366,7 @@ def _parse_file(self, filename: str, constraint: bool) -> Iterator[ParsedLine]:
                 args_str, opts = self._line_parser(line)
             except OptionParsingError as e:
                 # add offending line
-                msg = f'Invalid requirement: {line}\n{e.msg}'
+                msg = f"Invalid requirement: {line}\n{e.msg}"
                 raise RequirementsFileParseError(msg)

             yield ParsedLine(
@@ -395,16 +402,16 @@ def break_args_options(line: str) -> Tuple[str, str]:
     (and then optparse) the options, not the args. args can contain
     markers which are corrupted by shlex.
""" - tokens = line.split(' ') + tokens = line.split(" ") args = [] options = tokens[:] for token in tokens: - if token.startswith('-') or token.startswith('--'): + if token.startswith("-") or token.startswith("--"): break else: args.append(token) options.pop(0) - return ' '.join(args), ' '.join(options) + return " ".join(args), " ".join(options) class OptionParsingError(Exception): @@ -427,6 +434,7 @@ def build_parser() -> optparse.OptionParser: # that in our own exception. def parser_exit(self: Any, msg: str) -> "NoReturn": raise OptionParsingError(msg) + # NOTE: mypy disallows assigning to a method # https://github.com/python/mypy/issues/2427 parser.exit = parser_exit # type: ignore @@ -441,26 +449,26 @@ def join_lines(lines_enum: ReqFileLines) -> ReqFileLines: primary_line_number = None new_line: List[str] = [] for line_number, line in lines_enum: - if not line.endswith('\\') or COMMENT_RE.match(line): + if not line.endswith("\\") or COMMENT_RE.match(line): if COMMENT_RE.match(line): # this ensures comments are always matched later - line = ' ' + line + line = " " + line if new_line: new_line.append(line) assert primary_line_number is not None - yield primary_line_number, ''.join(new_line) + yield primary_line_number, "".join(new_line) new_line = [] else: yield line_number, line else: if not new_line: primary_line_number = line_number - new_line.append(line.strip('\\')) + new_line.append(line.strip("\\")) # last line contains \ if new_line: assert primary_line_number is not None - yield primary_line_number, ''.join(new_line) + yield primary_line_number, "".join(new_line) # TODO: handle space after '\'. @@ -470,7 +478,7 @@ def ignore_comments(lines_enum: ReqFileLines) -> ReqFileLines: Strips comments and filter empty lines. """ for line_number, line in lines_enum: - line = COMMENT_RE.sub('', line) + line = COMMENT_RE.sub("", line) line = line.strip() if line: yield line_number, line @@ -514,15 +522,15 @@ def get_file_content(url: str, session: PipSession) -> Tuple[str, str]: scheme = get_url_scheme(url) # Pip has special support for file:// URLs (LocalFSAdapter). - if scheme in ['http', 'https', 'file']: + if scheme in ["http", "https", "file"]: resp = session.get(url) raise_for_status(resp) return resp.url, resp.text # Assume this is a bare path. try: - with open(url, 'rb') as f: + with open(url, "rb") as f: content = auto_decode(f.read()) except OSError as exc: - raise InstallationError(f'Could not open requirements file: {exc}') + raise InstallationError(f"Could not open requirements file: {exc}") return url, content diff --git a/pipenv/patched/notpip/_internal/req/req_install.py b/pipenv/patched/notpip/_internal/req/req_install.py index 149ecc4424..c5ef9a61d2 100644 --- a/pipenv/patched/notpip/_internal/req/req_install.py +++ b/pipenv/patched/notpip/_internal/req/req_install.py @@ -1,15 +1,15 @@ # The following comment should be removed at some point in the future. 
# mypy: strict-optional=False +import functools import logging import os import shutil import sys import uuid import zipfile -from typing import Any, Dict, Iterable, List, Optional, Sequence, Union +from typing import Any, Collection, Dict, Iterable, List, Optional, Sequence, Union -from pipenv.patched.notpip._vendor import pkg_resources, six from pipenv.patched.notpip._vendor.packaging.markers import Marker from pipenv.patched.notpip._vendor.packaging.requirements import Requirement from pipenv.patched.notpip._vendor.packaging.specifiers import SpecifierSet @@ -17,39 +17,43 @@ from pipenv.patched.notpip._vendor.packaging.version import Version from pipenv.patched.notpip._vendor.packaging.version import parse as parse_version from pipenv.patched.notpip._vendor.pep517.wrappers import Pep517HookCaller -from pipenv.patched.notpip._vendor.pkg_resources import Distribution from pipenv.patched.notpip._internal.build_env import BuildEnvironment, NoOpBuildEnvironment -from pipenv.patched.notpip._internal.exceptions import InstallationError +from pipenv.patched.notpip._internal.exceptions import InstallationError, LegacyInstallFailure from pipenv.patched.notpip._internal.locations import get_scheme +from pipenv.patched.notpip._internal.metadata import ( + BaseDistribution, + get_default_environment, + get_directory_distribution, +) from pipenv.patched.notpip._internal.models.link import Link from pipenv.patched.notpip._internal.operations.build.metadata import generate_metadata +from pipenv.patched.notpip._internal.operations.build.metadata_editable import generate_editable_metadata from pipenv.patched.notpip._internal.operations.build.metadata_legacy import ( generate_metadata as generate_metadata_legacy, ) from pipenv.patched.notpip._internal.operations.install.editable_legacy import ( install_editable as install_editable_legacy, ) -from pipenv.patched.notpip._internal.operations.install.legacy import LegacyInstallFailure from pipenv.patched.notpip._internal.operations.install.legacy import install as install_legacy from pipenv.patched.notpip._internal.operations.install.wheel import install_wheel from pipenv.patched.notpip._internal.pyproject import load_pyproject_toml, make_pyproject_path from pipenv.patched.notpip._internal.req.req_uninstall import UninstallPathSet from pipenv.patched.notpip._internal.utils.deprecation import deprecated -from pipenv.patched.notpip._internal.utils.direct_url_helpers import direct_url_from_link +from pipenv.patched.notpip._internal.utils.direct_url_helpers import ( + direct_url_for_editable, + direct_url_from_link, +) from pipenv.patched.notpip._internal.utils.hashes import Hashes -from pipenv.patched.notpip._internal.utils.logging import indent_log from pipenv.patched.notpip._internal.utils.misc import ( ask_path_exists, backup_dir, display_path, - dist_in_site_packages, - dist_in_usersite, - get_distribution, hide_url, redact_auth_from_url, ) -from pipenv.patched.notpip._internal.utils.packaging import get_metadata +from pipenv.patched.notpip._internal.utils.packaging import safe_extra +from pipenv.patched.notpip._internal.utils.subprocess import runner_with_spinner_message from pipenv.patched.notpip._internal.utils.temp_dir import TempDirectory, tempdir_kinds from pipenv.patched.notpip._internal.utils.virtualenv import running_under_virtualenv from pipenv.patched.notpip._internal.vcs import vcs @@ -57,32 +61,6 @@ logger = logging.getLogger(__name__) -def _get_dist(metadata_directory: str) -> Distribution: - """Return a pkg_resources.Distribution for the 
provided - metadata directory. - """ - dist_dir = metadata_directory.rstrip(os.sep) - - # Build a PathMetadata object, from path to metadata. :wink: - base_dir, dist_dir_name = os.path.split(dist_dir) - metadata = pkg_resources.PathMetadata(base_dir, dist_dir) - - # Determine the correct Distribution object type. - if dist_dir.endswith(".egg-info"): - dist_cls = pkg_resources.Distribution - dist_name = os.path.splitext(dist_dir_name)[0] - else: - assert dist_dir.endswith(".dist-info") - dist_cls = pkg_resources.DistInfoDistribution - dist_name = os.path.splitext(dist_dir_name)[0].split("-")[0] - - return dist_cls( - base_dir, - project_name=dist_name, - metadata=metadata, - ) - - class InstallRequirement: """ Represents something that may be installed later on, may have information @@ -103,14 +81,16 @@ def __init__( global_options: Optional[List[str]] = None, hash_options: Optional[Dict[str, List[str]]] = None, constraint: bool = False, - extras: Iterable[str] = (), + extras: Collection[str] = (), user_supplied: bool = False, + permit_editable_wheels: bool = False, ) -> None: assert req is None or isinstance(req, Requirement), req self.req = req self.comes_from = comes_from self.constraint = constraint self.editable = editable + self.permit_editable_wheels = permit_editable_wheels self.legacy_install_reason: Optional[int] = None # source_dir is the local directory where the linked requirement is @@ -122,9 +102,7 @@ def __init__( if self.editable: assert link if link.is_file: - self.source_dir = os.path.normpath( - os.path.abspath(link.file_path) - ) + self.source_dir = os.path.normpath(os.path.abspath(link.file_path)) if link is None and req and req.url: # PEP 508 URL requirement @@ -140,18 +118,15 @@ def __init__( if extras: self.extras = extras elif req: - self.extras = { - pkg_resources.safe_extra(extra) for extra in req.extras - } + self.extras = {safe_extra(extra) for extra in req.extras} else: self.extras = set() if markers is None and req: markers = req.marker self.markers = markers - # This holds the pkg_resources.Distribution object if this requirement - # is already available: - self.satisfied_by: Optional[Distribution] = None + # This holds the Distribution object if this requirement is already installed. + self.satisfied_by: Optional[BaseDistribution] = None # Whether the installation process should try to uninstall an existing # distribution before installing this requirement. self.should_reinstall = False @@ -202,36 +177,34 @@ def __str__(self) -> str: if self.req: s = str(self.req) if self.link: - s += ' from {}'.format(redact_auth_from_url(self.link.url)) + s += " from {}".format(redact_auth_from_url(self.link.url)) elif self.link: s = redact_auth_from_url(self.link.url) else: - s = '' + s = "" if self.satisfied_by is not None: - s += ' in {}'.format(display_path(self.satisfied_by.location)) + s += " in {}".format(display_path(self.satisfied_by.location)) if self.comes_from: if isinstance(self.comes_from, str): comes_from: Optional[str] = self.comes_from else: comes_from = self.comes_from.from_path() if comes_from: - s += f' (from {comes_from})' + s += f" (from {comes_from})" return s def __repr__(self) -> str: - return '<{} object: {} editable={!r}>'.format( - self.__class__.__name__, str(self), self.editable) + return "<{} object: {} editable={!r}>".format( + self.__class__.__name__, str(self), self.editable + ) def format_debug(self) -> str: - """An un-tested helper for getting state, for debugging. 
- """ + """An un-tested helper for getting state, for debugging.""" attributes = vars(self) names = sorted(attributes) - state = ( - "{}={!r}".format(attr, attributes[attr]) for attr in sorted(names) - ) - return '<{name} object: {{{state}}}>'.format( + state = ("{}={!r}".format(attr, attributes[attr]) for attr in sorted(names)) + return "<{name} object: {{{state}}}>".format( name=self.__class__.__name__, state=", ".join(state), ) @@ -241,7 +214,19 @@ def format_debug(self) -> str: def name(self) -> Optional[str]: if self.req is None: return None - return pkg_resources.safe_name(self.req.name) + return self.req.name + + @functools.lru_cache() # use cached_property in python 3.8+ + def supports_pyproject_editable(self) -> bool: + if not self.use_pep517: + return False + assert self.pep517_backend + with self.build_env: + runner = runner_with_spinner_message( + "Checking if build backend supports build_editable" + ) + with self.pep517_backend.subprocess_runner(runner): + return "build_editable" in self.pep517_backend._supported_features() @property def specifier(self) -> SpecifierSet: @@ -254,18 +239,17 @@ def is_pinned(self) -> bool: For example, some-package==1.2 is pinned; some-package>1.2 is not. """ specifiers = self.specifier - return (len(specifiers) == 1 and - next(iter(specifiers)).operator in {'==', '==='}) + return len(specifiers) == 1 and next(iter(specifiers)).operator in {"==", "==="} def match_markers(self, extras_requested: Optional[Iterable[str]] = None) -> bool: if not extras_requested: # Provide an extra to safely evaluate the markers # without matching any extra - extras_requested = ('',) + extras_requested = ("",) if self.markers is not None: return any( - self.markers.evaluate({'extra': extra}) - for extra in extras_requested) + self.markers.evaluate({"extra": extra}) for extra in extras_requested + ) else: return True @@ -301,8 +285,7 @@ def hashes(self, trust_internet: bool = True) -> Hashes: return Hashes(good_hashes) def from_path(self) -> Optional[str]: - """Format a nice indicator to show where this "comes from" - """ + """Format a nice indicator to show where this "comes from" """ if self.req is None: return None s = str(self.req) @@ -312,7 +295,7 @@ def from_path(self) -> Optional[str]: else: comes_from = self.comes_from.from_path() if comes_from: - s += '->' + comes_from + s += "->" + comes_from return s def ensure_build_location( @@ -345,7 +328,7 @@ def ensure_build_location( # FIXME: Is there a better place to create the build_dir? (hg and bzr # need this) if not os.path.exists(build_dir): - logger.debug('Creating directory %s', build_dir) + logger.debug("Creating directory %s", build_dir) os.makedirs(build_dir) actual_build_dir = os.path.join(build_dir, dir_name) # `None` indicates that we respect the globally-configured deletion @@ -359,8 +342,7 @@ def ensure_build_location( ).path def _set_requirement(self) -> None: - """Set requirement after generating metadata. - """ + """Set requirement after generating metadata.""" assert self.req is None assert self.metadata is not None assert self.source_dir is not None @@ -372,11 +354,13 @@ def _set_requirement(self) -> None: op = "===" self.req = Requirement( - "".join([ - self.metadata["Name"], - op, - self.metadata["Version"], - ]) + "".join( + [ + self.metadata["Name"], + op, + self.metadata["Version"], + ] + ) ) def warn_on_mismatching_name(self) -> None: @@ -387,10 +371,12 @@ def warn_on_mismatching_name(self) -> None: # If we're here, there's a mismatch. Log a warning about it. 
logger.warning( - 'Generating metadata for package %s ' - 'produced metadata for project name %s. Fix your ' - '#egg=%s fragments.', - self.name, metadata_name, self.name + "Generating metadata for package %s " + "produced metadata for project name %s. Fix your " + "#egg=%s fragments.", + self.name, + metadata_name, + self.name, ) self.req = Requirement(metadata_name) @@ -401,30 +387,24 @@ def check_if_exists(self, use_user_site: bool) -> None: """ if self.req is None: return - existing_dist = get_distribution(self.req.name) + existing_dist = get_default_environment().get_distribution(self.req.name) if not existing_dist: return - # pkg_resouces may contain a different copy of packaging.version from - # pip in if the downstream distributor does a poor job debundling pip. - # We avoid existing_dist.parsed_version and let SpecifierSet.contains - # parses the version instead. - existing_version = existing_dist.version - version_compatible = ( - existing_version is not None and - self.req.specifier.contains(existing_version, prereleases=True) + version_compatible = self.req.specifier.contains( + existing_dist.version, + prereleases=True, ) if not version_compatible: self.satisfied_by = None if use_user_site: - if dist_in_usersite(existing_dist): + if existing_dist.in_usersite: self.should_reinstall = True - elif (running_under_virtualenv() and - dist_in_site_packages(existing_dist)): + elif running_under_virtualenv() and existing_dist.in_site_packages: raise InstallationError( - "Will not install to the user site because it will " - "lack sys.path precedence to {} in {}".format( - existing_dist.project_name, existing_dist.location) + f"Will not install to the user site because it will " + f"lack sys.path precedence to {existing_dist.raw_name} " + f"in {existing_dist.location}" ) else: self.should_reinstall = True @@ -448,16 +428,23 @@ def is_wheel(self) -> bool: @property def unpacked_source_directory(self) -> str: return os.path.join( - self.source_dir, - self.link and self.link.subdirectory_fragment or '') + self.source_dir, self.link and self.link.subdirectory_fragment or "" + ) @property def setup_py_path(self) -> str: assert self.source_dir, f"No source dir for {self}" - setup_py = os.path.join(self.unpacked_source_directory, 'setup.py') + setup_py = os.path.join(self.unpacked_source_directory, "setup.py") return setup_py + @property + def setup_cfg_path(self) -> str: + assert self.source_dir, f"No source dir for {self}" + setup_cfg = os.path.join(self.unpacked_source_directory, "setup.cfg") + + return setup_cfg + @property def pyproject_toml_path(self) -> str: assert self.source_dir, f"No source dir for {self}" @@ -472,10 +459,7 @@ def load_pyproject_toml(self) -> None: follow the PEP 517 or legacy (setup.py) code path. """ pyproject_toml_data = load_pyproject_toml( - self.use_pep517, - self.pyproject_toml_path, - self.setup_py_path, - str(self) + self.use_pep517, self.pyproject_toml_path, self.setup_py_path, str(self) ) if pyproject_toml_data is None: @@ -487,46 +471,68 @@ def load_pyproject_toml(self) -> None: self.requirements_to_check = check self.pyproject_requires = requires self.pep517_backend = Pep517HookCaller( - self.unpacked_source_directory, backend, backend_path=backend_path, + self.unpacked_source_directory, + backend, + backend_path=backend_path, python_executable=os.getenv('PIP_PYTHON_PATH', sys.executable) ) - def _generate_metadata(self) -> str: - """Invokes metadata generator functions, with the required arguments. 
- """ - if not self.use_pep517: - assert self.unpacked_source_directory + def isolated_editable_sanity_check(self) -> None: + """Check that an editable requirement if valid for use with PEP 517/518. - if not os.path.exists(self.setup_py_path): - raise InstallationError( - f'File "setup.py" not found for legacy project {self}.' - ) - - return generate_metadata_legacy( - build_env=self.build_env, - setup_py_path=self.setup_py_path, - source_dir=self.unpacked_source_directory, - isolated=self.isolated, - details=self.name or f"from {self.link}" + This verifies that an editable that has a pyproject.toml either supports PEP 660 + or as a setup.py or a setup.cfg + """ + if ( + self.editable + and self.use_pep517 + and not self.supports_pyproject_editable() + and not os.path.isfile(self.setup_py_path) + and not os.path.isfile(self.setup_cfg_path) + ): + raise InstallationError( + f"Project {self} has a 'pyproject.toml' and its build " + f"backend is missing the 'build_editable' hook. Since it does not " + f"have a 'setup.py' nor a 'setup.cfg', " + f"it cannot be installed in editable mode. " + f"Consider using a build backend that supports PEP 660." ) - assert self.pep517_backend is not None - - return generate_metadata( - build_env=self.build_env, - backend=self.pep517_backend, - ) - def prepare_metadata(self) -> None: """Ensure that project metadata is available. - Under PEP 517, call the backend hook to prepare the metadata. + Under PEP 517 and PEP 660, call the backend hook to prepare the metadata. Under legacy processing, call setup.py egg-info. """ assert self.source_dir - - with indent_log(): - self.metadata_directory = self._generate_metadata() + details = self.name or f"from {self.link}" + + if self.use_pep517: + assert self.pep517_backend is not None + if ( + self.editable + and self.permit_editable_wheels + and self.supports_pyproject_editable() + ): + self.metadata_directory = generate_editable_metadata( + build_env=self.build_env, + backend=self.pep517_backend, + details=details, + ) + else: + self.metadata_directory = generate_metadata( + build_env=self.build_env, + backend=self.pep517_backend, + details=details, + ) + else: + self.metadata_directory = generate_metadata_legacy( + build_env=self.build_env, + setup_py_path=self.setup_py_path, + source_dir=self.unpacked_source_directory, + isolated=self.isolated, + details=details, + ) # Act on the newly generated metadata, based on the name and version. 
if not self.name: @@ -538,26 +544,26 @@ def prepare_metadata(self) -> None: @property def metadata(self) -> Any: - if not hasattr(self, '_metadata'): - self._metadata = get_metadata(self.get_dist()) + if not hasattr(self, "_metadata"): + self._metadata = self.get_dist().metadata return self._metadata - def get_dist(self) -> Distribution: - return _get_dist(self.metadata_directory) + def get_dist(self) -> BaseDistribution: + return get_directory_distribution(self.metadata_directory) def assert_source_matches_version(self) -> None: assert self.source_dir - version = self.metadata['version'] + version = self.metadata["version"] if self.req.specifier and version not in self.req.specifier: logger.warning( - 'Requested %s, but installing version %s', + "Requested %s, but installing version %s", self, version, ) else: logger.debug( - 'Source in %s has version %s, which satisfies requirement %s', + "Source in %s has version %s, which satisfies requirement %s", display_path(self.source_dir), version, self, @@ -590,14 +596,13 @@ def ensure_has_source_dir( def update_editable(self) -> None: if not self.link: logger.debug( - "Cannot update repository at %s; repository location is " - "unknown", + "Cannot update repository at %s; repository location is unknown", self.source_dir, ) return assert self.editable assert self.source_dir - if self.link.scheme == 'file': + if self.link.scheme == "file": # Static paths don't get updated return vcs_backend = vcs.get_backend_for_scheme(self.link.scheme) @@ -605,7 +610,7 @@ def update_editable(self) -> None: # So here, if it's neither a path nor a valid VCS URL, it's a bug. assert vcs_backend, f"Unsupported VCS URL {self.link.url}" hidden_url = hide_url(self.link.url) - vcs_backend.obtain(self.source_dir, url=hidden_url) + vcs_backend.obtain(self.source_dir, url=hidden_url, verbosity=0) # Top-level Actions def uninstall( @@ -624,29 +629,28 @@ def uninstall( """ assert self.req - dist = get_distribution(self.req.name) + dist = get_default_environment().get_distribution(self.req.name) if not dist: logger.warning("Skipping %s as it is not installed.", self.name) return None - logger.info('Found existing installation: %s', dist) + logger.info("Found existing installation: %s", dist) uninstalled_pathset = UninstallPathSet.from_dist(dist) uninstalled_pathset.remove(auto_confirm, verbose) return uninstalled_pathset def _get_archive_name(self, path: str, parentdir: str, rootdir: str) -> str: - def _clean_zip_name(name: str, prefix: str) -> str: - assert name.startswith(prefix + os.path.sep), ( - f"name {name!r} doesn't start with prefix {prefix!r}" - ) - name = name[len(prefix) + 1:] - name = name.replace(os.path.sep, '/') + assert name.startswith( + prefix + os.path.sep + ), f"name {name!r} doesn't start with prefix {prefix!r}" + name = name[len(prefix) + 1 :] + name = name.replace(os.path.sep, "/") return name path = os.path.join(parentdir, path) name = _clean_zip_name(path, rootdir) - return self.name + '/' + name + return self.name + "/" + name def archive(self, build_dir: Optional[str]) -> None: """Saves archive to provided build_dir. @@ -658,57 +662,62 @@ def archive(self, build_dir: Optional[str]) -> None: return create_archive = True - archive_name = '{}-{}.zip'.format(self.name, self.metadata["version"]) + archive_name = "{}-{}.zip".format(self.name, self.metadata["version"]) archive_path = os.path.join(build_dir, archive_name) if os.path.exists(archive_path): response = ask_path_exists( - 'The file {} exists. 
(i)gnore, (w)ipe, ' - '(b)ackup, (a)bort '.format( - display_path(archive_path)), - ('i', 'w', 'b', 'a')) - if response == 'i': + "The file {} exists. (i)gnore, (w)ipe, " + "(b)ackup, (a)bort ".format(display_path(archive_path)), + ("i", "w", "b", "a"), + ) + if response == "i": create_archive = False - elif response == 'w': - logger.warning('Deleting %s', display_path(archive_path)) + elif response == "w": + logger.warning("Deleting %s", display_path(archive_path)) os.remove(archive_path) - elif response == 'b': + elif response == "b": dest_file = backup_dir(archive_path) logger.warning( - 'Backing up %s to %s', + "Backing up %s to %s", display_path(archive_path), display_path(dest_file), ) shutil.move(archive_path, dest_file) - elif response == 'a': + elif response == "a": sys.exit(-1) if not create_archive: return zip_output = zipfile.ZipFile( - archive_path, 'w', zipfile.ZIP_DEFLATED, allowZip64=True, + archive_path, + "w", + zipfile.ZIP_DEFLATED, + allowZip64=True, ) with zip_output: - dir = os.path.normcase( - os.path.abspath(self.unpacked_source_directory) - ) + dir = os.path.normcase(os.path.abspath(self.unpacked_source_directory)) for dirpath, dirnames, filenames in os.walk(dir): for dirname in dirnames: dir_arcname = self._get_archive_name( - dirname, parentdir=dirpath, rootdir=dir, + dirname, + parentdir=dirpath, + rootdir=dir, ) - zipdir = zipfile.ZipInfo(dir_arcname + '/') + zipdir = zipfile.ZipInfo(dir_arcname + "/") zipdir.external_attr = 0x1ED << 16 # 0o755 - zip_output.writestr(zipdir, '') + zip_output.writestr(zipdir, "") for filename in filenames: file_arcname = self._get_archive_name( - filename, parentdir=dirpath, rootdir=dir, + filename, + parentdir=dirpath, + rootdir=dir, ) filename = os.path.join(dirpath, filename) zip_output.write(filename, file_arcname) - logger.info('Saved %s', display_path(archive_path)) + logger.info("Saved %s", display_path(archive_path)) def install( self, @@ -719,7 +728,7 @@ def install( prefix: Optional[str] = None, warn_script_location: bool = True, use_user_site: bool = False, - pycompile: bool = True + pycompile: bool = True, ) -> None: scheme = get_scheme( self.name, @@ -731,7 +740,7 @@ def install( ) global_options = global_options if global_options is not None else [] - if self.editable: + if self.editable and not self.is_wheel: install_editable_legacy( install_options, global_options, @@ -750,7 +759,9 @@ def install( if self.is_wheel: assert self.local_file_path direct_url = None - if self.original_link: + if self.editable: + direct_url = direct_url_for_editable(self.unpacked_source_directory) + elif self.original_link: direct_url = direct_url_from_link( self.original_link, self.source_dir, @@ -798,7 +809,7 @@ def install( ) except LegacyInstallFailure as exc: self.install_succeeded = False - six.reraise(*exc.parent) + raise exc except Exception: self.install_succeeded = True raise @@ -809,8 +820,9 @@ def install( deprecated( reason=( "{} was installed using the legacy 'setup.py install' " - "method, because a wheel could not be built for it.". - format(self.name) + "method, because a wheel could not be built for it.".format( + self.name + ) ), replacement="to fix the wheel build issue reported above", gone_in=None, @@ -838,12 +850,10 @@ def check_invalid_constraint_type(req: InstallRequirement) -> str: "undocumented. The new implementation of the resolver no " "longer supports these forms." ), - replacement=( - "replacing the constraint with a requirement." 
- ), + replacement="replacing the constraint with a requirement", # No plan yet for when the new resolver becomes default gone_in=None, - issue=8210 + issue=8210, ) return problem diff --git a/pipenv/patched/notpip/_internal/req/req_set.py b/pipenv/patched/notpip/_internal/req/req_set.py index a9cde8004c..697ee8483b 100644 --- a/pipenv/patched/notpip/_internal/req/req_set.py +++ b/pipenv/patched/notpip/_internal/req/req_set.py @@ -13,10 +13,8 @@ class RequirementSet: - def __init__(self, check_supported_wheels: bool = True) -> None: - """Create a RequirementSet. - """ + """Create a RequirementSet.""" self.requirements: Dict[str, InstallRequirement] = OrderedDict() self.check_supported_wheels = check_supported_wheels @@ -28,7 +26,7 @@ def __str__(self) -> str: (req for req in self.requirements.values() if not req.comes_from), key=lambda req: canonicalize_name(req.name or ""), ) - return ' '.join(str(req.req) for req in requirements) + return " ".join(str(req.req) for req in requirements) def __repr__(self) -> str: requirements = sorted( @@ -36,11 +34,11 @@ def __repr__(self) -> str: key=lambda req: canonicalize_name(req.name or ""), ) - format_string = '<{classname} object; {count} requirement(s): {reqs}>' + format_string = "<{classname} object; {count} requirement(s): {reqs}>" return format_string.format( classname=self.__class__.__name__, count=len(requirements), - reqs=', '.join(str(req.req) for req in requirements), + reqs=", ".join(str(req.req) for req in requirements), ) def add_unnamed_requirement(self, install_req: InstallRequirement) -> None: @@ -57,7 +55,7 @@ def add_requirement( self, install_req: InstallRequirement, parent_req_name: Optional[str] = None, - extras_requested: Optional[Iterable[str]] = None + extras_requested: Optional[Iterable[str]] = None, ) -> Tuple[List[InstallRequirement], Optional[InstallRequirement]]: """Add install_req as a requirement to install. @@ -77,7 +75,8 @@ def add_requirement( if not install_req.match_markers(extras_requested): logger.info( "Ignoring %s: markers '%s' don't match your environment", - install_req.name, install_req.markers, + install_req.name, + install_req.markers, ) return [], None @@ -88,16 +87,17 @@ def add_requirement( if install_req.link and install_req.link.is_wheel: wheel = Wheel(install_req.link.filename) tags = compatibility_tags.get_supported() - if (self.check_supported_wheels and not wheel.supported(tags)): + if self.check_supported_wheels and not wheel.supported(tags): raise InstallationError( "{} is not a supported wheel on this platform.".format( - wheel.filename) + wheel.filename + ) ) # This next bit is really a sanity check. - assert not install_req.user_supplied or parent_req_name is None, ( - "a user supplied req shouldn't have a parent" - ) + assert ( + not install_req.user_supplied or parent_req_name is None + ), "a user supplied req shouldn't have a parent" # Unnamed requirements are scanned again and the requirement won't be # added as a dependency until after scanning. 
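
Note on the supported-wheel check in ``add_requirement`` above: the test that a wheel's filename tags match the running interpreter can be approximated with the public ``packaging`` APIs. A minimal sketch, illustrative only and not pip's internal code (``wheel_supported_here`` is a hypothetical helper name):

    from packaging.tags import sys_tags
    from packaging.utils import parse_wheel_filename

    def wheel_supported_here(filename: str) -> bool:
        # parse_wheel_filename splits e.g. "requests-2.27.1-py3-none-any.whl"
        # into (name, version, build, frozenset of Tag objects).
        _name, _version, _build, tags = parse_wheel_filename(filename)
        supported = set(sys_tags())  # tags this interpreter/platform accepts
        return any(tag in supported for tag in tags)

    print(wheel_supported_here("requests-2.27.1-py3-none-any.whl"))  # True on CPython 3
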
@@ -107,23 +107,25 @@ def add_requirement( try: existing_req: Optional[InstallRequirement] = self.get_requirement( - install_req.name) + install_req.name + ) except KeyError: existing_req = None has_conflicting_requirement = ( - parent_req_name is None and - existing_req and - not existing_req.constraint and - existing_req.extras == install_req.extras and - existing_req.req and - install_req.req and - existing_req.req.specifier != install_req.req.specifier + parent_req_name is None + and existing_req + and not existing_req.constraint + and existing_req.extras == install_req.extras + and existing_req.req + and install_req.req + and existing_req.req.specifier != install_req.req.specifier ) if has_conflicting_requirement: raise InstallationError( - "Double requirement given: {} (already in {}, name={!r})" - .format(install_req, existing_req, install_req.name) + "Double requirement given: {} (already in {}, name={!r})".format( + install_req, existing_req, install_req.name + ) ) # When no existing requirement exists, add the requirement as a @@ -138,12 +140,8 @@ def add_requirement( if install_req.constraint or not existing_req.constraint: return [], existing_req - does_not_satisfy_constraint = ( - install_req.link and - not ( - existing_req.link and - install_req.link.path == existing_req.link.path - ) + does_not_satisfy_constraint = install_req.link and not ( + existing_req.link and install_req.link.path == existing_req.link.path ) if does_not_satisfy_constraint: raise InstallationError( @@ -158,12 +156,13 @@ def add_requirement( # mark the existing object as such. if install_req.user_supplied: existing_req.user_supplied = True - existing_req.extras = tuple(sorted( - set(existing_req.extras) | set(install_req.extras) - )) + existing_req.extras = tuple( + sorted(set(existing_req.extras) | set(install_req.extras)) + ) logger.debug( "Setting %s extras to: %s", - existing_req, existing_req.extras, + existing_req, + existing_req.extras, ) # Return the existing requirement for addition to the parent and # scanning again. 
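
The "Double requirement given" branch above boils down to comparing the specifier sets of two requirements that share a name, while extras are merged instead of conflicting. A small illustration using the public ``packaging`` API (hedged: this mirrors the condition, it is not the vendored code):

    from packaging.requirements import Requirement

    existing = Requirement("requests==2.27.1")
    incoming = Requirement("requests>=2.28")

    # Same project name but non-identical specifiers: the situation that
    # raises "Double requirement given: ..." in add_requirement.
    print(existing.name == incoming.name)            # True
    print(existing.specifier != incoming.specifier)  # True

    # Extras, by contrast, are unioned rather than treated as a conflict:
    print(tuple(sorted({"socks"} | {"security"})))   # ('security', 'socks')
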
@@ -173,8 +172,8 @@ def has_requirement(self, name: str) -> bool: project_name = canonicalize_name(name) return ( - project_name in self.requirements and - not self.requirements[project_name].constraint + project_name in self.requirements + and not self.requirements[project_name].constraint ) def get_requirement(self, name: str) -> InstallRequirement: diff --git a/pipenv/patched/notpip/_internal/req/req_tracker.py b/pipenv/patched/notpip/_internal/req/req_tracker.py index cee440a469..f0d9d92c9e 100644 --- a/pipenv/patched/notpip/_internal/req/req_tracker.py +++ b/pipenv/patched/notpip/_internal/req/req_tracker.py @@ -40,12 +40,10 @@ def update_env_context_manager(**changes: str) -> Iterator[None]: @contextlib.contextmanager def get_requirement_tracker() -> Iterator["RequirementTracker"]: - root = os.environ.get('PIP_REQ_TRACKER') + root = os.environ.get("PIP_REQ_TRACKER") with contextlib.ExitStack() as ctx: if root is None: - root = ctx.enter_context( - TempDirectory(kind='req-tracker') - ).path + root = ctx.enter_context(TempDirectory(kind="req-tracker")).path ctx.enter_context(update_env_context_manager(PIP_REQ_TRACKER=root)) logger.debug("Initialized build tracking at %s", root) @@ -54,7 +52,6 @@ def get_requirement_tracker() -> Iterator["RequirementTracker"]: class RequirementTracker: - def __init__(self, root: str) -> None: self._root = root self._entries: Set[InstallRequirement] = set() @@ -68,7 +65,7 @@ def __exit__( self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType] + exc_tb: Optional[TracebackType], ) -> None: self.cleanup() @@ -77,8 +74,7 @@ def _entry_path(self, link: Link) -> str: return os.path.join(self._root, hashed) def add(self, req: InstallRequirement) -> None: - """Add an InstallRequirement to build tracking. - """ + """Add an InstallRequirement to build tracking.""" assert req.link # Get the file to write information about this requirement. @@ -92,30 +88,28 @@ def add(self, req: InstallRequirement) -> None: except FileNotFoundError: pass else: - message = '{} is already being built: {}'.format( - req.link, contents) + message = "{} is already being built: {}".format(req.link, contents) raise LookupError(message) # If we're here, req should really not be building already. assert req not in self._entries # Start tracking this requirement. - with open(entry_path, 'w', encoding="utf-8") as fp: + with open(entry_path, "w", encoding="utf-8") as fp: fp.write(str(req)) self._entries.add(req) - logger.debug('Added %s to build tracker %r', req, self._root) + logger.debug("Added %s to build tracker %r", req, self._root) def remove(self, req: InstallRequirement) -> None: - """Remove an InstallRequirement from build tracking. - """ + """Remove an InstallRequirement from build tracking.""" assert req.link # Delete the created file and the corresponding entries. 
os.unlink(self._entry_path(req.link)) self._entries.remove(req) - logger.debug('Removed %s from build tracker %r', req, self._root) + logger.debug("Removed %s from build tracker %r", req, self._root) def cleanup(self) -> None: for req in set(self._entries): diff --git a/pipenv/patched/notpip/_internal/req/req_uninstall.py b/pipenv/patched/notpip/_internal/req/req_uninstall.py index 22ce30689d..4dd6561b98 100644 --- a/pipenv/patched/notpip/_internal/req/req_uninstall.py +++ b/pipenv/patched/notpip/_internal/req/req_uninstall.py @@ -1,4 +1,3 @@ -import csv import functools import os import sys @@ -6,47 +5,33 @@ from importlib.util import cache_from_source from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Set, Tuple -from pipenv.patched.notpip._vendor import pkg_resources -from pipenv.patched.notpip._vendor.pkg_resources import Distribution - from pipenv.patched.notpip._internal.exceptions import UninstallationError from pipenv.patched.notpip._internal.locations import get_bin_prefix, get_bin_user +from pipenv.patched.notpip._internal.metadata import BaseDistribution from pipenv.patched.notpip._internal.utils.compat import WINDOWS +from pipenv.patched.notpip._internal.utils.egg_link import egg_link_path_from_location from pipenv.patched.notpip._internal.utils.logging import getLogger, indent_log -from pipenv.patched.notpip._internal.utils.misc import ( - ask, - dist_in_usersite, - dist_is_local, - egg_link_path, - is_local, - normalize_path, - renames, - rmtree, -) +from pipenv.patched.notpip._internal.utils.misc import ask, is_local, normalize_path, renames, rmtree from pipenv.patched.notpip._internal.utils.temp_dir import AdjacentTempDirectory, TempDirectory logger = getLogger(__name__) -def _script_names(dist: Distribution, script_name: str, is_gui: bool) -> List[str]: +def _script_names(bin_dir: str, script_name: str, is_gui: bool) -> Iterator[str]: """Create the fully qualified name of the files created by {console,gui}_scripts for the given ``dist``. 
Returns the list of file names """ - if dist_in_usersite(dist): - bin_dir = get_bin_user() - else: - bin_dir = get_bin_prefix() exe_name = os.path.join(bin_dir, script_name) - paths_to_remove = [exe_name] - if WINDOWS: - paths_to_remove.append(exe_name + '.exe') - paths_to_remove.append(exe_name + '.exe.manifest') - if is_gui: - paths_to_remove.append(exe_name + '-script.pyw') - else: - paths_to_remove.append(exe_name + '-script.py') - return paths_to_remove + yield exe_name + if not WINDOWS: + return + yield f"{exe_name}.exe" + yield f"{exe_name}.exe.manifest" + if is_gui: + yield f"{exe_name}-script.pyw" + else: + yield f"{exe_name}-script.py" def _unique(fn: Callable[..., Iterator[Any]]) -> Callable[..., Iterator[Any]]: @@ -57,11 +42,12 @@ def unique(*args: Any, **kw: Any) -> Iterator[Any]: if item not in seen: seen.add(item) yield item + return unique @_unique -def uninstallation_paths(dist: Distribution) -> Iterator[str]: +def uninstallation_paths(dist: BaseDistribution) -> Iterator[str]: """ Yield all the uninstallation paths for dist based on RECORD-without-.py[co] @@ -75,30 +61,32 @@ def uninstallation_paths(dist: Distribution) -> Iterator[str]: https://packaging.python.org/specifications/recording-installed-packages/ """ - try: - r = csv.reader(dist.get_metadata_lines('RECORD')) - except FileNotFoundError as missing_record_exception: - msg = 'Cannot uninstall {dist}, RECORD file not found.'.format(dist=dist) - try: - installer = next(dist.get_metadata_lines('INSTALLER')) - if not installer or installer == 'pip': - raise ValueError() - except (OSError, StopIteration, ValueError): - dep = '{}=={}'.format(dist.project_name, dist.version) - msg += (" You might be able to recover from this via: " - "'pip install --force-reinstall --no-deps {}'.".format(dep)) + location = dist.location + assert location is not None, "not installed" + + entries = dist.iter_declared_entries() + if entries is None: + msg = "Cannot uninstall {dist}, RECORD file not found.".format(dist=dist) + installer = dist.installer + if not installer or installer == "pip": + dep = "{}=={}".format(dist.raw_name, dist.version) + msg += ( + " You might be able to recover from this via: " + "'pip install --force-reinstall --no-deps {}'.".format(dep) + ) else: - msg += ' Hint: The package was installed by {}.'.format(installer) - raise UninstallationError(msg) from missing_record_exception - for row in r: - path = os.path.join(dist.location, row[0]) + msg += " Hint: The package was installed by {}.".format(installer) + raise UninstallationError(msg) + + for entry in entries: + path = os.path.join(location, entry) yield path - if path.endswith('.py'): + if path.endswith(".py"): dn, fn = os.path.split(path) base = fn[:-3] - path = os.path.join(dn, base + '.pyc') + path = os.path.join(dn, base + ".pyc") yield path - path = os.path.join(dn, base + '.pyo') + path = os.path.join(dn, base + ".pyo") yield path @@ -112,8 +100,8 @@ def compact(paths: Iterable[str]) -> Set[str]: short_paths: Set[str] = set() for path in sorted(paths, key=len): should_skip = any( - path.startswith(shortpath.rstrip("*")) and - path[len(shortpath.rstrip("*").rstrip(sep))] == sep + path.startswith(shortpath.rstrip("*")) + and path[len(shortpath.rstrip("*").rstrip(sep))] == sep for shortpath in short_paths ) if not should_skip: @@ -136,18 +124,15 @@ def norm_join(*a: str) -> str: return os.path.normcase(os.path.join(*a)) for root in unchecked: - if any(os.path.normcase(root).startswith(w) - for w in wildcards): + if any(os.path.normcase(root).startswith(w) 
for w in wildcards): # This directory has already been handled. continue all_files: Set[str] = set() all_subdirs: Set[str] = set() for dirname, subdirs, files in os.walk(root): - all_subdirs.update(norm_join(root, dirname, d) - for d in subdirs) - all_files.update(norm_join(root, dirname, f) - for f in files) + all_subdirs.update(norm_join(root, dirname, d) for d in subdirs) + all_files.update(norm_join(root, dirname, f) for f in files) # If all the files we found are in our remaining set of files to # remove, then remove them from the latter set and add a wildcard # for the directory. @@ -196,14 +181,14 @@ def compress_for_output_listing(paths: Iterable[str]) -> Tuple[Set[str], Set[str continue file_ = os.path.join(dirpath, fname) - if (os.path.isfile(file_) and - os.path.normcase(file_) not in _normcased_files): + if ( + os.path.isfile(file_) + and os.path.normcase(file_) not in _normcased_files + ): # We are skipping this file. Add it to the set. will_skip.add(file_) - will_remove = files | { - os.path.join(folder, "*") for folder in folders - } + will_remove = files | {os.path.join(folder, "*") for folder in folders} return will_remove, will_skip @@ -211,6 +196,7 @@ def compress_for_output_listing(paths: Iterable[str]) -> Tuple[Set[str], Set[str class StashedUninstallPathSet: """A set of file rename operations to stash files while tentatively uninstalling them.""" + def __init__(self) -> None: # Mapping from source file root to [Adjacent]TempDirectory # for files under that directory. @@ -252,7 +238,7 @@ def _get_file_stash(self, path: str) -> str: else: # Did not find any suitable root head = os.path.dirname(path) - save_dir = TempDirectory(kind='uninstall') + save_dir = TempDirectory(kind="uninstall") self._save_dirs[head] = save_dir relpath = os.path.relpath(path, head) @@ -271,7 +257,7 @@ def stash(self, path: str) -> str: new_path = self._get_file_stash(path) self._moves.append((path, new_path)) - if (path_is_dir and os.path.isdir(new_path)): + if path_is_dir and os.path.isdir(new_path): # If we're moving a directory, we need to # remove the destination first or else it will be # moved to inside the existing directory. 
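
The stash machinery above moves files into temporary directories so an in-progress uninstall can be rolled back. A standalone sketch of the same idea, simplified (my own ``FileStash``, not pip's ``StashedUninstallPathSet``; it ignores name collisions and directory trees):

    import os
    import shutil
    import tempfile
    from typing import List, Tuple

    class FileStash:
        """Move files aside so a destructive operation can be undone."""

        def __init__(self) -> None:
            self._save_dir = tempfile.mkdtemp(prefix="uninstall-stash-")
            self._moves: List[Tuple[str, str]] = []

        def stash(self, path: str) -> str:
            # Move the file out of the way, remembering where it came from.
            new_path = os.path.join(self._save_dir, os.path.basename(path))
            shutil.move(path, new_path)
            self._moves.append((path, new_path))
            return new_path

        def rollback(self) -> None:
            # Undo the moves in reverse order.
            for old_path, new_path in reversed(self._moves):
                shutil.move(new_path, old_path)
            self._moves.clear()

        def commit(self) -> None:
            # The operation succeeded; the stashed copies can be discarded.
            shutil.rmtree(self._save_dir, ignore_errors=True)
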
@@ -295,7 +281,7 @@ def rollback(self) -> None: for new_path, path in self._moves: try: - logger.debug('Replacing %s from %s', new_path, path) + logger.debug("Replacing %s from %s", new_path, path) if os.path.isfile(new_path) or os.path.islink(new_path): os.unlink(new_path) elif os.path.isdir(new_path): @@ -315,11 +301,12 @@ def can_rollback(self) -> bool: class UninstallPathSet: """A set of file paths to be removed in the uninstallation of a requirement.""" - def __init__(self, dist: Distribution) -> None: - self.paths: Set[str] = set() + + def __init__(self, dist: BaseDistribution) -> None: + self._paths: Set[str] = set() self._refuse: Set[str] = set() - self.pth: Dict[str, UninstallPthEntries] = {} - self.dist = dist + self._pth: Dict[str, UninstallPthEntries] = {} + self._dist = dist self._moved_paths = StashedUninstallPathSet() def _permitted(self, path: str) -> bool: @@ -340,58 +327,55 @@ def add(self, path: str) -> None: if not os.path.exists(path): return if self._permitted(path): - self.paths.add(path) + self._paths.add(path) else: self._refuse.add(path) # __pycache__ files can show up after 'installed-files.txt' is created, # due to imports - if os.path.splitext(path)[1] == '.py': + if os.path.splitext(path)[1] == ".py": self.add(cache_from_source(path)) def add_pth(self, pth_file: str, entry: str) -> None: pth_file = normalize_path(pth_file) if self._permitted(pth_file): - if pth_file not in self.pth: - self.pth[pth_file] = UninstallPthEntries(pth_file) - self.pth[pth_file].add(entry) + if pth_file not in self._pth: + self._pth[pth_file] = UninstallPthEntries(pth_file) + self._pth[pth_file].add(entry) else: self._refuse.add(pth_file) def remove(self, auto_confirm: bool = False, verbose: bool = False) -> None: - """Remove paths in ``self.paths`` with confirmation (unless + """Remove paths in ``self._paths`` with confirmation (unless ``auto_confirm`` is True).""" - if not self.paths: + if not self._paths: logger.info( "Can't uninstall '%s'. No files were found to uninstall.", - self.dist.project_name, + self._dist.raw_name, ) return - dist_name_version = ( - self.dist.project_name + "-" + self.dist.version - ) - logger.info('Uninstalling %s:', dist_name_version) + dist_name_version = f"{self._dist.raw_name}-{self._dist.version}" + logger.info("Uninstalling %s:", dist_name_version) with indent_log(): if auto_confirm or self._allowed_to_proceed(verbose): moved = self._moved_paths - for_rename = compress_for_rename(self.paths) + for_rename = compress_for_rename(self._paths) for path in sorted(compact(for_rename)): moved.stash(path) - logger.verbose('Removing file or directory %s', path) + logger.verbose("Removing file or directory %s", path) - for pth in self.pth.values(): + for pth in self._pth.values(): pth.remove() - logger.info('Successfully uninstalled %s', dist_name_version) + logger.info("Successfully uninstalled %s", dist_name_version) def _allowed_to_proceed(self, verbose: bool) -> bool: - """Display which files would be deleted and prompt for confirmation - """ + """Display which files would be deleted and prompt for confirmation""" def _display(msg: str, paths: Iterable[str]) -> None: if not paths: @@ -403,32 +387,32 @@ def _display(msg: str, paths: Iterable[str]) -> None: logger.info(path) if not verbose: - will_remove, will_skip = compress_for_output_listing(self.paths) + will_remove, will_skip = compress_for_output_listing(self._paths) else: # In verbose mode, display all the files that are going to be # deleted. 
- will_remove = set(self.paths) + will_remove = set(self._paths) will_skip = set() - _display('Would remove:', will_remove) - _display('Would not remove (might be manually added):', will_skip) - _display('Would not remove (outside of prefix):', self._refuse) + _display("Would remove:", will_remove) + _display("Would not remove (might be manually added):", will_skip) + _display("Would not remove (outside of prefix):", self._refuse) if verbose: - _display('Will actually move:', compress_for_rename(self.paths)) + _display("Will actually move:", compress_for_rename(self._paths)) - return ask('Proceed (Y/n)? ', ('y', 'n', '')) != 'n' + return ask("Proceed (Y/n)? ", ("y", "n", "")) != "n" def rollback(self) -> None: """Rollback the changes previously made by remove().""" if not self._moved_paths.can_rollback: logger.error( "Can't roll back %s; was not uninstalled", - self.dist.project_name, + self._dist.raw_name, ) return - logger.info('Rolling back uninstall of %s', self.dist.project_name) + logger.info("Rolling back uninstall of %s", self._dist.raw_name) self._moved_paths.rollback() - for pth in self.pth.values(): + for pth in self._pth.values(): pth.rollback() def commit(self) -> None: @@ -436,132 +420,156 @@ def commit(self) -> None: self._moved_paths.commit() @classmethod - def from_dist(cls, dist: Distribution) -> "UninstallPathSet": - dist_path = normalize_path(dist.location) - if not dist_is_local(dist): + def from_dist(cls, dist: BaseDistribution) -> "UninstallPathSet": + dist_location = dist.location + info_location = dist.info_location + if dist_location is None: + logger.info( + "Not uninstalling %s since it is not installed", + dist.canonical_name, + ) + return cls(dist) + + normalized_dist_location = normalize_path(dist_location) + if not dist.local: logger.info( "Not uninstalling %s at %s, outside environment %s", - dist.key, - dist_path, + dist.canonical_name, + normalized_dist_location, sys.prefix, ) return cls(dist) - if dist_path in {p for p in {sysconfig.get_path("stdlib"), - sysconfig.get_path("platstdlib")} - if p}: + if normalized_dist_location in { + p + for p in {sysconfig.get_path("stdlib"), sysconfig.get_path("platstdlib")} + if p + }: logger.info( "Not uninstalling %s at %s, as it is in the standard library.", - dist.key, - dist_path, + dist.canonical_name, + normalized_dist_location, ) return cls(dist) paths_to_remove = cls(dist) - develop_egg_link = egg_link_path(dist) - develop_egg_link_egg_info = '{}.egg-info'.format( - pkg_resources.to_filename(dist.project_name)) - egg_info_exists = dist.egg_info and os.path.exists(dist.egg_info) - # Special case for distutils installed package - distutils_egg_info = getattr(dist._provider, 'path', None) + develop_egg_link = egg_link_path_from_location(dist.raw_name) + + # Distribution is installed with metadata in a "flat" .egg-info + # directory. This means it is not a modern .dist-info installation, an + # egg, or legacy editable. + setuptools_flat_installation = ( + dist.installed_with_setuptools_egg_info + and info_location is not None + and os.path.exists(info_location) + # If dist is editable and the location points to a ``.egg-info``, + # we are in fact in the legacy editable case. 
+ and not info_location.endswith(f"{dist.setuptools_filename}.egg-info") + ) # Uninstall cases order do matter as in the case of 2 installs of the # same package, pip needs to uninstall the currently detected version - if (egg_info_exists and dist.egg_info.endswith('.egg-info') and - not dist.egg_info.endswith(develop_egg_link_egg_info)): - # if dist.egg_info.endswith(develop_egg_link_egg_info), we - # are in fact in the develop_egg_link case - paths_to_remove.add(dist.egg_info) - if dist.has_metadata('installed-files.txt'): - for installed_file in dist.get_metadata( - 'installed-files.txt').splitlines(): - path = os.path.normpath( - os.path.join(dist.egg_info, installed_file) - ) - paths_to_remove.add(path) + if setuptools_flat_installation: + if info_location is not None: + paths_to_remove.add(info_location) + installed_files = dist.iter_declared_entries() + if installed_files is not None: + for installed_file in installed_files: + paths_to_remove.add(os.path.join(dist_location, installed_file)) # FIXME: need a test for this elif block # occurs with --single-version-externally-managed/--record outside # of pip - elif dist.has_metadata('top_level.txt'): - if dist.has_metadata('namespace_packages.txt'): - namespaces = dist.get_metadata('namespace_packages.txt') - else: + elif dist.is_file("top_level.txt"): + try: + namespace_packages = dist.read_text("namespace_packages.txt") + except FileNotFoundError: namespaces = [] + else: + namespaces = namespace_packages.splitlines(keepends=False) for top_level_pkg in [ - p for p - in dist.get_metadata('top_level.txt').splitlines() - if p and p not in namespaces]: - path = os.path.join(dist.location, top_level_pkg) + p + for p in dist.read_text("top_level.txt").splitlines() + if p and p not in namespaces + ]: + path = os.path.join(dist_location, top_level_pkg) paths_to_remove.add(path) - paths_to_remove.add(path + '.py') - paths_to_remove.add(path + '.pyc') - paths_to_remove.add(path + '.pyo') + paths_to_remove.add(f"{path}.py") + paths_to_remove.add(f"{path}.pyc") + paths_to_remove.add(f"{path}.pyo") - elif distutils_egg_info: + elif dist.installed_by_distutils: raise UninstallationError( "Cannot uninstall {!r}. It is a distutils installed project " "and thus we cannot accurately determine which files belong " "to it which would lead to only a partial uninstall.".format( - dist.project_name, + dist.raw_name, ) ) - elif dist.location.endswith('.egg'): + elif dist.installed_as_egg: # package installed by easy_install # We cannot match on dist.egg_name because it can slightly vary # i.e. setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg - paths_to_remove.add(dist.location) - easy_install_egg = os.path.split(dist.location)[1] - easy_install_pth = os.path.join(os.path.dirname(dist.location), - 'easy-install.pth') - paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg) + paths_to_remove.add(dist_location) + easy_install_egg = os.path.split(dist_location)[1] + easy_install_pth = os.path.join( + os.path.dirname(dist_location), + "easy-install.pth", + ) + paths_to_remove.add_pth(easy_install_pth, "./" + easy_install_egg) - elif egg_info_exists and dist.egg_info.endswith('.dist-info'): + elif dist.installed_with_dist_info: for path in uninstallation_paths(dist): paths_to_remove.add(path) elif develop_egg_link: - # develop egg + # PEP 660 modern editable is handled in the ``.dist-info`` case + # above, so this only covers the setuptools-style editable. 
with open(develop_egg_link) as fh: link_pointer = os.path.normcase(fh.readline().strip()) - assert (link_pointer == dist.location), ( - 'Egg-link {} does not match installed location of {} ' - '(at {})'.format( - link_pointer, dist.project_name, dist.location) + assert link_pointer == dist_location, ( + f"Egg-link {link_pointer} does not match installed location of " + f"{dist.raw_name} (at {dist_location})" ) paths_to_remove.add(develop_egg_link) - easy_install_pth = os.path.join(os.path.dirname(develop_egg_link), - 'easy-install.pth') - paths_to_remove.add_pth(easy_install_pth, dist.location) + easy_install_pth = os.path.join( + os.path.dirname(develop_egg_link), "easy-install.pth" + ) + paths_to_remove.add_pth(easy_install_pth, dist_location) else: logger.debug( - 'Not sure how to uninstall: %s - Check: %s', - dist, dist.location, + "Not sure how to uninstall: %s - Check: %s", + dist, + dist_location, ) + if dist.in_usersite: + bin_dir = get_bin_user() + else: + bin_dir = get_bin_prefix() + # find distutils scripts= scripts - if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'): - for script in dist.metadata_listdir('scripts'): - if dist_in_usersite(dist): - bin_dir = get_bin_user() - else: - bin_dir = get_bin_prefix() - paths_to_remove.add(os.path.join(bin_dir, script)) + try: + for script in dist.iterdir("scripts"): + paths_to_remove.add(os.path.join(bin_dir, script.name)) if WINDOWS: - paths_to_remove.add(os.path.join(bin_dir, script) + '.bat') - - # find console_scripts - _scripts_to_remove = [] - console_scripts = dist.get_entry_map(group='console_scripts') - for name in console_scripts.keys(): - _scripts_to_remove.extend(_script_names(dist, name, False)) - # find gui_scripts - gui_scripts = dist.get_entry_map(group='gui_scripts') - for name in gui_scripts.keys(): - _scripts_to_remove.extend(_script_names(dist, name, True)) - - for s in _scripts_to_remove: + paths_to_remove.add(os.path.join(bin_dir, f"{script.name}.bat")) + except (FileNotFoundError, NotADirectoryError): + pass + + # find console_scripts and gui_scripts + def iter_scripts_to_remove( + dist: BaseDistribution, + bin_dir: str, + ) -> Iterator[str]: + for entry_point in dist.iter_entry_points(): + if entry_point.group == "console_scripts": + yield from _script_names(bin_dir, entry_point.name, False) + elif entry_point.group == "gui_scripts": + yield from _script_names(bin_dir, entry_point.name, True) + + for s in iter_scripts_to_remove(dist, bin_dir): paths_to_remove.add(s) return paths_to_remove @@ -585,45 +593,41 @@ def add(self, entry: str) -> None: # have more than "\\sever\share". Valid examples: "\\server\share\" or # "\\server\share\folder". 
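        # (Sketch of the splitdrive() behaviour relied on here, on Windows:
        #     os.path.splitdrive(r"C:\pkgs")[0]             -> "C:"
        #     os.path.splitdrive(r"\\server\share\pkg")[0]  -> r"\\server\share"
        #     os.path.splitdrive(r"pkg\sub")[0]             -> ""
        # so only drive-less, non-UNC entries get their separators rewritten.)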
if WINDOWS and not os.path.splitdrive(entry)[0]: - entry = entry.replace('\\', '/') + entry = entry.replace("\\", "/") self.entries.add(entry) def remove(self) -> None: - logger.verbose('Removing pth entries from %s:', self.file) + logger.verbose("Removing pth entries from %s:", self.file) # If the file doesn't exist, log a warning and return if not os.path.isfile(self.file): - logger.warning( - "Cannot remove entries from nonexistent file %s", self.file - ) + logger.warning("Cannot remove entries from nonexistent file %s", self.file) return - with open(self.file, 'rb') as fh: + with open(self.file, "rb") as fh: # windows uses '\r\n' with py3k, but uses '\n' with py2.x lines = fh.readlines() self._saved_lines = lines - if any(b'\r\n' in line for line in lines): - endline = '\r\n' + if any(b"\r\n" in line for line in lines): + endline = "\r\n" else: - endline = '\n' + endline = "\n" # handle missing trailing newline if lines and not lines[-1].endswith(endline.encode("utf-8")): lines[-1] = lines[-1] + endline.encode("utf-8") for entry in self.entries: try: - logger.verbose('Removing entry: %s', entry) + logger.verbose("Removing entry: %s", entry) lines.remove((entry + endline).encode("utf-8")) except ValueError: pass - with open(self.file, 'wb') as fh: + with open(self.file, "wb") as fh: fh.writelines(lines) def rollback(self) -> bool: if self._saved_lines is None: - logger.error( - 'Cannot roll back changes to %s, none were made', self.file - ) + logger.error("Cannot roll back changes to %s, none were made", self.file) return False - logger.debug('Rolling %s back to previous state', self.file) - with open(self.file, 'wb') as fh: + logger.debug("Rolling %s back to previous state", self.file) + with open(self.file, "wb") as fh: fh.writelines(self._saved_lines) return True diff --git a/pipenv/patched/notpip/_internal/resolution/base.py b/pipenv/patched/notpip/_internal/resolution/base.py index fa43475a60..d912b22dca 100644 --- a/pipenv/patched/notpip/_internal/resolution/base.py +++ b/pipenv/patched/notpip/_internal/resolution/base.py @@ -1,9 +1,11 @@ -from typing import Callable, List +from typing import Callable, List, Optional from pipenv.patched.notpip._internal.req.req_install import InstallRequirement from pipenv.patched.notpip._internal.req.req_set import RequirementSet -InstallRequirementProvider = Callable[[str, InstallRequirement], InstallRequirement] +InstallRequirementProvider = Callable[ + [str, Optional[InstallRequirement]], InstallRequirement +] class BaseResolver: diff --git a/pipenv/patched/notpip/_internal/resolution/legacy/resolver.py b/pipenv/patched/notpip/_internal/resolution/legacy/resolver.py index 8e52bcb085..a434e16d47 100644 --- a/pipenv/patched/notpip/_internal/resolution/legacy/resolver.py +++ b/pipenv/patched/notpip/_internal/resolution/legacy/resolver.py @@ -20,7 +20,7 @@ from typing import DefaultDict, Iterable, List, Optional, Set, Tuple from pipenv.patched.notpip._vendor.packaging import specifiers -from pipenv.patched.notpip._vendor.pkg_resources import Distribution +from pipenv.patched.notpip._vendor.packaging.requirements import Requirement from pipenv.patched.notpip._internal.cache import WheelCache from pipenv.patched.notpip._internal.exceptions import ( @@ -28,9 +28,11 @@ DistributionNotFound, HashError, HashErrors, + NoneMetadataError, UnsupportedPythonVersion, ) from pipenv.patched.notpip._internal.index.package_finder import PackageFinder +from pipenv.patched.notpip._internal.metadata import BaseDistribution from 
pipenv.patched.notpip._internal.models.link import Link from pipenv.patched.notpip._internal.operations.prepare import RequirementPreparer from pipenv.patched.notpip._internal.req.req_install import ( @@ -41,8 +43,8 @@ from pipenv.patched.notpip._internal.resolution.base import BaseResolver, InstallRequirementProvider from pipenv.patched.notpip._internal.utils.compatibility_tags import get_supported from pipenv.patched.notpip._internal.utils.logging import indent_log -from pipenv.patched.notpip._internal.utils.misc import dist_in_usersite, normalize_version_info -from pipenv.patched.notpip._internal.utils.packaging import check_requires_python, get_requires_python +from pipenv.patched.notpip._internal.utils.misc import normalize_version_info +from pipenv.patched.notpip._internal.utils.packaging import check_requires_python logger = logging.getLogger(__name__) @@ -50,7 +52,7 @@ def _check_dist_requires_python( - dist: Distribution, + dist: BaseDistribution, version_info: Tuple[int, int, int], ignore_requires_python: bool = False, ) -> None: @@ -66,14 +68,21 @@ def _check_dist_requires_python( :raises UnsupportedPythonVersion: When the given Python version isn't compatible. """ - requires_python = get_requires_python(dist) + # This idiosyncratically converts the SpecifierSet to str and let + # check_requires_python then parse it again into SpecifierSet. But this + # is the legacy resolver so I'm just not going to bother refactoring. + try: + requires_python = str(dist.requires_python) + except FileNotFoundError as e: + raise NoneMetadataError(dist, str(e)) try: is_compatible = check_requires_python( - requires_python, version_info=version_info + requires_python, + version_info=version_info, ) except specifiers.InvalidSpecifier as exc: logger.warning( - "Package %r has an invalid Requires-Python: %s", dist.project_name, exc + "Package %r has an invalid Requires-Python: %s", dist.raw_name, exc ) return @@ -84,7 +93,7 @@ def _check_dist_requires_python( if ignore_requires_python: logger.debug( "Ignoring failed Requires-Python check for package %r: %s not in %r", - dist.project_name, + dist.raw_name, version, requires_python, ) @@ -92,7 +101,7 @@ def _check_dist_requires_python( raise UnsupportedPythonVersion( "Package {!r} requires a different Python: {} not in {!r}".format( - dist.project_name, version, requires_python + dist.raw_name, version, requires_python ) ) @@ -194,7 +203,7 @@ def _set_req_to_reinstall(self, req: InstallRequirement) -> None: """ # Don't uninstall the conflict if doing a user install and the # conflict is not a user install. - if not self.use_user_site or dist_in_usersite(req.satisfied_by): + if not self.use_user_site or req.satisfied_by.in_usersite: req.should_reinstall = True req.satisfied_by = None @@ -303,7 +312,7 @@ def _populate_link(self, req: InstallRequirement) -> None: req.original_link_is_in_wheel_cache = True req.link = cache_entry.link - def _get_dist_for(self, req: InstallRequirement) -> Distribution: + def _get_dist_for(self, req: InstallRequirement) -> BaseDistribution: """Takes a InstallRequirement and returns a single AbstractDist \ representing a prepared variant of the same. 
""" @@ -378,11 +387,11 @@ def _resolve_one( more_reqs: List[InstallRequirement] = [] - def add_req(subreq: Distribution, extras_requested: Iterable[str]) -> None: - sub_install_req = self._make_install_req( - str(subreq), - req_to_install, - ) + def add_req(subreq: Requirement, extras_requested: Iterable[str]) -> None: + # This idiosyncratically converts the Requirement to str and let + # make_install_req then parse it again into Requirement. But this is + # the legacy resolver so I'm just not going to bother refactoring. + sub_install_req = self._make_install_req(str(subreq), req_to_install) parent_req_name = req_to_install.name to_scan_again, add_to_parent = requirement_set.add_requirement( sub_install_req, @@ -410,15 +419,20 @@ def add_req(subreq: Distribution, extras_requested: Iterable[str]) -> None: ",".join(req_to_install.extras), ) missing_requested = sorted( - set(req_to_install.extras) - set(dist.extras) + set(req_to_install.extras) - set(dist.iter_provided_extras()) ) for missing in missing_requested: - logger.warning("%s does not provide the extra '%s'", dist, missing) + logger.warning( + "%s %s does not provide the extra '%s'", + dist.raw_name, + dist.version, + missing, + ) available_requested = sorted( - set(dist.extras) & set(req_to_install.extras) + set(dist.iter_provided_extras()) & set(req_to_install.extras) ) - for subreq in dist.requires(available_requested): + for subreq in dist.iter_dependencies(available_requested): add_req(subreq, extras_requested=available_requested) return more_reqs diff --git a/pipenv/patched/notpip/_internal/resolution/resolvelib/base.py b/pipenv/patched/notpip/_internal/resolution/resolvelib/base.py index 3a2ad094d9..b21725ff3b 100644 --- a/pipenv/patched/notpip/_internal/resolution/resolvelib/base.py +++ b/pipenv/patched/notpip/_internal/resolution/resolvelib/base.py @@ -36,11 +36,8 @@ def from_ireq(cls, ireq: InstallRequirement) -> "Constraint": links = frozenset([ireq.link]) if ireq.link else frozenset() return Constraint(ireq.specifier, ireq.hashes(trust_internet=False), links) - def __nonzero__(self) -> bool: - return bool(self.specifier) or bool(self.hashes) or bool(self.links) - def __bool__(self) -> bool: - return self.__nonzero__() + return bool(self.specifier) or bool(self.hashes) or bool(self.links) def __and__(self, other: InstallRequirement) -> "Constraint": if not isinstance(other, InstallRequirement): diff --git a/pipenv/patched/notpip/_internal/resolution/resolvelib/candidates.py b/pipenv/patched/notpip/_internal/resolution/resolvelib/candidates.py index 6fdb59b711..8b1a55a306 100644 --- a/pipenv/patched/notpip/_internal/resolution/resolvelib/candidates.py +++ b/pipenv/patched/notpip/_internal/resolution/resolvelib/candidates.py @@ -2,13 +2,16 @@ import sys from typing import TYPE_CHECKING, Any, FrozenSet, Iterable, Optional, Tuple, Union, cast -from pipenv.patched.notpip._vendor.packaging.specifiers import InvalidSpecifier, SpecifierSet +from pipenv.patched.notpip._vendor.packaging.specifiers import SpecifierSet from pipenv.patched.notpip._vendor.packaging.utils import NormalizedName, canonicalize_name from pipenv.patched.notpip._vendor.packaging.version import Version -from pipenv.patched.notpip._vendor.packaging.version import parse as parse_version -from pipenv.patched.notpip._vendor.pkg_resources import Distribution -from pipenv.patched.notpip._internal.exceptions import HashError, MetadataInconsistent +from pipenv.patched.notpip._internal.exceptions import ( + HashError, + InstallationSubprocessError, + 
MetadataInconsistent, +) +from pipenv.patched.notpip._internal.metadata import BaseDistribution from pipenv.patched.notpip._internal.models.link import Link, links_equivalent from pipenv.patched.notpip._internal.models.wheel import Wheel from pipenv.patched.notpip._internal.req.constructors import ( @@ -16,8 +19,7 @@ install_req_from_line, ) from pipenv.patched.notpip._internal.req.req_install import InstallRequirement -from pipenv.patched.notpip._internal.utils.misc import dist_is_editable, normalize_version_info -from pipenv.patched.notpip._internal.utils.packaging import get_requires_python +from pipenv.patched.notpip._internal.utils.misc import normalize_version_info from .base import Candidate, CandidateVersion, Requirement, format_name @@ -85,6 +87,7 @@ def make_install_req_from_editable( use_pep517=template.use_pep517, isolated=template.isolated, constraint=template.constraint, + permit_editable_wheels=template.permit_editable_wheels, options=dict( install_options=template.install_options, global_options=template.global_options, @@ -93,16 +96,15 @@ def make_install_req_from_editable( ) -def make_install_req_from_dist( - dist: Distribution, template: InstallRequirement +def _make_install_req_from_dist( + dist: BaseDistribution, template: InstallRequirement ) -> InstallRequirement: - project_name = canonicalize_name(dist.project_name) if template.req: line = str(template.req) elif template.link: - line = f"{project_name} @ {template.link.url}" + line = f"{dist.canonical_name} @ {template.link.url}" else: - line = f"{project_name}=={dist.parsed_version}" + line = f"{dist.canonical_name}=={dist.version}" ireq = install_req_from_line( line, user_supplied=template.user_supplied, @@ -136,6 +138,7 @@ class exposes appropriate information to the resolver. found remote link (e.g. from pypi.org). 
""" + dist: BaseDistribution is_installed = False def __init__( @@ -180,7 +183,7 @@ def source_link(self) -> Optional[Link]: def project_name(self) -> NormalizedName: """The normalised name of the project the candidate refers to""" if self._name is None: - self._name = canonicalize_name(self.dist.project_name) + self._name = self.dist.canonical_name return self._name @property @@ -190,7 +193,7 @@ def name(self) -> str: @property def version(self) -> CandidateVersion: if self._version is None: - self._version = parse_version(self.dist.version) + self._version = self.dist.version return self._version def format_for_error(self) -> str: @@ -200,29 +203,27 @@ def format_for_error(self) -> str: self._link.file_path if self._link.is_file else self._link, ) - def _prepare_distribution(self) -> Distribution: + def _prepare_distribution(self) -> BaseDistribution: raise NotImplementedError("Override in subclass") - def _check_metadata_consistency(self, dist: Distribution) -> None: + def _check_metadata_consistency(self, dist: BaseDistribution) -> None: """Check for consistency of project name and version of dist.""" - canonical_name = canonicalize_name(dist.project_name) - if self._name is not None and self._name != canonical_name: + if self._name is not None and self._name != dist.canonical_name: raise MetadataInconsistent( self._ireq, "name", self._name, - dist.project_name, + dist.canonical_name, ) - parsed_version = parse_version(dist.version) - if self._version is not None and self._version != parsed_version: + if self._version is not None and self._version != dist.version: raise MetadataInconsistent( self._ireq, "version", str(self._version), - dist.version, + str(dist.version), ) - def _prepare(self) -> Distribution: + def _prepare(self) -> BaseDistribution: try: dist = self._prepare_distribution() except HashError as e: @@ -231,26 +232,19 @@ def _prepare(self) -> Distribution: # offending line to the user. e.req = self._ireq raise + except InstallationSubprocessError as exc: + # The output has been presented already, so don't duplicate it. + exc.context = "See above for output." 
+ raise + self._check_metadata_consistency(dist) return dist - def _get_requires_python_dependency(self) -> Optional[Requirement]: - requires_python = get_requires_python(self.dist) - if requires_python is None: - return None - try: - spec = SpecifierSet(requires_python) - except InvalidSpecifier as e: - message = "Package %r has an invalid Requires-Python: %s" - logger.warning(message, self.name, e) - return None - return self._factory.make_requires_python_requirement(spec) - def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]: - requires = self.dist.requires() if with_requires else () + requires = self.dist.iter_dependencies() if with_requires else () for r in requires: yield self._factory.make_requirement_from_spec(str(r), self._ireq) - yield self._get_requires_python_dependency() + yield self._factory.make_requires_python_requirement(self.dist.requires_python) def get_install_requirement(self) -> Optional[InstallRequirement]: ireq = self._ireq @@ -304,10 +298,9 @@ def __init__( version=version, ) - def _prepare_distribution(self) -> Distribution: - return self._factory.preparer.prepare_linked_requirement( - self._ireq, parallel_builds=True - ) + def _prepare_distribution(self) -> BaseDistribution: + preparer = self._factory.preparer + return preparer.prepare_linked_requirement(self._ireq, parallel_builds=True) class EditableCandidate(_InstallRequirementBackedCandidate): @@ -330,7 +323,7 @@ def __init__( version=version, ) - def _prepare_distribution(self) -> Distribution: + def _prepare_distribution(self) -> BaseDistribution: return self._factory.preparer.prepare_editable_requirement(self._ireq) @@ -340,17 +333,17 @@ class AlreadyInstalledCandidate(Candidate): def __init__( self, - dist: Distribution, + dist: BaseDistribution, template: InstallRequirement, factory: "Factory", ) -> None: self.dist = dist - self._ireq = make_install_req_from_dist(dist, template) + self._ireq = _make_install_req_from_dist(dist, template) self._factory = factory # This is just logging some messages, so we can do it eagerly. # The returned dist would be exactly the same as self.dist because we - # set satisfied_by in make_install_req_from_dist. + # set satisfied_by in _make_install_req_from_dist. # TODO: Supply reason based on force_reinstall and upgrade_strategy. 
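        # (This is the call that typically emits the familiar
        # "Requirement already satisfied: ..." log line.)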
skip_reason = "already satisfied" factory.preparer.prepare_installed_requirement(self._ireq, skip_reason) @@ -374,7 +367,7 @@ def __eq__(self, other: Any) -> bool: @property def project_name(self) -> NormalizedName: - return canonicalize_name(self.dist.project_name) + return self.dist.canonical_name @property def name(self) -> str: @@ -382,11 +375,11 @@ def name(self) -> str: @property def version(self) -> CandidateVersion: - return parse_version(self.dist.version) + return self.dist.version @property def is_editable(self) -> bool: - return dist_is_editable(self.dist) + return self.dist.editable def format_for_error(self) -> str: return f"{self.name} {self.version} (Installed)" @@ -394,7 +387,7 @@ def format_for_error(self) -> str: def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]: if not with_requires: return - for r in self.dist.requires(): + for r in self.dist.iter_dependencies(): yield self._factory.make_requirement_from_spec(str(r), self._ireq) def get_install_requirement(self) -> Optional[InstallRequirement]: @@ -494,8 +487,8 @@ def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requiremen # The user may have specified extras that the candidate doesn't # support. We ignore any unsupported extras here. - valid_extras = self.extras.intersection(self.base.dist.extras) - invalid_extras = self.extras.difference(self.base.dist.extras) + valid_extras = self.extras.intersection(self.base.dist.iter_provided_extras()) + invalid_extras = self.extras.difference(self.base.dist.iter_provided_extras()) for extra in sorted(invalid_extras): logger.warning( "%s %s does not provide the extra '%s'", @@ -504,7 +497,7 @@ def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requiremen extra, ) - for r in self.base.dist.requires(valid_extras): + for r in self.base.dist.iter_dependencies(valid_extras): requirement = factory.make_requirement_from_spec( str(r), self.base._ireq, valid_extras ) diff --git a/pipenv/patched/notpip/_internal/resolution/resolvelib/factory.py b/pipenv/patched/notpip/_internal/resolution/resolvelib/factory.py index 9b216a1ba5..c2b17cf9a8 100644 --- a/pipenv/patched/notpip/_internal/resolution/resolvelib/factory.py +++ b/pipenv/patched/notpip/_internal/resolution/resolvelib/factory.py @@ -19,7 +19,6 @@ ) from pipenv.patched.notpip._vendor.packaging.requirements import InvalidRequirement -from pipenv.patched.notpip._vendor.packaging.requirements import Requirement as PackagingRequirement from pipenv.patched.notpip._vendor.packaging.specifiers import SpecifierSet from pipenv.patched.notpip._vendor.packaging.utils import NormalizedName, canonicalize_name from pipenv.patched.notpip._vendor.resolvelib import ResolutionImpossible @@ -46,6 +45,7 @@ from pipenv.patched.notpip._internal.resolution.base import InstallRequirementProvider from pipenv.patched.notpip._internal.utils.compatibility_tags import get_supported from pipenv.patched.notpip._internal.utils.hashes import Hashes +from pipenv.patched.notpip._internal.utils.packaging import get_requirement from pipenv.patched.notpip._internal.utils.virtualenv import running_under_virtualenv from .base import Candidate, CandidateVersion, Constraint, Requirement @@ -97,6 +97,7 @@ def __init__( force_reinstall: bool, ignore_installed: bool, ignore_requires_python: bool, + suppress_build_failures: bool, py_version_info: Optional[Tuple[int, ...]] = None, ) -> None: self._finder = finder @@ -107,6 +108,7 @@ def __init__( self._use_user_site = use_user_site self._force_reinstall = 
force_reinstall self._ignore_requires_python = ignore_requires_python + self._suppress_build_failures = suppress_build_failures self._build_failures: Cache[InstallationError] = {} self._link_candidate_cache: Cache[LinkCandidate] = {} @@ -158,10 +160,7 @@ def _make_candidate_from_dist( try: base = self._installed_candidate_cache[dist.canonical_name] except KeyError: - from pipenv.patched.notpip._internal.metadata.pkg_resources import Distribution as _Dist - - compat_dist = cast(_Dist, dist)._dist - base = AlreadyInstalledCandidate(compat_dist, template, factory=self) + base = AlreadyInstalledCandidate(dist, template, factory=self) self._installed_candidate_cache[dist.canonical_name] = base if not extras: return base @@ -193,10 +192,22 @@ def _make_candidate_from_link( name=name, version=version, ) - except (InstallationSubprocessError, MetadataInconsistent) as e: - logger.warning("Discarding %s. %s", link, e) + except MetadataInconsistent as e: + logger.info( + "Discarding [blue underline]%s[/]: [yellow]%s[reset]", + link, + e, + extra={"markup": True}, + ) + self._build_failures[link] = e + return None + except InstallationSubprocessError as e: + if not self._suppress_build_failures: + raise + logger.warning("Discarding %s due to build failure: %s", link, e) self._build_failures[link] = e return None + base: BaseCandidate = self._editable_candidate_cache[link] else: if link not in self._link_candidate_cache: @@ -208,8 +219,19 @@ def _make_candidate_from_link( name=name, version=version, ) - except (InstallationSubprocessError, MetadataInconsistent) as e: - logger.warning("Discarding %s. %s", link, e) + except MetadataInconsistent as e: + logger.info( + "Discarding [blue underline]%s[/]: [yellow]%s[reset]", + link, + e, + extra={"markup": True}, + ) + self._build_failures[link] = e + return None + except InstallationSubprocessError as e: + if not self._suppress_build_failures: + raise + logger.warning("Discarding %s due to build failure: %s", link, e) self._build_failures[link] = e return None base = self._link_candidate_cache[link] @@ -263,7 +285,7 @@ def _get_installed_candidate() -> Optional[Candidate]: extras=extras, template=template, ) - # The candidate is a known incompatiblity. Don't use it. + # The candidate is a known incompatibility. Don't use it. if id(candidate) in incompatible_ids: return None return candidate @@ -276,14 +298,27 @@ def iter_index_candidate_infos() -> Iterator[IndexCandidateInfo]: ) icans = list(result.iter_applicable()) - # PEP 592: Yanked releases must be ignored unless only yanked - # releases can satisfy the version range. So if this is false, - # all yanked icans need to be skipped. + # PEP 592: Yanked releases are ignored unless the specifier + # explicitly pins a version (via '==' or '===') that can be + # solely satisfied by a yanked release. all_yanked = all(ican.link.is_yanked for ican in icans) + def is_pinned(specifier: SpecifierSet) -> bool: + for sp in specifier: + if sp.operator == "===": + return True + if sp.operator != "==": + continue + if sp.version.endswith(".*"): + continue + return True + return False + + pinned = is_pinned(specifier) + # PackageFinder returns earlier versions first, so we reverse. 
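            # (Illustration of the is_pinned() rule defined above:
            #     is_pinned(SpecifierSet("==1.2.3"))   -> True
            #     is_pinned(SpecifierSet("===1.2.3"))  -> True   (arbitrary equality)
            #     is_pinned(SpecifierSet("==1.2.*"))   -> False  (wildcard)
            #     is_pinned(SpecifierSet(">=1.2"))     -> False)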
for ican in reversed(icans): - if not all_yanked and ican.link.is_yanked: + if not (all_yanked and pinned) and ican.link.is_yanked: continue func = functools.partial( self._make_candidate_from_link, @@ -350,7 +385,7 @@ def _iter_candidates_from_constraints( def find_candidates( self, identifier: str, - requirements: Mapping[str, Iterator[Requirement]], + requirements: Mapping[str, Iterable[Requirement]], incompatibilities: Mapping[str, Iterator[Candidate]], constraint: Constraint, prefers_installed: bool, @@ -368,7 +403,7 @@ def find_candidates( # If the current identifier contains extras, add explicit candidates # from entries from extra-less identifier. with contextlib.suppress(InvalidRequirement): - parsed_requirement = PackagingRequirement(identifier) + parsed_requirement = get_requirement(identifier) explicit_candidates.update( self._iter_explicit_candidates_from_base( requirements.get(parsed_requirement.name, ()), @@ -377,7 +412,7 @@ def find_candidates( ) # Add explicit candidates from constraints. We only do this if there are - # kown ireqs, which represent requirements not already explicit. If + # known ireqs, which represent requirements not already explicit. If # there are no ireqs, we're constraining already-explicit requirements, # which is handled later when we return the explicit candidates. if ireqs: @@ -487,16 +522,20 @@ def make_requirement_from_candidate( def make_requirement_from_spec( self, specifier: str, - comes_from: InstallRequirement, + comes_from: Optional[InstallRequirement], requested_extras: Iterable[str] = (), ) -> Optional[Requirement]: ireq = self._make_install_req_from_spec(specifier, comes_from) return self._make_requirement_from_install_req(ireq, requested_extras) def make_requires_python_requirement( - self, specifier: Optional[SpecifierSet] + self, + specifier: SpecifierSet, ) -> Optional[Requirement]: - if self._ignore_requires_python or specifier is None: + if self._ignore_requires_python: + return None + # Don't bother creating a dependency for an empty Requires-Python. + if not str(specifier): return None return RequiresPythonRequirement(specifier, self._python_candidate) @@ -614,7 +653,7 @@ def get_installation_error( ] if requires_python_causes: # The comprehension above makes sure all Requirement instances are - # RequiresPythonRequirement, so let's cast for convinience. + # RequiresPythonRequirement, so let's cast for convenience. 
return self._report_requires_python_error( cast("Sequence[ConflictCause]", requires_python_causes), ) @@ -695,6 +734,6 @@ def describe_trigger(parent: Candidate) -> str: return DistributionNotFound( "ResolutionImpossible: for help visit " - "https://pip.pypa.io/en/latest/user_guide/" - "#fixing-conflicting-dependencies" + "https://pip.pypa.io/en/latest/topics/dependency-resolution/" + "#dealing-with-dependency-conflicts" ) diff --git a/pipenv/patched/notpip/_internal/resolution/resolvelib/found_candidates.py b/pipenv/patched/notpip/_internal/resolution/resolvelib/found_candidates.py index e0514d97ef..da284004a3 100644 --- a/pipenv/patched/notpip/_internal/resolution/resolvelib/found_candidates.py +++ b/pipenv/patched/notpip/_internal/resolution/resolvelib/found_candidates.py @@ -9,15 +9,30 @@ """ import functools -from typing import Callable, Iterator, Optional, Set, Tuple +from collections.abc import Sequence +from typing import TYPE_CHECKING, Any, Callable, Iterator, Optional, Set, Tuple from pipenv.patched.notpip._vendor.packaging.version import _BaseVersion -from pipenv.patched.notpip._vendor.six.moves import collections_abc # type: ignore from .base import Candidate IndexCandidateInfo = Tuple[_BaseVersion, Callable[[], Optional[Candidate]]] +if TYPE_CHECKING: + SequenceCandidate = Sequence[Candidate] +else: + # For compatibility: Python before 3.9 does not support using [] on the + # Sequence class. + # + # >>> from collections.abc import Sequence + # >>> Sequence[str] + # Traceback (most recent call last): + # File "", line 1, in + # TypeError: 'ABCMeta' object is not subscriptable + # + # TODO: Remove this block after dropping Python 3.8 support. + SequenceCandidate = Sequence + def _iter_built(infos: Iterator[IndexCandidateInfo]) -> Iterator[Candidate]: """Iterator for ``FoundCandidates``. @@ -90,7 +105,7 @@ def _iter_built_with_inserted( yield installed -class FoundCandidates(collections_abc.Sequence): +class FoundCandidates(SequenceCandidate): """A lazy sequence to provide candidates to the resolver. The intended usage is to return this from `find_matches()` so the resolver @@ -111,7 +126,7 @@ def __init__( self._prefers_installed = prefers_installed self._incompatible_ids = incompatible_ids - def __getitem__(self, index: int) -> Candidate: + def __getitem__(self, index: Any) -> Any: # Implemented to satisfy the ABC check. This is not needed by the # resolver, and should not be used by the provider either (for # performance reasons). @@ -138,5 +153,3 @@ def __bool__(self) -> bool: if self._prefers_installed and self._installed: return True return any(self) - - __nonzero__ = __bool__ # XXX: Python 2. diff --git a/pipenv/patched/notpip/_internal/resolution/resolvelib/provider.py b/pipenv/patched/notpip/_internal/resolution/resolvelib/provider.py index daaa437aec..12fae1f089 100644 --- a/pipenv/patched/notpip/_internal/resolution/resolvelib/provider.py +++ b/pipenv/patched/notpip/_internal/resolution/resolvelib/provider.py @@ -1,6 +1,15 @@ import collections import math -from typing import TYPE_CHECKING, Dict, Iterable, Iterator, Mapping, Sequence, Union +from typing import ( + TYPE_CHECKING, + Dict, + Iterable, + Iterator, + Mapping, + Sequence, + TypeVar, + Union, +) from pipenv.patched.notpip._vendor.resolvelib.providers import AbstractProvider @@ -37,6 +46,35 @@ # services to those objects (access to pip's finder and preparer). 
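# Usage sketch for the _get_with_identifier() helper defined just below
# (mapping contents are illustrative):
#
#     user_requested = {"requests": 1}
#     _get_with_identifier(user_requested, "requests[socks]", default=None)
#     # -> 1: the full identifier is not a key, so the "[socks]" extras
#     #    suffix is stripped and the bare name is looked up instead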
+D = TypeVar("D") +V = TypeVar("V") + + +def _get_with_identifier( + mapping: Mapping[str, V], + identifier: str, + default: D, +) -> Union[D, V]: + """Get item from a package name lookup mapping with a resolver identifier. + + This extra logic is needed when the target mapping is keyed by package + name, which cannot be directly looked up with an identifier (which may + contain requested extras). Additional logic is added to also look up a value + by "cleaning up" the extras from the identifier. + """ + if identifier in mapping: + return mapping[identifier] + # HACK: Theoretically we should check whether this identifier is a valid + # "NAME[EXTRAS]" format, and parse out the name part with packaging or + # some regular expression. But since pip's resolver only spits out three + # kinds of identifiers: normalized PEP 503 names, normalized names plus + # extras, and Requires-Python, we can cheat a bit here. + name, open_bracket, _ = identifier.partition("[") + if open_bracket and name in mapping: + return mapping[name] + return default + + class PipProvider(_ProviderBase): """Pip's provider implementation for resolvelib. @@ -66,12 +104,13 @@ def __init__( def identify(self, requirement_or_candidate: Union[Requirement, Candidate]) -> str: return requirement_or_candidate.name - def get_preference( + def get_preference( # type: ignore self, identifier: str, resolutions: Mapping[str, Candidate], candidates: Mapping[str, Iterator[Candidate]], - information: Mapping[str, Iterator["PreferenceInformation"]], + information: Mapping[str, Iterable["PreferenceInformation"]], + backtrack_causes: Sequence["PreferenceInformation"], ) -> "Preference": """Produce a sort key for given requirement based on preference. @@ -112,9 +151,9 @@ def get_preference( for _, parent in information[identifier] ) inferred_depth = min(d for d in parent_depths) + 1.0 - self._known_depths[identifier] = inferred_depth else: inferred_depth = 1.0 + self._known_depths[identifier] = inferred_depth requested_order = self._user_requested.get(identifier, math.inf) @@ -128,43 +167,34 @@ def get_preference( # (Most projects specify it only to request for an installer feature, # which does not work, but that's another topic.) Intentionally # delaying Setuptools helps reduce branches the resolver has to check. - # This serves as a temporary fix for issues like "apache-airlfow[all]" + # This serves as a temporary fix for issues like "apache-airflow[all]" # while we work on "proper" branch pruning techniques. delay_this = identifier == "setuptools" + # Prefer the causes of backtracking on the assumption that the problem + # resolving the dependency tree is related to the failures that caused + # the backtracking + backtrack_cause = self.is_backtrack_cause(identifier, backtrack_causes) + return ( not requires_python, delay_this, not direct, not pinned, + not backtrack_cause, inferred_depth, requested_order, not unfree, identifier, ) - def _get_constraint(self, identifier: str) -> Constraint: - if identifier in self._constraints: - return self._constraints[identifier] - - # HACK: Theoratically we should check whether this identifier is a valid - # "NAME[EXTRAS]" format, and parse out the name part with packaging or - # some regular expression. But since pip's resolver only spits out - # three kinds of identifiers: normalized PEP 503 names, normalized names - # plus extras, and Requires-Python, we can cheat a bit here. 
- name, open_bracket, _ = identifier.partition("[") - if open_bracket and name in self._constraints: - return self._constraints[name] - - return Constraint.empty() - def find_matches( self, identifier: str, requirements: Mapping[str, Iterator[Requirement]], incompatibilities: Mapping[str, Iterator[Candidate]], ) -> Iterable[Candidate]: - def _eligible_for_upgrade(name: str) -> bool: + def _eligible_for_upgrade(identifier: str) -> bool: """Are upgrades allowed for this project? This checks the upgrade strategy, and whether the project was one @@ -178,13 +208,23 @@ def _eligible_for_upgrade(name: str) -> bool: if self._upgrade_strategy == "eager": return True elif self._upgrade_strategy == "only-if-needed": - return name in self._user_requested + user_order = _get_with_identifier( + self._user_requested, + identifier, + default=None, + ) + return user_order is not None return False + constraint = _get_with_identifier( + self._constraints, + identifier, + default=Constraint.empty(), + ) return self._factory.find_candidates( identifier=identifier, requirements=requirements, - constraint=self._get_constraint(identifier), + constraint=constraint, prefers_installed=(not _eligible_for_upgrade(identifier)), incompatibilities=incompatibilities, ) @@ -195,3 +235,14 @@ def is_satisfied_by(self, requirement: Requirement, candidate: Candidate) -> boo def get_dependencies(self, candidate: Candidate) -> Sequence[Requirement]: with_requires = not self._ignore_dependencies return [r for r in candidate.iter_dependencies(with_requires) if r is not None] + + @staticmethod + def is_backtrack_cause( + identifier: str, backtrack_causes: Sequence["PreferenceInformation"] + ) -> bool: + for backtrack_cause in backtrack_causes: + if identifier == backtrack_cause.requirement.name: + return True + if backtrack_cause.parent and identifier == backtrack_cause.parent.name: + return True + return False diff --git a/pipenv/patched/notpip/_internal/resolution/resolvelib/reporter.py b/pipenv/patched/notpip/_internal/resolution/resolvelib/reporter.py index e0155df043..f01d6535a9 100644 --- a/pipenv/patched/notpip/_internal/resolution/resolvelib/reporter.py +++ b/pipenv/patched/notpip/_internal/resolution/resolvelib/reporter.py @@ -27,9 +27,8 @@ def __init__(self) -> None: 13: ( "This is taking longer than usual. You might need to provide " "the dependency resolver with stricter constraints to reduce " - "runtime. If you want to abort this run, you can press " - "Ctrl + C to do so. To improve how pip performs, tell us what " - "happened here: https://pip.pypa.io/surveys/backtracking" + "runtime. See https://pip.pypa.io/warnings/backtracking for " + "guidance. If you want to abort this run, press Ctrl + C." 
), } diff --git a/pipenv/patched/notpip/_internal/resolution/resolvelib/requirements.py b/pipenv/patched/notpip/_internal/resolution/resolvelib/requirements.py index 3579570665..cfd7bd68bb 100644 --- a/pipenv/patched/notpip/_internal/resolution/resolvelib/requirements.py +++ b/pipenv/patched/notpip/_internal/resolution/resolvelib/requirements.py @@ -21,12 +21,12 @@ def __repr__(self) -> str: @property def project_name(self) -> NormalizedName: - # No need to canonicalise - the candidate did this + # No need to canonicalize - the candidate did this return self.candidate.project_name @property def name(self) -> str: - # No need to canonicalise - the candidate did this + # No need to canonicalize - the candidate did this return self.candidate.name def format_for_error(self) -> str: diff --git a/pipenv/patched/notpip/_internal/resolution/resolvelib/resolver.py b/pipenv/patched/notpip/_internal/resolution/resolvelib/resolver.py index 99d5984f41..ec64dcc555 100644 --- a/pipenv/patched/notpip/_internal/resolution/resolvelib/resolver.py +++ b/pipenv/patched/notpip/_internal/resolution/resolvelib/resolver.py @@ -19,8 +19,6 @@ PipDebuggingReporter, PipReporter, ) -from pipenv.patched.notpip._internal.utils.deprecation import deprecated -from pipenv.patched.notpip._internal.utils.filetypes import is_archive_file from .base import Candidate, Requirement from .factory import Factory @@ -49,6 +47,7 @@ def __init__( ignore_requires_python: bool, force_reinstall: bool, upgrade_strategy: str, + suppress_build_failures: bool, py_version_info: Optional[Tuple[int, ...]] = None, ): super().__init__() @@ -63,6 +62,7 @@ def __init__( force_reinstall=force_reinstall, ignore_installed=ignore_installed, ignore_requires_python=ignore_requires_python, + suppress_build_failures=suppress_build_failures, py_version_info=py_version_info, ) self.ignore_dependencies = ignore_dependencies @@ -136,25 +136,6 @@ def resolve( ) continue - looks_like_sdist = ( - is_archive_file(candidate.source_link.file_path) - and candidate.source_link.ext != ".zip" - ) - if looks_like_sdist: - # is a local sdist -- show a deprecation warning! - reason = ( - "Source distribution is being reinstalled despite an " - "installed package having the same name and version as " - "the installed package." - ) - replacement = "use --force-reinstall" - deprecated( - reason=reason, - replacement=replacement, - gone_in="21.3", - issue=8711, - ) - # is a local sdist or path -- reinstall ireq.should_reinstall = True else: @@ -192,17 +173,19 @@ def get_installation_order( get installed one-by-one. The current implementation creates a topological ordering of the - dependency graph, while breaking any cycles in the graph at arbitrary - points. We make no guarantees about where the cycle would be broken, - other than they would be broken. + dependency graph, giving more weight to packages with less + or no dependencies, while breaking any cycles in the graph at + arbitrary points. We make no guarantees about where the cycle + would be broken, other than it *would* be broken. """ assert self._result is not None, "must call resolve() first" + if not req_set.requirements: + # Nothing is left to install, so we do not need an order. 
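            # (Returning early here also skips get_topological_weights(),
            # which would otherwise walk the whole resolution graph for
            # nothing.)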
+ return [] + graph = self._result.graph - weights = get_topological_weights( - graph, - expected_node_count=len(self._result.mapping) + 1, - ) + weights = get_topological_weights(graph, set(req_set.requirements.keys())) sorted_items = sorted( req_set.requirements.items(), @@ -213,23 +196,32 @@ def get_installation_order( def get_topological_weights( - graph: "DirectedGraph[Optional[str]]", expected_node_count: int + graph: "DirectedGraph[Optional[str]]", requirement_keys: Set[str] ) -> Dict[Optional[str], int]: """Assign weights to each node based on how "deep" they are. This implementation may change at any point in the future without prior notice. - We take the length for the longest path to any node from root, ignoring any - paths that contain a single node twice (i.e. cycles). This is done through - a depth-first search through the graph, while keeping track of the path to - the node. + We first simplify the dependency graph by pruning any leaves and giving them + the highest weight: a package without any dependencies should be installed + first. This is done again and again in the same way, giving ever less weight + to the newly found leaves. The loop stops when no leaves are left: all + remaining packages have at least one dependency left in the graph. + + Then we continue with the remaining graph, by taking the length for the + longest path to any node from root, ignoring any paths that contain a single + node twice (i.e. cycles). This is done through a depth-first search through + the graph, while keeping track of the path to the node. Cycles in the graph result would result in node being revisited while also - being it's own path. In this case, take no action. This helps ensure we + being on its own path. In this case, take no action. This helps ensure we don't get stuck in a cycle. When assigning weight, the longer path (i.e. larger length) is preferred. + + We are only interested in the weights of packages that are in the + requirement_keys. """ path: Set[Optional[str]] = set() weights: Dict[Optional[str], int] = {} @@ -245,15 +237,49 @@ def visit(node: Optional[str]) -> None: visit(child) path.remove(node) + if node not in requirement_keys: + return + last_known_parent_count = weights.get(node, 0) weights[node] = max(last_known_parent_count, len(path)) + # Simplify the graph, pruning leaves that have no dependencies. + # This is needed for large graphs (say over 200 packages) because the + # `visit` function is exponentially slower then, taking minutes. + # See https://github.com/pypa/pip/issues/10557 + # We will loop until we explicitly break the loop. + while True: + leaves = set() + for key in graph: + if key is None: + continue + for _child in graph.iter_children(key): + # This means we have at least one child + break + else: + # No child. + leaves.add(key) + if not leaves: + # We are done simplifying. + break + # Calculate the weight for the leaves. + weight = len(graph) - 1 + for leaf in leaves: + if leaf not in requirement_keys: + continue + weights[leaf] = weight + # Remove the leaves from the graph, making it simpler. + for leaf in leaves: + graph.remove(leaf) + + # Visit the remaining graph. # `None` is guaranteed to be the root node by resolvelib. visit(None) - # Sanity checks - assert weights[None] == 0 - assert len(weights) == expected_node_count + # Sanity check: all requirement keys should be in the weights, + # and no other keys should be in the weights. 
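    # (Worked example: for a graph None -> A -> B plus None -> C, the pruning
    # loop above gives the dependency-free leaves B and C the highest weight,
    # len(graph) - 1, then prunes A on the next pass, leaving visit() to walk
    # only whatever cyclic remainder is left.)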
+ difference = set(weights.keys()).difference(requirement_keys) + assert not difference, difference return weights diff --git a/pipenv/patched/notpip/_internal/self_outdated_check.py b/pipenv/patched/notpip/_internal/self_outdated_check.py index acc8d78168..3376186cf0 100644 --- a/pipenv/patched/notpip/_internal/self_outdated_check.py +++ b/pipenv/patched/notpip/_internal/self_outdated_check.py @@ -23,17 +23,15 @@ logger = logging.getLogger(__name__) -def _get_statefile_name(key): - # type: (str) -> str +def _get_statefile_name(key: str) -> str: key_bytes = key.encode() name = hashlib.sha224(key_bytes).hexdigest() return name class SelfCheckState: - def __init__(self, cache_dir): - # type: (str) -> None - self.state = {} # type: Dict[str, Any] + def __init__(self, cache_dir: str) -> None: + self.state: Dict[str, Any] = {} self.statefile_path = None # Try to load the existing state @@ -50,12 +48,10 @@ def __init__(self, cache_dir): pass @property - def key(self): - # type: () -> str + def key(self) -> str: return sys.prefix - def save(self, pypi_version, current_time): - # type: (str, datetime.datetime) -> None + def save(self, pypi_version: str, current_time: datetime.datetime) -> None: # If we do not have a path to cache in, don't bother saving. if not self.statefile_path: return @@ -90,8 +86,7 @@ def save(self, pypi_version, current_time): pass -def was_installed_by_pip(pkg): - # type: (str) -> bool +def was_installed_by_pip(pkg: str) -> bool: """Checks whether pkg was installed by pip This is used not to display the upgrade message when pip is in fact @@ -101,8 +96,7 @@ def was_installed_by_pip(pkg): return dist is not None and "pip" == dist.installer -def pip_self_version_check(session, options): - # type: (PipSession, optparse.Values) -> None +def pip_self_version_check(session: PipSession, options: optparse.Values) -> None: """Check for an update for pip. Limit the frequency of checks to once per week. State is stored either in @@ -123,8 +117,7 @@ def pip_self_version_check(session, options): # Determine if we need to refresh the state if "last_check" in state.state and "pypi_version" in state.state: last_check = datetime.datetime.strptime( - state.state["last_check"], - SELFCHECK_DATE_FMT + state.state["last_check"], SELFCHECK_DATE_FMT ) if (current_time - last_check).total_seconds() < 7 * 24 * 60 * 60: pypi_version = state.state["pypi_version"] @@ -148,6 +141,9 @@ def pip_self_version_check(session, options): finder = PackageFinder.create( link_collector=link_collector, selection_prefs=selection_prefs, + use_deprecated_html5lib=( + "html5lib" in options.deprecated_features_enabled + ), ) best_candidate = finder.find_best_candidate("pip").best_candidate if best_candidate is None: @@ -160,9 +156,9 @@ def pip_self_version_check(session, options): remote_version = parse_version(pypi_version) local_version_is_older = ( - pip_version < remote_version and - pip_version.base_version != remote_version.base_version and - was_installed_by_pip('pip') + pip_version < remote_version + and pip_version.base_version != remote_version.base_version + and was_installed_by_pip("pip") ) # Determine if our pypi_version is older @@ -172,13 +168,19 @@ def pip_self_version_check(session, options): # We cannot tell how the current pip is available in the current # command context, so be pragmatic here and suggest the command # that's always available. This does not accommodate spaces in - # `sys.executable`. 
+ # `sys.executable` on purpose as it is not possible to do it + # correctly without knowing the user's shell. Thus, + # it won't be done until possible through the standard library. + # Do not be tempted to use the undocumented subprocess.list2cmdline. + # It is considered an internal implementation detail for a reason. pip_cmd = f"{sys.executable} -m pip" logger.warning( "You are using pip version %s; however, version %s is " "available.\nYou should consider upgrading via the " "'%s install --upgrade pip' command.", - pip_version, pypi_version, pip_cmd + pip_version, + pypi_version, + pip_cmd, ) except Exception: logger.debug( diff --git a/pipenv/patched/notpip/_internal/utils/appdirs.py b/pipenv/patched/notpip/_internal/utils/appdirs.py index 1e9983654b..e2cffb316f 100644 --- a/pipenv/patched/notpip/_internal/utils/appdirs.py +++ b/pipenv/patched/notpip/_internal/utils/appdirs.py @@ -7,29 +7,46 @@ """ import os +import sys from typing import List -from pipenv.patched.notpip._vendor import appdirs as _appdirs +from pipenv.patched.notpip._vendor import platformdirs as _appdirs def user_cache_dir(appname: str) -> str: return _appdirs.user_cache_dir(appname, appauthor=False) +def _macos_user_config_dir(appname: str, roaming: bool = True) -> str: + # Use ~/Application Support/pip, if the directory exists. + path = _appdirs.user_data_dir(appname, appauthor=False, roaming=roaming) + if os.path.isdir(path): + return path + + # Use a Linux-like ~/.config/pip, by default. + linux_like_path = "~/.config/" + if appname: + linux_like_path = os.path.join(linux_like_path, appname) + + return os.path.expanduser(linux_like_path) + + def user_config_dir(appname: str, roaming: bool = True) -> str: - path = _appdirs.user_config_dir(appname, appauthor=False, roaming=roaming) - if _appdirs.system == "darwin" and not os.path.isdir(path): - path = os.path.expanduser("~/.config/") - if appname: - path = os.path.join(path, appname) - return path + if sys.platform == "darwin": + return _macos_user_config_dir(appname, roaming) + + return _appdirs.user_config_dir(appname, appauthor=False, roaming=roaming) # for the discussion regarding site_config_dir locations # see def site_config_dirs(appname: str) -> List[str]: + if sys.platform == "darwin": + return [_appdirs.site_data_dir(appname, appauthor=False, multipath=True)] + dirval = _appdirs.site_config_dir(appname, appauthor=False, multipath=True) - if _appdirs.system not in ["win32", "darwin"]: - # always look in /etc directly as well - return dirval.split(os.pathsep) + ["/etc"] - return [dirval] + if sys.platform == "win32": + return [dirval] + + # Unix-y system. Look in /etc as well. 
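    # (With default XDG settings this yields, for example,
    # site_config_dirs("pip") == ["/etc/xdg/pip", "/etc"] on Linux, while
    # macOS and Windows each return the single platform directory above.)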
+ return dirval.split(os.pathsep) + ["/etc"] diff --git a/pipenv/patched/notpip/_internal/utils/compatibility_tags.py b/pipenv/patched/notpip/_internal/utils/compatibility_tags.py index f1badcc04d..dd1e7cddab 100644 --- a/pipenv/patched/notpip/_internal/utils/compatibility_tags.py +++ b/pipenv/patched/notpip/_internal/utils/compatibility_tags.py @@ -2,9 +2,10 @@ """ import re -from typing import TYPE_CHECKING, List, Optional, Tuple +from typing import List, Optional, Tuple from pipenv.patched.notpip._vendor.packaging.tags import ( + PythonVersion, Tag, compatible_tags, cpython_tags, @@ -14,10 +15,6 @@ mac_platforms, ) -if TYPE_CHECKING: - from pipenv.patched.notpip._vendor.packaging.tags import PythonVersion - - _osx_arch_pat = re.compile(r"(.+)_(\d+)_(\d+)_(.+)") @@ -95,7 +92,7 @@ def _expand_allowed_platforms(platforms: Optional[List[str]]) -> Optional[List[s return result -def _get_python_version(version: str) -> "PythonVersion": +def _get_python_version(version: str) -> PythonVersion: if len(version) > 1: return int(version[0]), int(version[1:]) else: @@ -132,7 +129,7 @@ def get_supported( """ supported: List[Tag] = [] - python_version: Optional["PythonVersion"] = None + python_version: Optional[PythonVersion] = None if version is not None: python_version = _get_python_version(version) diff --git a/pipenv/patched/notpip/_internal/utils/deprecation.py b/pipenv/patched/notpip/_internal/utils/deprecation.py index c9cd17ebb9..a3a0888210 100644 --- a/pipenv/patched/notpip/_internal/utils/deprecation.py +++ b/pipenv/patched/notpip/_internal/utils/deprecation.py @@ -8,7 +8,7 @@ from pipenv.patched.notpip._vendor.packaging.version import parse -from pipenv.patched.notpip import __version__ as current_version +from pipenv.patched.notpip import __version__ as current_version # NOTE: tests patch this name. DEPRECATION_MSG_PREFIX = "DEPRECATION: " @@ -53,52 +53,68 @@ def install_warning_logger() -> None: def deprecated( + *, reason: str, replacement: Optional[str], gone_in: Optional[str], + feature_flag: Optional[str] = None, issue: Optional[int] = None, ) -> None: """Helper to deprecate existing functionality. reason: Textual reason shown to the user about why this functionality has - been deprecated. + been deprecated. Should be a complete sentence. replacement: Textual suggestion shown to the user about what alternative functionality they can use. gone_in: The version of pip does this functionality should get removed in. - Raises errors if pip's current version is greater than or equal to + Raises an error if pip's current version is greater than or equal to this. + feature_flag: + Command-line flag of the form --use-feature={feature_flag} for testing + upcoming functionality. issue: Issue number on the tracker that would serve as a useful place for users to find related discussion and provide feedback. - - Always pass replacement, gone_in and issue as keyword arguments for clarity - at the call site. """ - # Construct a nice message. - # This is eagerly formatted as we want it to get logged as if someone - # typed this entire message out. - sentences = [ - (reason, DEPRECATION_MSG_PREFIX + "{}"), - (gone_in, "pip {} will remove support for this functionality."), - (replacement, "A possible replacement is {}."), + # Determine whether or not the feature is already gone in this version. + is_gone = gone_in is not None and parse(current_version) >= parse(gone_in) + + message_parts = [ + (reason, f"{DEPRECATION_MSG_PREFIX}{{}}"), + ( + gone_in, + "pip {} will enforce this behaviour change." 
+ if not is_gone + else "Since pip {}, this is no longer supported.", + ), + ( + replacement, + "A possible replacement is {}.", + ), + ( + feature_flag, + "You can use the flag --use-feature={} to test the upcoming behaviour." + if not is_gone + else None, + ), ( issue, - ( - "You can find discussion regarding this at " - "https://github.com/pypa/pip/issues/{}." - ), + "Discussion can be found at https://github.com/pypa/pip/issues/{}", ), ] + message = " ".join( - template.format(val) for val, template in sentences if val is not None + format_str.format(value) + for value, format_str in message_parts + if format_str is not None and value is not None ) - # Raise as an error if it has to be removed. - if gone_in is not None and parse(current_version) >= parse(gone_in): + # Raise as an error if this behaviour is deprecated. + if is_gone: raise PipDeprecationWarning(message) warnings.warn(message, category=PipDeprecationWarning, stacklevel=2) diff --git a/pipenv/patched/notpip/_internal/utils/direct_url_helpers.py b/pipenv/patched/notpip/_internal/utils/direct_url_helpers.py index 8729cc1b97..34504d3e02 100644 --- a/pipenv/patched/notpip/_internal/utils/direct_url_helpers.py +++ b/pipenv/patched/notpip/_internal/utils/direct_url_helpers.py @@ -2,6 +2,7 @@ from pipenv.patched.notpip._internal.models.direct_url import ArchiveInfo, DirectUrl, DirInfo, VcsInfo from pipenv.patched.notpip._internal.models.link import Link +from pipenv.patched.notpip._internal.utils.urls import path_to_url from pipenv.patched.notpip._internal.vcs import vcs @@ -28,6 +29,13 @@ def direct_url_as_pep440_direct_reference(direct_url: DirectUrl, name: str) -> s return requirement +def direct_url_for_editable(source_dir: str) -> DirectUrl: + return DirectUrl( + url=path_to_url(source_dir), + info=DirInfo(editable=True), + ) + + def direct_url_from_link( link: Link, source_dir: Optional[str] = None, link_is_in_wheel_cache: bool = False ) -> DirectUrl: diff --git a/pipenv/patched/notpip/_internal/utils/egg_link.py b/pipenv/patched/notpip/_internal/utils/egg_link.py new file mode 100644 index 0000000000..3c1a44b627 --- /dev/null +++ b/pipenv/patched/notpip/_internal/utils/egg_link.py @@ -0,0 +1,75 @@ +# The following comment should be removed at some point in the future. +# mypy: strict-optional=False + +import os +import re +import sys +from typing import Optional + +from pipenv.patched.notpip._internal.locations import site_packages, user_site +from pipenv.patched.notpip._internal.utils.virtualenv import ( + running_under_virtualenv, + virtualenv_no_global, +) + +__all__ = [ + "egg_link_path_from_sys_path", + "egg_link_path_from_location", +] + + +def _egg_link_name(raw_name: str) -> str: + """ + Convert a Name metadata value to a .egg-link name, by applying + the same substitution as pkg_resources's safe_name function. + Note: we cannot use canonicalize_name because it has a different logic. + """ + return re.sub("[^A-Za-z0-9.]+", "-", raw_name) + ".egg-link" + + +def egg_link_path_from_sys_path(raw_name: str) -> Optional[str]: + """ + Look for a .egg-link file for project name, by walking sys.path. + """ + egg_link_name = _egg_link_name(raw_name) + for path_item in sys.path: + egg_link = os.path.join(path_item, egg_link_name) + if os.path.isfile(egg_link): + return egg_link + return None + + +def egg_link_path_from_location(raw_name: str) -> Optional[str]: + """ + Return the path for the .egg-link file if it exists, otherwise, None. 
+ + There's 3 scenarios: + 1) not in a virtualenv + try to find in site.USER_SITE, then site_packages + 2) in a no-global virtualenv + try to find in site_packages + 3) in a yes-global virtualenv + try to find in site_packages, then site.USER_SITE + (don't look in global location) + + For #1 and #3, there could be odd cases, where there's an egg-link in 2 + locations. + + This method will just return the first one found. + """ + sites = [] + if running_under_virtualenv(): + sites.append(site_packages) + if not virtualenv_no_global() and user_site: + sites.append(user_site) + else: + if user_site: + sites.append(user_site) + sites.append(site_packages) + + egg_link_name = _egg_link_name(raw_name) + for site in sites: + egglink = os.path.join(site, egg_link_name) + if os.path.isfile(egglink): + return egglink + return None diff --git a/pipenv/patched/notpip/_internal/utils/filetypes.py b/pipenv/patched/notpip/_internal/utils/filetypes.py index 287473697a..b7c3adc48a 100644 --- a/pipenv/patched/notpip/_internal/utils/filetypes.py +++ b/pipenv/patched/notpip/_internal/utils/filetypes.py @@ -6,21 +6,20 @@ from pipenv.patched.notpip._internal.utils.misc import splitext WHEEL_EXTENSION = ".whl" -BZ2_EXTENSIONS = (".tar.bz2", ".tbz") # type: Tuple[str, ...] -XZ_EXTENSIONS = ( +BZ2_EXTENSIONS: Tuple[str, ...] = (".tar.bz2", ".tbz") +XZ_EXTENSIONS: Tuple[str, ...] = ( ".tar.xz", ".txz", ".tlz", ".tar.lz", ".tar.lzma", -) # type: Tuple[str, ...] -ZIP_EXTENSIONS = (".zip", WHEEL_EXTENSION) # type: Tuple[str, ...] -TAR_EXTENSIONS = (".tar.gz", ".tgz", ".tar") # type: Tuple[str, ...] +) +ZIP_EXTENSIONS: Tuple[str, ...] = (".zip", WHEEL_EXTENSION) +TAR_EXTENSIONS: Tuple[str, ...] = (".tar.gz", ".tgz", ".tar") ARCHIVE_EXTENSIONS = ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS -def is_archive_file(name): - # type: (str) -> bool +def is_archive_file(name: str) -> bool: """Return True if `name` is a considered as an archive file.""" ext = splitext(name)[1].lower() if ext in ARCHIVE_EXTENSIONS: diff --git a/pipenv/patched/notpip/_internal/utils/glibc.py b/pipenv/patched/notpip/_internal/utils/glibc.py index 1c9ff35446..7bd3c20681 100644 --- a/pipenv/patched/notpip/_internal/utils/glibc.py +++ b/pipenv/patched/notpip/_internal/utils/glibc.py @@ -6,14 +6,12 @@ from typing import Optional, Tuple -def glibc_version_string(): - # type: () -> Optional[str] +def glibc_version_string() -> Optional[str]: "Returns glibc version string, or None if not using glibc." return glibc_version_string_confstr() or glibc_version_string_ctypes() -def glibc_version_string_confstr(): - # type: () -> Optional[str] +def glibc_version_string_confstr() -> Optional[str]: "Primary implementation of glibc_version_string using os.confstr." # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely # to be broken or missing. This strategy is used in the standard library @@ -30,8 +28,7 @@ def glibc_version_string_confstr(): return version -def glibc_version_string_ctypes(): - # type: () -> Optional[str] +def glibc_version_string_ctypes() -> Optional[str]: "Fallback implementation of glibc_version_string using ctypes." try: @@ -78,8 +75,7 @@ def glibc_version_string_ctypes(): # versions that was generated by pip 8.1.2 and earlier is useless and # misleading. Solution: instead of using platform, use our code that actually # works. 
-def libc_ver(): - # type: () -> Tuple[str, str] +def libc_ver() -> Tuple[str, str]: """Try to determine the glibc version Returns a tuple of strings (lib, version) which default to empty strings diff --git a/pipenv/patched/notpip/_internal/utils/hashes.py b/pipenv/patched/notpip/_internal/utils/hashes.py index 9730e55669..88692c4412 100644 --- a/pipenv/patched/notpip/_internal/utils/hashes.py +++ b/pipenv/patched/notpip/_internal/utils/hashes.py @@ -28,8 +28,7 @@ class Hashes: """ - def __init__(self, hashes=None): - # type: (Dict[str, List[str]]) -> None + def __init__(self, hashes: Dict[str, List[str]] = None) -> None: """ :param hashes: A dict of algorithm names pointing to lists of allowed hex digests @@ -41,8 +40,7 @@ def __init__(self, hashes=None): allowed[alg] = sorted(keys) self._allowed = allowed - def __and__(self, other): - # type: (Hashes) -> Hashes + def __and__(self, other: "Hashes") -> "Hashes": if not isinstance(other, Hashes): return NotImplemented @@ -62,21 +60,14 @@ def __and__(self, other): return Hashes(new) @property - def digest_count(self): - # type: () -> int + def digest_count(self) -> int: return sum(len(digests) for digests in self._allowed.values()) - def is_hash_allowed( - self, - hash_name, # type: str - hex_digest, # type: str - ): - # type: (...) -> bool + def is_hash_allowed(self, hash_name: str, hex_digest: str) -> bool: """Return whether the given hex digest is allowed.""" return hex_digest in self._allowed.get(hash_name, []) - def check_against_chunks(self, chunks): - # type: (Iterator[bytes]) -> None + def check_against_chunks(self, chunks: Iterator[bytes]) -> None: """Check good hashes against ones built from iterable of chunks of data. @@ -99,12 +90,10 @@ def check_against_chunks(self, chunks): return self._raise(gots) - def _raise(self, gots): - # type: (Dict[str, _Hash]) -> NoReturn + def _raise(self, gots: Dict[str, "_Hash"]) -> "NoReturn": raise HashMismatch(self._allowed, gots) - def check_against_file(self, file): - # type: (BinaryIO) -> None + def check_against_file(self, file: BinaryIO) -> None: """Check good hashes against a file-like object Raise HashMismatch if none match. @@ -112,28 +101,20 @@ def check_against_file(self, file): """ return self.check_against_chunks(read_chunks(file)) - def check_against_path(self, path): - # type: (str) -> None + def check_against_path(self, path: str) -> None: with open(path, "rb") as file: return self.check_against_file(file) - def __nonzero__(self): - # type: () -> bool + def __bool__(self) -> bool: """Return whether I know any known-good hashes.""" return bool(self._allowed) - def __bool__(self): - # type: () -> bool - return self.__nonzero__() - - def __eq__(self, other): - # type: (object) -> bool + def __eq__(self, other: object) -> bool: if not isinstance(other, Hashes): return NotImplemented return self._allowed == other._allowed - def __hash__(self): - # type: () -> int + def __hash__(self) -> int: return hash( ",".join( sorted( @@ -153,13 +134,11 @@ class MissingHashes(Hashes): """ - def __init__(self): - # type: () -> None + def __init__(self) -> None: """Don't offer the ``hashes`` kwarg.""" # Pass our favorite hash in to generate a "gotten hash". With the # empty list, it will never match, so an error will always raise. 
super().__init__(hashes={FAVORITE_HASH: []}) - def _raise(self, gots): - # type: (Dict[str, _Hash]) -> NoReturn + def _raise(self, gots: Dict[str, "_Hash"]) -> "NoReturn": raise HashMissing(gots[FAVORITE_HASH].hexdigest()) diff --git a/pipenv/patched/notpip/_internal/utils/inject_securetransport.py b/pipenv/patched/notpip/_internal/utils/inject_securetransport.py index d1a6256a05..c98e5ad04f 100644 --- a/pipenv/patched/notpip/_internal/utils/inject_securetransport.py +++ b/pipenv/patched/notpip/_internal/utils/inject_securetransport.py @@ -10,8 +10,7 @@ import sys -def inject_securetransport(): - # type: () -> None +def inject_securetransport() -> None: # Only relevant on macOS if sys.platform != "darwin": return diff --git a/pipenv/patched/notpip/_internal/utils/logging.py b/pipenv/patched/notpip/_internal/utils/logging.py index c8f31e575d..cc9a6abe78 100644 --- a/pipenv/patched/notpip/_internal/utils/logging.py +++ b/pipenv/patched/notpip/_internal/utils/logging.py @@ -4,28 +4,28 @@ import logging.handlers import os import sys +import threading +from dataclasses import dataclass from logging import Filter -from typing import IO, Any, Callable, Iterator, Optional, TextIO, Type, cast - +from typing import IO, Any, ClassVar, Iterator, List, Optional, TextIO, Type + +from pipenv.patched.notpip._vendor.rich.console import ( + Console, + ConsoleOptions, + ConsoleRenderable, + RenderResult, +) +from pipenv.patched.notpip._vendor.rich.highlighter import NullHighlighter +from pipenv.patched.notpip._vendor.rich.logging import RichHandler +from pipenv.patched.notpip._vendor.rich.segment import Segment +from pipenv.patched.notpip._vendor.rich.style import Style + +from pipenv.patched.notpip._internal.exceptions import DiagnosticPipError from pipenv.patched.notpip._internal.utils._log import VERBOSE, getLogger from pipenv.patched.notpip._internal.utils.compat import WINDOWS from pipenv.patched.notpip._internal.utils.deprecation import DEPRECATION_MSG_PREFIX from pipenv.patched.notpip._internal.utils.misc import ensure_dir -try: - import threading -except ImportError: - import dummy_threading as threading # type: ignore - - -try: - from pipenv.patched.notpip._vendor import colorama -# Lots of different errors can come from this, including SystemError and -# ImportError. -except Exception: - colorama = None - - _log_state = threading.local() subprocess_logger = getLogger("pip.subprocessor") @@ -35,39 +35,22 @@ class BrokenStdoutLoggingError(Exception): Raised if BrokenPipeError occurs for the stdout stream while logging. """ - pass +def _is_broken_pipe_error(exc_class: Type[BaseException], exc: BaseException) -> bool: + if exc_class is BrokenPipeError: + return True -# BrokenPipeError manifests differently in Windows and non-Windows. -if WINDOWS: - # In Windows, a broken pipe can show up as EINVAL rather than EPIPE: + # On Windows, a broken pipe can show up as EINVAL rather than EPIPE: # https://bugs.python.org/issue19612 # https://bugs.python.org/issue30418 - def _is_broken_pipe_error(exc_class, exc): - # type: (Type[BaseException], BaseException) -> bool - """See the docstring for non-Windows below.""" - return (exc_class is BrokenPipeError) or ( - isinstance(exc, OSError) and exc.errno in (errno.EINVAL, errno.EPIPE) - ) - - -else: - # Then we are in the non-Windows case. - def _is_broken_pipe_error(exc_class, exc): - # type: (Type[BaseException], BaseException) -> bool - """ - Return whether an exception is a broken pipe error. 
+ if not WINDOWS: + return False - Args: - exc_class: an exception class. - exc: an exception instance. - """ - return exc_class is BrokenPipeError + return isinstance(exc, OSError) and exc.errno in (errno.EINVAL, errno.EPIPE) @contextlib.contextmanager -def indent_log(num=2): - # type: (int) -> Iterator[None] +def indent_log(num: int = 2) -> Iterator[None]: """ A context manager which will cause the log output to be indented for any log messages emitted inside it. @@ -81,8 +64,7 @@ def indent_log(num=2): _log_state.indentation -= num -def get_indentation(): - # type: () -> int +def get_indentation() -> int: return getattr(_log_state, "indentation", 0) @@ -91,11 +73,10 @@ class IndentingFormatter(logging.Formatter): def __init__( self, - *args, # type: Any - add_timestamp=False, # type: bool - **kwargs, # type: Any - ): - # type: (...) -> None + *args: Any, + add_timestamp: bool = False, + **kwargs: Any, + ) -> None: """ A logging.Formatter that obeys the indent_log() context manager. @@ -105,8 +86,7 @@ def __init__( self.add_timestamp = add_timestamp super().__init__(*args, **kwargs) - def get_message_start(self, formatted, levelno): - # type: (str, int) -> str + def get_message_start(self, formatted: str, levelno: int) -> str: """ Return the start of the formatted log message (not counting the prefix to add to each line). @@ -122,8 +102,7 @@ def get_message_start(self, formatted, levelno): return "ERROR: " - def format(self, record): - # type: (logging.LogRecord) -> str + def format(self, record: logging.LogRecord) -> str: """ Calls the standard formatter, but will indent all of the log message lines by our current indentation level. @@ -140,85 +119,63 @@ def format(self, record): return formatted -def _color_wrap(*colors): - # type: (*str) -> Callable[[str], str] - def wrapped(inp): - # type: (str) -> str - return "".join(list(colors) + [inp, colorama.Style.RESET_ALL]) - - return wrapped - - -class ColorizedStreamHandler(logging.StreamHandler): - - # Don't build up a list of colors if we don't have colorama - if colorama: - COLORS = [ - # This needs to be in order from highest logging level to lowest. - (logging.ERROR, _color_wrap(colorama.Fore.RED)), - (logging.WARNING, _color_wrap(colorama.Fore.YELLOW)), - ] - else: - COLORS = [] - - def __init__(self, stream=None, no_color=None): - # type: (Optional[TextIO], bool) -> None - super().__init__(stream) - self._no_color = no_color - - if WINDOWS and colorama: - self.stream = colorama.AnsiToWin32(self.stream) - - def _using_stdout(self): - # type: () -> bool - """ - Return whether the handler is using sys.stdout. - """ - if WINDOWS and colorama: - # Then self.stream is an AnsiToWin32 object. 
- stream = cast(colorama.AnsiToWin32, self.stream) - return stream.wrapped is sys.stdout - - return self.stream is sys.stdout - - def should_color(self): - # type: () -> bool - # Don't colorize things if we do not have colorama or if told not to - if not colorama or self._no_color: - return False - - real_stream = ( - self.stream - if not isinstance(self.stream, colorama.AnsiToWin32) - else self.stream.wrapped +@dataclass +class IndentedRenderable: + renderable: ConsoleRenderable + indent: int + + def __rich_console__( + self, console: Console, options: ConsoleOptions + ) -> RenderResult: + segments = console.render(self.renderable, options) + lines = Segment.split_lines(segments) + for line in lines: + yield Segment(" " * self.indent) + yield from line + yield Segment("\n") + + +class RichPipStreamHandler(RichHandler): + KEYWORDS: ClassVar[Optional[List[str]]] = [] + + def __init__(self, stream: Optional[TextIO], no_color: bool) -> None: + super().__init__( + console=Console(file=stream, no_color=no_color, soft_wrap=True), + show_time=False, + show_level=False, + show_path=False, + highlighter=NullHighlighter(), ) - # If the stream is a tty we should color it - if hasattr(real_stream, "isatty") and real_stream.isatty(): - return True - - # If we have an ANSI term we should color it - if os.environ.get("TERM") == "ANSI": - return True + # Our custom override on Rich's logger, to make things work as we need them to. + def emit(self, record: logging.LogRecord) -> None: + style: Optional[Style] = None + + # If we are given a diagnostic error to present, present it with indentation. + if record.msg == "[present-diagnostic] %s" and len(record.args) == 1: + diagnostic_error: DiagnosticPipError = record.args[0] # type: ignore[index] + assert isinstance(diagnostic_error, DiagnosticPipError) + + renderable: ConsoleRenderable = IndentedRenderable( + diagnostic_error, indent=get_indentation() + ) + else: + message = self.format(record) + renderable = self.render_message(record, message) + if record.levelno is not None: + if record.levelno >= logging.ERROR: + style = Style(color="red") + elif record.levelno >= logging.WARNING: + style = Style(color="yellow") + + try: + self.console.print(renderable, overflow="ignore", crop=False, style=style) + except Exception: + self.handleError(record) + + def handleError(self, record: logging.LogRecord) -> None: + """Called when logging is unable to log some output.""" - # If anything else we should not color it - return False - - def format(self, record): - # type: (logging.LogRecord) -> str - msg = super().format(record) - - if self.should_color(): - for level, color in self.COLORS: - if record.levelno >= level: - msg = color(msg) - break - - return msg - - # The logging module says handleError() can be customized. 
- def handleError(self, record): - # type: (logging.LogRecord) -> None exc_class, exc = sys.exc_info()[:2] # If a broken pipe occurred while calling write() or flush() on the # stdout stream in logging's Handler.emit(), then raise our special @@ -227,7 +184,7 @@ def handleError(self, record): if ( exc_class and exc - and self._using_stdout() + and self.console.file is sys.stdout and _is_broken_pipe_error(exc_class, exc) ): raise BrokenStdoutLoggingError() @@ -236,19 +193,16 @@ def handleError(self, record): class BetterRotatingFileHandler(logging.handlers.RotatingFileHandler): - def _open(self): - # type: () -> IO[Any] + def _open(self) -> IO[Any]: ensure_dir(os.path.dirname(self.baseFilename)) return super()._open() class MaxLevelFilter(Filter): - def __init__(self, level): - # type: (int) -> None + def __init__(self, level: int) -> None: self.level = level - def filter(self, record): - # type: (logging.LogRecord) -> bool + def filter(self, record: logging.LogRecord) -> bool: return record.levelno < self.level @@ -258,15 +212,13 @@ class ExcludeLoggerFilter(Filter): A logging Filter that excludes records from a logger (or its children). """ - def filter(self, record): - # type: (logging.LogRecord) -> bool + def filter(self, record: logging.LogRecord) -> bool: # The base Filter class allows only records from a logger (or its # children). return not super().filter(record) -def setup_logging(verbosity, no_color, user_log_file): - # type: (int, bool, Optional[str]) -> int +def setup_logging(verbosity: int, no_color: bool, user_log_file: Optional[str]) -> int: """Configures and sets up all of the logging Returns the requested logging level, as its integer value. @@ -308,7 +260,7 @@ def setup_logging(verbosity, no_color, user_log_file): "stderr": "ext://sys.stderr", } handler_classes = { - "stream": "pipenv.patched.notpip._internal.utils.logging.ColorizedStreamHandler", + "stream": "pipenv.patched.notpip._internal.utils.logging.RichPipStreamHandler", "file": "pipenv.patched.notpip._internal.utils.logging.BetterRotatingFileHandler", } handlers = ["console", "console_errors", "console_subprocess"] + ( @@ -366,8 +318,8 @@ def setup_logging(verbosity, no_color, user_log_file): "console_subprocess": { "level": level, "class": handler_classes["stream"], - "no_color": no_color, "stream": log_streams["stderr"], + "no_color": no_color, "filters": ["restrict_to_subprocess"], "formatter": "indent", }, diff --git a/pipenv/patched/notpip/_internal/utils/misc.py b/pipenv/patched/notpip/_internal/utils/misc.py index 4224909d8e..0c79796f1a 100644 --- a/pipenv/patched/notpip/_internal/utils/misc.py +++ b/pipenv/patched/notpip/_internal/utils/misc.py @@ -18,10 +18,8 @@ from types import TracebackType from typing import ( Any, - AnyStr, BinaryIO, Callable, - Container, ContextManager, Iterable, Iterator, @@ -34,17 +32,13 @@ cast, ) -from pipenv.patched.notpip._vendor.pkg_resources import Distribution from pipenv.patched.notpip._vendor.tenacity import retry, stop_after_delay, wait_fixed from pipenv.patched.notpip import __version__ from pipenv.patched.notpip._internal.exceptions import CommandError -from pipenv.patched.notpip._internal.locations import get_major_minor_version, site_packages, user_site -from pipenv.patched.notpip._internal.utils.compat import WINDOWS, stdlib_pkgs -from pipenv.patched.notpip._internal.utils.virtualenv import ( - running_under_virtualenv, - virtualenv_no_global, -) +from pipenv.patched.notpip._internal.locations import get_major_minor_version +from 
pipenv.patched.notpip._internal.utils.compat import WINDOWS +from pipenv.patched.notpip._internal.utils.virtualenv import running_under_virtualenv __all__ = [ "rmtree", @@ -71,8 +65,7 @@ NetlocTuple = Tuple[str, Tuple[Optional[str], Optional[str]]] -def get_pip_version(): - # type: () -> str +def get_pip_version() -> str: pip_pkg_dir = os.path.join(os.path.dirname(__file__), "..", "..") pip_pkg_dir = os.path.abspath(pip_pkg_dir) @@ -83,8 +76,7 @@ def get_pip_version(): ) -def normalize_version_info(py_version_info): - # type: (Tuple[int, ...]) -> Tuple[int, int, int] +def normalize_version_info(py_version_info: Tuple[int, ...]) -> Tuple[int, int, int]: """ Convert a tuple of ints representing a Python version to one of length three. @@ -103,8 +95,7 @@ def normalize_version_info(py_version_info): return cast("VersionInfo", py_version_info) -def ensure_dir(path): - # type: (AnyStr) -> None +def ensure_dir(path: str) -> None: """os.path.makedirs without EEXIST.""" try: os.makedirs(path) @@ -114,8 +105,7 @@ def ensure_dir(path): raise -def get_prog(): - # type: () -> str +def get_prog() -> str: try: prog = os.path.basename(sys.argv[0]) if prog in ("__main__.py", "-c"): @@ -130,13 +120,11 @@ def get_prog(): # Retry every half second for up to 3 seconds # Tenacity raises RetryError by default, explicitly raise the original exception @retry(reraise=True, stop=stop_after_delay(3), wait=wait_fixed(0.5)) -def rmtree(dir, ignore_errors=False): - # type: (AnyStr, bool) -> None +def rmtree(dir: str, ignore_errors: bool = False) -> None: shutil.rmtree(dir, ignore_errors=ignore_errors, onerror=rmtree_errorhandler) -def rmtree_errorhandler(func, path, exc_info): - # type: (Callable[..., Any], str, ExcInfo) -> None +def rmtree_errorhandler(func: Callable[..., Any], path: str, exc_info: ExcInfo) -> None: """On Windows, the files in .svn are read-only, so when rmtree() tries to remove them, an exception is thrown. 
We catch that here, remove the read-only attribute, and hopefully continue without problems.""" @@ -156,8 +144,7 @@ def rmtree_errorhandler(func, path, exc_info): raise -def display_path(path): - # type: (str) -> str +def display_path(path: str) -> str: """Gives the display value for a given path, making it relative to cwd if possible.""" path = os.path.normcase(os.path.abspath(path)) @@ -166,8 +153,7 @@ def display_path(path): return path -def backup_dir(dir, ext=".bak"): - # type: (str, str) -> str +def backup_dir(dir: str, ext: str = ".bak") -> str: """Figure out the name of a directory to back up the given dir to (adding .bak, .bak2, etc)""" n = 1 @@ -178,16 +164,14 @@ def backup_dir(dir, ext=".bak"): return dir + extension -def ask_path_exists(message, options): - # type: (str, Iterable[str]) -> str +def ask_path_exists(message: str, options: Iterable[str]) -> str: for action in os.environ.get("PIP_EXISTS_ACTION", "").split(): if action in options: return action return ask(message, options) -def _check_no_input(message): - # type: (str) -> None +def _check_no_input(message: str) -> None: """Raise an error if no input is allowed.""" if os.environ.get("PIP_NO_INPUT"): raise Exception( @@ -195,8 +179,7 @@ def _check_no_input(message): ) -def ask(message, options): - # type: (str, Iterable[str]) -> str +def ask(message: str, options: Iterable[str]) -> str: """Ask the message interactively, with the given possible responses""" while 1: _check_no_input(message) @@ -211,22 +194,19 @@ def ask(message, options): return response -def ask_input(message): - # type: (str) -> str +def ask_input(message: str) -> str: """Ask for input interactively.""" _check_no_input(message) return input(message) -def ask_password(message): - # type: (str) -> str +def ask_password(message: str) -> str: """Ask for a password interactively.""" _check_no_input(message) return getpass.getpass(message) -def strtobool(val): - # type: (str) -> int +def strtobool(val: str) -> int: """Convert a string representation of truth to true (1) or false (0). True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values @@ -242,8 +222,7 @@ def strtobool(val): raise ValueError(f"invalid truth value {val!r}") -def format_size(bytes): - # type: (float) -> str +def format_size(bytes: float) -> str: if bytes > 1000 * 1000: return "{:.1f} MB".format(bytes / 1000.0 / 1000) elif bytes > 10 * 1000: @@ -254,8 +233,7 @@ def format_size(bytes): return "{} bytes".format(int(bytes)) -def tabulate(rows): - # type: (Iterable[Iterable[Any]]) -> Tuple[List[str], List[int]] +def tabulate(rows: Iterable[Iterable[Any]]) -> Tuple[List[str], List[int]]: """Return a list of formatted rows and a list of column sizes. For example:: @@ -286,8 +264,7 @@ def is_installable_dir(path: str) -> bool: return False -def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE): - # type: (BinaryIO, int) -> Iterator[bytes] +def read_chunks(file: BinaryIO, size: int = io.DEFAULT_BUFFER_SIZE) -> Iterator[bytes]: """Yield pieces of data from a file-like object until EOF.""" while True: chunk = file.read(size) @@ -296,8 +273,7 @@ def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE): yield chunk -def normalize_path(path, resolve_symlinks=True): - # type: (str, bool) -> str +def normalize_path(path: str, resolve_symlinks: bool = True) -> str: """ Convert a path to its canonical, case-normalized, absolute version. 
@@ -310,8 +286,7 @@ def normalize_path(path, resolve_symlinks=True): return os.path.normcase(path) -def splitext(path): - # type: (str) -> Tuple[str, str] +def splitext(path: str) -> Tuple[str, str]: """Like os.path.splitext, but take off .tar too""" base, ext = posixpath.splitext(path) if base.lower().endswith(".tar"): @@ -320,8 +295,7 @@ def splitext(path): return base, ext -def renames(old, new): - # type: (str, str) -> None +def renames(old: str, new: str) -> None: """Like os.renames(), but handles renaming across devices.""" # Implementation borrowed from os.renames(). head, tail = os.path.split(new) @@ -338,8 +312,7 @@ def renames(old, new): pass -def is_local(path): - # type: (str) -> bool +def is_local(path: str) -> bool: """ Return True if path is within sys.prefix, if we're running in a virtualenv. @@ -353,158 +326,15 @@ def is_local(path): return path.startswith(normalize_path(sys.prefix)) -def dist_is_local(dist): - # type: (Distribution) -> bool - """ - Return True if given Distribution object is installed locally - (i.e. within current virtualenv). - - Always True if we're not in a virtualenv. - - """ - return is_local(dist_location(dist)) - - -def dist_in_usersite(dist): - # type: (Distribution) -> bool - """ - Return True if given Distribution is installed in user site. - """ - return dist_location(dist).startswith(normalize_path(user_site)) - - -def dist_in_site_packages(dist): - # type: (Distribution) -> bool - """ - Return True if given Distribution is installed in - sysconfig.get_python_lib(). - """ - return dist_location(dist).startswith(normalize_path(site_packages)) - - -def dist_is_editable(dist): - # type: (Distribution) -> bool - """ - Return True if given Distribution is an editable install. - """ - for path_item in sys.path: - egg_link = os.path.join(path_item, dist.project_name + ".egg-link") - if os.path.isfile(egg_link): - return True - return False - - -def get_installed_distributions( - local_only=True, # type: bool - skip=stdlib_pkgs, # type: Container[str] - include_editables=True, # type: bool - editables_only=False, # type: bool - user_only=False, # type: bool - paths=None, # type: Optional[List[str]] -): - # type: (...) -> List[Distribution] - """Return a list of installed Distribution objects. - - Left for compatibility until direct pkg_resources uses are refactored out. - """ - from pipenv.patched.notpip._internal.metadata import get_default_environment, get_environment - from pipenv.patched.notpip._internal.metadata.pkg_resources import Distribution as _Dist - - if paths is None: - env = get_default_environment() - else: - env = get_environment(paths) - dists = env.iter_installed_distributions( - local_only=local_only, - skip=skip, - include_editables=include_editables, - editables_only=editables_only, - user_only=user_only, - ) - return [cast(_Dist, dist)._dist for dist in dists] - - -def get_distribution(req_name): - # type: (str) -> Optional[Distribution] - """Given a requirement name, return the installed Distribution object. - - This searches from *all* distributions available in the environment, to - match the behavior of ``pkg_resources.get_distribution()``. - - Left for compatibility until direct pkg_resources uses are refactored out. 
- """ - from pipenv.patched.notpip._internal.metadata import get_default_environment - from pipenv.patched.notpip._internal.metadata.pkg_resources import Distribution as _Dist - - dist = get_default_environment().get_distribution(req_name) - if dist is None: - return None - return cast(_Dist, dist)._dist - - -def egg_link_path(dist): - # type: (Distribution) -> Optional[str] - """ - Return the path for the .egg-link file if it exists, otherwise, None. - - There's 3 scenarios: - 1) not in a virtualenv - try to find in site.USER_SITE, then site_packages - 2) in a no-global virtualenv - try to find in site_packages - 3) in a yes-global virtualenv - try to find in site_packages, then site.USER_SITE - (don't look in global location) - - For #1 and #3, there could be odd cases, where there's an egg-link in 2 - locations. - - This method will just return the first one found. - """ - sites = [] - if running_under_virtualenv(): - sites.append(site_packages) - if not virtualenv_no_global() and user_site: - sites.append(user_site) - else: - if user_site: - sites.append(user_site) - sites.append(site_packages) - - for site in sites: - egglink = os.path.join(site, dist.project_name) + ".egg-link" - if os.path.isfile(egglink): - return egglink - return None - - -def dist_location(dist): - # type: (Distribution) -> str - """ - Get the site-packages location of this distribution. Generally - this is dist.location, except in the case of develop-installed - packages, where dist.location is the source code location, and we - want to know where the egg-link file is. - - The returned location is normalized (in particular, with symlinks removed). - """ - egg_link = egg_link_path(dist) - if egg_link: - return normalize_path(egg_link) - return normalize_path(dist.location) - - -def write_output(msg, *args): - # type: (Any, Any) -> None +def write_output(msg: Any, *args: Any) -> None: logger.info(msg, *args) class StreamWrapper(StringIO): - orig_stream = None # type: TextIO + orig_stream: TextIO = None @classmethod - def from_stream(cls, orig_stream): - # type: (TextIO) -> StreamWrapper + def from_stream(cls, orig_stream: TextIO) -> "StreamWrapper": cls.orig_stream = orig_stream return cls() @@ -516,8 +346,7 @@ def encoding(self): # type: ignore @contextlib.contextmanager -def captured_output(stream_name): - # type: (str) -> Iterator[StreamWrapper] +def captured_output(stream_name: str) -> Iterator[StreamWrapper]: """Return a context manager used by captured_stdout/stdin/stderr that temporarily replaces the sys stream *stream_name* with a StringIO. @@ -531,8 +360,7 @@ def captured_output(stream_name): setattr(sys, stream_name, orig_stdout) -def captured_stdout(): - # type: () -> ContextManager[StreamWrapper] +def captured_stdout() -> ContextManager[StreamWrapper]: """Capture the output of sys.stdout: with captured_stdout() as stdout: @@ -544,8 +372,7 @@ def captured_stdout(): return captured_output("stdout") -def captured_stderr(): - # type: () -> ContextManager[StreamWrapper] +def captured_stderr() -> ContextManager[StreamWrapper]: """ See captured_stdout(). 
""" @@ -553,16 +380,14 @@ def captured_stderr(): # Simulates an enum -def enum(*sequential, **named): - # type: (*Any, **Any) -> Type[Any] +def enum(*sequential: Any, **named: Any) -> Type[Any]: enums = dict(zip(sequential, range(len(sequential))), **named) reverse = {value: key for key, value in enums.items()} enums["reverse_mapping"] = reverse return type("Enum", (), enums) -def build_netloc(host, port): - # type: (str, Optional[int]) -> str +def build_netloc(host: str, port: Optional[int]) -> str: """ Build a netloc from a host-port pair """ @@ -574,8 +399,7 @@ def build_netloc(host, port): return f"{host}:{port}" -def build_url_from_netloc(netloc, scheme="https"): - # type: (str, str) -> str +def build_url_from_netloc(netloc: str, scheme: str = "https") -> str: """ Build a full URL from a netloc. """ @@ -585,8 +409,7 @@ def build_url_from_netloc(netloc, scheme="https"): return f"{scheme}://{netloc}" -def parse_netloc(netloc): - # type: (str) -> Tuple[str, Optional[int]] +def parse_netloc(netloc: str) -> Tuple[str, Optional[int]]: """ Return the host-port pair from a netloc. """ @@ -595,8 +418,7 @@ def parse_netloc(netloc): return parsed.hostname, parsed.port -def split_auth_from_netloc(netloc): - # type: (str) -> NetlocTuple +def split_auth_from_netloc(netloc: str) -> NetlocTuple: """ Parse out and remove the auth information from a netloc. @@ -609,7 +431,7 @@ def split_auth_from_netloc(netloc): # behaves if more than one @ is present (which can be checked using # the password attribute of urlsplit()'s return value). auth, netloc = netloc.rsplit("@", 1) - pw = None # type: Optional[str] + pw: Optional[str] = None if ":" in auth: # Split from the left because that's how urllib.parse.urlsplit() # behaves if more than one : is present (which again can be checked @@ -625,8 +447,7 @@ def split_auth_from_netloc(netloc): return netloc, (user, pw) -def redact_netloc(netloc): - # type: (str) -> str +def redact_netloc(netloc: str) -> str: """ Replace the sensitive data in a netloc with "****", if it exists. @@ -648,8 +469,9 @@ def redact_netloc(netloc): ) -def _transform_url(url, transform_netloc): - # type: (str, Callable[[str], Tuple[Any, ...]]) -> Tuple[str, NetlocTuple] +def _transform_url( + url: str, transform_netloc: Callable[[str], Tuple[Any, ...]] +) -> Tuple[str, NetlocTuple]: """Transform and replace netloc in a url. transform_netloc is a function taking the netloc and returning a @@ -667,18 +489,15 @@ def _transform_url(url, transform_netloc): return surl, cast("NetlocTuple", netloc_tuple) -def _get_netloc(netloc): - # type: (str) -> NetlocTuple +def _get_netloc(netloc: str) -> NetlocTuple: return split_auth_from_netloc(netloc) -def _redact_netloc(netloc): - # type: (str) -> Tuple[str,] +def _redact_netloc(netloc: str) -> Tuple[str]: return (redact_netloc(netloc),) -def split_auth_netloc_from_url(url): - # type: (str) -> Tuple[str, str, Tuple[str, str]] +def split_auth_netloc_from_url(url: str) -> Tuple[str, str, Tuple[str, str]]: """ Parse a url into separate netloc, auth, and url with no auth. @@ -688,41 +507,31 @@ def split_auth_netloc_from_url(url): return url_without_auth, netloc, auth -def remove_auth_from_url(url): - # type: (str) -> str +def remove_auth_from_url(url: str) -> str: """Return a copy of url with 'username:password@' removed.""" # username/pass params are passed to subversion through flags # and are not recognized in the url. 
return _transform_url(url, _get_netloc)[0] -def redact_auth_from_url(url): - # type: (str) -> str +def redact_auth_from_url(url: str) -> str: """Replace the password in a given url with ****.""" return _transform_url(url, _redact_netloc)[0] class HiddenText: - def __init__( - self, - secret, # type: str - redacted, # type: str - ): - # type: (...) -> None + def __init__(self, secret: str, redacted: str) -> None: self.secret = secret self.redacted = redacted - def __repr__(self): - # type: (...) -> str + def __repr__(self) -> str: return "".format(str(self)) - def __str__(self): - # type: (...) -> str + def __str__(self) -> str: return self.redacted # This is useful for testing. - def __eq__(self, other): - # type: (Any) -> bool + def __eq__(self, other: Any) -> bool: if type(self) != type(other): return False @@ -731,19 +540,16 @@ def __eq__(self, other): return self.secret == other.secret -def hide_value(value): - # type: (str) -> HiddenText +def hide_value(value: str) -> HiddenText: return HiddenText(value, redacted="****") -def hide_url(url): - # type: (str) -> HiddenText +def hide_url(url: str) -> HiddenText: redacted = redact_auth_from_url(url) return HiddenText(url, redacted=redacted) -def protect_pip_from_modification_on_windows(modifying_pip): - # type: (bool) -> None +def protect_pip_from_modification_on_windows(modifying_pip: bool) -> None: """Protection of pip.exe from modification on Windows On Windows, any operation modifying pip should be run as: @@ -769,14 +575,12 @@ def protect_pip_from_modification_on_windows(modifying_pip): ) -def is_console_interactive(): - # type: () -> bool +def is_console_interactive() -> bool: """Is this console interactive?""" return sys.stdin is not None and sys.stdin.isatty() -def hash_file(path, blocksize=1 << 20): - # type: (str, int) -> Tuple[Any, int] +def hash_file(path: str, blocksize: int = 1 << 20) -> Tuple[Any, int]: """Return (hash, length) for path using hashlib.sha256()""" h = hashlib.sha256() @@ -788,8 +592,7 @@ def hash_file(path, blocksize=1 << 20): return h, length -def is_wheel_installed(): - # type: () -> bool +def is_wheel_installed() -> bool: """ Return whether the wheel package is installed. """ @@ -801,8 +604,7 @@ def is_wheel_installed(): return True -def pairwise(iterable): - # type: (Iterable[Any]) -> Iterator[Tuple[Any, Any]] +def pairwise(iterable: Iterable[Any]) -> Iterator[Tuple[Any, Any]]: """ Return paired elements. @@ -814,10 +616,9 @@ def pairwise(iterable): def partition( - pred, # type: Callable[[T], bool] - iterable, # type: Iterable[T] -): - # type: (...) 
-> Tuple[Iterable[T], Iterable[T]] + pred: Callable[[T], bool], + iterable: Iterable[T], +) -> Tuple[Iterable[T], Iterable[T]]: """ Use a predicate to partition entries into false entries and true entries, like diff --git a/pipenv/patched/notpip/_internal/utils/models.py b/pipenv/patched/notpip/_internal/utils/models.py index 0e02bc7a5b..b6bb21a8b2 100644 --- a/pipenv/patched/notpip/_internal/utils/models.py +++ b/pipenv/patched/notpip/_internal/utils/models.py @@ -10,37 +10,29 @@ class KeyBasedCompareMixin: __slots__ = ["_compare_key", "_defining_class"] - def __init__(self, key, defining_class): - # type: (Any, Type[KeyBasedCompareMixin]) -> None + def __init__(self, key: Any, defining_class: Type["KeyBasedCompareMixin"]) -> None: self._compare_key = key self._defining_class = defining_class - def __hash__(self): - # type: () -> int + def __hash__(self) -> int: return hash(self._compare_key) - def __lt__(self, other): - # type: (Any) -> bool + def __lt__(self, other: Any) -> bool: return self._compare(other, operator.__lt__) - def __le__(self, other): - # type: (Any) -> bool + def __le__(self, other: Any) -> bool: return self._compare(other, operator.__le__) - def __gt__(self, other): - # type: (Any) -> bool + def __gt__(self, other: Any) -> bool: return self._compare(other, operator.__gt__) - def __ge__(self, other): - # type: (Any) -> bool + def __ge__(self, other: Any) -> bool: return self._compare(other, operator.__ge__) - def __eq__(self, other): - # type: (Any) -> bool + def __eq__(self, other: Any) -> bool: return self._compare(other, operator.__eq__) - def _compare(self, other, method): - # type: (Any, Callable[[Any, Any], bool]) -> bool + def _compare(self, other: Any, method: Callable[[Any, Any], bool]) -> bool: if not isinstance(other, self._defining_class): return NotImplemented diff --git a/pipenv/patched/notpip/_internal/utils/packaging.py b/pipenv/patched/notpip/_internal/utils/packaging.py index ea6d29f14f..10255d4312 100644 --- a/pipenv/patched/notpip/_internal/utils/packaging.py +++ b/pipenv/patched/notpip/_internal/utils/packaging.py @@ -1,20 +1,19 @@ +import functools import logging -from email.message import Message -from email.parser import FeedParser -from typing import Optional, Tuple +import re +from typing import NewType, Optional, Tuple, cast -from pipenv.patched.notpip._vendor import pkg_resources from pipenv.patched.notpip._vendor.packaging import specifiers, version -from pipenv.patched.notpip._vendor.pkg_resources import Distribution +from pipenv.patched.notpip._vendor.packaging.requirements import Requirement -from pipenv.patched.notpip._internal.exceptions import NoneMetadataError -from pipenv.patched.notpip._internal.utils.misc import display_path +NormalizedExtra = NewType("NormalizedExtra", str) logger = logging.getLogger(__name__) -def check_requires_python(requires_python, version_info): - # type: (Optional[str], Tuple[int, ...]) -> bool +def check_requires_python( + requires_python: Optional[str], version_info: Tuple[int, ...] +) -> bool: """ Check if the given Python version matches a "Requires-Python" specifier. @@ -35,55 +34,24 @@ def check_requires_python(requires_python, version_info): return python_version in requires_python_specifier -def get_metadata(dist): - # type: (Distribution) -> Message - """ - :raises NoneMetadataError: if the distribution reports `has_metadata()` - True but `get_metadata()` returns None. 
- """ - metadata_name = "METADATA" - if isinstance(dist, pkg_resources.DistInfoDistribution) and dist.has_metadata( - metadata_name - ): - metadata = dist.get_metadata(metadata_name) - elif dist.has_metadata("PKG-INFO"): - metadata_name = "PKG-INFO" - metadata = dist.get_metadata(metadata_name) - else: - logger.warning("No metadata found in %s", display_path(dist.location)) - metadata = "" +@functools.lru_cache(maxsize=512) +def get_requirement(req_string: str) -> Requirement: + """Construct a packaging.Requirement object with caching""" + # Parsing requirement strings is expensive, and is also expected to happen + # with a low diversity of different arguments (at least relative the number + # constructed). This method adds a cache to requirement object creation to + # minimize repeated parsing of the same string to construct equivalent + # Requirement objects. + return Requirement(req_string) - if metadata is None: - raise NoneMetadataError(dist, metadata_name) - feed_parser = FeedParser() - # The following line errors out if with a "NoneType" TypeError if - # passed metadata=None. - feed_parser.feed(metadata) - return feed_parser.close() +def safe_extra(extra: str) -> NormalizedExtra: + """Convert an arbitrary string to a standard 'extra' name + Any runs of non-alphanumeric characters are replaced with a single '_', + and the result is always lowercased. -def get_requires_python(dist): - # type: (pkg_resources.Distribution) -> Optional[str] + This function is duplicated from ``pkg_resources``. Note that this is not + the same to either ``canonicalize_name`` or ``_egg_link_name``. """ - Return the "Requires-Python" metadata for a distribution, or None - if not present. - """ - pkg_info_dict = get_metadata(dist) - requires_python = pkg_info_dict.get("Requires-Python") - - if requires_python is not None: - # Convert to a str to satisfy the type checker, since requires_python - # can be a Header object. - requires_python = str(requires_python) - - return requires_python - - -def get_installer(dist): - # type: (Distribution) -> str - if dist.has_metadata("INSTALLER"): - for line in dist.get_metadata_lines("INSTALLER"): - if line.strip(): - return line.strip() - return "" + return cast(NormalizedExtra, re.sub("[^A-Za-z0-9.-]+", "_", extra).lower()) diff --git a/pipenv/patched/notpip/_internal/utils/parallel.py b/pipenv/patched/notpip/_internal/utils/parallel.py deleted file mode 100644 index 31e12c8e4a..0000000000 --- a/pipenv/patched/notpip/_internal/utils/parallel.py +++ /dev/null @@ -1,101 +0,0 @@ -"""Convenient parallelization of higher order functions. - -This module provides two helper functions, with appropriate fallbacks on -Python 2 and on systems lacking support for synchronization mechanisms: - -- map_multiprocess -- map_multithread - -These helpers work like Python 3's map, with two differences: - -- They don't guarantee the order of processing of - the elements of the iterable. -- The underlying process/thread pools chop the iterable into - a number of chunks, so that for very long iterables using - a large value for chunksize can make the job complete much faster - than using the default value of 1. 
-""" - -__all__ = ["map_multiprocess", "map_multithread"] - -from contextlib import contextmanager -from multiprocessing import Pool as ProcessPool -from multiprocessing import pool -from multiprocessing.dummy import Pool as ThreadPool -from typing import Callable, Iterable, Iterator, TypeVar, Union - -from pipenv.patched.notpip._vendor.requests.adapters import DEFAULT_POOLSIZE - -Pool = Union[pool.Pool, pool.ThreadPool] -S = TypeVar("S") -T = TypeVar("T") - -# On platforms without sem_open, multiprocessing[.dummy] Pool -# cannot be created. -try: - import multiprocessing.synchronize # noqa -except ImportError: - LACK_SEM_OPEN = True -else: - LACK_SEM_OPEN = False - -# Incredibly large timeout to work around bpo-8296 on Python 2. -TIMEOUT = 2000000 - - -@contextmanager -def closing(pool): - # type: (Pool) -> Iterator[Pool] - """Return a context manager making sure the pool closes properly.""" - try: - yield pool - finally: - # For Pool.imap*, close and join are needed - # for the returned iterator to begin yielding. - pool.close() - pool.join() - pool.terminate() - - -def _map_fallback(func, iterable, chunksize=1): - # type: (Callable[[S], T], Iterable[S], int) -> Iterator[T] - """Make an iterator applying func to each element in iterable. - - This function is the sequential fallback either on Python 2 - where Pool.imap* doesn't react to KeyboardInterrupt - or when sem_open is unavailable. - """ - return map(func, iterable) - - -def _map_multiprocess(func, iterable, chunksize=1): - # type: (Callable[[S], T], Iterable[S], int) -> Iterator[T] - """Chop iterable into chunks and submit them to a process pool. - - For very long iterables using a large value for chunksize can make - the job complete much faster than using the default value of 1. - - Return an unordered iterator of the results. - """ - with closing(ProcessPool()) as pool: - return pool.imap_unordered(func, iterable, chunksize) - - -def _map_multithread(func, iterable, chunksize=1): - # type: (Callable[[S], T], Iterable[S], int) -> Iterator[T] - """Chop iterable into chunks and submit them to a thread pool. - - For very long iterables using a large value for chunksize can make - the job complete much faster than using the default value of 1. - - Return an unordered iterator of the results. - """ - with closing(ThreadPool(DEFAULT_POOLSIZE)) as pool: - return pool.imap_unordered(func, iterable, chunksize) - - -if LACK_SEM_OPEN: - map_multiprocess = map_multithread = _map_fallback -else: - map_multiprocess = _map_multiprocess - map_multithread = _map_multithread diff --git a/pipenv/patched/notpip/_internal/utils/pkg_resources.py b/pipenv/patched/notpip/_internal/utils/pkg_resources.py deleted file mode 100644 index 3d60b61885..0000000000 --- a/pipenv/patched/notpip/_internal/utils/pkg_resources.py +++ /dev/null @@ -1,40 +0,0 @@ -from typing import Dict, Iterable, List - -from pipenv.patched.notpip._vendor.pkg_resources import yield_lines - - -class DictMetadata: - """IMetadataProvider that reads metadata files from a dictionary.""" - - def __init__(self, metadata): - # type: (Dict[str, bytes]) -> None - self._metadata = metadata - - def has_metadata(self, name): - # type: (str) -> bool - return name in self._metadata - - def get_metadata(self, name): - # type: (str) -> str - try: - return self._metadata[name].decode() - except UnicodeDecodeError as e: - # Mirrors handling done in pkg_resources.NullProvider. 
- e.reason += f" in {name} file" - raise - - def get_metadata_lines(self, name): - # type: (str) -> Iterable[str] - return yield_lines(self.get_metadata(name)) - - def metadata_isdir(self, name): - # type: (str) -> bool - return False - - def metadata_listdir(self, name): - # type: (str) -> List[str] - return [] - - def run_script(self, script_name, namespace): - # type: (str, str) -> None - pass diff --git a/pipenv/patched/notpip/_internal/utils/setuptools_build.py b/pipenv/patched/notpip/_internal/utils/setuptools_build.py index 4b8e4b359f..f460c4003f 100644 --- a/pipenv/patched/notpip/_internal/utils/setuptools_build.py +++ b/pipenv/patched/notpip/_internal/utils/setuptools_build.py @@ -1,30 +1,57 @@ import sys +import textwrap from typing import List, Optional, Sequence # Shim to wrap setup.py invocation with setuptools -# -# We set sys.argv[0] to the path to the underlying setup.py file so -# setuptools / distutils don't take the path to the setup.py to be "-c" when -# invoking via the shim. This avoids e.g. the following manifest_maker -# warning: "warning: manifest_maker: standard file '-c' not found". -_SETUPTOOLS_SHIM = ( - "import io, os, sys, setuptools, tokenize; sys.argv[0] = {0!r}; __file__={0!r};" - "f = getattr(tokenize, 'open', open)(__file__) " - "if os.path.exists(__file__) " - "else io.StringIO('from setuptools import setup; setup()');" - "code = f.read().replace('\\r\\n', '\\n');" - "f.close();" - "exec(compile(code, __file__, 'exec'))" -) +# Note that __file__ is handled via two {!r} *and* %r, to ensure that paths on +# Windows are correctly handled (it should be "C:\\Users" not "C:\Users"). +_SETUPTOOLS_SHIM = textwrap.dedent( + """ + exec(compile(''' + # This is -- a caller that pip uses to run setup.py + # + # - It imports setuptools before invoking setup.py, to enable projects that directly + # import from `distutils.core` to work with newer packaging standards. + # - It provides a clear error message when setuptools is not installed. + # - It sets `sys.argv[0]` to the underlying `setup.py`, when invoking `setup.py` so + # setuptools doesn't think the script is `-c`. This avoids the following warning: + # manifest_maker: standard file '-c' not found". + # - It generates a shim setup.py, for handling setup.cfg-only projects. + import os, sys, tokenize + + try: + import setuptools + except ImportError as error: + print( + "ERROR: Can not execute `setup.py` since setuptools is not available in " + "the build environment.", + file=sys.stderr, + ) + sys.exit(1) + + __file__ = %r + sys.argv[0] = __file__ + + if os.path.exists(__file__): + filename = __file__ + with tokenize.open(__file__) as f: + setup_py_code = f.read() + else: + filename = "" + setup_py_code = "from setuptools import setup; setup()" + + exec(compile(setup_py_code, filename, "exec")) + ''' % ({!r},), "", "exec")) + """ +).rstrip() def make_setuptools_shim_args( - setup_py_path, # type: str - global_options=None, # type: Sequence[str] - no_user_config=False, # type: bool - unbuffered_output=False, # type: bool -): - # type: (...) -> List[str] + setup_py_path: str, + global_options: Sequence[str] = None, + no_user_config: bool = False, + unbuffered_output: bool = False, +) -> List[str]: """ Get setuptools command arguments with shim wrapped setup file invocation. 
@@ -46,12 +73,11 @@ def make_setuptools_shim_args( def make_setuptools_bdist_wheel_args( - setup_py_path, # type: str - global_options, # type: Sequence[str] - build_options, # type: Sequence[str] - destination_dir, # type: str -): - # type: (...) -> List[str] + setup_py_path: str, + global_options: Sequence[str], + build_options: Sequence[str], + destination_dir: str, +) -> List[str]: # NOTE: Eventually, we'd want to also -S to the flags here, when we're # isolating. Currently, it breaks Python in virtualenvs, because it # relies on site.py to find parts of the standard library outside the @@ -65,10 +91,9 @@ def make_setuptools_bdist_wheel_args( def make_setuptools_clean_args( - setup_py_path, # type: str - global_options, # type: Sequence[str] -): - # type: (...) -> List[str] + setup_py_path: str, + global_options: Sequence[str], +) -> List[str]: args = make_setuptools_shim_args( setup_py_path, global_options=global_options, unbuffered_output=True ) @@ -77,15 +102,14 @@ def make_setuptools_clean_args( def make_setuptools_develop_args( - setup_py_path, # type: str - global_options, # type: Sequence[str] - install_options, # type: Sequence[str] - no_user_config, # type: bool - prefix, # type: Optional[str] - home, # type: Optional[str] - use_user_site, # type: bool -): - # type: (...) -> List[str] + setup_py_path: str, + global_options: Sequence[str], + install_options: Sequence[str], + no_user_config: bool, + prefix: Optional[str], + home: Optional[str], + use_user_site: bool, +) -> List[str]: assert not (use_user_site and prefix) args = make_setuptools_shim_args( @@ -110,11 +134,10 @@ def make_setuptools_develop_args( def make_setuptools_egg_info_args( - setup_py_path, # type: str - egg_info_dir, # type: Optional[str] - no_user_config, # type: bool -): - # type: (...) -> List[str] + setup_py_path: str, + egg_info_dir: Optional[str], + no_user_config: bool, +) -> List[str]: args = make_setuptools_shim_args(setup_py_path, no_user_config=no_user_config) args += ["egg_info"] @@ -126,19 +149,18 @@ def make_setuptools_egg_info_args( def make_setuptools_install_args( - setup_py_path, # type: str - global_options, # type: Sequence[str] - install_options, # type: Sequence[str] - record_filename, # type: str - root, # type: Optional[str] - prefix, # type: Optional[str] - header_dir, # type: Optional[str] - home, # type: Optional[str] - use_user_site, # type: bool - no_user_config, # type: bool - pycompile, # type: bool -): - # type: (...) 
-> List[str] + setup_py_path: str, + global_options: Sequence[str], + install_options: Sequence[str], + record_filename: str, + root: Optional[str], + prefix: Optional[str], + header_dir: Optional[str], + home: Optional[str], + use_user_site: bool, + no_user_config: bool, + pycompile: bool, +) -> List[str]: assert not (use_user_site and prefix) assert not (use_user_site and root) diff --git a/pipenv/patched/notpip/_internal/utils/subprocess.py b/pipenv/patched/notpip/_internal/utils/subprocess.py index c23aee0627..16cff7979e 100644 --- a/pipenv/patched/notpip/_internal/utils/subprocess.py +++ b/pipenv/patched/notpip/_internal/utils/subprocess.py @@ -2,25 +2,38 @@ import os import shlex import subprocess -from typing import Any, Callable, Iterable, List, Mapping, Optional, Union +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Iterable, + List, + Mapping, + Optional, + Union, +) + +from pipenv.patched.notpip._vendor.rich.markup import escape from pipenv.patched.notpip._internal.cli.spinners import SpinnerInterface, open_spinner from pipenv.patched.notpip._internal.exceptions import InstallationSubprocessError from pipenv.patched.notpip._internal.utils.logging import VERBOSE, subprocess_logger from pipenv.patched.notpip._internal.utils.misc import HiddenText -CommandArgs = List[Union[str, HiddenText]] - +if TYPE_CHECKING: + # Literal was introduced in Python 3.8. + # + # TODO: Remove `if TYPE_CHECKING` when dropping support for Python 3.7. + from typing import Literal -LOG_DIVIDER = "----------------------------------------" +CommandArgs = List[Union[str, HiddenText]] -def make_command(*args): - # type: (Union[str, HiddenText, CommandArgs]) -> CommandArgs +def make_command(*args: Union[str, HiddenText, CommandArgs]) -> CommandArgs: """ Create a CommandArgs object. """ - command_args = [] # type: CommandArgs + command_args: CommandArgs = [] for arg in args: # Check for list instead of CommandArgs since CommandArgs is # only known during type-checking. @@ -33,8 +46,7 @@ def make_command(*args): return command_args -def format_command_args(args): - # type: (Union[List[str], CommandArgs]) -> str +def format_command_args(args: Union[List[str], CommandArgs]) -> str: """ Format command arguments for display. """ @@ -49,64 +61,27 @@ def format_command_args(args): ) -def reveal_command_args(args): - # type: (Union[List[str], CommandArgs]) -> List[str] +def reveal_command_args(args: Union[List[str], CommandArgs]) -> List[str]: """ Return the arguments in their raw, unredacted form. """ return [arg.secret if isinstance(arg, HiddenText) else arg for arg in args] -def make_subprocess_output_error( - cmd_args, # type: Union[List[str], CommandArgs] - cwd, # type: Optional[str] - lines, # type: List[str] - exit_status, # type: int -): - # type: (...) -> str - """ - Create and return the error message to use to log a subprocess error - with command output. - - :param lines: A list of lines, each ending with a newline. - """ - command = format_command_args(cmd_args) - - # We know the joined output value ends in a newline. - output = "".join(lines) - msg = ( - # Use a unicode string to avoid "UnicodeEncodeError: 'ascii' - # codec can't encode character ..." in Python 2 when a format - # argument (e.g. `output`) has a non-ascii character. 
- "Command errored out with exit status {exit_status}:\n" - " command: {command_display}\n" - " cwd: {cwd_display}\n" - "Complete output ({line_count} lines):\n{output}{divider}" - ).format( - exit_status=exit_status, - command_display=command, - cwd_display=cwd, - line_count=len(lines), - output=output, - divider=LOG_DIVIDER, - ) - return msg - - def call_subprocess( - cmd, # type: Union[List[str], CommandArgs] - show_stdout=False, # type: bool - cwd=None, # type: Optional[str] - on_returncode="raise", # type: str - extra_ok_returncodes=None, # type: Optional[Iterable[int]] - command_desc=None, # type: Optional[str] - extra_environ=None, # type: Optional[Mapping[str, Any]] - unset_environ=None, # type: Optional[Iterable[str]] - spinner=None, # type: Optional[SpinnerInterface] - log_failed_cmd=True, # type: Optional[bool] - stdout_only=False, # type: Optional[bool] -): - # type: (...) -> str + cmd: Union[List[str], CommandArgs], + show_stdout: bool = False, + cwd: Optional[str] = None, + on_returncode: 'Literal["raise", "warn", "ignore"]' = "raise", + extra_ok_returncodes: Optional[Iterable[int]] = None, + extra_environ: Optional[Mapping[str, Any]] = None, + unset_environ: Optional[Iterable[str]] = None, + spinner: Optional[SpinnerInterface] = None, + log_failed_cmd: Optional[bool] = True, + stdout_only: Optional[bool] = False, + *, + command_desc: str, +) -> str: """ Args: show_stdout: if true, use INFO to log the subprocess's stderr and @@ -156,9 +131,6 @@ def call_subprocess( # and we have a spinner. use_spinner = not showing_subprocess and spinner is not None - if command_desc is None: - command_desc = format_command_args(cmd) - log_subprocess("Running command %s", command_desc) env = os.environ.copy() if extra_environ: @@ -191,7 +163,7 @@ def call_subprocess( proc.stdin.close() # In this mode, stdout and stderr are in the same pipe. while True: - line = proc.stdout.readline() # type: str + line: str = proc.stdout.readline() if not line: break line = line.rstrip() @@ -231,17 +203,25 @@ def call_subprocess( spinner.finish("done") if proc_had_error: if on_returncode == "raise": - if not showing_subprocess and log_failed_cmd: - # Then the subprocess streams haven't been logged to the - # console yet. - msg = make_subprocess_output_error( - cmd_args=cmd, - cwd=cwd, - lines=all_output, - exit_status=proc.returncode, + error = InstallationSubprocessError( + command_description=command_desc, + exit_code=proc.returncode, + output_lines=all_output if not showing_subprocess else None, + ) + if log_failed_cmd: + subprocess_logger.error("[present-diagnostic] %s", error) + subprocess_logger.verbose( + "[bold magenta]full command[/]: [blue]%s[/]", + escape(format_command_args(cmd)), + extra={"markup": True}, ) - subprocess_logger.error(msg) - raise InstallationSubprocessError(proc.returncode, command_desc) + subprocess_logger.verbose( + "[bold magenta]cwd[/]: %s", + escape(cwd or "[inherit]"), + extra={"markup": True}, + ) + + raise error elif on_returncode == "warn": subprocess_logger.warning( 'Command "%s" had error code %s in %s', @@ -256,8 +236,7 @@ def call_subprocess( return output -def runner_with_spinner_message(message): - # type: (str) -> Callable[..., None] +def runner_with_spinner_message(message: str) -> Callable[..., None]: """Provide a subprocess_runner that shows a spinner message. Intended for use with for pep517's Pep517HookCaller. 
Thus, the runner has @@ -265,14 +244,14 @@ def runner_with_spinner_message(message): """ def runner( - cmd, # type: List[str] - cwd=None, # type: Optional[str] - extra_environ=None, # type: Optional[Mapping[str, Any]] - ): - # type: (...) -> None + cmd: List[str], + cwd: Optional[str] = None, + extra_environ: Optional[Mapping[str, Any]] = None, + ) -> None: with open_spinner(message) as spinner: call_subprocess( cmd, + command_desc=message, cwd=cwd, extra_environ=extra_environ, spinner=spinner, diff --git a/pipenv/patched/notpip/_internal/utils/temp_dir.py b/pipenv/patched/notpip/_internal/utils/temp_dir.py index 1bc0c14e89..cf26edec91 100644 --- a/pipenv/patched/notpip/_internal/utils/temp_dir.py +++ b/pipenv/patched/notpip/_internal/utils/temp_dir.py @@ -22,12 +22,11 @@ ) -_tempdir_manager = None # type: Optional[ExitStack] +_tempdir_manager: Optional[ExitStack] = None @contextmanager -def global_tempdir_manager(): - # type: () -> Iterator[None] +def global_tempdir_manager() -> Iterator[None]: global _tempdir_manager with ExitStack() as stack: old_tempdir_manager, _tempdir_manager = _tempdir_manager, stack @@ -40,31 +39,27 @@ def global_tempdir_manager(): class TempDirectoryTypeRegistry: """Manages temp directory behavior""" - def __init__(self): - # type: () -> None - self._should_delete = {} # type: Dict[str, bool] + def __init__(self) -> None: + self._should_delete: Dict[str, bool] = {} - def set_delete(self, kind, value): - # type: (str, bool) -> None + def set_delete(self, kind: str, value: bool) -> None: """Indicate whether a TempDirectory of the given kind should be auto-deleted. """ self._should_delete[kind] = value - def get_delete(self, kind): - # type: (str) -> bool + def get_delete(self, kind: str) -> bool: """Get configured auto-delete flag for a given TempDirectory type, default True. """ return self._should_delete.get(kind, True) -_tempdir_registry = None # type: Optional[TempDirectoryTypeRegistry] +_tempdir_registry: Optional[TempDirectoryTypeRegistry] = None @contextmanager -def tempdir_registry(): - # type: () -> Iterator[TempDirectoryTypeRegistry] +def tempdir_registry() -> Iterator[TempDirectoryTypeRegistry]: """Provides a scoped global tempdir registry that can be used to dictate whether directories should be deleted. 
""" @@ -107,10 +102,10 @@ class TempDirectory: def __init__( self, - path=None, # type: Optional[str] - delete=_default, # type: Union[bool, None, _Default] - kind="temp", # type: str - globally_managed=False, # type: bool + path: Optional[str] = None, + delete: Union[bool, None, _Default] = _default, + kind: str = "temp", + globally_managed: bool = False, ): super().__init__() @@ -139,21 +134,17 @@ def __init__( _tempdir_manager.enter_context(self) @property - def path(self): - # type: () -> str + def path(self) -> str: assert not self._deleted, f"Attempted to access deleted path: {self._path}" return self._path - def __repr__(self): - # type: () -> str + def __repr__(self) -> str: return f"<{self.__class__.__name__} {self.path!r}>" - def __enter__(self): - # type: (_T) -> _T + def __enter__(self: _T) -> _T: return self - def __exit__(self, exc, value, tb): - # type: (Any, Any, Any) -> None + def __exit__(self, exc: Any, value: Any, tb: Any) -> None: if self.delete is not None: delete = self.delete elif _tempdir_registry: @@ -164,8 +155,7 @@ def __exit__(self, exc, value, tb): if delete: self.cleanup() - def _create(self, kind): - # type: (str) -> str + def _create(self, kind: str) -> str: """Create a temporary directory and store its path in self.path""" # We realpath here because some systems have their default tmpdir # symlinked to another directory. This tends to confuse build @@ -175,8 +165,7 @@ def _create(self, kind): logger.debug("Created temporary directory: %s", path) return path - def cleanup(self): - # type: () -> None + def cleanup(self) -> None: """Remove the temporary directory created and reset state""" self._deleted = True if not os.path.exists(self._path): @@ -206,14 +195,12 @@ class AdjacentTempDirectory(TempDirectory): # with leading '-' and invalid metadata LEADING_CHARS = "-~.=%0123456789" - def __init__(self, original, delete=None): - # type: (str, Optional[bool]) -> None + def __init__(self, original: str, delete: Optional[bool] = None) -> None: self.original = original.rstrip("/\\") super().__init__(delete=delete) @classmethod - def _generate_names(cls, name): - # type: (str) -> Iterator[str] + def _generate_names(cls, name: str) -> Iterator[str]: """Generates a series of temporary names. 
The algorithm replaces the leading characters in the name @@ -238,8 +225,7 @@ def _generate_names(cls, name): if new_name != name: yield new_name - def _create(self, kind): - # type: (str) -> str + def _create(self, kind: str) -> str: root, name = os.path.split(self.original) for candidate in self._generate_names(name): path = os.path.join(root, candidate) diff --git a/pipenv/patched/notpip/_internal/utils/unpacking.py b/pipenv/patched/notpip/_internal/utils/unpacking.py index aea8900a4f..0a9b5f4500 100644 --- a/pipenv/patched/notpip/_internal/utils/unpacking.py +++ b/pipenv/patched/notpip/_internal/utils/unpacking.py @@ -40,16 +40,14 @@ logger.debug("lzma module is not available") -def current_umask(): - # type: () -> int +def current_umask() -> int: """Get the current umask which involves having to set it temporarily.""" mask = os.umask(0) os.umask(mask) return mask -def split_leading_dir(path): - # type: (str) -> List[str] +def split_leading_dir(path: str) -> List[str]: path = path.lstrip("/").lstrip("\\") if "/" in path and ( ("\\" in path and path.find("/") < path.find("\\")) or "\\" not in path @@ -61,8 +59,7 @@ def split_leading_dir(path): return [path, ""] -def has_leading_dir(paths): - # type: (Iterable[str]) -> bool +def has_leading_dir(paths: Iterable[str]) -> bool: """Returns true if all the paths have the same leading path name (i.e., everything is in one subdirectory in an archive)""" common_prefix = None @@ -77,8 +74,7 @@ def has_leading_dir(paths): return True -def is_within_directory(directory, target): - # type: (str, str) -> bool +def is_within_directory(directory: str, target: str) -> bool: """ Return true if the absolute path of target is within the directory """ @@ -89,8 +85,7 @@ def is_within_directory(directory, target): return prefix == abs_directory -def set_extracted_file_to_default_mode_plus_executable(path): - # type: (str) -> None +def set_extracted_file_to_default_mode_plus_executable(path: str) -> None: """ Make file present at path have execute for user/group/world (chmod +x) is no-op on windows per python docs @@ -98,16 +93,14 @@ def set_extracted_file_to_default_mode_plus_executable(path): os.chmod(path, (0o777 & ~current_umask() | 0o111)) -def zip_item_is_executable(info): - # type: (ZipInfo) -> bool +def zip_item_is_executable(info: ZipInfo) -> bool: mode = info.external_attr >> 16 # if mode and regular file and any execute permissions for # user/group/world? return bool(mode and stat.S_ISREG(mode) and mode & 0o111) -def unzip_file(filename, location, flatten=True): - # type: (str, str, bool) -> None +def unzip_file(filename: str, location: str, flatten: bool = True) -> None: """ Unzip the file (with path `filename`) to the destination `location`. All files are written based on system defaults and umask (i.e. permissions are @@ -153,8 +146,7 @@ def unzip_file(filename, location, flatten=True): zipfp.close() -def untar_file(filename, location): - # type: (str, str) -> None +def untar_file(filename: str, location: str) -> None: """ Untar the file (with path `filename`) to the destination `location`. All files are written based on system defaults and umask (i.e. permissions @@ -236,11 +228,10 @@ def untar_file(filename, location): def unpack_file( - filename, # type: str - location, # type: str - content_type=None, # type: Optional[str] -): - # type: (...) 
-> None + filename: str, + location: str, + content_type: Optional[str] = None, +) -> None: filename = os.path.realpath(filename) if ( content_type == "application/zip" diff --git a/pipenv/patched/notpip/_internal/utils/urls.py b/pipenv/patched/notpip/_internal/utils/urls.py index 7b51052c9a..6ba2e04f35 100644 --- a/pipenv/patched/notpip/_internal/utils/urls.py +++ b/pipenv/patched/notpip/_internal/utils/urls.py @@ -7,15 +7,13 @@ from .compat import WINDOWS -def get_url_scheme(url): - # type: (str) -> Optional[str] +def get_url_scheme(url: str) -> Optional[str]: if ":" not in url: return None return url.split(":", 1)[0].lower() -def path_to_url(path): - # type: (str) -> str +def path_to_url(path: str) -> str: """ Convert a path to a file: URL. The path will be made absolute and have quoted path parts. @@ -25,8 +23,7 @@ def path_to_url(path): return url -def url_to_path(url): - # type: (str) -> str +def url_to_path(url: str) -> str: """ Convert a file: URL to a path. """ diff --git a/pipenv/patched/notpip/_internal/utils/virtualenv.py b/pipenv/patched/notpip/_internal/utils/virtualenv.py index 51cacb55ca..c926db4c33 100644 --- a/pipenv/patched/notpip/_internal/utils/virtualenv.py +++ b/pipenv/patched/notpip/_internal/utils/virtualenv.py @@ -11,8 +11,7 @@ ) -def _running_under_venv(): - # type: () -> bool +def _running_under_venv() -> bool: """Checks if sys.base_prefix and sys.prefix match. This handles PEP 405 compliant virtual environments. @@ -20,8 +19,7 @@ def _running_under_venv(): return sys.prefix != getattr(sys, "base_prefix", sys.prefix) -def _running_under_regular_virtualenv(): - # type: () -> bool +def _running_under_regular_virtualenv() -> bool: """Checks if sys.real_prefix is set. This handles virtual environments created with pypa's virtualenv. @@ -30,14 +28,12 @@ def _running_under_regular_virtualenv(): return hasattr(sys, "real_prefix") -def running_under_virtualenv(): - # type: () -> bool +def running_under_virtualenv() -> bool: """Return True if we're running inside a virtualenv, False otherwise.""" return _running_under_venv() or _running_under_regular_virtualenv() -def _get_pyvenv_cfg_lines(): - # type: () -> Optional[List[str]] +def _get_pyvenv_cfg_lines() -> Optional[List[str]]: """Reads {sys.prefix}/pyvenv.cfg and returns its contents as list of lines Returns None, if it could not read/access the file. @@ -52,8 +48,7 @@ def _get_pyvenv_cfg_lines(): return None -def _no_global_under_venv(): - # type: () -> bool +def _no_global_under_venv() -> bool: """Check `{sys.prefix}/pyvenv.cfg` for system site-packages inclusion PEP 405 specifies that when system site-packages are not supposed to be @@ -82,8 +77,7 @@ def _no_global_under_venv(): return False -def _no_global_under_regular_virtualenv(): - # type: () -> bool +def _no_global_under_regular_virtualenv() -> bool: """Check if "no-global-site-packages.txt" exists beside site.py This mirrors logic in pypa/virtualenv for determining whether system @@ -97,8 +91,7 @@ def _no_global_under_regular_virtualenv(): return os.path.exists(no_global_site_packages_file) -def virtualenv_no_global(): - # type: () -> bool +def virtualenv_no_global() -> bool: """Returns a boolean, whether running in venv with no system site-packages.""" # PEP 405 compliance needs to be checked first since virtualenv >=20 would # return True for both checks, but is only able to use the PEP 405 config. 
diff --git a/pipenv/patched/notpip/_internal/utils/wheel.py b/pipenv/patched/notpip/_internal/utils/wheel.py index a125b5ffd0..9e9627cd1e 100644 --- a/pipenv/patched/notpip/_internal/utils/wheel.py +++ b/pipenv/patched/notpip/_internal/utils/wheel.py @@ -4,14 +4,12 @@ import logging from email.message import Message from email.parser import Parser -from typing import Dict, Tuple +from typing import Tuple from zipfile import BadZipFile, ZipFile from pipenv.patched.notpip._vendor.packaging.utils import canonicalize_name -from pipenv.patched.notpip._vendor.pkg_resources import DistInfoDistribution, Distribution from pipenv.patched.notpip._internal.exceptions import UnsupportedWheel -from pipenv.patched.notpip._internal.utils.pkg_resources import DictMetadata VERSION_COMPATIBLE = (1, 0) @@ -19,53 +17,7 @@ logger = logging.getLogger(__name__) -class WheelMetadata(DictMetadata): - """Metadata provider that maps metadata decoding exceptions to our - internal exception type. - """ - - def __init__(self, metadata, wheel_name): - # type: (Dict[str, bytes], str) -> None - super().__init__(metadata) - self._wheel_name = wheel_name - - def get_metadata(self, name): - # type: (str) -> str - try: - return super().get_metadata(name) - except UnicodeDecodeError as e: - # Augment the default error with the origin of the file. - raise UnsupportedWheel( - f"Error decoding metadata for {self._wheel_name}: {e}" - ) - - -def pkg_resources_distribution_for_wheel(wheel_zip, name, location): - # type: (ZipFile, str, str) -> Distribution - """Get a pkg_resources distribution given a wheel. - - :raises UnsupportedWheel: on any errors - """ - info_dir, _ = parse_wheel(wheel_zip, name) - - metadata_files = [p for p in wheel_zip.namelist() if p.startswith(f"{info_dir}/")] - - metadata_text = {} # type: Dict[str, bytes] - for path in metadata_files: - _, metadata_name = path.split("/", 1) - - try: - metadata_text[metadata_name] = read_wheel_metadata_file(wheel_zip, path) - except UnsupportedWheel as e: - raise UnsupportedWheel("{} has an invalid wheel, {}".format(name, str(e))) - - metadata = WheelMetadata(metadata_text, location) - - return DistInfoDistribution(location=location, metadata=metadata, project_name=name) - - -def parse_wheel(wheel_zip, name): - # type: (ZipFile, str) -> Tuple[str, Message] +def parse_wheel(wheel_zip: ZipFile, name: str) -> Tuple[str, Message]: """Extract information from the provided wheel, ensuring it meets basic standards. @@ -83,8 +35,7 @@ def parse_wheel(wheel_zip, name): return info_dir, metadata -def wheel_dist_info_dir(source, name): - # type: (ZipFile, str) -> str +def wheel_dist_info_dir(source: ZipFile, name: str) -> str: """Returns the name of the contained .dist-info directory. Raises AssertionError or UnsupportedWheel if not found, >1 found, or @@ -117,8 +68,7 @@ def wheel_dist_info_dir(source, name): return info_dir -def read_wheel_metadata_file(source, path): - # type: (ZipFile, str) -> bytes +def read_wheel_metadata_file(source: ZipFile, path: str) -> bytes: try: return source.read(path) # BadZipFile for general corruption, KeyError for missing entry, @@ -127,8 +77,7 @@ def read_wheel_metadata_file(source, path): raise UnsupportedWheel(f"could not read {path!r} file: {e!r}") -def wheel_metadata(source, dist_info_dir): - # type: (ZipFile, str) -> Message +def wheel_metadata(source: ZipFile, dist_info_dir: str) -> Message: """Return the WHEEL metadata of an extracted wheel, if possible. Otherwise, raise UnsupportedWheel. 
""" @@ -147,8 +96,7 @@ def wheel_metadata(source, dist_info_dir): return Parser().parsestr(wheel_text) -def wheel_version(wheel_data): - # type: (Message) -> Tuple[int, ...] +def wheel_version(wheel_data: Message) -> Tuple[int, ...]: """Given WHEEL metadata, return the parsed Wheel-Version. Otherwise, raise UnsupportedWheel. """ @@ -164,8 +112,7 @@ def wheel_version(wheel_data): raise UnsupportedWheel(f"invalid Wheel-Version: {version!r}") -def check_compatibility(version, name): - # type: (Tuple[int, ...], str) -> None +def check_compatibility(version: Tuple[int, ...], name: str) -> None: """Raises errors or warns if called with an incompatible Wheel-Version. pip should refuse to install a Wheel-Version that's a major series diff --git a/pipenv/patched/notpip/_internal/vcs/bazaar.py b/pipenv/patched/notpip/_internal/vcs/bazaar.py index 0913297c64..b33d967f22 100644 --- a/pipenv/patched/notpip/_internal/vcs/bazaar.py +++ b/pipenv/patched/notpip/_internal/vcs/bazaar.py @@ -16,61 +16,65 @@ class Bazaar(VersionControl): - name = 'bzr' - dirname = '.bzr' - repo_name = 'branch' + name = "bzr" + dirname = ".bzr" + repo_name = "branch" schemes = ( - 'bzr+http', 'bzr+https', 'bzr+ssh', 'bzr+sftp', 'bzr+ftp', - 'bzr+lp', 'bzr+file' + "bzr+http", + "bzr+https", + "bzr+ssh", + "bzr+sftp", + "bzr+ftp", + "bzr+lp", + "bzr+file", ) @staticmethod - def get_base_rev_args(rev): - # type: (str) -> List[str] - return ['-r', rev] + def get_base_rev_args(rev: str) -> List[str]: + return ["-r", rev] - def fetch_new(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None + def fetch_new( + self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int + ) -> None: rev_display = rev_options.to_display() logger.info( - 'Checking out %s%s to %s', + "Checking out %s%s to %s", url, rev_display, display_path(dest), ) - cmd_args = ( - make_command('branch', '-q', rev_options.to_args(), url, dest) - ) + if verbosity <= 0: + flag = "--quiet" + elif verbosity == 1: + flag = "" + else: + flag = f"-{'v'*verbosity}" + cmd_args = make_command("branch", flag, rev_options.to_args(), url, dest) self.run_command(cmd_args) - def switch(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None - self.run_command(make_command('switch', url), cwd=dest) + def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: + self.run_command(make_command("switch", url), cwd=dest) - def update(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None - cmd_args = make_command('pull', '-q', rev_options.to_args()) + def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: + cmd_args = make_command("pull", "-q", rev_options.to_args()) self.run_command(cmd_args, cwd=dest) @classmethod - def get_url_rev_and_auth(cls, url): - # type: (str) -> Tuple[str, Optional[str], AuthInfo] + def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]: # hotfix the URL scheme after removing bzr+ from bzr+ssh:// readd it url, rev, user_pass = super().get_url_rev_and_auth(url) - if url.startswith('ssh://'): - url = 'bzr+' + url + if url.startswith("ssh://"): + url = "bzr+" + url return url, rev, user_pass @classmethod - def get_remote_url(cls, location): - # type: (str) -> str + def get_remote_url(cls, location: str) -> str: urls = cls.run_command( - ['info'], show_stdout=False, stdout_only=True, cwd=location + ["info"], show_stdout=False, stdout_only=True, cwd=location ) for line in urls.splitlines(): line = line.strip() - for x 
in ('checkout of branch: ', - 'parent branch: '): + for x in ("checkout of branch: ", "parent branch: "): if line.startswith(x): repo = line.split(x)[1] if cls._is_local_repository(repo): @@ -79,16 +83,17 @@ def get_remote_url(cls, location): raise RemoteNotFoundError @classmethod - def get_revision(cls, location): - # type: (str) -> str + def get_revision(cls, location: str) -> str: revision = cls.run_command( - ['revno'], show_stdout=False, stdout_only=True, cwd=location, + ["revno"], + show_stdout=False, + stdout_only=True, + cwd=location, ) return revision.splitlines()[-1] @classmethod - def is_commit_id_equal(cls, dest, name): - # type: (str, Optional[str]) -> bool + def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool: """Always assume the versions don't match""" return False diff --git a/pipenv/patched/notpip/_internal/vcs/git.py b/pipenv/patched/notpip/_internal/vcs/git.py index 37b0e787a5..c8ba695ca8 100644 --- a/pipenv/patched/notpip/_internal/vcs/git.py +++ b/pipenv/patched/notpip/_internal/vcs/git.py @@ -34,10 +34,11 @@ r".*$" # Suffix, including any pre- and post-release segments we don't care about. ) -HASH_REGEX = re.compile('^[a-fA-F0-9]{40}$') +HASH_REGEX = re.compile("^[a-fA-F0-9]{40}$") # SCP (Secure copy protocol) shorthand. e.g. 'git@example.com:foo/bar.git' -SCP_REGEX = re.compile(r"""^ +SCP_REGEX = re.compile( + r"""^ # Optional user, e.g. 'git@' (\w+@)? # Server, e.g. 'github.com'. @@ -46,33 +47,36 @@ # alphanumeric character so as not to be confusable with a Windows paths # like 'C:/foo/bar' or 'C:\foo\bar'. (\w[^:]*) -$""", re.VERBOSE) + $""", + re.VERBOSE, +) -def looks_like_hash(sha): - # type: (str) -> bool +def looks_like_hash(sha: str) -> bool: return bool(HASH_REGEX.match(sha)) class Git(VersionControl): - name = 'git' - dirname = '.git' - repo_name = 'clone' + name = "git" + dirname = ".git" + repo_name = "clone" schemes = ( - 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file', + "git+http", + "git+https", + "git+ssh", + "git+git", + "git+file", ) # Prevent the user's environment variables from interfering with pip: # https://github.com/pypa/pip/issues/1130 - unset_environ = ('GIT_DIR', 'GIT_WORK_TREE') - default_arg_rev = 'HEAD' + unset_environ = ("GIT_DIR", "GIT_WORK_TREE") + default_arg_rev = "HEAD" @staticmethod - def get_base_rev_args(rev): - # type: (str) -> List[str] + def get_base_rev_args(rev: str) -> List[str]: return [rev] - def is_immutable_rev_checkout(self, url, dest): - # type: (str, str) -> bool + def is_immutable_rev_checkout(self, url: str, dest: str) -> bool: _, rev_options = self.get_url_rev_options(hide_url(url)) if not rev_options.rev: return False @@ -83,23 +87,24 @@ def is_immutable_rev_checkout(self, url, dest): # return False in the rare case rev is both a commit hash # and a tag or a branch; we don't want to cache in that case # because that branch/tag could point to something else in the future - is_tag_or_branch = bool( - self.get_revision_sha(dest, rev_options.rev)[0] - ) + is_tag_or_branch = bool(self.get_revision_sha(dest, rev_options.rev)[0]) return not is_tag_or_branch def get_git_version(self) -> Tuple[int, ...]: version = self.run_command( - ['version'], show_stdout=False, stdout_only=True + ["version"], + command_desc="git version", + show_stdout=False, + stdout_only=True, ) match = GIT_VERSION_REGEX.match(version) if not match: + logger.warning("Can't parse git version: %s", version) return () return tuple(int(c) for c in match.groups()) @classmethod - def get_current_branch(cls, location): - # 
type: (str) -> Optional[str] + def get_current_branch(cls, location: str) -> Optional[str]: """ Return the current branch, or None if HEAD isn't at a branch (e.g. detached HEAD). @@ -108,24 +113,23 @@ def get_current_branch(cls, location): # HEAD rather than a symbolic ref. In addition, the -q causes the # command to exit with status code 1 instead of 128 in this case # and to suppress the message to stderr. - args = ['symbolic-ref', '-q', 'HEAD'] + args = ["symbolic-ref", "-q", "HEAD"] output = cls.run_command( args, - extra_ok_returncodes=(1, ), + extra_ok_returncodes=(1,), show_stdout=False, stdout_only=True, cwd=location, ) ref = output.strip() - if ref.startswith('refs/heads/'): - return ref[len('refs/heads/'):] + if ref.startswith("refs/heads/"): + return ref[len("refs/heads/") :] return None @classmethod - def get_revision_sha(cls, dest, rev): - # type: (str, str) -> Tuple[Optional[str], bool] + def get_revision_sha(cls, dest: str, rev: str) -> Tuple[Optional[str], bool]: """ Return (sha_or_none, is_branch), where sha_or_none is a commit hash if the revision names a remote branch or tag, otherwise None. @@ -136,11 +140,11 @@ def get_revision_sha(cls, dest, rev): """ # Pass rev to pre-filter the list. output = cls.run_command( - ['show-ref', rev], + ["show-ref", rev], cwd=dest, show_stdout=False, stdout_only=True, - on_returncode='ignore', + on_returncode="ignore", ) refs = {} # NOTE: We do not use splitlines here since that would split on other @@ -155,12 +159,12 @@ def get_revision_sha(cls, dest, rev): except ValueError: # Include the offending line to simplify troubleshooting if # this error ever occurs. - raise ValueError(f'unexpected show-ref line: {line!r}') + raise ValueError(f"unexpected show-ref line: {line!r}") refs[ref_name] = ref_sha - branch_ref = f'refs/remotes/origin/{rev}' - tag_ref = f'refs/tags/{rev}' + branch_ref = f"refs/remotes/origin/{rev}" + tag_ref = f"refs/tags/{rev}" sha = refs.get(branch_ref) if sha is not None: @@ -171,8 +175,7 @@ def get_revision_sha(cls, dest, rev): return (sha, False) @classmethod - def _should_fetch(cls, dest, rev): - # type: (str, str) -> bool + def _should_fetch(cls, dest: str, rev: str) -> bool: """ Return true if rev is a ref or is a commit that we don't have locally. @@ -195,8 +198,9 @@ def _should_fetch(cls, dest, rev): return True @classmethod - def resolve_revision(cls, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> RevOptions + def resolve_revision( + cls, dest: str, url: HiddenText, rev_options: RevOptions + ) -> RevOptions: """ Resolve a revision to a new RevOptions object with the SHA1 of the branch, tag, or ref if found. @@ -230,18 +234,17 @@ def resolve_revision(cls, dest, url, rev_options): # fetch the requested revision cls.run_command( - make_command('fetch', '-q', url, rev_options.to_args()), + make_command("fetch", "-q", url, rev_options.to_args()), cwd=dest, ) # Change the revision to the SHA of the ref we fetched - sha = cls.get_revision(dest, rev='FETCH_HEAD') + sha = cls.get_revision(dest, rev="FETCH_HEAD") rev_options = rev_options.make_new(sha) return rev_options @classmethod - def is_commit_id_equal(cls, dest, name): - # type: (str, Optional[str]) -> bool + def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool: """ Return whether the current commit hash equals the given name. 
@@ -255,30 +258,58 @@ def is_commit_id_equal(cls, dest, name): return cls.get_revision(dest) == name - def fetch_new(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None + def fetch_new( + self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int + ) -> None: rev_display = rev_options.to_display() - logger.info('Cloning %s%s to %s', url, rev_display, display_path(dest)) - self.run_command(make_command('clone', '-q', url, dest)) + logger.info("Cloning %s%s to %s", url, rev_display, display_path(dest)) + if verbosity <= 0: + flags: Tuple[str, ...] = ("--quiet",) + elif verbosity == 1: + flags = () + else: + flags = ("--verbose", "--progress") + if self.get_git_version() >= (2, 17): + # Git added support for partial clone in 2.17 + # https://git-scm.com/docs/partial-clone + # Speeds up cloning by functioning without a complete copy of repository + self.run_command( + make_command( + "clone", + "--filter=blob:none", + *flags, + url, + dest, + ) + ) + else: + self.run_command(make_command("clone", *flags, url, dest)) if rev_options.rev: # Then a specific revision was requested. rev_options = self.resolve_revision(dest, url, rev_options) - branch_name = getattr(rev_options, 'branch_name', None) + branch_name = getattr(rev_options, "branch_name", None) + logger.debug("Rev options %s, branch_name %s", rev_options, branch_name) if branch_name is None: # Only do a checkout if the current commit id doesn't match # the requested revision. if not self.is_commit_id_equal(dest, rev_options.rev): cmd_args = make_command( - 'checkout', '-q', rev_options.to_args(), + "checkout", + "-q", + rev_options.to_args(), ) self.run_command(cmd_args, cwd=dest) elif self.get_current_branch(dest) != branch_name: # Then a specific branch was requested, and that branch # is not yet checked out. 
- track_branch = f'origin/{branch_name}' + track_branch = f"origin/{branch_name}" cmd_args = [ - 'checkout', '-b', branch_name, '--track', track_branch, + "checkout", + "-b", + branch_name, + "--track", + track_branch, ] self.run_command(cmd_args, cwd=dest) else: @@ -290,35 +321,32 @@ def fetch_new(self, dest, url, rev_options): #: repo may contain submodules self.update_submodules(dest) - def switch(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None + def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: self.run_command( - make_command('config', 'remote.origin.url', url), + make_command("config", "remote.origin.url", url), cwd=dest, ) - cmd_args = make_command('checkout', '-q', rev_options.to_args()) + cmd_args = make_command("checkout", "-q", rev_options.to_args()) self.run_command(cmd_args, cwd=dest) self.update_submodules(dest) - def update(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None + def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: # First fetch changes from the default remote if self.get_git_version() >= (1, 9): # fetch tags in addition to everything else - self.run_command(['fetch', '-q', '--tags'], cwd=dest) + self.run_command(["fetch", "-q", "--tags"], cwd=dest) else: - self.run_command(['fetch', '-q'], cwd=dest) + self.run_command(["fetch", "-q"], cwd=dest) # Then reset to wanted revision (maybe even origin/master) rev_options = self.resolve_revision(dest, url, rev_options) - cmd_args = make_command('reset', '--hard', '-q', rev_options.to_args()) + cmd_args = make_command("reset", "--hard", "-q", rev_options.to_args()) self.run_command(cmd_args, cwd=dest) #: update submodules self.update_submodules(dest) @classmethod - def get_remote_url(cls, location): - # type: (str) -> str + def get_remote_url(cls, location: str) -> str: """ Return URL of the first remote encountered. @@ -328,8 +356,8 @@ def get_remote_url(cls, location): # We need to pass 1 for extra_ok_returncodes since the command # exits with return code 1 if there are no matching lines. stdout = cls.run_command( - ['config', '--get-regexp', r'remote\..*\.url'], - extra_ok_returncodes=(1, ), + ["config", "--get-regexp", r"remote\..*\.url"], + extra_ok_returncodes=(1,), show_stdout=False, stdout_only=True, cwd=location, @@ -341,15 +369,14 @@ def get_remote_url(cls, location): raise RemoteNotFoundError for remote in remotes: - if remote.startswith('remote.origin.url '): + if remote.startswith("remote.origin.url "): found_remote = remote break - url = found_remote.split(' ')[1] + url = found_remote.split(" ")[1] return cls._git_remote_to_pip_url(url.strip()) @staticmethod - def _git_remote_to_pip_url(url): - # type: (str) -> str + def _git_remote_to_pip_url(url: str) -> str: """ Convert a remote url from what git uses to what pip accepts. @@ -380,14 +407,13 @@ def _git_remote_to_pip_url(url): raise RemoteNotValidError(url) @classmethod - def has_commit(cls, location, rev): - # type: (str, str) -> bool + def has_commit(cls, location: str, rev: str) -> bool: """ Check if rev is a commit that is available in the local repository. 
""" try: cls.run_command( - ['rev-parse', '-q', '--verify', "sha^" + rev], + ["rev-parse", "-q", "--verify", "sha^" + rev], cwd=location, log_failed_cmd=False, ) @@ -397,12 +423,11 @@ def has_commit(cls, location, rev): return True @classmethod - def get_revision(cls, location, rev=None): - # type: (str, Optional[str]) -> str + def get_revision(cls, location: str, rev: Optional[str] = None) -> str: if rev is None: - rev = 'HEAD' + rev = "HEAD" current_rev = cls.run_command( - ['rev-parse', rev], + ["rev-parse", rev], show_stdout=False, stdout_only=True, cwd=location, @@ -410,27 +435,25 @@ def get_revision(cls, location, rev=None): return current_rev.strip() @classmethod - def get_subdirectory(cls, location): - # type: (str) -> Optional[str] + def get_subdirectory(cls, location: str) -> Optional[str]: """ Return the path to Python project root, relative to the repo root. Return None if the project root is in the repo root. """ # find the repo root git_dir = cls.run_command( - ['rev-parse', '--git-dir'], + ["rev-parse", "--git-dir"], show_stdout=False, stdout_only=True, cwd=location, ).strip() if not os.path.isabs(git_dir): git_dir = os.path.join(location, git_dir) - repo_root = os.path.abspath(os.path.join(git_dir, '..')) + repo_root = os.path.abspath(os.path.join(git_dir, "..")) return find_path_to_project_root_from_repo_root(location, repo_root) @classmethod - def get_url_rev_and_auth(cls, url): - # type: (str) -> Tuple[str, Optional[str], AuthInfo] + def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]: """ Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'. That's required because although they use SSH they sometimes don't @@ -440,66 +463,63 @@ def get_url_rev_and_auth(cls, url): # Works around an apparent Git bug # (see https://article.gmane.org/gmane.comp.version-control.git/146500) scheme, netloc, path, query, fragment = urlsplit(url) - if scheme.endswith('file'): - initial_slashes = path[:-len(path.lstrip('/'))] - newpath = ( - initial_slashes + - urllib.request.url2pathname(path) - .replace('\\', '/').lstrip('/') - ) - after_plus = scheme.find('+') + 1 + if scheme.endswith("file"): + initial_slashes = path[: -len(path.lstrip("/"))] + newpath = initial_slashes + urllib.request.url2pathname(path).replace( + "\\", "/" + ).lstrip("/") + after_plus = scheme.find("+") + 1 url = scheme[:after_plus] + urlunsplit( (scheme[after_plus:], netloc, newpath, query, fragment), ) - if '://' not in url: - assert 'file:' not in url - url = url.replace('git+', 'git+ssh://') + if "://" not in url: + assert "file:" not in url + url = url.replace("git+", "git+ssh://") url, rev, user_pass = super().get_url_rev_and_auth(url) - url = url.replace('ssh://', '') + url = url.replace("ssh://", "") else: url, rev, user_pass = super().get_url_rev_and_auth(url) return url, rev, user_pass @classmethod - def update_submodules(cls, location): - # type: (str) -> None - if not os.path.exists(os.path.join(location, '.gitmodules')): + def update_submodules(cls, location: str) -> None: + if not os.path.exists(os.path.join(location, ".gitmodules")): return cls.run_command( - ['submodule', 'update', '--init', '--recursive', '-q'], + ["submodule", "update", "--init", "--recursive", "-q"], cwd=location, ) @classmethod - def get_repository_root(cls, location): - # type: (str) -> Optional[str] + def get_repository_root(cls, location: str) -> Optional[str]: loc = super().get_repository_root(location) if loc: return loc try: r = cls.run_command( - ['rev-parse', '--show-toplevel'], + 
["rev-parse", "--show-toplevel"], cwd=location, show_stdout=False, stdout_only=True, - on_returncode='raise', + on_returncode="raise", log_failed_cmd=False, ) except BadCommand: - logger.debug("could not determine if %s is under git control " - "because git is not available", location) + logger.debug( + "could not determine if %s is under git control " + "because git is not available", + location, + ) return None except InstallationError: return None - return os.path.normpath(r.rstrip('\r\n')) + return os.path.normpath(r.rstrip("\r\n")) @staticmethod - def should_add_vcs_url_prefix(repo_url): - # type: (str) -> bool - """In either https or ssh form, requirements must be prefixed with git+. - """ + def should_add_vcs_url_prefix(repo_url: str) -> bool: + """In either https or ssh form, requirements must be prefixed with git+.""" return True diff --git a/pipenv/patched/notpip/_internal/vcs/mercurial.py b/pipenv/patched/notpip/_internal/vcs/mercurial.py index f108780b60..cdc16ad173 100644 --- a/pipenv/patched/notpip/_internal/vcs/mercurial.py +++ b/pipenv/patched/notpip/_internal/vcs/mercurial.py @@ -1,7 +1,7 @@ import configparser import logging import os -from typing import List, Optional +from typing import List, Optional, Tuple from pipenv.patched.notpip._internal.exceptions import BadCommand, InstallationError from pipenv.patched.notpip._internal.utils.misc import HiddenText, display_path @@ -18,61 +18,68 @@ class Mercurial(VersionControl): - name = 'hg' - dirname = '.hg' - repo_name = 'clone' + name = "hg" + dirname = ".hg" + repo_name = "clone" schemes = ( - 'hg+file', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http', + "hg+file", + "hg+http", + "hg+https", + "hg+ssh", + "hg+static-http", ) @staticmethod - def get_base_rev_args(rev): - # type: (str) -> List[str] + def get_base_rev_args(rev: str) -> List[str]: return [rev] - def fetch_new(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None + def fetch_new( + self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int + ) -> None: rev_display = rev_options.to_display() logger.info( - 'Cloning hg %s%s to %s', + "Cloning hg %s%s to %s", url, rev_display, display_path(dest), ) - self.run_command(make_command('clone', '--noupdate', '-q', url, dest)) + if verbosity <= 0: + flags: Tuple[str, ...] 
= ("--quiet",) + elif verbosity == 1: + flags = () + elif verbosity == 2: + flags = ("--verbose",) + else: + flags = ("--verbose", "--debug") + self.run_command(make_command("clone", "--noupdate", *flags, url, dest)) self.run_command( - make_command('update', '-q', rev_options.to_args()), + make_command("update", *flags, rev_options.to_args()), cwd=dest, ) - def switch(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None - repo_config = os.path.join(dest, self.dirname, 'hgrc') + def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: + repo_config = os.path.join(dest, self.dirname, "hgrc") config = configparser.RawConfigParser() try: config.read(repo_config) - config.set('paths', 'default', url.secret) - with open(repo_config, 'w') as config_file: + config.set("paths", "default", url.secret) + with open(repo_config, "w") as config_file: config.write(config_file) except (OSError, configparser.NoSectionError) as exc: - logger.warning( - 'Could not switch Mercurial repository to %s: %s', url, exc, - ) + logger.warning("Could not switch Mercurial repository to %s: %s", url, exc) else: - cmd_args = make_command('update', '-q', rev_options.to_args()) + cmd_args = make_command("update", "-q", rev_options.to_args()) self.run_command(cmd_args, cwd=dest) - def update(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None - self.run_command(['pull', '-q'], cwd=dest) - cmd_args = make_command('update', '-q', rev_options.to_args()) + def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: + self.run_command(["pull", "-q"], cwd=dest) + cmd_args = make_command("update", "-q", rev_options.to_args()) self.run_command(cmd_args, cwd=dest) @classmethod - def get_remote_url(cls, location): - # type: (str) -> str + def get_remote_url(cls, location: str) -> str: url = cls.run_command( - ['showconfig', 'paths.default'], + ["showconfig", "paths.default"], show_stdout=False, stdout_only=True, cwd=location, @@ -82,13 +89,12 @@ def get_remote_url(cls, location): return url.strip() @classmethod - def get_revision(cls, location): - # type: (str) -> str + def get_revision(cls, location: str) -> str: """ Return the repository-local changeset revision number, as an integer. """ current_revision = cls.run_command( - ['parents', '--template={rev}'], + ["parents", "--template={rev}"], show_stdout=False, stdout_only=True, cwd=location, @@ -96,14 +102,13 @@ def get_revision(cls, location): return current_revision @classmethod - def get_requirement_revision(cls, location): - # type: (str) -> str + def get_requirement_revision(cls, location: str) -> str: """ Return the changeset identification hash, as a 40-character hexadecimal string """ current_rev_hash = cls.run_command( - ['parents', '--template={node}'], + ["parents", "--template={node}"], show_stdout=False, stdout_only=True, cwd=location, @@ -111,48 +116,48 @@ def get_requirement_revision(cls, location): return current_rev_hash @classmethod - def is_commit_id_equal(cls, dest, name): - # type: (str, Optional[str]) -> bool + def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool: """Always assume the versions don't match""" return False @classmethod - def get_subdirectory(cls, location): - # type: (str) -> Optional[str] + def get_subdirectory(cls, location: str) -> Optional[str]: """ Return the path to Python project root, relative to the repo root. Return None if the project root is in the repo root. 
""" # find the repo root repo_root = cls.run_command( - ['root'], show_stdout=False, stdout_only=True, cwd=location + ["root"], show_stdout=False, stdout_only=True, cwd=location ).strip() if not os.path.isabs(repo_root): repo_root = os.path.abspath(os.path.join(location, repo_root)) return find_path_to_project_root_from_repo_root(location, repo_root) @classmethod - def get_repository_root(cls, location): - # type: (str) -> Optional[str] + def get_repository_root(cls, location: str) -> Optional[str]: loc = super().get_repository_root(location) if loc: return loc try: r = cls.run_command( - ['root'], + ["root"], cwd=location, show_stdout=False, stdout_only=True, - on_returncode='raise', + on_returncode="raise", log_failed_cmd=False, ) except BadCommand: - logger.debug("could not determine if %s is under hg control " - "because hg is not available", location) + logger.debug( + "could not determine if %s is under hg control " + "because hg is not available", + location, + ) return None except InstallationError: return None - return os.path.normpath(r.rstrip('\r\n')) + return os.path.normpath(r.rstrip("\r\n")) vcs.register(Mercurial) diff --git a/pipenv/patched/notpip/_internal/vcs/subversion.py b/pipenv/patched/notpip/_internal/vcs/subversion.py index 11bb41ac7d..01f19d9be6 100644 --- a/pipenv/patched/notpip/_internal/vcs/subversion.py +++ b/pipenv/patched/notpip/_internal/vcs/subversion.py @@ -24,30 +24,25 @@ _svn_xml_url_re = re.compile('url="([^"]+)"') _svn_rev_re = re.compile(r'committed-rev="(\d+)"') _svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"') -_svn_info_xml_url_re = re.compile(r'(.*)') +_svn_info_xml_url_re = re.compile(r"(.*)") class Subversion(VersionControl): - name = 'svn' - dirname = '.svn' - repo_name = 'checkout' - schemes = ( - 'svn+ssh', 'svn+http', 'svn+https', 'svn+svn', 'svn+file' - ) + name = "svn" + dirname = ".svn" + repo_name = "checkout" + schemes = ("svn+ssh", "svn+http", "svn+https", "svn+svn", "svn+file") @classmethod - def should_add_vcs_url_prefix(cls, remote_url): - # type: (str) -> bool + def should_add_vcs_url_prefix(cls, remote_url: str) -> bool: return True @staticmethod - def get_base_rev_args(rev): - # type: (str) -> List[str] - return ['-r', rev] + def get_base_rev_args(rev: str) -> List[str]: + return ["-r", rev] @classmethod - def get_revision(cls, location): - # type: (str) -> str + def get_revision(cls, location: str) -> str: """ Return the maximum revision for all files under a given location """ @@ -57,9 +52,9 @@ def get_revision(cls, location): for base, dirs, _ in os.walk(location): if cls.dirname not in dirs: dirs[:] = [] - continue # no sense walking uncontrolled subdirs + continue # no sense walking uncontrolled subdirs dirs.remove(cls.dirname) - entries_fn = os.path.join(base, cls.dirname, 'entries') + entries_fn = os.path.join(base, cls.dirname, "entries") if not os.path.exists(entries_fn): # FIXME: should we warn? 
continue @@ -68,21 +63,22 @@ def get_revision(cls, location): if base == location: assert dirurl is not None - base = dirurl + '/' # save the root url + base = dirurl + "/" # save the root url elif not dirurl or not dirurl.startswith(base): dirs[:] = [] - continue # not part of the same svn tree, skip it + continue # not part of the same svn tree, skip it revision = max(revision, localrev) return str(revision) @classmethod - def get_netloc_and_auth(cls, netloc, scheme): - # type: (str, str) -> Tuple[str, Tuple[Optional[str], Optional[str]]] + def get_netloc_and_auth( + cls, netloc: str, scheme: str + ) -> Tuple[str, Tuple[Optional[str], Optional[str]]]: """ This override allows the auth information to be passed to svn via the --username and --password options instead of via the URL. """ - if scheme == 'ssh': + if scheme == "ssh": # The --username and --password options can't be used for # svn+ssh URLs, so keep the auth information in the URL. return super().get_netloc_and_auth(netloc, scheme) @@ -90,28 +86,27 @@ def get_netloc_and_auth(cls, netloc, scheme): return split_auth_from_netloc(netloc) @classmethod - def get_url_rev_and_auth(cls, url): - # type: (str) -> Tuple[str, Optional[str], AuthInfo] + def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]: # hotfix the URL scheme after removing svn+ from svn+ssh:// readd it url, rev, user_pass = super().get_url_rev_and_auth(url) - if url.startswith('ssh://'): - url = 'svn+' + url + if url.startswith("ssh://"): + url = "svn+" + url return url, rev, user_pass @staticmethod - def make_rev_args(username, password): - # type: (Optional[str], Optional[HiddenText]) -> CommandArgs - extra_args = [] # type: CommandArgs + def make_rev_args( + username: Optional[str], password: Optional[HiddenText] + ) -> CommandArgs: + extra_args: CommandArgs = [] if username: - extra_args += ['--username', username] + extra_args += ["--username", username] if password: - extra_args += ['--password', password] + extra_args += ["--password", password] return extra_args @classmethod - def get_remote_url(cls, location): - # type: (str) -> str + def get_remote_url(cls, location: str) -> str: # In cases where the source is in a subdirectory, we have to look up in # the location until we find a valid project root. 
orig_location = location @@ -135,30 +130,27 @@ def get_remote_url(cls, location): return url @classmethod - def _get_svn_url_rev(cls, location): - # type: (str) -> Tuple[Optional[str], int] + def _get_svn_url_rev(cls, location: str) -> Tuple[Optional[str], int]: from pipenv.patched.notpip._internal.exceptions import InstallationError - entries_path = os.path.join(location, cls.dirname, 'entries') + entries_path = os.path.join(location, cls.dirname, "entries") if os.path.exists(entries_path): with open(entries_path) as f: data = f.read() else: # subversion >= 1.7 does not have the 'entries' file - data = '' + data = "" url = None - if (data.startswith('8') or - data.startswith('9') or - data.startswith('10')): - entries = list(map(str.splitlines, data.split('\n\x0c\n'))) + if data.startswith("8") or data.startswith("9") or data.startswith("10"): + entries = list(map(str.splitlines, data.split("\n\x0c\n"))) del entries[0][0] # get rid of the '8' url = entries[0][3] revs = [int(d[9]) for d in entries if len(d) > 9 and d[9]] + [0] - elif data.startswith(' bool + def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool: """Always assume the versions don't match""" return False - def __init__(self, use_interactive=None): - # type: (bool) -> None + def __init__(self, use_interactive: bool = None) -> None: if use_interactive is None: use_interactive = is_console_interactive() self.use_interactive = use_interactive @@ -206,12 +194,11 @@ def __init__(self, use_interactive=None): # Special value definitions: # None: Not evaluated yet. # Empty tuple: Could not parse version. - self._vcs_version = None # type: Optional[Tuple[int, ...]] + self._vcs_version: Optional[Tuple[int, ...]] = None super().__init__() - def call_vcs_version(self): - # type: () -> Tuple[int, ...] + def call_vcs_version(self) -> Tuple[int, ...]: """Query the version of the currently installed Subversion client. :return: A tuple containing the parts of the version information or @@ -225,15 +212,13 @@ def call_vcs_version(self): # compiled Mar 28 2018, 08:49:13 on x86_64-pc-linux-gnu # svn, version 1.12.0-SlikSvn (SlikSvn/1.12.0) # compiled May 28 2019, 13:44:56 on x86_64-microsoft-windows6.2 - version_prefix = 'svn, version ' - version = self.run_command( - ['--version'], show_stdout=False, stdout_only=True - ) + version_prefix = "svn, version " + version = self.run_command(["--version"], show_stdout=False, stdout_only=True) if not version.startswith(version_prefix): return () - version = version[len(version_prefix):].split()[0] - version_list = version.partition('-')[0].split('.') + version = version[len(version_prefix) :].split()[0] + version_list = version.partition("-")[0].split(".") try: parsed_version = tuple(map(int, version_list)) except ValueError: @@ -241,8 +226,7 @@ def call_vcs_version(self): return parsed_version - def get_vcs_version(self): - # type: () -> Tuple[int, ...] + def get_vcs_version(self) -> Tuple[int, ...]: """Return the version of the currently installed Subversion client. If the version of the Subversion client has already been queried, @@ -262,8 +246,7 @@ def get_vcs_version(self): self._vcs_version = vcs_version return vcs_version - def get_remote_call_options(self): - # type: () -> CommandArgs + def get_remote_call_options(self) -> CommandArgs: """Return options to be used on calls to Subversion that contact the server. 
These options are applicable for the following ``svn`` subcommands used @@ -278,7 +261,7 @@ def get_remote_call_options(self): if not self.use_interactive: # --non-interactive switch is available since Subversion 0.14.4. # Subversion < 1.8 runs in interactive mode by default. - return ['--non-interactive'] + return ["--non-interactive"] svn_version = self.get_vcs_version() # By default, Subversion >= 1.8 runs in non-interactive mode if @@ -290,37 +273,49 @@ def get_remote_call_options(self): # SVN 1.7, pip should continue to support SVN 1.7. Therefore, pip # can't safely add the option if the SVN version is < 1.8 (or unknown). if svn_version >= (1, 8): - return ['--force-interactive'] + return ["--force-interactive"] return [] - def fetch_new(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None + def fetch_new( + self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int + ) -> None: rev_display = rev_options.to_display() logger.info( - 'Checking out %s%s to %s', + "Checking out %s%s to %s", url, rev_display, display_path(dest), ) + if verbosity <= 0: + flag = "--quiet" + else: + flag = "" cmd_args = make_command( - 'checkout', '-q', self.get_remote_call_options(), - rev_options.to_args(), url, dest, + "checkout", + flag, + self.get_remote_call_options(), + rev_options.to_args(), + url, + dest, ) self.run_command(cmd_args) - def switch(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None + def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: cmd_args = make_command( - 'switch', self.get_remote_call_options(), rev_options.to_args(), - url, dest, + "switch", + self.get_remote_call_options(), + rev_options.to_args(), + url, + dest, ) self.run_command(cmd_args) - def update(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None + def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: cmd_args = make_command( - 'update', self.get_remote_call_options(), rev_options.to_args(), + "update", + self.get_remote_call_options(), + rev_options.to_args(), dest, ) self.run_command(cmd_args) diff --git a/pipenv/patched/notpip/_internal/vcs/versioncontrol.py b/pipenv/patched/notpip/_internal/vcs/versioncontrol.py index 9355848e76..d62ef29cfe 100644 --- a/pipenv/patched/notpip/_internal/vcs/versioncontrol.py +++ b/pipenv/patched/notpip/_internal/vcs/versioncontrol.py @@ -6,6 +6,7 @@ import sys import urllib.parse from typing import ( + TYPE_CHECKING, Any, Dict, Iterable, @@ -30,10 +31,22 @@ is_installable_dir, rmtree, ) -from pipenv.patched.notpip._internal.utils.subprocess import CommandArgs, call_subprocess, make_command +from pipenv.patched.notpip._internal.utils.subprocess import ( + CommandArgs, + call_subprocess, + format_command_args, + make_command, +) from pipenv.patched.notpip._internal.utils.urls import get_url_scheme -__all__ = ['vcs'] +if TYPE_CHECKING: + # Literal was introduced in Python 3.8. + # + # TODO: Remove `if TYPE_CHECKING` when dropping support for Python 3.7. + from typing import Literal + + +__all__ = ["vcs"] logger = logging.getLogger(__name__) @@ -41,19 +54,19 @@ AuthInfo = Tuple[Optional[str], Optional[str]] -def is_url(name): - # type: (str) -> bool +def is_url(name: str) -> bool: """ Return true if the name looks like a URL. 
""" scheme = get_url_scheme(name) if scheme is None: return False - return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes + return scheme in ["http", "https", "file", "ftp"] + vcs.all_schemes -def make_vcs_requirement_url(repo_url, rev, project_name, subdir=None): - # type: (str, str, str, Optional[str]) -> str +def make_vcs_requirement_url( + repo_url: str, rev: str, project_name: str, subdir: Optional[str] = None +) -> str: """ Return the URL for a VCS requirement. @@ -62,15 +75,16 @@ def make_vcs_requirement_url(repo_url, rev, project_name, subdir=None): project_name: the (unescaped) project name. """ egg_project_name = project_name.replace("-", "_") - req = f'{repo_url}@{rev}#egg={egg_project_name}' + req = f"{repo_url}@{rev}#egg={egg_project_name}" if subdir: - req += f'&subdirectory={subdir}' + req += f"&subdirectory={subdir}" return req -def find_path_to_project_root_from_repo_root(location, repo_root): - # type: (str, str) -> Optional[str] +def find_path_to_project_root_from_repo_root( + location: str, repo_root: str +) -> Optional[str]: """ Find the the Python project's root by searching up the filesystem from `location`. Return the path to project root relative to `repo_root`. @@ -118,11 +132,10 @@ class RevOptions: def __init__( self, - vc_class, # type: Type[VersionControl] - rev=None, # type: Optional[str] - extra_args=None, # type: Optional[CommandArgs] - ): - # type: (...) -> None + vc_class: Type["VersionControl"], + rev: Optional[str] = None, + extra_args: Optional[CommandArgs] = None, + ) -> None: """ Args: vc_class: a VersionControl subclass. @@ -135,26 +148,23 @@ def __init__( self.extra_args = extra_args self.rev = rev self.vc_class = vc_class - self.branch_name = None # type: Optional[str] + self.branch_name: Optional[str] = None - def __repr__(self): - # type: () -> str - return f'' + def __repr__(self) -> str: + return f"" @property - def arg_rev(self): - # type: () -> Optional[str] + def arg_rev(self) -> Optional[str]: if self.rev is None: return self.vc_class.default_arg_rev return self.rev - def to_args(self): - # type: () -> CommandArgs + def to_args(self) -> CommandArgs: """ Return the VCS-specific command arguments. """ - args = [] # type: CommandArgs + args: CommandArgs = [] rev = self.arg_rev if rev is not None: args += self.vc_class.get_base_rev_args(rev) @@ -162,15 +172,13 @@ def to_args(self): return args - def to_display(self): - # type: () -> str + def to_display(self) -> str: if not self.rev: - return '' + return "" - return f' (to revision {self.rev})' + return f" (to revision {self.rev})" - def make_new(self, rev): - # type: (str) -> RevOptions + def make_new(self, rev: str) -> "RevOptions": """ Make a copy of the current instance, but with a new rev. 
@@ -181,54 +189,46 @@ def make_new(self, rev): class VcsSupport: - _registry = {} # type: Dict[str, VersionControl] - schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn'] + _registry: Dict[str, "VersionControl"] = {} + schemes = ["ssh", "git", "hg", "bzr", "sftp", "svn"] - def __init__(self): - # type: () -> None + def __init__(self) -> None: # Register more schemes with urlparse for various version control # systems urllib.parse.uses_netloc.extend(self.schemes) super().__init__() - def __iter__(self): - # type: () -> Iterator[str] + def __iter__(self) -> Iterator[str]: return self._registry.__iter__() @property - def backends(self): - # type: () -> List[VersionControl] + def backends(self) -> List["VersionControl"]: return list(self._registry.values()) @property - def dirnames(self): - # type: () -> List[str] + def dirnames(self) -> List[str]: return [backend.dirname for backend in self.backends] @property - def all_schemes(self): - # type: () -> List[str] - schemes = [] # type: List[str] + def all_schemes(self) -> List[str]: + schemes: List[str] = [] for backend in self.backends: schemes.extend(backend.schemes) return schemes - def register(self, cls): - # type: (Type[VersionControl]) -> None - if not hasattr(cls, 'name'): - logger.warning('Cannot register VCS %s', cls.__name__) + def register(self, cls: Type["VersionControl"]) -> None: + if not hasattr(cls, "name"): + logger.warning("Cannot register VCS %s", cls.__name__) return if cls.name not in self._registry: self._registry[cls.name] = cls() - logger.debug('Registered VCS backend: %s', cls.name) + logger.debug("Registered VCS backend: %s", cls.name) - def unregister(self, name): - # type: (str) -> None + def unregister(self, name: str) -> None: if name in self._registry: del self._registry[name] - def get_backend_for_dir(self, location): - # type: (str) -> Optional[VersionControl] + def get_backend_for_dir(self, location: str) -> Optional["VersionControl"]: """ Return a VersionControl object if a repository of that type is found at the given directory. @@ -238,8 +238,7 @@ def get_backend_for_dir(self, location): repo_path = vcs_backend.get_repository_root(location) if not repo_path: continue - logger.debug('Determine that %s uses VCS: %s', - location, vcs_backend.name) + logger.debug("Determine that %s uses VCS: %s", location, vcs_backend.name) vcs_backends[repo_path] = vcs_backend if not vcs_backends: @@ -252,8 +251,7 @@ def get_backend_for_dir(self, location): inner_most_repo_path = max(vcs_backends, key=len) return vcs_backends[inner_most_repo_path] - def get_backend_for_scheme(self, scheme): - # type: (str) -> Optional[VersionControl] + def get_backend_for_scheme(self, scheme: str) -> Optional["VersionControl"]: """ Return a VersionControl object or None. """ @@ -262,8 +260,7 @@ def get_backend_for_scheme(self, scheme): return vcs_backend return None - def get_backend(self, name): - # type: (str) -> Optional[VersionControl] + def get_backend(self, name: str) -> Optional["VersionControl"]: """ Return a VersionControl object or None. """ @@ -275,27 +272,25 @@ def get_backend(self, name): class VersionControl: - name = '' - dirname = '' - repo_name = '' + name = "" + dirname = "" + repo_name = "" # List of supported schemes for this Version Control - schemes = () # type: Tuple[str, ...] + schemes: Tuple[str, ...] = () # Iterable of environment variable names to pass to call_subprocess(). - unset_environ = () # type: Tuple[str, ...] - default_arg_rev = None # type: Optional[str] + unset_environ: Tuple[str, ...] 
= () + default_arg_rev: Optional[str] = None @classmethod - def should_add_vcs_url_prefix(cls, remote_url): - # type: (str) -> bool + def should_add_vcs_url_prefix(cls, remote_url: str) -> bool: """ Return whether the vcs prefix (e.g. "git+") should be added to a repository's remote url when used in a requirement. """ - return not remote_url.lower().startswith(f'{cls.name}:') + return not remote_url.lower().startswith(f"{cls.name}:") @classmethod - def get_subdirectory(cls, location): - # type: (str) -> Optional[str] + def get_subdirectory(cls, location: str) -> Optional[str]: """ Return the path to Python project root, relative to the repo root. Return None if the project root is in the repo root. @@ -303,16 +298,14 @@ def get_subdirectory(cls, location): return None @classmethod - def get_requirement_revision(cls, repo_dir): - # type: (str) -> str + def get_requirement_revision(cls, repo_dir: str) -> str: """ Return the revision string that should be used in a requirement. """ return cls.get_revision(repo_dir) @classmethod - def get_src_requirement(cls, repo_dir, project_name): - # type: (str, str) -> str + def get_src_requirement(cls, repo_dir: str, project_name: str) -> str: """ Return the requirement string to use to redownload the files currently at the given repository directory. @@ -327,18 +320,16 @@ def get_src_requirement(cls, repo_dir, project_name): repo_url = cls.get_remote_url(repo_dir) if cls.should_add_vcs_url_prefix(repo_url): - repo_url = f'{cls.name}+{repo_url}' + repo_url = f"{cls.name}+{repo_url}" revision = cls.get_requirement_revision(repo_dir) subdir = cls.get_subdirectory(repo_dir) - req = make_vcs_requirement_url(repo_url, revision, project_name, - subdir=subdir) + req = make_vcs_requirement_url(repo_url, revision, project_name, subdir=subdir) return req @staticmethod - def get_base_rev_args(rev): - # type: (str) -> List[str] + def get_base_rev_args(rev: str) -> List[str]: """ Return the base revision arguments for a vcs command. @@ -347,8 +338,7 @@ def get_base_rev_args(rev): """ raise NotImplementedError - def is_immutable_rev_checkout(self, url, dest): - # type: (str, str) -> bool + def is_immutable_rev_checkout(self, url: str, dest: str) -> bool: """ Return true if the commit hash checked out at dest matches the revision in url. @@ -362,8 +352,9 @@ def is_immutable_rev_checkout(self, url, dest): return False @classmethod - def make_rev_options(cls, rev=None, extra_args=None): - # type: (Optional[str], Optional[CommandArgs]) -> RevOptions + def make_rev_options( + cls, rev: Optional[str] = None, extra_args: Optional[CommandArgs] = None + ) -> RevOptions: """ Return a RevOptions object. @@ -374,18 +365,18 @@ def make_rev_options(cls, rev=None, extra_args=None): return RevOptions(cls, rev, extra_args=extra_args) @classmethod - def _is_local_repository(cls, repo): - # type: (str) -> bool + def _is_local_repository(cls, repo: str) -> bool: """ - posix absolute paths start with os.path.sep, - win32 ones start with drive (like c:\\folder) + posix absolute paths start with os.path.sep, + win32 ones start with drive (like c:\\folder) """ drive, tail = os.path.splitdrive(repo) return repo.startswith(os.path.sep) or bool(drive) @classmethod - def get_netloc_and_auth(cls, netloc, scheme): - # type: (str, str) -> Tuple[str, Tuple[Optional[str], Optional[str]]] + def get_netloc_and_auth( + cls, netloc: str, scheme: str + ) -> Tuple[str, Tuple[Optional[str], Optional[str]]]: """ Parse the repository URL's netloc, and return the new netloc to use along with auth information. 
@@ -404,8 +395,7 @@ def get_netloc_and_auth(cls, netloc, scheme): return netloc, (None, None) @classmethod - def get_url_rev_and_auth(cls, url): - # type: (str) -> Tuple[str, Optional[str], AuthInfo] + def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]: """ Parse the repository URL to use, and return the URL, revision, and auth info to use. @@ -413,44 +403,44 @@ def get_url_rev_and_auth(cls, url): Returns: (url, rev, (username, password)). """ scheme, netloc, path, query, frag = urllib.parse.urlsplit(url) - if '+' not in scheme: + if "+" not in scheme: raise ValueError( "Sorry, {!r} is a malformed VCS url. " "The format is +://, " "e.g. svn+http://myrepo/svn/MyApp#egg=MyApp".format(url) ) # Remove the vcs prefix. - scheme = scheme.split('+', 1)[1] + scheme = scheme.split("+", 1)[1] netloc, user_pass = cls.get_netloc_and_auth(netloc, scheme) rev = None - if '@' in path: - path, rev = path.rsplit('@', 1) + if "@" in path: + path, rev = path.rsplit("@", 1) if not rev: raise InstallationError( "The URL {!r} has an empty revision (after @) " "which is not supported. Include a revision after @ " "or remove @ from the URL.".format(url) ) - url = urllib.parse.urlunsplit((scheme, netloc, path, query, '')) + url = urllib.parse.urlunsplit((scheme, netloc, path, query, "")) return url, rev, user_pass @staticmethod - def make_rev_args(username, password): - # type: (Optional[str], Optional[HiddenText]) -> CommandArgs + def make_rev_args( + username: Optional[str], password: Optional[HiddenText] + ) -> CommandArgs: """ Return the RevOptions "extra arguments" to use in obtain(). """ return [] - def get_url_rev_options(self, url): - # type: (HiddenText) -> Tuple[HiddenText, RevOptions] + def get_url_rev_options(self, url: HiddenText) -> Tuple[HiddenText, RevOptions]: """ Return the URL and RevOptions object to use in obtain(), as a tuple (url, rev_options). """ secret_url, rev, user_pass = self.get_url_rev_and_auth(url.secret) username, secret_password = user_pass - password = None # type: Optional[HiddenText] + password: Optional[HiddenText] = None if secret_password is not None: password = hide_value(secret_password) extra_args = self.make_rev_args(username, password) @@ -459,24 +449,23 @@ def get_url_rev_options(self, url): return hide_url(secret_url), rev_options @staticmethod - def normalize_url(url): - # type: (str) -> str + def normalize_url(url: str) -> str: """ Normalize a URL for comparison by unquoting it and removing any trailing slash. """ - return urllib.parse.unquote(url).rstrip('/') + return urllib.parse.unquote(url).rstrip("/") @classmethod - def compare_urls(cls, url1, url2): - # type: (str, str) -> bool + def compare_urls(cls, url1: str, url2: str) -> bool: """ Compare two repo URLs for identity, ignoring incidental differences. """ - return (cls.normalize_url(url1) == cls.normalize_url(url2)) + return cls.normalize_url(url1) == cls.normalize_url(url2) - def fetch_new(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None + def fetch_new( + self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int + ) -> None: """ Fetch a revision from a repository, in the case that this is the first fetch from the repository. @@ -484,11 +473,11 @@ def fetch_new(self, dest, url, rev_options): Args: dest: the directory to fetch the repository to. rev_options: a RevOptions object. + verbosity: verbosity level. 
""" raise NotImplementedError - def switch(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None + def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: """ Switch the repo at ``dest`` to point to ``URL``. @@ -497,8 +486,7 @@ def switch(self, dest, url, rev_options): """ raise NotImplementedError - def update(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None + def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: """ Update an already-existing repo to the given ``rev_options``. @@ -508,8 +496,7 @@ def update(self, dest, url, rev_options): raise NotImplementedError @classmethod - def is_commit_id_equal(cls, dest, name): - # type: (str, Optional[str]) -> bool + def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool: """ Return whether the id of the current commit equals the given name. @@ -519,19 +506,19 @@ def is_commit_id_equal(cls, dest, name): """ raise NotImplementedError - def obtain(self, dest, url): - # type: (str, HiddenText) -> None + def obtain(self, dest: str, url: HiddenText, verbosity: int) -> None: """ Install or update in editable mode the package represented by this VersionControl object. :param dest: the repository directory in which to install or update. :param url: the repository URL starting with a vcs prefix. + :param verbosity: verbosity level. """ url, rev_options = self.get_url_rev_options(url) if not os.path.exists(dest): - self.fetch_new(dest, url, rev_options) + self.fetch_new(dest, url, rev_options, verbosity=verbosity) return rev_display = rev_options.to_display() @@ -539,73 +526,68 @@ def obtain(self, dest, url): existing_url = self.get_remote_url(dest) if self.compare_urls(existing_url, url.secret): logger.debug( - '%s in %s exists, and has correct URL (%s)', + "%s in %s exists, and has correct URL (%s)", self.repo_name.title(), display_path(dest), url, ) if not self.is_commit_id_equal(dest, rev_options.rev): logger.info( - 'Updating %s %s%s', + "Updating %s %s%s", display_path(dest), self.repo_name, rev_display, ) self.update(dest, url, rev_options) else: - logger.info('Skipping because already up-to-date.') + logger.info("Skipping because already up-to-date.") return logger.warning( - '%s %s in %s exists with URL %s', + "%s %s in %s exists with URL %s", self.name, self.repo_name, display_path(dest), existing_url, ) - prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ', - ('s', 'i', 'w', 'b')) + prompt = ("(s)witch, (i)gnore, (w)ipe, (b)ackup ", ("s", "i", "w", "b")) else: logger.warning( - 'Directory %s already exists, and is not a %s %s.', + "Directory %s already exists, and is not a %s %s.", dest, self.name, self.repo_name, ) # https://github.com/python/mypy/issues/1174 - prompt = ('(i)gnore, (w)ipe, (b)ackup ', # type: ignore - ('i', 'w', 'b')) + prompt = ("(i)gnore, (w)ipe, (b)ackup ", ("i", "w", "b")) # type: ignore logger.warning( - 'The plan is to install the %s repository %s', + "The plan is to install the %s repository %s", self.name, url, ) - response = ask_path_exists('What to do? {}'.format( - prompt[0]), prompt[1]) + response = ask_path_exists("What to do? 
{}".format(prompt[0]), prompt[1]) - if response == 'a': + if response == "a": sys.exit(-1) - if response == 'w': - logger.warning('Deleting %s', display_path(dest)) + if response == "w": + logger.warning("Deleting %s", display_path(dest)) rmtree(dest) - self.fetch_new(dest, url, rev_options) + self.fetch_new(dest, url, rev_options, verbosity=verbosity) return - if response == 'b': + if response == "b": dest_dir = backup_dir(dest) - logger.warning( - 'Backing up %s to %s', display_path(dest), dest_dir, - ) + logger.warning("Backing up %s to %s", display_path(dest), dest_dir) shutil.move(dest, dest_dir) - self.fetch_new(dest, url, rev_options) + self.fetch_new(dest, url, rev_options, verbosity=verbosity) return # Do nothing if the response is "i". - if response == 's': + if response == "s": logger.info( - 'Switching %s %s to %s%s', + "Switching %s %s to %s%s", self.repo_name, display_path(dest), url, @@ -613,21 +595,20 @@ def obtain(self, dest, url): ) self.switch(dest, url, rev_options) - def unpack(self, location, url): - # type: (str, HiddenText) -> None + def unpack(self, location: str, url: HiddenText, verbosity: int) -> None: """ Clean up current location and download the url repository (and vcs infos) into location :param url: the repository URL starting with a vcs prefix. + :param verbosity: verbosity level. """ if os.path.exists(location): rmtree(location) - self.obtain(location, url=url) + self.obtain(location, url=url, verbosity=verbosity) @classmethod - def get_remote_url(cls, location): - # type: (str) -> str + def get_remote_url(cls, location: str) -> str: """ Return the url used at location @@ -637,8 +618,7 @@ def get_remote_url(cls, location): raise NotImplementedError @classmethod - def get_revision(cls, location): - # type: (str) -> str + def get_revision(cls, location: str) -> str: """ Return the current commit id of the files at the given location. """ @@ -647,40 +627,46 @@ def get_revision(cls, location): @classmethod def run_command( cls, - cmd, # type: Union[List[str], CommandArgs] - show_stdout=True, # type: bool - cwd=None, # type: Optional[str] - on_returncode='raise', # type: str - extra_ok_returncodes=None, # type: Optional[Iterable[int]] - command_desc=None, # type: Optional[str] - extra_environ=None, # type: Optional[Mapping[str, Any]] - spinner=None, # type: Optional[SpinnerInterface] - log_failed_cmd=True, # type: bool - stdout_only=False, # type: bool - ): - # type: (...) 
-> str + cmd: Union[List[str], CommandArgs], + show_stdout: bool = True, + cwd: Optional[str] = None, + on_returncode: 'Literal["raise", "warn", "ignore"]' = "raise", + extra_ok_returncodes: Optional[Iterable[int]] = None, + command_desc: Optional[str] = None, + extra_environ: Optional[Mapping[str, Any]] = None, + spinner: Optional[SpinnerInterface] = None, + log_failed_cmd: bool = True, + stdout_only: bool = False, + ) -> str: """ Run a VCS subcommand This is simply a wrapper around call_subprocess that adds the VCS command name, and checks that the VCS is available """ cmd = make_command(cls.name, *cmd) + if command_desc is None: + command_desc = format_command_args(cmd) try: - return call_subprocess(cmd, show_stdout, cwd, - on_returncode=on_returncode, - extra_ok_returncodes=extra_ok_returncodes, - command_desc=command_desc, - extra_environ=extra_environ, - unset_environ=cls.unset_environ, - spinner=spinner, - log_failed_cmd=log_failed_cmd, - stdout_only=stdout_only) + return call_subprocess( + cmd, + show_stdout, + cwd, + on_returncode=on_returncode, + extra_ok_returncodes=extra_ok_returncodes, + command_desc=command_desc, + extra_environ=extra_environ, + unset_environ=cls.unset_environ, + spinner=spinner, + log_failed_cmd=log_failed_cmd, + stdout_only=stdout_only, + ) except FileNotFoundError: # errno.ENOENT = no such file or directory # In other words, the VCS executable isn't available raise BadCommand( - f'Cannot find command {cls.name!r} - do you have ' - f'{cls.name!r} installed and in your PATH?') + f"Cannot find command {cls.name!r} - do you have " + f"{cls.name!r} installed and in your PATH?" + ) except PermissionError: # errno.EACCES = Permission denied # This error occurs, for instance, when the command is installed @@ -695,18 +681,15 @@ def run_command( ) @classmethod - def is_repository_directory(cls, path): - # type: (str) -> bool + def is_repository_directory(cls, path: str) -> bool: """ Return whether a directory path is a repository directory. """ - logger.debug('Checking in %s for %s (%s)...', - path, cls.dirname, cls.name) + logger.debug("Checking in %s for %s (%s)...", path, cls.dirname, cls.name) return os.path.exists(os.path.join(path, cls.dirname)) @classmethod - def get_repository_root(cls, location): - # type: (str) -> Optional[str] + def get_repository_root(cls, location: str) -> Optional[str]: """ Return the "root" (top-level) directory controlled by the vcs, or `None` if the directory is not in any. 
diff --git a/pipenv/patched/notpip/_internal/wheel_builder.py b/pipenv/patched/notpip/_internal/wheel_builder.py index ddb5bf788a..9cbd9abe60 100644 --- a/pipenv/patched/notpip/_internal/wheel_builder.py +++ b/pipenv/patched/notpip/_internal/wheel_builder.py @@ -12,10 +12,11 @@ from pipenv.patched.notpip._internal.cache import WheelCache from pipenv.patched.notpip._internal.exceptions import InvalidWheelFilename, UnsupportedWheel -from pipenv.patched.notpip._internal.metadata import get_wheel_distribution +from pipenv.patched.notpip._internal.metadata import FilesystemWheel, get_wheel_distribution from pipenv.patched.notpip._internal.models.link import Link from pipenv.patched.notpip._internal.models.wheel import Wheel from pipenv.patched.notpip._internal.operations.build.wheel import build_wheel_pep517 +from pipenv.patched.notpip._internal.operations.build.wheel_editable import build_wheel_editable from pipenv.patched.notpip._internal.operations.build.wheel_legacy import build_wheel_legacy from pipenv.patched.notpip._internal.req.req_install import InstallRequirement from pipenv.patched.notpip._internal.utils.logging import indent_log @@ -28,14 +29,13 @@ logger = logging.getLogger(__name__) -_egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.IGNORECASE) +_egg_info_re = re.compile(r"([a-z0-9_.]+)-([a-z0-9_.!+-]+)", re.IGNORECASE) BinaryAllowedPredicate = Callable[[InstallRequirement], bool] BuildResult = Tuple[List[InstallRequirement], List[InstallRequirement]] -def _contains_egg_info(s): - # type: (str) -> bool +def _contains_egg_info(s: str) -> bool: """Determine whether the string looks like an egg_info. :param s: The string to parse. E.g. foo-2.1 @@ -44,11 +44,10 @@ def _contains_egg_info(s): def _should_build( - req, # type: InstallRequirement - need_wheel, # type: bool - check_binary_allowed, # type: BinaryAllowedPredicate -): - # type: (...) -> bool + req: InstallRequirement, + need_wheel: bool, + check_binary_allowed: BinaryAllowedPredicate, +) -> bool: """Return whether an InstallRequirement should be built into a wheel.""" if req.constraint: # never build requirements that are merely constraints @@ -56,7 +55,8 @@ def _should_build( if req.is_wheel: if need_wheel: logger.info( - 'Skipping %s, due to already being wheel.', req.name, + "Skipping %s, due to already being wheel.", + req.name, ) return False @@ -67,16 +67,20 @@ def _should_build( # From this point, this concerns the pip install command only # (need_wheel=False). - if req.editable or not req.source_dir: + if not req.source_dir: return False + if req.editable: + # we only build PEP 660 editable requirements + return req.supports_pyproject_editable() + if req.use_pep517: return True if not check_binary_allowed(req): logger.info( - "Skipping wheel build for %s, due to binaries " - "being disabled for it.", req.name, + "Skipping wheel build for %s, due to binaries being disabled for it.", + req.name, ) return False @@ -84,7 +88,8 @@ def _should_build( # we don't build legacy requirements if wheel is not installed logger.info( "Using legacy 'setup.py install' for %s, " - "since package 'wheel' is not installed.", req.name, + "since package 'wheel' is not installed.", + req.name, ) return False @@ -92,28 +97,23 @@ def _should_build( def should_build_for_wheel_command( - req, # type: InstallRequirement -): - # type: (...) 
-> bool - return _should_build( - req, need_wheel=True, check_binary_allowed=_always_true - ) + req: InstallRequirement, +) -> bool: + return _should_build(req, need_wheel=True, check_binary_allowed=_always_true) def should_build_for_install_command( - req, # type: InstallRequirement - check_binary_allowed, # type: BinaryAllowedPredicate -): - # type: (...) -> bool + req: InstallRequirement, + check_binary_allowed: BinaryAllowedPredicate, +) -> bool: return _should_build( req, need_wheel=False, check_binary_allowed=check_binary_allowed ) def _should_cache( - req, # type: InstallRequirement -): - # type: (...) -> Optional[bool] + req: InstallRequirement, +) -> Optional[bool]: """ Return whether a built InstallRequirement can be stored in the persistent wheel cache, assuming the wheel cache is available, and _should_build() @@ -144,10 +144,9 @@ def _should_cache( def _get_cache_dir( - req, # type: InstallRequirement - wheel_cache, # type: WheelCache -): - # type: (...) -> str + req: InstallRequirement, + wheel_cache: WheelCache, +) -> str: """Return the persistent or temporary cache directory where the built wheel need to be stored. """ @@ -160,13 +159,11 @@ def _get_cache_dir( return cache_dir -def _always_true(_): - # type: (Any) -> bool +def _always_true(_: Any) -> bool: return True -def _verify_one(req, wheel_path): - # type: (InstallRequirement, str) -> None +def _verify_one(req: InstallRequirement, wheel_path: str) -> None: canonical_name = canonicalize_name(req.name or "") w = Wheel(os.path.basename(wheel_path)) if canonicalize_name(w.name) != canonical_name: @@ -174,7 +171,7 @@ def _verify_one(req, wheel_path): "Wheel has unexpected file name: expected {!r}, " "got {!r}".format(canonical_name, w.name), ) - dist = get_wheel_distribution(wheel_path, canonical_name) + dist = get_wheel_distribution(FilesystemWheel(wheel_path), canonical_name) dist_verstr = str(dist.version) if canonicalize_version(dist_verstr) != canonicalize_version(w.version): raise InvalidWheelFilename( @@ -189,8 +186,7 @@ def _verify_one(req, wheel_path): except InvalidVersion: msg = f"Invalid Metadata-Version: {metadata_version_value}" raise UnsupportedWheel(msg) - if (metadata_version >= Version("1.2") - and not isinstance(dist.version, Version)): + if metadata_version >= Version("1.2") and not isinstance(dist.version, Version): raise UnsupportedWheel( "Metadata 1.2 mandates PEP 440 version, " "but {!r} is not".format(dist_verstr) @@ -198,47 +194,50 @@ def _verify_one(req, wheel_path): def _build_one( - req, # type: InstallRequirement - output_dir, # type: str - verify, # type: bool - build_options, # type: List[str] - global_options, # type: List[str] -): - # type: (...) -> Optional[str] + req: InstallRequirement, + output_dir: str, + verify: bool, + build_options: List[str], + global_options: List[str], + editable: bool, +) -> Optional[str]: """Build one wheel. :return: The filename of the built wheel, or None if the build failed. 
""" + artifact = "editable" if editable else "wheel" try: ensure_dir(output_dir) except OSError as e: logger.warning( - "Building wheel for %s failed: %s", - req.name, e, + "Building %s for %s failed: %s", + artifact, + req.name, + e, ) return None # Install build deps into temporary directory (PEP 518) with req.build_env: wheel_path = _build_one_inside_env( - req, output_dir, build_options, global_options + req, output_dir, build_options, global_options, editable ) if wheel_path and verify: try: _verify_one(req, wheel_path) except (InvalidWheelFilename, UnsupportedWheel) as e: - logger.warning("Built wheel for %s is invalid: %s", req.name, e) + logger.warning("Built %s for %s is invalid: %s", artifact, req.name, e) return None return wheel_path def _build_one_inside_env( - req, # type: InstallRequirement - output_dir, # type: str - build_options, # type: List[str] - global_options, # type: List[str] -): - # type: (...) -> Optional[str] + req: InstallRequirement, + output_dir: str, + build_options: List[str], + global_options: List[str], + editable: bool, +) -> Optional[str]: with TempDirectory(kind="wheel") as temp_dir: assert req.name if req.use_pep517: @@ -246,18 +245,26 @@ def _build_one_inside_env( assert req.pep517_backend if global_options: logger.warning( - 'Ignoring --global-option when building %s using PEP 517', req.name + "Ignoring --global-option when building %s using PEP 517", req.name ) if build_options: logger.warning( - 'Ignoring --build-option when building %s using PEP 517', req.name + "Ignoring --build-option when building %s using PEP 517", req.name + ) + if editable: + wheel_path = build_wheel_editable( + name=req.name, + backend=req.pep517_backend, + metadata_directory=req.metadata_directory, + tempd=temp_dir.path, + ) + else: + wheel_path = build_wheel_pep517( + name=req.name, + backend=req.pep517_backend, + metadata_directory=req.metadata_directory, + tempd=temp_dir.path, ) - wheel_path = build_wheel_pep517( - name=req.name, - backend=req.pep517_backend, - metadata_directory=req.metadata_directory, - tempd=temp_dir.path, - ) else: wheel_path = build_wheel_legacy( name=req.name, @@ -274,16 +281,20 @@ def _build_one_inside_env( try: wheel_hash, length = hash_file(wheel_path) shutil.move(wheel_path, dest_path) - logger.info('Created wheel for %s: ' - 'filename=%s size=%d sha256=%s', - req.name, wheel_name, length, - wheel_hash.hexdigest()) - logger.info('Stored in directory: %s', output_dir) + logger.info( + "Created wheel for %s: filename=%s size=%d sha256=%s", + req.name, + wheel_name, + length, + wheel_hash.hexdigest(), + ) + logger.info("Stored in directory: %s", output_dir) return dest_path except Exception as e: logger.warning( "Building wheel for %s failed: %s", - req.name, e, + req.name, + e, ) # Ignore return, we can't do anything else useful. 
if not req.use_pep517: @@ -291,30 +302,30 @@ def _build_one_inside_env( return None -def _clean_one_legacy(req, global_options): - # type: (InstallRequirement, List[str]) -> bool +def _clean_one_legacy(req: InstallRequirement, global_options: List[str]) -> bool: clean_args = make_setuptools_clean_args( req.setup_py_path, global_options=global_options, ) - logger.info('Running setup.py clean for %s', req.name) + logger.info("Running setup.py clean for %s", req.name) try: - call_subprocess(clean_args, cwd=req.source_dir) + call_subprocess( + clean_args, command_desc="python setup.py clean", cwd=req.source_dir + ) return True except Exception: - logger.error('Failed cleaning build dir for %s', req.name) + logger.error("Failed cleaning build dir for %s", req.name) return False def build( - requirements, # type: Iterable[InstallRequirement] - wheel_cache, # type: WheelCache - verify, # type: bool - build_options, # type: List[str] - global_options, # type: List[str] -): - # type: (...) -> BuildResult + requirements: Iterable[InstallRequirement], + wheel_cache: WheelCache, + verify: bool, + build_options: List[str], + global_options: List[str], +) -> BuildResult: """Build wheels. :return: The list of InstallRequirement that succeeded to build and @@ -325,16 +336,22 @@ def build( # Build the wheels. logger.info( - 'Building wheels for collected packages: %s', - ', '.join(req.name for req in requirements), # type: ignore + "Building wheels for collected packages: %s", + ", ".join(req.name for req in requirements), # type: ignore ) with indent_log(): build_successes, build_failures = [], [] for req in requirements: + assert req.name cache_dir = _get_cache_dir(req, wheel_cache) wheel_file = _build_one( - req, cache_dir, verify, build_options, global_options + req, + cache_dir, + verify, + build_options, + global_options, + req.editable and req.permit_editable_wheels, ) if wheel_file: # Update the link for this. @@ -348,13 +365,13 @@ def build( # notify success/failure if build_successes: logger.info( - 'Successfully built %s', - ' '.join([req.name for req in build_successes]), # type: ignore + "Successfully built %s", + " ".join([req.name for req in build_successes]), # type: ignore ) if build_failures: logger.info( - 'Failed to build %s', - ' '.join([req.name for req in build_failures]), # type: ignore + "Failed to build %s", + " ".join([req.name for req in build_failures]), # type: ignore ) # Return a list of requirements that failed to build return build_successes, build_failures diff --git a/pipenv/patched/notpip/_vendor/__init__.py b/pipenv/patched/notpip/_vendor/__init__.py index d6c55e38b7..94c2e826cf 100644 --- a/pipenv/patched/notpip/_vendor/__init__.py +++ b/pipenv/patched/notpip/_vendor/__init__.py @@ -58,7 +58,6 @@ def vendored(modulename): sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path # Actually alias all of our vendored dependencies. - vendored("appdirs") vendored("cachecontrol") vendored("certifi") vendored("colorama") @@ -74,6 +73,7 @@ def vendored(modulename): vendored("packaging.specifiers") vendored("pep517") vendored("pkg_resources") + vendored("platformdirs") vendored("progress") vendored("requests") vendored("requests.exceptions") diff --git a/pipenv/patched/notpip/_vendor/appdirs.LICENSE.txt b/pipenv/patched/notpip/_vendor/appdirs.LICENSE.txt deleted file mode 100644 index 107c61405e..0000000000 --- a/pipenv/patched/notpip/_vendor/appdirs.LICENSE.txt +++ /dev/null @@ -1,23 +0,0 @@ -# This is the MIT license - -Copyright (c) 2010 ActiveState Software Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a -copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/pipenv/patched/notpip/_vendor/appdirs.py b/pipenv/patched/notpip/_vendor/appdirs.py deleted file mode 100644 index 65501092f9..0000000000 --- a/pipenv/patched/notpip/_vendor/appdirs.py +++ /dev/null @@ -1,633 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# Copyright (c) 2005-2010 ActiveState Software Inc. -# Copyright (c) 2013 Eddy Petrișor - -"""Utilities for determining application-specific dirs. - -See for details and usage. -""" -# Dev Notes: -# - MSDN on where to store app data files: -# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120 -# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html -# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html - -__version__ = "1.4.4" -__version_info__ = tuple(int(segment) for segment in __version__.split(".")) - - -import sys -import os - -PY3 = sys.version_info[0] == 3 - -if PY3: - unicode = str - -if sys.platform.startswith('java'): - import platform - os_name = platform.java_ver()[3][0] - if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc. - system = 'win32' - elif os_name.startswith('Mac'): # "Mac OS X", etc. - system = 'darwin' - else: # "Linux", "SunOS", "FreeBSD", etc. - # Setting this to "linux2" is not ideal, but only Windows or Mac - # are actually checked for and the rest of the module expects - # *sys.platform* style strings. - system = 'linux2' -elif sys.platform == 'cli' and os.name == 'nt': - # Detect Windows in IronPython to match pipenv.patched.notpip._internal.utils.compat.WINDOWS - # Discussion: - system = 'win32' -else: - system = sys.platform - - - -def user_data_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. 
- "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - - for a discussion of issues. - - Typical user data directories are: - Mac OS X: ~/Library/Application Support/ # or ~/.config/, if the other does not exist - Unix: ~/.local/share/ # or in $XDG_DATA_HOME, if defined - Win XP (not roaming): C:\Documents and Settings\\Application Data\\ - Win XP (roaming): C:\Documents and Settings\\Local Settings\Application Data\\ - Win 7 (not roaming): C:\Users\\AppData\Local\\ - Win 7 (roaming): C:\Users\\AppData\Roaming\\ - - For Unix, we follow the XDG spec and support $XDG_DATA_HOME. - That means, by default "~/.local/share/". - """ - if system == "win32": - if appauthor is None: - appauthor = appname - const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA" - path = os.path.normpath(_get_win_folder(const)) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - elif system == 'darwin': - path = os.path.expanduser('~/Library/Application Support/') - if appname: - path = os.path.join(path, appname) - else: - path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def site_data_dir(appname=None, appauthor=None, version=None, multipath=False): - r"""Return full path to the user-shared data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "multipath" is an optional parameter only applicable to *nix - which indicates that the entire list of data dirs should be - returned. By default, the first item from XDG_DATA_DIRS is - returned, or '/usr/local/share/', - if XDG_DATA_DIRS is not set - - Typical site data directories are: - Mac OS X: /Library/Application Support/ - Unix: /usr/local/share/ or /usr/share/ - Win XP: C:\Documents and Settings\All Users\Application Data\\ - Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) - Win 7: C:\ProgramData\\ # Hidden, but writeable on Win 7. - - For Unix, this is using the $XDG_DATA_DIRS[0] default. - - WARNING: Do not use this on Windows. See the Vista-Fail note above for why. 
- """ - if system == "win32": - if appauthor is None: - appauthor = appname - path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - elif system == 'darwin': - path = os.path.expanduser('/Library/Application Support') - if appname: - path = os.path.join(path, appname) - else: - # XDG default for $XDG_DATA_DIRS - # only first, if multipath is False - path = os.getenv('XDG_DATA_DIRS', - os.pathsep.join(['/usr/local/share', '/usr/share'])) - pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] - if appname: - if version: - appname = os.path.join(appname, version) - pathlist = [os.path.join(x, appname) for x in pathlist] - - if multipath: - path = os.pathsep.join(pathlist) - else: - path = pathlist[0] - return path - - if appname and version: - path = os.path.join(path, version) - return path - - -def user_config_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific config dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - - for a discussion of issues. - - Typical user config directories are: - Mac OS X: same as user_data_dir - Unix: ~/.config/ # or in $XDG_CONFIG_HOME, if defined - Win *: same as user_data_dir - - For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. - That means, by default "~/.config/". - """ - if system in ["win32", "darwin"]: - path = user_data_dir(appname, appauthor, None, roaming) - else: - path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -# for the discussion regarding site_config_dir locations -# see -def site_config_dir(appname=None, appauthor=None, version=None, multipath=False): - r"""Return full path to the user-shared data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "multipath" is an optional parameter only applicable to *nix - which indicates that the entire list of config dirs should be - returned. 
By default, the first item from XDG_CONFIG_DIRS is - returned, or '/etc/xdg/', if XDG_CONFIG_DIRS is not set - - Typical site config directories are: - Mac OS X: same as site_data_dir - Unix: /etc/xdg/ or $XDG_CONFIG_DIRS[i]/ for each value in - $XDG_CONFIG_DIRS - Win *: same as site_data_dir - Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) - - For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False - - WARNING: Do not use this on Windows. See the Vista-Fail note above for why. - """ - if system in ["win32", "darwin"]: - path = site_data_dir(appname, appauthor) - if appname and version: - path = os.path.join(path, version) - else: - # XDG default for $XDG_CONFIG_DIRS (missing or empty) - # see - # only first, if multipath is False - path = os.getenv('XDG_CONFIG_DIRS') or '/etc/xdg' - pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep) if x] - if appname: - if version: - appname = os.path.join(appname, version) - pathlist = [os.path.join(x, appname) for x in pathlist] - - if multipath: - path = os.pathsep.join(pathlist) - else: - path = pathlist[0] - return path - - -def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True): - r"""Return full path to the user-specific cache dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "opinion" (boolean) can be False to disable the appending of - "Cache" to the base app data dir for Windows. See - discussion below. - - Typical user cache directories are: - Mac OS X: ~/Library/Caches/ - Unix: ~/.cache/ (XDG default) - Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Cache - Vista: C:\Users\\AppData\Local\\\Cache - - On Windows the only suggestion in the MSDN docs is that local settings go in - the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming - app data dir (the default returned by `user_data_dir` above). Apps typically - put cache data somewhere *under* the given dir here. Some examples: - ...\Mozilla\Firefox\Profiles\\Cache - ...\Acme\SuperApp\Cache\1.0 - OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. - This can be disabled with the `opinion=False` option. - """ - if system == "win32": - if appauthor is None: - appauthor = appname - path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) - # When using Python 2, return paths as bytes on Windows like we do on - # other operating systems. See helper function docs for more details. 
- if not PY3 and isinstance(path, unicode): - path = _win_path_to_bytes(path) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - if opinion: - path = os.path.join(path, "Cache") - elif system == 'darwin': - path = os.path.expanduser('~/Library/Caches') - if appname: - path = os.path.join(path, appname) - else: - path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def user_state_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific state dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - - for a discussion of issues. - - Typical user state directories are: - Mac OS X: same as user_data_dir - Unix: ~/.local/state/ # or in $XDG_STATE_HOME, if defined - Win *: same as user_data_dir - - For Unix, we follow this Debian proposal - to extend the XDG spec and support $XDG_STATE_HOME. - - That means, by default "~/.local/state/". - """ - if system in ["win32", "darwin"]: - path = user_data_dir(appname, appauthor, None, roaming) - else: - path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def user_log_dir(appname=None, appauthor=None, version=None, opinion=True): - r"""Return full path to the user-specific log dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "opinion" (boolean) can be False to disable the appending of - "Logs" to the base app data dir for Windows, and "log" to the - base cache dir for Unix. See discussion below. - - Typical user log directories are: - Mac OS X: ~/Library/Logs/ - Unix: ~/.cache//log # or under $XDG_CACHE_HOME if defined - Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Logs - Vista: C:\Users\\AppData\Local\\\Logs - - On Windows the only suggestion in the MSDN docs is that local settings - go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in - examples of what some windows apps use for a logs dir.) 
- - OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA` - value for Windows and appends "log" to the user cache dir for Unix. - This can be disabled with the `opinion=False` option. - """ - if system == "darwin": - path = os.path.join( - os.path.expanduser('~/Library/Logs'), - appname) - elif system == "win32": - path = user_data_dir(appname, appauthor, version) - version = False - if opinion: - path = os.path.join(path, "Logs") - else: - path = user_cache_dir(appname, appauthor, version) - version = False - if opinion: - path = os.path.join(path, "log") - if appname and version: - path = os.path.join(path, version) - return path - - -class AppDirs(object): - """Convenience wrapper for getting application dirs.""" - def __init__(self, appname=None, appauthor=None, version=None, - roaming=False, multipath=False): - self.appname = appname - self.appauthor = appauthor - self.version = version - self.roaming = roaming - self.multipath = multipath - - @property - def user_data_dir(self): - return user_data_dir(self.appname, self.appauthor, - version=self.version, roaming=self.roaming) - - @property - def site_data_dir(self): - return site_data_dir(self.appname, self.appauthor, - version=self.version, multipath=self.multipath) - - @property - def user_config_dir(self): - return user_config_dir(self.appname, self.appauthor, - version=self.version, roaming=self.roaming) - - @property - def site_config_dir(self): - return site_config_dir(self.appname, self.appauthor, - version=self.version, multipath=self.multipath) - - @property - def user_cache_dir(self): - return user_cache_dir(self.appname, self.appauthor, - version=self.version) - - @property - def user_state_dir(self): - return user_state_dir(self.appname, self.appauthor, - version=self.version) - - @property - def user_log_dir(self): - return user_log_dir(self.appname, self.appauthor, - version=self.version) - - -#---- internal support stuff - -def _get_win_folder_from_registry(csidl_name): - """This is a fallback technique at best. I'm not sure if using the - registry for this guarantees us the correct answer for all CSIDL_* - names. - """ - if PY3: - import winreg as _winreg - else: - import _winreg - - shell_folder_name = { - "CSIDL_APPDATA": "AppData", - "CSIDL_COMMON_APPDATA": "Common AppData", - "CSIDL_LOCAL_APPDATA": "Local AppData", - }[csidl_name] - - key = _winreg.OpenKey( - _winreg.HKEY_CURRENT_USER, - r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" - ) - dir, type = _winreg.QueryValueEx(key, shell_folder_name) - return dir - - -def _get_win_folder_with_pywin32(csidl_name): - from win32com.shell import shellcon, shell - dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0) - # Try to make this a unicode path because SHGetFolderPath does - # not return unicode strings when there is unicode data in the - # path. - try: - dir = unicode(dir) - - # Downgrade to short path name if have highbit chars. See - # . 
- has_high_char = False - for c in dir: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - try: - import win32api - dir = win32api.GetShortPathName(dir) - except ImportError: - pass - except UnicodeError: - pass - return dir - - -def _get_win_folder_with_ctypes(csidl_name): - import ctypes - - csidl_const = { - "CSIDL_APPDATA": 26, - "CSIDL_COMMON_APPDATA": 35, - "CSIDL_LOCAL_APPDATA": 28, - }[csidl_name] - - buf = ctypes.create_unicode_buffer(1024) - ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) - - # Downgrade to short path name if have highbit chars. See - # . - has_high_char = False - for c in buf: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - buf2 = ctypes.create_unicode_buffer(1024) - if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): - buf = buf2 - - return buf.value - -def _get_win_folder_with_jna(csidl_name): - import array - from com.sun import jna - from com.sun.jna.platform import win32 - - buf_size = win32.WinDef.MAX_PATH * 2 - buf = array.zeros('c', buf_size) - shell = win32.Shell32.INSTANCE - shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf) - dir = jna.Native.toString(buf.tostring()).rstrip("\0") - - # Downgrade to short path name if have highbit chars. See - # . - has_high_char = False - for c in dir: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - buf = array.zeros('c', buf_size) - kernel = win32.Kernel32.INSTANCE - if kernel.GetShortPathName(dir, buf, buf_size): - dir = jna.Native.toString(buf.tostring()).rstrip("\0") - - return dir - -if system == "win32": - try: - from ctypes import windll - _get_win_folder = _get_win_folder_with_ctypes - except ImportError: - try: - import com.sun.jna - _get_win_folder = _get_win_folder_with_jna - except ImportError: - _get_win_folder = _get_win_folder_from_registry - - -def _win_path_to_bytes(path): - """Encode Windows paths to bytes. Only used on Python 2. - - Motivation is to be consistent with other operating systems where paths - are also returned as bytes. This avoids problems mixing bytes and Unicode - elsewhere in the codebase. For more details and discussion see - . - - If encoding using ASCII and MBCS fails, return the original Unicode path. 
- """ - for encoding in ('ASCII', 'MBCS'): - try: - return path.encode(encoding) - except (UnicodeEncodeError, LookupError): - pass - return path - - -#---- self test code - -if __name__ == "__main__": - appname = "MyApp" - appauthor = "MyCompany" - - props = ("user_data_dir", - "user_config_dir", - "user_cache_dir", - "user_state_dir", - "user_log_dir", - "site_data_dir", - "site_config_dir") - - print("-- app dirs %s --" % __version__) - - print("-- app dirs (with optional 'version')") - dirs = AppDirs(appname, appauthor, version="1.0") - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (without optional 'version')") - dirs = AppDirs(appname, appauthor) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (without optional 'appauthor')") - dirs = AppDirs(appname) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (with disabled 'appauthor')") - dirs = AppDirs(appname, appauthor=False) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) diff --git a/pipenv/patched/notpip/_vendor/cachecontrol/LICENSE.txt b/pipenv/patched/notpip/_vendor/cachecontrol/LICENSE.txt deleted file mode 100644 index 1ed31ac368..0000000000 --- a/pipenv/patched/notpip/_vendor/cachecontrol/LICENSE.txt +++ /dev/null @@ -1,15 +0,0 @@ -Copyright 2015 Eric Larson - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -implied. - -See the License for the specific language governing permissions and -limitations under the License. diff --git a/pipenv/patched/notpip/_vendor/cachecontrol/__init__.py b/pipenv/patched/notpip/_vendor/cachecontrol/__init__.py index a1bbbbe3bf..8435d628d2 100644 --- a/pipenv/patched/notpip/_vendor/cachecontrol/__init__.py +++ b/pipenv/patched/notpip/_vendor/cachecontrol/__init__.py @@ -1,11 +1,18 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 + """CacheControl import Interface. Make it easy to import from cachecontrol without long namespaces. 
""" __author__ = "Eric Larson" __email__ = "eric@ionrock.org" -__version__ = "0.12.6" +__version__ = "0.12.10" from .wrapper import CacheControl from .adapter import CacheControlAdapter from .controller import CacheController + +import logging +logging.getLogger(__name__).addHandler(logging.NullHandler()) diff --git a/pipenv/patched/notpip/_vendor/cachecontrol/_cmd.py b/pipenv/patched/notpip/_vendor/cachecontrol/_cmd.py index fb90adf438..2b8b056ce0 100644 --- a/pipenv/patched/notpip/_vendor/cachecontrol/_cmd.py +++ b/pipenv/patched/notpip/_vendor/cachecontrol/_cmd.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 + import logging from pipenv.patched.notpip._vendor import requests diff --git a/pipenv/patched/notpip/_vendor/cachecontrol/adapter.py b/pipenv/patched/notpip/_vendor/cachecontrol/adapter.py index 58efafffb9..0aab196b77 100644 --- a/pipenv/patched/notpip/_vendor/cachecontrol/adapter.py +++ b/pipenv/patched/notpip/_vendor/cachecontrol/adapter.py @@ -1,16 +1,20 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 + import types import functools import zlib from pipenv.patched.notpip._vendor.requests.adapters import HTTPAdapter -from .controller import CacheController +from .controller import CacheController, PERMANENT_REDIRECT_STATUSES from .cache import DictCache from .filewrapper import CallbackFileWrapper class CacheControlAdapter(HTTPAdapter): - invalidating_methods = {"PUT", "DELETE"} + invalidating_methods = {"PUT", "PATCH", "DELETE"} def __init__( self, @@ -93,7 +97,7 @@ def build_response( response = cached_response # We always cache the 301 responses - elif response.status == 301: + elif int(response.status) in PERMANENT_REDIRECT_STATUSES: self.controller.cache_response(request, response) else: # Wrap the response file with a wrapper that will cache the diff --git a/pipenv/patched/notpip/_vendor/cachecontrol/cache.py b/pipenv/patched/notpip/_vendor/cachecontrol/cache.py index 94e07732d9..44e4309d20 100644 --- a/pipenv/patched/notpip/_vendor/cachecontrol/cache.py +++ b/pipenv/patched/notpip/_vendor/cachecontrol/cache.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 + """ The cache object API for implementing caches. The default is a thread safe in-memory dictionary. 
@@ -10,7 +14,7 @@ class BaseCache(object): def get(self, key): raise NotImplementedError() - def set(self, key, value): + def set(self, key, value, expires=None): raise NotImplementedError() def delete(self, key): @@ -29,7 +33,7 @@ def __init__(self, init_dict=None): def get(self, key): return self.data.get(key, None) - def set(self, key, value): + def set(self, key, value, expires=None): with self.lock: self.data.update({key: value}) diff --git a/pipenv/patched/notpip/_vendor/cachecontrol/caches/__init__.py b/pipenv/patched/notpip/_vendor/cachecontrol/caches/__init__.py index 0e1658fa5e..44becd6843 100644 --- a/pipenv/patched/notpip/_vendor/cachecontrol/caches/__init__.py +++ b/pipenv/patched/notpip/_vendor/cachecontrol/caches/__init__.py @@ -1,2 +1,6 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 + from .file_cache import FileCache # noqa from .redis_cache import RedisCache # noqa diff --git a/pipenv/patched/notpip/_vendor/cachecontrol/caches/file_cache.py b/pipenv/patched/notpip/_vendor/cachecontrol/caches/file_cache.py index 607b945242..6cd1106f88 100644 --- a/pipenv/patched/notpip/_vendor/cachecontrol/caches/file_cache.py +++ b/pipenv/patched/notpip/_vendor/cachecontrol/caches/file_cache.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 + import hashlib import os from textwrap import dedent @@ -114,7 +118,7 @@ def get(self, key): except FileNotFoundError: return None - def set(self, key, value): + def set(self, key, value, expires=None): name = self._fn(key) # Make sure the directory exists diff --git a/pipenv/patched/notpip/_vendor/cachecontrol/caches/redis_cache.py b/pipenv/patched/notpip/_vendor/cachecontrol/caches/redis_cache.py index 0cc5e97998..da97071b8e 100644 --- a/pipenv/patched/notpip/_vendor/cachecontrol/caches/redis_cache.py +++ b/pipenv/patched/notpip/_vendor/cachecontrol/caches/redis_cache.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 + from __future__ import division from datetime import datetime diff --git a/pipenv/patched/notpip/_vendor/cachecontrol/compat.py b/pipenv/patched/notpip/_vendor/cachecontrol/compat.py index 67092cb62a..0a0a218f7a 100644 --- a/pipenv/patched/notpip/_vendor/cachecontrol/compat.py +++ b/pipenv/patched/notpip/_vendor/cachecontrol/compat.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 + try: from urllib.parse import urljoin except ImportError: @@ -9,7 +13,6 @@ except ImportError: import pickle - # Handle the case where the requests module has been patched to not have # urllib3 bundled as part of its source. try: diff --git a/pipenv/patched/notpip/_vendor/cachecontrol/controller.py b/pipenv/patched/notpip/_vendor/cachecontrol/controller.py index 80bd030f0a..99b754a5b7 100644 --- a/pipenv/patched/notpip/_vendor/cachecontrol/controller.py +++ b/pipenv/patched/notpip/_vendor/cachecontrol/controller.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 + """ The httplib2 algorithms ported for use with requests. """ @@ -17,6 +21,8 @@ URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?") +PERMANENT_REDIRECT_STATUSES = (301, 308) + def parse_uri(uri): """Parses a URI using the regex given in Appendix B of RFC 3986. 
@@ -37,7 +43,7 @@ def __init__( self.cache = DictCache() if cache is None else cache self.cache_etags = cache_etags self.serializer = serializer or Serializer() - self.cacheable_status_codes = status_codes or (200, 203, 300, 301) + self.cacheable_status_codes = status_codes or (200, 203, 300, 301, 308) @classmethod def _urlnorm(cls, uri): @@ -147,17 +153,18 @@ def cached_request(self, request): logger.warning("Cache entry deserialization failed, entry ignored") return False - # If we have a cached 301, return it immediately. We don't - # need to test our response for other headers b/c it is + # If we have a cached permanent redirect, return it immediately. We + # don't need to test our response for other headers b/c it is # intrinsically "cacheable" as it is Permanent. + # # See: # https://tools.ietf.org/html/rfc7231#section-6.4.2 # # Client can try to refresh the value by repeating the request # with cache busting headers as usual (ie no-cache). - if resp.status == 301: + if int(resp.status) in PERMANENT_REDIRECT_STATUSES: msg = ( - 'Returning cached "301 Moved Permanently" response ' + "Returning cached permanent redirect response " "(ignoring date and etag information)" ) logger.debug(msg) @@ -261,6 +268,11 @@ def cache_response(self, request, response, body=None, status_codes=None): response_headers = CaseInsensitiveDict(response.headers) + if "date" in response_headers: + date = calendar.timegm(parsedate_tz(response_headers["date"])) + else: + date = 0 + # If we've been given a body, our response has a Content-Length, that # Content-Length is valid then we can check to see if the body we've # been given matches the expected size, and if it doesn't we'll just @@ -304,35 +316,62 @@ def cache_response(self, request, response, body=None, status_codes=None): # If we've been given an etag, then keep the response if self.cache_etags and "etag" in response_headers: + expires_time = 0 + if response_headers.get("expires"): + expires = parsedate_tz(response_headers["expires"]) + if expires is not None: + expires_time = calendar.timegm(expires) - date + + expires_time = max(expires_time, 14 * 86400) + + logger.debug("etag object cached for {0} seconds".format(expires_time)) logger.debug("Caching due to etag") self.cache.set( - cache_url, self.serializer.dumps(request, response, body=body) + cache_url, + self.serializer.dumps(request, response, body), + expires=expires_time, ) - # Add to the cache any 301s. We do this before looking that - # the Date headers. - elif response.status == 301: - logger.debug("Caching permanant redirect") - self.cache.set(cache_url, self.serializer.dumps(request, response)) + # Add to the cache any permanent redirects. We do this before looking + # that the Date headers. + elif int(response.status) in PERMANENT_REDIRECT_STATUSES: + logger.debug("Caching permanent redirect") + self.cache.set(cache_url, self.serializer.dumps(request, response, b"")) # Add to the cache if the response headers demand it. If there # is no date header then we can't do anything about expiring # the cache. 
elif "date" in response_headers: + date = calendar.timegm(parsedate_tz(response_headers["date"])) # cache when there is a max-age > 0 if "max-age" in cc and cc["max-age"] > 0: logger.debug("Caching b/c date exists and max-age > 0") + expires_time = cc["max-age"] self.cache.set( - cache_url, self.serializer.dumps(request, response, body=body) + cache_url, + self.serializer.dumps(request, response, body), + expires=expires_time, ) # If the request can expire, it means we should cache it # in the meantime. elif "expires" in response_headers: if response_headers["expires"]: - logger.debug("Caching b/c of expires header") + expires = parsedate_tz(response_headers["expires"]) + if expires is not None: + expires_time = calendar.timegm(expires) - date + else: + expires_time = None + + logger.debug( + "Caching b/c of expires header. expires in {0} seconds".format( + expires_time + ) + ) self.cache.set( - cache_url, self.serializer.dumps(request, response, body=body) + cache_url, + self.serializer.dumps(request, response, body=body), + expires=expires_time, ) def update_cached_response(self, request, response): diff --git a/pipenv/patched/notpip/_vendor/cachecontrol/filewrapper.py b/pipenv/patched/notpip/_vendor/cachecontrol/filewrapper.py index 30ed4c5a62..f5ed5f6f6e 100644 --- a/pipenv/patched/notpip/_vendor/cachecontrol/filewrapper.py +++ b/pipenv/patched/notpip/_vendor/cachecontrol/filewrapper.py @@ -1,4 +1,9 @@ -from io import BytesIO +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 + +from tempfile import NamedTemporaryFile +import mmap class CallbackFileWrapper(object): @@ -11,10 +16,17 @@ class CallbackFileWrapper(object): This class uses members with a double underscore (__) leading prefix so as not to accidentally shadow an attribute. + + The data is stored in a temporary file until it is all available. As long + as the temporary files directory is disk-based (sometimes it's a + memory-backed-``tmpfs`` on Linux), data will be unloaded to disk if memory + pressure is high. For small files the disk usually won't be used at all, + it'll all be in the filesystem memory cache, so there should be no + performance impact. """ def __init__(self, fp, callback): - self.__buf = BytesIO() + self.__buf = NamedTemporaryFile("rb+", delete=True) self.__fp = fp self.__callback = callback @@ -49,7 +61,19 @@ def __is_fp_closed(self): def _close(self): if self.__callback: - self.__callback(self.__buf.getvalue()) + if self.__buf.tell() == 0: + # Empty file: + result = b"" + else: + # Return the data without actually loading it into memory, + # relying on Python's buffer API and mmap(). mmap() just gives + # a view directly into the filesystem's memory cache, so it + # doesn't result in duplicate memory use. + self.__buf.seek(0, 0) + result = memoryview( + mmap.mmap(self.__buf.fileno(), 0, access=mmap.ACCESS_READ) + ) + self.__callback(result) # We assign this to None here, because otherwise we can get into # really tricky problems where the CPython interpreter dead locks @@ -58,9 +82,16 @@ def _close(self): # and allows the garbage collector to do it's thing normally. self.__callback = None + # Closing the temporary file releases memory and frees disk space. + # Important when caching big files. + self.__buf.close() + def read(self, amt=None): data = self.__fp.read(amt) - self.__buf.write(data) + if data: + # We may be dealing with b'', a sign that things are over: + # it's passed e.g. after we've already closed self.__buf. 
+ self.__buf.write(data) if self.__is_fp_closed(): self._close() diff --git a/pipenv/patched/notpip/_vendor/cachecontrol/heuristics.py b/pipenv/patched/notpip/_vendor/cachecontrol/heuristics.py index 6c0e9790d5..ebe4a96f58 100644 --- a/pipenv/patched/notpip/_vendor/cachecontrol/heuristics.py +++ b/pipenv/patched/notpip/_vendor/cachecontrol/heuristics.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 + import calendar import time diff --git a/pipenv/patched/notpip/_vendor/cachecontrol/serialize.py b/pipenv/patched/notpip/_vendor/cachecontrol/serialize.py index c5da06ad7c..8a67c15a12 100644 --- a/pipenv/patched/notpip/_vendor/cachecontrol/serialize.py +++ b/pipenv/patched/notpip/_vendor/cachecontrol/serialize.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 + import base64 import io import json @@ -17,24 +21,18 @@ def _b64_decode_str(s): return _b64_decode_bytes(s).decode("utf8") -class Serializer(object): +_default_body_read = object() + +class Serializer(object): def dumps(self, request, response, body=None): response_headers = CaseInsensitiveDict(response.headers) if body is None: + # When a body isn't passed in, we'll read the response. We + # also update the response with a new file handler to be + # sure it acts as though it was never read. body = response.read(decode_content=False) - - # NOTE: 99% sure this is dead code. I'm only leaving it - # here b/c I don't have a test yet to prove - # it. Basically, before using - # `cachecontrol.filewrapper.CallbackFileWrapper`, - # this made an effort to reset the file handle. The - # `CallbackFileWrapper` short circuits this code by - # setting the body as the content is consumed, the - # result being a `body` argument is *always* passed - # into cache_response, and in turn, - # `Serializer.dump`. response._fp = io.BytesIO(body) # NOTE: This is all a bit weird, but it's really important that on diff --git a/pipenv/patched/notpip/_vendor/cachecontrol/wrapper.py b/pipenv/patched/notpip/_vendor/cachecontrol/wrapper.py index d8e6fc6a9e..b6ee7f2039 100644 --- a/pipenv/patched/notpip/_vendor/cachecontrol/wrapper.py +++ b/pipenv/patched/notpip/_vendor/cachecontrol/wrapper.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 + from .adapter import CacheControlAdapter from .cache import DictCache diff --git a/pipenv/patched/notpip/_vendor/certifi/LICENSE b/pipenv/patched/notpip/_vendor/certifi/LICENSE deleted file mode 100644 index c2fda9a264..0000000000 --- a/pipenv/patched/notpip/_vendor/certifi/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -This package contains a modified version of ca-bundle.crt: - -ca-bundle.crt -- Bundle of CA Root Certificates - -Certificate data from Mozilla as of: Thu Nov 3 19:04:19 2011# -This is a bundle of X.509 certificates of public Certificate Authorities -(CA). These were automatically extracted from Mozilla's root certificates -file (certdata.txt). This file can be found in the mozilla source tree: -http://mxr.mozilla.org/mozilla/source/security/nss/lib/ckfw/builtins/certdata.txt?raw=1# -It contains the certificates in PEM format and therefore -can be directly used with curl / libcurl / php_curl, or with -an Apache+mod_ssl webserver for SSL client authentication. -Just configure this file as the SSLCACertificateFile.# - -***** BEGIN LICENSE BLOCK ***** -This Source Code Form is subject to the terms of the Mozilla Public License, -v. 2.0. 
If a copy of the MPL was not distributed with this file, You can obtain -one at http://mozilla.org/MPL/2.0/. - -***** END LICENSE BLOCK ***** -@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $ diff --git a/pipenv/patched/notpip/_vendor/certifi/__init__.py b/pipenv/patched/notpip/_vendor/certifi/__init__.py index eebdf88867..8db1a0e554 100644 --- a/pipenv/patched/notpip/_vendor/certifi/__init__.py +++ b/pipenv/patched/notpip/_vendor/certifi/__init__.py @@ -1,3 +1,3 @@ from .core import contents, where -__version__ = "2021.05.30" +__version__ = "2021.10.08" diff --git a/pipenv/patched/notpip/_vendor/certifi/cacert.pem b/pipenv/patched/notpip/_vendor/certifi/cacert.pem index 96e2fc65a8..6d0ccc0d1c 100644 --- a/pipenv/patched/notpip/_vendor/certifi/cacert.pem +++ b/pipenv/patched/notpip/_vendor/certifi/cacert.pem @@ -4255,3 +4255,108 @@ qJZ9ZPskWkoDbGs4xugDQ5r3V7mzKWmTOPQD8rv7gmsHINFSH5pkAnuYZttcTVoP 0ISVoDwUQwbKytu4QTbaakRnh6+v40URFWkIsr4WOZckbxJF0WddCajJFdr60qZf E2Efv4WstK2tBZQIgx51F9NxO5NQI1mg7TyRVJ12AMXDuDjb -----END CERTIFICATE----- + +# Issuer: CN=TunTrust Root CA O=Agence Nationale de Certification Electronique +# Subject: CN=TunTrust Root CA O=Agence Nationale de Certification Electronique +# Label: "TunTrust Root CA" +# Serial: 108534058042236574382096126452369648152337120275 +# MD5 Fingerprint: 85:13:b9:90:5b:36:5c:b6:5e:b8:5a:f8:e0:31:57:b4 +# SHA1 Fingerprint: cf:e9:70:84:0f:e0:73:0f:9d:f6:0c:7f:2c:4b:ee:20:46:34:9c:bb +# SHA256 Fingerprint: 2e:44:10:2a:b5:8c:b8:54:19:45:1c:8e:19:d9:ac:f3:66:2c:af:bc:61:4b:6a:53:96:0a:30:f7:d0:e2:eb:41 +-----BEGIN CERTIFICATE----- +MIIFszCCA5ugAwIBAgIUEwLV4kBMkkaGFmddtLu7sms+/BMwDQYJKoZIhvcNAQEL +BQAwYTELMAkGA1UEBhMCVE4xNzA1BgNVBAoMLkFnZW5jZSBOYXRpb25hbGUgZGUg +Q2VydGlmaWNhdGlvbiBFbGVjdHJvbmlxdWUxGTAXBgNVBAMMEFR1blRydXN0IFJv +b3QgQ0EwHhcNMTkwNDI2MDg1NzU2WhcNNDQwNDI2MDg1NzU2WjBhMQswCQYDVQQG +EwJUTjE3MDUGA1UECgwuQWdlbmNlIE5hdGlvbmFsZSBkZSBDZXJ0aWZpY2F0aW9u +IEVsZWN0cm9uaXF1ZTEZMBcGA1UEAwwQVHVuVHJ1c3QgUm9vdCBDQTCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAMPN0/y9BFPdDCA61YguBUtB9YOCfvdZ +n56eY+hz2vYGqU8ftPkLHzmMmiDQfgbU7DTZhrx1W4eI8NLZ1KMKsmwb60ksPqxd +2JQDoOw05TDENX37Jk0bbjBU2PWARZw5rZzJJQRNmpA+TkBuimvNKWfGzC3gdOgF +VwpIUPp6Q9p+7FuaDmJ2/uqdHYVy7BG7NegfJ7/Boce7SBbdVtfMTqDhuazb1YMZ +GoXRlJfXyqNlC/M4+QKu3fZnz8k/9YosRxqZbwUN/dAdgjH8KcwAWJeRTIAAHDOF +li/LQcKLEITDCSSJH7UP2dl3RxiSlGBcx5kDPP73lad9UKGAwqmDrViWVSHbhlnU +r8a83YFuB9tgYv7sEG7aaAH0gxupPqJbI9dkxt/con3YS7qC0lH4Zr8GRuR5KiY2 +eY8fTpkdso8MDhz/yV3A/ZAQprE38806JG60hZC/gLkMjNWb1sjxVj8agIl6qeIb +MlEsPvLfe/ZdeikZjuXIvTZxi11Mwh0/rViizz1wTaZQmCXcI/m4WEEIcb9PuISg +jwBUFfyRbVinljvrS5YnzWuioYasDXxU5mZMZl+QviGaAkYt5IPCgLnPSz7ofzwB +7I9ezX/SKEIBlYrilz0QIX32nRzFNKHsLA4KUiwSVXAkPcvCFDVDXSdOvsC9qnyW +5/yeYa1E0wCXAgMBAAGjYzBhMB0GA1UdDgQWBBQGmpsfU33x9aTI04Y+oXNZtPdE +ITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFAaamx9TffH1pMjThj6hc1m0 +90QhMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAqgVutt0Vyb+z +xiD2BkewhpMl0425yAA/l/VSJ4hxyXT968pk21vvHl26v9Hr7lxpuhbI87mP0zYu +QEkHDVneixCwSQXi/5E/S7fdAo74gShczNxtr18UnH1YeA32gAm56Q6XKRm4t+v4 +FstVEuTGfbvE7Pi1HE4+Z7/FXxttbUcoqgRYYdZ2vyJ/0Adqp2RT8JeNnYA/u8EH +22Wv5psymsNUk8QcCMNE+3tjEUPRahphanltkE8pjkcFwRJpadbGNjHh/PqAulxP +xOu3Mqz4dWEX1xAZufHSCe96Qp1bWgvUxpVOKs7/B9dPfhgGiPEZtdmYu65xxBzn +dFlY7wyJz4sfdZMaBBSSSFCp61cpABbjNhzI+L/wM9VBD8TMPN3pM0MBkRArHtG5 +Xc0yGYuPjCB31yLEQtyEFpslbei0VXF/sHyz03FJuc9SpAQ/3D2gu68zngowYI7b +nV2UqL1g52KAdoGDDIzMMEZJ4gzSqK/rYXHv5yJiqfdcZGyfFoxnNidF9Ql7v/YQ +CvGwjVRDjAS6oz/v4jXH+XTgbzRB0L9zZVcg+ZtnemZoJE6AZb0QmQZZ8mWvuMZH 
+u/2QeItBcy6vVR/cO5JyboTT0GFMDcx2V+IthSIVNg3rAZ3r2OvEhJn7wAzMMujj +d9qDRIueVSjAi1jTkD5OGwDxFa2DK5o= +-----END CERTIFICATE----- + +# Issuer: CN=HARICA TLS RSA Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Subject: CN=HARICA TLS RSA Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Label: "HARICA TLS RSA Root CA 2021" +# Serial: 76817823531813593706434026085292783742 +# MD5 Fingerprint: 65:47:9b:58:86:dd:2c:f0:fc:a2:84:1f:1e:96:c4:91 +# SHA1 Fingerprint: 02:2d:05:82:fa:88:ce:14:0c:06:79:de:7f:14:10:e9:45:d7:a5:6d +# SHA256 Fingerprint: d9:5d:0e:8e:da:79:52:5b:f9:be:b1:1b:14:d2:10:0d:32:94:98:5f:0c:62:d9:fa:bd:9c:d9:99:ec:cb:7b:1d +-----BEGIN CERTIFICATE----- +MIIFpDCCA4ygAwIBAgIQOcqTHO9D88aOk8f0ZIk4fjANBgkqhkiG9w0BAQsFADBs +MQswCQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBSU0Eg +Um9vdCBDQSAyMDIxMB4XDTIxMDIxOTEwNTUzOFoXDTQ1MDIxMzEwNTUzN1owbDEL +MAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl +YXJjaCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgUlNBIFJv +b3QgQ0EgMjAyMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAIvC569l +mwVnlskNJLnQDmT8zuIkGCyEf3dRywQRNrhe7Wlxp57kJQmXZ8FHws+RFjZiPTgE +4VGC/6zStGndLuwRo0Xua2s7TL+MjaQenRG56Tj5eg4MmOIjHdFOY9TnuEFE+2uv +a9of08WRiFukiZLRgeaMOVig1mlDqa2YUlhu2wr7a89o+uOkXjpFc5gH6l8Cct4M +pbOfrqkdtx2z/IpZ525yZa31MJQjB/OCFks1mJxTuy/K5FrZx40d/JiZ+yykgmvw +Kh+OC19xXFyuQnspiYHLA6OZyoieC0AJQTPb5lh6/a6ZcMBaD9YThnEvdmn8kN3b +LW7R8pv1GmuebxWMevBLKKAiOIAkbDakO/IwkfN4E8/BPzWr8R0RI7VDIp4BkrcY +AuUR0YLbFQDMYTfBKnya4dC6s1BG7oKsnTH4+yPiAwBIcKMJJnkVU2DzOFytOOqB +AGMUuTNe3QvboEUHGjMJ+E20pwKmafTCWQWIZYVWrkvL4N48fS0ayOn7H6NhStYq +E613TBoYm5EPWNgGVMWX+Ko/IIqmhaZ39qb8HOLubpQzKoNQhArlT4b4UEV4AIHr +W2jjJo3Me1xR9BQsQL4aYB16cmEdH2MtiKrOokWQCPxrvrNQKlr9qEgYRtaQQJKQ +CoReaDH46+0N0x3GfZkYVVYnZS6NRcUk7M7jAgMBAAGjQjBAMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFApII6ZgpJIKM+qTW8VX6iVNvRLuMA4GA1UdDwEB/wQE +AwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAPpBIqm5iFSVmewzVjIuJndftTgfvnNAU +X15QvWiWkKQUEapobQk1OUAJ2vQJLDSle1mESSmXdMgHHkdt8s4cUCbjnj1AUz/3 +f5Z2EMVGpdAgS1D0NTsY9FVqQRtHBmg8uwkIYtlfVUKqrFOFrJVWNlar5AWMxaja +H6NpvVMPxP/cyuN+8kyIhkdGGvMA9YCRotxDQpSbIPDRzbLrLFPCU3hKTwSUQZqP +JzLB5UkZv/HywouoCjkxKLR9YjYsTewfM7Z+d21+UPCfDtcRj88YxeMn/ibvBZ3P +zzfF0HvaO7AWhAw6k9a+F9sPPg4ZeAnHqQJyIkv3N3a6dcSFA1pj1bF1BcK5vZSt +jBWZp5N99sXzqnTPBIWUmAD04vnKJGW/4GKvyMX6ssmeVkjaef2WdhW+o45WxLM0 +/L5H9MG0qPzVMIho7suuyWPEdr6sOBjhXlzPrjoiUevRi7PzKzMHVIf6tLITe7pT +BGIBnfHAT+7hOtSLIBD6Alfm78ELt5BGnBkpjNxvoEppaZS3JGWg/6w/zgH7IS79 +aPib8qXPMThcFarmlwDB31qlpzmq6YR/PFGoOtmUW4y/Twhx5duoXNTSpv4Ao8YW +xw/ogM4cKGR0GQjTQuPOAF1/sdwTsOEFy9EgqoZ0njnnkf3/W9b3raYvAwtt41dU +63ZTGI0RmLo= +-----END CERTIFICATE----- + +# Issuer: CN=HARICA TLS ECC Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Subject: CN=HARICA TLS ECC Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Label: "HARICA TLS ECC Root CA 2021" +# Serial: 137515985548005187474074462014555733966 +# MD5 Fingerprint: ae:f7:4c:e5:66:35:d1:b7:9b:8c:22:93:74:d3:4b:b0 +# SHA1 Fingerprint: bc:b0:c1:9d:e9:98:92:70:19:38:57:e9:8d:a7:b4:5d:6e:ee:01:48 +# SHA256 Fingerprint: 3f:99:cc:47:4a:cf:ce:4d:fe:d5:87:94:66:5e:47:8d:15:47:73:9f:2e:78:0f:1b:b4:ca:9b:13:30:97:d4:01 +-----BEGIN CERTIFICATE----- +MIICVDCCAdugAwIBAgIQZ3SdjXfYO2rbIvT/WeK/zjAKBggqhkjOPQQDAzBsMQsw +CQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2Vh +cmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBFQ0MgUm9v 
+dCBDQSAyMDIxMB4XDTIxMDIxOTExMDExMFoXDTQ1MDIxMzExMDEwOVowbDELMAkG +A1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJj +aCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgRUNDIFJvb3Qg +Q0EgMjAyMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABDgI/rGgltJ6rK9JOtDA4MM7 +KKrxcm1lAEeIhPyaJmuqS7psBAqIXhfyVYf8MLA04jRYVxqEU+kw2anylnTDUR9Y +STHMmE5gEYd103KUkE+bECUqqHgtvpBBWJAVcqeht6NCMEAwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUyRtTgRL+BNUW0aq8mm+3oJUZbsowDgYDVR0PAQH/BAQD +AgGGMAoGCCqGSM49BAMDA2cAMGQCMBHervjcToiwqfAircJRQO9gcS3ujwLEXQNw +SaSS6sUUiHCm0w2wqsosQJz76YJumgIwK0eaB8bRwoF8yguWGEEbo/QwCZ61IygN +nxS2PFOiTAZpffpskcYqSUXm7LcT4Tps +-----END CERTIFICATE----- diff --git a/pipenv/patched/notpip/_vendor/chardet/LICENSE b/pipenv/patched/notpip/_vendor/chardet/LICENSE deleted file mode 100644 index 8add30ad59..0000000000 --- a/pipenv/patched/notpip/_vendor/chardet/LICENSE +++ /dev/null @@ -1,504 +0,0 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 2.1, February 1999 - - Copyright (C) 1991, 1999 Free Software Foundation, Inc. - 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - -[This is the first released version of the Lesser GPL. It also counts - as the successor of the GNU Library Public License, version 2, hence - the version number 2.1.] - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -Licenses are intended to guarantee your freedom to share and change -free software--to make sure the software is free for all its users. - - This license, the Lesser General Public License, applies to some -specially designated software packages--typically libraries--of the -Free Software Foundation and other authors who decide to use it. You -can use it too, but we suggest you first think carefully about whether -this license or the ordinary General Public License is the better -strategy to use in any particular case, based on the explanations below. - - When we speak of free software, we are referring to freedom of use, -not price. Our General Public Licenses are designed to make sure that -you have the freedom to distribute copies of free software (and charge -for this service if you wish); that you receive source code or can get -it if you want it; that you can change the software and use pieces of -it in new free programs; and that you are informed that you can do -these things. - - To protect your rights, we need to make restrictions that forbid -distributors to deny you these rights or to ask you to surrender these -rights. These restrictions translate to certain responsibilities for -you if you distribute copies of the library or if you modify it. - - For example, if you distribute copies of the library, whether gratis -or for a fee, you must give the recipients all the rights that we gave -you. You must make sure that they, too, receive or can get the source -code. If you link other code with the library, you must provide -complete object files to the recipients, so that they can relink them -with the library after making changes to the library and recompiling -it. And you must show them these terms so they know their rights. - - We protect your rights with a two-step method: (1) we copyright the -library, and (2) we offer you this license, which gives you legal -permission to copy, distribute and/or modify the library. 
- - To protect each distributor, we want to make it very clear that -there is no warranty for the free library. Also, if the library is -modified by someone else and passed on, the recipients should know -that what they have is not the original version, so that the original -author's reputation will not be affected by problems that might be -introduced by others. - - Finally, software patents pose a constant threat to the existence of -any free program. We wish to make sure that a company cannot -effectively restrict the users of a free program by obtaining a -restrictive license from a patent holder. Therefore, we insist that -any patent license obtained for a version of the library must be -consistent with the full freedom of use specified in this license. - - Most GNU software, including some libraries, is covered by the -ordinary GNU General Public License. This license, the GNU Lesser -General Public License, applies to certain designated libraries, and -is quite different from the ordinary General Public License. We use -this license for certain libraries in order to permit linking those -libraries into non-free programs. - - When a program is linked with a library, whether statically or using -a shared library, the combination of the two is legally speaking a -combined work, a derivative of the original library. The ordinary -General Public License therefore permits such linking only if the -entire combination fits its criteria of freedom. The Lesser General -Public License permits more lax criteria for linking other code with -the library. - - We call this license the "Lesser" General Public License because it -does Less to protect the user's freedom than the ordinary General -Public License. It also provides other free software developers Less -of an advantage over competing non-free programs. These disadvantages -are the reason we use the ordinary General Public License for many -libraries. However, the Lesser license provides advantages in certain -special circumstances. - - For example, on rare occasions, there may be a special need to -encourage the widest possible use of a certain library, so that it becomes -a de-facto standard. To achieve this, non-free programs must be -allowed to use the library. A more frequent case is that a free -library does the same job as widely used non-free libraries. In this -case, there is little to gain by limiting the free library to free -software only, so we use the Lesser General Public License. - - In other cases, permission to use a particular library in non-free -programs enables a greater number of people to use a large body of -free software. For example, permission to use the GNU C Library in -non-free programs enables many more people to use the whole GNU -operating system, as well as its variant, the GNU/Linux operating -system. - - Although the Lesser General Public License is Less protective of the -users' freedom, it does ensure that the user of a program that is -linked with the Library has the freedom and the wherewithal to run -that program using a modified version of the Library. - - The precise terms and conditions for copying, distribution and -modification follow. Pay close attention to the difference between a -"work based on the library" and a "work that uses the library". The -former contains code derived from the library, whereas the latter must -be combined with the library in order to run. - - GNU LESSER GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. 
This License Agreement applies to any software library or other -program which contains a notice placed by the copyright holder or -other authorized party saying it may be distributed under the terms of -this Lesser General Public License (also called "this License"). -Each licensee is addressed as "you". - - A "library" means a collection of software functions and/or data -prepared so as to be conveniently linked with application programs -(which use some of those functions and data) to form executables. - - The "Library", below, refers to any such software library or work -which has been distributed under these terms. A "work based on the -Library" means either the Library or any derivative work under -copyright law: that is to say, a work containing the Library or a -portion of it, either verbatim or with modifications and/or translated -straightforwardly into another language. (Hereinafter, translation is -included without limitation in the term "modification".) - - "Source code" for a work means the preferred form of the work for -making modifications to it. For a library, complete source code means -all the source code for all modules it contains, plus any associated -interface definition files, plus the scripts used to control compilation -and installation of the library. - - Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running a program using the Library is not restricted, and output from -such a program is covered only if its contents constitute a work based -on the Library (independent of the use of the Library in a tool for -writing it). Whether that is true depends on what the Library does -and what the program that uses the Library does. - - 1. You may copy and distribute verbatim copies of the Library's -complete source code as you receive it, in any medium, provided that -you conspicuously and appropriately publish on each copy an -appropriate copyright notice and disclaimer of warranty; keep intact -all the notices that refer to this License and to the absence of any -warranty; and distribute a copy of this License along with the -Library. - - You may charge a fee for the physical act of transferring a copy, -and you may at your option offer warranty protection in exchange for a -fee. - - 2. You may modify your copy or copies of the Library or any portion -of it, thus forming a work based on the Library, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) The modified work must itself be a software library. - - b) You must cause the files modified to carry prominent notices - stating that you changed the files and the date of any change. - - c) You must cause the whole of the work to be licensed at no - charge to all third parties under the terms of this License. - - d) If a facility in the modified Library refers to a function or a - table of data to be supplied by an application program that uses - the facility, other than as an argument passed when the facility - is invoked, then you must make a good faith effort to ensure that, - in the event an application does not supply such function or - table, the facility still operates, and performs whatever part of - its purpose remains meaningful. - - (For example, a function in a library to compute square roots has - a purpose that is entirely well-defined independent of the - application. 
Therefore, Subsection 2d requires that any - application-supplied function or table used by this function must - be optional: if the application does not supply it, the square - root function must still compute square roots.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Library, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Library, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote -it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Library. - -In addition, mere aggregation of another work not based on the Library -with the Library (or with a work based on the Library) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may opt to apply the terms of the ordinary GNU General Public -License instead of this License to a given copy of the Library. To do -this, you must alter all the notices that refer to this License, so -that they refer to the ordinary GNU General Public License, version 2, -instead of to this License. (If a newer version than version 2 of the -ordinary GNU General Public License has appeared, then you can specify -that version instead if you wish.) Do not make any other change in -these notices. - - Once this change is made in a given copy, it is irreversible for -that copy, so the ordinary GNU General Public License applies to all -subsequent copies and derivative works made from that copy. - - This option is useful when you wish to copy part of the code of -the Library into a program that is not a library. - - 4. You may copy and distribute the Library (or a portion or -derivative of it, under Section 2) in object code or executable form -under the terms of Sections 1 and 2 above provided that you accompany -it with the complete corresponding machine-readable source code, which -must be distributed under the terms of Sections 1 and 2 above on a -medium customarily used for software interchange. - - If distribution of object code is made by offering access to copy -from a designated place, then offering equivalent access to copy the -source code from the same place satisfies the requirement to -distribute the source code, even though third parties are not -compelled to copy the source along with the object code. - - 5. A program that contains no derivative of any portion of the -Library, but is designed to work with the Library by being compiled or -linked with it, is called a "work that uses the Library". Such a -work, in isolation, is not a derivative work of the Library, and -therefore falls outside the scope of this License. - - However, linking a "work that uses the Library" with the Library -creates an executable that is a derivative of the Library (because it -contains portions of the Library), rather than a "work that uses the -library". The executable is therefore covered by this License. -Section 6 states terms for distribution of such executables. 
- - When a "work that uses the Library" uses material from a header file -that is part of the Library, the object code for the work may be a -derivative work of the Library even though the source code is not. -Whether this is true is especially significant if the work can be -linked without the Library, or if the work is itself a library. The -threshold for this to be true is not precisely defined by law. - - If such an object file uses only numerical parameters, data -structure layouts and accessors, and small macros and small inline -functions (ten lines or less in length), then the use of the object -file is unrestricted, regardless of whether it is legally a derivative -work. (Executables containing this object code plus portions of the -Library will still fall under Section 6.) - - Otherwise, if the work is a derivative of the Library, you may -distribute the object code for the work under the terms of Section 6. -Any executables containing that work also fall under Section 6, -whether or not they are linked directly with the Library itself. - - 6. As an exception to the Sections above, you may also combine or -link a "work that uses the Library" with the Library to produce a -work containing portions of the Library, and distribute that work -under terms of your choice, provided that the terms permit -modification of the work for the customer's own use and reverse -engineering for debugging such modifications. - - You must give prominent notice with each copy of the work that the -Library is used in it and that the Library and its use are covered by -this License. You must supply a copy of this License. If the work -during execution displays copyright notices, you must include the -copyright notice for the Library among them, as well as a reference -directing the user to the copy of this License. Also, you must do one -of these things: - - a) Accompany the work with the complete corresponding - machine-readable source code for the Library including whatever - changes were used in the work (which must be distributed under - Sections 1 and 2 above); and, if the work is an executable linked - with the Library, with the complete machine-readable "work that - uses the Library", as object code and/or source code, so that the - user can modify the Library and then relink to produce a modified - executable containing the modified Library. (It is understood - that the user who changes the contents of definitions files in the - Library will not necessarily be able to recompile the application - to use the modified definitions.) - - b) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (1) uses at run time a - copy of the library already present on the user's computer system, - rather than copying library functions into the executable, and (2) - will operate properly with a modified version of the library, if - the user installs one, as long as the modified version is - interface-compatible with the version that the work was made with. - - c) Accompany the work with a written offer, valid for at - least three years, to give the same user the materials - specified in Subsection 6a, above, for a charge no more - than the cost of performing this distribution. - - d) If distribution of the work is made by offering access to copy - from a designated place, offer equivalent access to copy the above - specified materials from the same place. 
- - e) Verify that the user has already received a copy of these - materials or that you have already sent this user a copy. - - For an executable, the required form of the "work that uses the -Library" must include any data and utility programs needed for -reproducing the executable from it. However, as a special exception, -the materials to be distributed need not include anything that is -normally distributed (in either source or binary form) with the major -components (compiler, kernel, and so on) of the operating system on -which the executable runs, unless that component itself accompanies -the executable. - - It may happen that this requirement contradicts the license -restrictions of other proprietary libraries that do not normally -accompany the operating system. Such a contradiction means you cannot -use both them and the Library together in an executable that you -distribute. - - 7. You may place library facilities that are a work based on the -Library side-by-side in a single library together with other library -facilities not covered by this License, and distribute such a combined -library, provided that the separate distribution of the work based on -the Library and of the other library facilities is otherwise -permitted, and provided that you do these two things: - - a) Accompany the combined library with a copy of the same work - based on the Library, uncombined with any other library - facilities. This must be distributed under the terms of the - Sections above. - - b) Give prominent notice with the combined library of the fact - that part of it is a work based on the Library, and explaining - where to find the accompanying uncombined form of the same work. - - 8. You may not copy, modify, sublicense, link with, or distribute -the Library except as expressly provided under this License. Any -attempt otherwise to copy, modify, sublicense, link with, or -distribute the Library is void, and will automatically terminate your -rights under this License. However, parties who have received copies, -or rights, from you under this License will not have their licenses -terminated so long as such parties remain in full compliance. - - 9. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Library or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Library (or any work based on the -Library), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Library or works based on it. - - 10. Each time you redistribute the Library (or any work based on the -Library), the recipient automatically receives a license from the -original licensor to copy, distribute, link with or modify the Library -subject to these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties with -this License. - - 11. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. 
If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Library at all. For example, if a patent -license would not permit royalty-free redistribution of the Library by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Library. - -If any portion of this section is held invalid or unenforceable under any -particular circumstance, the balance of the section is intended to apply, -and the section as a whole is intended to apply in other circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 12. If the distribution and/or use of the Library is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Library under this License may add -an explicit geographical distribution limitation excluding those countries, -so that distribution is permitted only in or among countries not thus -excluded. In such case, this License incorporates the limitation as if -written in the body of this License. - - 13. The Free Software Foundation may publish revised and/or new -versions of the Lesser General Public License from time to time. -Such new versions will be similar in spirit to the present version, -but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Library -specifies a version number of this License which applies to it and -"any later version", you have the option of following the terms and -conditions either of that version or of any later version published by -the Free Software Foundation. If the Library does not specify a -license version number, you may choose any version ever published by -the Free Software Foundation. - - 14. If you wish to incorporate parts of the Library into other free -programs whose distribution conditions are incompatible with these, -write to the author to ask for permission. For software which is -copyrighted by the Free Software Foundation, write to the Free -Software Foundation; we sometimes make exceptions for this. Our -decision will be guided by the two goals of preserving the free status -of all derivatives of our free software and of promoting the sharing -and reuse of software generally. - - NO WARRANTY - - 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO -WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
-EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR -OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY -KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE -LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME -THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN -WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY -AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU -FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR -CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE -LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING -RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A -FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF -SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH -DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Libraries - - If you develop a new library, and you want it to be of the greatest -possible use to the public, we recommend making it free software that -everyone can redistribute and change. You can do so by permitting -redistribution under these terms (or, alternatively, under the terms of the -ordinary General Public License). - - To apply these terms, attach the following notices to the library. It is -safest to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least the -"copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -Also add information on how to contact you by electronic and paper mail. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the library, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the - library `Frob' (a library for tweaking knobs) written by James Random Hacker. - - , 1 April 1990 - Ty Coon, President of Vice - -That's all there is to it! 
- - diff --git a/pipenv/patched/notpip/_vendor/distlib/__init__.py b/pipenv/patched/notpip/_vendor/distlib/__init__.py index 492c2c7058..1154948107 100644 --- a/pipenv/patched/notpip/_vendor/distlib/__init__.py +++ b/pipenv/patched/notpip/_vendor/distlib/__init__.py @@ -6,7 +6,7 @@ # import logging -__version__ = '0.3.2' +__version__ = '0.3.3' class DistlibException(Exception): pass diff --git a/pipenv/patched/notpip/_vendor/distlib/_backport/__init__.py b/pipenv/patched/notpip/_vendor/distlib/_backport/__init__.py deleted file mode 100644 index f7dbf4c9aa..0000000000 --- a/pipenv/patched/notpip/_vendor/distlib/_backport/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -"""Modules copied from Python 3 standard libraries, for internal use only. - -Individual classes and functions are found in d2._backport.misc. Intended -usage is to always import things missing from 3.1 from that module: the -built-in/stdlib objects will be used if found. -""" diff --git a/pipenv/patched/notpip/_vendor/distlib/_backport/misc.py b/pipenv/patched/notpip/_vendor/distlib/_backport/misc.py deleted file mode 100644 index cfb318d34f..0000000000 --- a/pipenv/patched/notpip/_vendor/distlib/_backport/misc.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012 The Python Software Foundation. -# See LICENSE.txt and CONTRIBUTORS.txt. -# -"""Backports for individual classes and functions.""" - -import os -import sys - -__all__ = ['cache_from_source', 'callable', 'fsencode'] - - -try: - from imp import cache_from_source -except ImportError: - def cache_from_source(py_file, debug=__debug__): - ext = debug and 'c' or 'o' - return py_file + ext - - -try: - callable = callable -except NameError: - from collections import Callable - - def callable(obj): - return isinstance(obj, Callable) - - -try: - fsencode = os.fsencode -except AttributeError: - def fsencode(filename): - if isinstance(filename, bytes): - return filename - elif isinstance(filename, str): - return filename.encode(sys.getfilesystemencoding()) - else: - raise TypeError("expect bytes or str, not %s" % - type(filename).__name__) diff --git a/pipenv/patched/notpip/_vendor/distlib/_backport/shutil.py b/pipenv/patched/notpip/_vendor/distlib/_backport/shutil.py deleted file mode 100644 index 10ed362539..0000000000 --- a/pipenv/patched/notpip/_vendor/distlib/_backport/shutil.py +++ /dev/null @@ -1,764 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012 The Python Software Foundation. -# See LICENSE.txt and CONTRIBUTORS.txt. -# -"""Utility functions for copying and archiving files and directory trees. - -XXX The functions here don't copy the resource fork or other metadata on Mac. - -""" - -import os -import sys -import stat -from os.path import abspath -import fnmatch -try: - from collections.abc import Callable -except ImportError: - from collections import Callable -import errno -from . 
import tarfile - -try: - import bz2 - _BZ2_SUPPORTED = True -except ImportError: - _BZ2_SUPPORTED = False - -try: - from pwd import getpwnam -except ImportError: - getpwnam = None - -try: - from grp import getgrnam -except ImportError: - getgrnam = None - -__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2", - "copytree", "move", "rmtree", "Error", "SpecialFileError", - "ExecError", "make_archive", "get_archive_formats", - "register_archive_format", "unregister_archive_format", - "get_unpack_formats", "register_unpack_format", - "unregister_unpack_format", "unpack_archive", "ignore_patterns"] - -class Error(EnvironmentError): - pass - -class SpecialFileError(EnvironmentError): - """Raised when trying to do a kind of operation (e.g. copying) which is - not supported on a special file (e.g. a named pipe)""" - -class ExecError(EnvironmentError): - """Raised when a command could not be executed""" - -class ReadError(EnvironmentError): - """Raised when an archive cannot be read""" - -class RegistryError(Exception): - """Raised when a registry operation with the archiving - and unpacking registries fails""" - - -try: - WindowsError -except NameError: - WindowsError = None - -def copyfileobj(fsrc, fdst, length=16*1024): - """copy data from file-like object fsrc to file-like object fdst""" - while 1: - buf = fsrc.read(length) - if not buf: - break - fdst.write(buf) - -def _samefile(src, dst): - # Macintosh, Unix. - if hasattr(os.path, 'samefile'): - try: - return os.path.samefile(src, dst) - except OSError: - return False - - # All other platforms: check for same pathname. - return (os.path.normcase(os.path.abspath(src)) == - os.path.normcase(os.path.abspath(dst))) - -def copyfile(src, dst): - """Copy data from src to dst""" - if _samefile(src, dst): - raise Error("`%s` and `%s` are the same file" % (src, dst)) - - for fn in [src, dst]: - try: - st = os.stat(fn) - except OSError: - # File most likely does not exist - pass - else: - # XXX What about other special files? (sockets, devices...) - if stat.S_ISFIFO(st.st_mode): - raise SpecialFileError("`%s` is a named pipe" % fn) - - with open(src, 'rb') as fsrc: - with open(dst, 'wb') as fdst: - copyfileobj(fsrc, fdst) - -def copymode(src, dst): - """Copy mode bits from src to dst""" - if hasattr(os, 'chmod'): - st = os.stat(src) - mode = stat.S_IMODE(st.st_mode) - os.chmod(dst, mode) - -def copystat(src, dst): - """Copy all stat info (mode bits, atime, mtime, flags) from src to dst""" - st = os.stat(src) - mode = stat.S_IMODE(st.st_mode) - if hasattr(os, 'utime'): - os.utime(dst, (st.st_atime, st.st_mtime)) - if hasattr(os, 'chmod'): - os.chmod(dst, mode) - if hasattr(os, 'chflags') and hasattr(st, 'st_flags'): - try: - os.chflags(dst, st.st_flags) - except OSError as why: - if (not hasattr(errno, 'EOPNOTSUPP') or - why.errno != errno.EOPNOTSUPP): - raise - -def copy(src, dst): - """Copy data and mode bits ("cp src dst"). - - The destination may be a directory. - - """ - if os.path.isdir(dst): - dst = os.path.join(dst, os.path.basename(src)) - copyfile(src, dst) - copymode(src, dst) - -def copy2(src, dst): - """Copy data and all stat info ("cp -p src dst"). - - The destination may be a directory. - - """ - if os.path.isdir(dst): - dst = os.path.join(dst, os.path.basename(src)) - copyfile(src, dst) - copystat(src, dst) - -def ignore_patterns(*patterns): - """Function that can be used as copytree() ignore parameter. 
- - Patterns is a sequence of glob-style patterns - that are used to exclude files""" - def _ignore_patterns(path, names): - ignored_names = [] - for pattern in patterns: - ignored_names.extend(fnmatch.filter(names, pattern)) - return set(ignored_names) - return _ignore_patterns - -def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2, - ignore_dangling_symlinks=False): - """Recursively copy a directory tree. - - The destination directory must not already exist. - If exception(s) occur, an Error is raised with a list of reasons. - - If the optional symlinks flag is true, symbolic links in the - source tree result in symbolic links in the destination tree; if - it is false, the contents of the files pointed to by symbolic - links are copied. If the file pointed by the symlink doesn't - exist, an exception will be added in the list of errors raised in - an Error exception at the end of the copy process. - - You can set the optional ignore_dangling_symlinks flag to true if you - want to silence this exception. Notice that this has no effect on - platforms that don't support os.symlink. - - The optional ignore argument is a callable. If given, it - is called with the `src` parameter, which is the directory - being visited by copytree(), and `names` which is the list of - `src` contents, as returned by os.listdir(): - - callable(src, names) -> ignored_names - - Since copytree() is called recursively, the callable will be - called once for each directory that is copied. It returns a - list of names relative to the `src` directory that should - not be copied. - - The optional copy_function argument is a callable that will be used - to copy each file. It will be called with the source path and the - destination path as arguments. By default, copy2() is used, but any - function that supports the same signature (like copy()) can be used. - - """ - names = os.listdir(src) - if ignore is not None: - ignored_names = ignore(src, names) - else: - ignored_names = set() - - os.makedirs(dst) - errors = [] - for name in names: - if name in ignored_names: - continue - srcname = os.path.join(src, name) - dstname = os.path.join(dst, name) - try: - if os.path.islink(srcname): - linkto = os.readlink(srcname) - if symlinks: - os.symlink(linkto, dstname) - else: - # ignore dangling symlink if the flag is on - if not os.path.exists(linkto) and ignore_dangling_symlinks: - continue - # otherwise let the copy occurs. copy2 will raise an error - copy_function(srcname, dstname) - elif os.path.isdir(srcname): - copytree(srcname, dstname, symlinks, ignore, copy_function) - else: - # Will raise a SpecialFileError for unsupported file types - copy_function(srcname, dstname) - # catch the Error from the recursive copytree so that we can - # continue with other files - except Error as err: - errors.extend(err.args[0]) - except EnvironmentError as why: - errors.append((srcname, dstname, str(why))) - try: - copystat(src, dst) - except OSError as why: - if WindowsError is not None and isinstance(why, WindowsError): - # Copying file access times may fail on Windows - pass - else: - errors.extend((src, dst, str(why))) - if errors: - raise Error(errors) - -def rmtree(path, ignore_errors=False, onerror=None): - """Recursively delete a directory tree. 
- - If ignore_errors is set, errors are ignored; otherwise, if onerror - is set, it is called to handle the error with arguments (func, - path, exc_info) where func is os.listdir, os.remove, or os.rmdir; - path is the argument to that function that caused it to fail; and - exc_info is a tuple returned by sys.exc_info(). If ignore_errors - is false and onerror is None, an exception is raised. - - """ - if ignore_errors: - def onerror(*args): - pass - elif onerror is None: - def onerror(*args): - raise - try: - if os.path.islink(path): - # symlinks to directories are forbidden, see bug #1669 - raise OSError("Cannot call rmtree on a symbolic link") - except OSError: - onerror(os.path.islink, path, sys.exc_info()) - # can't continue even if onerror hook returns - return - names = [] - try: - names = os.listdir(path) - except os.error: - onerror(os.listdir, path, sys.exc_info()) - for name in names: - fullname = os.path.join(path, name) - try: - mode = os.lstat(fullname).st_mode - except os.error: - mode = 0 - if stat.S_ISDIR(mode): - rmtree(fullname, ignore_errors, onerror) - else: - try: - os.remove(fullname) - except os.error: - onerror(os.remove, fullname, sys.exc_info()) - try: - os.rmdir(path) - except os.error: - onerror(os.rmdir, path, sys.exc_info()) - - -def _basename(path): - # A basename() variant which first strips the trailing slash, if present. - # Thus we always get the last component of the path, even for directories. - return os.path.basename(path.rstrip(os.path.sep)) - -def move(src, dst): - """Recursively move a file or directory to another location. This is - similar to the Unix "mv" command. - - If the destination is a directory or a symlink to a directory, the source - is moved inside the directory. The destination path must not already - exist. - - If the destination already exists but is not a directory, it may be - overwritten depending on os.rename() semantics. - - If the destination is on our current filesystem, then rename() is used. - Otherwise, src is copied to the destination and then removed. - A lot more could be done here... A look at a mv.c shows a lot of - the issues this implementation glosses over. - - """ - real_dst = dst - if os.path.isdir(dst): - if _samefile(src, dst): - # We might be on a case insensitive filesystem, - # perform the rename anyway. - os.rename(src, dst) - return - - real_dst = os.path.join(dst, _basename(src)) - if os.path.exists(real_dst): - raise Error("Destination path '%s' already exists" % real_dst) - try: - os.rename(src, real_dst) - except OSError: - if os.path.isdir(src): - if _destinsrc(src, dst): - raise Error("Cannot move a directory '%s' into itself '%s'." 
% (src, dst)) - copytree(src, real_dst, symlinks=True) - rmtree(src) - else: - copy2(src, real_dst) - os.unlink(src) - -def _destinsrc(src, dst): - src = abspath(src) - dst = abspath(dst) - if not src.endswith(os.path.sep): - src += os.path.sep - if not dst.endswith(os.path.sep): - dst += os.path.sep - return dst.startswith(src) - -def _get_gid(name): - """Returns a gid, given a group name.""" - if getgrnam is None or name is None: - return None - try: - result = getgrnam(name) - except KeyError: - result = None - if result is not None: - return result[2] - return None - -def _get_uid(name): - """Returns an uid, given a user name.""" - if getpwnam is None or name is None: - return None - try: - result = getpwnam(name) - except KeyError: - result = None - if result is not None: - return result[2] - return None - -def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0, - owner=None, group=None, logger=None): - """Create a (possibly compressed) tar file from all the files under - 'base_dir'. - - 'compress' must be "gzip" (the default), "bzip2", or None. - - 'owner' and 'group' can be used to define an owner and a group for the - archive that is being built. If not provided, the current owner and group - will be used. - - The output tar file will be named 'base_name' + ".tar", possibly plus - the appropriate compression extension (".gz", or ".bz2"). - - Returns the output filename. - """ - tar_compression = {'gzip': 'gz', None: ''} - compress_ext = {'gzip': '.gz'} - - if _BZ2_SUPPORTED: - tar_compression['bzip2'] = 'bz2' - compress_ext['bzip2'] = '.bz2' - - # flags for compression program, each element of list will be an argument - if compress is not None and compress not in compress_ext: - raise ValueError("bad value for 'compress', or compression format not " - "supported : {0}".format(compress)) - - archive_name = base_name + '.tar' + compress_ext.get(compress, '') - archive_dir = os.path.dirname(archive_name) - - if not os.path.exists(archive_dir): - if logger is not None: - logger.info("creating %s", archive_dir) - if not dry_run: - os.makedirs(archive_dir) - - # creating the tarball - if logger is not None: - logger.info('Creating tar archive') - - uid = _get_uid(owner) - gid = _get_gid(group) - - def _set_uid_gid(tarinfo): - if gid is not None: - tarinfo.gid = gid - tarinfo.gname = group - if uid is not None: - tarinfo.uid = uid - tarinfo.uname = owner - return tarinfo - - if not dry_run: - tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress]) - try: - tar.add(base_dir, filter=_set_uid_gid) - finally: - tar.close() - - return archive_name - -def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False): - # XXX see if we want to keep an external call here - if verbose: - zipoptions = "-r" - else: - zipoptions = "-rq" - from distutils.errors import DistutilsExecError - from distutils.spawn import spawn - try: - spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run) - except DistutilsExecError: - # XXX really should distinguish between "couldn't find - # external 'zip' command" and "zip failed". - raise ExecError("unable to create zip file '%s': " - "could neither import the 'zipfile' module nor " - "find a standalone zip utility") % zip_filename - -def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None): - """Create a zip file from all the files under 'base_dir'. - - The output zip file will be named 'base_name' + ".zip". 
Uses either the - "zipfile" Python module (if available) or the InfoZIP "zip" utility - (if installed and found on the default search path). If neither tool is - available, raises ExecError. Returns the name of the output zip - file. - """ - zip_filename = base_name + ".zip" - archive_dir = os.path.dirname(base_name) - - if not os.path.exists(archive_dir): - if logger is not None: - logger.info("creating %s", archive_dir) - if not dry_run: - os.makedirs(archive_dir) - - # If zipfile module is not available, try spawning an external 'zip' - # command. - try: - import zipfile - except ImportError: - zipfile = None - - if zipfile is None: - _call_external_zip(base_dir, zip_filename, verbose, dry_run) - else: - if logger is not None: - logger.info("creating '%s' and adding '%s' to it", - zip_filename, base_dir) - - if not dry_run: - zip = zipfile.ZipFile(zip_filename, "w", - compression=zipfile.ZIP_DEFLATED) - - for dirpath, dirnames, filenames in os.walk(base_dir): - for name in filenames: - path = os.path.normpath(os.path.join(dirpath, name)) - if os.path.isfile(path): - zip.write(path, path) - if logger is not None: - logger.info("adding '%s'", path) - zip.close() - - return zip_filename - -_ARCHIVE_FORMATS = { - 'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"), - 'bztar': (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"), - 'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"), - 'zip': (_make_zipfile, [], "ZIP file"), - } - -if _BZ2_SUPPORTED: - _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')], - "bzip2'ed tar-file") - -def get_archive_formats(): - """Returns a list of supported formats for archiving and unarchiving. - - Each element of the returned sequence is a tuple (name, description) - """ - formats = [(name, registry[2]) for name, registry in - _ARCHIVE_FORMATS.items()] - formats.sort() - return formats - -def register_archive_format(name, function, extra_args=None, description=''): - """Registers an archive format. - - name is the name of the format. function is the callable that will be - used to create archives. If provided, extra_args is a sequence of - (name, value) tuples that will be passed as arguments to the callable. - description can be provided to describe the format, and will be returned - by the get_archive_formats() function. - """ - if extra_args is None: - extra_args = [] - if not isinstance(function, Callable): - raise TypeError('The %s object is not callable' % function) - if not isinstance(extra_args, (tuple, list)): - raise TypeError('extra_args needs to be a sequence') - for element in extra_args: - if not isinstance(element, (tuple, list)) or len(element) !=2: - raise TypeError('extra_args elements are : (arg_name, value)') - - _ARCHIVE_FORMATS[name] = (function, extra_args, description) - -def unregister_archive_format(name): - del _ARCHIVE_FORMATS[name] - -def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0, - dry_run=0, owner=None, group=None, logger=None): - """Create an archive file (eg. zip or tar). - - 'base_name' is the name of the file to create, minus any format-specific - extension; 'format' is the archive format: one of "zip", "tar", "bztar" - or "gztar". - - 'root_dir' is a directory that will be the root directory of the - archive; ie. we typically chdir into 'root_dir' before creating the - archive. 'base_dir' is the directory where we start archiving from; - ie. 'base_dir' will be the common prefix of all files and - directories in the archive. 
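# Illustrative sketch, not part of the vendored sources: the
# register_archive_format()/make_archive() pair above mirrors the stdlib
# shutil API, so the registry can be exercised the same way against the
# stdlib. The 'txtlist' format name and _make_listing helper are made up:
import os
import shutil

def _make_listing(base_name, base_dir, dry_run=0, logger=None, **kwargs):
    # Trivial "archiver": write a plain file listing instead of an archive.
    out = base_name + ".txt"
    if not dry_run:
        with open(out, "w") as fp:
            for dirpath, dirnames, filenames in os.walk(base_dir):
                for name in filenames:
                    fp.write(os.path.join(dirpath, name) + "\n")
    return out

shutil.register_archive_format("txtlist", _make_listing,
                               description="plain-text file listing")
# shutil.make_archive("demo", "txtlist", root_dir=".") would now produce
# demo.txt through the callable registered above.
shutil.unregister_archive_format("txtlist")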
'root_dir' and 'base_dir' both default - to the current directory. Returns the name of the archive file. - - 'owner' and 'group' are used when creating a tar archive. By default, - uses the current owner and group. - """ - save_cwd = os.getcwd() - if root_dir is not None: - if logger is not None: - logger.debug("changing into '%s'", root_dir) - base_name = os.path.abspath(base_name) - if not dry_run: - os.chdir(root_dir) - - if base_dir is None: - base_dir = os.curdir - - kwargs = {'dry_run': dry_run, 'logger': logger} - - try: - format_info = _ARCHIVE_FORMATS[format] - except KeyError: - raise ValueError("unknown archive format '%s'" % format) - - func = format_info[0] - for arg, val in format_info[1]: - kwargs[arg] = val - - if format != 'zip': - kwargs['owner'] = owner - kwargs['group'] = group - - try: - filename = func(base_name, base_dir, **kwargs) - finally: - if root_dir is not None: - if logger is not None: - logger.debug("changing back to '%s'", save_cwd) - os.chdir(save_cwd) - - return filename - - -def get_unpack_formats(): - """Returns a list of supported formats for unpacking. - - Each element of the returned sequence is a tuple - (name, extensions, description) - """ - formats = [(name, info[0], info[3]) for name, info in - _UNPACK_FORMATS.items()] - formats.sort() - return formats - -def _check_unpack_options(extensions, function, extra_args): - """Checks what gets registered as an unpacker.""" - # first make sure no other unpacker is registered for this extension - existing_extensions = {} - for name, info in _UNPACK_FORMATS.items(): - for ext in info[0]: - existing_extensions[ext] = name - - for extension in extensions: - if extension in existing_extensions: - msg = '%s is already registered for "%s"' - raise RegistryError(msg % (extension, - existing_extensions[extension])) - - if not isinstance(function, Callable): - raise TypeError('The registered function must be a callable') - - -def register_unpack_format(name, extensions, function, extra_args=None, - description=''): - """Registers an unpack format. - - `name` is the name of the format. `extensions` is a list of extensions - corresponding to the format. - - `function` is the callable that will be - used to unpack archives. The callable will receive archives to unpack. - If it's unable to handle an archive, it needs to raise a ReadError - exception. - - If provided, `extra_args` is a sequence of - (name, value) tuples that will be passed as arguments to the callable. - description can be provided to describe the format, and will be returned - by the get_unpack_formats() function. - """ - if extra_args is None: - extra_args = [] - _check_unpack_options(extensions, function, extra_args) - _UNPACK_FORMATS[name] = extensions, function, extra_args, description - -def unregister_unpack_format(name): - """Removes the pack format from the registry.""" - del _UNPACK_FORMATS[name] - -def _ensure_directory(path): - """Ensure that the parent directory of `path` exists""" - dirname = os.path.dirname(path) - if not os.path.isdir(dirname): - os.makedirs(dirname) - -def _unpack_zipfile(filename, extract_dir): - """Unpack zip `filename` to `extract_dir` - """ - try: - import zipfile - except ImportError: - raise ReadError('zlib not supported, cannot unpack this archive.') - - if not zipfile.is_zipfile(filename): - raise ReadError("%s is not a zip file" % filename) - - zip = zipfile.ZipFile(filename) - try: - for info in zip.infolist(): - name = info.filename - - # don't extract absolute paths or ones with .. 
in them - if name.startswith('/') or '..' in name: - continue - - target = os.path.join(extract_dir, *name.split('/')) - if not target: - continue - - _ensure_directory(target) - if not name.endswith('/'): - # file - data = zip.read(info.filename) - f = open(target, 'wb') - try: - f.write(data) - finally: - f.close() - del data - finally: - zip.close() - -def _unpack_tarfile(filename, extract_dir): - """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir` - """ - try: - tarobj = tarfile.open(filename) - except tarfile.TarError: - raise ReadError( - "%s is not a compressed or uncompressed tar file" % filename) - try: - tarobj.extractall(extract_dir) - finally: - tarobj.close() - -_UNPACK_FORMATS = { - 'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"), - 'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"), - 'zip': (['.zip'], _unpack_zipfile, [], "ZIP file") - } - -if _BZ2_SUPPORTED: - _UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [], - "bzip2'ed tar-file") - -def _find_unpack_format(filename): - for name, info in _UNPACK_FORMATS.items(): - for extension in info[0]: - if filename.endswith(extension): - return name - return None - -def unpack_archive(filename, extract_dir=None, format=None): - """Unpack an archive. - - `filename` is the name of the archive. - - `extract_dir` is the name of the target directory, where the archive - is unpacked. If not provided, the current working directory is used. - - `format` is the archive format: one of "zip", "tar", or "gztar". Or any - other registered format. If not provided, unpack_archive will use the - filename extension and see if an unpacker was registered for that - extension. - - In case none is found, a ValueError is raised. - """ - if extract_dir is None: - extract_dir = os.getcwd() - - if format is not None: - try: - format_info = _UNPACK_FORMATS[format] - except KeyError: - raise ValueError("Unknown unpack format '{0}'".format(format)) - - func = format_info[1] - func(filename, extract_dir, **dict(format_info[2])) - else: - # we need to look at the registered unpackers supported extensions - format = _find_unpack_format(filename) - if format is None: - raise ReadError("Unknown archive format '{0}'".format(filename)) - - func = _UNPACK_FORMATS[format][1] - kwargs = dict(_UNPACK_FORMATS[format][2]) - func(filename, extract_dir, **kwargs) diff --git a/pipenv/patched/notpip/_vendor/distlib/_backport/sysconfig.cfg b/pipenv/patched/notpip/_vendor/distlib/_backport/sysconfig.cfg deleted file mode 100644 index 1746bd01c1..0000000000 --- a/pipenv/patched/notpip/_vendor/distlib/_backport/sysconfig.cfg +++ /dev/null @@ -1,84 +0,0 @@ -[posix_prefix] -# Configuration directories. Some of these come straight out of the -# configure script. They are for implementing the other variables, not to -# be used directly in [resource_locations]. 
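# Illustrative sketch, not part of the vendored sources: every value in the
# scheme sections below is a template whose {name} placeholders the
# backported sysconfig module expands. A minimal standalone expansion using
# the module's own _VAR_REPL regex (the sample variables are made up):
import re

_VAR_REPL = re.compile(r'\{([^{]*?)\}')

def expand(template, variables):
    # Replace {name} with variables[name]; unknown names are left as-is.
    def _replacer(match):
        return variables.get(match.group(1), match.group(0))
    return _VAR_REPL.sub(_replacer, template)

# expand('{base}/lib/python{py_version_short}/site-packages',
#        {'base': '/usr', 'py_version_short': '3.10'})
# -> '/usr/lib/python3.10/site-packages'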
-confdir = /etc -datadir = /usr/share -libdir = /usr/lib -statedir = /var -# User resource directory -local = ~/.local/{distribution.name} - -stdlib = {base}/lib/python{py_version_short} -platstdlib = {platbase}/lib/python{py_version_short} -purelib = {base}/lib/python{py_version_short}/site-packages -platlib = {platbase}/lib/python{py_version_short}/site-packages -include = {base}/include/python{py_version_short}{abiflags} -platinclude = {platbase}/include/python{py_version_short}{abiflags} -data = {base} - -[posix_home] -stdlib = {base}/lib/python -platstdlib = {base}/lib/python -purelib = {base}/lib/python -platlib = {base}/lib/python -include = {base}/include/python -platinclude = {base}/include/python -scripts = {base}/bin -data = {base} - -[nt] -stdlib = {base}/Lib -platstdlib = {base}/Lib -purelib = {base}/Lib/site-packages -platlib = {base}/Lib/site-packages -include = {base}/Include -platinclude = {base}/Include -scripts = {base}/Scripts -data = {base} - -[os2] -stdlib = {base}/Lib -platstdlib = {base}/Lib -purelib = {base}/Lib/site-packages -platlib = {base}/Lib/site-packages -include = {base}/Include -platinclude = {base}/Include -scripts = {base}/Scripts -data = {base} - -[os2_home] -stdlib = {userbase}/lib/python{py_version_short} -platstdlib = {userbase}/lib/python{py_version_short} -purelib = {userbase}/lib/python{py_version_short}/site-packages -platlib = {userbase}/lib/python{py_version_short}/site-packages -include = {userbase}/include/python{py_version_short} -scripts = {userbase}/bin -data = {userbase} - -[nt_user] -stdlib = {userbase}/Python{py_version_nodot} -platstdlib = {userbase}/Python{py_version_nodot} -purelib = {userbase}/Python{py_version_nodot}/site-packages -platlib = {userbase}/Python{py_version_nodot}/site-packages -include = {userbase}/Python{py_version_nodot}/Include -scripts = {userbase}/Scripts -data = {userbase} - -[posix_user] -stdlib = {userbase}/lib/python{py_version_short} -platstdlib = {userbase}/lib/python{py_version_short} -purelib = {userbase}/lib/python{py_version_short}/site-packages -platlib = {userbase}/lib/python{py_version_short}/site-packages -include = {userbase}/include/python{py_version_short} -scripts = {userbase}/bin -data = {userbase} - -[osx_framework_user] -stdlib = {userbase}/lib/python -platstdlib = {userbase}/lib/python -purelib = {userbase}/lib/python/site-packages -platlib = {userbase}/lib/python/site-packages -include = {userbase}/include -scripts = {userbase}/bin -data = {userbase} diff --git a/pipenv/patched/notpip/_vendor/distlib/_backport/sysconfig.py b/pipenv/patched/notpip/_vendor/distlib/_backport/sysconfig.py deleted file mode 100644 index b470a373c8..0000000000 --- a/pipenv/patched/notpip/_vendor/distlib/_backport/sysconfig.py +++ /dev/null @@ -1,786 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012 The Python Software Foundation. -# See LICENSE.txt and CONTRIBUTORS.txt. 
-# -"""Access to Python's configuration information.""" - -import codecs -import os -import re -import sys -from os.path import pardir, realpath -try: - import configparser -except ImportError: - import ConfigParser as configparser - - -__all__ = [ - 'get_config_h_filename', - 'get_config_var', - 'get_config_vars', - 'get_makefile_filename', - 'get_path', - 'get_path_names', - 'get_paths', - 'get_platform', - 'get_python_version', - 'get_scheme_names', - 'parse_config_h', -] - - -def _safe_realpath(path): - try: - return realpath(path) - except OSError: - return path - - -if sys.executable: - _PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable)) -else: - # sys.executable can be empty if argv[0] has been changed and Python is - # unable to retrieve the real program name - _PROJECT_BASE = _safe_realpath(os.getcwd()) - -if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower(): - _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir)) -# PC/VS7.1 -if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower(): - _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir)) -# PC/AMD64 -if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower(): - _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir)) - - -def is_python_build(): - for fn in ("Setup.dist", "Setup.local"): - if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)): - return True - return False - -_PYTHON_BUILD = is_python_build() - -_cfg_read = False - -def _ensure_cfg_read(): - global _cfg_read - if not _cfg_read: - from ..resources import finder - backport_package = __name__.rsplit('.', 1)[0] - _finder = finder(backport_package) - _cfgfile = _finder.find('sysconfig.cfg') - assert _cfgfile, 'sysconfig.cfg exists' - with _cfgfile.as_stream() as s: - _SCHEMES.readfp(s) - if _PYTHON_BUILD: - for scheme in ('posix_prefix', 'posix_home'): - _SCHEMES.set(scheme, 'include', '{srcdir}/Include') - _SCHEMES.set(scheme, 'platinclude', '{projectbase}/.') - - _cfg_read = True - - -_SCHEMES = configparser.RawConfigParser() -_VAR_REPL = re.compile(r'\{([^{]*?)\}') - -def _expand_globals(config): - _ensure_cfg_read() - if config.has_section('globals'): - globals = config.items('globals') - else: - globals = tuple() - - sections = config.sections() - for section in sections: - if section == 'globals': - continue - for option, value in globals: - if config.has_option(section, option): - continue - config.set(section, option, value) - config.remove_section('globals') - - # now expanding local variables defined in the cfg file - # - for section in config.sections(): - variables = dict(config.items(section)) - - def _replacer(matchobj): - name = matchobj.group(1) - if name in variables: - return variables[name] - return matchobj.group(0) - - for option, value in config.items(section): - config.set(section, option, _VAR_REPL.sub(_replacer, value)) - -#_expand_globals(_SCHEMES) - -_PY_VERSION = '%s.%s.%s' % sys.version_info[:3] -_PY_VERSION_SHORT = '%s.%s' % sys.version_info[:2] -_PY_VERSION_SHORT_NO_DOT = '%s%s' % sys.version_info[:2] -_PREFIX = os.path.normpath(sys.prefix) -_EXEC_PREFIX = os.path.normpath(sys.exec_prefix) -_CONFIG_VARS = None -_USER_BASE = None - - -def _subst_vars(path, local_vars): - """In the string `path`, replace tokens like {some.thing} with the - corresponding value from the map `local_vars`. - - If there is no corresponding value, leave the token unchanged. 
- """ - def _replacer(matchobj): - name = matchobj.group(1) - if name in local_vars: - return local_vars[name] - elif name in os.environ: - return os.environ[name] - return matchobj.group(0) - return _VAR_REPL.sub(_replacer, path) - - -def _extend_dict(target_dict, other_dict): - target_keys = target_dict.keys() - for key, value in other_dict.items(): - if key in target_keys: - continue - target_dict[key] = value - - -def _expand_vars(scheme, vars): - res = {} - if vars is None: - vars = {} - _extend_dict(vars, get_config_vars()) - - for key, value in _SCHEMES.items(scheme): - if os.name in ('posix', 'nt'): - value = os.path.expanduser(value) - res[key] = os.path.normpath(_subst_vars(value, vars)) - return res - - -def format_value(value, vars): - def _replacer(matchobj): - name = matchobj.group(1) - if name in vars: - return vars[name] - return matchobj.group(0) - return _VAR_REPL.sub(_replacer, value) - - -def _get_default_scheme(): - if os.name == 'posix': - # the default scheme for posix is posix_prefix - return 'posix_prefix' - return os.name - - -def _getuserbase(): - env_base = os.environ.get("PYTHONUSERBASE", None) - - def joinuser(*args): - return os.path.expanduser(os.path.join(*args)) - - # what about 'os2emx', 'riscos' ? - if os.name == "nt": - base = os.environ.get("APPDATA") or "~" - if env_base: - return env_base - else: - return joinuser(base, "Python") - - if sys.platform == "darwin": - framework = get_config_var("PYTHONFRAMEWORK") - if framework: - if env_base: - return env_base - else: - return joinuser("~", "Library", framework, "%d.%d" % - sys.version_info[:2]) - - if env_base: - return env_base - else: - return joinuser("~", ".local") - - -def _parse_makefile(filename, vars=None): - """Parse a Makefile-style file. - - A dictionary containing name/value pairs is returned. If an - optional dictionary is passed in as the second argument, it is - used instead of a new dictionary. - """ - # Regexes needed for parsing Makefile (and similar syntaxes, - # like old-style Setup files). - _variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)") - _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)") - _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}") - - if vars is None: - vars = {} - done = {} - notdone = {} - - with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f: - lines = f.readlines() - - for line in lines: - if line.startswith('#') or line.strip() == '': - continue - m = _variable_rx.match(line) - if m: - n, v = m.group(1, 2) - v = v.strip() - # `$$' is a literal `$' in make - tmpv = v.replace('$$', '') - - if "$" in tmpv: - notdone[n] = v - else: - try: - v = int(v) - except ValueError: - # insert literal `$' - done[n] = v.replace('$$', '$') - else: - done[n] = v - - # do variable interpolation here - variables = list(notdone.keys()) - - # Variables with a 'PY_' prefix in the makefile. These need to - # be made available without that prefix through sysconfig. - # Special care is needed to ensure that variable expansion works, even - # if the expansion uses the name without a prefix. 
- renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS') - - while len(variables) > 0: - for name in tuple(variables): - value = notdone[name] - m = _findvar1_rx.search(value) or _findvar2_rx.search(value) - if m is not None: - n = m.group(1) - found = True - if n in done: - item = str(done[n]) - elif n in notdone: - # get it on a subsequent round - found = False - elif n in os.environ: - # do it like make: fall back to environment - item = os.environ[n] - - elif n in renamed_variables: - if (name.startswith('PY_') and - name[3:] in renamed_variables): - item = "" - - elif 'PY_' + n in notdone: - found = False - - else: - item = str(done['PY_' + n]) - - else: - done[n] = item = "" - - if found: - after = value[m.end():] - value = value[:m.start()] + item + after - if "$" in after: - notdone[name] = value - else: - try: - value = int(value) - except ValueError: - done[name] = value.strip() - else: - done[name] = value - variables.remove(name) - - if (name.startswith('PY_') and - name[3:] in renamed_variables): - - name = name[3:] - if name not in done: - done[name] = value - - else: - # bogus variable reference (e.g. "prefix=$/opt/python"); - # just drop it since we can't deal - done[name] = value - variables.remove(name) - - # strip spurious spaces - for k, v in done.items(): - if isinstance(v, str): - done[k] = v.strip() - - # save the results in the global dictionary - vars.update(done) - return vars - - -def get_makefile_filename(): - """Return the path of the Makefile.""" - if _PYTHON_BUILD: - return os.path.join(_PROJECT_BASE, "Makefile") - if hasattr(sys, 'abiflags'): - config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags) - else: - config_dir_name = 'config' - return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile') - - -def _init_posix(vars): - """Initialize the module as appropriate for POSIX systems.""" - # load the installed Makefile: - makefile = get_makefile_filename() - try: - _parse_makefile(makefile, vars) - except IOError as e: - msg = "invalid Python installation: unable to open %s" % makefile - if hasattr(e, "strerror"): - msg = msg + " (%s)" % e.strerror - raise IOError(msg) - # load the installed pyconfig.h: - config_h = get_config_h_filename() - try: - with open(config_h) as f: - parse_config_h(f, vars) - except IOError as e: - msg = "invalid Python installation: unable to open %s" % config_h - if hasattr(e, "strerror"): - msg = msg + " (%s)" % e.strerror - raise IOError(msg) - # On AIX, there are wrong paths to the linker scripts in the Makefile - # -- these paths are relative to the Python source, but when installed - # the scripts are in another directory. - if _PYTHON_BUILD: - vars['LDSHARED'] = vars['BLDSHARED'] - - -def _init_non_posix(vars): - """Initialize the module as appropriate for NT""" - # set basic install directories - vars['LIBDEST'] = get_path('stdlib') - vars['BINLIBDEST'] = get_path('platstdlib') - vars['INCLUDEPY'] = get_path('include') - vars['SO'] = '.pyd' - vars['EXE'] = '.exe' - vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT - vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable)) - -# -# public APIs -# - - -def parse_config_h(fp, vars=None): - """Parse a config.h-style file. - - A dictionary containing name/value pairs is returned. If an - optional dictionary is passed in as the second argument, it is - used instead of a new dictionary. 
- """ - if vars is None: - vars = {} - define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n") - undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n") - - while True: - line = fp.readline() - if not line: - break - m = define_rx.match(line) - if m: - n, v = m.group(1, 2) - try: - v = int(v) - except ValueError: - pass - vars[n] = v - else: - m = undef_rx.match(line) - if m: - vars[m.group(1)] = 0 - return vars - - -def get_config_h_filename(): - """Return the path of pyconfig.h.""" - if _PYTHON_BUILD: - if os.name == "nt": - inc_dir = os.path.join(_PROJECT_BASE, "PC") - else: - inc_dir = _PROJECT_BASE - else: - inc_dir = get_path('platinclude') - return os.path.join(inc_dir, 'pyconfig.h') - - -def get_scheme_names(): - """Return a tuple containing the schemes names.""" - return tuple(sorted(_SCHEMES.sections())) - - -def get_path_names(): - """Return a tuple containing the paths names.""" - # xxx see if we want a static list - return _SCHEMES.options('posix_prefix') - - -def get_paths(scheme=_get_default_scheme(), vars=None, expand=True): - """Return a mapping containing an install scheme. - - ``scheme`` is the install scheme name. If not provided, it will - return the default scheme for the current platform. - """ - _ensure_cfg_read() - if expand: - return _expand_vars(scheme, vars) - else: - return dict(_SCHEMES.items(scheme)) - - -def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True): - """Return a path corresponding to the scheme. - - ``scheme`` is the install scheme name. - """ - return get_paths(scheme, vars, expand)[name] - - -def get_config_vars(*args): - """With no arguments, return a dictionary of all configuration - variables relevant for the current platform. - - On Unix, this means every variable defined in Python's installed Makefile; - On Windows and Mac OS it's a much smaller set. - - With arguments, return a list of values that result from looking up - each argument in the configuration variable dictionary. - """ - global _CONFIG_VARS - if _CONFIG_VARS is None: - _CONFIG_VARS = {} - # Normalized versions of prefix and exec_prefix are handy to have; - # in fact, these are the standard versions used most places in the - # distutils2 module. - _CONFIG_VARS['prefix'] = _PREFIX - _CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX - _CONFIG_VARS['py_version'] = _PY_VERSION - _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT - _CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2] - _CONFIG_VARS['base'] = _PREFIX - _CONFIG_VARS['platbase'] = _EXEC_PREFIX - _CONFIG_VARS['projectbase'] = _PROJECT_BASE - try: - _CONFIG_VARS['abiflags'] = sys.abiflags - except AttributeError: - # sys.abiflags may not be defined on all platforms. - _CONFIG_VARS['abiflags'] = '' - - if os.name in ('nt', 'os2'): - _init_non_posix(_CONFIG_VARS) - if os.name == 'posix': - _init_posix(_CONFIG_VARS) - # Setting 'userbase' is done below the call to the - # init function to enable using 'get_config_var' in - # the init-function. - if sys.version >= '2.6': - _CONFIG_VARS['userbase'] = _getuserbase() - - if 'srcdir' not in _CONFIG_VARS: - _CONFIG_VARS['srcdir'] = _PROJECT_BASE - else: - _CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir']) - - # Convert srcdir into an absolute path if it appears necessary. - # Normally it is relative to the build directory. However, during - # testing, for example, we might be running a non-installed python - # from a different directory. 
- if _PYTHON_BUILD and os.name == "posix": - base = _PROJECT_BASE - try: - cwd = os.getcwd() - except OSError: - cwd = None - if (not os.path.isabs(_CONFIG_VARS['srcdir']) and - base != cwd): - # srcdir is relative and we are not in the same directory - # as the executable. Assume executable is in the build - # directory and make srcdir absolute. - srcdir = os.path.join(base, _CONFIG_VARS['srcdir']) - _CONFIG_VARS['srcdir'] = os.path.normpath(srcdir) - - if sys.platform == 'darwin': - kernel_version = os.uname()[2] # Kernel version (8.4.3) - major_version = int(kernel_version.split('.')[0]) - - if major_version < 8: - # On Mac OS X before 10.4, check if -arch and -isysroot - # are in CFLAGS or LDFLAGS and remove them if they are. - # This is needed when building extensions on a 10.3 system - # using a universal build of python. - for key in ('LDFLAGS', 'BASECFLAGS', - # a number of derived variables. These need to be - # patched up as well. - 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): - flags = _CONFIG_VARS[key] - flags = re.sub(r'-arch\s+\w+\s', ' ', flags) - flags = re.sub('-isysroot [^ \t]*', ' ', flags) - _CONFIG_VARS[key] = flags - else: - # Allow the user to override the architecture flags using - # an environment variable. - # NOTE: This name was introduced by Apple in OSX 10.5 and - # is used by several scripting languages distributed with - # that OS release. - if 'ARCHFLAGS' in os.environ: - arch = os.environ['ARCHFLAGS'] - for key in ('LDFLAGS', 'BASECFLAGS', - # a number of derived variables. These need to be - # patched up as well. - 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): - - flags = _CONFIG_VARS[key] - flags = re.sub(r'-arch\s+\w+\s', ' ', flags) - flags = flags + ' ' + arch - _CONFIG_VARS[key] = flags - - # If we're on OSX 10.5 or later and the user tries to - # compiles an extension using an SDK that is not present - # on the current machine it is better to not use an SDK - # than to fail. - # - # The major usecase for this is users using a Python.org - # binary installer on OSX 10.6: that installer uses - # the 10.4u SDK, but that SDK is not installed by default - # when you install Xcode. - # - CFLAGS = _CONFIG_VARS.get('CFLAGS', '') - m = re.search(r'-isysroot\s+(\S+)', CFLAGS) - if m is not None: - sdk = m.group(1) - if not os.path.exists(sdk): - for key in ('LDFLAGS', 'BASECFLAGS', - # a number of derived variables. These need to be - # patched up as well. - 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): - - flags = _CONFIG_VARS[key] - flags = re.sub(r'-isysroot\s+\S+(\s|$)', ' ', flags) - _CONFIG_VARS[key] = flags - - if args: - vals = [] - for name in args: - vals.append(_CONFIG_VARS.get(name)) - return vals - else: - return _CONFIG_VARS - - -def get_config_var(name): - """Return the value of a single variable using the dictionary returned by - 'get_config_vars()'. - - Equivalent to get_config_vars().get(name) - """ - return get_config_vars().get(name) - - -def get_platform(): - """Return a string that identifies the current platform. - - This is used mainly to distinguish platform-specific build directories and - platform-specific built distributions. Typically includes the OS name - and version and the architecture (as supplied by 'os.uname()'), - although the exact information included depends on the OS; eg. for IRIX - the architecture isn't particularly important (IRIX only runs on SGI - hardware), but for Linux the kernel version isn't particularly - important. - - Examples of returned values: - linux-i586 - linux-alpha (?) 
- solaris-2.6-sun4u - irix-5.3 - irix64-6.2 - - Windows will return one of: - win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc) - win-ia64 (64bit Windows on Itanium) - win32 (all others - specifically, sys.platform is returned) - - For other non-POSIX platforms, currently just returns 'sys.platform'. - """ - if os.name == 'nt': - # sniff sys.version for architecture. - prefix = " bit (" - i = sys.version.find(prefix) - if i == -1: - return sys.platform - j = sys.version.find(")", i) - look = sys.version[i+len(prefix):j].lower() - if look == 'amd64': - return 'win-amd64' - if look == 'itanium': - return 'win-ia64' - return sys.platform - - if os.name != "posix" or not hasattr(os, 'uname'): - # XXX what about the architecture? NT is Intel or Alpha, - # Mac OS is M68k or PPC, etc. - return sys.platform - - # Try to distinguish various flavours of Unix - osname, host, release, version, machine = os.uname() - - # Convert the OS name to lowercase, remove '/' characters - # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh") - osname = osname.lower().replace('/', '') - machine = machine.replace(' ', '_') - machine = machine.replace('/', '-') - - if osname[:5] == "linux": - # At least on Linux/Intel, 'machine' is the processor -- - # i386, etc. - # XXX what about Alpha, SPARC, etc? - return "%s-%s" % (osname, machine) - elif osname[:5] == "sunos": - if release[0] >= "5": # SunOS 5 == Solaris 2 - osname = "solaris" - release = "%d.%s" % (int(release[0]) - 3, release[2:]) - # fall through to standard osname-release-machine representation - elif osname[:4] == "irix": # could be "irix64"! - return "%s-%s" % (osname, release) - elif osname[:3] == "aix": - return "%s-%s.%s" % (osname, version, release) - elif osname[:6] == "cygwin": - osname = "cygwin" - rel_re = re.compile(r'[\d.]+') - m = rel_re.match(release) - if m: - release = m.group() - elif osname[:6] == "darwin": - # - # For our purposes, we'll assume that the system version from - # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set - # to. This makes the compatibility story a bit more sane because the - # machine is going to compile and link as if it were - # MACOSX_DEPLOYMENT_TARGET. - cfgvars = get_config_vars() - macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET') - - if True: - # Always calculate the release of the running machine, - # needed to determine if we can build fat binaries or not. - - macrelease = macver - # Get the system version. Reading this plist is a documented - # way to get the system version (see the documentation for - # the Gestalt Manager) - try: - f = open('/System/Library/CoreServices/SystemVersion.plist') - except IOError: - # We're on a plain darwin box, fall back to the default - # behaviour. - pass - else: - try: - m = re.search(r'ProductUserVisibleVersion\s*' - r'(.*?)', f.read()) - finally: - f.close() - if m is not None: - macrelease = '.'.join(m.group(1).split('.')[:2]) - # else: fall back to the default behaviour - - if not macver: - macver = macrelease - - if macver: - release = macver - osname = "macosx" - - if ((macrelease + '.') >= '10.4.' and - '-arch' in get_config_vars().get('CFLAGS', '').strip()): - # The universal build will build fat binaries, but not on - # systems before 10.4 - # - # Try to detect 4-way universal builds, those have machine-type - # 'universal' instead of 'fat'. 
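# Illustrative sketch, not part of the vendored sources: the darwin branch
# below keys its machine name off the sorted set of -arch flags found in
# CFLAGS. A condensed standalone version of that classification (the sample
# CFLAGS string is made up):
import re

def classify_universal(cflags):
    archs = tuple(sorted(set(re.findall(r'-arch\s+(\S+)', cflags))))
    table = {
        ('i386', 'ppc'): 'fat',
        ('i386', 'x86_64'): 'intel',
        ('i386', 'ppc', 'x86_64'): 'fat3',
        ('ppc64', 'x86_64'): 'fat64',
        ('i386', 'ppc', 'ppc64', 'x86_64'): 'universal',
    }
    if len(archs) == 1:
        return archs[0]
    return table[archs]  # a KeyError here mirrors the ValueError below

# classify_universal('-arch i386 -arch x86_64 -O2') -> 'intel'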
- - machine = 'fat' - cflags = get_config_vars().get('CFLAGS') - - archs = re.findall(r'-arch\s+(\S+)', cflags) - archs = tuple(sorted(set(archs))) - - if len(archs) == 1: - machine = archs[0] - elif archs == ('i386', 'ppc'): - machine = 'fat' - elif archs == ('i386', 'x86_64'): - machine = 'intel' - elif archs == ('i386', 'ppc', 'x86_64'): - machine = 'fat3' - elif archs == ('ppc64', 'x86_64'): - machine = 'fat64' - elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'): - machine = 'universal' - else: - raise ValueError( - "Don't know machine value for archs=%r" % (archs,)) - - elif machine == 'i386': - # On OSX the machine type returned by uname is always the - # 32-bit variant, even if the executable architecture is - # the 64-bit variant - if sys.maxsize >= 2**32: - machine = 'x86_64' - - elif machine in ('PowerPC', 'Power_Macintosh'): - # Pick a sane name for the PPC architecture. - # See 'i386' case - if sys.maxsize >= 2**32: - machine = 'ppc64' - else: - machine = 'ppc' - - return "%s-%s-%s" % (osname, release, machine) - - -def get_python_version(): - return _PY_VERSION_SHORT - - -def _print_dict(title, data): - for index, (key, value) in enumerate(sorted(data.items())): - if index == 0: - print('%s: ' % (title)) - print('\t%s = "%s"' % (key, value)) - - -def _main(): - """Display all information sysconfig detains.""" - print('Platform: "%s"' % get_platform()) - print('Python version: "%s"' % get_python_version()) - print('Current installation scheme: "%s"' % _get_default_scheme()) - print() - _print_dict('Paths', get_paths()) - print() - _print_dict('Variables', get_config_vars()) - - -if __name__ == '__main__': - _main() diff --git a/pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py b/pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py deleted file mode 100644 index d66d856637..0000000000 --- a/pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py +++ /dev/null @@ -1,2607 +0,0 @@ -#------------------------------------------------------------------- -# tarfile.py -#------------------------------------------------------------------- -# Copyright (C) 2002 Lars Gustaebel -# All rights reserved. -# -# Permission is hereby granted, free of charge, to any person -# obtaining a copy of this software and associated documentation -# files (the "Software"), to deal in the Software without -# restriction, including without limitation the rights to use, -# copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following -# conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. -# -from __future__ import print_function - -"""Read from and write to tar format archives. 
-""" - -__version__ = "$Revision$" - -version = "0.9.0" -__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)" -__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $" -__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $" -__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend." - -#--------- -# Imports -#--------- -import sys -import os -import stat -import errno -import time -import struct -import copy -import re - -try: - import grp, pwd -except ImportError: - grp = pwd = None - -# os.symlink on Windows prior to 6.0 raises NotImplementedError -symlink_exception = (AttributeError, NotImplementedError) -try: - # WindowsError (1314) will be raised if the caller does not hold the - # SeCreateSymbolicLinkPrivilege privilege - symlink_exception += (WindowsError,) -except NameError: - pass - -# from tarfile import * -__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"] - -if sys.version_info[0] < 3: - import __builtin__ as builtins -else: - import builtins - -_open = builtins.open # Since 'open' is TarFile.open - -#--------------------------------------------------------- -# tar constants -#--------------------------------------------------------- -NUL = b"\0" # the null character -BLOCKSIZE = 512 # length of processing blocks -RECORDSIZE = BLOCKSIZE * 20 # length of records -GNU_MAGIC = b"ustar \0" # magic gnu tar string -POSIX_MAGIC = b"ustar\x0000" # magic posix tar string - -LENGTH_NAME = 100 # maximum length of a filename -LENGTH_LINK = 100 # maximum length of a linkname -LENGTH_PREFIX = 155 # maximum length of the prefix field - -REGTYPE = b"0" # regular file -AREGTYPE = b"\0" # regular file -LNKTYPE = b"1" # link (inside tarfile) -SYMTYPE = b"2" # symbolic link -CHRTYPE = b"3" # character special device -BLKTYPE = b"4" # block special device -DIRTYPE = b"5" # directory -FIFOTYPE = b"6" # fifo special device -CONTTYPE = b"7" # contiguous file - -GNUTYPE_LONGNAME = b"L" # GNU tar longname -GNUTYPE_LONGLINK = b"K" # GNU tar longlink -GNUTYPE_SPARSE = b"S" # GNU tar sparse file - -XHDTYPE = b"x" # POSIX.1-2001 extended header -XGLTYPE = b"g" # POSIX.1-2001 global header -SOLARIS_XHDTYPE = b"X" # Solaris extended header - -USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format -GNU_FORMAT = 1 # GNU tar format -PAX_FORMAT = 2 # POSIX.1-2001 (pax) format -DEFAULT_FORMAT = GNU_FORMAT - -#--------------------------------------------------------- -# tarfile constants -#--------------------------------------------------------- -# File types that tarfile supports: -SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE, - SYMTYPE, DIRTYPE, FIFOTYPE, - CONTTYPE, CHRTYPE, BLKTYPE, - GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, - GNUTYPE_SPARSE) - -# File types that will be treated as a regular file. -REGULAR_TYPES = (REGTYPE, AREGTYPE, - CONTTYPE, GNUTYPE_SPARSE) - -# File types that are part of the GNU tar format. -GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, - GNUTYPE_SPARSE) - -# Fields from a pax header that override a TarInfo attribute. -PAX_FIELDS = ("path", "linkpath", "size", "mtime", - "uid", "gid", "uname", "gname") - -# Fields from a pax header that are affected by hdrcharset. -PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname")) - -# Fields in a pax header that are numbers, all other fields -# are treated as strings. 
-PAX_NUMBER_FIELDS = { - "atime": float, - "ctime": float, - "mtime": float, - "uid": int, - "gid": int, - "size": int -} - -#--------------------------------------------------------- -# Bits used in the mode field, values in octal. -#--------------------------------------------------------- -S_IFLNK = 0o120000 # symbolic link -S_IFREG = 0o100000 # regular file -S_IFBLK = 0o060000 # block device -S_IFDIR = 0o040000 # directory -S_IFCHR = 0o020000 # character device -S_IFIFO = 0o010000 # fifo - -TSUID = 0o4000 # set UID on execution -TSGID = 0o2000 # set GID on execution -TSVTX = 0o1000 # reserved - -TUREAD = 0o400 # read by owner -TUWRITE = 0o200 # write by owner -TUEXEC = 0o100 # execute/search by owner -TGREAD = 0o040 # read by group -TGWRITE = 0o020 # write by group -TGEXEC = 0o010 # execute/search by group -TOREAD = 0o004 # read by other -TOWRITE = 0o002 # write by other -TOEXEC = 0o001 # execute/search by other - -#--------------------------------------------------------- -# initialization -#--------------------------------------------------------- -if os.name in ("nt", "ce"): - ENCODING = "utf-8" -else: - ENCODING = sys.getfilesystemencoding() - -#--------------------------------------------------------- -# Some useful functions -#--------------------------------------------------------- - -def stn(s, length, encoding, errors): - """Convert a string to a null-terminated bytes object. - """ - s = s.encode(encoding, errors) - return s[:length] + (length - len(s)) * NUL - -def nts(s, encoding, errors): - """Convert a null-terminated bytes object to a string. - """ - p = s.find(b"\0") - if p != -1: - s = s[:p] - return s.decode(encoding, errors) - -def nti(s): - """Convert a number field to a python number. - """ - # There are two possible encodings for a number field, see - # itn() below. - if s[0] != chr(0o200): - try: - n = int(nts(s, "ascii", "strict") or "0", 8) - except ValueError: - raise InvalidHeaderError("invalid header") - else: - n = 0 - for i in range(len(s) - 1): - n <<= 8 - n += ord(s[i + 1]) - return n - -def itn(n, digits=8, format=DEFAULT_FORMAT): - """Convert a python number to a number field. - """ - # POSIX 1003.1-1988 requires numbers to be encoded as a string of - # octal digits followed by a null-byte, this allows values up to - # (8**(digits-1))-1. GNU tar allows storing numbers greater than - # that if necessary. A leading 0o200 byte indicates this particular - # encoding, the following digits-1 bytes are a big-endian - # representation. This allows values up to (256**(digits-1))-1. - if 0 <= n < 8 ** (digits - 1): - s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL - else: - if format != GNU_FORMAT or n >= 256 ** (digits - 1): - raise ValueError("overflow in number field") - - if n < 0: - # XXX We mimic GNU tar's behaviour with negative numbers, - # this could raise OverflowError. - n = struct.unpack("L", struct.pack("l", n))[0] - - s = bytearray() - for i in range(digits - 1): - s.insert(0, n & 0o377) - n >>= 8 - s.insert(0, 0o200) - return s - -def calc_chksums(buf): - """Calculate the checksum for a member's header by summing up all - characters except for the chksum field which is treated as if - it was filled with spaces. According to the GNU tar sources, - some tars (Sun and NeXT) calculate chksum with signed char, - which will be different if there are chars in the buffer with - the high bit set. So we calculate two checksums, unsigned and - signed. 
- """ - unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512])) - signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512])) - return unsigned_chksum, signed_chksum - -def copyfileobj(src, dst, length=None): - """Copy length bytes from fileobj src to fileobj dst. - If length is None, copy the entire content. - """ - if length == 0: - return - if length is None: - while True: - buf = src.read(16*1024) - if not buf: - break - dst.write(buf) - return - - BUFSIZE = 16 * 1024 - blocks, remainder = divmod(length, BUFSIZE) - for b in range(blocks): - buf = src.read(BUFSIZE) - if len(buf) < BUFSIZE: - raise IOError("end of file reached") - dst.write(buf) - - if remainder != 0: - buf = src.read(remainder) - if len(buf) < remainder: - raise IOError("end of file reached") - dst.write(buf) - return - -filemode_table = ( - ((S_IFLNK, "l"), - (S_IFREG, "-"), - (S_IFBLK, "b"), - (S_IFDIR, "d"), - (S_IFCHR, "c"), - (S_IFIFO, "p")), - - ((TUREAD, "r"),), - ((TUWRITE, "w"),), - ((TUEXEC|TSUID, "s"), - (TSUID, "S"), - (TUEXEC, "x")), - - ((TGREAD, "r"),), - ((TGWRITE, "w"),), - ((TGEXEC|TSGID, "s"), - (TSGID, "S"), - (TGEXEC, "x")), - - ((TOREAD, "r"),), - ((TOWRITE, "w"),), - ((TOEXEC|TSVTX, "t"), - (TSVTX, "T"), - (TOEXEC, "x")) -) - -def filemode(mode): - """Convert a file's mode to a string of the form - -rwxrwxrwx. - Used by TarFile.list() - """ - perm = [] - for table in filemode_table: - for bit, char in table: - if mode & bit == bit: - perm.append(char) - break - else: - perm.append("-") - return "".join(perm) - -class TarError(Exception): - """Base exception.""" - pass -class ExtractError(TarError): - """General exception for extract errors.""" - pass -class ReadError(TarError): - """Exception for unreadable tar archives.""" - pass -class CompressionError(TarError): - """Exception for unavailable compression methods.""" - pass -class StreamError(TarError): - """Exception for unsupported operations on stream-like TarFiles.""" - pass -class HeaderError(TarError): - """Base exception for header errors.""" - pass -class EmptyHeaderError(HeaderError): - """Exception for empty headers.""" - pass -class TruncatedHeaderError(HeaderError): - """Exception for truncated headers.""" - pass -class EOFHeaderError(HeaderError): - """Exception for end of file headers.""" - pass -class InvalidHeaderError(HeaderError): - """Exception for invalid headers.""" - pass -class SubsequentHeaderError(HeaderError): - """Exception for missing and invalid extended headers.""" - pass - -#--------------------------- -# internal stream interface -#--------------------------- -class _LowLevelFile(object): - """Low-level file object. Supports reading and writing. - It is used instead of a regular file object for streaming - access. - """ - - def __init__(self, name, mode): - mode = { - "r": os.O_RDONLY, - "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC, - }[mode] - if hasattr(os, "O_BINARY"): - mode |= os.O_BINARY - self.fd = os.open(name, mode, 0o666) - - def close(self): - os.close(self.fd) - - def read(self, size): - return os.read(self.fd, size) - - def write(self, s): - os.write(self.fd, s) - -class _Stream(object): - """Class that serves as an adapter between TarFile and - a stream-like object. The stream-like object only - needs to have a read() or write() method and is accessed - blockwise. Use of gzip or bzip2 compression is possible. - A stream-like object could be for example: sys.stdin, - sys.stdout, a socket, a tape device etc. 
- - _Stream is intended to be used only internally. - """ - - def __init__(self, name, mode, comptype, fileobj, bufsize): - """Construct a _Stream object. - """ - self._extfileobj = True - if fileobj is None: - fileobj = _LowLevelFile(name, mode) - self._extfileobj = False - - if comptype == '*': - # Enable transparent compression detection for the - # stream interface - fileobj = _StreamProxy(fileobj) - comptype = fileobj.getcomptype() - - self.name = name or "" - self.mode = mode - self.comptype = comptype - self.fileobj = fileobj - self.bufsize = bufsize - self.buf = b"" - self.pos = 0 - self.closed = False - - try: - if comptype == "gz": - try: - import zlib - except ImportError: - raise CompressionError("zlib module is not available") - self.zlib = zlib - self.crc = zlib.crc32(b"") - if mode == "r": - self._init_read_gz() - else: - self._init_write_gz() - - if comptype == "bz2": - try: - import bz2 - except ImportError: - raise CompressionError("bz2 module is not available") - if mode == "r": - self.dbuf = b"" - self.cmp = bz2.BZ2Decompressor() - else: - self.cmp = bz2.BZ2Compressor() - except: - if not self._extfileobj: - self.fileobj.close() - self.closed = True - raise - - def __del__(self): - if hasattr(self, "closed") and not self.closed: - self.close() - - def _init_write_gz(self): - """Initialize for writing with gzip compression. - """ - self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED, - -self.zlib.MAX_WBITS, - self.zlib.DEF_MEM_LEVEL, - 0) - timestamp = struct.pack(" self.bufsize: - self.fileobj.write(self.buf[:self.bufsize]) - self.buf = self.buf[self.bufsize:] - - def close(self): - """Close the _Stream object. No operation should be - done on it afterwards. - """ - if self.closed: - return - - if self.mode == "w" and self.comptype != "tar": - self.buf += self.cmp.flush() - - if self.mode == "w" and self.buf: - self.fileobj.write(self.buf) - self.buf = b"" - if self.comptype == "gz": - # The native zlib crc is an unsigned 32-bit integer, but - # the Python wrapper implicitly casts that to a signed C - # long. So, on a 32-bit box self.crc may "look negative", - # while the same crc on a 64-bit box may "look positive". - # To avoid irksome warnings from the `struct` module, force - # it to look positive on all boxes. - self.fileobj.write(struct.pack("= 0: - blocks, remainder = divmod(pos - self.pos, self.bufsize) - for i in range(blocks): - self.read(self.bufsize) - self.read(remainder) - else: - raise StreamError("seeking backwards is not allowed") - return self.pos - - def read(self, size=None): - """Return the next size number of bytes from the stream. - If size is not defined, return all bytes of the stream - up to EOF. - """ - if size is None: - t = [] - while True: - buf = self._read(self.bufsize) - if not buf: - break - t.append(buf) - buf = "".join(t) - else: - buf = self._read(size) - self.pos += len(buf) - return buf - - def _read(self, size): - """Return size bytes from the stream. - """ - if self.comptype == "tar": - return self.__read(size) - - c = len(self.dbuf) - while c < size: - buf = self.__read(self.bufsize) - if not buf: - break - try: - buf = self.cmp.decompress(buf) - except IOError: - raise ReadError("invalid compressed data") - self.dbuf += buf - c += len(buf) - buf = self.dbuf[:size] - self.dbuf = self.dbuf[size:] - return buf - - def __read(self, size): - """Return size bytes from stream. If internal buffer is empty, - read another block from the stream. 
- """ - c = len(self.buf) - while c < size: - buf = self.fileobj.read(self.bufsize) - if not buf: - break - self.buf += buf - c += len(buf) - buf = self.buf[:size] - self.buf = self.buf[size:] - return buf -# class _Stream - -class _StreamProxy(object): - """Small proxy class that enables transparent compression - detection for the Stream interface (mode 'r|*'). - """ - - def __init__(self, fileobj): - self.fileobj = fileobj - self.buf = self.fileobj.read(BLOCKSIZE) - - def read(self, size): - self.read = self.fileobj.read - return self.buf - - def getcomptype(self): - if self.buf.startswith(b"\037\213\010"): - return "gz" - if self.buf.startswith(b"BZh91"): - return "bz2" - return "tar" - - def close(self): - self.fileobj.close() -# class StreamProxy - -class _BZ2Proxy(object): - """Small proxy class that enables external file object - support for "r:bz2" and "w:bz2" modes. This is actually - a workaround for a limitation in bz2 module's BZ2File - class which (unlike gzip.GzipFile) has no support for - a file object argument. - """ - - blocksize = 16 * 1024 - - def __init__(self, fileobj, mode): - self.fileobj = fileobj - self.mode = mode - self.name = getattr(self.fileobj, "name", None) - self.init() - - def init(self): - import bz2 - self.pos = 0 - if self.mode == "r": - self.bz2obj = bz2.BZ2Decompressor() - self.fileobj.seek(0) - self.buf = b"" - else: - self.bz2obj = bz2.BZ2Compressor() - - def read(self, size): - x = len(self.buf) - while x < size: - raw = self.fileobj.read(self.blocksize) - if not raw: - break - data = self.bz2obj.decompress(raw) - self.buf += data - x += len(data) - - buf = self.buf[:size] - self.buf = self.buf[size:] - self.pos += len(buf) - return buf - - def seek(self, pos): - if pos < self.pos: - self.init() - self.read(pos - self.pos) - - def tell(self): - return self.pos - - def write(self, data): - self.pos += len(data) - raw = self.bz2obj.compress(data) - self.fileobj.write(raw) - - def close(self): - if self.mode == "w": - raw = self.bz2obj.flush() - self.fileobj.write(raw) -# class _BZ2Proxy - -#------------------------ -# Extraction file object -#------------------------ -class _FileInFile(object): - """A thin wrapper around an existing file object that - provides a part of its data as an individual file - object. - """ - - def __init__(self, fileobj, offset, size, blockinfo=None): - self.fileobj = fileobj - self.offset = offset - self.size = size - self.position = 0 - - if blockinfo is None: - blockinfo = [(0, size)] - - # Construct a map with data and zero blocks. - self.map_index = 0 - self.map = [] - lastpos = 0 - realpos = self.offset - for offset, size in blockinfo: - if offset > lastpos: - self.map.append((False, lastpos, offset, None)) - self.map.append((True, offset, offset + size, realpos)) - realpos += size - lastpos = offset + size - if lastpos < self.size: - self.map.append((False, lastpos, self.size, None)) - - def seekable(self): - if not hasattr(self.fileobj, "seekable"): - # XXX gzip.GzipFile and bz2.BZ2File - return True - return self.fileobj.seekable() - - def tell(self): - """Return the current file position. - """ - return self.position - - def seek(self, position): - """Seek to a position in the file. - """ - self.position = position - - def read(self, size=None): - """Read data from the file. 
- """ - if size is None: - size = self.size - self.position - else: - size = min(size, self.size - self.position) - - buf = b"" - while size > 0: - while True: - data, start, stop, offset = self.map[self.map_index] - if start <= self.position < stop: - break - else: - self.map_index += 1 - if self.map_index == len(self.map): - self.map_index = 0 - length = min(size, stop - self.position) - if data: - self.fileobj.seek(offset + (self.position - start)) - buf += self.fileobj.read(length) - else: - buf += NUL * length - size -= length - self.position += length - return buf -#class _FileInFile - - -class ExFileObject(object): - """File-like object for reading an archive member. - Is returned by TarFile.extractfile(). - """ - blocksize = 1024 - - def __init__(self, tarfile, tarinfo): - self.fileobj = _FileInFile(tarfile.fileobj, - tarinfo.offset_data, - tarinfo.size, - tarinfo.sparse) - self.name = tarinfo.name - self.mode = "r" - self.closed = False - self.size = tarinfo.size - - self.position = 0 - self.buffer = b"" - - def readable(self): - return True - - def writable(self): - return False - - def seekable(self): - return self.fileobj.seekable() - - def read(self, size=None): - """Read at most size bytes from the file. If size is not - present or None, read all data until EOF is reached. - """ - if self.closed: - raise ValueError("I/O operation on closed file") - - buf = b"" - if self.buffer: - if size is None: - buf = self.buffer - self.buffer = b"" - else: - buf = self.buffer[:size] - self.buffer = self.buffer[size:] - - if size is None: - buf += self.fileobj.read() - else: - buf += self.fileobj.read(size - len(buf)) - - self.position += len(buf) - return buf - - # XXX TextIOWrapper uses the read1() method. - read1 = read - - def readline(self, size=-1): - """Read one entire line from the file. If size is present - and non-negative, return a string with at most that - size, which may be an incomplete line. - """ - if self.closed: - raise ValueError("I/O operation on closed file") - - pos = self.buffer.find(b"\n") + 1 - if pos == 0: - # no newline found. - while True: - buf = self.fileobj.read(self.blocksize) - self.buffer += buf - if not buf or b"\n" in buf: - pos = self.buffer.find(b"\n") + 1 - if pos == 0: - # no newline found. - pos = len(self.buffer) - break - - if size != -1: - pos = min(size, pos) - - buf = self.buffer[:pos] - self.buffer = self.buffer[pos:] - self.position += len(buf) - return buf - - def readlines(self): - """Return a list with all remaining lines. - """ - result = [] - while True: - line = self.readline() - if not line: break - result.append(line) - return result - - def tell(self): - """Return the current file position. - """ - if self.closed: - raise ValueError("I/O operation on closed file") - - return self.position - - def seek(self, pos, whence=os.SEEK_SET): - """Seek to a position in the file. - """ - if self.closed: - raise ValueError("I/O operation on closed file") - - if whence == os.SEEK_SET: - self.position = min(max(pos, 0), self.size) - elif whence == os.SEEK_CUR: - if pos < 0: - self.position = max(self.position + pos, 0) - else: - self.position = min(self.position + pos, self.size) - elif whence == os.SEEK_END: - self.position = max(min(self.size + pos, self.size), 0) - else: - raise ValueError("Invalid argument") - - self.buffer = b"" - self.fileobj.seek(self.position) - - def close(self): - """Close the file object. - """ - self.closed = True - - def __iter__(self): - """Get an iterator over the file's lines. 
- """ - while True: - line = self.readline() - if not line: - break - yield line -#class ExFileObject - -#------------------ -# Exported Classes -#------------------ -class TarInfo(object): - """Informational class which holds the details about an - archive member given by a tar header block. - TarInfo objects are returned by TarFile.getmember(), - TarFile.getmembers() and TarFile.gettarinfo() and are - usually created internally. - """ - - __slots__ = ("name", "mode", "uid", "gid", "size", "mtime", - "chksum", "type", "linkname", "uname", "gname", - "devmajor", "devminor", - "offset", "offset_data", "pax_headers", "sparse", - "tarfile", "_sparse_structs", "_link_target") - - def __init__(self, name=""): - """Construct a TarInfo object. name is the optional name - of the member. - """ - self.name = name # member name - self.mode = 0o644 # file permissions - self.uid = 0 # user id - self.gid = 0 # group id - self.size = 0 # file size - self.mtime = 0 # modification time - self.chksum = 0 # header checksum - self.type = REGTYPE # member type - self.linkname = "" # link name - self.uname = "" # user name - self.gname = "" # group name - self.devmajor = 0 # device major number - self.devminor = 0 # device minor number - - self.offset = 0 # the tar header starts here - self.offset_data = 0 # the file's data starts here - - self.sparse = None # sparse member information - self.pax_headers = {} # pax header information - - # In pax headers the "name" and "linkname" field are called - # "path" and "linkpath". - def _getpath(self): - return self.name - def _setpath(self, name): - self.name = name - path = property(_getpath, _setpath) - - def _getlinkpath(self): - return self.linkname - def _setlinkpath(self, linkname): - self.linkname = linkname - linkpath = property(_getlinkpath, _setlinkpath) - - def __repr__(self): - return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self)) - - def get_info(self): - """Return the TarInfo's attributes as a dictionary. - """ - info = { - "name": self.name, - "mode": self.mode & 0o7777, - "uid": self.uid, - "gid": self.gid, - "size": self.size, - "mtime": self.mtime, - "chksum": self.chksum, - "type": self.type, - "linkname": self.linkname, - "uname": self.uname, - "gname": self.gname, - "devmajor": self.devmajor, - "devminor": self.devminor - } - - if info["type"] == DIRTYPE and not info["name"].endswith("/"): - info["name"] += "/" - - return info - - def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"): - """Return a tar header as a string of 512 byte blocks. - """ - info = self.get_info() - - if format == USTAR_FORMAT: - return self.create_ustar_header(info, encoding, errors) - elif format == GNU_FORMAT: - return self.create_gnu_header(info, encoding, errors) - elif format == PAX_FORMAT: - return self.create_pax_header(info, encoding) - else: - raise ValueError("invalid format") - - def create_ustar_header(self, info, encoding, errors): - """Return the object as a ustar header block. - """ - info["magic"] = POSIX_MAGIC - - if len(info["linkname"]) > LENGTH_LINK: - raise ValueError("linkname is too long") - - if len(info["name"]) > LENGTH_NAME: - info["prefix"], info["name"] = self._posix_split_name(info["name"]) - - return self._create_header(info, USTAR_FORMAT, encoding, errors) - - def create_gnu_header(self, info, encoding, errors): - """Return the object as a GNU header block sequence. 
- """ - info["magic"] = GNU_MAGIC - - buf = b"" - if len(info["linkname"]) > LENGTH_LINK: - buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors) - - if len(info["name"]) > LENGTH_NAME: - buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors) - - return buf + self._create_header(info, GNU_FORMAT, encoding, errors) - - def create_pax_header(self, info, encoding): - """Return the object as a ustar header block. If it cannot be - represented this way, prepend a pax extended header sequence - with supplement information. - """ - info["magic"] = POSIX_MAGIC - pax_headers = self.pax_headers.copy() - - # Test string fields for values that exceed the field length or cannot - # be represented in ASCII encoding. - for name, hname, length in ( - ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK), - ("uname", "uname", 32), ("gname", "gname", 32)): - - if hname in pax_headers: - # The pax header has priority. - continue - - # Try to encode the string as ASCII. - try: - info[name].encode("ascii", "strict") - except UnicodeEncodeError: - pax_headers[hname] = info[name] - continue - - if len(info[name]) > length: - pax_headers[hname] = info[name] - - # Test number fields for values that exceed the field limit or values - # that like to be stored as float. - for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)): - if name in pax_headers: - # The pax header has priority. Avoid overflow. - info[name] = 0 - continue - - val = info[name] - if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float): - pax_headers[name] = str(val) - info[name] = 0 - - # Create a pax extended header if necessary. - if pax_headers: - buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding) - else: - buf = b"" - - return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace") - - @classmethod - def create_pax_global_header(cls, pax_headers): - """Return the object as a pax global header block sequence. - """ - return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8") - - def _posix_split_name(self, name): - """Split a name longer than 100 chars into a prefix - and a name part. - """ - prefix = name[:LENGTH_PREFIX + 1] - while prefix and prefix[-1] != "/": - prefix = prefix[:-1] - - name = name[len(prefix):] - prefix = prefix[:-1] - - if not prefix or len(name) > LENGTH_NAME: - raise ValueError("name is too long") - return prefix, name - - @staticmethod - def _create_header(info, format, encoding, errors): - """Return a header block. info is a dictionary with file - information, format must be one of the *_FORMAT constants. 
- """ - parts = [ - stn(info.get("name", ""), 100, encoding, errors), - itn(info.get("mode", 0) & 0o7777, 8, format), - itn(info.get("uid", 0), 8, format), - itn(info.get("gid", 0), 8, format), - itn(info.get("size", 0), 12, format), - itn(info.get("mtime", 0), 12, format), - b" ", # checksum field - info.get("type", REGTYPE), - stn(info.get("linkname", ""), 100, encoding, errors), - info.get("magic", POSIX_MAGIC), - stn(info.get("uname", ""), 32, encoding, errors), - stn(info.get("gname", ""), 32, encoding, errors), - itn(info.get("devmajor", 0), 8, format), - itn(info.get("devminor", 0), 8, format), - stn(info.get("prefix", ""), 155, encoding, errors) - ] - - buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts)) - chksum = calc_chksums(buf[-BLOCKSIZE:])[0] - buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:] - return buf - - @staticmethod - def _create_payload(payload): - """Return the string payload filled with zero bytes - up to the next 512 byte border. - """ - blocks, remainder = divmod(len(payload), BLOCKSIZE) - if remainder > 0: - payload += (BLOCKSIZE - remainder) * NUL - return payload - - @classmethod - def _create_gnu_long_header(cls, name, type, encoding, errors): - """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence - for name. - """ - name = name.encode(encoding, errors) + NUL - - info = {} - info["name"] = "././@LongLink" - info["type"] = type - info["size"] = len(name) - info["magic"] = GNU_MAGIC - - # create extended header + name blocks. - return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \ - cls._create_payload(name) - - @classmethod - def _create_pax_generic_header(cls, pax_headers, type, encoding): - """Return a POSIX.1-2008 extended or global header sequence - that contains a list of keyword, value pairs. The values - must be strings. - """ - # Check if one of the fields contains surrogate characters and thereby - # forces hdrcharset=BINARY, see _proc_pax() for more information. - binary = False - for keyword, value in pax_headers.items(): - try: - value.encode("utf8", "strict") - except UnicodeEncodeError: - binary = True - break - - records = b"" - if binary: - # Put the hdrcharset field at the beginning of the header. - records += b"21 hdrcharset=BINARY\n" - - for keyword, value in pax_headers.items(): - keyword = keyword.encode("utf8") - if binary: - # Try to restore the original byte representation of `value'. - # Needless to say, that the encoding must match the string. - value = value.encode(encoding, "surrogateescape") - else: - value = value.encode("utf8") - - l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n' - n = p = 0 - while True: - n = l + len(str(p)) - if n == p: - break - p = n - records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n" - - # We use a hardcoded "././@PaxHeader" name like star does - # instead of the one that POSIX recommends. - info = {} - info["name"] = "././@PaxHeader" - info["type"] = type - info["size"] = len(records) - info["magic"] = POSIX_MAGIC - - # Create pax header + record blocks. - return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \ - cls._create_payload(records) - - @classmethod - def frombuf(cls, buf, encoding, errors): - """Construct a TarInfo object from a 512 byte bytes object. 
- """ - if len(buf) == 0: - raise EmptyHeaderError("empty header") - if len(buf) != BLOCKSIZE: - raise TruncatedHeaderError("truncated header") - if buf.count(NUL) == BLOCKSIZE: - raise EOFHeaderError("end of file header") - - chksum = nti(buf[148:156]) - if chksum not in calc_chksums(buf): - raise InvalidHeaderError("bad checksum") - - obj = cls() - obj.name = nts(buf[0:100], encoding, errors) - obj.mode = nti(buf[100:108]) - obj.uid = nti(buf[108:116]) - obj.gid = nti(buf[116:124]) - obj.size = nti(buf[124:136]) - obj.mtime = nti(buf[136:148]) - obj.chksum = chksum - obj.type = buf[156:157] - obj.linkname = nts(buf[157:257], encoding, errors) - obj.uname = nts(buf[265:297], encoding, errors) - obj.gname = nts(buf[297:329], encoding, errors) - obj.devmajor = nti(buf[329:337]) - obj.devminor = nti(buf[337:345]) - prefix = nts(buf[345:500], encoding, errors) - - # Old V7 tar format represents a directory as a regular - # file with a trailing slash. - if obj.type == AREGTYPE and obj.name.endswith("/"): - obj.type = DIRTYPE - - # The old GNU sparse format occupies some of the unused - # space in the buffer for up to 4 sparse structures. - # Save the them for later processing in _proc_sparse(). - if obj.type == GNUTYPE_SPARSE: - pos = 386 - structs = [] - for i in range(4): - try: - offset = nti(buf[pos:pos + 12]) - numbytes = nti(buf[pos + 12:pos + 24]) - except ValueError: - break - structs.append((offset, numbytes)) - pos += 24 - isextended = bool(buf[482]) - origsize = nti(buf[483:495]) - obj._sparse_structs = (structs, isextended, origsize) - - # Remove redundant slashes from directories. - if obj.isdir(): - obj.name = obj.name.rstrip("/") - - # Reconstruct a ustar longname. - if prefix and obj.type not in GNU_TYPES: - obj.name = prefix + "/" + obj.name - return obj - - @classmethod - def fromtarfile(cls, tarfile): - """Return the next TarInfo object from TarFile object - tarfile. - """ - buf = tarfile.fileobj.read(BLOCKSIZE) - obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors) - obj.offset = tarfile.fileobj.tell() - BLOCKSIZE - return obj._proc_member(tarfile) - - #-------------------------------------------------------------------------- - # The following are methods that are called depending on the type of a - # member. The entry point is _proc_member() which can be overridden in a - # subclass to add custom _proc_*() methods. A _proc_*() method MUST - # implement the following - # operations: - # 1. Set self.offset_data to the position where the data blocks begin, - # if there is data that follows. - # 2. Set tarfile.offset to the position where the next member's header will - # begin. - # 3. Return self or another valid TarInfo object. - def _proc_member(self, tarfile): - """Choose the right processing method depending on - the type and call it. - """ - if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK): - return self._proc_gnulong(tarfile) - elif self.type == GNUTYPE_SPARSE: - return self._proc_sparse(tarfile) - elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE): - return self._proc_pax(tarfile) - else: - return self._proc_builtin(tarfile) - - def _proc_builtin(self, tarfile): - """Process a builtin type or an unknown type which - will be treated as a regular file. - """ - self.offset_data = tarfile.fileobj.tell() - offset = self.offset_data - if self.isreg() or self.type not in SUPPORTED_TYPES: - # Skip the following data blocks. - offset += self._block(self.size) - tarfile.offset = offset - - # Patch the TarInfo object with saved global - # header information. 
- self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors) - - return self - - def _proc_gnulong(self, tarfile): - """Process the blocks that hold a GNU longname - or longlink member. - """ - buf = tarfile.fileobj.read(self._block(self.size)) - - # Fetch the next header and process it. - try: - next = self.fromtarfile(tarfile) - except HeaderError: - raise SubsequentHeaderError("missing or bad subsequent header") - - # Patch the TarInfo object from the next header with - # the longname information. - next.offset = self.offset - if self.type == GNUTYPE_LONGNAME: - next.name = nts(buf, tarfile.encoding, tarfile.errors) - elif self.type == GNUTYPE_LONGLINK: - next.linkname = nts(buf, tarfile.encoding, tarfile.errors) - - return next - - def _proc_sparse(self, tarfile): - """Process a GNU sparse header plus extra headers. - """ - # We already collected some sparse structures in frombuf(). - structs, isextended, origsize = self._sparse_structs - del self._sparse_structs - - # Collect sparse structures from extended header blocks. - while isextended: - buf = tarfile.fileobj.read(BLOCKSIZE) - pos = 0 - for i in range(21): - try: - offset = nti(buf[pos:pos + 12]) - numbytes = nti(buf[pos + 12:pos + 24]) - except ValueError: - break - if offset and numbytes: - structs.append((offset, numbytes)) - pos += 24 - isextended = bool(buf[504]) - self.sparse = structs - - self.offset_data = tarfile.fileobj.tell() - tarfile.offset = self.offset_data + self._block(self.size) - self.size = origsize - return self - - def _proc_pax(self, tarfile): - """Process an extended or global header as described in - POSIX.1-2008. - """ - # Read the header information. - buf = tarfile.fileobj.read(self._block(self.size)) - - # A pax header stores supplemental information for either - # the following file (extended) or all following files - # (global). - if self.type == XGLTYPE: - pax_headers = tarfile.pax_headers - else: - pax_headers = tarfile.pax_headers.copy() - - # Check if the pax header contains a hdrcharset field. This tells us - # the encoding of the path, linkpath, uname and gname fields. Normally, - # these fields are UTF-8 encoded but since POSIX.1-2008 tar - # implementations are allowed to store them as raw binary strings if - # the translation to UTF-8 fails. - match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf) - if match is not None: - pax_headers["hdrcharset"] = match.group(1).decode("utf8") - - # For the time being, we don't care about anything other than "BINARY". - # The only other value that is currently allowed by the standard is - # "ISO-IR 10646 2000 UTF-8" in other words UTF-8. - hdrcharset = pax_headers.get("hdrcharset") - if hdrcharset == "BINARY": - encoding = tarfile.encoding - else: - encoding = "utf8" - - # Parse pax header information. A record looks like that: - # "%d %s=%s\n" % (length, keyword, value). length is the size - # of the complete record including the length field itself and - # the newline. keyword and value are both UTF-8 encoded strings. - regex = re.compile(br"(\d+) ([^=]+)=") - pos = 0 - while True: - match = regex.match(buf, pos) - if not match: - break - - length, keyword = match.groups() - length = int(length) - value = buf[match.end(2) + 1:match.start(1) + length - 1] - - # Normally, we could just use "utf8" as the encoding and "strict" - # as the error handler, but we better not take the risk. 
For - # example, GNU tar <= 1.23 is known to store filenames it cannot - # translate to UTF-8 as raw strings (unfortunately without a - # hdrcharset=BINARY header). - # We first try the strict standard encoding, and if that fails we - # fall back on the user's encoding and error handler. - keyword = self._decode_pax_field(keyword, "utf8", "utf8", - tarfile.errors) - if keyword in PAX_NAME_FIELDS: - value = self._decode_pax_field(value, encoding, tarfile.encoding, - tarfile.errors) - else: - value = self._decode_pax_field(value, "utf8", "utf8", - tarfile.errors) - - pax_headers[keyword] = value - pos += length - - # Fetch the next header. - try: - next = self.fromtarfile(tarfile) - except HeaderError: - raise SubsequentHeaderError("missing or bad subsequent header") - - # Process GNU sparse information. - if "GNU.sparse.map" in pax_headers: - # GNU extended sparse format version 0.1. - self._proc_gnusparse_01(next, pax_headers) - - elif "GNU.sparse.size" in pax_headers: - # GNU extended sparse format version 0.0. - self._proc_gnusparse_00(next, pax_headers, buf) - - elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0": - # GNU extended sparse format version 1.0. - self._proc_gnusparse_10(next, pax_headers, tarfile) - - if self.type in (XHDTYPE, SOLARIS_XHDTYPE): - # Patch the TarInfo object with the extended header info. - next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors) - next.offset = self.offset - - if "size" in pax_headers: - # If the extended header replaces the size field, - # we need to recalculate the offset where the next - # header starts. - offset = next.offset_data - if next.isreg() or next.type not in SUPPORTED_TYPES: - offset += next._block(next.size) - tarfile.offset = offset - - return next - - def _proc_gnusparse_00(self, next, pax_headers, buf): - """Process a GNU tar extended sparse header, version 0.0. - """ - offsets = [] - for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf): - offsets.append(int(match.group(1))) - numbytes = [] - for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf): - numbytes.append(int(match.group(1))) - next.sparse = list(zip(offsets, numbytes)) - - def _proc_gnusparse_01(self, next, pax_headers): - """Process a GNU tar extended sparse header, version 0.1. - """ - sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")] - next.sparse = list(zip(sparse[::2], sparse[1::2])) - - def _proc_gnusparse_10(self, next, pax_headers, tarfile): - """Process a GNU tar extended sparse header, version 1.0. - """ - fields = None - sparse = [] - buf = tarfile.fileobj.read(BLOCKSIZE) - fields, buf = buf.split(b"\n", 1) - fields = int(fields) - while len(sparse) < fields * 2: - if b"\n" not in buf: - buf += tarfile.fileobj.read(BLOCKSIZE) - number, buf = buf.split(b"\n", 1) - sparse.append(int(number)) - next.offset_data = tarfile.fileobj.tell() - next.sparse = list(zip(sparse[::2], sparse[1::2])) - - def _apply_pax_info(self, pax_headers, encoding, errors): - """Replace fields with supplemental information from a previous - pax extended or global header. 
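Aside: _proc_pax walks the buffer with the `(\d+) ([^=]+)=` regex and slices each value out using the record's own length field. A standalone sketch of that parse loop, with the charset-fallback decoding left out for brevity:

    import re

    # Sketch: parse length-prefixed "<len> <keyword>=<value>\n" pax records,
    # following the slicing arithmetic in _proc_pax.
    def parse_pax(buf: bytes) -> dict:
        regex = re.compile(br"(\d+) ([^=]+)=")
        headers, pos = {}, 0
        while True:
            match = regex.match(buf, pos)
            if not match:
                break
            length = int(match.group(1))
            keyword = match.group(2)
            # value runs from just after '=' to just before the trailing '\n'
            value = buf[match.end(2) + 1:pos + length - 1]
            headers[keyword.decode("utf-8")] = value.decode("utf-8")
            pos += length
        return headers

    assert parse_pax(b"28 path=very/long/file/name\n") == {
        "path": "very/long/file/name"
    }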
- """ - for keyword, value in pax_headers.items(): - if keyword == "GNU.sparse.name": - setattr(self, "path", value) - elif keyword == "GNU.sparse.size": - setattr(self, "size", int(value)) - elif keyword == "GNU.sparse.realsize": - setattr(self, "size", int(value)) - elif keyword in PAX_FIELDS: - if keyword in PAX_NUMBER_FIELDS: - try: - value = PAX_NUMBER_FIELDS[keyword](value) - except ValueError: - value = 0 - if keyword == "path": - value = value.rstrip("/") - setattr(self, keyword, value) - - self.pax_headers = pax_headers.copy() - - def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors): - """Decode a single field from a pax record. - """ - try: - return value.decode(encoding, "strict") - except UnicodeDecodeError: - return value.decode(fallback_encoding, fallback_errors) - - def _block(self, count): - """Round up a byte count by BLOCKSIZE and return it, - e.g. _block(834) => 1024. - """ - blocks, remainder = divmod(count, BLOCKSIZE) - if remainder: - blocks += 1 - return blocks * BLOCKSIZE - - def isreg(self): - return self.type in REGULAR_TYPES - def isfile(self): - return self.isreg() - def isdir(self): - return self.type == DIRTYPE - def issym(self): - return self.type == SYMTYPE - def islnk(self): - return self.type == LNKTYPE - def ischr(self): - return self.type == CHRTYPE - def isblk(self): - return self.type == BLKTYPE - def isfifo(self): - return self.type == FIFOTYPE - def issparse(self): - return self.sparse is not None - def isdev(self): - return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE) -# class TarInfo - -class TarFile(object): - """The TarFile Class provides an interface to tar archives. - """ - - debug = 0 # May be set from 0 (no msgs) to 3 (all msgs) - - dereference = False # If true, add content of linked file to the - # tar file, else the link. - - ignore_zeros = False # If true, skips empty or invalid blocks and - # continues processing. - - errorlevel = 1 # If 0, fatal errors only appear in debug - # messages (if debug >= 0). If > 0, errors - # are passed to the caller as exceptions. - - format = DEFAULT_FORMAT # The format to use when creating an archive. - - encoding = ENCODING # Encoding for 8-bit character strings. - - errors = None # Error handler for unicode conversion. - - tarinfo = TarInfo # The default TarInfo class to use. - - fileobject = ExFileObject # The default ExFileObject class to use. - - def __init__(self, name=None, mode="r", fileobj=None, format=None, - tarinfo=None, dereference=None, ignore_zeros=None, encoding=None, - errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None): - """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to - read from an existing archive, 'a' to append data to an existing - file or 'w' to create a new file overwriting an existing one. `mode' - defaults to 'r'. - If `fileobj' is given, it is used for reading or writing data. If it - can be determined, `mode' is overridden by `fileobj's mode. - `fileobj' is not closed, when TarFile is closed. - """ - if len(mode) > 1 or mode not in "raw": - raise ValueError("mode must be 'r', 'a' or 'w'") - self.mode = mode - self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode] - - if not fileobj: - if self.mode == "a" and not os.path.exists(name): - # Create nonexistent files in append mode. 
- self.mode = "w" - self._mode = "wb" - fileobj = bltn_open(name, self._mode) - self._extfileobj = False - else: - if name is None and hasattr(fileobj, "name"): - name = fileobj.name - if hasattr(fileobj, "mode"): - self._mode = fileobj.mode - self._extfileobj = True - self.name = os.path.abspath(name) if name else None - self.fileobj = fileobj - - # Init attributes. - if format is not None: - self.format = format - if tarinfo is not None: - self.tarinfo = tarinfo - if dereference is not None: - self.dereference = dereference - if ignore_zeros is not None: - self.ignore_zeros = ignore_zeros - if encoding is not None: - self.encoding = encoding - self.errors = errors - - if pax_headers is not None and self.format == PAX_FORMAT: - self.pax_headers = pax_headers - else: - self.pax_headers = {} - - if debug is not None: - self.debug = debug - if errorlevel is not None: - self.errorlevel = errorlevel - - # Init datastructures. - self.closed = False - self.members = [] # list of members as TarInfo objects - self._loaded = False # flag if all members have been read - self.offset = self.fileobj.tell() - # current position in the archive file - self.inodes = {} # dictionary caching the inodes of - # archive members already added - - try: - if self.mode == "r": - self.firstmember = None - self.firstmember = self.next() - - if self.mode == "a": - # Move to the end of the archive, - # before the first empty block. - while True: - self.fileobj.seek(self.offset) - try: - tarinfo = self.tarinfo.fromtarfile(self) - self.members.append(tarinfo) - except EOFHeaderError: - self.fileobj.seek(self.offset) - break - except HeaderError as e: - raise ReadError(str(e)) - - if self.mode in "aw": - self._loaded = True - - if self.pax_headers: - buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy()) - self.fileobj.write(buf) - self.offset += len(buf) - except: - if not self._extfileobj: - self.fileobj.close() - self.closed = True - raise - - #-------------------------------------------------------------------------- - # Below are the classmethods which act as alternate constructors to the - # TarFile class. The open() method is the only one that is needed for - # public use; it is the "super"-constructor and is able to select an - # adequate "sub"-constructor for a particular compression using the mapping - # from OPEN_METH. - # - # This concept allows one to subclass TarFile without losing the comfort of - # the super-constructor. A sub-constructor is registered and made available - # by adding it to the mapping in OPEN_METH. - - @classmethod - def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs): - """Open a tar archive for reading, writing or appending. Return - an appropriate TarFile class. 
- - mode: - 'r' or 'r:*' open for reading with transparent compression - 'r:' open for reading exclusively uncompressed - 'r:gz' open for reading with gzip compression - 'r:bz2' open for reading with bzip2 compression - 'a' or 'a:' open for appending, creating the file if necessary - 'w' or 'w:' open for writing without compression - 'w:gz' open for writing with gzip compression - 'w:bz2' open for writing with bzip2 compression - - 'r|*' open a stream of tar blocks with transparent compression - 'r|' open an uncompressed stream of tar blocks for reading - 'r|gz' open a gzip compressed stream of tar blocks - 'r|bz2' open a bzip2 compressed stream of tar blocks - 'w|' open an uncompressed stream for writing - 'w|gz' open a gzip compressed stream for writing - 'w|bz2' open a bzip2 compressed stream for writing - """ - - if not name and not fileobj: - raise ValueError("nothing to open") - - if mode in ("r", "r:*"): - # Find out which *open() is appropriate for opening the file. - for comptype in cls.OPEN_METH: - func = getattr(cls, cls.OPEN_METH[comptype]) - if fileobj is not None: - saved_pos = fileobj.tell() - try: - return func(name, "r", fileobj, **kwargs) - except (ReadError, CompressionError) as e: - if fileobj is not None: - fileobj.seek(saved_pos) - continue - raise ReadError("file could not be opened successfully") - - elif ":" in mode: - filemode, comptype = mode.split(":", 1) - filemode = filemode or "r" - comptype = comptype or "tar" - - # Select the *open() function according to - # given compression. - if comptype in cls.OPEN_METH: - func = getattr(cls, cls.OPEN_METH[comptype]) - else: - raise CompressionError("unknown compression type %r" % comptype) - return func(name, filemode, fileobj, **kwargs) - - elif "|" in mode: - filemode, comptype = mode.split("|", 1) - filemode = filemode or "r" - comptype = comptype or "tar" - - if filemode not in "rw": - raise ValueError("mode must be 'r' or 'w'") - - stream = _Stream(name, filemode, comptype, fileobj, bufsize) - try: - t = cls(name, filemode, stream, **kwargs) - except: - stream.close() - raise - t._extfileobj = False - return t - - elif mode in "aw": - return cls.taropen(name, mode, fileobj, **kwargs) - - raise ValueError("undiscernible mode") - - @classmethod - def taropen(cls, name, mode="r", fileobj=None, **kwargs): - """Open uncompressed tar archive name for reading or writing. - """ - if len(mode) > 1 or mode not in "raw": - raise ValueError("mode must be 'r', 'a' or 'w'") - return cls(name, mode, fileobj, **kwargs) - - @classmethod - def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): - """Open gzip compressed tar archive name for reading or writing. - Appending is not allowed. - """ - if len(mode) > 1 or mode not in "rw": - raise ValueError("mode must be 'r' or 'w'") - - try: - import gzip - gzip.GzipFile - except (ImportError, AttributeError): - raise CompressionError("gzip module is not available") - - extfileobj = fileobj is not None - try: - fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj) - t = cls.taropen(name, mode, fileobj, **kwargs) - except IOError: - if not extfileobj and fileobj is not None: - fileobj.close() - if fileobj is None: - raise - raise ReadError("not a gzip file") - except: - if not extfileobj and fileobj is not None: - fileobj.close() - raise - t._extfileobj = extfileobj - return t - - @classmethod - def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): - """Open bzip2 compressed tar archive name for reading or writing. 
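Aside: open() routes on the mode string, with ':' selecting a compression for a seekable file and '|' selecting a stream of tar blocks. A compact sketch of just the parsing step, with no actual opening:

    # Sketch: split a tarfile mode string into (filemode, comptype, streaming),
    # the way open() does before dispatching via OPEN_METH or _Stream.
    def parse_mode(mode: str):
        if ":" in mode:
            filemode, comptype = mode.split(":", 1)
            return filemode or "r", comptype or "tar", False
        if "|" in mode:
            filemode, comptype = mode.split("|", 1)
            return filemode or "r", comptype or "tar", True
        return mode or "r", "tar", False

    assert parse_mode("r:gz") == ("r", "gz", False)
    assert parse_mode("w|bz2") == ("w", "bz2", True)
    assert parse_mode("a") == ("a", "tar", False)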
- Appending is not allowed. - """ - if len(mode) > 1 or mode not in "rw": - raise ValueError("mode must be 'r' or 'w'.") - - try: - import bz2 - except ImportError: - raise CompressionError("bz2 module is not available") - - if fileobj is not None: - fileobj = _BZ2Proxy(fileobj, mode) - else: - fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel) - - try: - t = cls.taropen(name, mode, fileobj, **kwargs) - except (IOError, EOFError): - fileobj.close() - raise ReadError("not a bzip2 file") - t._extfileobj = False - return t - - # All *open() methods are registered here. - OPEN_METH = { - "tar": "taropen", # uncompressed tar - "gz": "gzopen", # gzip compressed tar - "bz2": "bz2open" # bzip2 compressed tar - } - - #-------------------------------------------------------------------------- - # The public methods which TarFile provides: - - def close(self): - """Close the TarFile. In write-mode, two finishing zero blocks are - appended to the archive. - """ - if self.closed: - return - - if self.mode in "aw": - self.fileobj.write(NUL * (BLOCKSIZE * 2)) - self.offset += (BLOCKSIZE * 2) - # fill up the end with zero-blocks - # (like option -b20 for tar does) - blocks, remainder = divmod(self.offset, RECORDSIZE) - if remainder > 0: - self.fileobj.write(NUL * (RECORDSIZE - remainder)) - - if not self._extfileobj: - self.fileobj.close() - self.closed = True - - def getmember(self, name): - """Return a TarInfo object for member `name'. If `name' can not be - found in the archive, KeyError is raised. If a member occurs more - than once in the archive, its last occurrence is assumed to be the - most up-to-date version. - """ - tarinfo = self._getmember(name) - if tarinfo is None: - raise KeyError("filename %r not found" % name) - return tarinfo - - def getmembers(self): - """Return the members of the archive as a list of TarInfo objects. The - list has the same order as the members in the archive. - """ - self._check() - if not self._loaded: # if we want to obtain a list of - self._load() # all members, we first have to - # scan the whole archive. - return self.members - - def getnames(self): - """Return the members of the archive as a list of their names. It has - the same order as the list returned by getmembers(). - """ - return [tarinfo.name for tarinfo in self.getmembers()] - - def gettarinfo(self, name=None, arcname=None, fileobj=None): - """Create a TarInfo object for either the file `name' or the file - object `fileobj' (using os.fstat on its file descriptor). You can - modify some of the TarInfo's attributes before you add it using - addfile(). If given, `arcname' specifies an alternative name for the - file in the archive. - """ - self._check("aw") - - # When fileobj is given, replace name by - # fileobj's real name. - if fileobj is not None: - name = fileobj.name - - # Building the name of the member in the archive. - # Backward slashes are converted to forward slashes, - # Absolute paths are turned to relative paths. - if arcname is None: - arcname = name - drv, arcname = os.path.splitdrive(arcname) - arcname = arcname.replace(os.sep, "/") - arcname = arcname.lstrip("/") - - # Now, fill the TarInfo object with - # information specific for the file. - tarinfo = self.tarinfo() - tarinfo.tarfile = self - - # Use os.stat or os.lstat, depending on platform - # and if symlinks shall be resolved. 
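Aside: close() ends the archive with two 512-byte zero blocks, then pads to a multiple of RECORDSIZE (20 blocks, 10240 bytes), matching tar's -b20 default. The arithmetic as a sketch:

    BLOCKSIZE, RECORDSIZE = 512, 20 * 512

    # Sketch: bytes of padding close() appends after `offset` bytes of data.
    def closing_padding(offset: int) -> int:
        pad = 2 * BLOCKSIZE                  # two end-of-archive NUL blocks
        remainder = (offset + pad) % RECORDSIZE
        if remainder:
            pad += RECORDSIZE - remainder    # fill out the final record
        return pad

    # A member occupying 1024 bytes yields a file that ends exactly at 10240.
    assert 1024 + closing_padding(1024) == RECORDSIZE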
- if fileobj is None: - if hasattr(os, "lstat") and not self.dereference: - statres = os.lstat(name) - else: - statres = os.stat(name) - else: - statres = os.fstat(fileobj.fileno()) - linkname = "" - - stmd = statres.st_mode - if stat.S_ISREG(stmd): - inode = (statres.st_ino, statres.st_dev) - if not self.dereference and statres.st_nlink > 1 and \ - inode in self.inodes and arcname != self.inodes[inode]: - # Is it a hardlink to an already - # archived file? - type = LNKTYPE - linkname = self.inodes[inode] - else: - # The inode is added only if its valid. - # For win32 it is always 0. - type = REGTYPE - if inode[0]: - self.inodes[inode] = arcname - elif stat.S_ISDIR(stmd): - type = DIRTYPE - elif stat.S_ISFIFO(stmd): - type = FIFOTYPE - elif stat.S_ISLNK(stmd): - type = SYMTYPE - linkname = os.readlink(name) - elif stat.S_ISCHR(stmd): - type = CHRTYPE - elif stat.S_ISBLK(stmd): - type = BLKTYPE - else: - return None - - # Fill the TarInfo object with all - # information we can get. - tarinfo.name = arcname - tarinfo.mode = stmd - tarinfo.uid = statres.st_uid - tarinfo.gid = statres.st_gid - if type == REGTYPE: - tarinfo.size = statres.st_size - else: - tarinfo.size = 0 - tarinfo.mtime = statres.st_mtime - tarinfo.type = type - tarinfo.linkname = linkname - if pwd: - try: - tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0] - except KeyError: - pass - if grp: - try: - tarinfo.gname = grp.getgrgid(tarinfo.gid)[0] - except KeyError: - pass - - if type in (CHRTYPE, BLKTYPE): - if hasattr(os, "major") and hasattr(os, "minor"): - tarinfo.devmajor = os.major(statres.st_rdev) - tarinfo.devminor = os.minor(statres.st_rdev) - return tarinfo - - def list(self, verbose=True): - """Print a table of contents to sys.stdout. If `verbose' is False, only - the names of the members are printed. If it is True, an `ls -l'-like - output is produced. - """ - self._check() - - for tarinfo in self: - if verbose: - print(filemode(tarinfo.mode), end=' ') - print("%s/%s" % (tarinfo.uname or tarinfo.uid, - tarinfo.gname or tarinfo.gid), end=' ') - if tarinfo.ischr() or tarinfo.isblk(): - print("%10s" % ("%d,%d" \ - % (tarinfo.devmajor, tarinfo.devminor)), end=' ') - else: - print("%10d" % tarinfo.size, end=' ') - print("%d-%02d-%02d %02d:%02d:%02d" \ - % time.localtime(tarinfo.mtime)[:6], end=' ') - - print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ') - - if verbose: - if tarinfo.issym(): - print("->", tarinfo.linkname, end=' ') - if tarinfo.islnk(): - print("link to", tarinfo.linkname, end=' ') - print() - - def add(self, name, arcname=None, recursive=True, exclude=None, filter=None): - """Add the file `name' to the archive. `name' may be any type of file - (directory, fifo, symbolic link, etc.). If given, `arcname' - specifies an alternative name for the file in the archive. - Directories are added recursively by default. This can be avoided by - setting `recursive' to False. `exclude' is a function that should - return True for each filename to be excluded. `filter' is a function - that expects a TarInfo object argument and returns the changed - TarInfo object, if it returns None the TarInfo object will be - excluded from the archive. - """ - self._check("aw") - - if arcname is None: - arcname = name - - # Exclude pathnames. - if exclude is not None: - import warnings - warnings.warn("use the filter argument instead", - DeprecationWarning, 2) - if exclude(name): - self._dbg(2, "tarfile: Excluded %r" % name) - return - - # Skip if somebody tries to archive the archive... 
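Aside: gettarinfo maps the stat() result onto the single-byte ustar member types, returning None for anything unsupported (sockets, for instance). A sketch of that mapping; the constants shown mirror the ustar values used above:

    import stat

    # Sketch: the st_mode -> tar member type mapping used by gettarinfo().
    REGTYPE, SYMTYPE, CHRTYPE = b"0", b"2", b"3"
    BLKTYPE, DIRTYPE, FIFOTYPE = b"4", b"5", b"6"

    def tar_type(st_mode: int):
        if stat.S_ISREG(st_mode):
            return REGTYPE
        if stat.S_ISDIR(st_mode):
            return DIRTYPE
        if stat.S_ISFIFO(st_mode):
            return FIFOTYPE
        if stat.S_ISLNK(st_mode):
            return SYMTYPE
        if stat.S_ISCHR(st_mode):
            return CHRTYPE
        if stat.S_ISBLK(st_mode):
            return BLKTYPE
        return None  # unsupported: socket, door, ...

    assert tar_type(stat.S_IFREG | 0o644) == REGTYPE
    assert tar_type(stat.S_IFSOCK) is None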
- if self.name is not None and os.path.abspath(name) == self.name: - self._dbg(2, "tarfile: Skipped %r" % name) - return - - self._dbg(1, name) - - # Create a TarInfo object from the file. - tarinfo = self.gettarinfo(name, arcname) - - if tarinfo is None: - self._dbg(1, "tarfile: Unsupported type %r" % name) - return - - # Change or exclude the TarInfo object. - if filter is not None: - tarinfo = filter(tarinfo) - if tarinfo is None: - self._dbg(2, "tarfile: Excluded %r" % name) - return - - # Append the tar header and data to the archive. - if tarinfo.isreg(): - f = bltn_open(name, "rb") - self.addfile(tarinfo, f) - f.close() - - elif tarinfo.isdir(): - self.addfile(tarinfo) - if recursive: - for f in os.listdir(name): - self.add(os.path.join(name, f), os.path.join(arcname, f), - recursive, exclude, filter=filter) - - else: - self.addfile(tarinfo) - - def addfile(self, tarinfo, fileobj=None): - """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is - given, tarinfo.size bytes are read from it and added to the archive. - You can create TarInfo objects using gettarinfo(). - On Windows platforms, `fileobj' should always be opened with mode - 'rb' to avoid irritation about the file size. - """ - self._check("aw") - - tarinfo = copy.copy(tarinfo) - - buf = tarinfo.tobuf(self.format, self.encoding, self.errors) - self.fileobj.write(buf) - self.offset += len(buf) - - # If there's data to follow, append it. - if fileobj is not None: - copyfileobj(fileobj, self.fileobj, tarinfo.size) - blocks, remainder = divmod(tarinfo.size, BLOCKSIZE) - if remainder > 0: - self.fileobj.write(NUL * (BLOCKSIZE - remainder)) - blocks += 1 - self.offset += blocks * BLOCKSIZE - - self.members.append(tarinfo) - - def extractall(self, path=".", members=None): - """Extract all members from the archive to the current working - directory and set owner, modification time and permissions on - directories afterwards. `path' specifies a different directory - to extract to. `members' is optional and must be a subset of the - list returned by getmembers(). - """ - directories = [] - - if members is None: - members = self - - for tarinfo in members: - if tarinfo.isdir(): - # Extract directories with a safe mode. - directories.append(tarinfo) - tarinfo = copy.copy(tarinfo) - tarinfo.mode = 0o700 - # Do not set_attrs directories, as we will do that further down - self.extract(tarinfo, path, set_attrs=not tarinfo.isdir()) - - # Reverse sort directories. - directories.sort(key=lambda a: a.name) - directories.reverse() - - # Set correct owner, mtime and filemode on directories. - for tarinfo in directories: - dirpath = os.path.join(path, tarinfo.name) - try: - self.chown(tarinfo, dirpath) - self.utime(tarinfo, dirpath) - self.chmod(tarinfo, dirpath) - except ExtractError as e: - if self.errorlevel > 1: - raise - else: - self._dbg(1, "tarfile: %s" % e) - - def extract(self, member, path="", set_attrs=True): - """Extract a member from the archive to the current working directory, - using its full name. Its file information is extracted as accurately - as possible. `member' may be a filename or a TarInfo object. You can - specify a different directory using `path'. File attributes (owner, - mtime, mode) are set unless `set_attrs' is False. - """ - self._check("r") - - if isinstance(member, str): - tarinfo = self.getmember(member) - else: - tarinfo = member - - # Prepare the link target for makelink(). 
- if tarinfo.islnk(): - tarinfo._link_target = os.path.join(path, tarinfo.linkname) - - try: - self._extract_member(tarinfo, os.path.join(path, tarinfo.name), - set_attrs=set_attrs) - except EnvironmentError as e: - if self.errorlevel > 0: - raise - else: - if e.filename is None: - self._dbg(1, "tarfile: %s" % e.strerror) - else: - self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename)) - except ExtractError as e: - if self.errorlevel > 1: - raise - else: - self._dbg(1, "tarfile: %s" % e) - - def extractfile(self, member): - """Extract a member from the archive as a file object. `member' may be - a filename or a TarInfo object. If `member' is a regular file, a - file-like object is returned. If `member' is a link, a file-like - object is constructed from the link's target. If `member' is none of - the above, None is returned. - The file-like object is read-only and provides the following - methods: read(), readline(), readlines(), seek() and tell() - """ - self._check("r") - - if isinstance(member, str): - tarinfo = self.getmember(member) - else: - tarinfo = member - - if tarinfo.isreg(): - return self.fileobject(self, tarinfo) - - elif tarinfo.type not in SUPPORTED_TYPES: - # If a member's type is unknown, it is treated as a - # regular file. - return self.fileobject(self, tarinfo) - - elif tarinfo.islnk() or tarinfo.issym(): - if isinstance(self.fileobj, _Stream): - # A small but ugly workaround for the case that someone tries - # to extract a (sym)link as a file-object from a non-seekable - # stream of tar blocks. - raise StreamError("cannot extract (sym)link as file object") - else: - # A (sym)link's file object is its target's file object. - return self.extractfile(self._find_link_target(tarinfo)) - else: - # If there's no data associated with the member (directory, chrdev, - # blkdev, etc.), return None instead of a file object. - return None - - def _extract_member(self, tarinfo, targetpath, set_attrs=True): - """Extract the TarInfo object tarinfo to a physical - file called targetpath. - """ - # Fetch the TarInfo object for the given name - # and build the destination pathname, replacing - # forward slashes to platform specific separators. - targetpath = targetpath.rstrip("/") - targetpath = targetpath.replace("/", os.sep) - - # Create all upper directories. - upperdirs = os.path.dirname(targetpath) - if upperdirs and not os.path.exists(upperdirs): - # Create directories that are not part of the archive with - # default permissions. - os.makedirs(upperdirs) - - if tarinfo.islnk() or tarinfo.issym(): - self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname)) - else: - self._dbg(1, tarinfo.name) - - if tarinfo.isreg(): - self.makefile(tarinfo, targetpath) - elif tarinfo.isdir(): - self.makedir(tarinfo, targetpath) - elif tarinfo.isfifo(): - self.makefifo(tarinfo, targetpath) - elif tarinfo.ischr() or tarinfo.isblk(): - self.makedev(tarinfo, targetpath) - elif tarinfo.islnk() or tarinfo.issym(): - self.makelink(tarinfo, targetpath) - elif tarinfo.type not in SUPPORTED_TYPES: - self.makeunknown(tarinfo, targetpath) - else: - self.makefile(tarinfo, targetpath) - - if set_attrs: - self.chown(tarinfo, targetpath) - if not tarinfo.issym(): - self.chmod(tarinfo, targetpath) - self.utime(tarinfo, targetpath) - - #-------------------------------------------------------------------------- - # Below are the different file methods. They are called via - # _extract_member() when extract() is called. They can be replaced in a - # subclass to implement other functionality. 
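Aside: because _extract_member dispatches to these overridable make*() methods, a subclass can change one extraction behaviour without reimplementing extract() itself. A hedged sketch using the stdlib tarfile, whose interface this removed backport mirrors:

    import tarfile

    # Sketch: override a single extraction hook; extract() still handles
    # type dispatch, attribute setting, and the errorlevel policy.
    class NoFifoTarFile(tarfile.TarFile):
        def makefifo(self, tarinfo, targetpath):
            # Refuse FIFO members instead of creating them on disk.
            raise tarfile.ExtractError(
                "FIFO members are not allowed: %s" % tarinfo.name)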
- - def makedir(self, tarinfo, targetpath): - """Make a directory called targetpath. - """ - try: - # Use a safe mode for the directory, the real mode is set - # later in _extract_member(). - os.mkdir(targetpath, 0o700) - except EnvironmentError as e: - if e.errno != errno.EEXIST: - raise - - def makefile(self, tarinfo, targetpath): - """Make a file called targetpath. - """ - source = self.fileobj - source.seek(tarinfo.offset_data) - target = bltn_open(targetpath, "wb") - if tarinfo.sparse is not None: - for offset, size in tarinfo.sparse: - target.seek(offset) - copyfileobj(source, target, size) - else: - copyfileobj(source, target, tarinfo.size) - target.seek(tarinfo.size) - target.truncate() - target.close() - - def makeunknown(self, tarinfo, targetpath): - """Make a file from a TarInfo object with an unknown type - at targetpath. - """ - self.makefile(tarinfo, targetpath) - self._dbg(1, "tarfile: Unknown file type %r, " \ - "extracted as regular file." % tarinfo.type) - - def makefifo(self, tarinfo, targetpath): - """Make a fifo called targetpath. - """ - if hasattr(os, "mkfifo"): - os.mkfifo(targetpath) - else: - raise ExtractError("fifo not supported by system") - - def makedev(self, tarinfo, targetpath): - """Make a character or block device called targetpath. - """ - if not hasattr(os, "mknod") or not hasattr(os, "makedev"): - raise ExtractError("special devices not supported by system") - - mode = tarinfo.mode - if tarinfo.isblk(): - mode |= stat.S_IFBLK - else: - mode |= stat.S_IFCHR - - os.mknod(targetpath, mode, - os.makedev(tarinfo.devmajor, tarinfo.devminor)) - - def makelink(self, tarinfo, targetpath): - """Make a (symbolic) link called targetpath. If it cannot be created - (platform limitation), we try to make a copy of the referenced file - instead of a link. - """ - try: - # For systems that support symbolic and hard links. - if tarinfo.issym(): - os.symlink(tarinfo.linkname, targetpath) - else: - # See extract(). - if os.path.exists(tarinfo._link_target): - os.link(tarinfo._link_target, targetpath) - else: - self._extract_member(self._find_link_target(tarinfo), - targetpath) - except symlink_exception: - if tarinfo.issym(): - linkpath = os.path.join(os.path.dirname(tarinfo.name), - tarinfo.linkname) - else: - linkpath = tarinfo.linkname - else: - try: - self._extract_member(self._find_link_target(tarinfo), - targetpath) - except KeyError: - raise ExtractError("unable to resolve link inside archive") - - def chown(self, tarinfo, targetpath): - """Set owner of targetpath according to tarinfo. - """ - if pwd and hasattr(os, "geteuid") and os.geteuid() == 0: - # We have to be root to do so. - try: - g = grp.getgrnam(tarinfo.gname)[2] - except KeyError: - g = tarinfo.gid - try: - u = pwd.getpwnam(tarinfo.uname)[2] - except KeyError: - u = tarinfo.uid - try: - if tarinfo.issym() and hasattr(os, "lchown"): - os.lchown(targetpath, u, g) - else: - if sys.platform != "os2emx": - os.chown(targetpath, u, g) - except EnvironmentError as e: - raise ExtractError("could not change owner") - - def chmod(self, tarinfo, targetpath): - """Set file permissions of targetpath according to tarinfo. - """ - if hasattr(os, 'chmod'): - try: - os.chmod(targetpath, tarinfo.mode) - except EnvironmentError as e: - raise ExtractError("could not change mode") - - def utime(self, tarinfo, targetpath): - """Set modification time of targetpath according to tarinfo. 
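Aside: for sparse members, makefile() above seeks to each data offset, writes only the data runs, and then extends the file to its logical size with seek(size) plus truncate(), so the holes are never written. A sketch of the same idea on a regular file (on POSIX, truncate() past EOF zero-extends; the data layout here is illustrative):

    import tempfile

    # Sketch: write only the data runs of a sparse member, then truncate
    # to the logical size so the holes exist without being written.
    def write_sparse(target, runs, data: bytes, size: int):
        pos = 0
        for offset, length in runs:
            target.seek(offset)
            target.write(data[pos:pos + length])
            pos += length
        target.seek(size)
        target.truncate()  # extend to the logical size; holes stay unwritten

    with tempfile.TemporaryFile() as f:
        write_sparse(f, [(0, 2), (6, 2)], b"ABCD", 10)
        f.seek(0)
        assert f.read() == b"AB\0\0\0\0CD\0\0"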
- """ - if not hasattr(os, 'utime'): - return - try: - os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime)) - except EnvironmentError as e: - raise ExtractError("could not change modification time") - - #-------------------------------------------------------------------------- - def next(self): - """Return the next member of the archive as a TarInfo object, when - TarFile is opened for reading. Return None if there is no more - available. - """ - self._check("ra") - if self.firstmember is not None: - m = self.firstmember - self.firstmember = None - return m - - # Read the next block. - self.fileobj.seek(self.offset) - tarinfo = None - while True: - try: - tarinfo = self.tarinfo.fromtarfile(self) - except EOFHeaderError as e: - if self.ignore_zeros: - self._dbg(2, "0x%X: %s" % (self.offset, e)) - self.offset += BLOCKSIZE - continue - except InvalidHeaderError as e: - if self.ignore_zeros: - self._dbg(2, "0x%X: %s" % (self.offset, e)) - self.offset += BLOCKSIZE - continue - elif self.offset == 0: - raise ReadError(str(e)) - except EmptyHeaderError: - if self.offset == 0: - raise ReadError("empty file") - except TruncatedHeaderError as e: - if self.offset == 0: - raise ReadError(str(e)) - except SubsequentHeaderError as e: - raise ReadError(str(e)) - break - - if tarinfo is not None: - self.members.append(tarinfo) - else: - self._loaded = True - - return tarinfo - - #-------------------------------------------------------------------------- - # Little helper methods: - - def _getmember(self, name, tarinfo=None, normalize=False): - """Find an archive member by name from bottom to top. - If tarinfo is given, it is used as the starting point. - """ - # Ensure that all members have been loaded. - members = self.getmembers() - - # Limit the member search list up to tarinfo. - if tarinfo is not None: - members = members[:members.index(tarinfo)] - - if normalize: - name = os.path.normpath(name) - - for member in reversed(members): - if normalize: - member_name = os.path.normpath(member.name) - else: - member_name = member.name - - if name == member_name: - return member - - def _load(self): - """Read through the entire archive file and look for readable - members. - """ - while True: - tarinfo = self.next() - if tarinfo is None: - break - self._loaded = True - - def _check(self, mode=None): - """Check if TarFile is still open, and if the operation's mode - corresponds to TarFile's mode. - """ - if self.closed: - raise IOError("%s is closed" % self.__class__.__name__) - if mode is not None and self.mode not in mode: - raise IOError("bad operation for mode %r" % self.mode) - - def _find_link_target(self, tarinfo): - """Find the target member of a symlink or hardlink member in the - archive. - """ - if tarinfo.issym(): - # Always search the entire archive. - linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname - limit = None - else: - # Search the archive before the link, because a hard link is - # just a reference to an already archived file. - linkname = tarinfo.linkname - limit = tarinfo - - member = self._getmember(linkname, tarinfo=limit, normalize=True) - if member is None: - raise KeyError("linkname %r not found" % linkname) - return member - - def __iter__(self): - """Provide an iterator object. - """ - if self._loaded: - return iter(self.members) - else: - return TarIter(self) - - def _dbg(self, level, msg): - """Write debugging output to sys.stderr. 
- """ - if level <= self.debug: - print(msg, file=sys.stderr) - - def __enter__(self): - self._check() - return self - - def __exit__(self, type, value, traceback): - if type is None: - self.close() - else: - # An exception occurred. We must not call close() because - # it would try to write end-of-archive blocks and padding. - if not self._extfileobj: - self.fileobj.close() - self.closed = True -# class TarFile - -class TarIter(object): - """Iterator Class. - - for tarinfo in TarFile(...): - suite... - """ - - def __init__(self, tarfile): - """Construct a TarIter object. - """ - self.tarfile = tarfile - self.index = 0 - def __iter__(self): - """Return iterator object. - """ - return self - - def __next__(self): - """Return the next item using TarFile's next() method. - When all members have been read, set TarFile as _loaded. - """ - # Fix for SF #1100429: Under rare circumstances it can - # happen that getmembers() is called during iteration, - # which will cause TarIter to stop prematurely. - if not self.tarfile._loaded: - tarinfo = self.tarfile.next() - if not tarinfo: - self.tarfile._loaded = True - raise StopIteration - else: - try: - tarinfo = self.tarfile.members[self.index] - except IndexError: - raise StopIteration - self.index += 1 - return tarinfo - - next = __next__ # for Python 2.x - -#-------------------- -# exported functions -#-------------------- -def is_tarfile(name): - """Return True if name points to a tar archive that we - are able to handle, else return False. - """ - try: - t = open(name) - t.close() - return True - except TarError: - return False - -bltn_open = open -open = TarFile.open diff --git a/pipenv/patched/notpip/_vendor/distlib/compat.py b/pipenv/patched/notpip/_vendor/distlib/compat.py index c316fd973a..e594106956 100644 --- a/pipenv/patched/notpip/_vendor/distlib/compat.py +++ b/pipenv/patched/notpip/_vendor/distlib/compat.py @@ -48,17 +48,18 @@ def quote(s): from itertools import ifilter as filter from itertools import ifilterfalse as filterfalse - _userprog = None - def splituser(host): - """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" - global _userprog - if _userprog is None: - import re - _userprog = re.compile('^(.*)@(.*)$') - - match = _userprog.match(host) - if match: return match.group(1, 2) - return None, host + # Leaving this around for now, in case it needs resurrecting in some way + # _userprog = None + # def splituser(host): + # """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" + # global _userprog + # if _userprog is None: + # import re + # _userprog = re.compile('^(.*)@(.*)$') + + # match = _userprog.match(host) + # if match: return match.group(1, 2) + # return None, host else: # pragma: no cover from io import StringIO @@ -68,7 +69,7 @@ def splituser(host): import builtins import configparser import shutil - from urllib.parse import (urlparse, urlunparse, urljoin, splituser, quote, + from urllib.parse import (urlparse, urlunparse, urljoin, quote, unquote, urlsplit, urlunsplit, splittype) from urllib.request import (urlopen, urlretrieve, Request, url2pathname, pathname2url, @@ -88,6 +89,7 @@ def splituser(host): from itertools import filterfalse filter = filter + try: from ssl import match_hostname, CertificateError except ImportError: # pragma: no cover diff --git a/pipenv/patched/notpip/_vendor/distlib/markers.py b/pipenv/patched/notpip/_vendor/distlib/markers.py index 923a832b2a..b43136fa11 100644 --- a/pipenv/patched/notpip/_vendor/distlib/markers.py +++ 
b/pipenv/patched/notpip/_vendor/distlib/markers.py @@ -13,19 +13,29 @@ # as ~= and === which aren't in Python, necessitating a different approach. import os +import re import sys import platform from .compat import string_types from .util import in_venv, parse_marker +from .version import NormalizedVersion as NV __all__ = ['interpret'] +_VERSION_PATTERN = re.compile(r'((\d+(\.\d+)*\w*)|\'(\d+(\.\d+)*\w*)\'|\"(\d+(\.\d+)*\w*)\")') + def _is_literal(o): if not isinstance(o, string_types) or not o: return False return o[0] in '\'"' +def _get_versions(s): + result = [] + for m in _VERSION_PATTERN.finditer(s): + result.append(NV(m.groups()[0])) + return set(result) + class Evaluator(object): """ This class is used to evaluate marker expessions. @@ -70,6 +80,13 @@ def evaluate(self, expr, context): lhs = self.evaluate(elhs, context) rhs = self.evaluate(erhs, context) + if ((elhs == 'python_version' or erhs == 'python_version') and + op in ('<', '<=', '>', '>=', '===', '==', '!=', '~=')): + lhs = NV(lhs) + rhs = NV(rhs) + elif elhs == 'python_version' and op in ('in', 'not in'): + lhs = NV(lhs) + rhs = _get_versions(rhs) result = self.operations[op](lhs, rhs) return result diff --git a/pipenv/patched/notpip/_vendor/distlib/scripts.py b/pipenv/patched/notpip/_vendor/distlib/scripts.py index 1ac01dde51..913912c7b8 100644 --- a/pipenv/patched/notpip/_vendor/distlib/scripts.py +++ b/pipenv/patched/notpip/_vendor/distlib/scripts.py @@ -14,7 +14,7 @@ from .compat import sysconfig, detect_encoding, ZipFile from .resources import finder from .util import (FileOperator, get_export_entry, convert_path, - get_executable, in_venv) + get_executable, get_platform, in_venv) logger = logging.getLogger(__name__) @@ -170,6 +170,11 @@ def _get_shebang(self, encoding, post_interp=b'', options=None): sysconfig.get_config_var('BINDIR'), 'python%s%s' % (sysconfig.get_config_var('VERSION'), sysconfig.get_config_var('EXE'))) + if not os.path.isfile(executable): + # for Python builds from source on Windows, no Python executables with + # a version suffix are created, so we use python.exe + executable = os.path.join(sysconfig.get_config_var('BINDIR'), + 'python%s' % (sysconfig.get_config_var('EXE'))) if options: executable = self._get_alternate_executable(executable, options) @@ -379,7 +384,8 @@ def _get_launcher(self, kind): bits = '64' else: bits = '32' - name = '%s%s.exe' % (kind, bits) + platform_suffix = '-arm' if get_platform() == 'win-arm64' else '' + name = '%s%s%s.exe' % (kind, bits, platform_suffix) # Issue 31: don't hardcode an absolute package name, but # determine it relative to the current package distlib_package = __name__.rsplit('.', 1)[0] diff --git a/pipenv/patched/notpip/_vendor/distlib/t64-arm.exe b/pipenv/patched/notpip/_vendor/distlib/t64-arm.exe new file mode 100644 index 0000000000..c5df4869da Binary files /dev/null and b/pipenv/patched/notpip/_vendor/distlib/t64-arm.exe differ diff --git a/pipenv/patched/notpip/_vendor/distlib/util.py b/pipenv/patched/notpip/_vendor/distlib/util.py index b9e2c695c9..80bfc864bc 100644 --- a/pipenv/patched/notpip/_vendor/distlib/util.py +++ b/pipenv/patched/notpip/_vendor/distlib/util.py @@ -215,6 +215,10 @@ def get_versions(ver_remaining): if not ver_remaining or ver_remaining[0] != ',': break ver_remaining = ver_remaining[1:].lstrip() + # Some packages have a trailing comma which would break things + # See issue #148 + if not ver_remaining: + break m = COMPARE_OP.match(ver_remaining) if not m: raise SyntaxError('invalid constraint: %s' % ver_remaining) diff --git 
a/pipenv/patched/notpip/_vendor/distlib/version.py b/pipenv/patched/notpip/_vendor/distlib/version.py index 86c069a7c2..c7c8bb6ff4 100644 --- a/pipenv/patched/notpip/_vendor/distlib/version.py +++ b/pipenv/patched/notpip/_vendor/distlib/version.py @@ -194,7 +194,7 @@ def _pep_440_key(s): if not groups[0]: epoch = 0 else: - epoch = int(groups[0]) + epoch = int(groups[0][:-1]) pre = groups[4:6] post = groups[7:9] dev = groups[10:12] diff --git a/pipenv/patched/notpip/_vendor/distlib/w64-arm.exe b/pipenv/patched/notpip/_vendor/distlib/w64-arm.exe new file mode 100644 index 0000000000..70a2ec2685 Binary files /dev/null and b/pipenv/patched/notpip/_vendor/distlib/w64-arm.exe differ diff --git a/pipenv/patched/notpip/_vendor/distlib/wheel.py b/pipenv/patched/notpip/_vendor/distlib/wheel.py index 5262c8323e..48abfde5b5 100644 --- a/pipenv/patched/notpip/_vendor/distlib/wheel.py +++ b/pipenv/patched/notpip/_vendor/distlib/wheel.py @@ -47,10 +47,7 @@ VER_SUFFIX = sysconfig.get_config_var('py_version_nodot') if not VER_SUFFIX: # pragma: no cover - if sys.version_info[1] >= 10: - VER_SUFFIX = '%s_%s' % sys.version_info[:2] # PEP 641 (draft) - else: - VER_SUFFIX = '%s%s' % sys.version_info[:2] + VER_SUFFIX = '%s%s' % sys.version_info[:2] PYVER = 'py' + VER_SUFFIX IMPVER = IMP_PREFIX + VER_SUFFIX diff --git a/pipenv/patched/notpip/_vendor/distro.py b/pipenv/patched/notpip/_vendor/distro.py index 0611b62a3a..7892741347 100644 --- a/pipenv/patched/notpip/_vendor/distro.py +++ b/pipenv/patched/notpip/_vendor/distro.py @@ -20,26 +20,61 @@ It is the recommended replacement for Python's original :py:func:`platform.linux_distribution` function, but it provides much more functionality. An alternative implementation became necessary because Python -3.5 deprecated this function, and Python 3.8 will remove it altogether. -Its predecessor function :py:func:`platform.dist` was already -deprecated since Python 2.6 and will also be removed in Python 3.8. -Still, there are many cases in which access to OS distribution information -is needed. See `Python issue 1322 `_ for -more information. +3.5 deprecated this function, and Python 3.8 removed it altogether. Its +predecessor function :py:func:`platform.dist` was already deprecated since +Python 2.6 and removed in Python 3.8. Still, there are many cases in which +access to OS distribution information is needed. See `Python issue 1322 +`_ for more information. """ +import argparse +import json +import logging import os import re -import sys -import json import shlex -import logging -import argparse import subprocess +import sys +import warnings + +__version__ = "1.6.0" + +# Use `if False` to avoid an ImportError on Python 2. After dropping Python 2 +# support, can use typing.TYPE_CHECKING instead. 
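Aside on the markers.py hunk above: it exists because python_version was previously compared as a plain string, and lexicographic comparison misorders two-digit components, so the change routes those comparisons through distlib's NormalizedVersion (NV). A minimal illustration, with a hand-rolled tuple parse standing in for NV:

    # Why markers.py now parses python_version: string comparison misorders
    # "3.10" relative to "3.9". Tuple parsing stands in for NormalizedVersion.
    def as_version(s: str):
        return tuple(int(part) for part in s.split("."))

    assert "3.10" < "3.9"                          # lexicographic: wrong
    assert as_version("3.10") > as_version("3.9")  # numeric: correct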
See: +# https://docs.python.org/3/library/typing.html#typing.TYPE_CHECKING +if False: # pragma: nocover + from typing import ( + Any, + Callable, + Dict, + Iterable, + Optional, + Sequence, + TextIO, + Tuple, + Type, + TypedDict, + Union, + ) + + VersionDict = TypedDict( + "VersionDict", {"major": str, "minor": str, "build_number": str} + ) + InfoDict = TypedDict( + "InfoDict", + { + "id": str, + "version": str, + "version_parts": VersionDict, + "like": str, + "codename": str, + }, + ) -_UNIXCONFDIR = os.environ.get('UNIXCONFDIR', '/etc') -_OS_RELEASE_BASENAME = 'os-release' +_UNIXCONFDIR = os.environ.get("UNIXCONFDIR", "/etc") +_UNIXUSRLIBDIR = os.environ.get("UNIXUSRLIBDIR", "/usr/lib") +_OS_RELEASE_BASENAME = "os-release" #: Translation table for normalizing the "ID" attribute defined in os-release #: files, for use by the :func:`distro.id` method. @@ -49,7 +84,7 @@ #: #: * Value: Normalized value. NORMALIZED_OS_ID = { - 'ol': 'oracle', # Oracle Linux + "ol": "oracle", # Oracle Linux } #: Translation table for normalizing the "Distributor ID" attribute returned by @@ -60,11 +95,11 @@ #: #: * Value: Normalized value. NORMALIZED_LSB_ID = { - 'enterpriseenterpriseas': 'oracle', # Oracle Enterprise Linux 4 - 'enterpriseenterpriseserver': 'oracle', # Oracle Linux 5 - 'redhatenterpriseworkstation': 'rhel', # RHEL 6, 7 Workstation - 'redhatenterpriseserver': 'rhel', # RHEL 6, 7 Server - 'redhatenterprisecomputenode': 'rhel', # RHEL 6 ComputeNode + "enterpriseenterpriseas": "oracle", # Oracle Enterprise Linux 4 + "enterpriseenterpriseserver": "oracle", # Oracle Linux 5 + "redhatenterpriseworkstation": "rhel", # RHEL 6, 7 Workstation + "redhatenterpriseserver": "rhel", # RHEL 6, 7 Server + "redhatenterprisecomputenode": "rhel", # RHEL 6 ComputeNode } #: Translation table for normalizing the distro ID derived from the file name @@ -75,30 +110,39 @@ #: #: * Value: Normalized value. NORMALIZED_DISTRO_ID = { - 'redhat': 'rhel', # RHEL 6.x, 7.x + "redhat": "rhel", # RHEL 6.x, 7.x } # Pattern for content of distro release file (reversed) _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile( - r'(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)') + r"(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)" +) # Pattern for base file name of distro release file -_DISTRO_RELEASE_BASENAME_PATTERN = re.compile( - r'(\w+)[-_](release|version)$') +_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(r"(\w+)[-_](release|version)$") # Base file names to be ignored when searching for distro release file _DISTRO_RELEASE_IGNORE_BASENAMES = ( - 'debian_version', - 'lsb-release', - 'oem-release', + "debian_version", + "lsb-release", + "oem-release", _OS_RELEASE_BASENAME, - 'system-release', - 'plesk-release', + "system-release", + "plesk-release", + "iredmail-release", ) def linux_distribution(full_distribution_name=True): + # type: (bool) -> Tuple[str, str, str] """ + .. deprecated:: 1.6.0 + + :func:`distro.linux_distribution()` is deprecated. It should only be + used as a compatibility shim with Python's + :py:func:`platform.linux_distribution()`. Please use :func:`distro.id`, + :func:`distro.version` and :func:`distro.name` instead. + Return information about the current OS distribution as a tuple ``(id_name, version, codename)`` with items as follows: @@ -122,10 +166,18 @@ def linux_distribution(full_distribution_name=True): method normalizes the distro ID string to a reliable machine-readable value for a number of popular OS distributions. 
""" + warnings.warn( + "distro.linux_distribution() is deprecated. It should only be used as a " + "compatibility shim with Python's platform.linux_distribution(). Please use " + "distro.id(), distro.version() and distro.name() instead.", + DeprecationWarning, + stacklevel=2, + ) return _distro.linux_distribution(full_distribution_name) def id(): + # type: () -> str """ Return the distro ID of the current distribution, as a machine-readable string. @@ -205,6 +257,7 @@ def id(): def name(pretty=False): + # type: (bool) -> str """ Return the name of the current OS distribution, as a human-readable string. @@ -244,6 +297,7 @@ def name(pretty=False): def version(pretty=False, best=False): + # type: (bool, bool) -> str """ Return the version of the current OS distribution, as a human-readable string. @@ -288,6 +342,7 @@ def version(pretty=False, best=False): def version_parts(best=False): + # type: (bool) -> Tuple[str, str, str] """ Return the version of the current OS distribution as a tuple ``(major, minor, build_number)`` with items as follows: @@ -305,6 +360,7 @@ def version_parts(best=False): def major_version(best=False): + # type: (bool) -> str """ Return the major version of the current OS distribution, as a string, if provided. @@ -318,6 +374,7 @@ def major_version(best=False): def minor_version(best=False): + # type: (bool) -> str """ Return the minor version of the current OS distribution, as a string, if provided. @@ -331,6 +388,7 @@ def minor_version(best=False): def build_number(best=False): + # type: (bool) -> str """ Return the build number of the current OS distribution, as a string, if provided. @@ -344,6 +402,7 @@ def build_number(best=False): def like(): + # type: () -> str """ Return a space-separated list of distro IDs of distributions that are closely related to the current OS distribution in regards to packaging @@ -361,6 +420,7 @@ def like(): def codename(): + # type: () -> str """ Return the codename for the release of the current OS distribution, as a string. @@ -385,6 +445,7 @@ def codename(): def info(pretty=False, best=False): + # type: (bool, bool) -> InfoDict """ Return certain machine-readable information items about the current OS distribution in a dictionary, as shown in the following example: @@ -429,6 +490,7 @@ def info(pretty=False, best=False): def os_release_info(): + # type: () -> Dict[str, str] """ Return a dictionary containing key-value pairs for the information items from the os-release file data source of the current OS distribution. @@ -439,6 +501,7 @@ def os_release_info(): def lsb_release_info(): + # type: () -> Dict[str, str] """ Return a dictionary containing key-value pairs for the information items from the lsb_release command data source of the current OS distribution. @@ -450,6 +513,7 @@ def lsb_release_info(): def distro_release_info(): + # type: () -> Dict[str, str] """ Return a dictionary containing key-value pairs for the information items from the distro release file data source of the current OS distribution. @@ -460,6 +524,7 @@ def distro_release_info(): def uname_info(): + # type: () -> Dict[str, str] """ Return a dictionary containing key-value pairs for the information items from the distro release file data source of the current OS distribution. @@ -468,6 +533,7 @@ def uname_info(): def os_release_attr(attribute): + # type: (str) -> str """ Return a single named information item from the os-release file data source of the current OS distribution. 
@@ -487,6 +553,7 @@ def os_release_attr(attribute): def lsb_release_attr(attribute): + # type: (str) -> str """ Return a single named information item from the lsb_release command output data source of the current OS distribution. @@ -507,6 +574,7 @@ def lsb_release_attr(attribute): def distro_release_attr(attribute): + # type: (str) -> str """ Return a single named information item from the distro release file data source of the current OS distribution. @@ -526,6 +594,7 @@ def distro_release_attr(attribute): def uname_attr(attribute): + # type: (str) -> str """ Return a single named information item from the distro release file data source of the current OS distribution. @@ -542,19 +611,26 @@ def uname_attr(attribute): return _distro.uname_attr(attribute) -class cached_property(object): - """A version of @property which caches the value. On access, it calls the - underlying function and sets the value in `__dict__` so future accesses - will not re-call the property. - """ - def __init__(self, f): - self._fname = f.__name__ - self._f = f +try: + from functools import cached_property +except ImportError: + # Python < 3.8 + class cached_property(object): # type: ignore + """A version of @property which caches the value. On access, it calls the + underlying function and sets the value in `__dict__` so future accesses + will not re-call the property. + """ + + def __init__(self, f): + # type: (Callable[[Any], Any]) -> None + self._fname = f.__name__ + self._f = f - def __get__(self, obj, owner): - assert obj is not None, 'call {} on an instance'.format(self._fname) - ret = obj.__dict__[self._fname] = self._f(obj) - return ret + def __get__(self, obj, owner): + # type: (Any, Type[Any]) -> Any + assert obj is not None, "call {} on an instance".format(self._fname) + ret = obj.__dict__[self._fname] = self._f(obj) + return ret class LinuxDistribution(object): @@ -575,11 +651,15 @@ class LinuxDistribution(object): lsb_release command. """ - def __init__(self, - include_lsb=True, - os_release_file='', - distro_release_file='', - include_uname=True): + def __init__( + self, + include_lsb=True, + os_release_file="", + distro_release_file="", + include_uname=True, + root_dir=None, + ): + # type: (bool, str, str, bool, Optional[str]) -> None """ The initialization method of this class gathers information from the available data sources, and stores that in private instance attributes. @@ -618,6 +698,9 @@ def __init__(self, the program execution path the data source for the uname command will be empty. + * ``root_dir`` (string): The absolute path to the root directory to use + to find distro-related information files. + Public instance attributes: * ``os_release_file`` (string): The path name of the @@ -647,28 +730,50 @@ def __init__(self, * :py:exc:`UnicodeError`: A data source has unexpected characters or uses an unexpected encoding. 
""" - self.os_release_file = os_release_file or \ - os.path.join(_UNIXCONFDIR, _OS_RELEASE_BASENAME) - self.distro_release_file = distro_release_file or '' # updated later + self.root_dir = root_dir + self.etc_dir = os.path.join(root_dir, "etc") if root_dir else _UNIXCONFDIR + self.usr_lib_dir = ( + os.path.join(root_dir, "usr/lib") if root_dir else _UNIXUSRLIBDIR + ) + + if os_release_file: + self.os_release_file = os_release_file + else: + etc_dir_os_release_file = os.path.join(self.etc_dir, _OS_RELEASE_BASENAME) + usr_lib_os_release_file = os.path.join( + self.usr_lib_dir, _OS_RELEASE_BASENAME + ) + + # NOTE: The idea is to respect order **and** have it set + # at all times for API backwards compatibility. + if os.path.isfile(etc_dir_os_release_file) or not os.path.isfile( + usr_lib_os_release_file + ): + self.os_release_file = etc_dir_os_release_file + else: + self.os_release_file = usr_lib_os_release_file + + self.distro_release_file = distro_release_file or "" # updated later self.include_lsb = include_lsb self.include_uname = include_uname def __repr__(self): - """Return repr of all info - """ - return \ - "LinuxDistribution(" \ - "os_release_file={self.os_release_file!r}, " \ - "distro_release_file={self.distro_release_file!r}, " \ - "include_lsb={self.include_lsb!r}, " \ - "include_uname={self.include_uname!r}, " \ - "_os_release_info={self._os_release_info!r}, " \ - "_lsb_release_info={self._lsb_release_info!r}, " \ - "_distro_release_info={self._distro_release_info!r}, " \ - "_uname_info={self._uname_info!r})".format( - self=self) + # type: () -> str + """Return repr of all info""" + return ( + "LinuxDistribution(" + "os_release_file={self.os_release_file!r}, " + "distro_release_file={self.distro_release_file!r}, " + "include_lsb={self.include_lsb!r}, " + "include_uname={self.include_uname!r}, " + "_os_release_info={self._os_release_info!r}, " + "_lsb_release_info={self._lsb_release_info!r}, " + "_distro_release_info={self._distro_release_info!r}, " + "_uname_info={self._uname_info!r})".format(self=self) + ) def linux_distribution(self, full_distribution_name=True): + # type: (bool) -> Tuple[str, str, str] """ Return information about the OS distribution that is compatible with Python's :func:`platform.linux_distribution`, supporting a subset @@ -679,92 +784,102 @@ def linux_distribution(self, full_distribution_name=True): return ( self.name() if full_distribution_name else self.id(), self.version(), - self.codename() + self.codename(), ) def id(self): + # type: () -> str """Return the distro ID of the OS distribution, as a string. For details, see :func:`distro.id`. 
""" + def normalize(distro_id, table): - distro_id = distro_id.lower().replace(' ', '_') + # type: (str, Dict[str, str]) -> str + distro_id = distro_id.lower().replace(" ", "_") return table.get(distro_id, distro_id) - distro_id = self.os_release_attr('id') + distro_id = self.os_release_attr("id") if distro_id: return normalize(distro_id, NORMALIZED_OS_ID) - distro_id = self.lsb_release_attr('distributor_id') + distro_id = self.lsb_release_attr("distributor_id") if distro_id: return normalize(distro_id, NORMALIZED_LSB_ID) - distro_id = self.distro_release_attr('id') + distro_id = self.distro_release_attr("id") if distro_id: return normalize(distro_id, NORMALIZED_DISTRO_ID) - distro_id = self.uname_attr('id') + distro_id = self.uname_attr("id") if distro_id: return normalize(distro_id, NORMALIZED_DISTRO_ID) - return '' + return "" def name(self, pretty=False): + # type: (bool) -> str """ Return the name of the OS distribution, as a string. For details, see :func:`distro.name`. """ - name = self.os_release_attr('name') \ - or self.lsb_release_attr('distributor_id') \ - or self.distro_release_attr('name') \ - or self.uname_attr('name') + name = ( + self.os_release_attr("name") + or self.lsb_release_attr("distributor_id") + or self.distro_release_attr("name") + or self.uname_attr("name") + ) if pretty: - name = self.os_release_attr('pretty_name') \ - or self.lsb_release_attr('description') + name = self.os_release_attr("pretty_name") or self.lsb_release_attr( + "description" + ) if not name: - name = self.distro_release_attr('name') \ - or self.uname_attr('name') + name = self.distro_release_attr("name") or self.uname_attr("name") version = self.version(pretty=True) if version: - name = name + ' ' + version - return name or '' + name = name + " " + version + return name or "" def version(self, pretty=False, best=False): + # type: (bool, bool) -> str """ Return the version of the OS distribution, as a string. For details, see :func:`distro.version`. """ versions = [ - self.os_release_attr('version_id'), - self.lsb_release_attr('release'), - self.distro_release_attr('version_id'), - self._parse_distro_release_content( - self.os_release_attr('pretty_name')).get('version_id', ''), + self.os_release_attr("version_id"), + self.lsb_release_attr("release"), + self.distro_release_attr("version_id"), + self._parse_distro_release_content(self.os_release_attr("pretty_name")).get( + "version_id", "" + ), self._parse_distro_release_content( - self.lsb_release_attr('description')).get('version_id', ''), - self.uname_attr('release') + self.lsb_release_attr("description") + ).get("version_id", ""), + self.uname_attr("release"), ] - version = '' + version = "" if best: # This algorithm uses the last version in priority order that has # the best precision. If the versions are not in conflict, that # does not matter; otherwise, using the last one instead of the # first one might be considered a surprise. for v in versions: - if v.count(".") > version.count(".") or version == '': + if v.count(".") > version.count(".") or version == "": version = v else: for v in versions: - if v != '': + if v != "": version = v break if pretty and version and self.codename(): - version = '{0} ({1})'.format(version, self.codename()) + version = "{0} ({1})".format(version, self.codename()) return version def version_parts(self, best=False): + # type: (bool) -> Tuple[str, str, str] """ Return the version of the OS distribution, as a tuple of version numbers. 
@@ -773,14 +888,15 @@ def version_parts(self, best=False): """ version_str = self.version(best=best) if version_str: - version_regex = re.compile(r'(\d+)\.?(\d+)?\.?(\d+)?') + version_regex = re.compile(r"(\d+)\.?(\d+)?\.?(\d+)?") matches = version_regex.match(version_str) if matches: major, minor, build_number = matches.groups() - return major, minor or '', build_number or '' - return '', '', '' + return major, minor or "", build_number or "" + return "", "", "" def major_version(self, best=False): + # type: (bool) -> str """ Return the major version number of the current distribution. @@ -789,6 +905,7 @@ def major_version(self, best=False): return self.version_parts(best)[0] def minor_version(self, best=False): + # type: (bool) -> str """ Return the minor version number of the current distribution. @@ -797,6 +914,7 @@ def minor_version(self, best=False): return self.version_parts(best)[1] def build_number(self, best=False): + # type: (bool) -> str """ Return the build number of the current distribution. @@ -805,14 +923,16 @@ def build_number(self, best=False): return self.version_parts(best)[2] def like(self): + # type: () -> str """ Return the IDs of distributions that are like the OS distribution. For details, see :func:`distro.like`. """ - return self.os_release_attr('id_like') or '' + return self.os_release_attr("id_like") or "" def codename(self): + # type: () -> str """ Return the codename of the OS distribution. @@ -821,13 +941,16 @@ def codename(self): try: # Handle os_release specially since distros might purposefully set # this to empty string to have no codename - return self._os_release_info['codename'] + return self._os_release_info["codename"] except KeyError: - return self.lsb_release_attr('codename') \ - or self.distro_release_attr('codename') \ - or '' + return ( + self.lsb_release_attr("codename") + or self.distro_release_attr("codename") + or "" + ) def info(self, pretty=False, best=False): + # type: (bool, bool) -> InfoDict """ Return certain machine-readable information about the OS distribution. @@ -840,13 +963,14 @@ def info(self, pretty=False, best=False): version_parts=dict( major=self.major_version(best), minor=self.minor_version(best), - build_number=self.build_number(best) + build_number=self.build_number(best), ), like=self.like(), codename=self.codename(), ) def os_release_info(self): + # type: () -> Dict[str, str] """ Return a dictionary containing key-value pairs for the information items from the os-release file data source of the OS distribution. @@ -856,6 +980,7 @@ def os_release_info(self): return self._os_release_info def lsb_release_info(self): + # type: () -> Dict[str, str] """ Return a dictionary containing key-value pairs for the information items from the lsb_release command data source of the OS @@ -866,6 +991,7 @@ def lsb_release_info(self): return self._lsb_release_info def distro_release_info(self): + # type: () -> Dict[str, str] """ Return a dictionary containing key-value pairs for the information items from the distro release file data source of the OS @@ -876,6 +1002,7 @@ def distro_release_info(self): return self._distro_release_info def uname_info(self): + # type: () -> Dict[str, str] """ Return a dictionary containing key-value pairs for the information items from the uname command data source of the OS distribution. 
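`version_parts()` above relies on a regex in which the minor and build groups are optional, so one-, two-, and three-component version strings all match, with missing groups coming back as `None`. A quick illustration of the same pattern in isolation:

    import re

    _VERSION_RE = re.compile(r"(\d+)\.?(\d+)?\.?(\d+)?")

    def split_version(version_str):
        # Unmatched optional groups are None; normalize them to empty
        # strings, as the method above does before returning its 3-tuple.
        match = _VERSION_RE.match(version_str)
        if not match:
            return "", "", ""
        major, minor, build = match.groups()
        return major, minor or "", build or ""

    print(split_version("7"))         # ('7', '', '')
    print(split_version("20.04"))     # ('20', '04', '')
    print(split_version("8.5.2111"))  # ('8', '5', '2111')
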
@@ -885,43 +1012,48 @@ def uname_info(self): return self._uname_info def os_release_attr(self, attribute): + # type: (str) -> str """ Return a single named information item from the os-release file data source of the OS distribution. For details, see :func:`distro.os_release_attr`. """ - return self._os_release_info.get(attribute, '') + return self._os_release_info.get(attribute, "") def lsb_release_attr(self, attribute): + # type: (str) -> str """ Return a single named information item from the lsb_release command output data source of the OS distribution. For details, see :func:`distro.lsb_release_attr`. """ - return self._lsb_release_info.get(attribute, '') + return self._lsb_release_info.get(attribute, "") def distro_release_attr(self, attribute): + # type: (str) -> str """ Return a single named information item from the distro release file data source of the OS distribution. For details, see :func:`distro.distro_release_attr`. """ - return self._distro_release_info.get(attribute, '') + return self._distro_release_info.get(attribute, "") def uname_attr(self, attribute): + # type: (str) -> str """ Return a single named information item from the uname command output data source of the OS distribution. - For details, see :func:`distro.uname_release_attr`. + For details, see :func:`distro.uname_attr`. """ - return self._uname_info.get(attribute, '') + return self._uname_info.get(attribute, "") @cached_property def _os_release_info(self): + # type: () -> Dict[str, str] """ Get the information items from the specified os-release file. @@ -935,6 +1067,7 @@ def _os_release_info(self): @staticmethod def _parse_os_release_content(lines): + # type: (TextIO) -> Dict[str, str] """ Parse the lines of an os-release file. @@ -959,7 +1092,7 @@ def _parse_os_release_content(lines): # parsed content is a unicode object. The following fix resolves that # (... but it should be fixed in shlex...): if sys.version_info[0] == 2 and isinstance(lexer.wordchars, bytes): - lexer.wordchars = lexer.wordchars.decode('iso-8859-1') + lexer.wordchars = lexer.wordchars.decode("iso-8859-1") tokens = list(lexer) for token in tokens: @@ -969,37 +1102,38 @@ def _parse_os_release_content(lines): # stripped, etc.), so the tokens are now either: # * variable assignments: var=value # * commands or their arguments (not allowed in os-release) - if '=' in token: - k, v = token.split('=', 1) + if "=" in token: + k, v = token.split("=", 1) props[k.lower()] = v else: # Ignore any tokens that are not variable assignments pass - if 'version_codename' in props: + if "version_codename" in props: # os-release added a version_codename field. Use that in # preference to anything else Note that some distros purposefully # do not have code names. 
They should be setting # version_codename="" - props['codename'] = props['version_codename'] - elif 'ubuntu_codename' in props: + props["codename"] = props["version_codename"] + elif "ubuntu_codename" in props: # Same as above but a non-standard field name used on older Ubuntus - props['codename'] = props['ubuntu_codename'] - elif 'version' in props: + props["codename"] = props["ubuntu_codename"] + elif "version" in props: # If there is no version_codename, parse it from the version - codename = re.search(r'(\(\D+\))|,(\s+)?\D+', props['version']) - if codename: - codename = codename.group() - codename = codename.strip('()') - codename = codename.strip(',') + match = re.search(r"(\(\D+\))|,(\s+)?\D+", props["version"]) + if match: + codename = match.group() + codename = codename.strip("()") + codename = codename.strip(",") codename = codename.strip() # codename appears within paranthese. - props['codename'] = codename + props["codename"] = codename return props @cached_property def _lsb_release_info(self): + # type: () -> Dict[str, str] """ Get the information items from the lsb_release command output. @@ -1008,17 +1142,19 @@ def _lsb_release_info(self): """ if not self.include_lsb: return {} - with open(os.devnull, 'w') as devnull: + with open(os.devnull, "wb") as devnull: try: - cmd = ('lsb_release', '-a') + cmd = ("lsb_release", "-a") stdout = subprocess.check_output(cmd, stderr=devnull) - except OSError: # Command not found + # Command not found or lsb_release returned error + except (OSError, subprocess.CalledProcessError): return {} content = self._to_str(stdout).splitlines() return self._parse_lsb_release_content(content) @staticmethod def _parse_lsb_release_content(lines): + # type: (Iterable[str]) -> Dict[str, str] """ Parse the output of the lsb_release command. @@ -1033,19 +1169,20 @@ def _parse_lsb_release_content(lines): """ props = {} for line in lines: - kv = line.strip('\n').split(':', 1) + kv = line.strip("\n").split(":", 1) if len(kv) != 2: # Ignore lines without colon. continue k, v = kv - props.update({k.replace(' ', '_').lower(): v.strip()}) + props.update({k.replace(" ", "_").lower(): v.strip()}) return props @cached_property def _uname_info(self): - with open(os.devnull, 'w') as devnull: + # type: () -> Dict[str, str] + with open(os.devnull, "wb") as devnull: try: - cmd = ('uname', '-rs') + cmd = ("uname", "-rs") stdout = subprocess.check_output(cmd, stderr=devnull) except OSError: return {} @@ -1054,25 +1191,27 @@ def _uname_info(self): @staticmethod def _parse_uname_content(lines): + # type: (Sequence[str]) -> Dict[str, str] props = {} - match = re.search(r'^([^\s]+)\s+([\d\.]+)', lines[0].strip()) + match = re.search(r"^([^\s]+)\s+([\d\.]+)", lines[0].strip()) if match: name, version = match.groups() # This is to prevent the Linux kernel version from # appearing as the 'best' version on otherwise # identifiable distributions. 
- if name == 'Linux': + if name == "Linux": return {} - props['id'] = name.lower() - props['name'] = name - props['release'] = version + props["id"] = name.lower() + props["name"] = name + props["release"] = version return props @staticmethod def _to_str(text): + # type: (Union[bytes, str]) -> str encoding = sys.getfilesystemencoding() - encoding = 'utf-8' if encoding == 'ascii' else encoding + encoding = "utf-8" if encoding == "ascii" else encoding if sys.version_info[0] >= 3: if isinstance(text, bytes): @@ -1085,6 +1224,7 @@ def _to_str(text): @cached_property def _distro_release_info(self): + # type: () -> Dict[str, str] """ Get the information items from the specified distro release file. @@ -1094,23 +1234,21 @@ def _distro_release_info(self): if self.distro_release_file: # If it was specified, we use it and parse what we can, even if # its file name or content does not match the expected pattern. - distro_info = self._parse_distro_release_file( - self.distro_release_file) + distro_info = self._parse_distro_release_file(self.distro_release_file) basename = os.path.basename(self.distro_release_file) # The file name pattern for user-specified distro release files # is somewhat more tolerant (compared to when searching for the # file), because we want to use what was specified as best as # possible. match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) - if 'name' in distro_info \ - and 'cloudlinux' in distro_info['name'].lower(): - distro_info['id'] = 'cloudlinux' + if "name" in distro_info and "cloudlinux" in distro_info["name"].lower(): + distro_info["id"] = "cloudlinux" elif match: - distro_info['id'] = match.group(1) + distro_info["id"] = match.group(1) return distro_info else: try: - basenames = os.listdir(_UNIXCONFDIR) + basenames = os.listdir(self.etc_dir) # We sort for repeatability in cases where there are multiple # distro specific files; e.g. CentOS, Oracle, Enterprise all # containing `redhat-release` on top of their own. @@ -1120,38 +1258,41 @@ def _distro_release_info(self): # sure about the *-release files. Check common entries of # /etc for information. If they turn out to not be there the # error is handled in `_parse_distro_release_file()`. 
- basenames = ['SuSE-release', - 'arch-release', - 'base-release', - 'centos-release', - 'fedora-release', - 'gentoo-release', - 'mageia-release', - 'mandrake-release', - 'mandriva-release', - 'mandrivalinux-release', - 'manjaro-release', - 'oracle-release', - 'redhat-release', - 'sl-release', - 'slackware-version'] + basenames = [ + "SuSE-release", + "arch-release", + "base-release", + "centos-release", + "fedora-release", + "gentoo-release", + "mageia-release", + "mandrake-release", + "mandriva-release", + "mandrivalinux-release", + "manjaro-release", + "oracle-release", + "redhat-release", + "sl-release", + "slackware-version", + ] for basename in basenames: if basename in _DISTRO_RELEASE_IGNORE_BASENAMES: continue match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) if match: - filepath = os.path.join(_UNIXCONFDIR, basename) + filepath = os.path.join(self.etc_dir, basename) distro_info = self._parse_distro_release_file(filepath) - if 'name' in distro_info: + if "name" in distro_info: # The name is always present if the pattern matches self.distro_release_file = filepath - distro_info['id'] = match.group(1) - if 'cloudlinux' in distro_info['name'].lower(): - distro_info['id'] = 'cloudlinux' + distro_info["id"] = match.group(1) + if "cloudlinux" in distro_info["name"].lower(): + distro_info["id"] = "cloudlinux" return distro_info return {} def _parse_distro_release_file(self, filepath): + # type: (str) -> Dict[str, str] """ Parse a distro release file. @@ -1170,11 +1311,12 @@ def _parse_distro_release_file(self, filepath): except (OSError, IOError): # Ignore not being able to read a specific, seemingly version # related file. - # See https://github.com/nir0s/distro/issues/162 + # See https://github.com/python-distro/distro/issues/162 return {} @staticmethod def _parse_distro_release_content(line): + # type: (str) -> Dict[str, str] """ Parse a line from a distro release file. @@ -1185,18 +1327,17 @@ def _parse_distro_release_content(line): Returns: A dictionary containing all information items. 
""" - matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match( - line.strip()[::-1]) + matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(line.strip()[::-1]) distro_info = {} if matches: # regexp ensures non-None - distro_info['name'] = matches.group(3)[::-1] + distro_info["name"] = matches.group(3)[::-1] if matches.group(2): - distro_info['version_id'] = matches.group(2)[::-1] + distro_info["version_id"] = matches.group(2)[::-1] if matches.group(1): - distro_info['codename'] = matches.group(1)[::-1] + distro_info["codename"] = matches.group(1)[::-1] elif line: - distro_info['name'] = line.strip() + distro_info["name"] = line.strip() return distro_info @@ -1204,27 +1345,42 @@ def _parse_distro_release_content(line): def main(): + # type: () -> None logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) logger.addHandler(logging.StreamHandler(sys.stdout)) parser = argparse.ArgumentParser(description="OS distro info tool") parser.add_argument( - '--json', - '-j', - help="Output in machine readable format", - action="store_true") + "--json", "-j", help="Output in machine readable format", action="store_true" + ) + + parser.add_argument( + "--root-dir", + "-r", + type=str, + dest="root_dir", + help="Path to the root filesystem directory (defaults to /)", + ) + args = parser.parse_args() + if args.root_dir: + dist = LinuxDistribution( + include_lsb=False, include_uname=False, root_dir=args.root_dir + ) + else: + dist = _distro + if args.json: - logger.info(json.dumps(info(), indent=4, sort_keys=True)) + logger.info(json.dumps(dist.info(), indent=4, sort_keys=True)) else: - logger.info('Name: %s', name(pretty=True)) - distribution_version = version(pretty=True) - logger.info('Version: %s', distribution_version) - distribution_codename = codename() - logger.info('Codename: %s', distribution_codename) + logger.info("Name: %s", dist.name(pretty=True)) + distribution_version = dist.version(pretty=True) + logger.info("Version: %s", distribution_version) + distribution_codename = dist.codename() + logger.info("Codename: %s", distribution_codename) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/pipenv/patched/notpip/_vendor/html5lib/LICENSE b/pipenv/patched/notpip/_vendor/html5lib/LICENSE deleted file mode 100644 index c87fa7a000..0000000000 --- a/pipenv/patched/notpip/_vendor/html5lib/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2006-2013 James Graham and other contributors - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/pipenv/patched/notpip/_vendor/idna/codec.py b/pipenv/patched/notpip/_vendor/idna/codec.py index 080f22a3b5..1ca9ba62c2 100644 --- a/pipenv/patched/notpip/_vendor/idna/codec.py +++ b/pipenv/patched/notpip/_vendor/idna/codec.py @@ -7,8 +7,7 @@ class Codec(codecs.Codec): - def encode(self, data, errors='strict'): - # type: (str, str) -> Tuple[bytes, int] + def encode(self, data: str, errors: str = 'strict') -> Tuple[bytes, int]: if errors != 'strict': raise IDNAError('Unsupported error handling \"{}\"'.format(errors)) @@ -17,8 +16,7 @@ def encode(self, data, errors='strict'): return encode(data), len(data) - def decode(self, data, errors='strict'): - # type: (bytes, str) -> Tuple[str, int] + def decode(self, data: bytes, errors: str = 'strict') -> Tuple[str, int]: if errors != 'strict': raise IDNAError('Unsupported error handling \"{}\"'.format(errors)) @@ -28,8 +26,7 @@ def decode(self, data, errors='strict'): return decode(data), len(data) class IncrementalEncoder(codecs.BufferedIncrementalEncoder): - def _buffer_encode(self, data, errors, final): # type: ignore - # type: (str, str, bool) -> Tuple[str, int] + def _buffer_encode(self, data: str, errors: str, final: bool) -> Tuple[str, int]: # type: ignore if errors != 'strict': raise IDNAError('Unsupported error handling \"{}\"'.format(errors)) @@ -62,8 +59,7 @@ def _buffer_encode(self, data, errors, final): # type: ignore return result_str, size class IncrementalDecoder(codecs.BufferedIncrementalDecoder): - def _buffer_decode(self, data, errors, final): # type: ignore - # type: (str, str, bool) -> Tuple[str, int] + def _buffer_decode(self, data: str, errors: str, final: bool) -> Tuple[str, int]: # type: ignore if errors != 'strict': raise IDNAError('Unsupported error handling \"{}\"'.format(errors)) @@ -103,8 +99,7 @@ class StreamReader(Codec, codecs.StreamReader): pass -def getregentry(): - # type: () -> codecs.CodecInfo +def getregentry() -> codecs.CodecInfo: # Compatibility as a search_function for codecs.register() return codecs.CodecInfo( name='idna', diff --git a/pipenv/patched/notpip/_vendor/idna/compat.py b/pipenv/patched/notpip/_vendor/idna/compat.py index dc896c766c..786e6bda63 100644 --- a/pipenv/patched/notpip/_vendor/idna/compat.py +++ b/pipenv/patched/notpip/_vendor/idna/compat.py @@ -2,15 +2,12 @@ from .codec import * from typing import Any, Union -def ToASCII(label): - # type: (str) -> bytes +def ToASCII(label: str) -> bytes: return encode(label) -def ToUnicode(label): - # type: (Union[bytes, bytearray]) -> str +def ToUnicode(label: Union[bytes, bytearray]) -> str: return decode(label) -def nameprep(s): - # type: (Any) -> None +def nameprep(s: Any) -> None: raise NotImplementedError('IDNA 2008 does not utilise nameprep protocol') diff --git a/pipenv/patched/notpip/_vendor/idna/core.py b/pipenv/patched/notpip/_vendor/idna/core.py index d6051297d5..55ab967885 100644 --- a/pipenv/patched/notpip/_vendor/idna/core.py +++ b/pipenv/patched/notpip/_vendor/idna/core.py @@ -29,43 +29,36 @@ class InvalidCodepointContext(IDNAError): pass -def _combining_class(cp): - # type: (int) -> int +def _combining_class(cp: int) -> int: v = unicodedata.combining(chr(cp)) if v == 0: if not unicodedata.name(chr(cp)): raise ValueError('Unknown character in unicodedata') return v -def _is_script(cp, script): - # type: (str, str) -> bool +def _is_script(cp: str, script: str) -> bool: return intranges_contain(ord(cp), idnadata.scripts[script]) -def _punycode(s): - # type: (str) -> bytes +def _punycode(s: str) -> bytes: return 
s.encode('punycode') -def _unot(s): - # type: (int) -> str +def _unot(s: int) -> str: return 'U+{:04X}'.format(s) -def valid_label_length(label): - # type: (Union[bytes, str]) -> bool +def valid_label_length(label: Union[bytes, str]) -> bool: if len(label) > 63: return False return True -def valid_string_length(label, trailing_dot): - # type: (Union[bytes, str], bool) -> bool +def valid_string_length(label: Union[bytes, str], trailing_dot: bool) -> bool: if len(label) > (254 if trailing_dot else 253): return False return True -def check_bidi(label, check_ltr=False): - # type: (str, bool) -> bool +def check_bidi(label: str, check_ltr: bool = False) -> bool: # Bidi rules should only be applied if string contains RTL characters bidi_label = False for (idx, cp) in enumerate(label, 1): @@ -124,15 +117,13 @@ def check_bidi(label, check_ltr=False): return True -def check_initial_combiner(label): - # type: (str) -> bool +def check_initial_combiner(label: str) -> bool: if unicodedata.category(label[0])[0] == 'M': raise IDNAError('Label begins with an illegal combining character') return True -def check_hyphen_ok(label): - # type: (str) -> bool +def check_hyphen_ok(label: str) -> bool: if label[2:4] == '--': raise IDNAError('Label has disallowed hyphens in 3rd and 4th position') if label[0] == '-' or label[-1] == '-': @@ -140,14 +131,12 @@ def check_hyphen_ok(label): return True -def check_nfc(label): - # type: (str) -> None +def check_nfc(label: str) -> None: if unicodedata.normalize('NFC', label) != label: raise IDNAError('Label must be in Normalization Form C') -def valid_contextj(label, pos): - # type: (str, int) -> bool +def valid_contextj(label: str, pos: int) -> bool: cp_value = ord(label[pos]) if cp_value == 0x200c: @@ -190,8 +179,7 @@ def valid_contextj(label, pos): return False -def valid_contexto(label, pos, exception=False): - # type: (str, int, bool) -> bool +def valid_contexto(label: str, pos: int, exception: bool = False) -> bool: cp_value = ord(label[pos]) if cp_value == 0x00b7: @@ -233,8 +221,7 @@ def valid_contexto(label, pos, exception=False): return False -def check_label(label): - # type: (Union[str, bytes, bytearray]) -> None +def check_label(label: Union[str, bytes, bytearray]) -> None: if isinstance(label, (bytes, bytearray)): label = label.decode('utf-8') if len(label) == 0: @@ -265,8 +252,7 @@ def check_label(label): check_bidi(label) -def alabel(label): - # type: (str) -> bytes +def alabel(label: str) -> bytes: try: label_bytes = label.encode('ascii') ulabel(label_bytes) @@ -290,8 +276,7 @@ def alabel(label): return label_bytes -def ulabel(label): - # type: (Union[str, bytes, bytearray]) -> str +def ulabel(label: Union[str, bytes, bytearray]) -> str: if not isinstance(label, (bytes, bytearray)): try: label_bytes = label.encode('ascii') @@ -312,13 +297,15 @@ def ulabel(label): check_label(label_bytes) return label_bytes.decode('ascii') - label = label_bytes.decode('punycode') + try: + label = label_bytes.decode('punycode') + except UnicodeError: + raise IDNAError('Invalid A-label') check_label(label) return label -def uts46_remap(domain, std3_rules=True, transitional=False): - # type: (str, bool, bool) -> str +def uts46_remap(domain: str, std3_rules: bool = True, transitional: bool = False) -> str: """Re-map the characters in the string according to UTS46 processing.""" from .uts46data import uts46data output = '' @@ -350,8 +337,7 @@ def uts46_remap(domain, std3_rules=True, transitional=False): return unicodedata.normalize('NFC', output) -def encode(s, strict=False, 
uts46=False, std3_rules=False, transitional=False): - # type: (Union[str, bytes, bytearray], bool, bool, bool, bool) -> bytes +def encode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool = False, std3_rules: bool = False, transitional: bool = False) -> bytes: if isinstance(s, (bytes, bytearray)): s = s.decode('ascii') if uts46: @@ -381,10 +367,12 @@ def encode(s, strict=False, uts46=False, std3_rules=False, transitional=False): return s -def decode(s, strict=False, uts46=False, std3_rules=False): - # type: (Union[str, bytes, bytearray], bool, bool, bool) -> str - if isinstance(s, (bytes, bytearray)): - s = s.decode('ascii') +def decode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool = False, std3_rules: bool = False) -> str: + try: + if isinstance(s, (bytes, bytearray)): + s = s.decode('ascii') + except UnicodeDecodeError: + raise IDNAError('Invalid ASCII in A-label') if uts46: s = uts46_remap(s, std3_rules, False) trailing_dot = False diff --git a/pipenv/patched/notpip/_vendor/idna/idnadata.py b/pipenv/patched/notpip/_vendor/idna/idnadata.py index b86a3e06e4..1b5805d15e 100644 --- a/pipenv/patched/notpip/_vendor/idna/idnadata.py +++ b/pipenv/patched/notpip/_vendor/idna/idnadata.py @@ -1,6 +1,6 @@ # This file is automatically generated by tools/idna-data -__version__ = '13.0.0' +__version__ = '14.0.0' scripts = { 'Greek': ( 0x37000000374, @@ -49,12 +49,13 @@ 0x30210000302a, 0x30380000303c, 0x340000004dc0, - 0x4e0000009ffd, + 0x4e000000a000, 0xf9000000fa6e, 0xfa700000fada, + 0x16fe200016fe4, 0x16ff000016ff2, - 0x200000002a6de, - 0x2a7000002b735, + 0x200000002a6e0, + 0x2a7000002b739, 0x2b7400002b81e, 0x2b8200002cea2, 0x2ceb00002ebe1, @@ -75,7 +76,7 @@ 'Hiragana': ( 0x304100003097, 0x309d000030a0, - 0x1b0010001b11f, + 0x1b0010001b120, 0x1b1500001b153, 0x1f2000001f201, ), @@ -87,7 +88,11 @@ 0x330000003358, 0xff660000ff70, 0xff710000ff9e, + 0x1aff00001aff4, + 0x1aff50001affc, + 0x1affd0001afff, 0x1b0000001b001, + 0x1b1200001b123, 0x1b1640001b168, ), } @@ -405,6 +410,39 @@ 0x868: 68, 0x869: 82, 0x86a: 82, + 0x870: 82, + 0x871: 82, + 0x872: 82, + 0x873: 82, + 0x874: 82, + 0x875: 82, + 0x876: 82, + 0x877: 82, + 0x878: 82, + 0x879: 82, + 0x87a: 82, + 0x87b: 82, + 0x87c: 82, + 0x87d: 82, + 0x87e: 82, + 0x87f: 82, + 0x880: 82, + 0x881: 82, + 0x882: 82, + 0x883: 67, + 0x884: 67, + 0x885: 67, + 0x886: 68, + 0x887: 85, + 0x888: 85, + 0x889: 68, + 0x88a: 68, + 0x88b: 68, + 0x88c: 68, + 0x88d: 68, + 0x88e: 82, + 0x890: 85, + 0x891: 85, 0x8a0: 68, 0x8a1: 68, 0x8a2: 68, @@ -426,6 +464,7 @@ 0x8b2: 82, 0x8b3: 68, 0x8b4: 68, + 0x8b5: 68, 0x8b6: 68, 0x8b7: 68, 0x8b8: 68, @@ -444,6 +483,7 @@ 0x8c5: 68, 0x8c6: 68, 0x8c7: 68, + 0x8c8: 68, 0x8e2: 85, 0x1806: 85, 0x1807: 68, @@ -768,6 +808,24 @@ 0x10f52: 68, 0x10f53: 68, 0x10f54: 82, + 0x10f70: 68, + 0x10f71: 68, + 0x10f72: 68, + 0x10f73: 68, + 0x10f74: 82, + 0x10f75: 82, + 0x10f76: 68, + 0x10f77: 68, + 0x10f78: 68, + 0x10f79: 68, + 0x10f7a: 68, + 0x10f7b: 68, + 0x10f7c: 68, + 0x10f7d: 68, + 0x10f7e: 68, + 0x10f7f: 68, + 0x10f80: 68, + 0x10f81: 68, 0x10fb0: 68, 0x10fb1: 85, 0x10fb2: 68, @@ -1168,9 +1226,9 @@ 0x8000000082e, 0x8400000085c, 0x8600000086b, - 0x8a0000008b5, - 0x8b6000008c8, - 0x8d3000008e2, + 0x87000000888, + 0x8890000088f, + 0x898000008e2, 0x8e300000958, 0x96000000964, 0x96600000970, @@ -1252,11 +1310,12 @@ 0xc0e00000c11, 0xc1200000c29, 0xc2a00000c3a, - 0xc3d00000c45, + 0xc3c00000c45, 0xc4600000c49, 0xc4a00000c4e, 0xc5500000c57, 0xc5800000c5b, + 0xc5d00000c5e, 0xc6000000c64, 0xc6600000c70, 0xc8000000c84, @@ -1269,7 
+1328,7 @@ 0xcc600000cc9, 0xcca00000cce, 0xcd500000cd7, - 0xcde00000cdf, + 0xcdd00000cdf, 0xce000000ce4, 0xce600000cf0, 0xcf100000cf3, @@ -1366,9 +1425,8 @@ 0x16810000169b, 0x16a0000016eb, 0x16f1000016f9, - 0x17000000170d, - 0x170e00001715, - 0x172000001735, + 0x170000001716, + 0x171f00001735, 0x174000001754, 0x17600000176d, 0x176e00001771, @@ -1397,8 +1455,8 @@ 0x1a9000001a9a, 0x1aa700001aa8, 0x1ab000001abe, - 0x1abf00001ac1, - 0x1b0000001b4c, + 0x1abf00001acf, + 0x1b0000001b4d, 0x1b5000001b5a, 0x1b6b00001b74, 0x1b8000001bf4, @@ -1413,8 +1471,7 @@ 0x1d4e00001d4f, 0x1d6b00001d78, 0x1d7900001d9b, - 0x1dc000001dfa, - 0x1dfb00001e00, + 0x1dc000001e00, 0x1e0100001e02, 0x1e0300001e04, 0x1e0500001e06, @@ -1563,7 +1620,7 @@ 0x1ff600001ff7, 0x214e0000214f, 0x218400002185, - 0x2c3000002c5f, + 0x2c3000002c60, 0x2c6100002c62, 0x2c6500002c67, 0x2c6800002c69, @@ -1652,8 +1709,7 @@ 0x31a0000031c0, 0x31f000003200, 0x340000004dc0, - 0x4e0000009ffd, - 0xa0000000a48d, + 0x4e000000a48d, 0xa4d00000a4fe, 0xa5000000a60d, 0xa6100000a62c, @@ -1766,9 +1822,16 @@ 0xa7bb0000a7bc, 0xa7bd0000a7be, 0xa7bf0000a7c0, + 0xa7c10000a7c2, 0xa7c30000a7c4, 0xa7c80000a7c9, 0xa7ca0000a7cb, + 0xa7d10000a7d2, + 0xa7d30000a7d4, + 0xa7d50000a7d6, + 0xa7d70000a7d8, + 0xa7d90000a7da, + 0xa7f20000a7f5, 0xa7f60000a7f8, 0xa7fa0000a828, 0xa82c0000a82d, @@ -1834,9 +1897,16 @@ 0x104d8000104fc, 0x1050000010528, 0x1053000010564, + 0x10597000105a2, + 0x105a3000105b2, + 0x105b3000105ba, + 0x105bb000105bd, 0x1060000010737, 0x1074000010756, 0x1076000010768, + 0x1078000010786, + 0x10787000107b1, + 0x107b2000107bb, 0x1080000010806, 0x1080800010809, 0x1080a00010836, @@ -1876,11 +1946,13 @@ 0x10f0000010f1d, 0x10f2700010f28, 0x10f3000010f51, + 0x10f7000010f86, 0x10fb000010fc5, 0x10fe000010ff7, 0x1100000011047, - 0x1106600011070, + 0x1106600011076, 0x1107f000110bb, + 0x110c2000110c3, 0x110d0000110e9, 0x110f0000110fa, 0x1110000011135, @@ -1934,6 +2006,7 @@ 0x117000001171b, 0x1171d0001172c, 0x117300001173a, + 0x1174000011747, 0x118000001183b, 0x118c0000118ea, 0x118ff00011907, @@ -1952,7 +2025,7 @@ 0x11a4700011a48, 0x11a5000011a9a, 0x11a9d00011a9e, - 0x11ac000011af9, + 0x11ab000011af9, 0x11c0000011c09, 0x11c0a00011c37, 0x11c3800011c41, @@ -1977,11 +2050,14 @@ 0x11fb000011fb1, 0x120000001239a, 0x1248000012544, + 0x12f9000012ff1, 0x130000001342f, 0x1440000014647, 0x1680000016a39, 0x16a4000016a5f, 0x16a6000016a6a, + 0x16a7000016abf, + 0x16ac000016aca, 0x16ad000016aee, 0x16af000016af5, 0x16b0000016b37, @@ -1999,7 +2075,10 @@ 0x17000000187f8, 0x1880000018cd6, 0x18d0000018d09, - 0x1b0000001b11f, + 0x1aff00001aff4, + 0x1aff50001affc, + 0x1affd0001afff, + 0x1b0000001b123, 0x1b1500001b153, 0x1b1640001b168, 0x1b1700001b2fc, @@ -2008,12 +2087,15 @@ 0x1bc800001bc89, 0x1bc900001bc9a, 0x1bc9d0001bc9f, + 0x1cf000001cf2e, + 0x1cf300001cf47, 0x1da000001da37, 0x1da3b0001da6d, 0x1da750001da76, 0x1da840001da85, 0x1da9b0001daa0, 0x1daa10001dab0, + 0x1df000001df1f, 0x1e0000001e007, 0x1e0080001e019, 0x1e01b0001e022, @@ -2023,14 +2105,19 @@ 0x1e1300001e13e, 0x1e1400001e14a, 0x1e14e0001e14f, + 0x1e2900001e2af, 0x1e2c00001e2fa, + 0x1e7e00001e7e7, + 0x1e7e80001e7ec, + 0x1e7ed0001e7ef, + 0x1e7f00001e7ff, 0x1e8000001e8c5, 0x1e8d00001e8d7, 0x1e9220001e94c, 0x1e9500001e95a, 0x1fbf00001fbfa, - 0x200000002a6de, - 0x2a7000002b735, + 0x200000002a6e0, + 0x2a7000002b739, 0x2b7400002b81e, 0x2b8200002cea2, 0x2ceb00002ebe1, diff --git a/pipenv/patched/notpip/_vendor/idna/intranges.py b/pipenv/patched/notpip/_vendor/idna/intranges.py index ee364a9048..6a43b04753 100644 --- 
a/pipenv/patched/notpip/_vendor/idna/intranges.py +++ b/pipenv/patched/notpip/_vendor/idna/intranges.py @@ -8,8 +8,7 @@ import bisect from typing import List, Tuple -def intranges_from_list(list_): - # type: (List[int]) -> Tuple[int, ...] +def intranges_from_list(list_: List[int]) -> Tuple[int, ...]: """Represent a list of integers as a sequence of ranges: ((start_0, end_0), (start_1, end_1), ...), such that the original integers are exactly those x such that start_i <= x < end_i for some i. @@ -30,17 +29,14 @@ def intranges_from_list(list_): return tuple(ranges) -def _encode_range(start, end): - # type: (int, int) -> int +def _encode_range(start: int, end: int) -> int: return (start << 32) | end -def _decode_range(r): - # type: (int) -> Tuple[int, int] +def _decode_range(r: int) -> Tuple[int, int]: return (r >> 32), (r & ((1 << 32) - 1)) -def intranges_contain(int_, ranges): - # type: (int, Tuple[int, ...]) -> bool +def intranges_contain(int_: int, ranges: Tuple[int, ...]) -> bool: """Determine if `int_` falls into one of the ranges in `ranges`.""" tuple_ = _encode_range(int_, 0) pos = bisect.bisect_left(ranges, tuple_) diff --git a/pipenv/patched/notpip/_vendor/idna/package_data.py b/pipenv/patched/notpip/_vendor/idna/package_data.py index e096d1d52d..f5ea87c12b 100644 --- a/pipenv/patched/notpip/_vendor/idna/package_data.py +++ b/pipenv/patched/notpip/_vendor/idna/package_data.py @@ -1,2 +1,2 @@ -__version__ = '3.2' +__version__ = '3.3' diff --git a/pipenv/patched/notpip/_vendor/idna/uts46data.py b/pipenv/patched/notpip/_vendor/idna/uts46data.py index f382ce389e..8f65705ee9 100644 --- a/pipenv/patched/notpip/_vendor/idna/uts46data.py +++ b/pipenv/patched/notpip/_vendor/idna/uts46data.py @@ -1,13 +1,14 @@ # This file is automatically generated by tools/idna-data +# vim: set fileencoding=utf-8 : from typing import List, Tuple, Union + """IDNA Mapping Table from UTS46.""" -__version__ = '13.0.0' -def _seg_0(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] +__version__ = '14.0.0' +def _seg_0() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0x0, '3'), (0x1, '3'), @@ -111,8 +112,7 @@ def _seg_0(): (0x63, 'V'), ] -def _seg_1(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] +def _seg_1() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0x64, 'V'), (0x65, 'V'), @@ -216,8 +216,7 @@ def _seg_1(): (0xC7, 'M', 'ç'), ] -def _seg_2(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] +def _seg_2() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0xC8, 'M', 'è'), (0xC9, 'M', 'é'), @@ -321,8 +320,7 @@ def _seg_2(): (0x12B, 'V'), ] -def _seg_3(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] +def _seg_3() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0x12C, 'M', 'ĭ'), (0x12D, 'V'), @@ -426,8 +424,7 @@ def _seg_3(): (0x193, 'M', 'ɠ'), ] -def _seg_4(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] +def _seg_4() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0x194, 'M', 'ɣ'), (0x195, 'V'), @@ -531,8 +528,7 @@ def _seg_4(): (0x20C, 'M', 'ȍ'), ] -def _seg_5(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] +def _seg_5() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0x20D, 'V'), (0x20E, 'M', 'ȏ'), @@ -636,8 +632,7 @@ def _seg_5(): (0x377, 'V'), ] -def _seg_6(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] +def _seg_6() -> List[Union[Tuple[int, str], Tuple[int, str, 
str]]]: return [ (0x378, 'X'), (0x37A, '3', ' ι'), @@ -741,8 +736,7 @@ def _seg_6(): (0x402, 'M', 'ђ'), ] -def _seg_7(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] +def _seg_7() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0x403, 'M', 'ѓ'), (0x404, 'M', 'є'), @@ -846,8 +840,7 @@ def _seg_7(): (0x49D, 'V'), ] -def _seg_8(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] +def _seg_8() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0x49E, 'M', 'ҟ'), (0x49F, 'V'), @@ -951,8 +944,7 @@ def _seg_8(): (0x502, 'M', 'ԃ'), ] -def _seg_9(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] +def _seg_9() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0x503, 'V'), (0x504, 'M', 'ԅ'), @@ -1053,11 +1045,10 @@ def _seg_9(): (0x5F5, 'X'), (0x606, 'V'), (0x61C, 'X'), - (0x61E, 'V'), + (0x61D, 'V'), ] -def _seg_10(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] +def _seg_10() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0x675, 'M', 'اٴ'), (0x676, 'M', 'وٴ'), @@ -1083,11 +1074,9 @@ def _seg_10(): (0x85F, 'X'), (0x860, 'V'), (0x86B, 'X'), - (0x8A0, 'V'), - (0x8B5, 'X'), - (0x8B6, 'V'), - (0x8C8, 'X'), - (0x8D3, 'V'), + (0x870, 'V'), + (0x88F, 'X'), + (0x898, 'V'), (0x8E2, 'X'), (0x8E3, 'V'), (0x958, 'M', 'क़'), @@ -1159,13 +1148,12 @@ def _seg_10(): (0xA59, 'M', 'ਖ਼'), (0xA5A, 'M', 'ਗ਼'), (0xA5B, 'M', 'ਜ਼'), + (0xA5C, 'V'), + (0xA5D, 'X'), ] -def _seg_11(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] +def _seg_11() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ - (0xA5C, 'V'), - (0xA5D, 'X'), (0xA5E, 'M', 'ਫ਼'), (0xA5F, 'X'), (0xA66, 'V'), @@ -1264,15 +1252,14 @@ def _seg_11(): (0xC0E, 'V'), (0xC11, 'X'), (0xC12, 'V'), + (0xC29, 'X'), + (0xC2A, 'V'), ] -def _seg_12(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] +def _seg_12() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ - (0xC29, 'X'), - (0xC2A, 'V'), (0xC3A, 'X'), - (0xC3D, 'V'), + (0xC3C, 'V'), (0xC45, 'X'), (0xC46, 'V'), (0xC49, 'X'), @@ -1282,6 +1269,8 @@ def _seg_12(): (0xC57, 'X'), (0xC58, 'V'), (0xC5B, 'X'), + (0xC5D, 'V'), + (0xC5E, 'X'), (0xC60, 'V'), (0xC64, 'X'), (0xC66, 'V'), @@ -1304,7 +1293,7 @@ def _seg_12(): (0xCCE, 'X'), (0xCD5, 'V'), (0xCD7, 'X'), - (0xCDE, 'V'), + (0xCDD, 'V'), (0xCDF, 'X'), (0xCE0, 'V'), (0xCE4, 'X'), @@ -1371,8 +1360,7 @@ def _seg_12(): (0xEB4, 'V'), ] -def _seg_13(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] +def _seg_13() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0xEBE, 'X'), (0xEC0, 'V'), @@ -1476,8 +1464,7 @@ def _seg_13(): (0x1312, 'V'), ] -def _seg_14(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] +def _seg_14() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0x1316, 'X'), (0x1318, 'V'), @@ -1502,10 +1489,8 @@ def _seg_14(): (0x16A0, 'V'), (0x16F9, 'X'), (0x1700, 'V'), - (0x170D, 'X'), - (0x170E, 'V'), - (0x1715, 'X'), - (0x1720, 'V'), + (0x1716, 'X'), + (0x171F, 'V'), (0x1737, 'X'), (0x1740, 'V'), (0x1754, 'X'), @@ -1528,6 +1513,7 @@ def _seg_14(): (0x1807, 'V'), (0x180B, 'I'), (0x180E, 'X'), + (0x180F, 'I'), (0x1810, 'V'), (0x181A, 'X'), (0x1820, 'V'), @@ -1567,11 +1553,11 @@ def _seg_14(): (0x1AA0, 'V'), (0x1AAE, 'X'), (0x1AB0, 'V'), - (0x1AC1, 'X'), + (0x1ACF, 'X'), (0x1B00, 'V'), - (0x1B4C, 'X'), + (0x1B4D, 'X'), (0x1B50, 'V'), - (0x1B7D, 'X'), + (0x1B7F, 'X'), (0x1B80, 'V'), (0x1BF4, 'X'), 
(0x1BFC, 'V'), @@ -1579,12 +1565,11 @@ def _seg_14(): (0x1C3B, 'V'), (0x1C4A, 'X'), (0x1C4D, 'V'), + (0x1C80, 'M', 'в'), ] -def _seg_15(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] +def _seg_15() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ - (0x1C80, 'M', 'в'), (0x1C81, 'M', 'д'), (0x1C82, 'M', 'о'), (0x1C83, 'M', 'с'), @@ -1684,12 +1669,11 @@ def _seg_15(): (0x1D50, 'M', 'm'), (0x1D51, 'M', 'ŋ'), (0x1D52, 'M', 'o'), + (0x1D53, 'M', 'ɔ'), ] -def _seg_16(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] +def _seg_16() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ - (0x1D53, 'M', 'ɔ'), (0x1D54, 'M', 'ᴖ'), (0x1D55, 'M', 'ᴗ'), (0x1D56, 'M', 'p'), @@ -1754,8 +1738,6 @@ def _seg_16(): (0x1DBE, 'M', 'ʒ'), (0x1DBF, 'M', 'θ'), (0x1DC0, 'V'), - (0x1DFA, 'X'), - (0x1DFB, 'V'), (0x1E00, 'M', 'ḁ'), (0x1E01, 'V'), (0x1E02, 'M', 'ḃ'), @@ -1789,14 +1771,13 @@ def _seg_16(): (0x1E1E, 'M', 'ḟ'), (0x1E1F, 'V'), (0x1E20, 'M', 'ḡ'), - ] - -def _seg_17(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x1E21, 'V'), (0x1E22, 'M', 'ḣ'), (0x1E23, 'V'), + ] + +def _seg_17() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1E24, 'M', 'ḥ'), (0x1E25, 'V'), (0x1E26, 'M', 'ḧ'), @@ -1894,14 +1875,13 @@ def _seg_17(): (0x1E82, 'M', 'ẃ'), (0x1E83, 'V'), (0x1E84, 'M', 'ẅ'), - ] - -def _seg_18(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x1E85, 'V'), (0x1E86, 'M', 'ẇ'), (0x1E87, 'V'), + ] + +def _seg_18() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1E88, 'M', 'ẉ'), (0x1E89, 'V'), (0x1E8A, 'M', 'ẋ'), @@ -1999,14 +1979,13 @@ def _seg_18(): (0x1EEB, 'V'), (0x1EEC, 'M', 'ử'), (0x1EED, 'V'), - ] - -def _seg_19(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x1EEE, 'M', 'ữ'), (0x1EEF, 'V'), (0x1EF0, 'M', 'ự'), + ] + +def _seg_19() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1EF1, 'V'), (0x1EF2, 'M', 'ỳ'), (0x1EF3, 'V'), @@ -2104,14 +2083,13 @@ def _seg_19(): (0x1F82, 'M', 'ἂι'), (0x1F83, 'M', 'ἃι'), (0x1F84, 'M', 'ἄι'), - ] - -def _seg_20(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x1F85, 'M', 'ἅι'), (0x1F86, 'M', 'ἆι'), (0x1F87, 'M', 'ἇι'), + ] + +def _seg_20() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1F88, 'M', 'ἀι'), (0x1F89, 'M', 'ἁι'), (0x1F8A, 'M', 'ἂι'), @@ -2209,14 +2187,13 @@ def _seg_20(): (0x1FF0, 'X'), (0x1FF2, 'M', 'ὼι'), (0x1FF3, 'M', 'ωι'), - ] - -def _seg_21(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x1FF4, 'M', 'ώι'), (0x1FF5, 'X'), (0x1FF6, 'V'), + ] + +def _seg_21() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1FF7, 'M', 'ῶι'), (0x1FF8, 'M', 'ὸ'), (0x1FF9, 'M', 'ό'), @@ -2309,19 +2286,18 @@ def _seg_21(): (0x20A0, 'V'), (0x20A8, 'M', 'rs'), (0x20A9, 'V'), - (0x20C0, 'X'), + (0x20C1, 'X'), (0x20D0, 'V'), (0x20F1, 'X'), (0x2100, '3', 'a/c'), (0x2101, '3', 'a/s'), - ] - -def _seg_22(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x2102, 'M', 'c'), (0x2103, 'M', '°c'), (0x2104, 'V'), + ] + +def _seg_22() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x2105, '3', 'c/o'), (0x2106, '3', 'c/u'), (0x2107, 'M', 'ɛ'), @@ -2419,14 +2395,13 @@ def _seg_22(): (0x2177, 'M', 'viii'), (0x2178, 'M', 'ix'), (0x2179, 'M', 'x'), - ] - -def _seg_23(): - # type: () -> List[Union[Tuple[int, str], 
Tuple[int, str, str]]] - return [ (0x217A, 'M', 'xi'), (0x217B, 'M', 'xii'), (0x217C, 'M', 'l'), + ] + +def _seg_23() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x217D, 'M', 'c'), (0x217E, 'M', 'd'), (0x217F, 'M', 'm'), @@ -2524,14 +2499,13 @@ def _seg_23(): (0x24B7, 'M', 'b'), (0x24B8, 'M', 'c'), (0x24B9, 'M', 'd'), - ] - -def _seg_24(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x24BA, 'M', 'e'), (0x24BB, 'M', 'f'), (0x24BC, 'M', 'g'), + ] + +def _seg_24() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x24BD, 'M', 'h'), (0x24BE, 'M', 'i'), (0x24BF, 'M', 'j'), @@ -2629,23 +2603,21 @@ def _seg_24(): (0x2C23, 'M', 'ⱓ'), (0x2C24, 'M', 'ⱔ'), (0x2C25, 'M', 'ⱕ'), - ] - -def _seg_25(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x2C26, 'M', 'ⱖ'), (0x2C27, 'M', 'ⱗ'), (0x2C28, 'M', 'ⱘ'), + ] + +def _seg_25() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x2C29, 'M', 'ⱙ'), (0x2C2A, 'M', 'ⱚ'), (0x2C2B, 'M', 'ⱛ'), (0x2C2C, 'M', 'ⱜ'), (0x2C2D, 'M', 'ⱝ'), (0x2C2E, 'M', 'ⱞ'), - (0x2C2F, 'X'), + (0x2C2F, 'M', 'ⱟ'), (0x2C30, 'V'), - (0x2C5F, 'X'), (0x2C60, 'M', 'ⱡ'), (0x2C61, 'V'), (0x2C62, 'M', 'ɫ'), @@ -2734,15 +2706,14 @@ def _seg_25(): (0x2CBC, 'M', 'ⲽ'), (0x2CBD, 'V'), (0x2CBE, 'M', 'ⲿ'), - ] - -def _seg_26(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x2CBF, 'V'), (0x2CC0, 'M', 'ⳁ'), (0x2CC1, 'V'), (0x2CC2, 'M', 'ⳃ'), + ] + +def _seg_26() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x2CC3, 'V'), (0x2CC4, 'M', 'ⳅ'), (0x2CC5, 'V'), @@ -2813,7 +2784,7 @@ def _seg_26(): (0x2DD8, 'V'), (0x2DDF, 'X'), (0x2DE0, 'V'), - (0x2E53, 'X'), + (0x2E5E, 'X'), (0x2E80, 'V'), (0x2E9A, 'X'), (0x2E9B, 'V'), @@ -2839,15 +2810,14 @@ def _seg_26(): (0x2F0F, 'M', '几'), (0x2F10, 'M', '凵'), (0x2F11, 'M', '刀'), - ] - -def _seg_27(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x2F12, 'M', '力'), (0x2F13, 'M', '勹'), (0x2F14, 'M', '匕'), (0x2F15, 'M', '匚'), + ] + +def _seg_27() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x2F16, 'M', '匸'), (0x2F17, 'M', '十'), (0x2F18, 'M', '卜'), @@ -2944,15 +2914,14 @@ def _seg_27(): (0x2F73, 'M', '穴'), (0x2F74, 'M', '立'), (0x2F75, 'M', '竹'), - ] - -def _seg_28(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x2F76, 'M', '米'), (0x2F77, 'M', '糸'), (0x2F78, 'M', '缶'), (0x2F79, 'M', '网'), + ] + +def _seg_28() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x2F7A, 'M', '羊'), (0x2F7B, 'M', '羽'), (0x2F7C, 'M', '老'), @@ -3049,15 +3018,14 @@ def _seg_28(): (0x3000, '3', ' '), (0x3001, 'V'), (0x3002, 'M', '.'), - ] - -def _seg_29(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x3003, 'V'), (0x3036, 'M', '〒'), (0x3037, 'V'), (0x3038, 'M', '十'), + ] + +def _seg_29() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x3039, 'M', '卄'), (0x303A, 'M', '卅'), (0x303B, 'V'), @@ -3154,15 +3122,14 @@ def _seg_29(): (0x317E, 'M', 'ᄶ'), (0x317F, 'M', 'ᅀ'), (0x3180, 'M', 'ᅇ'), - ] - -def _seg_30(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x3181, 'M', 'ᅌ'), (0x3182, 'M', 'ᇱ'), (0x3183, 'M', 'ᇲ'), (0x3184, 'M', 'ᅗ'), + ] + +def _seg_30() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x3185, 'M', 'ᅘ'), (0x3186, 'M', 'ᅙ'), (0x3187, 'M', 'ᆄ'), @@ -3259,15 +3226,14 @@ def _seg_30(): (0x3240, '3', '(祭)'), 
(0x3241, '3', '(休)'), (0x3242, '3', '(自)'), - ] - -def _seg_31(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x3243, '3', '(至)'), (0x3244, 'M', '問'), (0x3245, 'M', '幼'), (0x3246, 'M', '文'), + ] + +def _seg_31() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x3247, 'M', '箏'), (0x3248, 'V'), (0x3250, 'M', 'pte'), @@ -3364,15 +3330,14 @@ def _seg_31(): (0x32AB, 'M', '学'), (0x32AC, 'M', '監'), (0x32AD, 'M', '企'), - ] - -def _seg_32(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x32AE, 'M', '資'), (0x32AF, 'M', '協'), (0x32B0, 'M', '夜'), (0x32B1, 'M', '36'), + ] + +def _seg_32() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x32B2, 'M', '37'), (0x32B3, 'M', '38'), (0x32B4, 'M', '39'), @@ -3469,15 +3434,14 @@ def _seg_32(): (0x330F, 'M', 'ガンマ'), (0x3310, 'M', 'ギガ'), (0x3311, 'M', 'ギニー'), - ] - -def _seg_33(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x3312, 'M', 'キュリー'), (0x3313, 'M', 'ギルダー'), (0x3314, 'M', 'キロ'), (0x3315, 'M', 'キログラム'), + ] + +def _seg_33() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x3316, 'M', 'キロメートル'), (0x3317, 'M', 'キロワット'), (0x3318, 'M', 'グラム'), @@ -3574,15 +3538,14 @@ def _seg_33(): (0x3373, 'M', 'au'), (0x3374, 'M', 'bar'), (0x3375, 'M', 'ov'), - ] - -def _seg_34(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x3376, 'M', 'pc'), (0x3377, 'M', 'dm'), (0x3378, 'M', 'dm2'), (0x3379, 'M', 'dm3'), + ] + +def _seg_34() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x337A, 'M', 'iu'), (0x337B, 'M', '平成'), (0x337C, 'M', '昭和'), @@ -3679,15 +3642,14 @@ def _seg_34(): (0x33D7, 'M', 'ph'), (0x33D8, 'X'), (0x33D9, 'M', 'ppm'), - ] - -def _seg_35(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x33DA, 'M', 'pr'), (0x33DB, 'M', 'sr'), (0x33DC, 'M', 'sv'), (0x33DD, 'M', 'wb'), + ] + +def _seg_35() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x33DE, 'M', 'v∕m'), (0x33DF, 'M', 'a∕m'), (0x33E0, 'M', '1日'), @@ -3723,8 +3685,6 @@ def _seg_35(): (0x33FE, 'M', '31日'), (0x33FF, 'M', 'gal'), (0x3400, 'V'), - (0x9FFD, 'X'), - (0xA000, 'V'), (0xA48D, 'X'), (0xA490, 'V'), (0xA4C7, 'X'), @@ -3784,17 +3744,16 @@ def _seg_35(): (0xA685, 'V'), (0xA686, 'M', 'ꚇ'), (0xA687, 'V'), - ] - -def _seg_36(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0xA688, 'M', 'ꚉ'), (0xA689, 'V'), (0xA68A, 'M', 'ꚋ'), (0xA68B, 'V'), (0xA68C, 'M', 'ꚍ'), (0xA68D, 'V'), + ] + +def _seg_36() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xA68E, 'M', 'ꚏ'), (0xA68F, 'V'), (0xA690, 'M', 'ꚑ'), @@ -3889,17 +3848,16 @@ def _seg_36(): (0xA76C, 'M', 'ꝭ'), (0xA76D, 'V'), (0xA76E, 'M', 'ꝯ'), - ] - -def _seg_37(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0xA76F, 'V'), (0xA770, 'M', 'ꝯ'), (0xA771, 'V'), (0xA779, 'M', 'ꝺ'), (0xA77A, 'V'), (0xA77B, 'M', 'ꝼ'), + ] + +def _seg_37() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xA77C, 'V'), (0xA77D, 'M', 'ᵹ'), (0xA77E, 'M', 'ꝿ'), @@ -3962,7 +3920,8 @@ def _seg_37(): (0xA7BD, 'V'), (0xA7BE, 'M', 'ꞿ'), (0xA7BF, 'V'), - (0xA7C0, 'X'), + (0xA7C0, 'M', 'ꟁ'), + (0xA7C1, 'V'), (0xA7C2, 'M', 'ꟃ'), (0xA7C3, 'V'), (0xA7C4, 'M', 'ꞔ'), @@ -3973,6 +3932,20 @@ def _seg_37(): (0xA7C9, 'M', 'ꟊ'), (0xA7CA, 'V'), (0xA7CB, 'X'), + (0xA7D0, 'M', 'ꟑ'), + (0xA7D1, 'V'), + (0xA7D2, 'X'), + (0xA7D3, 'V'), + (0xA7D4, 'X'), + 
(0xA7D5, 'V'), + (0xA7D6, 'M', 'ꟗ'), + (0xA7D7, 'V'), + (0xA7D8, 'M', 'ꟙ'), + (0xA7D9, 'V'), + (0xA7DA, 'X'), + (0xA7F2, 'M', 'c'), + (0xA7F3, 'M', 'f'), + (0xA7F4, 'M', 'q'), (0xA7F5, 'M', 'ꟶ'), (0xA7F6, 'V'), (0xA7F8, 'M', 'ħ'), @@ -3985,6 +3958,10 @@ def _seg_37(): (0xA878, 'X'), (0xA880, 'V'), (0xA8C6, 'X'), + ] + +def _seg_38() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xA8CE, 'V'), (0xA8DA, 'X'), (0xA8E0, 'V'), @@ -3994,11 +3971,6 @@ def _seg_37(): (0xA980, 'V'), (0xA9CE, 'X'), (0xA9CF, 'V'), - ] - -def _seg_38(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0xA9DA, 'X'), (0xA9DE, 'V'), (0xA9FF, 'X'), @@ -4090,6 +4062,10 @@ def _seg_38(): (0xABA8, 'M', 'Ꮨ'), (0xABA9, 'M', 'Ꮩ'), (0xABAA, 'M', 'Ꮪ'), + ] + +def _seg_39() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xABAB, 'M', 'Ꮫ'), (0xABAC, 'M', 'Ꮬ'), (0xABAD, 'M', 'Ꮭ'), @@ -4099,11 +4075,6 @@ def _seg_38(): (0xABB1, 'M', 'Ꮱ'), (0xABB2, 'M', 'Ꮲ'), (0xABB3, 'M', 'Ꮳ'), - ] - -def _seg_39(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0xABB4, 'M', 'Ꮴ'), (0xABB5, 'M', 'Ꮵ'), (0xABB6, 'M', 'Ꮶ'), @@ -4195,6 +4166,10 @@ def _seg_39(): (0xF943, 'M', '弄'), (0xF944, 'M', '籠'), (0xF945, 'M', '聾'), + ] + +def _seg_40() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xF946, 'M', '牢'), (0xF947, 'M', '磊'), (0xF948, 'M', '賂'), @@ -4204,11 +4179,6 @@ def _seg_39(): (0xF94C, 'M', '樓'), (0xF94D, 'M', '淚'), (0xF94E, 'M', '漏'), - ] - -def _seg_40(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0xF94F, 'M', '累'), (0xF950, 'M', '縷'), (0xF951, 'M', '陋'), @@ -4300,6 +4270,10 @@ def _seg_40(): (0xF9A7, 'M', '獵'), (0xF9A8, 'M', '令'), (0xF9A9, 'M', '囹'), + ] + +def _seg_41() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xF9AA, 'M', '寧'), (0xF9AB, 'M', '嶺'), (0xF9AC, 'M', '怜'), @@ -4309,11 +4283,6 @@ def _seg_40(): (0xF9B0, 'M', '聆'), (0xF9B1, 'M', '鈴'), (0xF9B2, 'M', '零'), - ] - -def _seg_41(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0xF9B3, 'M', '靈'), (0xF9B4, 'M', '領'), (0xF9B5, 'M', '例'), @@ -4405,6 +4374,10 @@ def _seg_41(): (0xFA0B, 'M', '廓'), (0xFA0C, 'M', '兀'), (0xFA0D, 'M', '嗀'), + ] + +def _seg_42() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xFA0E, 'V'), (0xFA10, 'M', '塚'), (0xFA11, 'V'), @@ -4414,11 +4387,6 @@ def _seg_41(): (0xFA16, 'M', '猪'), (0xFA17, 'M', '益'), (0xFA18, 'M', '礼'), - ] - -def _seg_42(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0xFA19, 'M', '神'), (0xFA1A, 'M', '祥'), (0xFA1B, 'M', '福'), @@ -4510,6 +4478,10 @@ def _seg_42(): (0xFA76, 'M', '勇'), (0xFA77, 'M', '勺'), (0xFA78, 'M', '喝'), + ] + +def _seg_43() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xFA79, 'M', '啕'), (0xFA7A, 'M', '喙'), (0xFA7B, 'M', '嗢'), @@ -4519,11 +4491,6 @@ def _seg_42(): (0xFA7F, 'M', '奔'), (0xFA80, 'M', '婢'), (0xFA81, 'M', '嬨'), - ] - -def _seg_43(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0xFA82, 'M', '廒'), (0xFA83, 'M', '廙'), (0xFA84, 'M', '彩'), @@ -4615,6 +4582,10 @@ def _seg_43(): (0xFADA, 'X'), (0xFB00, 'M', 'ff'), (0xFB01, 'M', 'fi'), + ] + +def _seg_44() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xFB02, 'M', 'fl'), (0xFB03, 'M', 'ffi'), (0xFB04, 'M', 'ffl'), @@ -4624,11 +4595,6 @@ def _seg_43(): (0xFB14, 'M', 'մե'), (0xFB15, 'M', 'մի'), (0xFB16, 'M', 'վն'), - ] - -def _seg_44(): - # 
type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0xFB17, 'M', 'մխ'), (0xFB18, 'X'), (0xFB1D, 'M', 'יִ'), @@ -4713,13 +4679,17 @@ def _seg_44(): (0xFBAE, 'M', 'ے'), (0xFBB0, 'M', 'ۓ'), (0xFBB2, 'V'), - (0xFBC2, 'X'), + (0xFBC3, 'X'), (0xFBD3, 'M', 'ڭ'), (0xFBD7, 'M', 'ۇ'), (0xFBD9, 'M', 'ۆ'), (0xFBDB, 'M', 'ۈ'), (0xFBDD, 'M', 'ۇٴ'), (0xFBDE, 'M', 'ۋ'), + ] + +def _seg_45() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xFBE0, 'M', 'ۅ'), (0xFBE2, 'M', 'ۉ'), (0xFBE4, 'M', 'ې'), @@ -4729,11 +4699,6 @@ def _seg_44(): (0xFBEE, 'M', 'ئو'), (0xFBF0, 'M', 'ئۇ'), (0xFBF2, 'M', 'ئۆ'), - ] - -def _seg_45(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0xFBF4, 'M', 'ئۈ'), (0xFBF6, 'M', 'ئې'), (0xFBF9, 'M', 'ئى'), @@ -4825,6 +4790,10 @@ def _seg_45(): (0xFC54, 'M', 'هي'), (0xFC55, 'M', 'يج'), (0xFC56, 'M', 'يح'), + ] + +def _seg_46() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xFC57, 'M', 'يخ'), (0xFC58, 'M', 'يم'), (0xFC59, 'M', 'يى'), @@ -4834,11 +4803,6 @@ def _seg_45(): (0xFC5D, 'M', 'ىٰ'), (0xFC5E, '3', ' ٌّ'), (0xFC5F, '3', ' ٍّ'), - ] - -def _seg_46(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0xFC60, '3', ' َّ'), (0xFC61, '3', ' ُّ'), (0xFC62, '3', ' ِّ'), @@ -4930,6 +4894,10 @@ def _seg_46(): (0xFCB8, 'M', 'طح'), (0xFCB9, 'M', 'ظم'), (0xFCBA, 'M', 'عج'), + ] + +def _seg_47() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xFCBB, 'M', 'عم'), (0xFCBC, 'M', 'غج'), (0xFCBD, 'M', 'غم'), @@ -4939,11 +4907,6 @@ def _seg_46(): (0xFCC1, 'M', 'فم'), (0xFCC2, 'M', 'قح'), (0xFCC3, 'M', 'قم'), - ] - -def _seg_47(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0xFCC4, 'M', 'كج'), (0xFCC5, 'M', 'كح'), (0xFCC6, 'M', 'كخ'), @@ -5035,6 +4998,10 @@ def _seg_47(): (0xFD1C, 'M', 'حي'), (0xFD1D, 'M', 'جى'), (0xFD1E, 'M', 'جي'), + ] + +def _seg_48() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xFD1F, 'M', 'خى'), (0xFD20, 'M', 'خي'), (0xFD21, 'M', 'صى'), @@ -5044,11 +5011,6 @@ def _seg_47(): (0xFD25, 'M', 'شج'), (0xFD26, 'M', 'شح'), (0xFD27, 'M', 'شخ'), - ] - -def _seg_48(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0xFD28, 'M', 'شم'), (0xFD29, 'M', 'شر'), (0xFD2A, 'M', 'سر'), @@ -5071,7 +5033,6 @@ def _seg_48(): (0xFD3B, 'M', 'ظم'), (0xFD3C, 'M', 'اً'), (0xFD3E, 'V'), - (0xFD40, 'X'), (0xFD50, 'M', 'تجم'), (0xFD51, 'M', 'تحج'), (0xFD53, 'M', 'تحم'), @@ -5141,6 +5102,10 @@ def _seg_48(): (0xFDA4, 'M', 'تمى'), (0xFDA5, 'M', 'جمي'), (0xFDA6, 'M', 'جحى'), + ] + +def _seg_49() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xFDA7, 'M', 'جمى'), (0xFDA8, 'M', 'سخى'), (0xFDA9, 'M', 'صحي'), @@ -5149,11 +5114,6 @@ def _seg_48(): (0xFDAC, 'M', 'لجي'), (0xFDAD, 'M', 'لمي'), (0xFDAE, 'M', 'يحي'), - ] - -def _seg_49(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0xFDAF, 'M', 'يجي'), (0xFDB0, 'M', 'يمي'), (0xFDB1, 'M', 'ممي'), @@ -5180,6 +5140,8 @@ def _seg_49(): (0xFDC6, 'M', 'سخي'), (0xFDC7, 'M', 'نجي'), (0xFDC8, 'X'), + (0xFDCF, 'V'), + (0xFDD0, 'X'), (0xFDF0, 'M', 'صلے'), (0xFDF1, 'M', 'قلے'), (0xFDF2, 'M', 'الله'), @@ -5194,7 +5156,6 @@ def _seg_49(): (0xFDFB, '3', 'جل جلاله'), (0xFDFC, 'M', 'ریال'), (0xFDFD, 'V'), - (0xFDFE, 'X'), (0xFE00, 'I'), (0xFE10, '3', ','), (0xFE11, 'M', '、'), @@ -5245,6 +5206,10 @@ def _seg_49(): (0xFE5B, '3', '{'), (0xFE5C, '3', '}'), (0xFE5D, 'M', '〔'), + ] + +def _seg_50() -> 
List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xFE5E, 'M', '〕'), (0xFE5F, '3', '#'), (0xFE60, '3', '&'), @@ -5254,11 +5219,6 @@ def _seg_49(): (0xFE64, '3', '<'), (0xFE65, '3', '>'), (0xFE66, '3', '='), - ] - -def _seg_50(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0xFE67, 'X'), (0xFE68, '3', '\\'), (0xFE69, '3', '$'), @@ -5350,6 +5310,10 @@ def _seg_50(): (0xFF18, 'M', '8'), (0xFF19, 'M', '9'), (0xFF1A, '3', ':'), + ] + +def _seg_51() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xFF1B, '3', ';'), (0xFF1C, '3', '<'), (0xFF1D, '3', '='), @@ -5359,11 +5323,6 @@ def _seg_50(): (0xFF21, 'M', 'a'), (0xFF22, 'M', 'b'), (0xFF23, 'M', 'c'), - ] - -def _seg_51(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0xFF24, 'M', 'd'), (0xFF25, 'M', 'e'), (0xFF26, 'M', 'f'), @@ -5455,6 +5414,10 @@ def _seg_51(): (0xFF7C, 'M', 'シ'), (0xFF7D, 'M', 'ス'), (0xFF7E, 'M', 'セ'), + ] + +def _seg_52() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xFF7F, 'M', 'ソ'), (0xFF80, 'M', 'タ'), (0xFF81, 'M', 'チ'), @@ -5464,11 +5427,6 @@ def _seg_51(): (0xFF85, 'M', 'ナ'), (0xFF86, 'M', 'ニ'), (0xFF87, 'M', 'ヌ'), - ] - -def _seg_52(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0xFF88, 'M', 'ネ'), (0xFF89, 'M', 'ノ'), (0xFF8A, 'M', 'ハ'), @@ -5560,6 +5518,10 @@ def _seg_52(): (0xFFE7, 'X'), (0xFFE8, 'M', '│'), (0xFFE9, 'M', '←'), + ] + +def _seg_53() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xFFEA, 'M', '↑'), (0xFFEB, 'M', '→'), (0xFFEC, 'M', '↓'), @@ -5569,11 +5531,6 @@ def _seg_52(): (0x10000, 'V'), (0x1000C, 'X'), (0x1000D, 'V'), - ] - -def _seg_53(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x10027, 'X'), (0x10028, 'V'), (0x1003B, 'X'), @@ -5665,6 +5622,10 @@ def _seg_53(): (0x104B3, 'M', '𐓛'), (0x104B4, 'M', '𐓜'), (0x104B5, 'M', '𐓝'), + ] + +def _seg_54() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x104B6, 'M', '𐓞'), (0x104B7, 'M', '𐓟'), (0x104B8, 'M', '𐓠'), @@ -5674,11 +5635,6 @@ def _seg_53(): (0x104BC, 'M', '𐓤'), (0x104BD, 'M', '𐓥'), (0x104BE, 'M', '𐓦'), - ] - -def _seg_54(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x104BF, 'M', '𐓧'), (0x104C0, 'M', '𐓨'), (0x104C1, 'M', '𐓩'), @@ -5708,13 +5664,123 @@ def _seg_54(): (0x10530, 'V'), (0x10564, 'X'), (0x1056F, 'V'), - (0x10570, 'X'), + (0x10570, 'M', '𐖗'), + (0x10571, 'M', '𐖘'), + (0x10572, 'M', '𐖙'), + (0x10573, 'M', '𐖚'), + (0x10574, 'M', '𐖛'), + (0x10575, 'M', '𐖜'), + (0x10576, 'M', '𐖝'), + (0x10577, 'M', '𐖞'), + (0x10578, 'M', '𐖟'), + (0x10579, 'M', '𐖠'), + (0x1057A, 'M', '𐖡'), + (0x1057B, 'X'), + (0x1057C, 'M', '𐖣'), + (0x1057D, 'M', '𐖤'), + (0x1057E, 'M', '𐖥'), + (0x1057F, 'M', '𐖦'), + (0x10580, 'M', '𐖧'), + (0x10581, 'M', '𐖨'), + (0x10582, 'M', '𐖩'), + (0x10583, 'M', '𐖪'), + (0x10584, 'M', '𐖫'), + (0x10585, 'M', '𐖬'), + (0x10586, 'M', '𐖭'), + (0x10587, 'M', '𐖮'), + (0x10588, 'M', '𐖯'), + (0x10589, 'M', '𐖰'), + (0x1058A, 'M', '𐖱'), + (0x1058B, 'X'), + (0x1058C, 'M', '𐖳'), + (0x1058D, 'M', '𐖴'), + (0x1058E, 'M', '𐖵'), + (0x1058F, 'M', '𐖶'), + (0x10590, 'M', '𐖷'), + (0x10591, 'M', '𐖸'), + (0x10592, 'M', '𐖹'), + (0x10593, 'X'), + (0x10594, 'M', '𐖻'), + (0x10595, 'M', '𐖼'), + (0x10596, 'X'), + (0x10597, 'V'), + (0x105A2, 'X'), + (0x105A3, 'V'), + (0x105B2, 'X'), + (0x105B3, 'V'), + (0x105BA, 'X'), + (0x105BB, 'V'), + (0x105BD, 'X'), (0x10600, 'V'), (0x10737, 'X'), (0x10740, 'V'), 
(0x10756, 'X'), (0x10760, 'V'), (0x10768, 'X'), + (0x10780, 'V'), + (0x10781, 'M', 'ː'), + (0x10782, 'M', 'ˑ'), + (0x10783, 'M', 'æ'), + (0x10784, 'M', 'ʙ'), + (0x10785, 'M', 'ɓ'), + (0x10786, 'X'), + (0x10787, 'M', 'ʣ'), + (0x10788, 'M', 'ꭦ'), + ] + +def _seg_55() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x10789, 'M', 'ʥ'), + (0x1078A, 'M', 'ʤ'), + (0x1078B, 'M', 'ɖ'), + (0x1078C, 'M', 'ɗ'), + (0x1078D, 'M', 'ᶑ'), + (0x1078E, 'M', 'ɘ'), + (0x1078F, 'M', 'ɞ'), + (0x10790, 'M', 'ʩ'), + (0x10791, 'M', 'ɤ'), + (0x10792, 'M', 'ɢ'), + (0x10793, 'M', 'ɠ'), + (0x10794, 'M', 'ʛ'), + (0x10795, 'M', 'ħ'), + (0x10796, 'M', 'ʜ'), + (0x10797, 'M', 'ɧ'), + (0x10798, 'M', 'ʄ'), + (0x10799, 'M', 'ʪ'), + (0x1079A, 'M', 'ʫ'), + (0x1079B, 'M', 'ɬ'), + (0x1079C, 'M', '𝼄'), + (0x1079D, 'M', 'ꞎ'), + (0x1079E, 'M', 'ɮ'), + (0x1079F, 'M', '𝼅'), + (0x107A0, 'M', 'ʎ'), + (0x107A1, 'M', '𝼆'), + (0x107A2, 'M', 'ø'), + (0x107A3, 'M', 'ɶ'), + (0x107A4, 'M', 'ɷ'), + (0x107A5, 'M', 'q'), + (0x107A6, 'M', 'ɺ'), + (0x107A7, 'M', '𝼈'), + (0x107A8, 'M', 'ɽ'), + (0x107A9, 'M', 'ɾ'), + (0x107AA, 'M', 'ʀ'), + (0x107AB, 'M', 'ʨ'), + (0x107AC, 'M', 'ʦ'), + (0x107AD, 'M', 'ꭧ'), + (0x107AE, 'M', 'ʧ'), + (0x107AF, 'M', 'ʈ'), + (0x107B0, 'M', 'ⱱ'), + (0x107B1, 'X'), + (0x107B2, 'M', 'ʏ'), + (0x107B3, 'M', 'ʡ'), + (0x107B4, 'M', 'ʢ'), + (0x107B5, 'M', 'ʘ'), + (0x107B6, 'M', 'ǀ'), + (0x107B7, 'M', 'ǁ'), + (0x107B8, 'M', 'ǂ'), + (0x107B9, 'M', '𝼊'), + (0x107BA, 'M', '𝼞'), + (0x107BB, 'X'), (0x10800, 'V'), (0x10806, 'X'), (0x10808, 'V'), @@ -5764,6 +5830,10 @@ def _seg_54(): (0x10A60, 'V'), (0x10AA0, 'X'), (0x10AC0, 'V'), + ] + +def _seg_56() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x10AE7, 'X'), (0x10AEB, 'V'), (0x10AF7, 'X'), @@ -5779,11 +5849,6 @@ def _seg_54(): (0x10B9D, 'X'), (0x10BA9, 'V'), (0x10BB0, 'X'), - ] - -def _seg_55(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x10C00, 'V'), (0x10C49, 'X'), (0x10C80, 'M', '𐳀'), @@ -5856,6 +5921,8 @@ def _seg_55(): (0x10F28, 'X'), (0x10F30, 'V'), (0x10F5A, 'X'), + (0x10F70, 'V'), + (0x10F8A, 'X'), (0x10FB0, 'V'), (0x10FCC, 'X'), (0x10FE0, 'V'), @@ -5863,11 +5930,15 @@ def _seg_55(): (0x11000, 'V'), (0x1104E, 'X'), (0x11052, 'V'), - (0x11070, 'X'), + (0x11076, 'X'), (0x1107F, 'V'), (0x110BD, 'X'), (0x110BE, 'V'), - (0x110C2, 'X'), + ] + +def _seg_57() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x110C3, 'X'), (0x110D0, 'V'), (0x110E9, 'X'), (0x110F0, 'V'), @@ -5884,11 +5955,6 @@ def _seg_55(): (0x111F5, 'X'), (0x11200, 'V'), (0x11212, 'X'), - ] - -def _seg_56(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x11213, 'V'), (0x1123F, 'X'), (0x11280, 'V'), @@ -5954,7 +6020,7 @@ def _seg_56(): (0x11660, 'V'), (0x1166D, 'X'), (0x11680, 'V'), - (0x116B9, 'X'), + (0x116BA, 'X'), (0x116C0, 'V'), (0x116CA, 'X'), (0x11700, 'V'), @@ -5962,7 +6028,7 @@ def _seg_56(): (0x1171D, 'V'), (0x1172C, 'X'), (0x11730, 'V'), - (0x11740, 'X'), + (0x11747, 'X'), (0x11800, 'V'), (0x1183C, 'X'), (0x118A0, 'M', '𑣀'), @@ -5972,6 +6038,10 @@ def _seg_56(): (0x118A4, 'M', '𑣄'), (0x118A5, 'M', '𑣅'), (0x118A6, 'M', '𑣆'), + ] + +def _seg_58() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x118A7, 'M', '𑣇'), (0x118A8, 'M', '𑣈'), (0x118A9, 'M', '𑣉'), @@ -5989,11 +6059,6 @@ def _seg_56(): (0x118B5, 'M', '𑣕'), (0x118B6, 'M', '𑣖'), (0x118B7, 'M', '𑣗'), - ] - -def _seg_57(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x118B8, 
'M', '𑣘'), (0x118B9, 'M', '𑣙'), (0x118BA, 'M', '𑣚'), @@ -6030,7 +6095,7 @@ def _seg_57(): (0x11A48, 'X'), (0x11A50, 'V'), (0x11AA3, 'X'), - (0x11AC0, 'V'), + (0x11AB0, 'V'), (0x11AF9, 'X'), (0x11C00, 'V'), (0x11C09, 'X'), @@ -6077,6 +6142,10 @@ def _seg_57(): (0x11FB0, 'V'), (0x11FB1, 'X'), (0x11FC0, 'V'), + ] + +def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x11FF2, 'X'), (0x11FFF, 'V'), (0x1239A, 'X'), @@ -6086,6 +6155,8 @@ def _seg_57(): (0x12475, 'X'), (0x12480, 'V'), (0x12544, 'X'), + (0x12F90, 'V'), + (0x12FF3, 'X'), (0x13000, 'V'), (0x1342F, 'X'), (0x14400, 'V'), @@ -6094,15 +6165,12 @@ def _seg_57(): (0x16A39, 'X'), (0x16A40, 'V'), (0x16A5F, 'X'), - ] - -def _seg_58(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x16A60, 'V'), (0x16A6A, 'X'), (0x16A6E, 'V'), - (0x16A70, 'X'), + (0x16ABF, 'X'), + (0x16AC0, 'V'), + (0x16ACA, 'X'), (0x16AD0, 'V'), (0x16AEE, 'X'), (0x16AF0, 'V'), @@ -6167,11 +6235,21 @@ def _seg_58(): (0x18CD6, 'X'), (0x18D00, 'V'), (0x18D09, 'X'), + (0x1AFF0, 'V'), + (0x1AFF4, 'X'), + (0x1AFF5, 'V'), + (0x1AFFC, 'X'), + (0x1AFFD, 'V'), + (0x1AFFF, 'X'), (0x1B000, 'V'), - (0x1B11F, 'X'), + (0x1B123, 'X'), (0x1B150, 'V'), (0x1B153, 'X'), (0x1B164, 'V'), + ] + +def _seg_60() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1B168, 'X'), (0x1B170, 'V'), (0x1B2FC, 'X'), @@ -6186,6 +6264,12 @@ def _seg_58(): (0x1BC9C, 'V'), (0x1BCA0, 'I'), (0x1BCA4, 'X'), + (0x1CF00, 'V'), + (0x1CF2E, 'X'), + (0x1CF30, 'V'), + (0x1CF47, 'X'), + (0x1CF50, 'V'), + (0x1CFC4, 'X'), (0x1D000, 'V'), (0x1D0F6, 'X'), (0x1D100, 'V'), @@ -6199,11 +6283,6 @@ def _seg_58(): (0x1D163, 'M', '𝅘𝅥𝅱'), (0x1D164, 'M', '𝅘𝅥𝅲'), (0x1D165, 'V'), - ] - -def _seg_59(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x1D173, 'X'), (0x1D17B, 'V'), (0x1D1BB, 'M', '𝆹𝅥'), @@ -6213,7 +6292,7 @@ def _seg_59(): (0x1D1BF, 'M', '𝆹𝅥𝅯'), (0x1D1C0, 'M', '𝆺𝅥𝅯'), (0x1D1C1, 'V'), - (0x1D1E9, 'X'), + (0x1D1EB, 'X'), (0x1D200, 'V'), (0x1D246, 'X'), (0x1D2E0, 'V'), @@ -6271,6 +6350,10 @@ def _seg_59(): (0x1D42E, 'M', 'u'), (0x1D42F, 'M', 'v'), (0x1D430, 'M', 'w'), + ] + +def _seg_61() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D431, 'M', 'x'), (0x1D432, 'M', 'y'), (0x1D433, 'M', 'z'), @@ -6304,11 +6387,6 @@ def _seg_59(): (0x1D44F, 'M', 'b'), (0x1D450, 'M', 'c'), (0x1D451, 'M', 'd'), - ] - -def _seg_60(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x1D452, 'M', 'e'), (0x1D453, 'M', 'f'), (0x1D454, 'M', 'g'), @@ -6376,6 +6454,10 @@ def _seg_60(): (0x1D492, 'M', 'q'), (0x1D493, 'M', 'r'), (0x1D494, 'M', 's'), + ] + +def _seg_62() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D495, 'M', 't'), (0x1D496, 'M', 'u'), (0x1D497, 'M', 'v'), @@ -6409,11 +6491,6 @@ def _seg_60(): (0x1D4B6, 'M', 'a'), (0x1D4B7, 'M', 'b'), (0x1D4B8, 'M', 'c'), - ] - -def _seg_61(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x1D4B9, 'M', 'd'), (0x1D4BA, 'X'), (0x1D4BB, 'M', 'f'), @@ -6481,6 +6558,10 @@ def _seg_61(): (0x1D4F9, 'M', 'p'), (0x1D4FA, 'M', 'q'), (0x1D4FB, 'M', 'r'), + ] + +def _seg_63() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D4FC, 'M', 's'), (0x1D4FD, 'M', 't'), (0x1D4FE, 'M', 'u'), @@ -6514,11 +6595,6 @@ def _seg_61(): (0x1D51B, 'M', 'x'), (0x1D51C, 'M', 'y'), (0x1D51D, 'X'), - ] - -def _seg_62(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ 
(0x1D51E, 'M', 'a'), (0x1D51F, 'M', 'b'), (0x1D520, 'M', 'c'), @@ -6586,6 +6662,10 @@ def _seg_62(): (0x1D560, 'M', 'o'), (0x1D561, 'M', 'p'), (0x1D562, 'M', 'q'), + ] + +def _seg_64() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D563, 'M', 'r'), (0x1D564, 'M', 's'), (0x1D565, 'M', 't'), @@ -6619,11 +6699,6 @@ def _seg_62(): (0x1D581, 'M', 'v'), (0x1D582, 'M', 'w'), (0x1D583, 'M', 'x'), - ] - -def _seg_63(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x1D584, 'M', 'y'), (0x1D585, 'M', 'z'), (0x1D586, 'M', 'a'), @@ -6691,6 +6766,10 @@ def _seg_63(): (0x1D5C4, 'M', 'k'), (0x1D5C5, 'M', 'l'), (0x1D5C6, 'M', 'm'), + ] + +def _seg_65() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D5C7, 'M', 'n'), (0x1D5C8, 'M', 'o'), (0x1D5C9, 'M', 'p'), @@ -6724,11 +6803,6 @@ def _seg_63(): (0x1D5E5, 'M', 'r'), (0x1D5E6, 'M', 's'), (0x1D5E7, 'M', 't'), - ] - -def _seg_64(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x1D5E8, 'M', 'u'), (0x1D5E9, 'M', 'v'), (0x1D5EA, 'M', 'w'), @@ -6796,6 +6870,10 @@ def _seg_64(): (0x1D628, 'M', 'g'), (0x1D629, 'M', 'h'), (0x1D62A, 'M', 'i'), + ] + +def _seg_66() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D62B, 'M', 'j'), (0x1D62C, 'M', 'k'), (0x1D62D, 'M', 'l'), @@ -6829,11 +6907,6 @@ def _seg_64(): (0x1D649, 'M', 'n'), (0x1D64A, 'M', 'o'), (0x1D64B, 'M', 'p'), - ] - -def _seg_65(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x1D64C, 'M', 'q'), (0x1D64D, 'M', 'r'), (0x1D64E, 'M', 's'), @@ -6901,6 +6974,10 @@ def _seg_65(): (0x1D68C, 'M', 'c'), (0x1D68D, 'M', 'd'), (0x1D68E, 'M', 'e'), + ] + +def _seg_67() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D68F, 'M', 'f'), (0x1D690, 'M', 'g'), (0x1D691, 'M', 'h'), @@ -6934,11 +7011,6 @@ def _seg_65(): (0x1D6AE, 'M', 'η'), (0x1D6AF, 'M', 'θ'), (0x1D6B0, 'M', 'ι'), - ] - -def _seg_66(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x1D6B1, 'M', 'κ'), (0x1D6B2, 'M', 'λ'), (0x1D6B3, 'M', 'μ'), @@ -7006,6 +7078,10 @@ def _seg_66(): (0x1D6F2, 'M', 'ρ'), (0x1D6F3, 'M', 'θ'), (0x1D6F4, 'M', 'σ'), + ] + +def _seg_68() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D6F5, 'M', 'τ'), (0x1D6F6, 'M', 'υ'), (0x1D6F7, 'M', 'φ'), @@ -7039,11 +7115,6 @@ def _seg_66(): (0x1D714, 'M', 'ω'), (0x1D715, 'M', '∂'), (0x1D716, 'M', 'ε'), - ] - -def _seg_67(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x1D717, 'M', 'θ'), (0x1D718, 'M', 'κ'), (0x1D719, 'M', 'φ'), @@ -7111,6 +7182,10 @@ def _seg_67(): (0x1D758, 'M', 'γ'), (0x1D759, 'M', 'δ'), (0x1D75A, 'M', 'ε'), + ] + +def _seg_69() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D75B, 'M', 'ζ'), (0x1D75C, 'M', 'η'), (0x1D75D, 'M', 'θ'), @@ -7144,11 +7219,6 @@ def _seg_67(): (0x1D779, 'M', 'κ'), (0x1D77A, 'M', 'λ'), (0x1D77B, 'M', 'μ'), - ] - -def _seg_68(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x1D77C, 'M', 'ν'), (0x1D77D, 'M', 'ξ'), (0x1D77E, 'M', 'ο'), @@ -7216,6 +7286,10 @@ def _seg_68(): (0x1D7BE, 'M', 'υ'), (0x1D7BF, 'M', 'φ'), (0x1D7C0, 'M', 'χ'), + ] + +def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D7C1, 'M', 'ψ'), (0x1D7C2, 'M', 'ω'), (0x1D7C3, 'M', '∂'), @@ -7249,11 +7323,6 @@ def _seg_68(): (0x1D7E1, 'M', '9'), (0x1D7E2, 'M', '0'), (0x1D7E3, 'M', '1'), - ] - -def _seg_69(): - # type: () -> 
List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x1D7E4, 'M', '2'), (0x1D7E5, 'M', '3'), (0x1D7E6, 'M', '4'), @@ -7288,6 +7357,8 @@ def _seg_69(): (0x1DAA0, 'X'), (0x1DAA1, 'V'), (0x1DAB0, 'X'), + (0x1DF00, 'V'), + (0x1DF1F, 'X'), (0x1E000, 'V'), (0x1E007, 'X'), (0x1E008, 'V'), @@ -7306,10 +7377,24 @@ def _seg_69(): (0x1E14A, 'X'), (0x1E14E, 'V'), (0x1E150, 'X'), + (0x1E290, 'V'), + (0x1E2AF, 'X'), (0x1E2C0, 'V'), (0x1E2FA, 'X'), (0x1E2FF, 'V'), (0x1E300, 'X'), + (0x1E7E0, 'V'), + (0x1E7E7, 'X'), + (0x1E7E8, 'V'), + (0x1E7EC, 'X'), + (0x1E7ED, 'V'), + (0x1E7EF, 'X'), + (0x1E7F0, 'V'), + ] + +def _seg_71() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1E7FF, 'X'), (0x1E800, 'V'), (0x1E8C5, 'X'), (0x1E8C7, 'V'), @@ -7354,11 +7439,6 @@ def _seg_69(): (0x1E95A, 'X'), (0x1E95E, 'V'), (0x1E960, 'X'), - ] - -def _seg_70(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x1EC71, 'V'), (0x1ECB5, 'X'), (0x1ED01, 'V'), @@ -7414,6 +7494,10 @@ def _seg_70(): (0x1EE31, 'M', 'ص'), (0x1EE32, 'M', 'ق'), (0x1EE33, 'X'), + ] + +def _seg_72() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1EE34, 'M', 'ش'), (0x1EE35, 'M', 'ت'), (0x1EE36, 'M', 'ث'), @@ -7459,11 +7543,6 @@ def _seg_70(): (0x1EE68, 'M', 'ط'), (0x1EE69, 'M', 'ي'), (0x1EE6A, 'M', 'ك'), - ] - -def _seg_71(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x1EE6B, 'X'), (0x1EE6C, 'M', 'م'), (0x1EE6D, 'M', 'ن'), @@ -7519,6 +7598,10 @@ def _seg_71(): (0x1EEA3, 'M', 'د'), (0x1EEA4, 'X'), (0x1EEA5, 'M', 'و'), + ] + +def _seg_73() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1EEA6, 'M', 'ز'), (0x1EEA7, 'M', 'ح'), (0x1EEA8, 'M', 'ط'), @@ -7564,11 +7647,6 @@ def _seg_71(): (0x1F106, '3', '5,'), (0x1F107, '3', '6,'), (0x1F108, '3', '7,'), - ] - -def _seg_72(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x1F109, '3', '8,'), (0x1F10A, '3', '9,'), (0x1F10B, 'V'), @@ -7624,6 +7702,10 @@ def _seg_72(): (0x1F141, 'M', 'r'), (0x1F142, 'M', 's'), (0x1F143, 'M', 't'), + ] + +def _seg_74() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1F144, 'M', 'u'), (0x1F145, 'M', 'v'), (0x1F146, 'M', 'w'), @@ -7669,11 +7751,6 @@ def _seg_72(): (0x1F221, 'M', '終'), (0x1F222, 'M', '生'), (0x1F223, 'M', '販'), - ] - -def _seg_73(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x1F224, 'M', '声'), (0x1F225, 'M', '吹'), (0x1F226, 'M', '演'), @@ -7716,7 +7793,7 @@ def _seg_73(): (0x1F266, 'X'), (0x1F300, 'V'), (0x1F6D8, 'X'), - (0x1F6E0, 'V'), + (0x1F6DD, 'V'), (0x1F6ED, 'X'), (0x1F6F0, 'V'), (0x1F6FD, 'X'), @@ -7726,7 +7803,13 @@ def _seg_73(): (0x1F7D9, 'X'), (0x1F7E0, 'V'), (0x1F7EC, 'X'), + (0x1F7F0, 'V'), + (0x1F7F1, 'X'), (0x1F800, 'V'), + ] + +def _seg_75() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1F80C, 'X'), (0x1F810, 'V'), (0x1F848, 'X'), @@ -7739,27 +7822,27 @@ def _seg_73(): (0x1F8B0, 'V'), (0x1F8B2, 'X'), (0x1F900, 'V'), - (0x1F979, 'X'), - (0x1F97A, 'V'), - (0x1F9CC, 'X'), - (0x1F9CD, 'V'), (0x1FA54, 'X'), (0x1FA60, 'V'), (0x1FA6E, 'X'), (0x1FA70, 'V'), (0x1FA75, 'X'), (0x1FA78, 'V'), - (0x1FA7B, 'X'), + (0x1FA7D, 'X'), (0x1FA80, 'V'), (0x1FA87, 'X'), (0x1FA90, 'V'), - (0x1FAA9, 'X'), + (0x1FAAD, 'X'), (0x1FAB0, 'V'), - (0x1FAB7, 'X'), + (0x1FABB, 'X'), (0x1FAC0, 'V'), - (0x1FAC3, 'X'), + (0x1FAC6, 'X'), (0x1FAD0, 'V'), - (0x1FAD7, 'X'), + (0x1FADA, 'X'), + (0x1FAE0, 'V'), + (0x1FAE8, 'X'), + (0x1FAF0, 'V'), + 
(0x1FAF7, 'X'), (0x1FB00, 'V'), (0x1FB93, 'X'), (0x1FB94, 'V'), @@ -7774,16 +7857,11 @@ def _seg_73(): (0x1FBF7, 'M', '7'), (0x1FBF8, 'M', '8'), (0x1FBF9, 'M', '9'), - ] - -def _seg_74(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x1FBFA, 'X'), (0x20000, 'V'), - (0x2A6DE, 'X'), + (0x2A6E0, 'X'), (0x2A700, 'V'), - (0x2B735, 'X'), + (0x2B739, 'X'), (0x2B740, 'V'), (0x2B81E, 'X'), (0x2B820, 'V'), @@ -7832,6 +7910,10 @@ def _seg_74(): (0x2F827, 'M', '勤'), (0x2F828, 'M', '勺'), (0x2F829, 'M', '包'), + ] + +def _seg_76() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x2F82A, 'M', '匆'), (0x2F82B, 'M', '北'), (0x2F82C, 'M', '卉'), @@ -7879,11 +7961,6 @@ def _seg_74(): (0x2F859, 'M', '𡓤'), (0x2F85A, 'M', '売'), (0x2F85B, 'M', '壷'), - ] - -def _seg_75(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x2F85C, 'M', '夆'), (0x2F85D, 'M', '多'), (0x2F85E, 'M', '夢'), @@ -7937,6 +8014,10 @@ def _seg_75(): (0x2F88F, 'M', '𪎒'), (0x2F890, 'M', '廾'), (0x2F891, 'M', '𢌱'), + ] + +def _seg_77() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x2F893, 'M', '舁'), (0x2F894, 'M', '弢'), (0x2F896, 'M', '㣇'), @@ -7984,11 +8065,6 @@ def _seg_75(): (0x2F8C0, 'M', '揅'), (0x2F8C1, 'M', '掩'), (0x2F8C2, 'M', '㨮'), - ] - -def _seg_76(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x2F8C3, 'M', '摩'), (0x2F8C4, 'M', '摾'), (0x2F8C5, 'M', '撝'), @@ -8042,6 +8118,10 @@ def _seg_76(): (0x2F8F5, 'M', '殺'), (0x2F8F6, 'M', '殻'), (0x2F8F7, 'M', '𣪍'), + ] + +def _seg_78() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x2F8F8, 'M', '𡴋'), (0x2F8F9, 'M', '𣫺'), (0x2F8FA, 'M', '汎'), @@ -8089,11 +8169,6 @@ def _seg_76(): (0x2F924, 'M', '犀'), (0x2F925, 'M', '犕'), (0x2F926, 'M', '𤜵'), - ] - -def _seg_77(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x2F927, 'M', '𤠔'), (0x2F928, 'M', '獺'), (0x2F929, 'M', '王'), @@ -8147,6 +8222,10 @@ def _seg_77(): (0x2F95B, 'M', '穏'), (0x2F95C, 'M', '𥥼'), (0x2F95D, 'M', '𥪧'), + ] + +def _seg_79() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x2F95F, 'X'), (0x2F960, 'M', '䈂'), (0x2F961, 'M', '𥮫'), @@ -8194,11 +8273,6 @@ def _seg_77(): (0x2F98B, 'M', '舁'), (0x2F98C, 'M', '舄'), (0x2F98D, 'M', '辞'), - ] - -def _seg_78(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x2F98E, 'M', '䑫'), (0x2F98F, 'M', '芑'), (0x2F990, 'M', '芋'), @@ -8252,6 +8326,10 @@ def _seg_78(): (0x2F9C0, 'M', '蟡'), (0x2F9C1, 'M', '蠁'), (0x2F9C2, 'M', '䗹'), + ] + +def _seg_80() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x2F9C3, 'M', '衠'), (0x2F9C4, 'M', '衣'), (0x2F9C5, 'M', '𧙧'), @@ -8299,11 +8377,6 @@ def _seg_78(): (0x2F9EF, 'M', '䦕'), (0x2F9F0, 'M', '閷'), (0x2F9F1, 'M', '𨵷'), - ] - -def _seg_79(): - # type: () -> List[Union[Tuple[int, str], Tuple[int, str, str]]] - return [ (0x2F9F2, 'M', '䧦'), (0x2F9F3, 'M', '雃'), (0x2F9F4, 'M', '嶲'), @@ -8435,4 +8508,5 @@ def _seg_79(): + _seg_77() + _seg_78() + _seg_79() + + _seg_80() ) # type: Tuple[Union[Tuple[int, str], Tuple[int, str, str]], ...] 
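The uts46data.py hunks above are mechanical: the table is regenerated from a newer Unicode release (14.0, going by the Vithkuqi 0x10570 and Cypro-Minoan 0x12F90 additions), so rows shift between the _seg_N helper functions, which exist only to split one huge literal list into chunks small enough for CPython to compile efficiently. Callers concatenate the segments into a single list ordered by starting codepoint and locate the row that governs a codepoint by bisection. A minimal sketch of that lookup pattern, assuming only the row shape visible above ("lookup" and "table" are illustrative names, not idna's actual API):

    from bisect import bisect_right
    from typing import List, Tuple, Union

    Row = Union[Tuple[int, str], Tuple[int, str, str]]

    def lookup(table: List[Row], cp: int) -> Row:
        """Return the row governing codepoint cp: rows are sorted by their
        starting codepoint, and each row applies until the next one begins."""
        starts = [row[0] for row in table]   # first element of every row
        idx = bisect_right(starts, cp) - 1   # last row starting at or before cp
        if idx < 0:
            raise ValueError("codepoint below table range")
        return table[idx]

    # Toy table in the same shape as the _seg_N rows:
    # 'V' = valid, 'M' = mapped (third element is the mapping), 'X' = disallowed.
    table: List[Row] = [(0x41, "M", "a"), (0x61, "V"), (0x7B, "X")]
    assert lookup(table, 0x41) == (0x41, "M", "a")   # 'A' maps to 'a'
    assert lookup(table, 0x62) == (0x61, "V")        # 'b' is valid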
diff --git a/pipenv/patched/notpip/_vendor/msgpack/_version.py b/pipenv/patched/notpip/_vendor/msgpack/_version.py index 1c83c8ed3d..fb878b353d 100644 --- a/pipenv/patched/notpip/_vendor/msgpack/_version.py +++ b/pipenv/patched/notpip/_vendor/msgpack/_version.py @@ -1 +1 @@ -version = (1, 0, 2) +version = (1, 0, 3) diff --git a/pipenv/patched/notpip/_vendor/msgpack/fallback.py b/pipenv/patched/notpip/_vendor/msgpack/fallback.py index 0bfa94eacb..b27acb2951 100644 --- a/pipenv/patched/notpip/_vendor/msgpack/fallback.py +++ b/pipenv/patched/notpip/_vendor/msgpack/fallback.py @@ -1,5 +1,4 @@ """Fallback pure Python implementation of msgpack""" - from datetime import datetime as _DateTime import sys import struct @@ -148,6 +147,38 @@ def _unpack_from(f, b, o=0): else: _unpack_from = struct.unpack_from +_NO_FORMAT_USED = "" +_MSGPACK_HEADERS = { + 0xC4: (1, _NO_FORMAT_USED, TYPE_BIN), + 0xC5: (2, ">H", TYPE_BIN), + 0xC6: (4, ">I", TYPE_BIN), + 0xC7: (2, "Bb", TYPE_EXT), + 0xC8: (3, ">Hb", TYPE_EXT), + 0xC9: (5, ">Ib", TYPE_EXT), + 0xCA: (4, ">f"), + 0xCB: (8, ">d"), + 0xCC: (1, _NO_FORMAT_USED), + 0xCD: (2, ">H"), + 0xCE: (4, ">I"), + 0xCF: (8, ">Q"), + 0xD0: (1, "b"), + 0xD1: (2, ">h"), + 0xD2: (4, ">i"), + 0xD3: (8, ">q"), + 0xD4: (1, "b1s", TYPE_EXT), + 0xD5: (2, "b2s", TYPE_EXT), + 0xD6: (4, "b4s", TYPE_EXT), + 0xD7: (8, "b8s", TYPE_EXT), + 0xD8: (16, "b16s", TYPE_EXT), + 0xD9: (1, _NO_FORMAT_USED, TYPE_RAW), + 0xDA: (2, ">H", TYPE_RAW), + 0xDB: (4, ">I", TYPE_RAW), + 0xDC: (2, ">H", TYPE_ARRAY), + 0xDD: (4, ">I", TYPE_ARRAY), + 0xDE: (2, ">H", TYPE_MAP), + 0xDF: (4, ">I", TYPE_MAP), +} + class Unpacker(object): """Streaming unpacker. @@ -229,7 +260,7 @@ class Unpacker(object): Example of streaming deserialize from socket:: - unpacker = Unpacker(max_buffer_size) + unpacker = Unpacker() while True: buf = sock.recv(1024**2) if not buf: @@ -354,7 +385,7 @@ def feed(self, next_bytes): self._buffer.extend(view) def _consume(self): - """ Gets rid of the used parts of the buffer. 
""" + """Gets rid of the used parts of the buffer.""" self._stream_offset += self._buff_i - self._buf_checkpoint self._buf_checkpoint = self._buff_i @@ -409,7 +440,7 @@ def _reserve(self, n, raise_outofdata=True): self._buff_i = 0 # rollback raise OutOfData - def _read_header(self, execute=EX_CONSTRUCT): + def _read_header(self): typ = TYPE_IMMEDIATE n = 0 obj = None @@ -424,205 +455,95 @@ def _read_header(self, execute=EX_CONSTRUCT): n = b & 0b00011111 typ = TYPE_RAW if n > self._max_str_len: - raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) + raise ValueError("%s exceeds max_str_len(%s)" % (n, self._max_str_len)) obj = self._read(n) elif b & 0b11110000 == 0b10010000: n = b & 0b00001111 typ = TYPE_ARRAY if n > self._max_array_len: - raise ValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) + raise ValueError( + "%s exceeds max_array_len(%s)" % (n, self._max_array_len) + ) elif b & 0b11110000 == 0b10000000: n = b & 0b00001111 typ = TYPE_MAP if n > self._max_map_len: - raise ValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) + raise ValueError("%s exceeds max_map_len(%s)" % (n, self._max_map_len)) elif b == 0xC0: obj = None elif b == 0xC2: obj = False elif b == 0xC3: obj = True - elif b == 0xC4: - typ = TYPE_BIN - self._reserve(1) - n = self._buffer[self._buff_i] - self._buff_i += 1 - if n > self._max_bin_len: - raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) - obj = self._read(n) - elif b == 0xC5: - typ = TYPE_BIN - self._reserve(2) - n = _unpack_from(">H", self._buffer, self._buff_i)[0] - self._buff_i += 2 - if n > self._max_bin_len: - raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) - obj = self._read(n) - elif b == 0xC6: - typ = TYPE_BIN - self._reserve(4) - n = _unpack_from(">I", self._buffer, self._buff_i)[0] - self._buff_i += 4 + elif 0xC4 <= b <= 0xC6: + size, fmt, typ = _MSGPACK_HEADERS[b] + self._reserve(size) + if len(fmt) > 0: + n = _unpack_from(fmt, self._buffer, self._buff_i)[0] + else: + n = self._buffer[self._buff_i] + self._buff_i += size if n > self._max_bin_len: raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) obj = self._read(n) - elif b == 0xC7: # ext 8 - typ = TYPE_EXT - self._reserve(2) - L, n = _unpack_from("Bb", self._buffer, self._buff_i) - self._buff_i += 2 - if L > self._max_ext_len: - raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) - obj = self._read(L) - elif b == 0xC8: # ext 16 - typ = TYPE_EXT - self._reserve(3) - L, n = _unpack_from(">Hb", self._buffer, self._buff_i) - self._buff_i += 3 + elif 0xC7 <= b <= 0xC9: + size, fmt, typ = _MSGPACK_HEADERS[b] + self._reserve(size) + L, n = _unpack_from(fmt, self._buffer, self._buff_i) + self._buff_i += size if L > self._max_ext_len: raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) obj = self._read(L) - elif b == 0xC9: # ext 32 - typ = TYPE_EXT - self._reserve(5) - L, n = _unpack_from(">Ib", self._buffer, self._buff_i) - self._buff_i += 5 - if L > self._max_ext_len: - raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) - obj = self._read(L) - elif b == 0xCA: - self._reserve(4) - obj = _unpack_from(">f", self._buffer, self._buff_i)[0] - self._buff_i += 4 - elif b == 0xCB: - self._reserve(8) - obj = _unpack_from(">d", self._buffer, self._buff_i)[0] - self._buff_i += 8 - elif b == 0xCC: - self._reserve(1) - obj = self._buffer[self._buff_i] - self._buff_i += 1 - elif b == 0xCD: - self._reserve(2) - obj = _unpack_from(">H", self._buffer, 
self._buff_i)[0] - self._buff_i += 2 - elif b == 0xCE: - self._reserve(4) - obj = _unpack_from(">I", self._buffer, self._buff_i)[0] - self._buff_i += 4 - elif b == 0xCF: - self._reserve(8) - obj = _unpack_from(">Q", self._buffer, self._buff_i)[0] - self._buff_i += 8 - elif b == 0xD0: - self._reserve(1) - obj = _unpack_from("b", self._buffer, self._buff_i)[0] - self._buff_i += 1 - elif b == 0xD1: - self._reserve(2) - obj = _unpack_from(">h", self._buffer, self._buff_i)[0] - self._buff_i += 2 - elif b == 0xD2: - self._reserve(4) - obj = _unpack_from(">i", self._buffer, self._buff_i)[0] - self._buff_i += 4 - elif b == 0xD3: - self._reserve(8) - obj = _unpack_from(">q", self._buffer, self._buff_i)[0] - self._buff_i += 8 - elif b == 0xD4: # fixext 1 - typ = TYPE_EXT - if self._max_ext_len < 1: - raise ValueError("%s exceeds max_ext_len(%s)" % (1, self._max_ext_len)) - self._reserve(2) - n, obj = _unpack_from("b1s", self._buffer, self._buff_i) - self._buff_i += 2 - elif b == 0xD5: # fixext 2 - typ = TYPE_EXT - if self._max_ext_len < 2: - raise ValueError("%s exceeds max_ext_len(%s)" % (2, self._max_ext_len)) - self._reserve(3) - n, obj = _unpack_from("b2s", self._buffer, self._buff_i) - self._buff_i += 3 - elif b == 0xD6: # fixext 4 - typ = TYPE_EXT - if self._max_ext_len < 4: - raise ValueError("%s exceeds max_ext_len(%s)" % (4, self._max_ext_len)) - self._reserve(5) - n, obj = _unpack_from("b4s", self._buffer, self._buff_i) - self._buff_i += 5 - elif b == 0xD7: # fixext 8 - typ = TYPE_EXT - if self._max_ext_len < 8: - raise ValueError("%s exceeds max_ext_len(%s)" % (8, self._max_ext_len)) - self._reserve(9) - n, obj = _unpack_from("b8s", self._buffer, self._buff_i) - self._buff_i += 9 - elif b == 0xD8: # fixext 16 - typ = TYPE_EXT - if self._max_ext_len < 16: - raise ValueError("%s exceeds max_ext_len(%s)" % (16, self._max_ext_len)) - self._reserve(17) - n, obj = _unpack_from("b16s", self._buffer, self._buff_i) - self._buff_i += 17 - elif b == 0xD9: - typ = TYPE_RAW - self._reserve(1) - n = self._buffer[self._buff_i] - self._buff_i += 1 - if n > self._max_str_len: - raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) - obj = self._read(n) - elif b == 0xDA: - typ = TYPE_RAW - self._reserve(2) - (n,) = _unpack_from(">H", self._buffer, self._buff_i) - self._buff_i += 2 - if n > self._max_str_len: - raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) - obj = self._read(n) - elif b == 0xDB: - typ = TYPE_RAW - self._reserve(4) - (n,) = _unpack_from(">I", self._buffer, self._buff_i) - self._buff_i += 4 + elif 0xCA <= b <= 0xD3: + size, fmt = _MSGPACK_HEADERS[b] + self._reserve(size) + if len(fmt) > 0: + obj = _unpack_from(fmt, self._buffer, self._buff_i)[0] + else: + obj = self._buffer[self._buff_i] + self._buff_i += size + elif 0xD4 <= b <= 0xD8: + size, fmt, typ = _MSGPACK_HEADERS[b] + if self._max_ext_len < size: + raise ValueError( + "%s exceeds max_ext_len(%s)" % (size, self._max_ext_len) + ) + self._reserve(size + 1) + n, obj = _unpack_from(fmt, self._buffer, self._buff_i) + self._buff_i += size + 1 + elif 0xD9 <= b <= 0xDB: + size, fmt, typ = _MSGPACK_HEADERS[b] + self._reserve(size) + if len(fmt) > 0: + (n,) = _unpack_from(fmt, self._buffer, self._buff_i) + else: + n = self._buffer[self._buff_i] + self._buff_i += size if n > self._max_str_len: - raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) + raise ValueError("%s exceeds max_str_len(%s)" % (n, self._max_str_len)) obj = self._read(n) - elif b == 0xDC: - typ = TYPE_ARRAY - 
self._reserve(2) - (n,) = _unpack_from(">H", self._buffer, self._buff_i) - self._buff_i += 2 - if n > self._max_array_len: - raise ValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) - elif b == 0xDD: - typ = TYPE_ARRAY - self._reserve(4) - (n,) = _unpack_from(">I", self._buffer, self._buff_i) - self._buff_i += 4 + elif 0xDC <= b <= 0xDD: + size, fmt, typ = _MSGPACK_HEADERS[b] + self._reserve(size) + (n,) = _unpack_from(fmt, self._buffer, self._buff_i) + self._buff_i += size if n > self._max_array_len: - raise ValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) - elif b == 0xDE: - self._reserve(2) - (n,) = _unpack_from(">H", self._buffer, self._buff_i) - self._buff_i += 2 - if n > self._max_map_len: - raise ValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) - typ = TYPE_MAP - elif b == 0xDF: - self._reserve(4) - (n,) = _unpack_from(">I", self._buffer, self._buff_i) - self._buff_i += 4 + raise ValueError( + "%s exceeds max_array_len(%s)" % (n, self._max_array_len) + ) + elif 0xDE <= b <= 0xDF: + size, fmt, typ = _MSGPACK_HEADERS[b] + self._reserve(size) + (n,) = _unpack_from(fmt, self._buffer, self._buff_i) + self._buff_i += size if n > self._max_map_len: - raise ValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) - typ = TYPE_MAP + raise ValueError("%s exceeds max_map_len(%s)" % (n, self._max_map_len)) else: raise FormatError("Unknown header: 0x%x" % b) return typ, n, obj def _unpack(self, execute=EX_CONSTRUCT): - typ, n, obj = self._read_header(execute) + typ, n, obj = self._read_header() if execute == EX_READ_ARRAY_HEADER: if typ != TYPE_ARRAY: @@ -953,6 +874,10 @@ def _pack( obj = self._default(obj) default_used = 1 continue + + if self._datetime and check(obj, _DateTime): + raise ValueError("Cannot serialize %r where tzinfo=None" % (obj,)) + raise TypeError("Cannot serialize %r" % (obj,)) def pack(self, obj): diff --git a/pipenv/patched/notpip/_vendor/packaging/LICENSE b/pipenv/patched/notpip/_vendor/packaging/LICENSE deleted file mode 100644 index 6f62d44e4e..0000000000 --- a/pipenv/patched/notpip/_vendor/packaging/LICENSE +++ /dev/null @@ -1,3 +0,0 @@ -This software is made available under the terms of *either* of the licenses -found in LICENSE.APACHE or LICENSE.BSD. Contributions to this software is made -under the terms of *both* these licenses. 
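The largest functional change in the msgpack fallback.py diff above is collapsing the per-opcode elif chain in _read_header into the _MSGPACK_HEADERS lookup table: each header byte maps to its payload size, its struct format string (empty when the following byte is the value itself), and, for bin/ext/raw/array/map types, the resulting type tag. A rough standalone sketch of the same dispatch idea, covering only the unsigned-integer family ("read_uint" and "_HEADERS" here are illustrative names, not msgpack's API):

    import struct
    from typing import Dict, Tuple

    # Header byte -> (payload size, struct format); an empty format means the
    # single payload byte is the value itself. Mirrors the table's shape only.
    _HEADERS: Dict[int, Tuple[int, str]] = {
        0xCC: (1, ""),    # uint 8
        0xCD: (2, ">H"),  # uint 16, big-endian
        0xCE: (4, ">I"),  # uint 32, big-endian
        0xCF: (8, ">Q"),  # uint 64, big-endian
    }

    def read_uint(buf: bytes, i: int = 0) -> Tuple[int, int]:
        """Decode one unsigned-int object at offset i; return (value, next offset)."""
        size, fmt = _HEADERS[buf[i]]
        i += 1
        value = struct.unpack_from(fmt, buf, i)[0] if fmt else buf[i]
        return value, i + size

    # 0xCD 0x01 0x00 is the msgpack encoding of the uint 16 value 256.
    assert read_uint(b"\xcd\x01\x00") == (256, 3)

One table lookup plus a shared _reserve/_unpack_from pair replaces roughly a dozen near-identical branches, which is why the hunks above delete so many lines while changing no behavior.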
diff --git a/pipenv/patched/notpip/_vendor/packaging/__about__.py b/pipenv/patched/notpip/_vendor/packaging/__about__.py index e70d692c85..3551bc2d29 100644 --- a/pipenv/patched/notpip/_vendor/packaging/__about__.py +++ b/pipenv/patched/notpip/_vendor/packaging/__about__.py @@ -17,7 +17,7 @@ __summary__ = "Core utilities for Python packages" __uri__ = "https://github.com/pypa/packaging" -__version__ = "21.0" +__version__ = "21.3" __author__ = "Donald Stufft and individual contributors" __email__ = "donald@stufft.io" diff --git a/pipenv/patched/notpip/_vendor/packaging/_musllinux.py b/pipenv/patched/notpip/_vendor/packaging/_musllinux.py index 85450fafa3..8ac3059ba3 100644 --- a/pipenv/patched/notpip/_vendor/packaging/_musllinux.py +++ b/pipenv/patched/notpip/_vendor/packaging/_musllinux.py @@ -98,7 +98,7 @@ def _get_musl_version(executable: str) -> Optional[_MuslVersion]: with contextlib.ExitStack() as stack: try: f = stack.enter_context(open(executable, "rb")) - except IOError: + except OSError: return None ld = _parse_ld_musl_from_elf(f) if not ld: diff --git a/pipenv/patched/notpip/_vendor/packaging/_structures.py b/pipenv/patched/notpip/_vendor/packaging/_structures.py index 951549753a..90a6465f96 100644 --- a/pipenv/patched/notpip/_vendor/packaging/_structures.py +++ b/pipenv/patched/notpip/_vendor/packaging/_structures.py @@ -19,9 +19,6 @@ def __le__(self, other: object) -> bool: def __eq__(self, other: object) -> bool: return isinstance(other, self.__class__) - def __ne__(self, other: object) -> bool: - return not isinstance(other, self.__class__) - def __gt__(self, other: object) -> bool: return True @@ -51,9 +48,6 @@ def __le__(self, other: object) -> bool: def __eq__(self, other: object) -> bool: return isinstance(other, self.__class__) - def __ne__(self, other: object) -> bool: - return not isinstance(other, self.__class__) - def __gt__(self, other: object) -> bool: return False diff --git a/pipenv/patched/notpip/_vendor/packaging/specifiers.py b/pipenv/patched/notpip/_vendor/packaging/specifiers.py index ce66bd4add..0e218a6f9f 100644 --- a/pipenv/patched/notpip/_vendor/packaging/specifiers.py +++ b/pipenv/patched/notpip/_vendor/packaging/specifiers.py @@ -57,13 +57,6 @@ def __eq__(self, other: object) -> bool: objects are equal. """ - @abc.abstractmethod - def __ne__(self, other: object) -> bool: - """ - Returns a boolean representing whether or not the two Specifier like - objects are not equal. 
- """ - @abc.abstractproperty def prereleases(self) -> Optional[bool]: """ @@ -119,7 +112,7 @@ def __repr__(self) -> str: else "" ) - return "<{}({!r}{})>".format(self.__class__.__name__, str(self), pre) + return f"<{self.__class__.__name__}({str(self)!r}{pre})>" def __str__(self) -> str: return "{}{}".format(*self._spec) @@ -142,17 +135,6 @@ def __eq__(self, other: object) -> bool: return self._canonical_spec == other._canonical_spec - def __ne__(self, other: object) -> bool: - if isinstance(other, str): - try: - other = self.__class__(str(other)) - except InvalidSpecifier: - return NotImplemented - elif not isinstance(other, self.__class__): - return NotImplemented - - return self._spec != other._spec - def _get_operator(self, op: str) -> CallableOperator: operator_callable: CallableOperator = getattr( self, f"_compare_{self._operators[op]}" @@ -667,7 +649,7 @@ def __repr__(self) -> str: else "" ) - return "".format(str(self), pre) + return f"" def __str__(self) -> str: return ",".join(sorted(str(s) for s in self._specs)) @@ -706,14 +688,6 @@ def __eq__(self, other: object) -> bool: return self._specs == other._specs - def __ne__(self, other: object) -> bool: - if isinstance(other, (str, _IndividualSpecifier)): - other = SpecifierSet(str(other)) - elif not isinstance(other, SpecifierSet): - return NotImplemented - - return self._specs != other._specs - def __len__(self) -> int: return len(self._specs) diff --git a/pipenv/patched/notpip/_vendor/packaging/tags.py b/pipenv/patched/notpip/_vendor/packaging/tags.py index 82a47cdae8..9a3d25a71c 100644 --- a/pipenv/patched/notpip/_vendor/packaging/tags.py +++ b/pipenv/patched/notpip/_vendor/packaging/tags.py @@ -90,7 +90,7 @@ def __str__(self) -> str: return f"{self._interpreter}-{self._abi}-{self._platform}" def __repr__(self) -> str: - return "<{self} @ {self_id}>".format(self=self, self_id=id(self)) + return f"<{self} @ {id(self)}>" def parse_tag(tag: str) -> FrozenSet[Tag]: @@ -192,7 +192,7 @@ def cpython_tags( if not python_version: python_version = sys.version_info[:2] - interpreter = "cp{}".format(_version_nodot(python_version[:2])) + interpreter = f"cp{_version_nodot(python_version[:2])}" if abis is None: if len(python_version) > 1: @@ -207,7 +207,7 @@ def cpython_tags( except ValueError: pass - platforms = list(platforms or _platform_tags()) + platforms = list(platforms or platform_tags()) for abi in abis: for platform_ in platforms: yield Tag(interpreter, abi, platform_) @@ -251,7 +251,7 @@ def generic_tags( interpreter = "".join([interp_name, interp_version]) if abis is None: abis = _generic_abi() - platforms = list(platforms or _platform_tags()) + platforms = list(platforms or platform_tags()) abis = list(abis) if "none" not in abis: abis.append("none") @@ -268,11 +268,11 @@ def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]: all previous versions of that major version. 
""" if len(py_version) > 1: - yield "py{version}".format(version=_version_nodot(py_version[:2])) - yield "py{major}".format(major=py_version[0]) + yield f"py{_version_nodot(py_version[:2])}" + yield f"py{py_version[0]}" if len(py_version) > 1: for minor in range(py_version[1] - 1, -1, -1): - yield "py{version}".format(version=_version_nodot((py_version[0], minor))) + yield f"py{_version_nodot((py_version[0], minor))}" def compatible_tags( @@ -290,7 +290,7 @@ def compatible_tags( """ if not python_version: python_version = sys.version_info[:2] - platforms = list(platforms or _platform_tags()) + platforms = list(platforms or platform_tags()) for version in _py_interpreter_range(python_version): for platform_ in platforms: yield Tag(version, "none", platform_) @@ -431,7 +431,7 @@ def _generic_platforms() -> Iterator[str]: yield _normalize_string(sysconfig.get_platform()) -def _platform_tags() -> Iterator[str]: +def platform_tags() -> Iterator[str]: """ Provides the platform tags for this installation. """ @@ -481,4 +481,7 @@ def sys_tags(*, warn: bool = False) -> Iterator[Tag]: else: yield from generic_tags() - yield from compatible_tags() + if interp_name == "pp": + yield from compatible_tags(interpreter="pp3") + else: + yield from compatible_tags() diff --git a/pipenv/patched/notpip/_vendor/pep517/LICENSE b/pipenv/patched/notpip/_vendor/pep517/LICENSE deleted file mode 100644 index b0ae9dbc26..0000000000 --- a/pipenv/patched/notpip/_vendor/pep517/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2017 Thomas Kluyver - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/pipenv/patched/notpip/_vendor/pep517/__init__.py b/pipenv/patched/notpip/_vendor/pep517/__init__.py index f064d60c8b..2b6b885679 100644 --- a/pipenv/patched/notpip/_vendor/pep517/__init__.py +++ b/pipenv/patched/notpip/_vendor/pep517/__init__.py @@ -1,6 +1,6 @@ """Wrappers to build Python packages using PEP 517 hooks """ -__version__ = '0.11.0' +__version__ = '0.12.0' from .wrappers import * # noqa: F401, F403 diff --git a/pipenv/patched/notpip/_vendor/pep517/build.py b/pipenv/patched/notpip/_vendor/pep517/build.py index 3b75214532..bc463b2ba6 100644 --- a/pipenv/patched/notpip/_vendor/pep517/build.py +++ b/pipenv/patched/notpip/_vendor/pep517/build.py @@ -31,7 +31,7 @@ def load_system(source_dir): Load the build system from a source dir (pyproject.toml). 
""" pyproject = os.path.join(source_dir, 'pyproject.toml') - with io.open(pyproject, encoding="utf-8") as f: + with io.open(pyproject, 'rb') as f: pyproject_data = toml_load(f) return pyproject_data['build-system'] diff --git a/pipenv/patched/notpip/_vendor/pep517/check.py b/pipenv/patched/notpip/_vendor/pep517/check.py index 719be04033..bf3c722641 100644 --- a/pipenv/patched/notpip/_vendor/pep517/check.py +++ b/pipenv/patched/notpip/_vendor/pep517/check.py @@ -142,7 +142,7 @@ def check(source_dir): return False try: - with io.open(pyproject, encoding="utf-8") as f: + with io.open(pyproject, 'rb') as f: pyproject_data = toml_load(f) # Ensure the mandatory data can be loaded buildsys = pyproject_data['build-system'] diff --git a/pipenv/patched/notpip/_vendor/pep517/compat.py b/pipenv/patched/notpip/_vendor/pep517/compat.py index c2c4aaa8ff..5cee307c61 100644 --- a/pipenv/patched/notpip/_vendor/pep517/compat.py +++ b/pipenv/patched/notpip/_vendor/pep517/compat.py @@ -1,4 +1,5 @@ """Python 2/3 compatibility""" +import io import json import sys @@ -35,7 +36,15 @@ def read_json(path): if sys.version_info < (3, 6): - from pipenv.vendor.toml import load as toml_load # noqa: F401 + from pipenv.vendor.toml import load as _toml_load # noqa: F401 + + def toml_load(f): + w = io.TextIOWrapper(f, encoding="utf8", newline="") + try: + return _toml_load(w) + finally: + w.detach() + from pipenv.vendor.toml import TomlDecodeError as TOMLDecodeError # noqa: F401 else: from pipenv.patched.notpip._vendor.tomli import load as toml_load # noqa: F401 diff --git a/pipenv/patched/notpip/_vendor/pep517/envbuild.py b/pipenv/patched/notpip/_vendor/pep517/envbuild.py index 7c2344bf3b..fe8873c64a 100644 --- a/pipenv/patched/notpip/_vendor/pep517/envbuild.py +++ b/pipenv/patched/notpip/_vendor/pep517/envbuild.py @@ -19,7 +19,7 @@ def _load_pyproject(source_dir): with io.open( os.path.join(source_dir, 'pyproject.toml'), - encoding="utf-8", + 'rb', ) as f: pyproject_data = toml_load(f) buildsys = pyproject_data['build-system'] diff --git a/pipenv/patched/notpip/_vendor/pep517/in_process/_in_process.py b/pipenv/patched/notpip/_vendor/pep517/in_process/_in_process.py index c7f5f0577f..954a4ab05e 100644 --- a/pipenv/patched/notpip/_vendor/pep517/in_process/_in_process.py +++ b/pipenv/patched/notpip/_vendor/pep517/in_process/_in_process.py @@ -103,6 +103,19 @@ def _build_backend(): return obj +def _supported_features(): + """Return the list of options features supported by the backend. + + Returns a list of strings. + The only possible value is 'build_editable'. 
+ """ + backend = _build_backend() + features = [] + if hasattr(backend, "build_editable"): + features.append("build_editable") + return features + + def get_requires_for_build_wheel(config_settings): """Invoke the optional get_requires_for_build_wheel hook @@ -312,6 +325,7 @@ def build_sdist(sdist_directory, config_settings): 'build_editable', 'get_requires_for_build_sdist', 'build_sdist', + '_supported_features', } diff --git a/pipenv/patched/notpip/_vendor/pep517/wrappers.py b/pipenv/patched/notpip/_vendor/pep517/wrappers.py index 52da22e825..e031ed7087 100644 --- a/pipenv/patched/notpip/_vendor/pep517/wrappers.py +++ b/pipenv/patched/notpip/_vendor/pep517/wrappers.py @@ -154,6 +154,10 @@ def subprocess_runner(self, runner): finally: self._subprocess_runner = prev + def _supported_features(self): + """Return the list of optional features supported by the backend.""" + return self._call_hook('_supported_features', {}) + def get_requires_for_build_wheel(self, config_settings=None): """Identify packages required for building a wheel diff --git a/pipenv/patched/notpip/_vendor/pkg_resources/LICENSE b/pipenv/patched/notpip/_vendor/pkg_resources/LICENSE deleted file mode 100644 index 6e0693b4b0..0000000000 --- a/pipenv/patched/notpip/_vendor/pkg_resources/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (C) 2016 Jason R Coombs - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/pipenv/patched/notpip/_vendor/pkg_resources/__init__.py b/pipenv/patched/notpip/_vendor/pkg_resources/__init__.py index b1cb5de7a6..8573b146b3 100644 --- a/pipenv/patched/notpip/_vendor/pkg_resources/__init__.py +++ b/pipenv/patched/notpip/_vendor/pkg_resources/__init__.py @@ -77,7 +77,7 @@ importlib_machinery = None from . 
import py31compat -from pipenv.patched.notpip._vendor import appdirs +from pipenv.patched.notpip._vendor import platformdirs from pipenv.patched.notpip._vendor import packaging __import__('pipenv.patched.notpip._vendor.packaging.version') __import__('pipenv.patched.notpip._vendor.packaging.specifiers') @@ -1310,7 +1310,7 @@ def get_default_cache(): """ return ( os.environ.get('PYTHON_EGG_CACHE') - or appdirs.user_cache_dir(appname='Python-Eggs') + or platformdirs.user_cache_dir(appname='Python-Eggs') ) diff --git a/pipenv/patched/notpip/_vendor/platformdirs/__init__.py b/pipenv/patched/notpip/_vendor/platformdirs/__init__.py new file mode 100644 index 0000000000..7749e5e119 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/platformdirs/__init__.py @@ -0,0 +1,331 @@ +""" +Utilities for determining application-specific dirs. See for details and +usage. +""" +from __future__ import annotations + +import importlib +import os +import sys +from pathlib import Path +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from pipenv.patched.notpip._vendor.typing_extensions import Literal # pragma: no cover + +from .api import PlatformDirsABC +from .version import __version__, __version_info__ + + +def _set_platform_dir_class() -> type[PlatformDirsABC]: + if os.getenv("ANDROID_DATA") == "/data" and os.getenv("ANDROID_ROOT") == "/system": + module, name = "pipenv.patched.notpip._vendor.platformdirs.android", "Android" + elif sys.platform == "win32": + module, name = "pipenv.patched.notpip._vendor.platformdirs.windows", "Windows" + elif sys.platform == "darwin": + module, name = "pipenv.patched.notpip._vendor.platformdirs.macos", "MacOS" + else: + module, name = "pipenv.patched.notpip._vendor.platformdirs.unix", "Unix" + result: type[PlatformDirsABC] = getattr(importlib.import_module(module), name) + return result + + +PlatformDirs = _set_platform_dir_class() #: Currently active platform +AppDirs = PlatformDirs #: Backwards compatibility with appdirs + + +def user_data_dir( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + roaming: bool = False, +) -> str: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param roaming: See `roaming `. + :returns: data directory tied to the user + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_data_dir + + +def site_data_dir( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + multipath: bool = False, +) -> str: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param multipath: See `roaming `. + :returns: data directory shared by users + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_data_dir + + +def user_config_dir( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + roaming: bool = False, +) -> str: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param roaming: See `roaming `. 
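The pkg_resources hunk above swaps the unmaintained `appdirs` for `platformdirs`, whose module-level helpers keep the same call shape, so `get_default_cache()` only needed its import changed. A sketch of the equivalent lookup against the upstream `platformdirs` distribution:

```python
import os

import platformdirs  # upstream distribution of the vendored package above

# Mirrors pkg_resources.get_default_cache() after the hunk above.
egg_cache = (
    os.environ.get("PYTHON_EGG_CACHE")
    or platformdirs.user_cache_dir(appname="Python-Eggs")
)
print(egg_cache)  # e.g. ~/.cache/Python-Eggs on Linux
```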
+ :returns: config directory tied to the user + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_config_dir + + +def site_config_dir( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + multipath: bool = False, +) -> str: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param multipath: See `roaming `. + :returns: config directory shared by the users + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_config_dir + + +def user_cache_dir( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + opinion: bool = True, +) -> str: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param opinion: See `roaming `. + :returns: cache directory tied to the user + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_cache_dir + + +def user_state_dir( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + roaming: bool = False, +) -> str: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param roaming: See `roaming `. + :returns: state directory tied to the user + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_state_dir + + +def user_log_dir( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + opinion: bool = True, +) -> str: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param opinion: See `roaming `. + :returns: log directory tied to the user + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_log_dir + + +def user_documents_dir() -> str: + """ + :returns: documents directory tied to the user + """ + return PlatformDirs().user_documents_dir + + +def user_runtime_dir( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + opinion: bool = True, +) -> str: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param opinion: See `opinion `. + :returns: runtime directory tied to the user + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_runtime_dir + + +def user_data_path( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + roaming: bool = False, +) -> Path: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param roaming: See `roaming `. + :returns: data path tied to the user + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_data_path + + +def site_data_path( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + multipath: bool = False, +) -> Path: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param multipath: See `multipath `. 
+ :returns: data path shared by users + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_data_path + + +def user_config_path( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + roaming: bool = False, +) -> Path: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param roaming: See `roaming `. + :returns: config path tied to the user + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_config_path + + +def site_config_path( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + multipath: bool = False, +) -> Path: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param multipath: See `roaming `. + :returns: config path shared by the users + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_config_path + + +def user_cache_path( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + opinion: bool = True, +) -> Path: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param opinion: See `roaming `. + :returns: cache path tied to the user + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_cache_path + + +def user_state_path( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + roaming: bool = False, +) -> Path: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param roaming: See `roaming `. + :returns: state path tied to the user + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_state_path + + +def user_log_path( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + opinion: bool = True, +) -> Path: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param opinion: See `roaming `. + :returns: log path tied to the user + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_log_path + + +def user_documents_path() -> Path: + """ + :returns: documents path tied to the user + """ + return PlatformDirs().user_documents_path + + +def user_runtime_path( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + opinion: bool = True, +) -> Path: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param opinion: See `opinion `. 
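Alongside each `*_dir` helper returning a `str`, the module above defines a `*_path` twin returning a `pathlib.Path`; both delegate to the same `PlatformDirs` property. A short usage sketch (the app name is a placeholder):

```python
from pathlib import Path

from platformdirs import user_cache_dir, user_cache_path

cache_dir: str = user_cache_dir(appname="MyApp")     # plain string
cache_path: Path = user_cache_path(appname="MyApp")  # same location as a Path

assert Path(cache_dir) == cache_path
cache_path.mkdir(parents=True, exist_ok=True)
```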
+ :returns: runtime path tied to the user + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_runtime_path + + +__all__ = [ + "__version__", + "__version_info__", + "PlatformDirs", + "AppDirs", + "PlatformDirsABC", + "user_data_dir", + "user_config_dir", + "user_cache_dir", + "user_state_dir", + "user_log_dir", + "user_documents_dir", + "user_runtime_dir", + "site_data_dir", + "site_config_dir", + "user_data_path", + "user_config_path", + "user_cache_path", + "user_state_path", + "user_log_path", + "user_documents_path", + "user_runtime_path", + "site_data_path", + "site_config_path", +] diff --git a/pipenv/patched/notpip/_vendor/platformdirs/__main__.py b/pipenv/patched/notpip/_vendor/platformdirs/__main__.py new file mode 100644 index 0000000000..b264b14b40 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/platformdirs/__main__.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +from pipenv.patched.notpip._vendor.platformdirs import PlatformDirs, __version__ + +PROPS = ( + "user_data_dir", + "user_config_dir", + "user_cache_dir", + "user_state_dir", + "user_log_dir", + "user_documents_dir", + "user_runtime_dir", + "site_data_dir", + "site_config_dir", +) + + +def main() -> None: + app_name = "MyApp" + app_author = "MyCompany" + + print(f"-- platformdirs {__version__} --") + + print("-- app dirs (with optional 'version')") + dirs = PlatformDirs(app_name, app_author, version="1.0") + for prop in PROPS: + print(f"{prop}: {getattr(dirs, prop)}") + + print("\n-- app dirs (without optional 'version')") + dirs = PlatformDirs(app_name, app_author) + for prop in PROPS: + print(f"{prop}: {getattr(dirs, prop)}") + + print("\n-- app dirs (without optional 'appauthor')") + dirs = PlatformDirs(app_name) + for prop in PROPS: + print(f"{prop}: {getattr(dirs, prop)}") + + print("\n-- app dirs (with disabled 'appauthor')") + dirs = PlatformDirs(app_name, appauthor=False) + for prop in PROPS: + print(f"{prop}: {getattr(dirs, prop)}") + + +if __name__ == "__main__": + main() diff --git a/pipenv/patched/notpip/_vendor/platformdirs/android.py b/pipenv/patched/notpip/_vendor/platformdirs/android.py new file mode 100644 index 0000000000..a68405871f --- /dev/null +++ b/pipenv/patched/notpip/_vendor/platformdirs/android.py @@ -0,0 +1,119 @@ +from __future__ import annotations + +import os +import re +import sys +from functools import lru_cache + +from .api import PlatformDirsABC + + +class Android(PlatformDirsABC): + """ + Follows the guidance `from here `_. Makes use of the + `appname ` and + `version `. + """ + + @property + def user_data_dir(self) -> str: + """:return: data directory tied to the user, e.g. ``/data/user///files/``""" + return self._append_app_name_and_version(_android_folder(), "files") + + @property + def site_data_dir(self) -> str: + """:return: data directory shared by users, same as `user_data_dir`""" + return self.user_data_dir + + @property + def user_config_dir(self) -> str: + """ + :return: config directory tied to the user, e.g. ``/data/user///shared_prefs/`` + """ + return self._append_app_name_and_version(_android_folder(), "shared_prefs") + + @property + def site_config_dir(self) -> str: + """:return: config directory shared by the users, same as `user_config_dir`""" + return self.user_config_dir + + @property + def user_cache_dir(self) -> str: + """:return: cache directory tied to the user, e.g. e.g. 
``/data/user///cache/``""" + return self._append_app_name_and_version(_android_folder(), "cache") + + @property + def user_state_dir(self) -> str: + """:return: state directory tied to the user, same as `user_data_dir`""" + return self.user_data_dir + + @property + def user_log_dir(self) -> str: + """ + :return: log directory tied to the user, same as `user_cache_dir` if not opinionated else ``log`` in it, + e.g. ``/data/user///cache//log`` + """ + path = self.user_cache_dir + if self.opinion: + path = os.path.join(path, "log") + return path + + @property + def user_documents_dir(self) -> str: + """ + :return: documents directory tied to the user e.g. ``/storage/emulated/0/Documents`` + """ + return _android_documents_folder() + + @property + def user_runtime_dir(self) -> str: + """ + :return: runtime directory tied to the user, same as `user_cache_dir` if not opinionated else ``tmp`` in it, + e.g. ``/data/user///cache//tmp`` + """ + path = self.user_cache_dir + if self.opinion: + path = os.path.join(path, "tmp") + return path + + +@lru_cache(maxsize=1) +def _android_folder() -> str: + """:return: base folder for the Android OS""" + try: + # First try to get path to android app via pyjnius + from jnius import autoclass + + Context = autoclass("android.content.Context") # noqa: N806 + result: str = Context.getFilesDir().getParentFile().getAbsolutePath() + except Exception: + # if fails find an android folder looking path on the sys.path + pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files") + for path in sys.path: + if pattern.match(path): + result = path.split("/files")[0] + break + else: + raise OSError("Cannot find path to android app folder") + return result + + +@lru_cache(maxsize=1) +def _android_documents_folder() -> str: + """:return: documents folder for the Android OS""" + # Get directories with pyjnius + try: + from jnius import autoclass + + Context = autoclass("android.content.Context") # noqa: N806 + Environment = autoclass("android.os.Environment") # noqa: N806 + documents_dir: str = Context.getExternalFilesDir(Environment.DIRECTORY_DOCUMENTS).getAbsolutePath() + except Exception: + documents_dir = "/storage/emulated/0/Documents" + + return documents_dir + + +__all__ = [ + "Android", +] diff --git a/pipenv/patched/notpip/_vendor/platformdirs/api.py b/pipenv/patched/notpip/_vendor/platformdirs/api.py new file mode 100644 index 0000000000..6f6e2c2c69 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/platformdirs/api.py @@ -0,0 +1,156 @@ +from __future__ import annotations + +import os +import sys +from abc import ABC, abstractmethod +from pathlib import Path + +if sys.version_info >= (3, 8): # pragma: no branch + from typing import Literal # pragma: no cover + + +class PlatformDirsABC(ABC): + """ + Abstract base class for platform directories. + """ + + def __init__( + self, + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + roaming: bool = False, + multipath: bool = False, + opinion: bool = True, + ): + """ + Create a new platform directory. + + :param appname: See `appname`. + :param appauthor: See `appauthor`. + :param version: See `version`. + :param roaming: See `roaming`. + :param multipath: See `multipath`. + :param opinion: See `opinion`. + """ + self.appname = appname #: The name of application. + self.appauthor = appauthor + """ + The name of the app author or distributing body for this application. Typically, it is the owning company name. + Defaults to `appname`. You may pass ``False`` to disable it. 
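Everything in `PlatformDirsABC.__init__` above is plumbing for the six constructor knobs (`appname`, `appauthor`, `version`, `roaming`, `multipath`, `opinion`); `_append_app_name_and_version` then stitches the optional parts onto a platform base directory. A sketch of how the knobs combine, along the lines of the `__main__.py` demo above (names are placeholders):

```python
from platformdirs import PlatformDirs

dirs = PlatformDirs(appname="MyApp", appauthor="MyCompany", version="1.0")
print(dirs.user_data_dir)
# Linux:   ~/.local/share/MyApp/1.0          (appauthor is unused on Unix)
# Windows: %LOCALAPPDATA%\MyCompany\MyApp\1.0
# macOS:   ~/Library/Application Support/MyApp/1.0

no_author = PlatformDirs(appname="MyApp", appauthor=False)  # suppress author dir
print(no_author.user_data_dir)
```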
+ """ + self.version = version + """ + An optional version path element to append to the path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this would typically be ``.``. + """ + self.roaming = roaming + """ + Whether to use the roaming appdata directory on Windows. That means that for users on a Windows network setup + for roaming profiles, this user data will be synced on login (see + `here `_). + """ + self.multipath = multipath + """ + An optional parameter only applicable to Unix/Linux which indicates that the entire list of data dirs should be + returned. By default, the first item would only be returned. + """ + self.opinion = opinion #: A flag to indicating to use opinionated values. + + def _append_app_name_and_version(self, *base: str) -> str: + params = list(base[1:]) + if self.appname: + params.append(self.appname) + if self.version: + params.append(self.version) + return os.path.join(base[0], *params) + + @property + @abstractmethod + def user_data_dir(self) -> str: + """:return: data directory tied to the user""" + + @property + @abstractmethod + def site_data_dir(self) -> str: + """:return: data directory shared by users""" + + @property + @abstractmethod + def user_config_dir(self) -> str: + """:return: config directory tied to the user""" + + @property + @abstractmethod + def site_config_dir(self) -> str: + """:return: config directory shared by the users""" + + @property + @abstractmethod + def user_cache_dir(self) -> str: + """:return: cache directory tied to the user""" + + @property + @abstractmethod + def user_state_dir(self) -> str: + """:return: state directory tied to the user""" + + @property + @abstractmethod + def user_log_dir(self) -> str: + """:return: log directory tied to the user""" + + @property + @abstractmethod + def user_documents_dir(self) -> str: + """:return: documents directory tied to the user""" + + @property + @abstractmethod + def user_runtime_dir(self) -> str: + """:return: runtime directory tied to the user""" + + @property + def user_data_path(self) -> Path: + """:return: data path tied to the user""" + return Path(self.user_data_dir) + + @property + def site_data_path(self) -> Path: + """:return: data path shared by users""" + return Path(self.site_data_dir) + + @property + def user_config_path(self) -> Path: + """:return: config path tied to the user""" + return Path(self.user_config_dir) + + @property + def site_config_path(self) -> Path: + """:return: config path shared by the users""" + return Path(self.site_config_dir) + + @property + def user_cache_path(self) -> Path: + """:return: cache path tied to the user""" + return Path(self.user_cache_dir) + + @property + def user_state_path(self) -> Path: + """:return: state path tied to the user""" + return Path(self.user_state_dir) + + @property + def user_log_path(self) -> Path: + """:return: log path tied to the user""" + return Path(self.user_log_dir) + + @property + def user_documents_path(self) -> Path: + """:return: documents path tied to the user""" + return Path(self.user_documents_dir) + + @property + def user_runtime_path(self) -> Path: + """:return: runtime path tied to the user""" + return Path(self.user_runtime_dir) diff --git a/pipenv/patched/notpip/_vendor/platformdirs/macos.py b/pipenv/patched/notpip/_vendor/platformdirs/macos.py new file mode 100644 index 0000000000..a01337c776 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/platformdirs/macos.py @@ -0,0 +1,64 @@ +from __future__ import annotations + 
+import os + +from .api import PlatformDirsABC + + +class MacOS(PlatformDirsABC): + """ + Platform directories for the macOS operating system. Follows the guidance from `Apple documentation + `_. + Makes use of the `appname ` and + `version `. + """ + + @property + def user_data_dir(self) -> str: + """:return: data directory tied to the user, e.g. ``~/Library/Application Support/$appname/$version``""" + return self._append_app_name_and_version(os.path.expanduser("~/Library/Application Support/")) + + @property + def site_data_dir(self) -> str: + """:return: data directory shared by users, e.g. ``/Library/Application Support/$appname/$version``""" + return self._append_app_name_and_version("/Library/Application Support") + + @property + def user_config_dir(self) -> str: + """:return: config directory tied to the user, e.g. ``~/Library/Preferences/$appname/$version``""" + return self._append_app_name_and_version(os.path.expanduser("~/Library/Preferences/")) + + @property + def site_config_dir(self) -> str: + """:return: config directory shared by the users, e.g. ``/Library/Preferences/$appname``""" + return self._append_app_name_and_version("/Library/Preferences") + + @property + def user_cache_dir(self) -> str: + """:return: cache directory tied to the user, e.g. ``~/Library/Caches/$appname/$version``""" + return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches")) + + @property + def user_state_dir(self) -> str: + """:return: state directory tied to the user, same as `user_data_dir`""" + return self.user_data_dir + + @property + def user_log_dir(self) -> str: + """:return: log directory tied to the user, e.g. ``~/Library/Logs/$appname/$version``""" + return self._append_app_name_and_version(os.path.expanduser("~/Library/Logs")) + + @property + def user_documents_dir(self) -> str: + """:return: documents directory tied to the user, e.g. ``~/Documents``""" + return os.path.expanduser("~/Documents") + + @property + def user_runtime_dir(self) -> str: + """:return: runtime directory tied to the user, e.g. ``~/Library/Caches/TemporaryItems/$appname/$version``""" + return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches/TemporaryItems")) + + +__all__ = [ + "MacOS", +] diff --git a/pipenv/patched/notpip/_vendor/platformdirs/unix.py b/pipenv/patched/notpip/_vendor/platformdirs/unix.py new file mode 100644 index 0000000000..2fbd4d4f36 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/platformdirs/unix.py @@ -0,0 +1,181 @@ +from __future__ import annotations + +import os +import sys +from configparser import ConfigParser +from pathlib import Path + +from .api import PlatformDirsABC + +if sys.platform.startswith("linux"): # pragma: no branch # no op check, only to please the type checker + from os import getuid +else: + + def getuid() -> int: + raise RuntimeError("should only be used on Linux") + + +class Unix(PlatformDirsABC): + """ + On Unix/Linux, we follow the + `XDG Basedir Spec `_. The spec allows + overriding directories with environment variables. The examples show are the default values, alongside the name of + the environment variable that overrides them. Makes use of the + `appname `, + `version `, + `multipath `, + `opinion `. + """ + + @property + def user_data_dir(self) -> str: + """ + :return: data directory tied to the user, e.g. 
``~/.local/share/$appname/$version`` or + ``$XDG_DATA_HOME/$appname/$version`` + """ + path = os.environ.get("XDG_DATA_HOME", "") + if not path.strip(): + path = os.path.expanduser("~/.local/share") + return self._append_app_name_and_version(path) + + @property + def site_data_dir(self) -> str: + """ + :return: data directories shared by users (if `multipath ` is + enabled and ``XDG_DATA_DIR`` is set and a multi path the response is also a multi path separated by the OS + path separator), e.g. ``/usr/local/share/$appname/$version`` or ``/usr/share/$appname/$version`` + """ + # XDG default for $XDG_DATA_DIRS; only first, if multipath is False + path = os.environ.get("XDG_DATA_DIRS", "") + if not path.strip(): + path = f"/usr/local/share{os.pathsep}/usr/share" + return self._with_multi_path(path) + + def _with_multi_path(self, path: str) -> str: + path_list = path.split(os.pathsep) + if not self.multipath: + path_list = path_list[0:1] + path_list = [self._append_app_name_and_version(os.path.expanduser(p)) for p in path_list] + return os.pathsep.join(path_list) + + @property + def user_config_dir(self) -> str: + """ + :return: config directory tied to the user, e.g. ``~/.config/$appname/$version`` or + ``$XDG_CONFIG_HOME/$appname/$version`` + """ + path = os.environ.get("XDG_CONFIG_HOME", "") + if not path.strip(): + path = os.path.expanduser("~/.config") + return self._append_app_name_and_version(path) + + @property + def site_config_dir(self) -> str: + """ + :return: config directories shared by users (if `multipath ` + is enabled and ``XDG_DATA_DIR`` is set and a multi path the response is also a multi path separated by the OS + path separator), e.g. ``/etc/xdg/$appname/$version`` + """ + # XDG default for $XDG_CONFIG_DIRS only first, if multipath is False + path = os.environ.get("XDG_CONFIG_DIRS", "") + if not path.strip(): + path = "/etc/xdg" + return self._with_multi_path(path) + + @property + def user_cache_dir(self) -> str: + """ + :return: cache directory tied to the user, e.g. ``~/.cache/$appname/$version`` or + ``~/$XDG_CACHE_HOME/$appname/$version`` + """ + path = os.environ.get("XDG_CACHE_HOME", "") + if not path.strip(): + path = os.path.expanduser("~/.cache") + return self._append_app_name_and_version(path) + + @property + def user_state_dir(self) -> str: + """ + :return: state directory tied to the user, e.g. ``~/.local/state/$appname/$version`` or + ``$XDG_STATE_HOME/$appname/$version`` + """ + path = os.environ.get("XDG_STATE_HOME", "") + if not path.strip(): + path = os.path.expanduser("~/.local/state") + return self._append_app_name_and_version(path) + + @property + def user_log_dir(self) -> str: + """ + :return: log directory tied to the user, same as `user_data_dir` if not opinionated else ``log`` in it + """ + path = self.user_cache_dir + if self.opinion: + path = os.path.join(path, "log") + return path + + @property + def user_documents_dir(self) -> str: + """ + :return: documents directory tied to the user, e.g. ``~/Documents`` + """ + documents_dir = _get_user_dirs_folder("XDG_DOCUMENTS_DIR") + if documents_dir is None: + documents_dir = os.environ.get("XDG_DOCUMENTS_DIR", "").strip() + if not documents_dir: + documents_dir = os.path.expanduser("~/Documents") + + return documents_dir + + @property + def user_runtime_dir(self) -> str: + """ + :return: runtime directory tied to the user, e.g. 
``/run/user/$(id -u)/$appname/$version`` or + ``$XDG_RUNTIME_DIR/$appname/$version`` + """ + path = os.environ.get("XDG_RUNTIME_DIR", "") + if not path.strip(): + path = f"/run/user/{getuid()}" + return self._append_app_name_and_version(path) + + @property + def site_data_path(self) -> Path: + """:return: data path shared by users. Only return first item, even if ``multipath`` is set to ``True``""" + return self._first_item_as_path_if_multipath(self.site_data_dir) + + @property + def site_config_path(self) -> Path: + """:return: config path shared by the users. Only return first item, even if ``multipath`` is set to ``True``""" + return self._first_item_as_path_if_multipath(self.site_config_dir) + + def _first_item_as_path_if_multipath(self, directory: str) -> Path: + if self.multipath: + # If multipath is True, the first path is returned. + directory = directory.split(os.pathsep)[0] + return Path(directory) + + +def _get_user_dirs_folder(key: str) -> str | None: + """Return directory from user-dirs.dirs config file. See https://freedesktop.org/wiki/Software/xdg-user-dirs/""" + user_dirs_config_path = os.path.join(Unix().user_config_dir, "user-dirs.dirs") + if os.path.exists(user_dirs_config_path): + parser = ConfigParser() + + with open(user_dirs_config_path) as stream: + # Add fake section header, so ConfigParser doesn't complain + parser.read_string(f"[top]\n{stream.read()}") + + if key not in parser["top"]: + return None + + path = parser["top"][key].strip('"') + # Handle relative home paths + path = path.replace("$HOME", os.path.expanduser("~")) + return path + + return None + + +__all__ = [ + "Unix", +] diff --git a/pipenv/patched/notpip/_vendor/platformdirs/version.py b/pipenv/patched/notpip/_vendor/platformdirs/version.py new file mode 100644 index 0000000000..175ded8561 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/platformdirs/version.py @@ -0,0 +1,4 @@ +""" Version information """ + +__version__ = "2.4.1" +__version_info__ = (2, 4, 1) diff --git a/pipenv/patched/notpip/_vendor/platformdirs/windows.py b/pipenv/patched/notpip/_vendor/platformdirs/windows.py new file mode 100644 index 0000000000..ef972bdf29 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/platformdirs/windows.py @@ -0,0 +1,182 @@ +from __future__ import annotations + +import ctypes +import os +from functools import lru_cache +from typing import Callable + +from .api import PlatformDirsABC + + +class Windows(PlatformDirsABC): + """`MSDN on where to store app data files + `_. + Makes use of the + `appname `, + `appauthor `, + `version `, + `roaming `, + `opinion `.""" + + @property + def user_data_dir(self) -> str: + """ + :return: data directory tied to the user, e.g. + ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname`` (not roaming) or + ``%USERPROFILE%\\AppData\\Roaming\\$appauthor\\$appname`` (roaming) + """ + const = "CSIDL_APPDATA" if self.roaming else "CSIDL_LOCAL_APPDATA" + path = os.path.normpath(get_win_folder(const)) + return self._append_parts(path) + + def _append_parts(self, path: str, *, opinion_value: str | None = None) -> str: + params = [] + if self.appname: + if self.appauthor is not False: + author = self.appauthor or self.appname + params.append(author) + params.append(self.appname) + if opinion_value is not None and self.opinion: + params.append(opinion_value) + if self.version: + params.append(self.version) + return os.path.join(path, *params) + + @property + def site_data_dir(self) -> str: + """:return: data directory shared by users, e.g. 
``C:\\ProgramData\\$appauthor\\$appname``""" + path = os.path.normpath(get_win_folder("CSIDL_COMMON_APPDATA")) + return self._append_parts(path) + + @property + def user_config_dir(self) -> str: + """:return: config directory tied to the user, same as `user_data_dir`""" + return self.user_data_dir + + @property + def site_config_dir(self) -> str: + """:return: config directory shared by the users, same as `site_data_dir`""" + return self.site_data_dir + + @property + def user_cache_dir(self) -> str: + """ + :return: cache directory tied to the user (if opinionated with ``Cache`` folder within ``$appname``) e.g. + ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname\\Cache\\$version`` + """ + path = os.path.normpath(get_win_folder("CSIDL_LOCAL_APPDATA")) + return self._append_parts(path, opinion_value="Cache") + + @property + def user_state_dir(self) -> str: + """:return: state directory tied to the user, same as `user_data_dir`""" + return self.user_data_dir + + @property + def user_log_dir(self) -> str: + """ + :return: log directory tied to the user, same as `user_data_dir` if not opinionated else ``Logs`` in it + """ + path = self.user_data_dir + if self.opinion: + path = os.path.join(path, "Logs") + return path + + @property + def user_documents_dir(self) -> str: + """ + :return: documents directory tied to the user e.g. ``%USERPROFILE%\\Documents`` + """ + return os.path.normpath(get_win_folder("CSIDL_PERSONAL")) + + @property + def user_runtime_dir(self) -> str: + """ + :return: runtime directory tied to the user, e.g. + ``%USERPROFILE%\\AppData\\Local\\Temp\\$appauthor\\$appname`` + """ + path = os.path.normpath(os.path.join(get_win_folder("CSIDL_LOCAL_APPDATA"), "Temp")) + return self._append_parts(path) + + +def get_win_folder_from_env_vars(csidl_name: str) -> str: + """Get folder from environment variables.""" + if csidl_name == "CSIDL_PERSONAL": # does not have an environment name + return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Documents") + + env_var_name = { + "CSIDL_APPDATA": "APPDATA", + "CSIDL_COMMON_APPDATA": "ALLUSERSPROFILE", + "CSIDL_LOCAL_APPDATA": "LOCALAPPDATA", + }.get(csidl_name) + if env_var_name is None: + raise ValueError(f"Unknown CSIDL name: {csidl_name}") + result = os.environ.get(env_var_name) + if result is None: + raise ValueError(f"Unset environment variable: {env_var_name}") + return result + + +def get_win_folder_from_registry(csidl_name: str) -> str: + """Get folder from the registry. + + This is a fallback technique at best. I'm not sure if using the + registry for this guarantees us the correct answer for all CSIDL_* + names. 
+ """ + shell_folder_name = { + "CSIDL_APPDATA": "AppData", + "CSIDL_COMMON_APPDATA": "Common AppData", + "CSIDL_LOCAL_APPDATA": "Local AppData", + "CSIDL_PERSONAL": "Personal", + }.get(csidl_name) + if shell_folder_name is None: + raise ValueError(f"Unknown CSIDL name: {csidl_name}") + + import winreg + + key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders") + directory, _ = winreg.QueryValueEx(key, shell_folder_name) + return str(directory) + + +def get_win_folder_via_ctypes(csidl_name: str) -> str: + """Get folder with ctypes.""" + csidl_const = { + "CSIDL_APPDATA": 26, + "CSIDL_COMMON_APPDATA": 35, + "CSIDL_LOCAL_APPDATA": 28, + "CSIDL_PERSONAL": 5, + }.get(csidl_name) + if csidl_const is None: + raise ValueError(f"Unknown CSIDL name: {csidl_name}") + + buf = ctypes.create_unicode_buffer(1024) + windll = getattr(ctypes, "windll") # noqa: B009 # using getattr to avoid false positive with mypy type checker + windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) + + # Downgrade to short path name if it has highbit chars. + if any(ord(c) > 255 for c in buf): + buf2 = ctypes.create_unicode_buffer(1024) + if windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): + buf = buf2 + + return buf.value + + +def _pick_get_win_folder() -> Callable[[str], str]: + if hasattr(ctypes, "windll"): + return get_win_folder_via_ctypes + try: + import winreg # noqa: F401 + except ImportError: + return get_win_folder_from_env_vars + else: + return get_win_folder_from_registry + + +get_win_folder = lru_cache(maxsize=None)(_pick_get_win_folder()) + +__all__ = [ + "Windows", +] diff --git a/pipenv/patched/notpip/_vendor/progress/LICENSE b/pipenv/patched/notpip/_vendor/progress/LICENSE deleted file mode 100644 index 059cc05661..0000000000 --- a/pipenv/patched/notpip/_vendor/progress/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) 2012 Giorgos Verigakis -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff --git a/pipenv/patched/notpip/_vendor/progress/__init__.py b/pipenv/patched/notpip/_vendor/progress/__init__.py index e434c257fe..b434b300ad 100644 --- a/pipenv/patched/notpip/_vendor/progress/__init__.py +++ b/pipenv/patched/notpip/_vendor/progress/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2012 Giorgos Verigakis +# Copyright (c) 2012 Georgios Verigakis # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above @@ -24,7 +24,7 @@ from time import time as monotonic -__version__ = '1.5' +__version__ = '1.6' HIDE_CURSOR = '\x1b[?25l' SHOW_CURSOR = '\x1b[?25h' @@ -46,14 +46,19 @@ def __init__(self, message='', **kwargs): for key, val in kwargs.items(): setattr(self, key, val) - self._width = 0 + self._max_width = 0 + self._hidden_cursor = False self.message = message if self.file and self.is_tty(): if self.hide_cursor: print(HIDE_CURSOR, end='', file=self.file) - print(self.message, end='', file=self.file) - self.file.flush() + self._hidden_cursor = True + self.writeln('') + + def __del__(self): + if self._hidden_cursor: + print(SHOW_CURSOR, end='', file=self.file) def __getitem__(self, key): if key.startswith('_'): @@ -85,31 +90,30 @@ def update(self): def start(self): pass - def clearln(self): - if self.file and self.is_tty(): - print('\r\x1b[K', end='', file=self.file) - - def write(self, s): - if self.file and self.is_tty(): - line = self.message + s.ljust(self._width) - print('\r' + line, end='', file=self.file) - self._width = max(self._width, len(s)) - self.file.flush() - def writeln(self, line): if self.file and self.is_tty(): - self.clearln() - print(line, end='', file=self.file) + width = len(line) + if width < self._max_width: + # Add padding to cover previous contents + line += ' ' * (self._max_width - width) + else: + self._max_width = width + print('\r' + line, end='', file=self.file) self.file.flush() def finish(self): if self.file and self.is_tty(): print(file=self.file) - if self.hide_cursor: + if self._hidden_cursor: print(SHOW_CURSOR, end='', file=self.file) + self._hidden_cursor = False def is_tty(self): - return self.file.isatty() if self.check_tty else True + try: + return self.file.isatty() if self.check_tty else True + except AttributeError: + msg = "%s has no attribute 'isatty'. Try setting check_tty=False." % self + raise AttributeError(msg) def next(self, n=1): now = monotonic() @@ -120,10 +124,13 @@ def next(self, n=1): self.update() def iter(self, it): + self.iter_value = None with self: for x in it: + self.iter_value = x yield x self.next() + del self.iter_value def __enter__(self): self.start() @@ -152,6 +159,8 @@ def percent(self): @property def progress(self): + if self.max == 0: + return 0 return min(1, self.index / self.max) @property @@ -171,7 +180,10 @@ def iter(self, it): except TypeError: pass + self.iter_value = None with self: for x in it: + self.iter_value = x yield x self.next() + del self.iter_value diff --git a/pipenv/patched/notpip/_vendor/progress/bar.py b/pipenv/patched/notpip/_vendor/progress/bar.py index 8819efda65..df4e8b61f8 100644 --- a/pipenv/patched/notpip/_vendor/progress/bar.py +++ b/pipenv/patched/notpip/_vendor/progress/bar.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright (c) 2012 Giorgos Verigakis +# Copyright (c) 2012 Georgios Verigakis # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above @@ -19,6 +19,7 @@ import sys from . 
import Progress +from .colors import color class Bar(Progress): @@ -28,13 +29,14 @@ class Bar(Progress): bar_suffix = '| ' empty_fill = ' ' fill = '#' + color = None def update(self): filled_length = int(self.width * self.progress) empty_length = self.width - filled_length message = self.message % self - bar = self.fill * filled_length + bar = color(self.fill * filled_length, fg=self.color) empty = self.empty_fill * empty_length suffix = self.suffix % self line = ''.join([message, self.bar_prefix, bar, empty, self.bar_suffix, @@ -74,7 +76,7 @@ def update(self): nempty = self.width - nfull # Number of empty chars message = self.message % self - bar = self.phases[-1] * nfull + bar = color(self.phases[-1] * nfull, fg=self.color) current = self.phases[phase] if phase > 0 else '' empty = self.empty_fill * max(0, nempty - len(current)) suffix = self.suffix % self diff --git a/pipenv/patched/notpip/_vendor/progress/colors.py b/pipenv/patched/notpip/_vendor/progress/colors.py new file mode 100644 index 0000000000..4e770f868b --- /dev/null +++ b/pipenv/patched/notpip/_vendor/progress/colors.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2020 Georgios Verigakis +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
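With the `colors` module added above, `Bar.update()` now routes the fill characters through `color(..., fg=self.color)`, so a foreground color can be set like any other `Bar` attribute (keyword arguments are applied via `setattr` in `Infinite.__init__`). A small sketch against the upstream `progress` 1.6 release, which this vendored copy tracks:

```python
from time import sleep

from progress.bar import Bar

bar = Bar("Processing ", max=20, color="green")  # any name from COLORS
for _ in range(20):
    sleep(0.05)
    bar.next()
bar.finish()
```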
+ +from functools import partial + + +COLORS = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', + 'white') +STYLES = ('bold', 'faint', 'italic', 'underline', 'blink', 'blink2', + 'negative', 'concealed', 'crossed') + + +def color(s, fg=None, bg=None, style=None): + sgr = [] + + if fg: + if fg in COLORS: + sgr.append(str(30 + COLORS.index(fg))) + elif isinstance(fg, int) and 0 <= fg <= 255: + sgr.append('38;5;%d' % int(fg)) + else: + raise Exception('Invalid color "%s"' % fg) + + if bg: + if bg in COLORS: + sgr.append(str(40 + COLORS.index(bg))) + elif isinstance(bg, int) and 0 <= bg <= 255: + sgr.append('48;5;%d' % bg) + else: + raise Exception('Invalid color "%s"' % bg) + + if style: + for st in style.split('+'): + if st in STYLES: + sgr.append(str(1 + STYLES.index(st))) + else: + raise Exception('Invalid style "%s"' % st) + + if sgr: + prefix = '\x1b[' + ';'.join(sgr) + 'm' + suffix = '\x1b[0m' + return prefix + s + suffix + else: + return s + + +# Foreground shortcuts +black = partial(color, fg='black') +red = partial(color, fg='red') +green = partial(color, fg='green') +yellow = partial(color, fg='yellow') +blue = partial(color, fg='blue') +magenta = partial(color, fg='magenta') +cyan = partial(color, fg='cyan') +white = partial(color, fg='white') + +# Style shortcuts +bold = partial(color, style='bold') +faint = partial(color, style='faint') +italic = partial(color, style='italic') +underline = partial(color, style='underline') +blink = partial(color, style='blink') +blink2 = partial(color, style='blink2') +negative = partial(color, style='negative') +concealed = partial(color, style='concealed') +crossed = partial(color, style='crossed') diff --git a/pipenv/patched/notpip/_vendor/progress/counter.py b/pipenv/patched/notpip/_vendor/progress/counter.py index d955ca4771..d0fbe7ef35 100644 --- a/pipenv/patched/notpip/_vendor/progress/counter.py +++ b/pipenv/patched/notpip/_vendor/progress/counter.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright (c) 2012 Giorgos Verigakis +# Copyright (c) 2012 Georgios Verigakis # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above @@ -20,12 +20,16 @@ class Counter(Infinite): def update(self): - self.write(str(self.index)) + message = self.message % self + line = ''.join([message, str(self.index)]) + self.writeln(line) class Countdown(Progress): def update(self): - self.write(str(self.remaining)) + message = self.message % self + line = ''.join([message, str(self.remaining)]) + self.writeln(line) class Stack(Progress): @@ -34,7 +38,9 @@ class Stack(Progress): def update(self): nphases = len(self.phases) i = min(nphases - 1, int(self.progress * nphases)) - self.write(self.phases[i]) + message = self.message % self + line = ''.join([message, self.phases[i]]) + self.writeln(line) class Pie(Stack): diff --git a/pipenv/patched/notpip/_vendor/progress/spinner.py b/pipenv/patched/notpip/_vendor/progress/spinner.py index 4e100cabb9..d593a203e0 100644 --- a/pipenv/patched/notpip/_vendor/progress/spinner.py +++ b/pipenv/patched/notpip/_vendor/progress/spinner.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright (c) 2012 Giorgos Verigakis +# Copyright (c) 2012 Georgios Verigakis # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above @@ -24,7 +24,9 @@ class Spinner(Infinite): def update(self): i = self.index % len(self.phases) - self.write(self.phases[i]) + 
message = self.message % self + line = ''.join([message, self.phases[i]]) + self.writeln(line) class PieSpinner(Spinner): diff --git a/pipenv/patched/notpip/_vendor/pygments/__init__.py b/pipenv/patched/notpip/_vendor/pygments/__init__.py new file mode 100644 index 0000000000..22c50b356a --- /dev/null +++ b/pipenv/patched/notpip/_vendor/pygments/__init__.py @@ -0,0 +1,83 @@ +""" + Pygments + ~~~~~~~~ + + Pygments is a syntax highlighting package written in Python. + + It is a generic syntax highlighter for general use in all kinds of software + such as forum systems, wikis or other applications that need to prettify + source code. Highlights are: + + * a wide range of common languages and markup formats is supported + * special attention is paid to details, increasing quality by a fair amount + * support for new languages and formats are added easily + * a number of output formats, presently HTML, LaTeX, RTF, SVG, all image + formats that PIL supports, and ANSI sequences + * it is usable as a command-line tool and as a library + * ... and it highlights even Brainfuck! + + The `Pygments master branch`_ is installable with ``easy_install Pygments==dev``. + + .. _Pygments master branch: + https://github.com/pygments/pygments/archive/master.zip#egg=Pygments-dev + + :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" +from io import StringIO, BytesIO + +__version__ = '2.11.2' +__docformat__ = 'restructuredtext' + +__all__ = ['lex', 'format', 'highlight'] + + +def lex(code, lexer): + """ + Lex ``code`` with ``lexer`` and return an iterable of tokens. + """ + try: + return lexer.get_tokens(code) + except TypeError as err: + if (isinstance(err.args[0], str) and + ('unbound method get_tokens' in err.args[0] or + 'missing 1 required positional argument' in err.args[0])): + raise TypeError('lex() argument must be a lexer instance, ' + 'not a class') + raise + + +def format(tokens, formatter, outfile=None): # pylint: disable=redefined-builtin + """ + Format a tokenlist ``tokens`` with the formatter ``formatter``. + + If ``outfile`` is given and a valid file object (an object + with a ``write`` method), the result will be written to it, otherwise + it is returned as a string. + """ + try: + if not outfile: + realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO() + formatter.format(tokens, realoutfile) + return realoutfile.getvalue() + else: + formatter.format(tokens, outfile) + except TypeError as err: + if (isinstance(err.args[0], str) and + ('unbound method format' in err.args[0] or + 'missing 1 required positional argument' in err.args[0])): + raise TypeError('format() argument must be a formatter instance, ' + 'not a class') + raise + + +def highlight(code, lexer, formatter, outfile=None): + """ + Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``. + + If ``outfile`` is given and a valid file object (an object + with a ``write`` method), the result will be written to it, otherwise + it is returned as a string. + """ + return format(lex(code, lexer), formatter, outfile) + diff --git a/pipenv/patched/notpip/_vendor/pygments/__main__.py b/pipenv/patched/notpip/_vendor/pygments/__main__.py new file mode 100644 index 0000000000..f4734659a0 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/pygments/__main__.py @@ -0,0 +1,17 @@ +""" + pygments.__main__ + ~~~~~~~~~~~~~~~~~ + + Main entry point for ``python -m pygments``. + + :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. 
+ :license: BSD, see LICENSE for details. +""" + +import sys +from pipenv.patched.notpip._vendor.pygments.cmdline import main + +try: + sys.exit(main(sys.argv)) +except KeyboardInterrupt: + sys.exit(1) diff --git a/pipenv/patched/notpip/_vendor/pygments/cmdline.py b/pipenv/patched/notpip/_vendor/pygments/cmdline.py new file mode 100644 index 0000000000..43720fc03f --- /dev/null +++ b/pipenv/patched/notpip/_vendor/pygments/cmdline.py @@ -0,0 +1,663 @@ +""" + pygments.cmdline + ~~~~~~~~~~~~~~~~ + + Command line interface. + + :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import os +import sys +import shutil +import argparse +from textwrap import dedent + +from pipenv.patched.notpip._vendor.pygments import __version__, highlight +from pipenv.patched.notpip._vendor.pygments.util import ClassNotFound, OptionError, docstring_headline, \ + guess_decode, guess_decode_from_terminal, terminal_encoding, \ + UnclosingTextIOWrapper +from pipenv.patched.notpip._vendor.pygments.lexers import get_all_lexers, get_lexer_by_name, guess_lexer, \ + load_lexer_from_file, get_lexer_for_filename, find_lexer_class_for_filename +from pipenv.patched.notpip._vendor.pygments.lexers.special import TextLexer +from pipenv.patched.notpip._vendor.pygments.formatters.latex import LatexEmbeddedLexer, LatexFormatter +from pipenv.patched.notpip._vendor.pygments.formatters import get_all_formatters, get_formatter_by_name, \ + load_formatter_from_file, get_formatter_for_filename, find_formatter_class +from pipenv.patched.notpip._vendor.pygments.formatters.terminal import TerminalFormatter +from pipenv.patched.notpip._vendor.pygments.formatters.terminal256 import Terminal256Formatter +from pipenv.patched.notpip._vendor.pygments.filters import get_all_filters, find_filter_class +from pipenv.patched.notpip._vendor.pygments.styles import get_all_styles, get_style_by_name + + +def _parse_options(o_strs): + opts = {} + if not o_strs: + return opts + for o_str in o_strs: + if not o_str.strip(): + continue + o_args = o_str.split(',') + for o_arg in o_args: + o_arg = o_arg.strip() + try: + o_key, o_val = o_arg.split('=', 1) + o_key = o_key.strip() + o_val = o_val.strip() + except ValueError: + opts[o_arg] = True + else: + opts[o_key] = o_val + return opts + + +def _parse_filters(f_strs): + filters = [] + if not f_strs: + return filters + for f_str in f_strs: + if ':' in f_str: + fname, fopts = f_str.split(':', 1) + filters.append((fname, _parse_options([fopts]))) + else: + filters.append((f_str, {})) + return filters + + +def _print_help(what, name): + try: + if what == 'lexer': + cls = get_lexer_by_name(name) + print("Help on the %s lexer:" % cls.name) + print(dedent(cls.__doc__)) + elif what == 'formatter': + cls = find_formatter_class(name) + print("Help on the %s formatter:" % cls.name) + print(dedent(cls.__doc__)) + elif what == 'filter': + cls = find_filter_class(name) + print("Help on the %s filter:" % name) + print(dedent(cls.__doc__)) + return 0 + except (AttributeError, ValueError): + print("%s not found!" 
% what, file=sys.stderr) + return 1 + + +def _print_list(what): + if what == 'lexer': + print() + print("Lexers:") + print("~~~~~~~") + + info = [] + for fullname, names, exts, _ in get_all_lexers(): + tup = (', '.join(names)+':', fullname, + exts and '(filenames ' + ', '.join(exts) + ')' or '') + info.append(tup) + info.sort() + for i in info: + print(('* %s\n %s %s') % i) + + elif what == 'formatter': + print() + print("Formatters:") + print("~~~~~~~~~~~") + + info = [] + for cls in get_all_formatters(): + doc = docstring_headline(cls) + tup = (', '.join(cls.aliases) + ':', doc, cls.filenames and + '(filenames ' + ', '.join(cls.filenames) + ')' or '') + info.append(tup) + info.sort() + for i in info: + print(('* %s\n %s %s') % i) + + elif what == 'filter': + print() + print("Filters:") + print("~~~~~~~~") + + for name in get_all_filters(): + cls = find_filter_class(name) + print("* " + name + ':') + print(" %s" % docstring_headline(cls)) + + elif what == 'style': + print() + print("Styles:") + print("~~~~~~~") + + for name in get_all_styles(): + cls = get_style_by_name(name) + print("* " + name + ':') + print(" %s" % docstring_headline(cls)) + + +def _print_list_as_json(requested_items): + import json + result = {} + if 'lexer' in requested_items: + info = {} + for fullname, names, filenames, mimetypes in get_all_lexers(): + info[fullname] = { + 'aliases': names, + 'filenames': filenames, + 'mimetypes': mimetypes + } + result['lexers'] = info + + if 'formatter' in requested_items: + info = {} + for cls in get_all_formatters(): + doc = docstring_headline(cls) + info[cls.name] = { + 'aliases': cls.aliases, + 'filenames': cls.filenames, + 'doc': doc + } + result['formatters'] = info + + if 'filter' in requested_items: + info = {} + for name in get_all_filters(): + cls = find_filter_class(name) + info[name] = { + 'doc': docstring_headline(cls) + } + result['filters'] = info + + if 'style' in requested_items: + info = {} + for name in get_all_styles(): + cls = get_style_by_name(name) + info[name] = { + 'doc': docstring_headline(cls) + } + result['styles'] = info + + json.dump(result, sys.stdout) + +def main_inner(parser, argns): + if argns.help: + parser.print_help() + return 0 + + if argns.V: + print('Pygments version %s, (c) 2006-2021 by Georg Brandl, Matthäus ' + 'Chajdas and contributors.' 
% __version__) + return 0 + + def is_only_option(opt): + return not any(v for (k, v) in vars(argns).items() if k != opt) + + # handle ``pygmentize -L`` + if argns.L is not None: + arg_set = set() + for k, v in vars(argns).items(): + if v: + arg_set.add(k) + + arg_set.discard('L') + arg_set.discard('json') + + if arg_set: + parser.print_help(sys.stderr) + return 2 + + # print version + if not argns.json: + main(['', '-V']) + allowed_types = {'lexer', 'formatter', 'filter', 'style'} + largs = [arg.rstrip('s') for arg in argns.L] + if any(arg not in allowed_types for arg in largs): + parser.print_help(sys.stderr) + return 0 + if not largs: + largs = allowed_types + if not argns.json: + for arg in largs: + _print_list(arg) + else: + _print_list_as_json(largs) + return 0 + + # handle ``pygmentize -H`` + if argns.H: + if not is_only_option('H'): + parser.print_help(sys.stderr) + return 2 + what, name = argns.H + if what not in ('lexer', 'formatter', 'filter'): + parser.print_help(sys.stderr) + return 2 + return _print_help(what, name) + + # parse -O options + parsed_opts = _parse_options(argns.O or []) + + # parse -P options + for p_opt in argns.P or []: + try: + name, value = p_opt.split('=', 1) + except ValueError: + parsed_opts[p_opt] = True + else: + parsed_opts[name] = value + + # encodings + inencoding = parsed_opts.get('inencoding', parsed_opts.get('encoding')) + outencoding = parsed_opts.get('outencoding', parsed_opts.get('encoding')) + + # handle ``pygmentize -N`` + if argns.N: + lexer = find_lexer_class_for_filename(argns.N) + if lexer is None: + lexer = TextLexer + + print(lexer.aliases[0]) + return 0 + + # handle ``pygmentize -C`` + if argns.C: + inp = sys.stdin.buffer.read() + try: + lexer = guess_lexer(inp, inencoding=inencoding) + except ClassNotFound: + lexer = TextLexer + + print(lexer.aliases[0]) + return 0 + + # handle ``pygmentize -S`` + S_opt = argns.S + a_opt = argns.a + if S_opt is not None: + f_opt = argns.f + if not f_opt: + parser.print_help(sys.stderr) + return 2 + if argns.l or argns.INPUTFILE: + parser.print_help(sys.stderr) + return 2 + + try: + parsed_opts['style'] = S_opt + fmter = get_formatter_by_name(f_opt, **parsed_opts) + except ClassNotFound as err: + print(err, file=sys.stderr) + return 1 + + print(fmter.get_style_defs(a_opt or '')) + return 0 + + # if no -S is given, -a is not allowed + if argns.a is not None: + parser.print_help(sys.stderr) + return 2 + + # parse -F options + F_opts = _parse_filters(argns.F or []) + + # -x: allow custom (eXternal) lexers and formatters + allow_custom_lexer_formatter = bool(argns.x) + + # select lexer + lexer = None + + # given by name? 
+ lexername = argns.l + if lexername: + # custom lexer, located relative to user's cwd + if allow_custom_lexer_formatter and '.py' in lexername: + try: + filename = None + name = None + if ':' in lexername: + filename, name = lexername.rsplit(':', 1) + + if '.py' in name: + # This can happen on Windows: If the lexername is + # C:\lexer.py -- return to normal load path in that case + name = None + + if filename and name: + lexer = load_lexer_from_file(filename, name, + **parsed_opts) + else: + lexer = load_lexer_from_file(lexername, **parsed_opts) + except ClassNotFound as err: + print('Error:', err, file=sys.stderr) + return 1 + else: + try: + lexer = get_lexer_by_name(lexername, **parsed_opts) + except (OptionError, ClassNotFound) as err: + print('Error:', err, file=sys.stderr) + return 1 + + # read input code + code = None + + if argns.INPUTFILE: + if argns.s: + print('Error: -s option not usable when input file specified', + file=sys.stderr) + return 2 + + infn = argns.INPUTFILE + try: + with open(infn, 'rb') as infp: + code = infp.read() + except Exception as err: + print('Error: cannot read infile:', err, file=sys.stderr) + return 1 + if not inencoding: + code, inencoding = guess_decode(code) + + # do we have to guess the lexer? + if not lexer: + try: + lexer = get_lexer_for_filename(infn, code, **parsed_opts) + except ClassNotFound as err: + if argns.g: + try: + lexer = guess_lexer(code, **parsed_opts) + except ClassNotFound: + lexer = TextLexer(**parsed_opts) + else: + print('Error:', err, file=sys.stderr) + return 1 + except OptionError as err: + print('Error:', err, file=sys.stderr) + return 1 + + elif not argns.s: # treat stdin as full file (-s support is later) + # read code from terminal, always in binary mode since we want to + # decode ourselves and be tolerant with it + code = sys.stdin.buffer.read() # use .buffer to get a binary stream + if not inencoding: + code, inencoding = guess_decode_from_terminal(code, sys.stdin) + # else the lexer will do the decoding + if not lexer: + try: + lexer = guess_lexer(code, **parsed_opts) + except ClassNotFound: + lexer = TextLexer(**parsed_opts) + + else: # -s option needs a lexer with -l + if not lexer: + print('Error: when using -s a lexer has to be selected with -l', + file=sys.stderr) + return 2 + + # process filters + for fname, fopts in F_opts: + try: + lexer.add_filter(fname, **fopts) + except ClassNotFound as err: + print('Error:', err, file=sys.stderr) + return 1 + + # select formatter + outfn = argns.o + fmter = argns.f + if fmter: + # custom formatter, located relative to user's cwd + if allow_custom_lexer_formatter and '.py' in fmter: + try: + filename = None + name = None + if ':' in fmter: + # Same logic as above for custom lexer + filename, name = fmter.rsplit(':', 1) + + if '.py' in name: + name = None + + if filename and name: + fmter = load_formatter_from_file(filename, name, + **parsed_opts) + else: + fmter = load_formatter_from_file(fmter, **parsed_opts) + except ClassNotFound as err: + print('Error:', err, file=sys.stderr) + return 1 + else: + try: + fmter = get_formatter_by_name(fmter, **parsed_opts) + except (OptionError, ClassNotFound) as err: + print('Error:', err, file=sys.stderr) + return 1 + + if outfn: + if not fmter: + try: + fmter = get_formatter_for_filename(outfn, **parsed_opts) + except (OptionError, ClassNotFound) as err: + print('Error:', err, file=sys.stderr) + return 1 + try: + outfile = open(outfn, 'wb') + except Exception as err: + print('Error: cannot open outfile:', err, file=sys.stderr) + return 
1 + else: + if not fmter: + if '256' in os.environ.get('TERM', ''): + fmter = Terminal256Formatter(**parsed_opts) + else: + fmter = TerminalFormatter(**parsed_opts) + outfile = sys.stdout.buffer + + # determine output encoding if not explicitly selected + if not outencoding: + if outfn: + # output file? use lexer encoding for now (can still be None) + fmter.encoding = inencoding + else: + # else use terminal encoding + fmter.encoding = terminal_encoding(sys.stdout) + + # provide coloring under Windows, if possible + if not outfn and sys.platform in ('win32', 'cygwin') and \ + fmter.name in ('Terminal', 'Terminal256'): # pragma: no cover + # unfortunately colorama doesn't support binary streams on Py3 + outfile = UnclosingTextIOWrapper(outfile, encoding=fmter.encoding) + fmter.encoding = None + try: + import pipenv.patched.notpip._vendor.colorama.initialise as colorama_initialise + except ImportError: + pass + else: + outfile = colorama_initialise.wrap_stream( + outfile, convert=None, strip=None, autoreset=False, wrap=True) + + # When using the LaTeX formatter and the option `escapeinside` is + # specified, we need a special lexer which collects escaped text + # before running the chosen language lexer. + escapeinside = parsed_opts.get('escapeinside', '') + if len(escapeinside) == 2 and isinstance(fmter, LatexFormatter): + left = escapeinside[0] + right = escapeinside[1] + lexer = LatexEmbeddedLexer(left, right, lexer) + + # ... and do it! + if not argns.s: + # process whole input as per normal... + try: + highlight(code, lexer, fmter, outfile) + finally: + if outfn: + outfile.close() + return 0 + else: + # line by line processing of stdin (eg: for 'tail -f')... + try: + while 1: + line = sys.stdin.buffer.readline() + if not line: + break + if not inencoding: + line = guess_decode_from_terminal(line, sys.stdin)[0] + highlight(line, lexer, fmter, outfile) + if hasattr(outfile, 'flush'): + outfile.flush() + return 0 + except KeyboardInterrupt: # pragma: no cover + return 0 + finally: + if outfn: + outfile.close() + + +class HelpFormatter(argparse.HelpFormatter): + def __init__(self, prog, indent_increment=2, max_help_position=16, width=None): + if width is None: + try: + width = shutil.get_terminal_size().columns - 2 + except Exception: + pass + argparse.HelpFormatter.__init__(self, prog, indent_increment, + max_help_position, width) + + +def main(args=sys.argv): + """ + Main command line entry point. + """ + desc = "Highlight an input file and write the result to an output file." + parser = argparse.ArgumentParser(description=desc, add_help=False, + formatter_class=HelpFormatter) + + operation = parser.add_argument_group('Main operation') + lexersel = operation.add_mutually_exclusive_group() + lexersel.add_argument( + '-l', metavar='LEXER', + help='Specify the lexer to use. (Query names with -L.) If not ' + 'given and -g is not present, the lexer is guessed from the filename.') + lexersel.add_argument( + '-g', action='store_true', + help='Guess the lexer from the file contents, or pass through ' + 'as plain text if nothing can be guessed.') + operation.add_argument( + '-F', metavar='FILTER[:options]', action='append', + help='Add a filter to the token stream. (Query names with -L.) ' + 'Filter options are given after a colon if necessary.') + operation.add_argument( + '-f', metavar='FORMATTER', + help='Specify the formatter to use. (Query names with -L.) 
' + 'If not given, the formatter is guessed from the output filename, ' + 'and defaults to the terminal formatter if the output is to the ' + 'terminal or an unknown file extension.') + operation.add_argument( + '-O', metavar='OPTION=value[,OPTION=value,...]', action='append', + help='Give options to the lexer and formatter as a comma-separated ' + 'list of key-value pairs. ' + 'Example: `-O bg=light,python=cool`.') + operation.add_argument( + '-P', metavar='OPTION=value', action='append', + help='Give a single option to the lexer and formatter - with this ' + 'you can pass options whose value contains commas and equal signs. ' + 'Example: `-P "heading=Pygments, the Python highlighter"`.') + operation.add_argument( + '-o', metavar='OUTPUTFILE', + help='Where to write the output. Defaults to standard output.') + + operation.add_argument( + 'INPUTFILE', nargs='?', + help='Where to read the input. Defaults to standard input.') + + flags = parser.add_argument_group('Operation flags') + flags.add_argument( + '-v', action='store_true', + help='Print a detailed traceback on unhandled exceptions, which ' + 'is useful for debugging and bug reports.') + flags.add_argument( + '-s', action='store_true', + help='Process lines one at a time until EOF, rather than waiting to ' + 'process the entire file. This only works for stdin, only for lexers ' + 'with no line-spanning constructs, and is intended for streaming ' + 'input such as you get from `tail -f`. ' + 'Example usage: `tail -f sql.log | pygmentize -s -l sql`.') + flags.add_argument( + '-x', action='store_true', + help='Allow custom lexers and formatters to be loaded from a .py file ' + 'relative to the current working directory. For example, ' + '`-l ./customlexer.py -x`. By default, this option expects a file ' + 'with a class named CustomLexer or CustomFormatter; you can also ' + 'specify your own class name with a colon (`-l ./lexer.py:MyLexer`). ' + 'Users should be very careful not to use this option with untrusted ' + 'files, because it will import and run them.') + flags.add_argument('--json', help='Output as JSON. This can ' + 'be only used in conjunction with -L.', + default=False, + action='store_true') + + special_modes_group = parser.add_argument_group( + 'Special modes - do not do any highlighting') + special_modes = special_modes_group.add_mutually_exclusive_group() + special_modes.add_argument( + '-S', metavar='STYLE -f formatter', + help='Print style definitions for STYLE for a formatter ' + 'given with -f. The argument given by -a is formatter ' + 'dependent.') + special_modes.add_argument( + '-L', nargs='*', metavar='WHAT', + help='List lexers, formatters, styles or filters -- ' + 'give additional arguments for the thing(s) you want to list ' + '(e.g. "styles"), or omit them to list everything.') + special_modes.add_argument( + '-N', metavar='FILENAME', + help='Guess and print out a lexer name based solely on the given ' + 'filename. Does not take input or highlight anything. 
If no specific '
+ 'lexer can be determined, "text" is printed.')
+ special_modes.add_argument(
+ '-C', action='store_true',
+ help='Like -N, but print out a lexer name based solely on '
+ 'a given content from standard input.')
+ special_modes.add_argument(
+ '-H', action='store', nargs=2, metavar=('NAME', 'TYPE'),
+ help='Print detailed help for the object <name> of type <type>, '
+ 'where <type> is one of "lexer", "formatter" or "filter".')
+ special_modes.add_argument(
+ '-V', action='store_true',
+ help='Print the package version.')
+ special_modes.add_argument(
+ '-h', '--help', action='store_true',
+ help='Print this help.')
+ special_modes_group.add_argument(
+ '-a', metavar='ARG',
+ help='Formatter-specific additional argument for the -S (print '
+ 'style sheet) mode.')
+
+ argns = parser.parse_args(args[1:])
+
+ try:
+ return main_inner(parser, argns)
+ except Exception:
+ if argns.v:
+ print(file=sys.stderr)
+ print('*' * 65, file=sys.stderr)
+ print('An unhandled exception occurred while highlighting.',
+ file=sys.stderr)
+ print('Please report the whole traceback to the issue tracker at',
+ file=sys.stderr)
+ print('<https://github.com/pygments/pygments/issues>.',
+ file=sys.stderr)
+ print('*' * 65, file=sys.stderr)
+ print(file=sys.stderr)
+ raise
+ import traceback
+ info = traceback.format_exception(*sys.exc_info())
+ msg = info[-1].strip()
+ if len(info) >= 3:
+ # extract relevant file and position info
+ msg += '\n (f%s)' % info[-2].split('\n')[0].strip()[1:]
+ print(file=sys.stderr)
+ print('*** Error while highlighting:', file=sys.stderr)
+ print(msg, file=sys.stderr)
+ print('*** If this is a bug you want to report, please rerun with -v.',
+ file=sys.stderr)
+ return 1
diff --git a/pipenv/patched/notpip/_vendor/pygments/console.py b/pipenv/patched/notpip/_vendor/pygments/console.py
new file mode 100644
index 0000000000..8dd08abebc
--- /dev/null
+++ b/pipenv/patched/notpip/_vendor/pygments/console.py
@@ -0,0 +1,70 @@
+"""
+ pygments.console
+ ~~~~~~~~~~~~~~~~
+
+ Format colored console output.
+
+ :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+""" + +esc = "\x1b[" + +codes = {} +codes[""] = "" +codes["reset"] = esc + "39;49;00m" + +codes["bold"] = esc + "01m" +codes["faint"] = esc + "02m" +codes["standout"] = esc + "03m" +codes["underline"] = esc + "04m" +codes["blink"] = esc + "05m" +codes["overline"] = esc + "06m" + +dark_colors = ["black", "red", "green", "yellow", "blue", + "magenta", "cyan", "gray"] +light_colors = ["brightblack", "brightred", "brightgreen", "brightyellow", "brightblue", + "brightmagenta", "brightcyan", "white"] + +x = 30 +for d, l in zip(dark_colors, light_colors): + codes[d] = esc + "%im" % x + codes[l] = esc + "%im" % (60 + x) + x += 1 + +del d, l, x + +codes["white"] = codes["bold"] + + +def reset_color(): + return codes["reset"] + + +def colorize(color_key, text): + return codes[color_key] + text + codes["reset"] + + +def ansiformat(attr, text): + """ + Format ``text`` with a color and/or some attributes:: + + color normal color + *color* bold color + _color_ underlined color + +color+ blinking color + """ + result = [] + if attr[:1] == attr[-1:] == '+': + result.append(codes['blink']) + attr = attr[1:-1] + if attr[:1] == attr[-1:] == '*': + result.append(codes['bold']) + attr = attr[1:-1] + if attr[:1] == attr[-1:] == '_': + result.append(codes['underline']) + attr = attr[1:-1] + result.append(codes[attr]) + result.append(text) + result.append(codes['reset']) + return ''.join(result) diff --git a/pipenv/patched/notpip/_vendor/pygments/filter.py b/pipenv/patched/notpip/_vendor/pygments/filter.py new file mode 100644 index 0000000000..85b4829878 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/pygments/filter.py @@ -0,0 +1,71 @@ +""" + pygments.filter + ~~~~~~~~~~~~~~~ + + Module that implements the default filter. + + :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + + +def apply_filters(stream, filters, lexer=None): + """ + Use this method to apply an iterable of filters to + a stream. If lexer is given it's forwarded to the + filter, otherwise the filter receives `None`. + """ + def _apply(filter_, stream): + yield from filter_.filter(lexer, stream) + for filter_ in filters: + stream = _apply(filter_, stream) + return stream + + +def simplefilter(f): + """ + Decorator that converts a function into a filter:: + + @simplefilter + def lowercase(self, lexer, stream, options): + for ttype, value in stream: + yield ttype, value.lower() + """ + return type(f.__name__, (FunctionFilter,), { + '__module__': getattr(f, '__module__'), + '__doc__': f.__doc__, + 'function': f, + }) + + +class Filter: + """ + Default filter. Subclass this class or use the `simplefilter` + decorator to create own filters. + """ + + def __init__(self, **options): + self.options = options + + def filter(self, lexer, stream): + raise NotImplementedError() + + +class FunctionFilter(Filter): + """ + Abstract class used by `simplefilter` to create simple + function filters on the fly. The `simplefilter` decorator + automatically creates subclasses of this class for + functions passed to it. 
+ """ + function = None + + def __init__(self, **options): + if not hasattr(self, 'function'): + raise TypeError('%r used without bound function' % + self.__class__.__name__) + Filter.__init__(self, **options) + + def filter(self, lexer, stream): + # pylint: disable=not-callable + yield from self.function(lexer, stream, self.options) diff --git a/pipenv/patched/notpip/_vendor/pygments/filters/__init__.py b/pipenv/patched/notpip/_vendor/pygments/filters/__init__.py new file mode 100644 index 0000000000..704f69239c --- /dev/null +++ b/pipenv/patched/notpip/_vendor/pygments/filters/__init__.py @@ -0,0 +1,937 @@ +""" + pygments.filters + ~~~~~~~~~~~~~~~~ + + Module containing filter lookup functions and default + filters. + + :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pipenv.patched.notpip._vendor.pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \ + string_to_tokentype +from pipenv.patched.notpip._vendor.pygments.filter import Filter +from pipenv.patched.notpip._vendor.pygments.util import get_list_opt, get_int_opt, get_bool_opt, \ + get_choice_opt, ClassNotFound, OptionError +from pipenv.patched.notpip._vendor.pygments.plugin import find_plugin_filters + + +def find_filter_class(filtername): + """Lookup a filter by name. Return None if not found.""" + if filtername in FILTERS: + return FILTERS[filtername] + for name, cls in find_plugin_filters(): + if name == filtername: + return cls + return None + + +def get_filter_by_name(filtername, **options): + """Return an instantiated filter. + + Options are passed to the filter initializer if wanted. + Raise a ClassNotFound if not found. + """ + cls = find_filter_class(filtername) + if cls: + return cls(**options) + else: + raise ClassNotFound('filter %r not found' % filtername) + + +def get_all_filters(): + """Return a generator of all filter names.""" + yield from FILTERS + for name, _ in find_plugin_filters(): + yield name + + +def _replace_special(ttype, value, regex, specialttype, + replacefunc=lambda x: x): + last = 0 + for match in regex.finditer(value): + start, end = match.start(), match.end() + if start != last: + yield ttype, value[last:start] + yield specialttype, replacefunc(value[start:end]) + last = end + if last != len(value): + yield ttype, value[last:] + + +class CodeTagFilter(Filter): + """Highlight special code tags in comments and docstrings. + + Options accepted: + + `codetags` : list of strings + A list of strings that are flagged as code tags. The default is to + highlight ``XXX``, ``TODO``, ``BUG`` and ``NOTE``. + """ + + def __init__(self, **options): + Filter.__init__(self, **options) + tags = get_list_opt(options, 'codetags', + ['XXX', 'TODO', 'BUG', 'NOTE']) + self.tag_re = re.compile(r'\b(%s)\b' % '|'.join([ + re.escape(tag) for tag in tags if tag + ])) + + def filter(self, lexer, stream): + regex = self.tag_re + for ttype, value in stream: + if ttype in String.Doc or \ + ttype in Comment and \ + ttype not in Comment.Preproc: + yield from _replace_special(ttype, value, regex, Comment.Special) + else: + yield ttype, value + + +class SymbolFilter(Filter): + """Convert mathematical symbols such as \\ in Isabelle + or \\longrightarrow in LaTeX into Unicode characters. + + This is mostly useful for HTML or console output when you want to + approximate the source rendering you'd see in an IDE. + + Options accepted: + + `lang` : string + The symbol language. Must be one of ``'isabelle'`` or + ``'latex'``. 
The default is ``'isabelle'``. + """ + + latex_symbols = { + '\\alpha' : '\U000003b1', + '\\beta' : '\U000003b2', + '\\gamma' : '\U000003b3', + '\\delta' : '\U000003b4', + '\\varepsilon' : '\U000003b5', + '\\zeta' : '\U000003b6', + '\\eta' : '\U000003b7', + '\\vartheta' : '\U000003b8', + '\\iota' : '\U000003b9', + '\\kappa' : '\U000003ba', + '\\lambda' : '\U000003bb', + '\\mu' : '\U000003bc', + '\\nu' : '\U000003bd', + '\\xi' : '\U000003be', + '\\pi' : '\U000003c0', + '\\varrho' : '\U000003c1', + '\\sigma' : '\U000003c3', + '\\tau' : '\U000003c4', + '\\upsilon' : '\U000003c5', + '\\varphi' : '\U000003c6', + '\\chi' : '\U000003c7', + '\\psi' : '\U000003c8', + '\\omega' : '\U000003c9', + '\\Gamma' : '\U00000393', + '\\Delta' : '\U00000394', + '\\Theta' : '\U00000398', + '\\Lambda' : '\U0000039b', + '\\Xi' : '\U0000039e', + '\\Pi' : '\U000003a0', + '\\Sigma' : '\U000003a3', + '\\Upsilon' : '\U000003a5', + '\\Phi' : '\U000003a6', + '\\Psi' : '\U000003a8', + '\\Omega' : '\U000003a9', + '\\leftarrow' : '\U00002190', + '\\longleftarrow' : '\U000027f5', + '\\rightarrow' : '\U00002192', + '\\longrightarrow' : '\U000027f6', + '\\Leftarrow' : '\U000021d0', + '\\Longleftarrow' : '\U000027f8', + '\\Rightarrow' : '\U000021d2', + '\\Longrightarrow' : '\U000027f9', + '\\leftrightarrow' : '\U00002194', + '\\longleftrightarrow' : '\U000027f7', + '\\Leftrightarrow' : '\U000021d4', + '\\Longleftrightarrow' : '\U000027fa', + '\\mapsto' : '\U000021a6', + '\\longmapsto' : '\U000027fc', + '\\relbar' : '\U00002500', + '\\Relbar' : '\U00002550', + '\\hookleftarrow' : '\U000021a9', + '\\hookrightarrow' : '\U000021aa', + '\\leftharpoondown' : '\U000021bd', + '\\rightharpoondown' : '\U000021c1', + '\\leftharpoonup' : '\U000021bc', + '\\rightharpoonup' : '\U000021c0', + '\\rightleftharpoons' : '\U000021cc', + '\\leadsto' : '\U0000219d', + '\\downharpoonleft' : '\U000021c3', + '\\downharpoonright' : '\U000021c2', + '\\upharpoonleft' : '\U000021bf', + '\\upharpoonright' : '\U000021be', + '\\restriction' : '\U000021be', + '\\uparrow' : '\U00002191', + '\\Uparrow' : '\U000021d1', + '\\downarrow' : '\U00002193', + '\\Downarrow' : '\U000021d3', + '\\updownarrow' : '\U00002195', + '\\Updownarrow' : '\U000021d5', + '\\langle' : '\U000027e8', + '\\rangle' : '\U000027e9', + '\\lceil' : '\U00002308', + '\\rceil' : '\U00002309', + '\\lfloor' : '\U0000230a', + '\\rfloor' : '\U0000230b', + '\\flqq' : '\U000000ab', + '\\frqq' : '\U000000bb', + '\\bot' : '\U000022a5', + '\\top' : '\U000022a4', + '\\wedge' : '\U00002227', + '\\bigwedge' : '\U000022c0', + '\\vee' : '\U00002228', + '\\bigvee' : '\U000022c1', + '\\forall' : '\U00002200', + '\\exists' : '\U00002203', + '\\nexists' : '\U00002204', + '\\neg' : '\U000000ac', + '\\Box' : '\U000025a1', + '\\Diamond' : '\U000025c7', + '\\vdash' : '\U000022a2', + '\\models' : '\U000022a8', + '\\dashv' : '\U000022a3', + '\\surd' : '\U0000221a', + '\\le' : '\U00002264', + '\\ge' : '\U00002265', + '\\ll' : '\U0000226a', + '\\gg' : '\U0000226b', + '\\lesssim' : '\U00002272', + '\\gtrsim' : '\U00002273', + '\\lessapprox' : '\U00002a85', + '\\gtrapprox' : '\U00002a86', + '\\in' : '\U00002208', + '\\notin' : '\U00002209', + '\\subset' : '\U00002282', + '\\supset' : '\U00002283', + '\\subseteq' : '\U00002286', + '\\supseteq' : '\U00002287', + '\\sqsubset' : '\U0000228f', + '\\sqsupset' : '\U00002290', + '\\sqsubseteq' : '\U00002291', + '\\sqsupseteq' : '\U00002292', + '\\cap' : '\U00002229', + '\\bigcap' : '\U000022c2', + '\\cup' : '\U0000222a', + '\\bigcup' : '\U000022c3', + '\\sqcup' : '\U00002294', + 
'\\bigsqcup' : '\U00002a06', + '\\sqcap' : '\U00002293', + '\\Bigsqcap' : '\U00002a05', + '\\setminus' : '\U00002216', + '\\propto' : '\U0000221d', + '\\uplus' : '\U0000228e', + '\\bigplus' : '\U00002a04', + '\\sim' : '\U0000223c', + '\\doteq' : '\U00002250', + '\\simeq' : '\U00002243', + '\\approx' : '\U00002248', + '\\asymp' : '\U0000224d', + '\\cong' : '\U00002245', + '\\equiv' : '\U00002261', + '\\Join' : '\U000022c8', + '\\bowtie' : '\U00002a1d', + '\\prec' : '\U0000227a', + '\\succ' : '\U0000227b', + '\\preceq' : '\U0000227c', + '\\succeq' : '\U0000227d', + '\\parallel' : '\U00002225', + '\\mid' : '\U000000a6', + '\\pm' : '\U000000b1', + '\\mp' : '\U00002213', + '\\times' : '\U000000d7', + '\\div' : '\U000000f7', + '\\cdot' : '\U000022c5', + '\\star' : '\U000022c6', + '\\circ' : '\U00002218', + '\\dagger' : '\U00002020', + '\\ddagger' : '\U00002021', + '\\lhd' : '\U000022b2', + '\\rhd' : '\U000022b3', + '\\unlhd' : '\U000022b4', + '\\unrhd' : '\U000022b5', + '\\triangleleft' : '\U000025c3', + '\\triangleright' : '\U000025b9', + '\\triangle' : '\U000025b3', + '\\triangleq' : '\U0000225c', + '\\oplus' : '\U00002295', + '\\bigoplus' : '\U00002a01', + '\\otimes' : '\U00002297', + '\\bigotimes' : '\U00002a02', + '\\odot' : '\U00002299', + '\\bigodot' : '\U00002a00', + '\\ominus' : '\U00002296', + '\\oslash' : '\U00002298', + '\\dots' : '\U00002026', + '\\cdots' : '\U000022ef', + '\\sum' : '\U00002211', + '\\prod' : '\U0000220f', + '\\coprod' : '\U00002210', + '\\infty' : '\U0000221e', + '\\int' : '\U0000222b', + '\\oint' : '\U0000222e', + '\\clubsuit' : '\U00002663', + '\\diamondsuit' : '\U00002662', + '\\heartsuit' : '\U00002661', + '\\spadesuit' : '\U00002660', + '\\aleph' : '\U00002135', + '\\emptyset' : '\U00002205', + '\\nabla' : '\U00002207', + '\\partial' : '\U00002202', + '\\flat' : '\U0000266d', + '\\natural' : '\U0000266e', + '\\sharp' : '\U0000266f', + '\\angle' : '\U00002220', + '\\copyright' : '\U000000a9', + '\\textregistered' : '\U000000ae', + '\\textonequarter' : '\U000000bc', + '\\textonehalf' : '\U000000bd', + '\\textthreequarters' : '\U000000be', + '\\textordfeminine' : '\U000000aa', + '\\textordmasculine' : '\U000000ba', + '\\euro' : '\U000020ac', + '\\pounds' : '\U000000a3', + '\\yen' : '\U000000a5', + '\\textcent' : '\U000000a2', + '\\textcurrency' : '\U000000a4', + '\\textdegree' : '\U000000b0', + } + + isabelle_symbols = { + '\\' : '\U0001d7ec', + '\\' : '\U0001d7ed', + '\\' : '\U0001d7ee', + '\\' : '\U0001d7ef', + '\\' : '\U0001d7f0', + '\\' : '\U0001d7f1', + '\\' : '\U0001d7f2', + '\\' : '\U0001d7f3', + '\\' : '\U0001d7f4', + '\\' : '\U0001d7f5', + '\\' : '\U0001d49c', + '\\' : '\U0000212c', + '\\' : '\U0001d49e', + '\\' : '\U0001d49f', + '\\' : '\U00002130', + '\\' : '\U00002131', + '\\' : '\U0001d4a2', + '\\' : '\U0000210b', + '\\' : '\U00002110', + '\\' : '\U0001d4a5', + '\\' : '\U0001d4a6', + '\\' : '\U00002112', + '\\' : '\U00002133', + '\\' : '\U0001d4a9', + '\\' : '\U0001d4aa', + '\\

' : '\U0001d5c9', + '\\' : '\U0001d5ca', + '\\' : '\U0001d5cb', + '\\' : '\U0001d5cc', + '\\' : '\U0001d5cd', + '\\' : '\U0001d5ce', + '\\' : '\U0001d5cf', + '\\' : '\U0001d5d0', + '\\' : '\U0001d5d1', + '\\' : '\U0001d5d2', + '\\' : '\U0001d5d3', + '\\' : '\U0001d504', + '\\' : '\U0001d505', + '\\' : '\U0000212d', + '\\

' : '\U0001d507', + '\\' : '\U0001d508', + '\\' : '\U0001d509', + '\\' : '\U0001d50a', + '\\' : '\U0000210c', + '\\' : '\U00002111', + '\\' : '\U0001d50d', + '\\' : '\U0001d50e', + '\\' : '\U0001d50f', + '\\' : '\U0001d510', + '\\' : '\U0001d511', + '\\' : '\U0001d512', + '\\' : '\U0001d513', + '\\' : '\U0001d514', + '\\' : '\U0000211c', + '\\' : '\U0001d516', + '\\' : '\U0001d517', + '\\' : '\U0001d518', + '\\' : '\U0001d519', + '\\' : '\U0001d51a', + '\\' : '\U0001d51b', + '\\' : '\U0001d51c', + '\\' : '\U00002128', + '\\' : '\U0001d51e', + '\\' : '\U0001d51f', + '\\' : '\U0001d520', + '\\
' : '\U0001d521', + '\\' : '\U0001d522', + '\\' : '\U0001d523', + '\\' : '\U0001d524', + '\\' : '\U0001d525', + '\\' : '\U0001d526', + '\\' : '\U0001d527', + '\\' : '\U0001d528', + '\\' : '\U0001d529', + '\\' : '\U0001d52a', + '\\' : '\U0001d52b', + '\\' : '\U0001d52c', + '\\' : '\U0001d52d', + '\\' : '\U0001d52e', + '\\' : '\U0001d52f', + '\\' : '\U0001d530', + '\\' : '\U0001d531', + '\\' : '\U0001d532', + '\\' : '\U0001d533', + '\\' : '\U0001d534', + '\\' : '\U0001d535', + '\\' : '\U0001d536', + '\\' : '\U0001d537', + '\\' : '\U000003b1', + '\\' : '\U000003b2', + '\\' : '\U000003b3', + '\\' : '\U000003b4', + '\\' : '\U000003b5', + '\\' : '\U000003b6', + '\\' : '\U000003b7', + '\\' : '\U000003b8', + '\\' : '\U000003b9', + '\\' : '\U000003ba', + '\\' : '\U000003bb', + '\\' : '\U000003bc', + '\\' : '\U000003bd', + '\\' : '\U000003be', + '\\' : '\U000003c0', + '\\' : '\U000003c1', + '\\' : '\U000003c3', + '\\' : '\U000003c4', + '\\' : '\U000003c5', + '\\' : '\U000003c6', + '\\' : '\U000003c7', + '\\' : '\U000003c8', + '\\' : '\U000003c9', + '\\' : '\U00000393', + '\\' : '\U00000394', + '\\' : '\U00000398', + '\\' : '\U0000039b', + '\\' : '\U0000039e', + '\\' : '\U000003a0', + '\\' : '\U000003a3', + '\\' : '\U000003a5', + '\\' : '\U000003a6', + '\\' : '\U000003a8', + '\\' : '\U000003a9', + '\\' : '\U0001d539', + '\\' : '\U00002102', + '\\' : '\U00002115', + '\\' : '\U0000211a', + '\\' : '\U0000211d', + '\\' : '\U00002124', + '\\' : '\U00002190', + '\\' : '\U000027f5', + '\\' : '\U00002192', + '\\' : '\U000027f6', + '\\' : '\U000021d0', + '\\' : '\U000027f8', + '\\' : '\U000021d2', + '\\' : '\U000027f9', + '\\' : '\U00002194', + '\\' : '\U000027f7', + '\\' : '\U000021d4', + '\\' : '\U000027fa', + '\\' : '\U000021a6', + '\\' : '\U000027fc', + '\\' : '\U00002500', + '\\' : '\U00002550', + '\\' : '\U000021a9', + '\\' : '\U000021aa', + '\\' : '\U000021bd', + '\\' : '\U000021c1', + '\\' : '\U000021bc', + '\\' : '\U000021c0', + '\\' : '\U000021cc', + '\\' : '\U0000219d', + '\\' : '\U000021c3', + '\\' : '\U000021c2', + '\\' : '\U000021bf', + '\\' : '\U000021be', + '\\' : '\U000021be', + '\\' : '\U00002237', + '\\' : '\U00002191', + '\\' : '\U000021d1', + '\\' : '\U00002193', + '\\' : '\U000021d3', + '\\' : '\U00002195', + '\\' : '\U000021d5', + '\\' : '\U000027e8', + '\\' : '\U000027e9', + '\\' : '\U00002308', + '\\' : '\U00002309', + '\\' : '\U0000230a', + '\\' : '\U0000230b', + '\\' : '\U00002987', + '\\' : '\U00002988', + '\\' : '\U000027e6', + '\\' : '\U000027e7', + '\\' : '\U00002983', + '\\' : '\U00002984', + '\\' : '\U000000ab', + '\\' : '\U000000bb', + '\\' : '\U000022a5', + '\\' : '\U000022a4', + '\\' : '\U00002227', + '\\' : '\U000022c0', + '\\' : '\U00002228', + '\\' : '\U000022c1', + '\\' : '\U00002200', + '\\' : '\U00002203', + '\\' : '\U00002204', + '\\' : '\U000000ac', + '\\' : '\U000025a1', + '\\' : '\U000025c7', + '\\' : '\U000022a2', + '\\' : '\U000022a8', + '\\' : '\U000022a9', + '\\' : '\U000022ab', + '\\' : '\U000022a3', + '\\' : '\U0000221a', + '\\' : '\U00002264', + '\\' : '\U00002265', + '\\' : '\U0000226a', + '\\' : '\U0000226b', + '\\' : '\U00002272', + '\\' : '\U00002273', + '\\' : '\U00002a85', + '\\' : '\U00002a86', + '\\' : '\U00002208', + '\\' : '\U00002209', + '\\' : '\U00002282', + '\\' : '\U00002283', + '\\' : '\U00002286', + '\\' : '\U00002287', + '\\' : '\U0000228f', + '\\' : '\U00002290', + '\\' : '\U00002291', + '\\' : '\U00002292', + '\\' : '\U00002229', + '\\' : '\U000022c2', + '\\' : '\U0000222a', + '\\' : '\U000022c3', + '\\' : '\U00002294', + '\\' : 
'\U00002a06', + '\\' : '\U00002293', + '\\' : '\U00002a05', + '\\' : '\U00002216', + '\\' : '\U0000221d', + '\\' : '\U0000228e', + '\\' : '\U00002a04', + '\\' : '\U00002260', + '\\' : '\U0000223c', + '\\' : '\U00002250', + '\\' : '\U00002243', + '\\' : '\U00002248', + '\\' : '\U0000224d', + '\\' : '\U00002245', + '\\' : '\U00002323', + '\\' : '\U00002261', + '\\' : '\U00002322', + '\\' : '\U000022c8', + '\\' : '\U00002a1d', + '\\' : '\U0000227a', + '\\' : '\U0000227b', + '\\' : '\U0000227c', + '\\' : '\U0000227d', + '\\' : '\U00002225', + '\\' : '\U000000a6', + '\\' : '\U000000b1', + '\\' : '\U00002213', + '\\' : '\U000000d7', + '\\
' : '\U000000f7', + '\\' : '\U000022c5', + '\\' : '\U000022c6', + '\\' : '\U00002219', + '\\' : '\U00002218', + '\\' : '\U00002020', + '\\' : '\U00002021', + '\\' : '\U000022b2', + '\\' : '\U000022b3', + '\\' : '\U000022b4', + '\\' : '\U000022b5', + '\\' : '\U000025c3', + '\\' : '\U000025b9', + '\\' : '\U000025b3', + '\\' : '\U0000225c', + '\\' : '\U00002295', + '\\' : '\U00002a01', + '\\' : '\U00002297', + '\\' : '\U00002a02', + '\\' : '\U00002299', + '\\' : '\U00002a00', + '\\' : '\U00002296', + '\\' : '\U00002298', + '\\' : '\U00002026', + '\\' : '\U000022ef', + '\\' : '\U00002211', + '\\' : '\U0000220f', + '\\' : '\U00002210', + '\\' : '\U0000221e', + '\\' : '\U0000222b', + '\\' : '\U0000222e', + '\\' : '\U00002663', + '\\' : '\U00002662', + '\\' : '\U00002661', + '\\' : '\U00002660', + '\\' : '\U00002135', + '\\' : '\U00002205', + '\\' : '\U00002207', + '\\' : '\U00002202', + '\\' : '\U0000266d', + '\\' : '\U0000266e', + '\\' : '\U0000266f', + '\\' : '\U00002220', + '\\' : '\U000000a9', + '\\' : '\U000000ae', + '\\' : '\U000000ad', + '\\' : '\U000000af', + '\\' : '\U000000bc', + '\\' : '\U000000bd', + '\\' : '\U000000be', + '\\' : '\U000000aa', + '\\' : '\U000000ba', + '\\
' : '\U000000a7', + '\\' : '\U000000b6', + '\\' : '\U000000a1', + '\\' : '\U000000bf', + '\\' : '\U000020ac', + '\\' : '\U000000a3', + '\\' : '\U000000a5', + '\\' : '\U000000a2', + '\\' : '\U000000a4', + '\\' : '\U000000b0', + '\\' : '\U00002a3f', + '\\' : '\U00002127', + '\\' : '\U000025ca', + '\\' : '\U00002118', + '\\' : '\U00002240', + '\\' : '\U000022c4', + '\\' : '\U000000b4', + '\\' : '\U00000131', + '\\' : '\U000000a8', + '\\' : '\U000000b8', + '\\' : '\U000002dd', + '\\' : '\U000003f5', + '\\' : '\U000023ce', + '\\' : '\U00002039', + '\\' : '\U0000203a', + '\\' : '\U00002302', + '\\<^sub>' : '\U000021e9', + '\\<^sup>' : '\U000021e7', + '\\<^bold>' : '\U00002759', + '\\<^bsub>' : '\U000021d8', + '\\<^esub>' : '\U000021d9', + '\\<^bsup>' : '\U000021d7', + '\\<^esup>' : '\U000021d6', + } + + lang_map = {'isabelle' : isabelle_symbols, 'latex' : latex_symbols} + + def __init__(self, **options): + Filter.__init__(self, **options) + lang = get_choice_opt(options, 'lang', + ['isabelle', 'latex'], 'isabelle') + self.symbols = self.lang_map[lang] + + def filter(self, lexer, stream): + for ttype, value in stream: + if value in self.symbols: + yield ttype, self.symbols[value] + else: + yield ttype, value + + +class KeywordCaseFilter(Filter): + """Convert keywords to lowercase or uppercase or capitalize them, which + means first letter uppercase, rest lowercase. + + This can be useful e.g. if you highlight Pascal code and want to adapt the + code to your styleguide. + + Options accepted: + + `case` : string + The casing to convert keywords to. Must be one of ``'lower'``, + ``'upper'`` or ``'capitalize'``. The default is ``'lower'``. + """ + + def __init__(self, **options): + Filter.__init__(self, **options) + case = get_choice_opt(options, 'case', + ['lower', 'upper', 'capitalize'], 'lower') + self.convert = getattr(str, case) + + def filter(self, lexer, stream): + for ttype, value in stream: + if ttype in Keyword: + yield ttype, self.convert(value) + else: + yield ttype, value + + +class NameHighlightFilter(Filter): + """Highlight a normal Name (and Name.*) token with a different token type. + + Example:: + + filter = NameHighlightFilter( + names=['foo', 'bar', 'baz'], + tokentype=Name.Function, + ) + + This would highlight the names "foo", "bar" and "baz" + as functions. `Name.Function` is the default token type. + + Options accepted: + + `names` : list of strings + A list of names that should be given the different token type. + There is no default. + `tokentype` : TokenType or string + A token type or a string containing a token type name that is + used for highlighting the strings in `names`. The default is + `Name.Function`. + """ + + def __init__(self, **options): + Filter.__init__(self, **options) + self.names = set(get_list_opt(options, 'names', [])) + tokentype = options.get('tokentype') + if tokentype: + self.tokentype = string_to_tokentype(tokentype) + else: + self.tokentype = Name.Function + + def filter(self, lexer, stream): + for ttype, value in stream: + if ttype in Name and value in self.names: + yield self.tokentype, value + else: + yield ttype, value + + +class ErrorToken(Exception): + pass + + +class RaiseOnErrorTokenFilter(Filter): + """Raise an exception when the lexer generates an error token. + + Options accepted: + + `excclass` : Exception class + The exception class to raise. + The default is `pygments.filters.ErrorToken`. + + .. 
versionadded:: 0.8 + """ + + def __init__(self, **options): + Filter.__init__(self, **options) + self.exception = options.get('excclass', ErrorToken) + try: + # issubclass() will raise TypeError if first argument is not a class + if not issubclass(self.exception, Exception): + raise TypeError + except TypeError: + raise OptionError('excclass option is not an exception class') + + def filter(self, lexer, stream): + for ttype, value in stream: + if ttype is Error: + raise self.exception(value) + yield ttype, value + + +class VisibleWhitespaceFilter(Filter): + """Convert tabs, newlines and/or spaces to visible characters. + + Options accepted: + + `spaces` : string or bool + If this is a one-character string, spaces will be replaces by this string. + If it is another true value, spaces will be replaced by ``·`` (unicode + MIDDLE DOT). If it is a false value, spaces will not be replaced. The + default is ``False``. + `tabs` : string or bool + The same as for `spaces`, but the default replacement character is ``»`` + (unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value + is ``False``. Note: this will not work if the `tabsize` option for the + lexer is nonzero, as tabs will already have been expanded then. + `tabsize` : int + If tabs are to be replaced by this filter (see the `tabs` option), this + is the total number of characters that a tab should be expanded to. + The default is ``8``. + `newlines` : string or bool + The same as for `spaces`, but the default replacement character is ``¶`` + (unicode PILCROW SIGN). The default value is ``False``. + `wstokentype` : bool + If true, give whitespace the special `Whitespace` token type. This allows + styling the visible whitespace differently (e.g. greyed out), but it can + disrupt background colors. The default is ``True``. + + .. versionadded:: 0.8 + """ + + def __init__(self, **options): + Filter.__init__(self, **options) + for name, default in [('spaces', '·'), + ('tabs', '»'), + ('newlines', '¶')]: + opt = options.get(name, False) + if isinstance(opt, str) and len(opt) == 1: + setattr(self, name, opt) + else: + setattr(self, name, (opt and default or '')) + tabsize = get_int_opt(options, 'tabsize', 8) + if self.tabs: + self.tabs += ' ' * (tabsize - 1) + if self.newlines: + self.newlines += '\n' + self.wstt = get_bool_opt(options, 'wstokentype', True) + + def filter(self, lexer, stream): + if self.wstt: + spaces = self.spaces or ' ' + tabs = self.tabs or '\t' + newlines = self.newlines or '\n' + regex = re.compile(r'\s') + + def replacefunc(wschar): + if wschar == ' ': + return spaces + elif wschar == '\t': + return tabs + elif wschar == '\n': + return newlines + return wschar + + for ttype, value in stream: + yield from _replace_special(ttype, value, regex, Whitespace, + replacefunc) + else: + spaces, tabs, newlines = self.spaces, self.tabs, self.newlines + # simpler processing + for ttype, value in stream: + if spaces: + value = value.replace(' ', spaces) + if tabs: + value = value.replace('\t', tabs) + if newlines: + value = value.replace('\n', newlines) + yield ttype, value + + +class GobbleFilter(Filter): + """Gobbles source code lines (eats initial characters). + + This filter drops the first ``n`` characters off every line of code. This + may be useful when the source code fed to the lexer is indented by a fixed + amount of space that isn't desired in the output. + + Options accepted: + + `n` : int + The number of characters to gobble. + + .. 
versionadded:: 1.2 + """ + def __init__(self, **options): + Filter.__init__(self, **options) + self.n = get_int_opt(options, 'n', 0) + + def gobble(self, value, left): + if left < len(value): + return value[left:], 0 + else: + return '', left - len(value) + + def filter(self, lexer, stream): + n = self.n + left = n # How many characters left to gobble. + for ttype, value in stream: + # Remove ``left`` tokens from first line, ``n`` from all others. + parts = value.split('\n') + (parts[0], left) = self.gobble(parts[0], left) + for i in range(1, len(parts)): + (parts[i], left) = self.gobble(parts[i], n) + value = '\n'.join(parts) + + if value != '': + yield ttype, value + + +class TokenMergeFilter(Filter): + """Merges consecutive tokens with the same token type in the output + stream of a lexer. + + .. versionadded:: 1.2 + """ + def __init__(self, **options): + Filter.__init__(self, **options) + + def filter(self, lexer, stream): + current_type = None + current_value = None + for ttype, value in stream: + if ttype is current_type: + current_value += value + else: + if current_type is not None: + yield current_type, current_value + current_type = ttype + current_value = value + if current_type is not None: + yield current_type, current_value + + +FILTERS = { + 'codetagify': CodeTagFilter, + 'keywordcase': KeywordCaseFilter, + 'highlight': NameHighlightFilter, + 'raiseonerror': RaiseOnErrorTokenFilter, + 'whitespace': VisibleWhitespaceFilter, + 'gobble': GobbleFilter, + 'tokenmerge': TokenMergeFilter, + 'symbols': SymbolFilter, +} diff --git a/pipenv/patched/notpip/_vendor/pygments/formatter.py b/pipenv/patched/notpip/_vendor/pygments/formatter.py new file mode 100644 index 0000000000..f7627a468d --- /dev/null +++ b/pipenv/patched/notpip/_vendor/pygments/formatter.py @@ -0,0 +1,94 @@ +""" + pygments.formatter + ~~~~~~~~~~~~~~~~~~ + + Base formatter class. + + :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import codecs + +from pipenv.patched.notpip._vendor.pygments.util import get_bool_opt +from pipenv.patched.notpip._vendor.pygments.styles import get_style_by_name + +__all__ = ['Formatter'] + + +def _lookup_style(style): + if isinstance(style, str): + return get_style_by_name(style) + return style + + +class Formatter: + """ + Converts a token stream to text. + + Options accepted: + + ``style`` + The style to use, can be a string or a Style subclass + (default: "default"). Not used by e.g. the + TerminalFormatter. + ``full`` + Tells the formatter to output a "full" document, i.e. + a complete self-contained document. This doesn't have + any effect for some formatters (default: false). + ``title`` + If ``full`` is true, the title that should be used to + caption the document (default: ''). + ``encoding`` + If given, must be an encoding name. This will be used to + convert the Unicode token strings to byte strings in the + output. If it is "" or None, Unicode strings will be written + to the output file, which most file-like objects do not + support (default: None). + ``outencoding`` + Overrides ``encoding`` if given. + """ + + #: Name of the formatter + name = None + + #: Shortcuts for the formatter + aliases = [] + + #: fn match rules + filenames = [] + + #: If True, this formatter outputs Unicode strings when no encoding + #: option is given. 
+ unicodeoutput = True + + def __init__(self, **options): + self.style = _lookup_style(options.get('style', 'default')) + self.full = get_bool_opt(options, 'full', False) + self.title = options.get('title', '') + self.encoding = options.get('encoding', None) or None + if self.encoding in ('guess', 'chardet'): + # can happen for e.g. pygmentize -O encoding=guess + self.encoding = 'utf-8' + self.encoding = options.get('outencoding') or self.encoding + self.options = options + + def get_style_defs(self, arg=''): + """ + Return the style definitions for the current style as a string. + + ``arg`` is an additional argument whose meaning depends on the + formatter used. Note that ``arg`` can also be a list or tuple + for some formatters like the html formatter. + """ + return '' + + def format(self, tokensource, outfile): + """ + Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` + tuples and write it into ``outfile``. + """ + if self.encoding: + # wrap the outfile in a StreamWriter + outfile = codecs.lookup(self.encoding)[3](outfile) + return self.format_unencoded(tokensource, outfile) diff --git a/pipenv/patched/notpip/_vendor/pygments/formatters/__init__.py b/pipenv/patched/notpip/_vendor/pygments/formatters/__init__.py new file mode 100644 index 0000000000..3f5479b2a2 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/pygments/formatters/__init__.py @@ -0,0 +1,153 @@ +""" + pygments.formatters + ~~~~~~~~~~~~~~~~~~~ + + Pygments formatters. + + :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re +import sys +import types +import fnmatch +from os.path import basename + +from pipenv.patched.notpip._vendor.pygments.formatters._mapping import FORMATTERS +from pipenv.patched.notpip._vendor.pygments.plugin import find_plugin_formatters +from pipenv.patched.notpip._vendor.pygments.util import ClassNotFound + +__all__ = ['get_formatter_by_name', 'get_formatter_for_filename', + 'get_all_formatters', 'load_formatter_from_file'] + list(FORMATTERS) + +_formatter_cache = {} # classes by name +_pattern_cache = {} + + +def _fn_matches(fn, glob): + """Return whether the supplied file name fn matches pattern filename.""" + if glob not in _pattern_cache: + pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob)) + return pattern.match(fn) + return _pattern_cache[glob].match(fn) + + +def _load_formatters(module_name): + """Load a formatter (and all others in the module too).""" + mod = __import__(module_name, None, None, ['__all__']) + for formatter_name in mod.__all__: + cls = getattr(mod, formatter_name) + _formatter_cache[cls.name] = cls + + +def get_all_formatters(): + """Return a generator for all formatter classes.""" + # NB: this returns formatter classes, not info like get_all_lexers(). + for info in FORMATTERS.values(): + if info[1] not in _formatter_cache: + _load_formatters(info[0]) + yield _formatter_cache[info[1]] + for _, formatter in find_plugin_formatters(): + yield formatter + + +def find_formatter_class(alias): + """Lookup a formatter by alias. + + Returns None if not found. + """ + for module_name, name, aliases, _, _ in FORMATTERS.values(): + if alias in aliases: + if name not in _formatter_cache: + _load_formatters(module_name) + return _formatter_cache[name] + for _, cls in find_plugin_formatters(): + if alias in cls.aliases: + return cls + + +def get_formatter_by_name(_alias, **options): + """Lookup and instantiate a formatter by alias. + + Raises ClassNotFound if not found. 
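+
+ Usage sketch (``linenos`` is just an illustrative formatter option)::
+
+ fmter = get_formatter_by_name('html', linenos=True)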
+ """ + cls = find_formatter_class(_alias) + if cls is None: + raise ClassNotFound("no formatter found for name %r" % _alias) + return cls(**options) + + +def load_formatter_from_file(filename, formattername="CustomFormatter", + **options): + """Load a formatter from a file. + + This method expects a file located relative to the current working + directory, which contains a class named CustomFormatter. By default, + it expects the Formatter to be named CustomFormatter; you can specify + your own class name as the second argument to this function. + + Users should be very careful with the input, because this method + is equivalent to running eval on the input file. + + Raises ClassNotFound if there are any problems importing the Formatter. + + .. versionadded:: 2.2 + """ + try: + # This empty dict will contain the namespace for the exec'd file + custom_namespace = {} + with open(filename, 'rb') as f: + exec(f.read(), custom_namespace) + # Retrieve the class `formattername` from that namespace + if formattername not in custom_namespace: + raise ClassNotFound('no valid %s class found in %s' % + (formattername, filename)) + formatter_class = custom_namespace[formattername] + # And finally instantiate it with the options + return formatter_class(**options) + except OSError as err: + raise ClassNotFound('cannot read %s: %s' % (filename, err)) + except ClassNotFound: + raise + except Exception as err: + raise ClassNotFound('error when loading custom formatter: %s' % err) + + +def get_formatter_for_filename(fn, **options): + """Lookup and instantiate a formatter by filename pattern. + + Raises ClassNotFound if not found. + """ + fn = basename(fn) + for modname, name, _, filenames, _ in FORMATTERS.values(): + for filename in filenames: + if _fn_matches(fn, filename): + if name not in _formatter_cache: + _load_formatters(modname) + return _formatter_cache[name](**options) + for cls in find_plugin_formatters(): + for filename in cls.filenames: + if _fn_matches(fn, filename): + return cls(**options) + raise ClassNotFound("no formatter found for file name %r" % fn) + + +class _automodule(types.ModuleType): + """Automatically import formatters.""" + + def __getattr__(self, name): + info = FORMATTERS.get(name) + if info: + _load_formatters(info[0]) + cls = _formatter_cache[info[1]] + setattr(self, name, cls) + return cls + raise AttributeError(name) + + +oldmod = sys.modules[__name__] +newmod = _automodule(__name__) +newmod.__dict__.update(oldmod.__dict__) +sys.modules[__name__] = newmod +del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types diff --git a/pipenv/patched/notpip/_vendor/pygments/formatters/_mapping.py b/pipenv/patched/notpip/_vendor/pygments/formatters/_mapping.py new file mode 100644 index 0000000000..f8e69b9b7d --- /dev/null +++ b/pipenv/patched/notpip/_vendor/pygments/formatters/_mapping.py @@ -0,0 +1,84 @@ +""" + pygments.formatters._mapping + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Formatter mapping definitions. This file is generated by itself. Everytime + you change something on a builtin formatter definition, run this script from + the formatters folder to update it. + + Do not alter the FORMATTERS dictionary by hand. + + :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +FORMATTERS = { + 'BBCodeFormatter': ('pygments.formatters.bbcode', 'BBCode', ('bbcode', 'bb'), (), 'Format tokens with BBcodes. 
These formatting codes are used by many bulletin boards, so you can highlight your sourcecode with pygments before posting it there.'),
+ 'BmpImageFormatter': ('pygments.formatters.img', 'img_bmp', ('bmp', 'bitmap'), ('*.bmp',), 'Create a bitmap image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
+ 'GifImageFormatter': ('pygments.formatters.img', 'img_gif', ('gif',), ('*.gif',), 'Create a GIF image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
+ 'GroffFormatter': ('pygments.formatters.groff', 'groff', ('groff', 'troff', 'roff'), (), 'Format tokens with groff escapes to change their color and font style.'),
+ 'HtmlFormatter': ('pygments.formatters.html', 'HTML', ('html',), ('*.html', '*.htm'), "Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass` option."),
+ 'IRCFormatter': ('pygments.formatters.irc', 'IRC', ('irc', 'IRC'), (), 'Format tokens with IRC color sequences'),
+ 'ImageFormatter': ('pygments.formatters.img', 'img', ('img', 'IMG', 'png'), ('*.png',), 'Create a PNG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
+ 'JpgImageFormatter': ('pygments.formatters.img', 'img_jpg', ('jpg', 'jpeg'), ('*.jpg',), 'Create a JPEG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
+ 'LatexFormatter': ('pygments.formatters.latex', 'LaTeX', ('latex', 'tex'), ('*.tex',), 'Format tokens as LaTeX code. This needs the `fancyvrb` and `color` standard packages.'),
+ 'NullFormatter': ('pygments.formatters.other', 'Text only', ('text', 'null'), ('*.txt',), 'Output the text unchanged without any formatting.'),
+ 'PangoMarkupFormatter': ('pygments.formatters.pangomarkup', 'Pango Markup', ('pango', 'pangomarkup'), (), 'Format tokens as Pango Markup code. It can then be rendered to an SVG.'),
+ 'RawTokenFormatter': ('pygments.formatters.other', 'Raw tokens', ('raw', 'tokens'), ('*.raw',), 'Format tokens as a raw representation for storing token streams.'),
+ 'RtfFormatter': ('pygments.formatters.rtf', 'RTF', ('rtf',), ('*.rtf',), 'Format tokens as RTF markup. This formatter automatically outputs full RTF documents with color information and other useful stuff. Perfect for Copy and Paste into Microsoft(R) Word(R) documents.'),
+ 'SvgFormatter': ('pygments.formatters.svg', 'SVG', ('svg',), ('*.svg',), 'Format tokens as an SVG graphics file. This formatter is still experimental. Each line of code is a ``<text>`` element with explicit ``x`` and ``y`` coordinates containing ``<tspan>`` elements with the individual token styles.'),
+ 'Terminal256Formatter': ('pygments.formatters.terminal256', 'Terminal256', ('terminal256', 'console256', '256'), (), 'Format tokens with ANSI color sequences, for output in a 256-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
+ 'TerminalFormatter': ('pygments.formatters.terminal', 'Terminal', ('terminal', 'console'), (), 'Format tokens with ANSI color sequences, for output in a text console. Color sequences are terminated at newlines, so that paging the output works correctly.'),
+ 'TerminalTrueColorFormatter': ('pygments.formatters.terminal256', 'TerminalTrueColor', ('terminal16m', 'console16m', '16m'), (), 'Format tokens with ANSI color sequences, for output in a true-color terminal or console.
Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
+ 'TestcaseFormatter': ('pygments.formatters.other', 'Testcase', ('testcase',), (), 'Format tokens as appropriate for a new testcase.')
+}
+
+if __name__ == '__main__': # pragma: no cover
+ import sys
+ import os
+
+ # lookup formatters
+ found_formatters = []
+ imports = []
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
+ from pipenv.patched.notpip._vendor.pygments.util import docstring_headline
+
+ for root, dirs, files in os.walk('.'):
+ for filename in files:
+ if filename.endswith('.py') and not filename.startswith('_'):
+ module_name = 'pygments.formatters%s.%s' % (
+ root[1:].replace('/', '.'), filename[:-3])
+ print(module_name)
+ module = __import__(module_name, None, None, [''])
+ for formatter_name in module.__all__:
+ formatter = getattr(module, formatter_name)
+ found_formatters.append(
+ '%r: %r' % (formatter_name,
+ (module_name,
+ formatter.name,
+ tuple(formatter.aliases),
+ tuple(formatter.filenames),
+ docstring_headline(formatter))))
+ # sort them to make the diff minimal
+ found_formatters.sort()
+
+ # extract useful sourcecode from this file
+ with open(__file__) as fp:
+ content = fp.read()
+ # replace crnl to nl for Windows.
+ #
+ # Note that, originally, contributers should keep nl of master
+ # repository, for example by using some kind of automatic
+ # management EOL, like `EolExtension
+ # <https://www.mercurial-scm.org/wiki/EolExtension>`.
+ content = content.replace("\r\n", "\n")
+ header = content[:content.find('FORMATTERS = {')]
+ footer = content[content.find("if __name__ == '__main__':"):]
+
+ # write new file
+ with open(__file__, 'w') as fp:
+ fp.write(header)
+ fp.write('FORMATTERS = {\n %s\n}\n\n' % ',\n '.join(found_formatters))
+ fp.write(footer)
+
+ print ('=== %d formatters processed.' % len(found_formatters))
diff --git a/pipenv/patched/notpip/_vendor/pygments/formatters/bbcode.py b/pipenv/patched/notpip/_vendor/pygments/formatters/bbcode.py
new file mode 100644
index 0000000000..d23c64e7c9
--- /dev/null
+++ b/pipenv/patched/notpip/_vendor/pygments/formatters/bbcode.py
@@ -0,0 +1,108 @@
+"""
+ pygments.formatters.bbcode
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ BBcode formatter.
+
+ :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+
+from pipenv.patched.notpip._vendor.pygments.formatter import Formatter
+from pipenv.patched.notpip._vendor.pygments.util import get_bool_opt
+
+__all__ = ['BBCodeFormatter']
+
+
+class BBCodeFormatter(Formatter):
+ """
+ Format tokens with BBcodes. These formatting codes are used by many
+ bulletin boards, so you can highlight your sourcecode with pygments before
+ posting it there.
+
+ This formatter has no support for background colors and borders, as there
+ are no common BBcode tags for that.
+
+ Some board systems (e.g. phpBB) don't support colors in their [code] tag,
+ so you can't use the highlighting together with that tag.
+ Text in a [code] tag usually is shown with a monospace font (which this
+ formatter can do with the ``monofont`` option) and no spaces (which you
+ need for indentation) are removed.
+
+ Additional options accepted:
+
+ `style`
+ The style to use, can be a string or a Style subclass (default:
+ ``'default'``).
+
+ `codetag`
+ If set to true, put the output into ``[code]`` tags (default:
+ ``false``)
+
+ `monofont`
+ If set to true, add a tag to show the code with a monospace font
+ (default: ``false``).
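+
+ Usage sketch (plain upstream import paths, not the vendored ones)::
+
+ from pygments import highlight
+ from pygments.lexers import PythonLexer
+ print(highlight('x = 1', PythonLexer(), BBCodeFormatter(codetag=True)))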
+ """ + name = 'BBCode' + aliases = ['bbcode', 'bb'] + filenames = [] + + def __init__(self, **options): + Formatter.__init__(self, **options) + self._code = get_bool_opt(options, 'codetag', False) + self._mono = get_bool_opt(options, 'monofont', False) + + self.styles = {} + self._make_styles() + + def _make_styles(self): + for ttype, ndef in self.style: + start = end = '' + if ndef['color']: + start += '[color=#%s]' % ndef['color'] + end = '[/color]' + end + if ndef['bold']: + start += '[b]' + end = '[/b]' + end + if ndef['italic']: + start += '[i]' + end = '[/i]' + end + if ndef['underline']: + start += '[u]' + end = '[/u]' + end + # there are no common BBcodes for background-color and border + + self.styles[ttype] = start, end + + def format_unencoded(self, tokensource, outfile): + if self._code: + outfile.write('[code]') + if self._mono: + outfile.write('[font=monospace]') + + lastval = '' + lasttype = None + + for ttype, value in tokensource: + while ttype not in self.styles: + ttype = ttype.parent + if ttype == lasttype: + lastval += value + else: + if lastval: + start, end = self.styles[lasttype] + outfile.write(''.join((start, lastval, end))) + lastval = value + lasttype = ttype + + if lastval: + start, end = self.styles[lasttype] + outfile.write(''.join((start, lastval, end))) + + if self._mono: + outfile.write('[/font]') + if self._code: + outfile.write('[/code]') + if self._code or self._mono: + outfile.write('\n') diff --git a/pipenv/patched/notpip/_vendor/pygments/formatters/groff.py b/pipenv/patched/notpip/_vendor/pygments/formatters/groff.py new file mode 100644 index 0000000000..872019e02b --- /dev/null +++ b/pipenv/patched/notpip/_vendor/pygments/formatters/groff.py @@ -0,0 +1,168 @@ +""" + pygments.formatters.groff + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + Formatter for groff output. + + :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import math +from pipenv.patched.notpip._vendor.pygments.formatter import Formatter +from pipenv.patched.notpip._vendor.pygments.util import get_bool_opt, get_int_opt + +__all__ = ['GroffFormatter'] + + +class GroffFormatter(Formatter): + """ + Format tokens with groff escapes to change their color and font style. + + .. versionadded:: 2.11 + + Additional options accepted: + + `style` + The style to use, can be a string or a Style subclass (default: + ``'default'``). + + `monospaced` + If set to true, monospace font will be used (default: ``true``). + + `linenos` + If set to true, print the line numbers (default: ``false``). + + `wrap` + Wrap lines to the specified number of characters. Disabled if set to 0 + (default: ``0``). 
+ """ + + name = 'groff' + aliases = ['groff','troff','roff'] + filenames = [] + + def __init__(self, **options): + Formatter.__init__(self, **options) + + self.monospaced = get_bool_opt(options, 'monospaced', True) + self.linenos = get_bool_opt(options, 'linenos', False) + self._lineno = 0 + self.wrap = get_int_opt(options, 'wrap', 0) + self._linelen = 0 + + self.styles = {} + self._make_styles() + + + def _make_styles(self): + regular = '\\f[CR]' if self.monospaced else '\\f[R]' + bold = '\\f[CB]' if self.monospaced else '\\f[B]' + italic = '\\f[CI]' if self.monospaced else '\\f[I]' + + for ttype, ndef in self.style: + start = end = '' + if ndef['color']: + start += '\\m[%s]' % ndef['color'] + end = '\\m[]' + end + if ndef['bold']: + start += bold + end = regular + end + if ndef['italic']: + start += italic + end = regular + end + if ndef['bgcolor']: + start += '\\M[%s]' % ndef['bgcolor'] + end = '\\M[]' + end + + self.styles[ttype] = start, end + + + def _define_colors(self, outfile): + colors = set() + for _, ndef in self.style: + if ndef['color'] is not None: + colors.add(ndef['color']) + + for color in colors: + outfile.write('.defcolor ' + color + ' rgb #' + color + '\n') + + + def _write_lineno(self, outfile): + self._lineno += 1 + outfile.write("%s% 4d " % (self._lineno != 1 and '\n' or '', self._lineno)) + + + def _wrap_line(self, line): + length = len(line.rstrip('\n')) + space = ' ' if self.linenos else '' + newline = '' + + if length > self.wrap: + for i in range(0, math.floor(length / self.wrap)): + chunk = line[i*self.wrap:i*self.wrap+self.wrap] + newline += (chunk + '\n' + space) + remainder = length % self.wrap + if remainder > 0: + newline += line[-remainder-1:] + self._linelen = remainder + elif self._linelen + length > self.wrap: + newline = ('\n' + space) + line + self._linelen = length + else: + newline = line + self._linelen += length + + return newline + + + def _escape_chars(self, text): + text = text.replace('\\', '\\[u005C]'). \ + replace('.', '\\[char46]'). \ + replace('\'', '\\[u0027]'). \ + replace('`', '\\[u0060]'). \ + replace('~', '\\[u007E]') + copy = text + + for char in copy: + if len(char) != len(char.encode()): + uni = char.encode('unicode_escape') \ + .decode()[1:] \ + .replace('x', 'u00') \ + .upper() + text = text.replace(char, '\\[u' + uni[1:] + ']') + + return text + + + def format_unencoded(self, tokensource, outfile): + self._define_colors(outfile) + + outfile.write('.nf\n\\f[CR]\n') + + if self.linenos: + self._write_lineno(outfile) + + for ttype, value in tokensource: + start, end = self.styles[ttype] + + for line in value.splitlines(True): + if self.wrap > 0: + line = self._wrap_line(line) + + if start and end: + text = self._escape_chars(line.rstrip('\n')) + if text != '': + outfile.write(''.join((start, text, end))) + else: + outfile.write(self._escape_chars(line.rstrip('\n'))) + + if line.endswith('\n'): + if self.linenos: + self._write_lineno(outfile) + self._linelen = 0 + else: + outfile.write('\n') + self._linelen = 0 + + outfile.write('\n.fi') diff --git a/pipenv/patched/notpip/_vendor/pygments/formatters/html.py b/pipenv/patched/notpip/_vendor/pygments/formatters/html.py new file mode 100644 index 0000000000..c9b11770ca --- /dev/null +++ b/pipenv/patched/notpip/_vendor/pygments/formatters/html.py @@ -0,0 +1,983 @@ +""" + pygments.formatters.html + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Formatter for HTML output. + + :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import functools +import os +import sys +import os.path +from io import StringIO + +from pipenv.patched.notpip._vendor.pygments.formatter import Formatter +from pipenv.patched.notpip._vendor.pygments.token import Token, Text, STANDARD_TYPES +from pipenv.patched.notpip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt + +try: + import ctags +except ImportError: + ctags = None + +__all__ = ['HtmlFormatter'] + + +_escape_html_table = { + ord('&'): '&', + ord('<'): '<', + ord('>'): '>', + ord('"'): '"', + ord("'"): ''', +} + + +def escape_html(text, table=_escape_html_table): + """Escape &, <, > as well as single and double quotes for HTML.""" + return text.translate(table) + + +def webify(color): + if color.startswith('calc') or color.startswith('var'): + return color + else: + return '#' + color + + +def _get_ttype_class(ttype): + fname = STANDARD_TYPES.get(ttype) + if fname: + return fname + aname = '' + while fname is None: + aname = '-' + ttype[-1] + aname + ttype = ttype.parent + fname = STANDARD_TYPES.get(ttype) + return fname + aname + + +CSSFILE_TEMPLATE = '''\ +/* +generated by Pygments +Copyright 2006-2021 by the Pygments team. +Licensed under the BSD license, see LICENSE for details. +*/ +%(styledefs)s +''' + +DOC_HEADER = '''\ + + + + + %(title)s + + + + +

%(title)s

+ +''' + +DOC_HEADER_EXTERNALCSS = '''\ + + + + + %(title)s + + + + +

%(title)s

+ +''' + +DOC_FOOTER = '''\ + + +''' + + +class HtmlFormatter(Formatter): + r""" + Format tokens as HTML 4 ```` tags within a ``
`` tag, wrapped
+    in a ``
`` tag. The ``
``'s CSS class can be set by the `cssclass` + option. + + If the `linenos` option is set to ``"table"``, the ``
`` is
+    additionally wrapped inside a ```` which has one row and two
+    cells: one containing the line numbers and one containing the code.
+    Example:
+
+    .. sourcecode:: html
+
+        
+
+ + +
+
1
+            2
+
+
def foo(bar):
+              pass
+            
+
+
+    (whitespace added to improve clarity).
+
+    Wrapping can be disabled using the `nowrap` option.
+
+    A list of lines can be specified using the `hl_lines` option to make these
+    lines highlighted (as of Pygments 0.11).
+
+    With the `full` option, a complete HTML 4 document is output, including
+    the style definitions inside a ``<style>`` tag, or in a separate file if
+    the `cssfile` option is given.
+CONSOLE_HTML_FORMAT = """\
+<!DOCTYPE html>
+<head>
+<meta charset="UTF-8">
+<style>
+{stylesheet}
+body {{
+    color: {foreground};
+    background-color: {background};
+}}
+</style>
+</head>
+<html>
+<body>
+    <code>
+        <pre style="font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">{code}</pre>
+    </code>
+</body>
+</html>
+ + +""" + +_TERM_COLORS = {"256color": ColorSystem.EIGHT_BIT, "16color": ColorSystem.STANDARD} + + +class ConsoleDimensions(NamedTuple): + """Size of the terminal.""" + + width: int + """The width of the console in 'cells'.""" + height: int + """The height of the console in lines.""" + + +@dataclass +class ConsoleOptions: + """Options for __rich_console__ method.""" + + size: ConsoleDimensions + """Size of console.""" + legacy_windows: bool + """legacy_windows: flag for legacy windows.""" + min_width: int + """Minimum width of renderable.""" + max_width: int + """Maximum width of renderable.""" + is_terminal: bool + """True if the target is a terminal, otherwise False.""" + encoding: str + """Encoding of terminal.""" + max_height: int + """Height of container (starts as terminal)""" + justify: Optional[JustifyMethod] = None + """Justify value override for renderable.""" + overflow: Optional[OverflowMethod] = None + """Overflow value override for renderable.""" + no_wrap: Optional[bool] = False + """Disable wrapping for text.""" + highlight: Optional[bool] = None + """Highlight override for render_str.""" + markup: Optional[bool] = None + """Enable markup when rendering strings.""" + height: Optional[int] = None + + @property + def ascii_only(self) -> bool: + """Check if renderables should use ascii only.""" + return not self.encoding.startswith("utf") + + def copy(self) -> "ConsoleOptions": + """Return a copy of the options. + + Returns: + ConsoleOptions: a copy of self. + """ + options: ConsoleOptions = ConsoleOptions.__new__(ConsoleOptions) + options.__dict__ = self.__dict__.copy() + return options + + def update( + self, + *, + width: Union[int, NoChange] = NO_CHANGE, + min_width: Union[int, NoChange] = NO_CHANGE, + max_width: Union[int, NoChange] = NO_CHANGE, + justify: Union[Optional[JustifyMethod], NoChange] = NO_CHANGE, + overflow: Union[Optional[OverflowMethod], NoChange] = NO_CHANGE, + no_wrap: Union[Optional[bool], NoChange] = NO_CHANGE, + highlight: Union[Optional[bool], NoChange] = NO_CHANGE, + markup: Union[Optional[bool], NoChange] = NO_CHANGE, + height: Union[Optional[int], NoChange] = NO_CHANGE, + ) -> "ConsoleOptions": + """Update values, return a copy.""" + options = self.copy() + if not isinstance(width, NoChange): + options.min_width = options.max_width = max(0, width) + if not isinstance(min_width, NoChange): + options.min_width = min_width + if not isinstance(max_width, NoChange): + options.max_width = max_width + if not isinstance(justify, NoChange): + options.justify = justify + if not isinstance(overflow, NoChange): + options.overflow = overflow + if not isinstance(no_wrap, NoChange): + options.no_wrap = no_wrap + if not isinstance(highlight, NoChange): + options.highlight = highlight + if not isinstance(markup, NoChange): + options.markup = markup + if not isinstance(height, NoChange): + if height is not None: + options.max_height = height + options.height = None if height is None else max(0, height) + return options + + def update_width(self, width: int) -> "ConsoleOptions": + """Update just the width, return a copy. + + Args: + width (int): New width (sets both min_width and max_width) + + Returns: + ~ConsoleOptions: New console options instance. + """ + options = self.copy() + options.min_width = options.max_width = max(0, width) + return options + + def update_height(self, height: int) -> "ConsoleOptions": + """Update the height, and return a copy. + + Args: + height (int): New height + + Returns: + ~ConsoleOptions: New Console options instance. 
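+
+        Example (illustrative sketch, assuming an existing ``console``):
+            >>> options = console.options
+            >>> shorter = options.update_height(10)
+            >>> shorter.max_height
+            10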
+ """ + options = self.copy() + options.max_height = options.height = height + return options + + def update_dimensions(self, width: int, height: int) -> "ConsoleOptions": + """Update the width and height, and return a copy. + + Args: + width (int): New width (sets both min_width and max_width). + height (int): New height. + + Returns: + ~ConsoleOptions: New console options instance. + """ + options = self.copy() + options.min_width = options.max_width = max(0, width) + options.height = options.max_height = height + return options + + +@runtime_checkable +class RichCast(Protocol): + """An object that may be 'cast' to a console renderable.""" + + def __rich__(self) -> Union["ConsoleRenderable", str]: # pragma: no cover + ... + + +@runtime_checkable +class ConsoleRenderable(Protocol): + """An object that supports the console protocol.""" + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": # pragma: no cover + ... + + +# A type that may be rendered by Console. +RenderableType = Union[ConsoleRenderable, RichCast, str] + + +# The result of calling a __rich_console__ method. +RenderResult = Iterable[Union[RenderableType, Segment]] + + +_null_highlighter = NullHighlighter() + + +class CaptureError(Exception): + """An error in the Capture context manager.""" + + +class NewLine: + """A renderable to generate new line(s)""" + + def __init__(self, count: int = 1) -> None: + self.count = count + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> Iterable[Segment]: + yield Segment("\n" * self.count) + + +class ScreenUpdate: + """Render a list of lines at a given offset.""" + + def __init__(self, lines: List[List[Segment]], x: int, y: int) -> None: + self._lines = lines + self.x = x + self.y = y + + def __rich_console__( + self, console: "Console", options: ConsoleOptions + ) -> RenderResult: + x = self.x + move_to = Control.move_to + for offset, line in enumerate(self._lines, self.y): + yield move_to(x, offset) + yield from line + + +class Capture: + """Context manager to capture the result of printing to the console. + See :meth:`~rich.console.Console.capture` for how to use. + + Args: + console (Console): A console instance to capture output. + """ + + def __init__(self, console: "Console") -> None: + self._console = console + self._result: Optional[str] = None + + def __enter__(self) -> "Capture": + self._console.begin_capture() + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + self._result = self._console.end_capture() + + def get(self) -> str: + """Get the result of the capture.""" + if self._result is None: + raise CaptureError( + "Capture result is not available until context manager exits." + ) + return self._result + + +class ThemeContext: + """A context manager to use a temporary theme. See :meth:`~rich.console.Console.use_theme` for usage.""" + + def __init__(self, console: "Console", theme: Theme, inherit: bool = True) -> None: + self.console = console + self.theme = theme + self.inherit = inherit + + def __enter__(self) -> "ThemeContext": + self.console.push_theme(self.theme) + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + self.console.pop_theme() + + +class PagerContext: + """A context manager that 'pages' content. 
See :meth:`~rich.console.Console.pager` for usage.""" + + def __init__( + self, + console: "Console", + pager: Optional[Pager] = None, + styles: bool = False, + links: bool = False, + ) -> None: + self._console = console + self.pager = SystemPager() if pager is None else pager + self.styles = styles + self.links = links + + def __enter__(self) -> "PagerContext": + self._console._enter_buffer() + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + if exc_type is None: + with self._console._lock: + buffer: List[Segment] = self._console._buffer[:] + del self._console._buffer[:] + segments: Iterable[Segment] = buffer + if not self.styles: + segments = Segment.strip_styles(segments) + elif not self.links: + segments = Segment.strip_links(segments) + content = self._console._render_buffer(segments) + self.pager.show(content) + self._console._exit_buffer() + + +class ScreenContext: + """A context manager that enables an alternative screen. See :meth:`~rich.console.Console.screen` for usage.""" + + def __init__( + self, console: "Console", hide_cursor: bool, style: StyleType = "" + ) -> None: + self.console = console + self.hide_cursor = hide_cursor + self.screen = Screen(style=style) + self._changed = False + + def update( + self, *renderables: RenderableType, style: Optional[StyleType] = None + ) -> None: + """Update the screen. + + Args: + renderable (RenderableType, optional): Optional renderable to replace current renderable, + or None for no change. Defaults to None. + style: (Style, optional): Replacement style, or None for no change. Defaults to None. + """ + if renderables: + self.screen.renderable = ( + Group(*renderables) if len(renderables) > 1 else renderables[0] + ) + if style is not None: + self.screen.style = style + self.console.print(self.screen, end="") + + def __enter__(self) -> "ScreenContext": + self._changed = self.console.set_alt_screen(True) + if self._changed and self.hide_cursor: + self.console.show_cursor(False) + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + if self._changed: + self.console.set_alt_screen(False) + if self.hide_cursor: + self.console.show_cursor(True) + + +class Group: + """Takes a group of renderables and returns a renderable object that renders the group. + + Args: + renderables (Iterable[RenderableType]): An iterable of renderable objects. + fit (bool, optional): Fit dimension of group to contents, or fill available space. Defaults to True. + """ + + def __init__(self, *renderables: "RenderableType", fit: bool = True) -> None: + self._renderables = renderables + self.fit = fit + self._render: Optional[List[RenderableType]] = None + + @property + def renderables(self) -> List["RenderableType"]: + if self._render is None: + self._render = list(self._renderables) + return self._render + + def __rich_measure__( + self, console: "Console", options: "ConsoleOptions" + ) -> "Measurement": + if self.fit: + return measure_renderables(console, options, self.renderables) + else: + return Measurement(options.max_width, options.max_width) + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> RenderResult: + yield from self.renderables + + +def group(fit: bool = True) -> Callable[..., Callable[..., Group]]: + """A decorator that turns an iterable of renderables in to a group. 
+ + Args: + fit (bool, optional): Fit dimension of group to contents, or fill available space. Defaults to True. + """ + + def decorator( + method: Callable[..., Iterable[RenderableType]] + ) -> Callable[..., Group]: + """Convert a method that returns an iterable of renderables in to a Group.""" + + @wraps(method) + def _replace(*args: Any, **kwargs: Any) -> Group: + renderables = method(*args, **kwargs) + return Group(*renderables, fit=fit) + + return _replace + + return decorator + + +def _is_jupyter() -> bool: # pragma: no cover + """Check if we're running in a Jupyter notebook.""" + try: + get_ipython # type: ignore + except NameError: + return False + ipython = get_ipython() # type: ignore + shell = ipython.__class__.__name__ + if "google.colab" in str(ipython.__class__) or shell == "ZMQInteractiveShell": + return True # Jupyter notebook or qtconsole + elif shell == "TerminalInteractiveShell": + return False # Terminal running IPython + else: + return False # Other type (?) + + +COLOR_SYSTEMS = { + "standard": ColorSystem.STANDARD, + "256": ColorSystem.EIGHT_BIT, + "truecolor": ColorSystem.TRUECOLOR, + "windows": ColorSystem.WINDOWS, +} + + +_COLOR_SYSTEMS_NAMES = {system: name for name, system in COLOR_SYSTEMS.items()} + + +@dataclass +class ConsoleThreadLocals(threading.local): + """Thread local values for Console context.""" + + theme_stack: ThemeStack + buffer: List[Segment] = field(default_factory=list) + buffer_index: int = 0 + + +class RenderHook(ABC): + """Provides hooks in to the render process.""" + + @abstractmethod + def process_renderables( + self, renderables: List[ConsoleRenderable] + ) -> List[ConsoleRenderable]: + """Called with a list of objects to render. + + This method can return a new list of renderables, or modify and return the same list. + + Args: + renderables (List[ConsoleRenderable]): A number of renderable objects. + + Returns: + List[ConsoleRenderable]: A replacement list of renderables. + """ + + +_windows_console_features: Optional["WindowsConsoleFeatures"] = None + + +def get_windows_console_features() -> "WindowsConsoleFeatures": # pragma: no cover + global _windows_console_features + if _windows_console_features is not None: + return _windows_console_features + from ._windows import get_windows_console_features + + _windows_console_features = get_windows_console_features() + return _windows_console_features + + +def detect_legacy_windows() -> bool: + """Detect legacy Windows.""" + return WINDOWS and not get_windows_console_features().vt + + +if detect_legacy_windows(): # pragma: no cover + from pipenv.patched.notpip._vendor.colorama import init + + init(strip=False) + + +class Console: + """A high level console interface. + + Args: + color_system (str, optional): The color system supported by your terminal, + either ``"standard"``, ``"256"`` or ``"truecolor"``. Leave as ``"auto"`` to autodetect. + force_terminal (Optional[bool], optional): Enable/disable terminal control codes, or None to auto-detect terminal. Defaults to None. + force_jupyter (Optional[bool], optional): Enable/disable Jupyter rendering, or None to auto-detect Jupyter. Defaults to None. + force_interactive (Optional[bool], optional): Enable/disable interactive mode, or None to auto detect. Defaults to None. + soft_wrap (Optional[bool], optional): Set soft wrap default on print method. Defaults to False. + theme (Theme, optional): An optional style theme object, or ``None`` for default theme. + stderr (bool, optional): Use stderr rather than stdout if ``file`` is not specified. 
Defaults to False. + file (IO, optional): A file object where the console should write to. Defaults to stdout. + quiet (bool, Optional): Boolean to suppress all output. Defaults to False. + width (int, optional): The width of the terminal. Leave as default to auto-detect width. + height (int, optional): The height of the terminal. Leave as default to auto-detect height. + style (StyleType, optional): Style to apply to all output, or None for no style. Defaults to None. + no_color (Optional[bool], optional): Enabled no color mode, or None to auto detect. Defaults to None. + tab_size (int, optional): Number of spaces used to replace a tab character. Defaults to 8. + record (bool, optional): Boolean to enable recording of terminal output, + required to call :meth:`export_html` and :meth:`export_text`. Defaults to False. + markup (bool, optional): Boolean to enable :ref:`console_markup`. Defaults to True. + emoji (bool, optional): Enable emoji code. Defaults to True. + emoji_variant (str, optional): Optional emoji variant, either "text" or "emoji". Defaults to None. + highlight (bool, optional): Enable automatic highlighting. Defaults to True. + log_time (bool, optional): Boolean to enable logging of time by :meth:`log` methods. Defaults to True. + log_path (bool, optional): Boolean to enable the logging of the caller by :meth:`log`. Defaults to True. + log_time_format (Union[str, TimeFormatterCallable], optional): If ``log_time`` is enabled, either string for strftime or callable that formats the time. Defaults to "[%X] ". + highlighter (HighlighterType, optional): Default highlighter. + legacy_windows (bool, optional): Enable legacy Windows mode, or ``None`` to auto detect. Defaults to ``None``. + safe_box (bool, optional): Restrict box options that don't render on legacy Windows. + get_datetime (Callable[[], datetime], optional): Callable that gets the current time as a datetime.datetime object (used by Console.log), + or None for datetime.now. + get_time (Callable[[], time], optional): Callable that gets the current time in seconds, default uses time.monotonic. 
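+
+    Example (illustrative sketch; the import path reflects the vendored location):
+        >>> from pipenv.patched.notpip._vendor.rich.console import Console
+        >>> console = Console(width=40, record=True)
+        >>> console.print("[bold]hello[/]")
+        >>> "hello" in console.export_text()
+        True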
+ """ + + _environ: Mapping[str, str] = os.environ + + def __init__( + self, + *, + color_system: Optional[ + Literal["auto", "standard", "256", "truecolor", "windows"] + ] = "auto", + force_terminal: Optional[bool] = None, + force_jupyter: Optional[bool] = None, + force_interactive: Optional[bool] = None, + soft_wrap: bool = False, + theme: Optional[Theme] = None, + stderr: bool = False, + file: Optional[IO[str]] = None, + quiet: bool = False, + width: Optional[int] = None, + height: Optional[int] = None, + style: Optional[StyleType] = None, + no_color: Optional[bool] = None, + tab_size: int = 8, + record: bool = False, + markup: bool = True, + emoji: bool = True, + emoji_variant: Optional[EmojiVariant] = None, + highlight: bool = True, + log_time: bool = True, + log_path: bool = True, + log_time_format: Union[str, FormatTimeCallable] = "[%X]", + highlighter: Optional["HighlighterType"] = ReprHighlighter(), + legacy_windows: Optional[bool] = None, + safe_box: bool = True, + get_datetime: Optional[Callable[[], datetime]] = None, + get_time: Optional[Callable[[], float]] = None, + _environ: Optional[Mapping[str, str]] = None, + ): + # Copy of os.environ allows us to replace it for testing + if _environ is not None: + self._environ = _environ + + self.is_jupyter = _is_jupyter() if force_jupyter is None else force_jupyter + if self.is_jupyter: + width = width or 93 + height = height or 100 + + self.soft_wrap = soft_wrap + self._width = width + self._height = height + self.tab_size = tab_size + self.record = record + self._markup = markup + self._emoji = emoji + self._emoji_variant: Optional[EmojiVariant] = emoji_variant + self._highlight = highlight + self.legacy_windows: bool = ( + (detect_legacy_windows() and not self.is_jupyter) + if legacy_windows is None + else legacy_windows + ) + if width is None: + columns = self._environ.get("COLUMNS") + if columns is not None and columns.isdigit(): + width = int(columns) - self.legacy_windows + if height is None: + lines = self._environ.get("LINES") + if lines is not None and lines.isdigit(): + height = int(lines) + + self.soft_wrap = soft_wrap + self._width = width + self._height = height + + self._color_system: Optional[ColorSystem] + self._force_terminal = force_terminal + self._file = file + self.quiet = quiet + self.stderr = stderr + + if color_system is None: + self._color_system = None + elif color_system == "auto": + self._color_system = self._detect_color_system() + else: + self._color_system = COLOR_SYSTEMS[color_system] + + self._lock = threading.RLock() + self._log_render = LogRender( + show_time=log_time, + show_path=log_path, + time_format=log_time_format, + ) + self.highlighter: HighlighterType = highlighter or _null_highlighter + self.safe_box = safe_box + self.get_datetime = get_datetime or datetime.now + self.get_time = get_time or monotonic + self.style = style + self.no_color = ( + no_color if no_color is not None else "NO_COLOR" in self._environ + ) + self.is_interactive = ( + (self.is_terminal and not self.is_dumb_terminal) + if force_interactive is None + else force_interactive + ) + + self._record_buffer_lock = threading.RLock() + self._thread_locals = ConsoleThreadLocals( + theme_stack=ThemeStack(themes.DEFAULT if theme is None else theme) + ) + self._record_buffer: List[Segment] = [] + self._render_hooks: List[RenderHook] = [] + self._live: Optional["Live"] = None + self._is_alt_screen = False + + def __repr__(self) -> str: + return f"" + + @property + def file(self) -> IO[str]: + """Get the file object to write to.""" + 
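+        # Fall back to stderr/stdout when no explicit file was given, then
+        # unwrap any proxy that rich's file proxying may have installed via
+        # the ``rich_proxied_file`` attribute.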
file = self._file or (sys.stderr if self.stderr else sys.stdout) + file = getattr(file, "rich_proxied_file", file) + return file + + @file.setter + def file(self, new_file: IO[str]) -> None: + """Set a new file object.""" + self._file = new_file + + @property + def _buffer(self) -> List[Segment]: + """Get a thread local buffer.""" + return self._thread_locals.buffer + + @property + def _buffer_index(self) -> int: + """Get a thread local buffer.""" + return self._thread_locals.buffer_index + + @_buffer_index.setter + def _buffer_index(self, value: int) -> None: + self._thread_locals.buffer_index = value + + @property + def _theme_stack(self) -> ThemeStack: + """Get the thread local theme stack.""" + return self._thread_locals.theme_stack + + def _detect_color_system(self) -> Optional[ColorSystem]: + """Detect color system from env vars.""" + if self.is_jupyter: + return ColorSystem.TRUECOLOR + if not self.is_terminal or self.is_dumb_terminal: + return None + if WINDOWS: # pragma: no cover + if self.legacy_windows: # pragma: no cover + return ColorSystem.WINDOWS + windows_console_features = get_windows_console_features() + return ( + ColorSystem.TRUECOLOR + if windows_console_features.truecolor + else ColorSystem.EIGHT_BIT + ) + else: + color_term = self._environ.get("COLORTERM", "").strip().lower() + if color_term in ("truecolor", "24bit"): + return ColorSystem.TRUECOLOR + term = self._environ.get("TERM", "").strip().lower() + _term_name, _hyphen, colors = term.rpartition("-") + color_system = _TERM_COLORS.get(colors, ColorSystem.STANDARD) + return color_system + + def _enter_buffer(self) -> None: + """Enter in to a buffer context, and buffer all output.""" + self._buffer_index += 1 + + def _exit_buffer(self) -> None: + """Leave buffer context, and render content if required.""" + self._buffer_index -= 1 + self._check_buffer() + + def set_live(self, live: "Live") -> None: + """Set Live instance. Used by Live context manager. + + Args: + live (Live): Live instance using this Console. + + Raises: + errors.LiveError: If this Console has a Live context currently active. + """ + with self._lock: + if self._live is not None: + raise errors.LiveError("Only one live display may be active at once") + self._live = live + + def clear_live(self) -> None: + """Clear the Live instance.""" + with self._lock: + self._live = None + + def push_render_hook(self, hook: RenderHook) -> None: + """Add a new render hook to the stack. + + Args: + hook (RenderHook): Render hook instance. + """ + with self._lock: + self._render_hooks.append(hook) + + def pop_render_hook(self) -> None: + """Pop the last renderhook from the stack.""" + with self._lock: + self._render_hooks.pop() + + def __enter__(self) -> "Console": + """Own context manager to enter buffer context.""" + self._enter_buffer() + return self + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + """Exit buffer context.""" + self._exit_buffer() + + def begin_capture(self) -> None: + """Begin capturing console output. Call :meth:`end_capture` to exit capture mode and return output.""" + self._enter_buffer() + + def end_capture(self) -> str: + """End capture mode and return captured string. + + Returns: + str: Console output. + """ + render_result = self._render_buffer(self._buffer) + del self._buffer[:] + self._exit_buffer() + return render_result + + def push_theme(self, theme: Theme, *, inherit: bool = True) -> None: + """Push a new theme on to the top of the stack, replacing the styles from the previous theme. 
+ Generally speaking, you should call :meth:`~rich.console.Console.use_theme` to get a context manager, rather + than calling this method directly. + + Args: + theme (Theme): A theme instance. + inherit (bool, optional): Inherit existing styles. Defaults to True. + """ + self._theme_stack.push_theme(theme, inherit=inherit) + + def pop_theme(self) -> None: + """Remove theme from top of stack, restoring previous theme.""" + self._theme_stack.pop_theme() + + def use_theme(self, theme: Theme, *, inherit: bool = True) -> ThemeContext: + """Use a different theme for the duration of the context manager. + + Args: + theme (Theme): Theme instance to user. + inherit (bool, optional): Inherit existing console styles. Defaults to True. + + Returns: + ThemeContext: [description] + """ + return ThemeContext(self, theme, inherit) + + @property + def color_system(self) -> Optional[str]: + """Get color system string. + + Returns: + Optional[str]: "standard", "256" or "truecolor". + """ + + if self._color_system is not None: + return _COLOR_SYSTEMS_NAMES[self._color_system] + else: + return None + + @property + def encoding(self) -> str: + """Get the encoding of the console file, e.g. ``"utf-8"``. + + Returns: + str: A standard encoding string. + """ + return (getattr(self.file, "encoding", "utf-8") or "utf-8").lower() + + @property + def is_terminal(self) -> bool: + """Check if the console is writing to a terminal. + + Returns: + bool: True if the console writing to a device capable of + understanding terminal codes, otherwise False. + """ + if self._force_terminal is not None: + return self._force_terminal + isatty: Optional[Callable[[], bool]] = getattr(self.file, "isatty", None) + try: + return False if isatty is None else isatty() + except ValueError: + # in some situation (at the end of a pytest run for example) isatty() can raise + # ValueError: I/O operation on closed file + # return False because we aren't in a terminal anymore + return False + + @property + def is_dumb_terminal(self) -> bool: + """Detect dumb terminal. + + Returns: + bool: True if writing to a dumb terminal, otherwise False. + + """ + _term = self._environ.get("TERM", "") + is_dumb = _term.lower() in ("dumb", "unknown") + return self.is_terminal and is_dumb + + @property + def options(self) -> ConsoleOptions: + """Get default console options.""" + return ConsoleOptions( + max_height=self.size.height, + size=self.size, + legacy_windows=self.legacy_windows, + min_width=1, + max_width=self.width, + encoding=self.encoding, + is_terminal=self.is_terminal, + ) + + @property + def size(self) -> ConsoleDimensions: + """Get the size of the console. + + Returns: + ConsoleDimensions: A named tuple containing the dimensions. 
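+
+        Example (illustrative sketch, assuming an existing ``console``):
+            >>> width, height = console.size
+            >>> console.size.width == console.width
+            True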
+ """ + + if self._width is not None and self._height is not None: + return ConsoleDimensions(self._width - self.legacy_windows, self._height) + + if self.is_dumb_terminal: + return ConsoleDimensions(80, 25) + + width: Optional[int] = None + height: Optional[int] = None + + if WINDOWS: # pragma: no cover + try: + width, height = os.get_terminal_size() + except OSError: # Probably not a terminal + pass + else: + try: + width, height = os.get_terminal_size(sys.__stdin__.fileno()) + except (AttributeError, ValueError, OSError): + try: + width, height = os.get_terminal_size(sys.__stdout__.fileno()) + except (AttributeError, ValueError, OSError): + pass + + columns = self._environ.get("COLUMNS") + if columns is not None and columns.isdigit(): + width = int(columns) + lines = self._environ.get("LINES") + if lines is not None and lines.isdigit(): + height = int(lines) + + # get_terminal_size can report 0, 0 if run from pseudo-terminal + width = width or 80 + height = height or 25 + return ConsoleDimensions( + width - self.legacy_windows if self._width is None else self._width, + height if self._height is None else self._height, + ) + + @size.setter + def size(self, new_size: Tuple[int, int]) -> None: + """Set a new size for the terminal. + + Args: + new_size (Tuple[int, int]): New width and height. + """ + width, height = new_size + self._width = width + self._height = height + + @property + def width(self) -> int: + """Get the width of the console. + + Returns: + int: The width (in characters) of the console. + """ + return self.size.width + + @width.setter + def width(self, width: int) -> None: + """Set width. + + Args: + width (int): New width. + """ + self._width = width + + @property + def height(self) -> int: + """Get the height of the console. + + Returns: + int: The height (in lines) of the console. + """ + return self.size.height + + @height.setter + def height(self, height: int) -> None: + """Set height. + + Args: + height (int): new height. + """ + self._height = height + + def bell(self) -> None: + """Play a 'bell' sound (if supported by the terminal).""" + self.control(Control.bell()) + + def capture(self) -> Capture: + """A context manager to *capture* the result of print() or log() in a string, + rather than writing it to the console. + + Example: + >>> from rich.console import Console + >>> console = Console() + >>> with console.capture() as capture: + ... console.print("[bold magenta]Hello World[/]") + >>> print(capture.get()) + + Returns: + Capture: Context manager with disables writing to the terminal. + """ + capture = Capture(self) + return capture + + def pager( + self, pager: Optional[Pager] = None, styles: bool = False, links: bool = False + ) -> PagerContext: + """A context manager to display anything printed within a "pager". The pager application + is defined by the system and will typically support at least pressing a key to scroll. + + Args: + pager (Pager, optional): A pager object, or None to use :class:`~rich.pager.SystemPager`. Defaults to None. + styles (bool, optional): Show styles in pager. Defaults to False. + links (bool, optional): Show links in pager. Defaults to False. + + Example: + >>> from rich.console import Console + >>> from rich.__main__ import make_test_card + >>> console = Console() + >>> with console.pager(): + console.print(make_test_card()) + + Returns: + PagerContext: A context manager. + """ + return PagerContext(self, pager=pager, styles=styles, links=links) + + def line(self, count: int = 1) -> None: + """Write new line(s). 
+ + Args: + count (int, optional): Number of new lines. Defaults to 1. + """ + + assert count >= 0, "count must be >= 0" + self.print(NewLine(count)) + + def clear(self, home: bool = True) -> None: + """Clear the screen. + + Args: + home (bool, optional): Also move the cursor to 'home' position. Defaults to True. + """ + if home: + self.control(Control.clear(), Control.home()) + else: + self.control(Control.clear()) + + def status( + self, + status: RenderableType, + *, + spinner: str = "dots", + spinner_style: str = "status.spinner", + speed: float = 1.0, + refresh_per_second: float = 12.5, + ) -> "Status": + """Display a status and spinner. + + Args: + status (RenderableType): A status renderable (str or Text typically). + spinner (str, optional): Name of spinner animation (see python -m rich.spinner). Defaults to "dots". + spinner_style (StyleType, optional): Style of spinner. Defaults to "status.spinner". + speed (float, optional): Speed factor for spinner animation. Defaults to 1.0. + refresh_per_second (float, optional): Number of refreshes per second. Defaults to 12.5. + + Returns: + Status: A Status object that may be used as a context manager. + """ + from .status import Status + + status_renderable = Status( + status, + console=self, + spinner=spinner, + spinner_style=spinner_style, + speed=speed, + refresh_per_second=refresh_per_second, + ) + return status_renderable + + def show_cursor(self, show: bool = True) -> bool: + """Show or hide the cursor. + + Args: + show (bool, optional): Set visibility of the cursor. + """ + if self.is_terminal and not self.legacy_windows: + self.control(Control.show_cursor(show)) + return True + return False + + def set_alt_screen(self, enable: bool = True) -> bool: + """Enables alternative screen mode. + + Note, if you enable this mode, you should ensure that is disabled before + the application exits. See :meth:`~rich.Console.screen` for a context manager + that handles this for you. + + Args: + enable (bool, optional): Enable (True) or disable (False) alternate screen. Defaults to True. + + Returns: + bool: True if the control codes were written. + + """ + changed = False + if self.is_terminal and not self.legacy_windows: + self.control(Control.alt_screen(enable)) + changed = True + self._is_alt_screen = enable + return changed + + @property + def is_alt_screen(self) -> bool: + """Check if the alt screen was enabled. + + Returns: + bool: True if the alt screen was enabled, otherwise False. + """ + return self._is_alt_screen + + def screen( + self, hide_cursor: bool = True, style: Optional[StyleType] = None + ) -> "ScreenContext": + """Context manager to enable and disable 'alternative screen' mode. + + Args: + hide_cursor (bool, optional): Also hide the cursor. Defaults to False. + style (Style, optional): Optional style for screen. Defaults to None. + + Returns: + ~ScreenContext: Context which enables alternate screen on enter, and disables it on exit. + """ + return ScreenContext(self, hide_cursor=hide_cursor, style=style or "") + + def measure( + self, renderable: RenderableType, *, options: Optional[ConsoleOptions] = None + ) -> Measurement: + """Measure a renderable. Returns a :class:`~rich.measure.Measurement` object which contains + information regarding the number of characters required to print the renderable. + + Args: + renderable (RenderableType): Any renderable or string. + options (Optional[ConsoleOptions], optional): Options to use when measuring, or None + to use default options. Defaults to None. 
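+
+        Example (illustrative sketch, assuming an existing ``console``):
+            >>> measurement = console.measure("Hello, World!")
+            >>> measurement.minimum, measurement.maximum
+            (6, 13)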
+ + Returns: + Measurement: A measurement of the renderable. + """ + measurement = Measurement.get(self, options or self.options, renderable) + return measurement + + def render( + self, renderable: RenderableType, options: Optional[ConsoleOptions] = None + ) -> Iterable[Segment]: + """Render an object in to an iterable of `Segment` instances. + + This method contains the logic for rendering objects with the console protocol. + You are unlikely to need to use it directly, unless you are extending the library. + + Args: + renderable (RenderableType): An object supporting the console protocol, or + an object that may be converted to a string. + options (ConsoleOptions, optional): An options object, or None to use self.options. Defaults to None. + + Returns: + Iterable[Segment]: An iterable of segments that may be rendered. + """ + + _options = options or self.options + if _options.max_width < 1: + # No space to render anything. This prevents potential recursion errors. + return + render_iterable: RenderResult + + renderable = rich_cast(renderable) + if hasattr(renderable, "__rich_console__") and not isclass(renderable): + render_iterable = renderable.__rich_console__(self, _options) # type: ignore + elif isinstance(renderable, str): + text_renderable = self.render_str( + renderable, highlight=_options.highlight, markup=_options.markup + ) + render_iterable = text_renderable.__rich_console__(self, _options) + else: + raise errors.NotRenderableError( + f"Unable to render {renderable!r}; " + "A str, Segment or object with __rich_console__ method is required" + ) + + try: + iter_render = iter(render_iterable) + except TypeError: + raise errors.NotRenderableError( + f"object {render_iterable!r} is not renderable" + ) + _Segment = Segment + for render_output in iter_render: + if isinstance(render_output, _Segment): + yield render_output + else: + yield from self.render(render_output, _options) + + def render_lines( + self, + renderable: RenderableType, + options: Optional[ConsoleOptions] = None, + *, + style: Optional[Style] = None, + pad: bool = True, + new_lines: bool = False, + ) -> List[List[Segment]]: + """Render objects in to a list of lines. + + The output of render_lines is useful when further formatting of rendered console text + is required, such as the Panel class which draws a border around any renderable object. + + Args: + renderable (RenderableType): Any object renderable in the console. + options (Optional[ConsoleOptions], optional): Console options, or None to use self.options. Default to ``None``. + style (Style, optional): Optional style to apply to renderables. Defaults to ``None``. + pad (bool, optional): Pad lines shorter than render width. Defaults to ``True``. + new_lines (bool, optional): Include "\n" characters at end of lines. + + Returns: + List[List[Segment]]: A list of lines, where a line is a list of Segment objects. 
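+
+        Example (illustrative sketch, assuming an existing ``console``):
+            >>> lines = console.render_lines("hi", console.options.update_width(4))
+            >>> # Each line is a list of Segment instances, cropped/padded to width 4.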
+ """ + with self._lock: + render_options = options or self.options + _rendered = self.render(renderable, render_options) + if style: + _rendered = Segment.apply_style(_rendered, style) + lines = list( + islice( + Segment.split_and_crop_lines( + _rendered, + render_options.max_width, + include_new_lines=new_lines, + pad=pad, + ), + None, + render_options.height, + ) + ) + if render_options.height is not None: + extra_lines = render_options.height - len(lines) + if extra_lines > 0: + pad_line = [ + [Segment(" " * render_options.max_width, style), Segment("\n")] + if new_lines + else [Segment(" " * render_options.max_width, style)] + ] + lines.extend(pad_line * extra_lines) + + return lines + + def render_str( + self, + text: str, + *, + style: Union[str, Style] = "", + justify: Optional[JustifyMethod] = None, + overflow: Optional[OverflowMethod] = None, + emoji: Optional[bool] = None, + markup: Optional[bool] = None, + highlight: Optional[bool] = None, + highlighter: Optional[HighlighterType] = None, + ) -> "Text": + """Convert a string to a Text instance. This is is called automatically if + you print or log a string. + + Args: + text (str): Text to render. + style (Union[str, Style], optional): Style to apply to rendered text. + justify (str, optional): Justify method: "default", "left", "center", "full", or "right". Defaults to ``None``. + overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to ``None``. + emoji (Optional[bool], optional): Enable emoji, or ``None`` to use Console default. + markup (Optional[bool], optional): Enable markup, or ``None`` to use Console default. + highlight (Optional[bool], optional): Enable highlighting, or ``None`` to use Console default. + highlighter (HighlighterType, optional): Optional highlighter to apply. + Returns: + ConsoleRenderable: Renderable object. + + """ + emoji_enabled = emoji or (emoji is None and self._emoji) + markup_enabled = markup or (markup is None and self._markup) + highlight_enabled = highlight or (highlight is None and self._highlight) + + if markup_enabled: + rich_text = render_markup( + text, + style=style, + emoji=emoji_enabled, + emoji_variant=self._emoji_variant, + ) + rich_text.justify = justify + rich_text.overflow = overflow + else: + rich_text = Text( + _emoji_replace(text, default_variant=self._emoji_variant) + if emoji_enabled + else text, + justify=justify, + overflow=overflow, + style=style, + ) + + _highlighter = (highlighter or self.highlighter) if highlight_enabled else None + if _highlighter is not None: + highlight_text = _highlighter(str(rich_text)) + highlight_text.copy_styles(rich_text) + return highlight_text + + return rich_text + + def get_style( + self, name: Union[str, Style], *, default: Optional[Union[Style, str]] = None + ) -> Style: + """Get a Style instance by it's theme name or parse a definition. + + Args: + name (str): The name of a style or a style definition. + + Returns: + Style: A Style object. + + Raises: + MissingStyle: If no style could be parsed from name. 
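+
+        Example (illustrative sketch, assuming an existing ``console``):
+            >>> style = console.get_style("bold red")
+            >>> themed = console.get_style("repr.number", default="none")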
+ + """ + if isinstance(name, Style): + return name + + try: + style = self._theme_stack.get(name) + if style is None: + style = Style.parse(name) + return style.copy() if style.link else style + except errors.StyleSyntaxError as error: + if default is not None: + return self.get_style(default) + raise errors.MissingStyle( + f"Failed to get style {name!r}; {error}" + ) from None + + def _collect_renderables( + self, + objects: Iterable[Any], + sep: str, + end: str, + *, + justify: Optional[JustifyMethod] = None, + emoji: Optional[bool] = None, + markup: Optional[bool] = None, + highlight: Optional[bool] = None, + ) -> List[ConsoleRenderable]: + """Combine a number of renderables and text into one renderable. + + Args: + objects (Iterable[Any]): Anything that Rich can render. + sep (str): String to write between print data. + end (str): String to write at end of print data. + justify (str, optional): One of "left", "right", "center", or "full". Defaults to ``None``. + emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. + markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. + highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. + + Returns: + List[ConsoleRenderable]: A list of things to render. + """ + renderables: List[ConsoleRenderable] = [] + _append = renderables.append + text: List[Text] = [] + append_text = text.append + + append = _append + if justify in ("left", "center", "right"): + + def align_append(renderable: RenderableType) -> None: + _append(Align(renderable, cast(AlignMethod, justify))) + + append = align_append + + _highlighter: HighlighterType = _null_highlighter + if highlight or (highlight is None and self._highlight): + _highlighter = self.highlighter + + def check_text() -> None: + if text: + sep_text = Text(sep, justify=justify, end=end) + append(sep_text.join(text)) + del text[:] + + for renderable in objects: + renderable = rich_cast(renderable) + if isinstance(renderable, str): + append_text( + self.render_str( + renderable, emoji=emoji, markup=markup, highlighter=_highlighter + ) + ) + elif isinstance(renderable, Text): + append_text(renderable) + elif isinstance(renderable, ConsoleRenderable): + check_text() + append(renderable) + elif is_expandable(renderable): + check_text() + append(Pretty(renderable, highlighter=_highlighter)) + else: + append_text(_highlighter(str(renderable))) + + check_text() + + if self.style is not None: + style = self.get_style(self.style) + renderables = [Styled(renderable, style) for renderable in renderables] + + return renderables + + def rule( + self, + title: TextType = "", + *, + characters: str = "─", + style: Union[str, Style] = "rule.line", + align: AlignMethod = "center", + ) -> None: + """Draw a line with optional centered title. + + Args: + title (str, optional): Text to render over the rule. Defaults to "". + characters (str, optional): Character(s) to form the line. Defaults to "─". + style (str, optional): Style of line. Defaults to "rule.line". + align (str, optional): How to align the title, one of "left", "center", or "right". Defaults to "center". + """ + from .rule import Rule + + rule = Rule(title=title, characters=characters, style=style, align=align) + self.print(rule) + + def control(self, *control: Control) -> None: + """Insert non-printing control codes. + + Args: + control_codes (str): Control codes, such as those that may move the cursor. 
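+
+        Example (illustrative sketch, assuming an existing ``console``):
+            >>> from pipenv.patched.notpip._vendor.rich.control import Control
+            >>> console.control(Control.home())  # move the cursor to the origin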
+ """ + if not self.is_dumb_terminal: + with self: + self._buffer.extend(_control.segment for _control in control) + + def out( + self, + *objects: Any, + sep: str = " ", + end: str = "\n", + style: Optional[Union[str, Style]] = None, + highlight: Optional[bool] = None, + ) -> None: + """Output to the terminal. This is a low-level way of writing to the terminal which unlike + :meth:`~rich.console.Console.print` won't pretty print, wrap text, or apply markup, but will + optionally apply highlighting and a basic style. + + Args: + sep (str, optional): String to write between print data. Defaults to " ". + end (str, optional): String to write at end of print data. Defaults to "\\\\n". + style (Union[str, Style], optional): A style to apply to output. Defaults to None. + highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use + console default. Defaults to ``None``. + """ + raw_output: str = sep.join(str(_object) for _object in objects) + self.print( + raw_output, + style=style, + highlight=highlight, + emoji=False, + markup=False, + no_wrap=True, + overflow="ignore", + crop=False, + end=end, + ) + + def print( + self, + *objects: Any, + sep: str = " ", + end: str = "\n", + style: Optional[Union[str, Style]] = None, + justify: Optional[JustifyMethod] = None, + overflow: Optional[OverflowMethod] = None, + no_wrap: Optional[bool] = None, + emoji: Optional[bool] = None, + markup: Optional[bool] = None, + highlight: Optional[bool] = None, + width: Optional[int] = None, + height: Optional[int] = None, + crop: bool = True, + soft_wrap: Optional[bool] = None, + new_line_start: bool = False, + ) -> None: + """Print to the console. + + Args: + objects (positional args): Objects to log to the terminal. + sep (str, optional): String to write between print data. Defaults to " ". + end (str, optional): String to write at end of print data. Defaults to "\\\\n". + style (Union[str, Style], optional): A style to apply to output. Defaults to None. + justify (str, optional): Justify method: "default", "left", "right", "center", or "full". Defaults to ``None``. + overflow (str, optional): Overflow method: "ignore", "crop", "fold", or "ellipsis". Defaults to None. + no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to None. + emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. Defaults to ``None``. + markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. Defaults to ``None``. + highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Defaults to ``None``. + width (Optional[int], optional): Width of output, or ``None`` to auto-detect. Defaults to ``None``. + crop (Optional[bool], optional): Crop output to width of terminal. Defaults to True. + soft_wrap (bool, optional): Enable soft wrap mode which disables word wrapping and cropping of text or ``None`` for + Console default. Defaults to ``None``. + new_line_start (bool, False): Insert a new line at the start if the output contains more than one line. Defaults to ``False``. 
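+
+        Example (illustrative sketch, assuming an existing ``console``):
+            >>> console.print("Danger!", style="bold red", justify="center")
+            >>> console.print([1, 2, 3], {"name": "apple"})  # containers are pretty printed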
+ """ + if not objects: + objects = (NewLine(),) + + if soft_wrap is None: + soft_wrap = self.soft_wrap + if soft_wrap: + if no_wrap is None: + no_wrap = True + if overflow is None: + overflow = "ignore" + crop = False + render_hooks = self._render_hooks[:] + with self: + renderables = self._collect_renderables( + objects, + sep, + end, + justify=justify, + emoji=emoji, + markup=markup, + highlight=highlight, + ) + for hook in render_hooks: + renderables = hook.process_renderables(renderables) + render_options = self.options.update( + justify=justify, + overflow=overflow, + width=min(width, self.width) if width is not None else NO_CHANGE, + height=height, + no_wrap=no_wrap, + markup=markup, + highlight=highlight, + ) + + new_segments: List[Segment] = [] + extend = new_segments.extend + render = self.render + if style is None: + for renderable in renderables: + extend(render(renderable, render_options)) + else: + for renderable in renderables: + extend( + Segment.apply_style( + render(renderable, render_options), self.get_style(style) + ) + ) + if new_line_start: + if ( + len("".join(segment.text for segment in new_segments).splitlines()) + > 1 + ): + new_segments.insert(0, Segment.line()) + if crop: + buffer_extend = self._buffer.extend + for line in Segment.split_and_crop_lines( + new_segments, self.width, pad=False + ): + buffer_extend(line) + else: + self._buffer.extend(new_segments) + + def print_json( + self, + json: Optional[str] = None, + *, + data: Any = None, + indent: Union[None, int, str] = 2, + highlight: bool = True, + skip_keys: bool = False, + ensure_ascii: bool = True, + check_circular: bool = True, + allow_nan: bool = True, + default: Optional[Callable[[Any], Any]] = None, + sort_keys: bool = False, + ) -> None: + """Pretty prints JSON. Output will be valid JSON. + + Args: + json (Optional[str]): A string containing JSON. + data (Any): If json is not supplied, then encode this data. + indent (Union[None, int, str], optional): Number of spaces to indent. Defaults to 2. + highlight (bool, optional): Enable highlighting of output: Defaults to True. + skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False. + ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False. + check_circular (bool, optional): Check for circular references. Defaults to True. + allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True. + default (Callable, optional): A callable that converts values that can not be encoded + in to something that can be JSON encoded. Defaults to None. + sort_keys (bool, optional): Sort dictionary keys. Defaults to False. + """ + from pipenv.patched.notpip._vendor.rich.json import JSON + + if json is None: + json_renderable = JSON.from_data( + data, + indent=indent, + highlight=highlight, + skip_keys=skip_keys, + ensure_ascii=ensure_ascii, + check_circular=check_circular, + allow_nan=allow_nan, + default=default, + sort_keys=sort_keys, + ) + else: + if not isinstance(json, str): + raise TypeError( + f"json must be str. Did you mean print_json(data={json!r}) ?" 
+ ) + json_renderable = JSON( + json, + indent=indent, + highlight=highlight, + skip_keys=skip_keys, + ensure_ascii=ensure_ascii, + check_circular=check_circular, + allow_nan=allow_nan, + default=default, + sort_keys=sort_keys, + ) + self.print(json_renderable, soft_wrap=True) + + def update_screen( + self, + renderable: RenderableType, + *, + region: Optional[Region] = None, + options: Optional[ConsoleOptions] = None, + ) -> None: + """Update the screen at a given offset. + + Args: + renderable (RenderableType): A Rich renderable. + region (Region, optional): Region of screen to update, or None for entire screen. Defaults to None. + x (int, optional): x offset. Defaults to 0. + y (int, optional): y offset. Defaults to 0. + + Raises: + errors.NoAltScreen: If the Console isn't in alt screen mode. + + """ + if not self.is_alt_screen: + raise errors.NoAltScreen("Alt screen must be enabled to call update_screen") + render_options = options or self.options + if region is None: + x = y = 0 + render_options = render_options.update_dimensions( + render_options.max_width, render_options.height or self.height + ) + else: + x, y, width, height = region + render_options = render_options.update_dimensions(width, height) + + lines = self.render_lines(renderable, options=render_options) + self.update_screen_lines(lines, x, y) + + def update_screen_lines( + self, lines: List[List[Segment]], x: int = 0, y: int = 0 + ) -> None: + """Update lines of the screen at a given offset. + + Args: + lines (List[List[Segment]]): Rendered lines (as produced by :meth:`~rich.Console.render_lines`). + x (int, optional): x offset (column no). Defaults to 0. + y (int, optional): y offset (column no). Defaults to 0. + + Raises: + errors.NoAltScreen: If the Console isn't in alt screen mode. + """ + if not self.is_alt_screen: + raise errors.NoAltScreen("Alt screen must be enabled to call update_screen") + screen_update = ScreenUpdate(lines, x, y) + segments = self.render(screen_update) + self._buffer.extend(segments) + self._check_buffer() + + def print_exception( + self, + *, + width: Optional[int] = 100, + extra_lines: int = 3, + theme: Optional[str] = None, + word_wrap: bool = False, + show_locals: bool = False, + suppress: Iterable[Union[str, ModuleType]] = (), + max_frames: int = 100, + ) -> None: + """Prints a rich render of the last exception and traceback. + + Args: + width (Optional[int], optional): Number of characters used to render code. Defaults to 88. + extra_lines (int, optional): Additional lines of code to render. Defaults to 3. + theme (str, optional): Override pygments theme used in traceback + word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False. + show_locals (bool, optional): Enable display of local variables. Defaults to False. + suppress (Iterable[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback. + max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100. + """ + from .traceback import Traceback + + traceback = Traceback( + width=width, + extra_lines=extra_lines, + theme=theme, + word_wrap=word_wrap, + show_locals=show_locals, + suppress=suppress, + max_frames=max_frames, + ) + self.print(traceback) + + @staticmethod + def _caller_frame_info( + offset: int, + currentframe: Callable[[], Optional[FrameType]] = inspect.currentframe, + ) -> Tuple[str, int, Dict[str, Any]]: + """Get caller frame information. + + Args: + offset (int): the caller offset within the current frame stack. 
+ currentframe (Callable[[], Optional[FrameType]], optional): the callable to use to + retrieve the current frame. Defaults to ``inspect.currentframe``. + + Returns: + Tuple[str, int, Dict[str, Any]]: A tuple containing the filename, the line number and + the dictionary of local variables associated with the caller frame. + + Raises: + RuntimeError: If the stack offset is invalid. + """ + # Ignore the frame of this local helper + offset += 1 + + frame = currentframe() + if frame is not None: + # Use the faster currentframe where implemented + while offset and frame: + frame = frame.f_back + offset -= 1 + assert frame is not None + return frame.f_code.co_filename, frame.f_lineno, frame.f_locals + else: + # Fallback to the slower stack + frame_info = inspect.stack()[offset] + return frame_info.filename, frame_info.lineno, frame_info.frame.f_locals + + def log( + self, + *objects: Any, + sep: str = " ", + end: str = "\n", + style: Optional[Union[str, Style]] = None, + justify: Optional[JustifyMethod] = None, + emoji: Optional[bool] = None, + markup: Optional[bool] = None, + highlight: Optional[bool] = None, + log_locals: bool = False, + _stack_offset: int = 1, + ) -> None: + """Log rich content to the terminal. + + Args: + objects (positional args): Objects to log to the terminal. + sep (str, optional): String to write between print data. Defaults to " ". + end (str, optional): String to write at end of print data. Defaults to "\\\\n". + style (Union[str, Style], optional): A style to apply to output. Defaults to None. + justify (str, optional): One of "left", "right", "center", or "full". Defaults to ``None``. + overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to None. + emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. Defaults to None. + markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. Defaults to None. + highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Defaults to None. + log_locals (bool, optional): Boolean to enable logging of locals where ``log()`` + was called. Defaults to False. + _stack_offset (int, optional): Offset of caller from end of call stack. Defaults to 1. 
+ """ + if not objects: + objects = (NewLine(),) + + render_hooks = self._render_hooks[:] + + with self: + renderables = self._collect_renderables( + objects, + sep, + end, + justify=justify, + emoji=emoji, + markup=markup, + highlight=highlight, + ) + if style is not None: + renderables = [Styled(renderable, style) for renderable in renderables] + + filename, line_no, locals = self._caller_frame_info(_stack_offset) + link_path = None if filename.startswith("<") else os.path.abspath(filename) + path = filename.rpartition(os.sep)[-1] + if log_locals: + locals_map = { + key: value + for key, value in locals.items() + if not key.startswith("__") + } + renderables.append(render_scope(locals_map, title="[i]locals")) + + renderables = [ + self._log_render( + self, + renderables, + log_time=self.get_datetime(), + path=path, + line_no=line_no, + link_path=link_path, + ) + ] + for hook in render_hooks: + renderables = hook.process_renderables(renderables) + new_segments: List[Segment] = [] + extend = new_segments.extend + render = self.render + render_options = self.options + for renderable in renderables: + extend(render(renderable, render_options)) + buffer_extend = self._buffer.extend + for line in Segment.split_and_crop_lines( + new_segments, self.width, pad=False + ): + buffer_extend(line) + + def _check_buffer(self) -> None: + """Check if the buffer may be rendered.""" + if self.quiet: + del self._buffer[:] + return + with self._lock: + if self._buffer_index == 0: + if self.is_jupyter: # pragma: no cover + from .jupyter import display + + display(self._buffer, self._render_buffer(self._buffer[:])) + del self._buffer[:] + else: + text = self._render_buffer(self._buffer[:]) + del self._buffer[:] + if text: + try: + if WINDOWS: # pragma: no cover + # https://bugs.python.org/issue37871 + write = self.file.write + for line in text.splitlines(True): + write(line) + else: + self.file.write(text) + self.file.flush() + except UnicodeEncodeError as error: + error.reason = f"{error.reason}\n*** You may need to add PYTHONIOENCODING=utf-8 to your environment ***" + raise + + def _render_buffer(self, buffer: Iterable[Segment]) -> str: + """Render buffered output, and clear buffer.""" + output: List[str] = [] + append = output.append + color_system = self._color_system + legacy_windows = self.legacy_windows + if self.record: + with self._record_buffer_lock: + self._record_buffer.extend(buffer) + not_terminal = not self.is_terminal + if self.no_color and color_system: + buffer = Segment.remove_color(buffer) + for text, style, control in buffer: + if style: + append( + style.render( + text, + color_system=color_system, + legacy_windows=legacy_windows, + ) + ) + elif not (not_terminal and control): + append(text) + + rendered = "".join(output) + return rendered + + def input( + self, + prompt: TextType = "", + *, + markup: bool = True, + emoji: bool = True, + password: bool = False, + stream: Optional[TextIO] = None, + ) -> str: + """Displays a prompt and waits for input from the user. The prompt may contain color / style. + + It works in the same way as Python's builtin :func:`input` function and provides elaborate line editing and history features if Python's builtin :mod:`readline` module is previously loaded. + + Args: + prompt (Union[str, Text]): Text to render in the prompt. + markup (bool, optional): Enable console markup (requires a str prompt). Defaults to True. + emoji (bool, optional): Enable emoji (requires a str prompt). Defaults to True. + password: (bool, optional): Hide typed text. 
Defaults to False. + stream: (TextIO, optional): Optional file to read input from (rather than stdin). Defaults to None. + + Returns: + str: Text read from stdin. + """ + prompt_str = "" + if prompt: + with self.capture() as capture: + self.print(prompt, markup=markup, emoji=emoji, end="") + prompt_str = capture.get() + if self.legacy_windows: + # Legacy windows doesn't like ANSI codes in getpass or input (colorama bug)? + self.file.write(prompt_str) + prompt_str = "" + if password: + result = getpass(prompt_str, stream=stream) + else: + if stream: + self.file.write(prompt_str) + result = stream.readline() + else: + result = input(prompt_str) + return result + + def export_text(self, *, clear: bool = True, styles: bool = False) -> str: + """Generate text from console contents (requires record=True argument in constructor). + + Args: + clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``. + styles (bool, optional): If ``True``, ansi escape codes will be included. ``False`` for plain text. + Defaults to ``False``. + + Returns: + str: String containing console contents. + + """ + assert ( + self.record + ), "To export console contents set record=True in the constructor or instance" + + with self._record_buffer_lock: + if styles: + text = "".join( + (style.render(text) if style else text) + for text, style, _ in self._record_buffer + ) + else: + text = "".join( + segment.text + for segment in self._record_buffer + if not segment.control + ) + if clear: + del self._record_buffer[:] + return text + + def save_text(self, path: str, *, clear: bool = True, styles: bool = False) -> None: + """Generate text from console and save to a given location (requires record=True argument in constructor). + + Args: + path (str): Path to write text files. + clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``. + styles (bool, optional): If ``True``, ansi style codes will be included. ``False`` for plain text. + Defaults to ``False``. + + """ + text = self.export_text(clear=clear, styles=styles) + with open(path, "wt", encoding="utf-8") as write_file: + write_file.write(text) + + def export_html( + self, + *, + theme: Optional[TerminalTheme] = None, + clear: bool = True, + code_format: Optional[str] = None, + inline_styles: bool = False, + ) -> str: + """Generate HTML from console contents (requires record=True argument in constructor). + + Args: + theme (TerminalTheme, optional): TerminalTheme object containing console colors. + clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``. + code_format (str, optional): Format string to render HTML, should contain {foreground} + {background} and {code}. + inline_styles (bool, optional): If ``True`` styles will be inlined in to spans, which makes files + larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag. + Defaults to False. + + Returns: + str: String containing console contents as HTML. + """ + assert ( + self.record + ), "To export console contents set record=True in the constructor or instance" + fragments: List[str] = [] + append = fragments.append + _theme = theme or DEFAULT_TERMINAL_THEME + stylesheet = "" + + render_code_format = CONSOLE_HTML_FORMAT if code_format is None else code_format + + with self._record_buffer_lock: + if inline_styles: + for text, style, _ in Segment.filter_control( + Segment.simplify(self._record_buffer) + ): + text = escape(text) + if style: + rule = style.get_html_style(_theme) + if style.link: + text = f'
{text}' + text = f'{text}' if rule else text + append(text) + else: + styles: Dict[str, int] = {} + for text, style, _ in Segment.filter_control( + Segment.simplify(self._record_buffer) + ): + text = escape(text) + if style: + rule = style.get_html_style(_theme) + style_number = styles.setdefault(rule, len(styles) + 1) + if style.link: + text = f'{text}' + else: + text = f'{text}' + append(text) + stylesheet_rules: List[str] = [] + stylesheet_append = stylesheet_rules.append + for style_rule, style_number in styles.items(): + if style_rule: + stylesheet_append(f".r{style_number} {{{style_rule}}}") + stylesheet = "\n".join(stylesheet_rules) + + rendered_code = render_code_format.format( + code="".join(fragments), + stylesheet=stylesheet, + foreground=_theme.foreground_color.hex, + background=_theme.background_color.hex, + ) + if clear: + del self._record_buffer[:] + return rendered_code + + def save_html( + self, + path: str, + *, + theme: Optional[TerminalTheme] = None, + clear: bool = True, + code_format: str = CONSOLE_HTML_FORMAT, + inline_styles: bool = False, + ) -> None: + """Generate HTML from console contents and write to a file (requires record=True argument in constructor). + + Args: + path (str): Path to write html file. + theme (TerminalTheme, optional): TerminalTheme object containing console colors. + clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``. + code_format (str, optional): Format string to render HTML, should contain {foreground} + {background} and {code}. + inline_styles (bool, optional): If ``True`` styles will be inlined in to spans, which makes files + larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag. + Defaults to False. + + """ + html = self.export_html( + theme=theme, + clear=clear, + code_format=code_format, + inline_styles=inline_styles, + ) + with open(path, "wt", encoding="utf-8") as write_file: + write_file.write(html) + + +if __name__ == "__main__": # pragma: no cover + console = Console() + + console.log( + "JSONRPC [i]request[/i]", + 5, + 1.3, + True, + False, + None, + { + "jsonrpc": "2.0", + "method": "subtract", + "params": {"minuend": 42, "subtrahend": 23}, + "id": 3, + }, + ) + + console.log("Hello, World!", "{'a': 1}", repr(console)) + + console.print( + { + "name": None, + "empty": [], + "quiz": { + "sport": { + "answered": True, + "q1": { + "question": "Which one is correct team name in NBA?", + "options": [ + "New York Bulls", + "Los Angeles Kings", + "Golden State Warriors", + "Huston Rocket", + ], + "answer": "Huston Rocket", + }, + }, + "maths": { + "answered": False, + "q1": { + "question": "5 + 7 = ?", + "options": [10, 11, 12, 13], + "answer": 12, + }, + "q2": { + "question": "12 - 8 = ?", + "options": [1, 2, 3, 4], + "answer": 4, + }, + }, + }, + } + ) + console.log("foo") diff --git a/pipenv/patched/notpip/_vendor/rich/constrain.py b/pipenv/patched/notpip/_vendor/rich/constrain.py new file mode 100644 index 0000000000..65fdf56342 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/constrain.py @@ -0,0 +1,37 @@ +from typing import Optional, TYPE_CHECKING + +from .jupyter import JupyterMixin +from .measure import Measurement + +if TYPE_CHECKING: + from .console import Console, ConsoleOptions, RenderableType, RenderResult + + +class Constrain(JupyterMixin): + """Constrain the width of a renderable to a given number of characters. + + Args: + renderable (RenderableType): A renderable object. + width (int, optional): The maximum width (in characters) to render. 
Defaults to 80. + """ + + def __init__(self, renderable: "RenderableType", width: Optional[int] = 80) -> None: + self.renderable = renderable + self.width = width + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": + if self.width is None: + yield self.renderable + else: + child_options = options.update_width(min(self.width, options.max_width)) + yield from console.render(self.renderable, child_options) + + def __rich_measure__( + self, console: "Console", options: "ConsoleOptions" + ) -> "Measurement": + if self.width is not None: + options = options.update_width(self.width) + measurement = Measurement.get(console, options, self.renderable) + return measurement diff --git a/pipenv/patched/notpip/_vendor/rich/containers.py b/pipenv/patched/notpip/_vendor/rich/containers.py new file mode 100644 index 0000000000..e29cf36899 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/containers.py @@ -0,0 +1,167 @@ +from itertools import zip_longest +from typing import ( + Iterator, + Iterable, + List, + Optional, + Union, + overload, + TypeVar, + TYPE_CHECKING, +) + +if TYPE_CHECKING: + from .console import ( + Console, + ConsoleOptions, + JustifyMethod, + OverflowMethod, + RenderResult, + RenderableType, + ) + from .text import Text + +from .cells import cell_len +from .measure import Measurement + +T = TypeVar("T") + + +class Renderables: + """A list subclass which renders its contents to the console.""" + + def __init__( + self, renderables: Optional[Iterable["RenderableType"]] = None + ) -> None: + self._renderables: List["RenderableType"] = ( + list(renderables) if renderables is not None else [] + ) + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": + """Console render method to insert line-breaks.""" + yield from self._renderables + + def __rich_measure__( + self, console: "Console", options: "ConsoleOptions" + ) -> "Measurement": + dimensions = [ + Measurement.get(console, options, renderable) + for renderable in self._renderables + ] + if not dimensions: + return Measurement(1, 1) + _min = max(dimension.minimum for dimension in dimensions) + _max = max(dimension.maximum for dimension in dimensions) + return Measurement(_min, _max) + + def append(self, renderable: "RenderableType") -> None: + self._renderables.append(renderable) + + def __iter__(self) -> Iterable["RenderableType"]: + return iter(self._renderables) + + +class Lines: + """A list subclass which can render to the console.""" + + def __init__(self, lines: Iterable["Text"] = ()) -> None: + self._lines: List["Text"] = list(lines) + + def __repr__(self) -> str: + return f"Lines({self._lines!r})" + + def __iter__(self) -> Iterator["Text"]: + return iter(self._lines) + + @overload + def __getitem__(self, index: int) -> "Text": + ... + + @overload + def __getitem__(self, index: slice) -> List["Text"]: + ... 
+ + def __getitem__(self, index: Union[slice, int]) -> Union["Text", List["Text"]]: + return self._lines[index] + + def __setitem__(self, index: int, value: "Text") -> "Lines": + self._lines[index] = value + return self + + def __len__(self) -> int: + return self._lines.__len__() + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": + """Console render method to insert line-breaks.""" + yield from self._lines + + def append(self, line: "Text") -> None: + self._lines.append(line) + + def extend(self, lines: Iterable["Text"]) -> None: + self._lines.extend(lines) + + def pop(self, index: int = -1) -> "Text": + return self._lines.pop(index) + + def justify( + self, + console: "Console", + width: int, + justify: "JustifyMethod" = "left", + overflow: "OverflowMethod" = "fold", + ) -> None: + """Justify and overflow text to a given width. + + Args: + console (Console): Console instance. + width (int): Number of characters per line. + justify (str, optional): Default justify method for text: "left", "center", "full" or "right". Defaults to "left". + overflow (str, optional): Default overflow for text: "crop", "fold", or "ellipsis". Defaults to "fold". + + """ + from .text import Text + + if justify == "left": + for line in self._lines: + line.truncate(width, overflow=overflow, pad=True) + elif justify == "center": + for line in self._lines: + line.rstrip() + line.truncate(width, overflow=overflow) + line.pad_left((width - cell_len(line.plain)) // 2) + line.pad_right(width - cell_len(line.plain)) + elif justify == "right": + for line in self._lines: + line.rstrip() + line.truncate(width, overflow=overflow) + line.pad_left(width - cell_len(line.plain)) + elif justify == "full": + for line_index, line in enumerate(self._lines): + if line_index == len(self._lines) - 1: + break + words = line.split(" ") + words_size = sum(cell_len(word.plain) for word in words) + num_spaces = len(words) - 1 + spaces = [1 for _ in range(num_spaces)] + index = 0 + if spaces: + while words_size + num_spaces < width: + spaces[len(spaces) - index - 1] += 1 + num_spaces += 1 + index = (index + 1) % len(spaces) + tokens: List[Text] = [] + for index, (word, next_word) in enumerate( + zip_longest(words, words[1:]) + ): + tokens.append(word) + if index < len(spaces): + style = word.get_style_at_offset(console, -1) + next_style = next_word.get_style_at_offset(console, 0) + space_style = style if style == next_style else line.style + tokens.append(Text(" " * spaces[index], style=space_style)) + self[line_index] = Text("").join(tokens) diff --git a/pipenv/patched/notpip/_vendor/rich/control.py b/pipenv/patched/notpip/_vendor/rich/control.py new file mode 100644 index 0000000000..c98d0d7d98 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/control.py @@ -0,0 +1,175 @@ +from typing import Any, Callable, Dict, Iterable, List, TYPE_CHECKING, Union + +from .segment import ControlCode, ControlType, Segment + +if TYPE_CHECKING: + from .console import Console, ConsoleOptions, RenderResult + +STRIP_CONTROL_CODES = [ + 8, # Backspace + 11, # Vertical tab + 12, # Form feed + 13, # Carriage return +] +_CONTROL_TRANSLATE = {_codepoint: None for _codepoint in STRIP_CONTROL_CODES} + + +CONTROL_CODES_FORMAT: Dict[int, Callable[..., str]] = { + ControlType.BELL: lambda: "\x07", + ControlType.CARRIAGE_RETURN: lambda: "\r", + ControlType.HOME: lambda: "\x1b[H", + ControlType.CLEAR: lambda: "\x1b[2J", + ControlType.ENABLE_ALT_SCREEN: lambda: "\x1b[?1049h", + ControlType.DISABLE_ALT_SCREEN: lambda: 
"\x1b[?1049l", + ControlType.SHOW_CURSOR: lambda: "\x1b[?25h", + ControlType.HIDE_CURSOR: lambda: "\x1b[?25l", + ControlType.CURSOR_UP: lambda param: f"\x1b[{param}A", + ControlType.CURSOR_DOWN: lambda param: f"\x1b[{param}B", + ControlType.CURSOR_FORWARD: lambda param: f"\x1b[{param}C", + ControlType.CURSOR_BACKWARD: lambda param: f"\x1b[{param}D", + ControlType.CURSOR_MOVE_TO_COLUMN: lambda param: f"\x1b[{param+1}G", + ControlType.ERASE_IN_LINE: lambda param: f"\x1b[{param}K", + ControlType.CURSOR_MOVE_TO: lambda x, y: f"\x1b[{y+1};{x+1}H", +} + + +class Control: + """A renderable that inserts a control code (non printable but may move cursor). + + Args: + *codes (str): Positional arguments are either a :class:`~rich.segment.ControlType` enum or a + tuple of ControlType and an integer parameter + """ + + __slots__ = ["segment"] + + def __init__(self, *codes: Union[ControlType, ControlCode]) -> None: + control_codes: List[ControlCode] = [ + (code,) if isinstance(code, ControlType) else code for code in codes + ] + _format_map = CONTROL_CODES_FORMAT + rendered_codes = "".join( + _format_map[code](*parameters) for code, *parameters in control_codes + ) + self.segment = Segment(rendered_codes, None, control_codes) + + @classmethod + def bell(cls) -> "Control": + """Ring the 'bell'.""" + return cls(ControlType.BELL) + + @classmethod + def home(cls) -> "Control": + """Move cursor to 'home' position.""" + return cls(ControlType.HOME) + + @classmethod + def move(cls, x: int = 0, y: int = 0) -> "Control": + """Move cursor relative to current position. + + Args: + x (int): X offset. + y (int): Y offset. + + Returns: + ~Control: Control object. + + """ + + def get_codes() -> Iterable[ControlCode]: + control = ControlType + if x: + yield ( + control.CURSOR_FORWARD if x > 0 else control.CURSOR_BACKWARD, + abs(x), + ) + if y: + yield ( + control.CURSOR_DOWN if y > 0 else control.CURSOR_UP, + abs(y), + ) + + control = cls(*get_codes()) + return control + + @classmethod + def move_to_column(cls, x: int, y: int = 0) -> "Control": + """Move to the given column, optionally add offset to row. + + Returns: + x (int): absolute x (column) + y (int): optional y offset (row) + + Returns: + ~Control: Control object. + """ + + return ( + cls( + (ControlType.CURSOR_MOVE_TO_COLUMN, x), + ( + ControlType.CURSOR_DOWN if y > 0 else ControlType.CURSOR_UP, + abs(y), + ), + ) + if y + else cls((ControlType.CURSOR_MOVE_TO_COLUMN, x)) + ) + + @classmethod + def move_to(cls, x: int, y: int) -> "Control": + """Move cursor to absolute position. + + Args: + x (int): x offset (column) + y (int): y offset (row) + + Returns: + ~Control: Control object. 
+ """ + return cls((ControlType.CURSOR_MOVE_TO, x, y)) + + @classmethod + def clear(cls) -> "Control": + """Clear the screen.""" + return cls(ControlType.CLEAR) + + @classmethod + def show_cursor(cls, show: bool) -> "Control": + """Show or hide the cursor.""" + return cls(ControlType.SHOW_CURSOR if show else ControlType.HIDE_CURSOR) + + @classmethod + def alt_screen(cls, enable: bool) -> "Control": + """Enable or disable alt screen.""" + if enable: + return cls(ControlType.ENABLE_ALT_SCREEN, ControlType.HOME) + else: + return cls(ControlType.DISABLE_ALT_SCREEN) + + def __str__(self) -> str: + return self.segment.text + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": + if self.segment.text: + yield self.segment + + +def strip_control_codes( + text: str, _translate_table: Dict[int, None] = _CONTROL_TRANSLATE +) -> str: + """Remove control codes from text. + + Args: + text (str): A string possibly contain control codes. + + Returns: + str: String with control codes removed. + """ + return text.translate(_translate_table) + + +if __name__ == "__main__": # pragma: no cover + print(strip_control_codes("hello\rWorld")) diff --git a/pipenv/patched/notpip/_vendor/rich/default_styles.py b/pipenv/patched/notpip/_vendor/rich/default_styles.py new file mode 100644 index 0000000000..1f930bba01 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/default_styles.py @@ -0,0 +1,183 @@ +from typing import Dict + +from .style import Style + + +DEFAULT_STYLES: Dict[str, Style] = { + "none": Style.null(), + "reset": Style( + color="default", + bgcolor="default", + dim=False, + bold=False, + italic=False, + underline=False, + blink=False, + blink2=False, + reverse=False, + conceal=False, + strike=False, + ), + "dim": Style(dim=True), + "bright": Style(dim=False), + "bold": Style(bold=True), + "strong": Style(bold=True), + "code": Style(reverse=True, bold=True), + "italic": Style(italic=True), + "emphasize": Style(italic=True), + "underline": Style(underline=True), + "blink": Style(blink=True), + "blink2": Style(blink2=True), + "reverse": Style(reverse=True), + "strike": Style(strike=True), + "black": Style(color="black"), + "red": Style(color="red"), + "green": Style(color="green"), + "yellow": Style(color="yellow"), + "magenta": Style(color="magenta"), + "cyan": Style(color="cyan"), + "white": Style(color="white"), + "inspect.attr": Style(color="yellow", italic=True), + "inspect.attr.dunder": Style(color="yellow", italic=True, dim=True), + "inspect.callable": Style(bold=True, color="red"), + "inspect.def": Style(italic=True, color="bright_cyan"), + "inspect.error": Style(bold=True, color="red"), + "inspect.equals": Style(), + "inspect.help": Style(color="cyan"), + "inspect.doc": Style(dim=True), + "inspect.value.border": Style(color="green"), + "live.ellipsis": Style(bold=True, color="red"), + "layout.tree.row": Style(dim=False, color="red"), + "layout.tree.column": Style(dim=False, color="blue"), + "logging.keyword": Style(bold=True, color="yellow"), + "logging.level.notset": Style(dim=True), + "logging.level.debug": Style(color="green"), + "logging.level.info": Style(color="blue"), + "logging.level.warning": Style(color="red"), + "logging.level.error": Style(color="red", bold=True), + "logging.level.critical": Style(color="red", bold=True, reverse=True), + "log.level": Style.null(), + "log.time": Style(color="cyan", dim=True), + "log.message": Style.null(), + "log.path": Style(dim=True), + "repr.ellipsis": Style(color="yellow"), + "repr.indent": 
Style(color="green", dim=True), + "repr.error": Style(color="red", bold=True), + "repr.str": Style(color="green", italic=False, bold=False), + "repr.brace": Style(bold=True), + "repr.comma": Style(bold=True), + "repr.ipv4": Style(bold=True, color="bright_green"), + "repr.ipv6": Style(bold=True, color="bright_green"), + "repr.eui48": Style(bold=True, color="bright_green"), + "repr.eui64": Style(bold=True, color="bright_green"), + "repr.tag_start": Style(bold=True), + "repr.tag_name": Style(color="bright_magenta", bold=True), + "repr.tag_contents": Style(color="default"), + "repr.tag_end": Style(bold=True), + "repr.attrib_name": Style(color="yellow", italic=False), + "repr.attrib_equal": Style(bold=True), + "repr.attrib_value": Style(color="magenta", italic=False), + "repr.number": Style(color="cyan", bold=True, italic=False), + "repr.bool_true": Style(color="bright_green", italic=True), + "repr.bool_false": Style(color="bright_red", italic=True), + "repr.none": Style(color="magenta", italic=True), + "repr.url": Style(underline=True, color="bright_blue", italic=False, bold=False), + "repr.uuid": Style(color="bright_yellow", bold=False), + "repr.call": Style(color="magenta", bold=True), + "repr.path": Style(color="magenta"), + "repr.filename": Style(color="bright_magenta"), + "rule.line": Style(color="bright_green"), + "rule.text": Style.null(), + "json.brace": Style(bold=True), + "json.bool_true": Style(color="bright_green", italic=True), + "json.bool_false": Style(color="bright_red", italic=True), + "json.null": Style(color="magenta", italic=True), + "json.number": Style(color="cyan", bold=True, italic=False), + "json.str": Style(color="green", italic=False, bold=False), + "json.key": Style(color="blue", bold=True), + "prompt": Style.null(), + "prompt.choices": Style(color="magenta", bold=True), + "prompt.default": Style(color="cyan", bold=True), + "prompt.invalid": Style(color="red"), + "prompt.invalid.choice": Style(color="red"), + "pretty": Style.null(), + "scope.border": Style(color="blue"), + "scope.key": Style(color="yellow", italic=True), + "scope.key.special": Style(color="yellow", italic=True, dim=True), + "scope.equals": Style(color="red"), + "table.header": Style(bold=True), + "table.footer": Style(bold=True), + "table.cell": Style.null(), + "table.title": Style(italic=True), + "table.caption": Style(italic=True, dim=True), + "traceback.error": Style(color="red", italic=True), + "traceback.border.syntax_error": Style(color="bright_red"), + "traceback.border": Style(color="red"), + "traceback.text": Style.null(), + "traceback.title": Style(color="red", bold=True), + "traceback.exc_type": Style(color="bright_red", bold=True), + "traceback.exc_value": Style.null(), + "traceback.offset": Style(color="bright_red", bold=True), + "bar.back": Style(color="grey23"), + "bar.complete": Style(color="rgb(249,38,114)"), + "bar.finished": Style(color="rgb(114,156,31)"), + "bar.pulse": Style(color="rgb(249,38,114)"), + "progress.description": Style.null(), + "progress.filesize": Style(color="green"), + "progress.filesize.total": Style(color="green"), + "progress.download": Style(color="green"), + "progress.elapsed": Style(color="yellow"), + "progress.percentage": Style(color="magenta"), + "progress.remaining": Style(color="cyan"), + "progress.data.speed": Style(color="red"), + "progress.spinner": Style(color="green"), + "status.spinner": Style(color="green"), + "tree": Style(), + "tree.line": Style(), + "markdown.paragraph": Style(), + "markdown.text": Style(), + "markdown.emph": 
Style(italic=True), + "markdown.strong": Style(bold=True), + "markdown.code": Style(bgcolor="black", color="bright_white"), + "markdown.code_block": Style(dim=True, color="cyan", bgcolor="black"), + "markdown.block_quote": Style(color="magenta"), + "markdown.list": Style(color="cyan"), + "markdown.item": Style(), + "markdown.item.bullet": Style(color="yellow", bold=True), + "markdown.item.number": Style(color="yellow", bold=True), + "markdown.hr": Style(color="yellow"), + "markdown.h1.border": Style(), + "markdown.h1": Style(bold=True), + "markdown.h2": Style(bold=True, underline=True), + "markdown.h3": Style(bold=True), + "markdown.h4": Style(bold=True, dim=True), + "markdown.h5": Style(underline=True), + "markdown.h6": Style(italic=True), + "markdown.h7": Style(italic=True, dim=True), + "markdown.link": Style(color="bright_blue"), + "markdown.link_url": Style(color="blue"), +} + + +if __name__ == "__main__": # pragma: no cover + import argparse + import io + + from pipenv.patched.notpip._vendor.rich.console import Console + from pipenv.patched.notpip._vendor.rich.table import Table + from pipenv.patched.notpip._vendor.rich.text import Text + + parser = argparse.ArgumentParser() + parser.add_argument("--html", action="store_true", help="Export as HTML table") + args = parser.parse_args() + html: bool = args.html + console = Console(record=True, width=70, file=io.StringIO()) if html else Console() + + table = Table("Name", "Styling") + + for style_name, style in DEFAULT_STYLES.items(): + table.add_row(Text(style_name, style=style), str(style)) + + console.print(table) + if html: + print(console.export_html(inline_styles=True)) diff --git a/pipenv/patched/notpip/_vendor/rich/diagnose.py b/pipenv/patched/notpip/_vendor/rich/diagnose.py new file mode 100644 index 0000000000..7175a7d0a4 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/diagnose.py @@ -0,0 +1,6 @@ +if __name__ == "__main__": # pragma: no cover + from pipenv.patched.notpip._vendor.rich.console import Console + from pipenv.patched.notpip._vendor.rich import inspect + + console = Console() + inspect(console) diff --git a/pipenv/patched/notpip/_vendor/rich/emoji.py b/pipenv/patched/notpip/_vendor/rich/emoji.py new file mode 100644 index 0000000000..7e1c8ffa1d --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/emoji.py @@ -0,0 +1,96 @@ +import sys +from typing import TYPE_CHECKING, Optional, Union + +from .jupyter import JupyterMixin +from .segment import Segment +from .style import Style +from ._emoji_codes import EMOJI +from ._emoji_replace import _emoji_replace + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from pipenv.patched.notpip._vendor.typing_extensions import Literal # pragma: no cover + + +if TYPE_CHECKING: + from .console import Console, ConsoleOptions, RenderResult + + +EmojiVariant = Literal["emoji", "text"] + + +class NoEmoji(Exception): + """No emoji by that name.""" + + +class Emoji(JupyterMixin): + __slots__ = ["name", "style", "_char", "variant"] + + VARIANTS = {"text": "\uFE0E", "emoji": "\uFE0F"} + + def __init__( + self, + name: str, + style: Union[str, Style] = "none", + variant: Optional[EmojiVariant] = None, + ) -> None: + """A single emoji character. + + Args: + name (str): Name of emoji. + style (Union[str, Style], optional): Optional style. Defaults to None. + + Raises: + NoEmoji: If the emoji doesn't exist. 
+ """ + self.name = name + self.style = style + self.variant = variant + try: + self._char = EMOJI[name] + except KeyError: + raise NoEmoji(f"No emoji called {name!r}") + if variant is not None: + self._char += self.VARIANTS.get(variant, "") + + @classmethod + def replace(cls, text: str) -> str: + """Replace emoji markup with corresponding unicode characters. + + Args: + text (str): A string with emojis codes, e.g. "Hello :smiley:!" + + Returns: + str: A string with emoji codes replaces with actual emoji. + """ + return _emoji_replace(text) + + def __repr__(self) -> str: + return f"" + + def __str__(self) -> str: + return self._char + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": + yield Segment(self._char, console.get_style(self.style)) + + +if __name__ == "__main__": # pragma: no cover + import sys + + from pipenv.patched.notpip._vendor.rich.columns import Columns + from pipenv.patched.notpip._vendor.rich.console import Console + + console = Console(record=True) + + columns = Columns( + (f":{name}: {name}" for name in sorted(EMOJI.keys()) if "\u200D" not in name), + column_first=True, + ) + + console.print(columns) + if len(sys.argv) > 1: + console.save_html(sys.argv[1]) diff --git a/pipenv/patched/notpip/_vendor/rich/errors.py b/pipenv/patched/notpip/_vendor/rich/errors.py new file mode 100644 index 0000000000..0bcbe53ef5 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/errors.py @@ -0,0 +1,34 @@ +class ConsoleError(Exception): + """An error in console operation.""" + + +class StyleError(Exception): + """An error in styles.""" + + +class StyleSyntaxError(ConsoleError): + """Style was badly formatted.""" + + +class MissingStyle(StyleError): + """No such style.""" + + +class StyleStackError(ConsoleError): + """Style stack is invalid.""" + + +class NotRenderableError(ConsoleError): + """Object is not renderable.""" + + +class MarkupError(ConsoleError): + """Markup was badly formatted.""" + + +class LiveError(ConsoleError): + """Error related to Live display.""" + + +class NoAltScreen(ConsoleError): + """Alt screen mode was required.""" diff --git a/pipenv/patched/notpip/_vendor/rich/file_proxy.py b/pipenv/patched/notpip/_vendor/rich/file_proxy.py new file mode 100644 index 0000000000..3ec593a5a4 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/file_proxy.py @@ -0,0 +1,54 @@ +import io +from typing import List, Any, IO, TYPE_CHECKING + +from .ansi import AnsiDecoder +from .text import Text + +if TYPE_CHECKING: + from .console import Console + + +class FileProxy(io.TextIOBase): + """Wraps a file (e.g. 
sys.stdout) and redirects writes to a console.""" + + def __init__(self, console: "Console", file: IO[str]) -> None: + self.__console = console + self.__file = file + self.__buffer: List[str] = [] + self.__ansi_decoder = AnsiDecoder() + + @property + def rich_proxied_file(self) -> IO[str]: + """Get proxied file.""" + return self.__file + + def __getattr__(self, name: str) -> Any: + return getattr(self.__file, name) + + def write(self, text: str) -> int: + if not isinstance(text, str): + raise TypeError(f"write() argument must be str, not {type(text).__name__}") + buffer = self.__buffer + lines: List[str] = [] + while text: + line, new_line, text = text.partition("\n") + if new_line: + lines.append("".join(buffer) + line) + del buffer[:] + else: + buffer.append(line) + break + if lines: + console = self.__console + with console: + output = Text("\n").join( + self.__ansi_decoder.decode_line(line) for line in lines + ) + console.print(output) + return len(text) + + def flush(self) -> None: + buffer = self.__buffer + if buffer: + self.__console.print("".join(buffer)) + del buffer[:] diff --git a/pipenv/patched/notpip/_vendor/rich/filesize.py b/pipenv/patched/notpip/_vendor/rich/filesize.py new file mode 100644 index 0000000000..b3a0996b05 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/filesize.py @@ -0,0 +1,89 @@ +# coding: utf-8 +"""Functions for reporting filesizes. Borrowed from https://github.com/PyFilesystem/pyfilesystem2 + +The functions declared in this module should cover the different +usecases needed to generate a string representation of a file size +using several different units. Since there are many standards regarding +file size units, three different functions have been implemented. + +See Also: + * `Wikipedia: Binary prefix `_ + +""" + +__all__ = ["decimal"] + +from typing import Iterable, List, Tuple, Optional + + +def _to_str( + size: int, + suffixes: Iterable[str], + base: int, + *, + precision: Optional[int] = 1, + separator: Optional[str] = " ", +) -> str: + if size == 1: + return "1 byte" + elif size < base: + return "{:,} bytes".format(size) + + for i, suffix in enumerate(suffixes, 2): # noqa: B007 + unit = base ** i + if size < unit: + break + return "{:,.{precision}f}{separator}{}".format( + (base * size / unit), + suffix, + precision=precision, + separator=separator, + ) + + +def pick_unit_and_suffix(size: int, suffixes: List[str], base: int) -> Tuple[int, str]: + """Pick a suffix and base for the given size.""" + for i, suffix in enumerate(suffixes): + unit = base ** i + if size < unit * base: + break + return unit, suffix + + +def decimal( + size: int, + *, + precision: Optional[int] = 1, + separator: Optional[str] = " ", +) -> str: + """Convert a filesize in to a string (powers of 1000, SI prefixes). + + In this convention, ``1000 B = 1 kB``. + + This is typically the format used to advertise the storage + capacity of USB flash drives and the like (*256 MB* meaning + actually a storage capacity of more than *256 000 000 B*), + or used by **Mac OS X** since v10.6 to report file sizes. + + Arguments: + int (size): A file size. + int (precision): The number of decimal places to include (default = 1). + str (separator): The string to separate the value from the units (default = " "). + + Returns: + `str`: A string containing a abbreviated file size and units. 
+ + Example: + >>> filesize.decimal(30000) + '30.0 kB' + >>> filesize.decimal(30000, precision=2, separator="") + '30.00kB' + + """ + return _to_str( + size, + ("kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"), + 1000, + precision=precision, + separator=separator, + ) diff --git a/pipenv/patched/notpip/_vendor/rich/highlighter.py b/pipenv/patched/notpip/_vendor/rich/highlighter.py new file mode 100644 index 0000000000..8afdd017b6 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/highlighter.py @@ -0,0 +1,147 @@ +from abc import ABC, abstractmethod +from typing import List, Union + +from .text import Text + + +def _combine_regex(*regexes: str) -> str: + """Combine a number of regexes in to a single regex. + + Returns: + str: New regex with all regexes ORed together. + """ + return "|".join(regexes) + + +class Highlighter(ABC): + """Abstract base class for highlighters.""" + + def __call__(self, text: Union[str, Text]) -> Text: + """Highlight a str or Text instance. + + Args: + text (Union[str, ~Text]): Text to highlight. + + Raises: + TypeError: If not called with text or str. + + Returns: + Text: A test instance with highlighting applied. + """ + if isinstance(text, str): + highlight_text = Text(text) + elif isinstance(text, Text): + highlight_text = text.copy() + else: + raise TypeError(f"str or Text instance required, not {text!r}") + self.highlight(highlight_text) + return highlight_text + + @abstractmethod + def highlight(self, text: Text) -> None: + """Apply highlighting in place to text. + + Args: + text (~Text): A text object highlight. + """ + + +class NullHighlighter(Highlighter): + """A highlighter object that doesn't highlight. + + May be used to disable highlighting entirely. + + """ + + def highlight(self, text: Text) -> None: + """Nothing to do""" + + +class RegexHighlighter(Highlighter): + """Applies highlighting from a list of regular expressions.""" + + highlights: List[str] = [] + base_style: str = "" + + def highlight(self, text: Text) -> None: + """Highlight :class:`rich.text.Text` using regular expressions. + + Args: + text (~Text): Text to highlighted. + + """ + + highlight_regex = text.highlight_regex + for re_highlight in self.highlights: + highlight_regex(re_highlight, style_prefix=self.base_style) + + +class ReprHighlighter(RegexHighlighter): + """Highlights the text typically produced from ``__repr__`` methods.""" + + base_style = "repr." + highlights = [ + r"(?P\<)(?P[\w\-\.\:]*)(?P[\w\W]*?)(?P\>)", + r"(?P[\w_]{1,50})=(?P\"?[\w_]+\"?)?", + r"(?P[\{\[\(\)\]\}])", + _combine_regex( + r"(?P[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})", + r"(?P([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4})", + r"(?P(?:[0-9A-Fa-f]{1,2}-){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){3}[0-9A-Fa-f]{4})", + r"(?P(?:[0-9A-Fa-f]{1,2}-){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){2}[0-9A-Fa-f]{4})", + r"(?P[\w\.]*?)\(", + r"\b(?PTrue)\b|\b(?PFalse)\b|\b(?PNone)\b", + r"(?P\.\.\.)", + r"(?P(?\B(\/[\w\.\-\_\+]+)*\/)(?P[\w\.\-\_\+]*)?", + r"(?b?\'\'\'.*?(?[a-fA-F0-9]{8}\-[a-fA-F0-9]{4}\-[a-fA-F0-9]{4}\-[a-fA-F0-9]{4}\-[a-fA-F0-9]{12})", + r"(?P(file|https|http|ws|wss):\/\/[0-9a-zA-Z\$\-\_\+\!`\(\)\,\.\?\/\;\:\&\=\%\#]*)", + ), + ] + + +class JSONHighlighter(RegexHighlighter): + """Highlights JSON""" + + base_style = "json." + highlights = [ + _combine_regex( + r"(?P[\{\[\(\)\]\}])", + r"\b(?Ptrue)\b|\b(?Pfalse)\b|\b(?Pnull)\b", + r"(?P(?b?\".*?(?b?\".*?(? 
None: + data = loads(json) + json = dumps( + data, + indent=indent, + skipkeys=skip_keys, + ensure_ascii=ensure_ascii, + check_circular=check_circular, + allow_nan=allow_nan, + default=default, + sort_keys=sort_keys, + ) + highlighter = JSONHighlighter() if highlight else NullHighlighter() + self.text = highlighter(json) + self.text.no_wrap = True + self.text.overflow = None + + @classmethod + def from_data( + cls, + data: Any, + indent: Union[None, int, str] = 2, + highlight: bool = True, + skip_keys: bool = False, + ensure_ascii: bool = True, + check_circular: bool = True, + allow_nan: bool = True, + default: Optional[Callable[[Any], Any]] = None, + sort_keys: bool = False, + ) -> "JSON": + """Encodes a JSON object from arbitrary data. + + Args: + data (Any): An object that may be encoded in to JSON + indent (Union[None, int, str], optional): Number of characters to indent by. Defaults to 2. + highlight (bool, optional): Enable highlighting. Defaults to True. + default (Callable, optional): Optional callable which will be called for objects that cannot be serialized. Defaults to None. + skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False. + ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False. + check_circular (bool, optional): Check for circular references. Defaults to True. + allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True. + default (Callable, optional): A callable that converts values that can not be encoded + in to something that can be JSON encoded. Defaults to None. + sort_keys (bool, optional): Sort dictionary keys. Defaults to False. + + Returns: + JSON: New JSON object from the given data. + """ + json_instance: "JSON" = cls.__new__(cls) + json = dumps( + data, + indent=indent, + skipkeys=skip_keys, + ensure_ascii=ensure_ascii, + check_circular=check_circular, + allow_nan=allow_nan, + default=default, + sort_keys=sort_keys, + ) + highlighter = JSONHighlighter() if highlight else NullHighlighter() + json_instance.text = highlighter(json) + json_instance.text.no_wrap = True + json_instance.text.overflow = None + return json_instance + + def __rich__(self) -> Text: + return self.text + + +if __name__ == "__main__": + + import argparse + import sys + + parser = argparse.ArgumentParser(description="Pretty print json") + parser.add_argument( + "path", + metavar="PATH", + help="path to file, or - for stdin", + ) + parser.add_argument( + "-i", + "--indent", + metavar="SPACES", + type=int, + help="Number of spaces in an indent", + default=2, + ) + args = parser.parse_args() + + from pipenv.patched.notpip._vendor.rich.console import Console + + console = Console() + error_console = Console(stderr=True) + + try: + if args.path == "-": + json_data = sys.stdin.read() + else: + with open(args.path, "rt") as json_file: + json_data = json_file.read() + except Exception as error: + error_console.print(f"Unable to read {args.path!r}; {error}") + sys.exit(-1) + + console.print(JSON(json_data, indent=args.indent), soft_wrap=True) diff --git a/pipenv/patched/notpip/_vendor/rich/jupyter.py b/pipenv/patched/notpip/_vendor/rich/jupyter.py new file mode 100644 index 0000000000..bedf5cb19a --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/jupyter.py @@ -0,0 +1,92 @@ +from typing import Any, Dict, Iterable, List + +from . import get_console +from .segment import Segment +from .terminal_theme import DEFAULT_TERMINAL_THEME + +JUPYTER_HTML_FORMAT = """\ +
{code}
+""" + + +class JupyterRenderable: + """A shim to write html to Jupyter notebook.""" + + def __init__(self, html: str, text: str) -> None: + self.html = html + self.text = text + + def _repr_mimebundle_( + self, include: Iterable[str], exclude: Iterable[str], **kwargs: Any + ) -> Dict[str, str]: + data = {"text/plain": self.text, "text/html": self.html} + if include: + data = {k: v for (k, v) in data.items() if k in include} + if exclude: + data = {k: v for (k, v) in data.items() if k not in exclude} + return data + + +class JupyterMixin: + """Add to an Rich renderable to make it render in Jupyter notebook.""" + + __slots__ = () + + def _repr_mimebundle_( + self, include: Iterable[str], exclude: Iterable[str], **kwargs: Any + ) -> Dict[str, str]: + console = get_console() + segments = list(console.render(self, console.options)) # type: ignore + html = _render_segments(segments) + text = console._render_buffer(segments) + data = {"text/plain": text, "text/html": html} + if include: + data = {k: v for (k, v) in data.items() if k in include} + if exclude: + data = {k: v for (k, v) in data.items() if k not in exclude} + return data + + +def _render_segments(segments: Iterable[Segment]) -> str: + def escape(text: str) -> str: + """Escape html.""" + return text.replace("&", "&").replace("<", "<").replace(">", ">") + + fragments: List[str] = [] + append_fragment = fragments.append + theme = DEFAULT_TERMINAL_THEME + for text, style, control in Segment.simplify(segments): + if control: + continue + text = escape(text) + if style: + rule = style.get_html_style(theme) + text = f'{text}' if rule else text + if style.link: + text = f'{text}' + append_fragment(text) + + code = "".join(fragments) + html = JUPYTER_HTML_FORMAT.format(code=code) + + return html + + +def display(segments: Iterable[Segment], text: str) -> None: + """Render segments to Jupyter.""" + html = _render_segments(segments) + jupyter_renderable = JupyterRenderable(html, text) + try: + from IPython.display import display as ipython_display + + ipython_display(jupyter_renderable) + except ModuleNotFoundError: + # Handle the case where the Console has force_jupyter=True, + # but IPython is not installed. 
+ pass + + +def print(*args: Any, **kwargs: Any) -> None: + """Proxy for Console print.""" + console = get_console() + return console.print(*args, **kwargs) diff --git a/pipenv/patched/notpip/_vendor/rich/layout.py b/pipenv/patched/notpip/_vendor/rich/layout.py new file mode 100644 index 0000000000..9e75f5ac39 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/layout.py @@ -0,0 +1,444 @@ +from abc import ABC, abstractmethod +from itertools import islice +from operator import itemgetter +from threading import RLock +from typing import ( + TYPE_CHECKING, + Dict, + Iterable, + List, + NamedTuple, + Optional, + Sequence, + Tuple, + Union, +) + +from ._ratio import ratio_resolve +from .align import Align +from .console import Console, ConsoleOptions, RenderableType, RenderResult +from .highlighter import ReprHighlighter +from .panel import Panel +from .pretty import Pretty +from .repr import rich_repr, Result +from .region import Region +from .segment import Segment +from .style import StyleType + +if TYPE_CHECKING: + from pipenv.patched.notpip._vendor.rich.tree import Tree + + +class LayoutRender(NamedTuple): + """An individual layout render.""" + + region: Region + render: List[List[Segment]] + + +RegionMap = Dict["Layout", Region] +RenderMap = Dict["Layout", LayoutRender] + + +class LayoutError(Exception): + """Layout related error.""" + + +class NoSplitter(LayoutError): + """Requested splitter does not exist.""" + + +class _Placeholder: + """An internal renderable used as a Layout placeholder.""" + + highlighter = ReprHighlighter() + + def __init__(self, layout: "Layout", style: StyleType = "") -> None: + self.layout = layout + self.style = style + + def __rich_console__( + self, console: Console, options: ConsoleOptions + ) -> RenderResult: + width = options.max_width + height = options.height or options.size.height + layout = self.layout + title = ( + f"{layout.name!r} ({width} x {height})" + if layout.name + else f"({width} x {height})" + ) + yield Panel( + Align.center(Pretty(layout), vertical="middle"), + style=self.style, + title=self.highlighter(title), + border_style="blue", + ) + + +class Splitter(ABC): + """Base class for a splitter.""" + + name: str = "" + + @abstractmethod + def get_tree_icon(self) -> str: + """Get the icon (emoji) used in layout.tree""" + + @abstractmethod + def divide( + self, children: Sequence["Layout"], region: Region + ) -> Iterable[Tuple["Layout", Region]]: + """Divide a region amongst several child layouts. + + Args: + children (Sequence(Layout)): A number of child layouts. + region (Region): A rectangular region to divide. 
+ """ + + +class RowSplitter(Splitter): + """Split a layout region in to rows.""" + + name = "row" + + def get_tree_icon(self) -> str: + return "[layout.tree.row]⬌" + + def divide( + self, children: Sequence["Layout"], region: Region + ) -> Iterable[Tuple["Layout", Region]]: + x, y, width, height = region + render_widths = ratio_resolve(width, children) + offset = 0 + _Region = Region + for child, child_width in zip(children, render_widths): + yield child, _Region(x + offset, y, child_width, height) + offset += child_width + + +class ColumnSplitter(Splitter): + """Split a layout region in to columns.""" + + name = "column" + + def get_tree_icon(self) -> str: + return "[layout.tree.column]⬍" + + def divide( + self, children: Sequence["Layout"], region: Region + ) -> Iterable[Tuple["Layout", Region]]: + x, y, width, height = region + render_heights = ratio_resolve(height, children) + offset = 0 + _Region = Region + for child, child_height in zip(children, render_heights): + yield child, _Region(x, y + offset, width, child_height) + offset += child_height + + +@rich_repr +class Layout: + """A renderable to divide a fixed height in to rows or columns. + + Args: + renderable (RenderableType, optional): Renderable content, or None for placeholder. Defaults to None. + name (str, optional): Optional identifier for Layout. Defaults to None. + size (int, optional): Optional fixed size of layout. Defaults to None. + minimum_size (int, optional): Minimum size of layout. Defaults to 1. + ratio (int, optional): Optional ratio for flexible layout. Defaults to 1. + visible (bool, optional): Visibility of layout. Defaults to True. + """ + + splitters = {"row": RowSplitter, "column": ColumnSplitter} + + def __init__( + self, + renderable: Optional[RenderableType] = None, + *, + name: Optional[str] = None, + size: Optional[int] = None, + minimum_size: int = 1, + ratio: int = 1, + visible: bool = True, + height: Optional[int] = None, + ) -> None: + self._renderable = renderable or _Placeholder(self) + self.size = size + self.minimum_size = minimum_size + self.ratio = ratio + self.name = name + self.visible = visible + self.height = height + self.splitter: Splitter = self.splitters["column"]() + self._children: List[Layout] = [] + self._render_map: RenderMap = {} + self._lock = RLock() + + def __rich_repr__(self) -> Result: + yield "name", self.name, None + yield "size", self.size, None + yield "minimum_size", self.minimum_size, 1 + yield "ratio", self.ratio, 1 + + @property + def renderable(self) -> RenderableType: + """Layout renderable.""" + return self if self._children else self._renderable + + @property + def children(self) -> List["Layout"]: + """Gets (visible) layout children.""" + return [child for child in self._children if child.visible] + + @property + def map(self) -> RenderMap: + """Get a map of the last render.""" + return self._render_map + + def get(self, name: str) -> Optional["Layout"]: + """Get a named layout, or None if it doesn't exist. + + Args: + name (str): Name of layout. + + Returns: + Optional[Layout]: Layout instance or None if no layout was found. 
+ """ + if self.name == name: + return self + else: + for child in self._children: + named_layout = child.get(name) + if named_layout is not None: + return named_layout + return None + + def __getitem__(self, name: str) -> "Layout": + layout = self.get(name) + if layout is None: + raise KeyError(f"No layout with name {name!r}") + return layout + + @property + def tree(self) -> "Tree": + """Get a tree renderable to show layout structure.""" + from pipenv.patched.notpip._vendor.rich.styled import Styled + from pipenv.patched.notpip._vendor.rich.table import Table + from pipenv.patched.notpip._vendor.rich.tree import Tree + + def summary(layout: "Layout") -> Table: + + icon = layout.splitter.get_tree_icon() + + table = Table.grid(padding=(0, 1, 0, 0)) + + text: RenderableType = ( + Pretty(layout) if layout.visible else Styled(Pretty(layout), "dim") + ) + table.add_row(icon, text) + _summary = table + return _summary + + layout = self + tree = Tree( + summary(layout), + guide_style=f"layout.tree.{layout.splitter.name}", + highlight=True, + ) + + def recurse(tree: "Tree", layout: "Layout") -> None: + for child in layout._children: + recurse( + tree.add( + summary(child), + guide_style=f"layout.tree.{child.splitter.name}", + ), + child, + ) + + recurse(tree, self) + return tree + + def split( + self, + *layouts: Union["Layout", RenderableType], + splitter: Union[Splitter, str] = "column", + ) -> None: + """Split the layout in to multiple sub-layouts. + + Args: + *layouts (Layout): Positional arguments should be (sub) Layout instances. + splitter (Union[Splitter, str]): Splitter instance or name of splitter. + """ + _layouts = [ + layout if isinstance(layout, Layout) else Layout(layout) + for layout in layouts + ] + try: + self.splitter = ( + splitter + if isinstance(splitter, Splitter) + else self.splitters[splitter]() + ) + except KeyError: + raise NoSplitter(f"No splitter called {splitter!r}") + self._children[:] = _layouts + + def add_split(self, *layouts: Union["Layout", RenderableType]) -> None: + """Add a new layout(s) to existing split. + + Args: + *layouts (Union[Layout, RenderableType]): Positional arguments should be renderables or (sub) Layout instances. + + """ + _layouts = ( + layout if isinstance(layout, Layout) else Layout(layout) + for layout in layouts + ) + self._children.extend(_layouts) + + def split_row(self, *layouts: Union["Layout", RenderableType]) -> None: + """Split the layout in tow a row (Layouts side by side). + + Args: + *layouts (Layout): Positional arguments should be (sub) Layout instances. + """ + self.split(*layouts, splitter="row") + + def split_column(self, *layouts: Union["Layout", RenderableType]) -> None: + """Split the layout in to a column (layouts stacked on top of each other). + + Args: + *layouts (Layout): Positional arguments should be (sub) Layout instances. + """ + self.split(*layouts, splitter="column") + + def unsplit(self) -> None: + """Reset splits to initial state.""" + del self._children[:] + + def update(self, renderable: RenderableType) -> None: + """Update renderable. + + Args: + renderable (RenderableType): New renderable object. + """ + with self._lock: + self._renderable = renderable + + def refresh_screen(self, console: "Console", layout_name: str) -> None: + """Refresh a sub-layout. + + Args: + console (Console): Console instance where Layout is to be rendered. + layout_name (str): Name of layout. 
+ """ + with self._lock: + layout = self[layout_name] + region, _lines = self._render_map[layout] + (x, y, width, height) = region + lines = console.render_lines( + layout, console.options.update_dimensions(width, height) + ) + self._render_map[layout] = LayoutRender(region, lines) + console.update_screen_lines(lines, x, y) + + def _make_region_map(self, width: int, height: int) -> RegionMap: + """Create a dict that maps layout on to Region.""" + stack: List[Tuple[Layout, Region]] = [(self, Region(0, 0, width, height))] + push = stack.append + pop = stack.pop + layout_regions: List[Tuple[Layout, Region]] = [] + append_layout_region = layout_regions.append + while stack: + append_layout_region(pop()) + layout, region = layout_regions[-1] + children = layout.children + if children: + for child_and_region in layout.splitter.divide(children, region): + push(child_and_region) + + region_map = { + layout: region + for layout, region in sorted(layout_regions, key=itemgetter(1)) + } + return region_map + + def render(self, console: Console, options: ConsoleOptions) -> RenderMap: + """Render the sub_layouts. + + Args: + console (Console): Console instance. + options (ConsoleOptions): Console options. + + Returns: + RenderMap: A dict that maps Layout on to a tuple of Region, lines + """ + render_width = options.max_width + render_height = options.height or console.height + region_map = self._make_region_map(render_width, render_height) + layout_regions = [ + (layout, region) + for layout, region in region_map.items() + if not layout.children + ] + render_map: Dict["Layout", "LayoutRender"] = {} + render_lines = console.render_lines + update_dimensions = options.update_dimensions + + for layout, region in layout_regions: + lines = render_lines( + layout.renderable, update_dimensions(region.width, region.height) + ) + render_map[layout] = LayoutRender(region, lines) + return render_map + + def __rich_console__( + self, console: Console, options: ConsoleOptions + ) -> RenderResult: + with self._lock: + width = options.max_width or console.width + height = options.height or console.height + render_map = self.render(console, options.update_dimensions(width, height)) + self._render_map = render_map + layout_lines: List[List[Segment]] = [[] for _ in range(height)] + _islice = islice + for (region, lines) in render_map.values(): + _x, y, _layout_width, layout_height = region + for row, line in zip( + _islice(layout_lines, y, y + layout_height), lines + ): + row.extend(line) + + new_line = Segment.line() + for layout_row in layout_lines: + yield from layout_row + yield new_line + + +if __name__ == "__main__": + from pipenv.patched.notpip._vendor.rich.console import Console + + console = Console() + layout = Layout() + + layout.split_column( + Layout(name="header", size=3), + Layout(ratio=1, name="main"), + Layout(size=10, name="footer"), + ) + + layout["main"].split_row(Layout(name="side"), Layout(name="body", ratio=2)) + + layout["body"].split_row(Layout(name="content", ratio=2), Layout(name="s2")) + + layout["s2"].split_column( + Layout(name="top"), Layout(name="middle"), Layout(name="bottom") + ) + + layout["side"].split_column(Layout(layout.tree, name="left1"), Layout(name="left2")) + + layout["content"].update("foo") + + console.print(layout) diff --git a/pipenv/patched/notpip/_vendor/rich/live.py b/pipenv/patched/notpip/_vendor/rich/live.py new file mode 100644 index 0000000000..6db5b605f9 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/live.py @@ -0,0 +1,365 @@ +import sys +from threading 
import Event, RLock, Thread +from types import TracebackType +from typing import IO, Any, Callable, List, Optional, TextIO, Type, cast + +from . import get_console +from .console import Console, ConsoleRenderable, RenderableType, RenderHook +from .control import Control +from .file_proxy import FileProxy +from .jupyter import JupyterMixin +from .live_render import LiveRender, VerticalOverflowMethod +from .screen import Screen +from .text import Text + + +class _RefreshThread(Thread): + """A thread that calls refresh() at regular intervals.""" + + def __init__(self, live: "Live", refresh_per_second: float) -> None: + self.live = live + self.refresh_per_second = refresh_per_second + self.done = Event() + super().__init__(daemon=True) + + def stop(self) -> None: + self.done.set() + + def run(self) -> None: + while not self.done.wait(1 / self.refresh_per_second): + with self.live._lock: + if not self.done.is_set(): + self.live.refresh() + + +class Live(JupyterMixin, RenderHook): + """Renders an auto-updating live display of any given renderable. + + Args: + renderable (RenderableType, optional): The renderable to live display. Defaults to displaying nothing. + console (Console, optional): Optional Console instance. Default will an internal Console instance writing to stdout. + screen (bool, optional): Enable alternate screen mode. Defaults to False. + auto_refresh (bool, optional): Enable auto refresh. If disabled, you will need to call `refresh()` or `update()` with refresh flag. Defaults to True + refresh_per_second (float, optional): Number of times per second to refresh the live display. Defaults to 4. + transient (bool, optional): Clear the renderable on exit (has no effect when screen=True). Defaults to False. + redirect_stdout (bool, optional): Enable redirection of stdout, so ``print`` may be used. Defaults to True. + redirect_stderr (bool, optional): Enable redirection of stderr. Defaults to True. + vertical_overflow (VerticalOverflowMethod, optional): How to handle renderable when it is too tall for the console. Defaults to "ellipsis". + get_renderable (Callable[[], RenderableType], optional): Optional callable to get renderable. Defaults to None. 
+ """ + + def __init__( + self, + renderable: Optional[RenderableType] = None, + *, + console: Optional[Console] = None, + screen: bool = False, + auto_refresh: bool = True, + refresh_per_second: float = 4, + transient: bool = False, + redirect_stdout: bool = True, + redirect_stderr: bool = True, + vertical_overflow: VerticalOverflowMethod = "ellipsis", + get_renderable: Optional[Callable[[], RenderableType]] = None, + ) -> None: + assert refresh_per_second > 0, "refresh_per_second must be > 0" + self._renderable = renderable + self.console = console if console is not None else get_console() + self._screen = screen + self._alt_screen = False + + self._redirect_stdout = redirect_stdout + self._redirect_stderr = redirect_stderr + self._restore_stdout: Optional[IO[str]] = None + self._restore_stderr: Optional[IO[str]] = None + + self._lock = RLock() + self.ipy_widget: Optional[Any] = None + self.auto_refresh = auto_refresh + self._started: bool = False + self.transient = True if screen else transient + + self._refresh_thread: Optional[_RefreshThread] = None + self.refresh_per_second = refresh_per_second + + self.vertical_overflow = vertical_overflow + self._get_renderable = get_renderable + self._live_render = LiveRender( + self.get_renderable(), vertical_overflow=vertical_overflow + ) + + @property + def is_started(self) -> bool: + """Check if live display has been started.""" + return self._started + + def get_renderable(self) -> RenderableType: + renderable = ( + self._get_renderable() + if self._get_renderable is not None + else self._renderable + ) + return renderable or "" + + def start(self, refresh: bool = False) -> None: + """Start live rendering display. + + Args: + refresh (bool, optional): Also refresh. Defaults to False. + """ + with self._lock: + if self._started: + return + self.console.set_live(self) + self._started = True + if self._screen: + self._alt_screen = self.console.set_alt_screen(True) + self.console.show_cursor(False) + self._enable_redirect_io() + self.console.push_render_hook(self) + if refresh: + self.refresh() + if self.auto_refresh: + self._refresh_thread = _RefreshThread(self, self.refresh_per_second) + self._refresh_thread.start() + + def stop(self) -> None: + """Stop live rendering display.""" + with self._lock: + if not self._started: + return + self.console.clear_live() + self._started = False + + if self.auto_refresh and self._refresh_thread is not None: + self._refresh_thread.stop() + self._refresh_thread = None + # allow it to fully render on the last even if overflow + self.vertical_overflow = "visible" + with self.console: + try: + if not self._alt_screen and not self.console.is_jupyter: + self.refresh() + finally: + self._disable_redirect_io() + self.console.pop_render_hook() + if not self._alt_screen and self.console.is_terminal: + self.console.line() + self.console.show_cursor(True) + if self._alt_screen: + self.console.set_alt_screen(False) + + if self.transient and not self._alt_screen: + self.console.control(self._live_render.restore_cursor()) + if self.ipy_widget is not None and self.transient: + self.ipy_widget.close() # pragma: no cover + + def __enter__(self) -> "Live": + self.start(refresh=self._renderable is not None) + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + self.stop() + + def _enable_redirect_io(self) -> None: + """Enable redirecting of stdout / stderr.""" + if self.console.is_terminal or 
self.console.is_jupyter: + if self._redirect_stdout and not isinstance(sys.stdout, FileProxy): + self._restore_stdout = sys.stdout + sys.stdout = cast("TextIO", FileProxy(self.console, sys.stdout)) + if self._redirect_stderr and not isinstance(sys.stderr, FileProxy): + self._restore_stderr = sys.stderr + sys.stderr = cast("TextIO", FileProxy(self.console, sys.stderr)) + + def _disable_redirect_io(self) -> None: + """Disable redirecting of stdout / stderr.""" + if self._restore_stdout: + sys.stdout = cast("TextIO", self._restore_stdout) + self._restore_stdout = None + if self._restore_stderr: + sys.stderr = cast("TextIO", self._restore_stderr) + self._restore_stderr = None + + @property + def renderable(self) -> RenderableType: + """Get the renderable that is being displayed + + Returns: + RenderableType: Displayed renderable. + """ + renderable = self.get_renderable() + return Screen(renderable) if self._alt_screen else renderable + + def update(self, renderable: RenderableType, *, refresh: bool = False) -> None: + """Update the renderable that is being displayed + + Args: + renderable (RenderableType): New renderable to use. + refresh (bool, optional): Refresh the display. Defaults to False. + """ + with self._lock: + self._renderable = renderable + if refresh: + self.refresh() + + def refresh(self) -> None: + """Update the display of the Live Render.""" + with self._lock: + self._live_render.set_renderable(self.renderable) + if self.console.is_jupyter: # pragma: no cover + try: + from IPython.display import display + from ipywidgets import Output + except ImportError: + import warnings + + warnings.warn('install "ipywidgets" for Jupyter support') + else: + if self.ipy_widget is None: + self.ipy_widget = Output() + display(self.ipy_widget) + + with self.ipy_widget: + self.ipy_widget.clear_output(wait=True) + self.console.print(self._live_render.renderable) + elif self.console.is_terminal and not self.console.is_dumb_terminal: + with self.console: + self.console.print(Control()) + elif ( + not self._started and not self.transient + ): # if it is finished allow files or dumb-terminals to see final result + with self.console: + self.console.print(Control()) + + def process_renderables( + self, renderables: List[ConsoleRenderable] + ) -> List[ConsoleRenderable]: + """Process renderables to restore cursor and display progress.""" + self._live_render.vertical_overflow = self.vertical_overflow + if self.console.is_interactive: + # lock needs acquiring as user can modify live_render renderable at any time unlike in Progress. 
+ with self._lock: + reset = ( + Control.home() + if self._alt_screen + else self._live_render.position_cursor() + ) + renderables = [reset, *renderables, self._live_render] + elif ( + not self._started and not self.transient + ): # if it is finished render the final output for files or dumb_terminals + renderables = [*renderables, self._live_render] + + return renderables + + +if __name__ == "__main__": # pragma: no cover + import random + import time + from itertools import cycle + from typing import Dict, List, Tuple + + from .align import Align + from .console import Console + from .live import Live as Live + from .panel import Panel + from .rule import Rule + from .syntax import Syntax + from .table import Table + + console = Console() + + syntax = Syntax( + '''def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]: + """Iterate and generate a tuple with a flag for last value.""" + iter_values = iter(values) + try: + previous_value = next(iter_values) + except StopIteration: + return + for value in iter_values: + yield False, previous_value + previous_value = value + yield True, previous_value''', + "python", + line_numbers=True, + ) + + table = Table("foo", "bar", "baz") + table.add_row("1", "2", "3") + + progress_renderables = [ + "You can make the terminal shorter and taller to see the live table hide" + "Text may be printed while the progress bars are rendering.", + Panel("In fact, [i]any[/i] renderable will work"), + "Such as [magenta]tables[/]...", + table, + "Pretty printed structures...", + {"type": "example", "text": "Pretty printed"}, + "Syntax...", + syntax, + Rule("Give it a try!"), + ] + + examples = cycle(progress_renderables) + + exchanges = [ + "SGD", + "MYR", + "EUR", + "USD", + "AUD", + "JPY", + "CNH", + "HKD", + "CAD", + "INR", + "DKK", + "GBP", + "RUB", + "NZD", + "MXN", + "IDR", + "TWD", + "THB", + "VND", + ] + with Live(console=console) as live_table: + exchange_rate_dict: Dict[Tuple[str, str], float] = {} + + for index in range(100): + select_exchange = exchanges[index % len(exchanges)] + + for exchange in exchanges: + if exchange == select_exchange: + continue + time.sleep(0.4) + if random.randint(0, 10) < 1: + console.log(next(examples)) + exchange_rate_dict[(select_exchange, exchange)] = 200 / ( + (random.random() * 320) + 1 + ) + if len(exchange_rate_dict) > len(exchanges) - 1: + exchange_rate_dict.pop(list(exchange_rate_dict.keys())[0]) + table = Table(title="Exchange Rates") + + table.add_column("Source Currency") + table.add_column("Destination Currency") + table.add_column("Exchange Rate") + + for ((source, dest), exchange_rate) in exchange_rate_dict.items(): + table.add_row( + source, + dest, + Text( + f"{exchange_rate:.4f}", + style="red" if exchange_rate < 1.0 else "green", + ), + ) + + live_table.update(Align.center(table)) diff --git a/pipenv/patched/notpip/_vendor/rich/live_render.py b/pipenv/patched/notpip/_vendor/rich/live_render.py new file mode 100644 index 0000000000..e4057dcbdb --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/live_render.py @@ -0,0 +1,113 @@ +import sys +from typing import Optional, Tuple + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from pipenv.patched.notpip._vendor.typing_extensions import Literal # pragma: no cover + + +from ._loop import loop_last +from .console import Console, ConsoleOptions, RenderableType, RenderResult +from .control import Control +from .segment import ControlType, Segment +from .style import StyleType +from .text import Text + +VerticalOverflowMethod = 
Literal["crop", "ellipsis", "visible"] + + +class LiveRender: + """Creates a renderable that may be updated. + + Args: + renderable (RenderableType): Any renderable object. + style (StyleType, optional): An optional style to apply to the renderable. Defaults to "". + """ + + def __init__( + self, + renderable: RenderableType, + style: StyleType = "", + vertical_overflow: VerticalOverflowMethod = "ellipsis", + ) -> None: + self.renderable = renderable + self.style = style + self.vertical_overflow = vertical_overflow + self._shape: Optional[Tuple[int, int]] = None + + def set_renderable(self, renderable: RenderableType) -> None: + """Set a new renderable. + + Args: + renderable (RenderableType): Any renderable object, including str. + """ + self.renderable = renderable + + def position_cursor(self) -> Control: + """Get control codes to move cursor to beginning of live render. + + Returns: + Control: A control instance that may be printed. + """ + if self._shape is not None: + _, height = self._shape + return Control( + ControlType.CARRIAGE_RETURN, + (ControlType.ERASE_IN_LINE, 2), + *( + ( + (ControlType.CURSOR_UP, 1), + (ControlType.ERASE_IN_LINE, 2), + ) + * (height - 1) + ) + ) + return Control() + + def restore_cursor(self) -> Control: + """Get control codes to clear the render and restore the cursor to its previous position. + + Returns: + Control: A Control instance that may be printed. + """ + if self._shape is not None: + _, height = self._shape + return Control( + ControlType.CARRIAGE_RETURN, + *((ControlType.CURSOR_UP, 1), (ControlType.ERASE_IN_LINE, 2)) * height + ) + return Control() + + def __rich_console__( + self, console: Console, options: ConsoleOptions + ) -> RenderResult: + + renderable = self.renderable + style = console.get_style(self.style) + lines = console.render_lines(renderable, options, style=style, pad=False) + shape = Segment.get_shape(lines) + + _, height = shape + if height > options.size.height: + if self.vertical_overflow == "crop": + lines = lines[: options.size.height] + shape = Segment.get_shape(lines) + elif self.vertical_overflow == "ellipsis": + lines = lines[: (options.size.height - 1)] + overflow_text = Text( + "...", + overflow="crop", + justify="center", + end="", + style="live.ellipsis", + ) + lines.append(list(console.render(overflow_text))) + shape = Segment.get_shape(lines) + self._shape = shape + + new_line = Segment.line() + for last, line in loop_last(lines): + yield from line + if not last: + yield new_line diff --git a/pipenv/patched/notpip/_vendor/rich/logging.py b/pipenv/patched/notpip/_vendor/rich/logging.py new file mode 100644 index 0000000000..002f1f7bf1 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/logging.py @@ -0,0 +1,268 @@ +import logging +from datetime import datetime +from logging import Handler, LogRecord +from pathlib import Path +from typing import ClassVar, List, Optional, Type, Union + +from . import get_console +from ._log_render import LogRender, FormatTimeCallable +from .console import Console, ConsoleRenderable +from .highlighter import Highlighter, ReprHighlighter +from .text import Text +from .traceback import Traceback + + +class RichHandler(Handler): + """A logging handler that renders output with Rich. The time / level / message and file are displayed in columns. + The level is color coded, and the message is syntax highlighted. + + Note: + Be careful when enabling console markup in log messages if you have configured logging for libraries not + under your control. 
If a dependency writes messages containing square brackets, it may not produce the intended output. + + Args: + level (Union[int, str], optional): Log level. Defaults to logging.NOTSET. + console (:class:`~rich.console.Console`, optional): Optional console instance to write logs. + Default will use a global console instance writing to stdout. + show_time (bool, optional): Show a column for the time. Defaults to True. + omit_repeated_times (bool, optional): Omit repetition of the same time. Defaults to True. + show_level (bool, optional): Show a column for the level. Defaults to True. + show_path (bool, optional): Show the path to the original log call. Defaults to True. + enable_link_path (bool, optional): Enable terminal link of path column to file. Defaults to True. + highlighter (Highlighter, optional): Highlighter to style log messages, or None to use ReprHighlighter. Defaults to None. + markup (bool, optional): Enable console markup in log messages. Defaults to False. + rich_tracebacks (bool, optional): Enable rich tracebacks with syntax highlighting and formatting. Defaults to False. + tracebacks_width (Optional[int], optional): Number of characters used to render tracebacks, or None for full width. Defaults to None. + tracebacks_extra_lines (int, optional): Additional lines of code to render tracebacks, or None for full width. Defaults to None. + tracebacks_theme (str, optional): Override pygments theme used in traceback. + tracebacks_word_wrap (bool, optional): Enable word wrapping of long tracebacks lines. Defaults to True. + tracebacks_show_locals (bool, optional): Enable display of locals in tracebacks. Defaults to False. + locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation. + Defaults to 10. + locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80. + log_time_format (Union[str, TimeFormatterCallable], optional): If ``log_time`` is enabled, either string for strftime or callable that formats the time. Defaults to "[%x %X] ". 
+ """ + + KEYWORDS: ClassVar[Optional[List[str]]] = [ + "GET", + "POST", + "HEAD", + "PUT", + "DELETE", + "OPTIONS", + "TRACE", + "PATCH", + ] + HIGHLIGHTER_CLASS: ClassVar[Type[Highlighter]] = ReprHighlighter + + def __init__( + self, + level: Union[int, str] = logging.NOTSET, + console: Optional[Console] = None, + *, + show_time: bool = True, + omit_repeated_times: bool = True, + show_level: bool = True, + show_path: bool = True, + enable_link_path: bool = True, + highlighter: Optional[Highlighter] = None, + markup: bool = False, + rich_tracebacks: bool = False, + tracebacks_width: Optional[int] = None, + tracebacks_extra_lines: int = 3, + tracebacks_theme: Optional[str] = None, + tracebacks_word_wrap: bool = True, + tracebacks_show_locals: bool = False, + locals_max_length: int = 10, + locals_max_string: int = 80, + log_time_format: Union[str, FormatTimeCallable] = "[%x %X]", + ) -> None: + super().__init__(level=level) + self.console = console or get_console() + self.highlighter = highlighter or self.HIGHLIGHTER_CLASS() + self._log_render = LogRender( + show_time=show_time, + show_level=show_level, + show_path=show_path, + time_format=log_time_format, + omit_repeated_times=omit_repeated_times, + level_width=None, + ) + self.enable_link_path = enable_link_path + self.markup = markup + self.rich_tracebacks = rich_tracebacks + self.tracebacks_width = tracebacks_width + self.tracebacks_extra_lines = tracebacks_extra_lines + self.tracebacks_theme = tracebacks_theme + self.tracebacks_word_wrap = tracebacks_word_wrap + self.tracebacks_show_locals = tracebacks_show_locals + self.locals_max_length = locals_max_length + self.locals_max_string = locals_max_string + + def get_level_text(self, record: LogRecord) -> Text: + """Get the level name from the record. + + Args: + record (LogRecord): LogRecord instance. + + Returns: + Text: A tuple of the style and level name. + """ + level_name = record.levelname + level_text = Text.styled( + level_name.ljust(8), f"logging.level.{level_name.lower()}" + ) + return level_text + + def emit(self, record: LogRecord) -> None: + """Invoked by logging.""" + message = self.format(record) + traceback = None + if ( + self.rich_tracebacks + and record.exc_info + and record.exc_info != (None, None, None) + ): + exc_type, exc_value, exc_traceback = record.exc_info + assert exc_type is not None + assert exc_value is not None + traceback = Traceback.from_exception( + exc_type, + exc_value, + exc_traceback, + width=self.tracebacks_width, + extra_lines=self.tracebacks_extra_lines, + theme=self.tracebacks_theme, + word_wrap=self.tracebacks_word_wrap, + show_locals=self.tracebacks_show_locals, + locals_max_length=self.locals_max_length, + locals_max_string=self.locals_max_string, + ) + message = record.getMessage() + if self.formatter: + record.message = record.getMessage() + formatter = self.formatter + if hasattr(formatter, "usesTime") and formatter.usesTime(): + record.asctime = formatter.formatTime(record, formatter.datefmt) + message = formatter.formatMessage(record) + + message_renderable = self.render_message(record, message) + log_renderable = self.render( + record=record, traceback=traceback, message_renderable=message_renderable + ) + try: + self.console.print(log_renderable) + except Exception: + self.handleError(record) + + def render_message(self, record: LogRecord, message: str) -> "ConsoleRenderable": + """Render message text in to Text. + + record (LogRecord): logging Record. + message (str): String containing log message. 
+ + Returns: + ConsoleRenderable: Renderable to display log message. + """ + use_markup = getattr(record, "markup", self.markup) + message_text = Text.from_markup(message) if use_markup else Text(message) + + highlighter = getattr(record, "highlighter", self.highlighter) + if highlighter: + message_text = highlighter(message_text) + + if self.KEYWORDS: + message_text.highlight_words(self.KEYWORDS, "logging.keyword") + return message_text + + def render( + self, + *, + record: LogRecord, + traceback: Optional[Traceback], + message_renderable: "ConsoleRenderable", + ) -> "ConsoleRenderable": + """Render log for display. + + Args: + record (LogRecord): logging Record. + traceback (Optional[Traceback]): Traceback instance or None for no Traceback. + message_renderable (ConsoleRenderable): Renderable (typically Text) containing log message contents. + + Returns: + ConsoleRenderable: Renderable to display log. + """ + path = Path(record.pathname).name + level = self.get_level_text(record) + time_format = None if self.formatter is None else self.formatter.datefmt + log_time = datetime.fromtimestamp(record.created) + + log_renderable = self._log_render( + self.console, + [message_renderable] if not traceback else [message_renderable, traceback], + log_time=log_time, + time_format=time_format, + level=level, + path=path, + line_no=record.lineno, + link_path=record.pathname if self.enable_link_path else None, + ) + return log_renderable + + +if __name__ == "__main__": # pragma: no cover + from time import sleep + + FORMAT = "%(message)s" + # FORMAT = "%(asctime)-15s - %(levelname)s - %(message)s" + logging.basicConfig( + level="NOTSET", + format=FORMAT, + datefmt="[%X]", + handlers=[RichHandler(rich_tracebacks=True, tracebacks_show_locals=True)], + ) + log = logging.getLogger("rich") + + log.info("Server starting...") + log.info("Listening on http://127.0.0.1:8080") + sleep(1) + + log.info("GET /index.html 200 1298") + log.info("GET /imgs/backgrounds/back1.jpg 200 54386") + log.info("GET /css/styles.css 200 54386") + log.warning("GET /favicon.ico 404 242") + sleep(1) + + log.debug( + "JSONRPC request\n--> %r\n<-- %r", + { + "version": "1.1", + "method": "confirmFruitPurchase", + "params": [["apple", "orange", "mangoes", "pomelo"], 1.123], + "id": "194521489", + }, + {"version": "1.1", "result": True, "error": None, "id": "194521489"}, + ) + log.debug( + "Loading configuration file /adasd/asdasd/qeqwe/qwrqwrqwr/sdgsdgsdg/werwerwer/dfgerert/ertertert/ertetert/werwerwer" + ) + log.error("Unable to find 'pomelo' in database!") + log.info("POST /jsonrpc/ 200 65532") + log.info("POST /admin/ 401 42234") + log.warning("password was rejected for admin site.") + + def divide() -> None: + number = 1 + divisor = 0 + foos = ["foo"] * 100 + log.debug("in divide") + try: + number / divisor + except: + log.exception("An error of some kind occurred!") + + divide() + sleep(1) + log.critical("Out of memory!") + log.info("Server exited with code=-1") + log.info("[bold]EXITING...[/bold]", extra=dict(markup=True)) diff --git a/pipenv/patched/notpip/_vendor/rich/markup.py b/pipenv/patched/notpip/_vendor/rich/markup.py new file mode 100644 index 0000000000..c95df685ca --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/markup.py @@ -0,0 +1,244 @@ +from ast import literal_eval +from operator import attrgetter +import re +from typing import Callable, Iterable, List, Match, NamedTuple, Optional, Tuple, Union + +from .errors import MarkupError +from .style import Style +from .text import Span, Text +from .emoji import 
EmojiVariant +from ._emoji_replace import _emoji_replace + + +RE_TAGS = re.compile( + r"""((\\*)\[([a-z#\/@].*?)\])""", + re.VERBOSE, +) + +RE_HANDLER = re.compile(r"^([\w\.]*?)(\(.*?\))?$") + + +class Tag(NamedTuple): + """A tag in console markup.""" + + name: str + """The tag name. e.g. 'bold'.""" + parameters: Optional[str] + """Any additional parameters after the name.""" + + def __str__(self) -> str: + return ( + self.name if self.parameters is None else f"{self.name} {self.parameters}" + ) + + @property + def markup(self) -> str: + """Get the string representation of this tag.""" + return ( + f"[{self.name}]" + if self.parameters is None + else f"[{self.name}={self.parameters}]" + ) + + +_ReStringMatch = Match[str] # regex match object +_ReSubCallable = Callable[[_ReStringMatch], str] # Callable invoked by re.sub +_EscapeSubMethod = Callable[[_ReSubCallable, str], str] # Sub method of a compiled re + + +def escape( + markup: str, _escape: _EscapeSubMethod = re.compile(r"(\\*)(\[[a-z#\/@].*?\])").sub +) -> str: + """Escapes text so that it won't be interpreted as markup. + + Args: + markup (str): Content to be inserted in to markup. + + Returns: + str: Markup with square brackets escaped. + """ + + def escape_backslashes(match: Match[str]) -> str: + """Called by re.sub replace matches.""" + backslashes, text = match.groups() + return f"{backslashes}{backslashes}\\{text}" + + markup = _escape(escape_backslashes, markup) + return markup + + +def _parse(markup: str) -> Iterable[Tuple[int, Optional[str], Optional[Tag]]]: + """Parse markup in to an iterable of tuples of (position, text, tag). + + Args: + markup (str): A string containing console markup + + """ + position = 0 + _divmod = divmod + _Tag = Tag + for match in RE_TAGS.finditer(markup): + full_text, escapes, tag_text = match.groups() + start, end = match.span() + if start > position: + yield start, markup[position:start], None + if escapes: + backslashes, escaped = _divmod(len(escapes), 2) + if backslashes: + # Literal backslashes + yield start, "\\" * backslashes, None + start += backslashes * 2 + if escaped: + # Escape of tag + yield start, full_text[len(escapes) :], None + position = end + continue + text, equals, parameters = tag_text.partition("=") + yield start, None, _Tag(text, parameters if equals else None) + position = end + if position < len(markup): + yield position, markup[position:], None + + +def render( + markup: str, + style: Union[str, Style] = "", + emoji: bool = True, + emoji_variant: Optional[EmojiVariant] = None, +) -> Text: + """Render console markup in to a Text instance. + + Args: + markup (str): A string containing console markup. + emoji (bool, optional): Also render emoji code. Defaults to True. + + Raises: + MarkupError: If there is a syntax error in the markup. + + Returns: + Text: A test instance. 
+ """ + emoji_replace = _emoji_replace + if "[" not in markup: + return Text( + emoji_replace(markup, default_variant=emoji_variant) if emoji else markup, + style=style, + ) + text = Text(style=style) + append = text.append + normalize = Style.normalize + + style_stack: List[Tuple[int, Tag]] = [] + pop = style_stack.pop + + spans: List[Span] = [] + append_span = spans.append + + _Span = Span + _Tag = Tag + + def pop_style(style_name: str) -> Tuple[int, Tag]: + """Pop tag matching given style name.""" + for index, (_, tag) in enumerate(reversed(style_stack), 1): + if tag.name == style_name: + return pop(-index) + raise KeyError(style_name) + + for position, plain_text, tag in _parse(markup): + if plain_text is not None: + append(emoji_replace(plain_text) if emoji else plain_text) + elif tag is not None: + if tag.name.startswith("/"): # Closing tag + style_name = tag.name[1:].strip() + + if style_name: # explicit close + style_name = normalize(style_name) + try: + start, open_tag = pop_style(style_name) + except KeyError: + raise MarkupError( + f"closing tag '{tag.markup}' at position {position} doesn't match any open tag" + ) from None + else: # implicit close + try: + start, open_tag = pop() + except IndexError: + raise MarkupError( + f"closing tag '[/]' at position {position} has nothing to close" + ) from None + + if open_tag.name.startswith("@"): + if open_tag.parameters: + handler_name = "" + parameters = open_tag.parameters.strip() + handler_match = RE_HANDLER.match(parameters) + if handler_match is not None: + handler_name, match_parameters = handler_match.groups() + parameters = ( + "()" if match_parameters is None else match_parameters + ) + + try: + meta_params = literal_eval(parameters) + except SyntaxError as error: + raise MarkupError( + f"error parsing {parameters!r} in {open_tag.parameters!r}; {error.msg}" + ) + except Exception as error: + raise MarkupError( + f"error parsing {open_tag.parameters!r}; {error}" + ) from None + + if handler_name: + meta_params = ( + handler_name, + meta_params + if isinstance(meta_params, tuple) + else (meta_params,), + ) + + else: + meta_params = () + + append_span( + _Span( + start, len(text), Style(meta={open_tag.name: meta_params}) + ) + ) + else: + append_span(_Span(start, len(text), str(open_tag))) + + else: # Opening tag + normalized_tag = _Tag(normalize(tag.name), tag.parameters) + style_stack.append((len(text), normalized_tag)) + + text_length = len(text) + while style_stack: + start, tag = style_stack.pop() + style = str(tag) + if style: + append_span(_Span(start, text_length, style)) + + text.spans = sorted(spans[::-1], key=attrgetter("start")) + return text + + +if __name__ == "__main__": # pragma: no cover + + MARKUP = [ + "[red]Hello World[/red]", + "[magenta]Hello [b]World[/b]", + "[bold]Bold[italic] bold and italic [/bold]italic[/italic]", + "Click [link=https://www.willmcgugan.com]here[/link] to visit my Blog", + ":warning-emoji: [bold red blink] DANGER![/]", + ] + + from pipenv.patched.notpip._vendor.rich.table import Table + from pipenv.patched.notpip._vendor.rich import print + + grid = Table("Markup", "Result", padding=(0, 1)) + + for markup in MARKUP: + grid.add_row(Text(markup), markup) + + print(grid) diff --git a/pipenv/patched/notpip/_vendor/rich/measure.py b/pipenv/patched/notpip/_vendor/rich/measure.py new file mode 100644 index 0000000000..aea238df93 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/measure.py @@ -0,0 +1,149 @@ +from operator import itemgetter +from typing import Callable, Iterable, NamedTuple, 
Optional, TYPE_CHECKING + +from . import errors +from .protocol import is_renderable, rich_cast + +if TYPE_CHECKING: + from .console import Console, ConsoleOptions, RenderableType + + +class Measurement(NamedTuple): + """Stores the minimum and maximum widths (in characters) required to render an object.""" + + minimum: int + """Minimum number of cells required to render.""" + maximum: int + """Maximum number of cells required to render.""" + + @property + def span(self) -> int: + """Get difference between maximum and minimum.""" + return self.maximum - self.minimum + + def normalize(self) -> "Measurement": + """Get measurement that ensures that minimum <= maximum and minimum >= 0 + + Returns: + Measurement: A normalized measurement. + """ + minimum, maximum = self + minimum = min(max(0, minimum), maximum) + return Measurement(max(0, minimum), max(0, max(minimum, maximum))) + + def with_maximum(self, width: int) -> "Measurement": + """Get a RenderableWith where the widths are <= width. + + Args: + width (int): Maximum desired width. + + Returns: + Measurement: New Measurement object. + """ + minimum, maximum = self + return Measurement(min(minimum, width), min(maximum, width)) + + def with_minimum(self, width: int) -> "Measurement": + """Get a RenderableWith where the widths are >= width. + + Args: + width (int): Minimum desired width. + + Returns: + Measurement: New Measurement object. + """ + minimum, maximum = self + width = max(0, width) + return Measurement(max(minimum, width), max(maximum, width)) + + def clamp( + self, min_width: Optional[int] = None, max_width: Optional[int] = None + ) -> "Measurement": + """Clamp a measurement within the specified range. + + Args: + min_width (int): Minimum desired width, or ``None`` for no minimum. Defaults to None. + max_width (int): Maximum desired width, or ``None`` for no maximum. Defaults to None. + + Returns: + Measurement: New Measurement object. + """ + measurement = self + if min_width is not None: + measurement = measurement.with_minimum(min_width) + if max_width is not None: + measurement = measurement.with_maximum(max_width) + return measurement + + @classmethod + def get( + cls, console: "Console", options: "ConsoleOptions", renderable: "RenderableType" + ) -> "Measurement": + """Get a measurement for a renderable. + + Args: + console (~rich.console.Console): Console instance. + options (~rich.console.ConsoleOptions): Console options. + renderable (RenderableType): An object that may be rendered with Rich. + + Raises: + errors.NotRenderableError: If the object is not renderable. + + Returns: + Measurement: Measurement object containing range of character widths required to render the object. 
+ """ + _max_width = options.max_width + if _max_width < 1: + return Measurement(0, 0) + if isinstance(renderable, str): + renderable = console.render_str(renderable, markup=options.markup) + renderable = rich_cast(renderable) + if is_renderable(renderable): + get_console_width: Optional[ + Callable[["Console", "ConsoleOptions"], "Measurement"] + ] = getattr(renderable, "__rich_measure__", None) + if get_console_width is not None: + render_width = ( + get_console_width(console, options) + .normalize() + .with_maximum(_max_width) + ) + if render_width.maximum < 1: + return Measurement(0, 0) + return render_width.normalize() + else: + return Measurement(0, _max_width) + else: + raise errors.NotRenderableError( + f"Unable to get render width for {renderable!r}; " + "a str, Segment, or object with __rich_console__ method is required" + ) + + +def measure_renderables( + console: "Console", + options: "ConsoleOptions", + renderables: Iterable["RenderableType"], +) -> "Measurement": + """Get a measurement that would fit a number of renderables. + + Args: + console (~rich.console.Console): Console instance. + options (~rich.console.ConsoleOptions): Console options. + renderables (Iterable[RenderableType]): One or more renderable objects. + + Returns: + Measurement: Measurement object containing range of character widths required to + contain all given renderables. + """ + if not renderables: + return Measurement(0, 0) + get_measurement = Measurement.get + measurements = [ + get_measurement(console, options, renderable) for renderable in renderables + ] + measured_width = Measurement( + max(measurements, key=itemgetter(0)).minimum, + max(measurements, key=itemgetter(1)).maximum, + ) + return measured_width diff --git a/pipenv/patched/notpip/_vendor/rich/padding.py b/pipenv/patched/notpip/_vendor/rich/padding.py new file mode 100644 index 0000000000..8746090863 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/padding.py @@ -0,0 +1,141 @@ +from typing import cast, List, Optional, Tuple, TYPE_CHECKING, Union + +if TYPE_CHECKING: + from .console import ( + Console, + ConsoleOptions, + RenderableType, + RenderResult, + ) +from .jupyter import JupyterMixin +from .measure import Measurement +from .style import Style +from .segment import Segment + + +PaddingDimensions = Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int, int]] + + +class Padding(JupyterMixin): + """Draw space around content. + + Example: + >>> print(Padding("Hello", (2, 4), style="on blue")) + + Args: + renderable (RenderableType): String or other renderable. + pad (Union[int, Tuple[int]]): Padding for top, right, bottom, and left borders. + May be specified with 1, 2, or 4 integers (CSS style). + style (Union[str, Style], optional): Style for padding characters. Defaults to "none". + expand (bool, optional): Expand padding to fit available width. Defaults to True. + """ + + def __init__( + self, + renderable: "RenderableType", + pad: "PaddingDimensions" = (0, 0, 0, 0), + *, + style: Union[str, Style] = "none", + expand: bool = True, + ): + self.renderable = renderable + self.top, self.right, self.bottom, self.left = self.unpack(pad) + self.style = style + self.expand = expand + + @classmethod + def indent(cls, renderable: "RenderableType", level: int) -> "Padding": + """Make padding instance to render an indent. + + Args: + renderable (RenderableType): String or other renderable. + level (int): Number of characters to indent. + + Returns: + Padding: A Padding instance. 
+ """ + + return Padding(renderable, pad=(0, 0, 0, level), expand=False) + + @staticmethod + def unpack(pad: "PaddingDimensions") -> Tuple[int, int, int, int]: + """Unpack padding specified in CSS style.""" + if isinstance(pad, int): + return (pad, pad, pad, pad) + if len(pad) == 1: + _pad = pad[0] + return (_pad, _pad, _pad, _pad) + if len(pad) == 2: + pad_top, pad_right = cast(Tuple[int, int], pad) + return (pad_top, pad_right, pad_top, pad_right) + if len(pad) == 4: + top, right, bottom, left = cast(Tuple[int, int, int, int], pad) + return (top, right, bottom, left) + raise ValueError(f"1, 2 or 4 integers required for padding; {len(pad)} given") + + def __repr__(self) -> str: + return f"Padding({self.renderable!r}, ({self.top},{self.right},{self.bottom},{self.left}))" + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": + style = console.get_style(self.style) + if self.expand: + width = options.max_width + else: + width = min( + Measurement.get(console, options, self.renderable).maximum + + self.left + + self.right, + options.max_width, + ) + render_options = options.update_width(width - self.left - self.right) + if render_options.height is not None: + render_options = render_options.update_height( + height=render_options.height - self.top - self.bottom + ) + lines = console.render_lines( + self.renderable, render_options, style=style, pad=True + ) + _Segment = Segment + + left = _Segment(" " * self.left, style) if self.left else None + right = ( + [_Segment(f'{" " * self.right}', style), _Segment.line()] + if self.right + else [_Segment.line()] + ) + blank_line: Optional[List[Segment]] = None + if self.top: + blank_line = [_Segment(f'{" " * width}\n', style)] + yield from blank_line * self.top + if left: + for line in lines: + yield left + yield from line + yield from right + else: + for line in lines: + yield from line + yield from right + if self.bottom: + blank_line = blank_line or [_Segment(f'{" " * width}\n', style)] + yield from blank_line * self.bottom + + def __rich_measure__( + self, console: "Console", options: "ConsoleOptions" + ) -> "Measurement": + max_width = options.max_width + extra_width = self.left + self.right + if max_width - extra_width < 1: + return Measurement(max_width, max_width) + measure_min, measure_max = Measurement.get(console, options, self.renderable) + measurement = Measurement(measure_min + extra_width, measure_max + extra_width) + measurement = measurement.with_maximum(max_width) + return measurement + + +if __name__ == "__main__": # pragma: no cover + from pipenv.patched.notpip._vendor.rich import print + + print(Padding("Hello, World", (2, 4), style="on blue")) diff --git a/pipenv/patched/notpip/_vendor/rich/pager.py b/pipenv/patched/notpip/_vendor/rich/pager.py new file mode 100644 index 0000000000..dbfb973e36 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/pager.py @@ -0,0 +1,34 @@ +from abc import ABC, abstractmethod +from typing import Any, Callable + + +class Pager(ABC): + """Base class for a pager.""" + + @abstractmethod + def show(self, content: str) -> None: + """Show content in pager. + + Args: + content (str): Content to be displayed. 
+ """ + + +class SystemPager(Pager): + """Uses the pager installed on the system.""" + + def _pager(self, content: str) -> Any: #  pragma: no cover + return __import__("pydoc").pager(content) + + def show(self, content: str) -> None: + """Use the same pager used by pydoc.""" + self._pager(content) + + +if __name__ == "__main__": # pragma: no cover + from .__main__ import make_test_card + from .console import Console + + console = Console() + with console.pager(styles=True): + console.print(make_test_card()) diff --git a/pipenv/patched/notpip/_vendor/rich/palette.py b/pipenv/patched/notpip/_vendor/rich/palette.py new file mode 100644 index 0000000000..f2b1e25ff1 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/palette.py @@ -0,0 +1,100 @@ +from math import sqrt +from functools import lru_cache +from typing import Sequence, Tuple, TYPE_CHECKING + +from .color_triplet import ColorTriplet + +if TYPE_CHECKING: + from pipenv.patched.notpip._vendor.rich.table import Table + + +class Palette: + """A palette of available colors.""" + + def __init__(self, colors: Sequence[Tuple[int, int, int]]): + self._colors = colors + + def __getitem__(self, number: int) -> ColorTriplet: + return ColorTriplet(*self._colors[number]) + + def __rich__(self) -> "Table": + from pipenv.patched.notpip._vendor.rich.color import Color + from pipenv.patched.notpip._vendor.rich.style import Style + from pipenv.patched.notpip._vendor.rich.text import Text + from pipenv.patched.notpip._vendor.rich.table import Table + + table = Table( + "index", + "RGB", + "Color", + title="Palette", + caption=f"{len(self._colors)} colors", + highlight=True, + caption_justify="right", + ) + for index, color in enumerate(self._colors): + table.add_row( + str(index), + repr(color), + Text(" " * 16, style=Style(bgcolor=Color.from_rgb(*color))), + ) + return table + + # This is somewhat inefficient and needs caching + @lru_cache(maxsize=1024) + def match(self, color: Tuple[int, int, int]) -> int: + """Find a color from a palette that most closely matches a given color. + + Args: + color (Tuple[int, int, int]): RGB components in range 0 > 255. + + Returns: + int: Index of closes matching color. 
+ """ + red1, green1, blue1 = color + _sqrt = sqrt + get_color = self._colors.__getitem__ + + def get_color_distance(index: int) -> float: + """Get the distance to a color.""" + red2, green2, blue2 = get_color(index) + red_mean = (red1 + red2) // 2 + red = red1 - red2 + green = green1 - green2 + blue = blue1 - blue2 + return _sqrt( + (((512 + red_mean) * red * red) >> 8) + + 4 * green * green + + (((767 - red_mean) * blue * blue) >> 8) + ) + + min_index = min(range(len(self._colors)), key=get_color_distance) + return min_index + + +if __name__ == "__main__": # pragma: no cover + import colorsys + from typing import Iterable + from pipenv.patched.notpip._vendor.rich.color import Color + from pipenv.patched.notpip._vendor.rich.console import Console, ConsoleOptions + from pipenv.patched.notpip._vendor.rich.segment import Segment + from pipenv.patched.notpip._vendor.rich.style import Style + + class ColorBox: + def __rich_console__( + self, console: Console, options: ConsoleOptions + ) -> Iterable[Segment]: + height = console.size.height - 3 + for y in range(0, height): + for x in range(options.max_width): + h = x / options.max_width + l = y / (height + 1) + r1, g1, b1 = colorsys.hls_to_rgb(h, l, 1.0) + r2, g2, b2 = colorsys.hls_to_rgb(h, l + (1 / height / 2), 1.0) + bgcolor = Color.from_rgb(r1 * 255, g1 * 255, b1 * 255) + color = Color.from_rgb(r2 * 255, g2 * 255, b2 * 255) + yield Segment("▄", Style(color=color, bgcolor=bgcolor)) + yield Segment.line() + + console = Console() + console.print(ColorBox()) diff --git a/pipenv/patched/notpip/_vendor/rich/panel.py b/pipenv/patched/notpip/_vendor/rich/panel.py new file mode 100644 index 0000000000..151fe5f017 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/panel.py @@ -0,0 +1,250 @@ +from typing import Optional, TYPE_CHECKING + +from .box import Box, ROUNDED + +from .align import AlignMethod +from .jupyter import JupyterMixin +from .measure import Measurement, measure_renderables +from .padding import Padding, PaddingDimensions +from .style import StyleType +from .text import Text, TextType +from .segment import Segment + +if TYPE_CHECKING: + from .console import Console, ConsoleOptions, RenderableType, RenderResult + + +class Panel(JupyterMixin): + """A console renderable that draws a border around its contents. + + Example: + >>> console.print(Panel("Hello, World!")) + + Args: + renderable (RenderableType): A console renderable object. + box (Box, optional): A Box instance that defines the look of the border (see :ref:`appendix_box`. + Defaults to box.ROUNDED. + safe_box (bool, optional): Disable box characters that don't display on windows legacy terminal with *raster* fonts. Defaults to True. + expand (bool, optional): If True the panel will stretch to fill the console + width, otherwise it will be sized to fit the contents. Defaults to True. + style (str, optional): The style of the panel (border and contents). Defaults to "none". + border_style (str, optional): The style of the border. Defaults to "none". + width (Optional[int], optional): Optional width of panel. Defaults to None to auto-detect. + height (Optional[int], optional): Optional height of panel. Defaults to None to auto-detect. + padding (Optional[PaddingDimensions]): Optional padding around renderable. Defaults to 0. + highlight (bool, optional): Enable automatic highlighting of panel title (if str). Defaults to False. 
+ """ + + def __init__( + self, + renderable: "RenderableType", + box: Box = ROUNDED, + *, + title: Optional[TextType] = None, + title_align: AlignMethod = "center", + subtitle: Optional[TextType] = None, + subtitle_align: AlignMethod = "center", + safe_box: Optional[bool] = None, + expand: bool = True, + style: StyleType = "none", + border_style: StyleType = "none", + width: Optional[int] = None, + height: Optional[int] = None, + padding: PaddingDimensions = (0, 1), + highlight: bool = False, + ) -> None: + self.renderable = renderable + self.box = box + self.title = title + self.title_align: AlignMethod = title_align + self.subtitle = subtitle + self.subtitle_align = subtitle_align + self.safe_box = safe_box + self.expand = expand + self.style = style + self.border_style = border_style + self.width = width + self.height = height + self.padding = padding + self.highlight = highlight + + @classmethod + def fit( + cls, + renderable: "RenderableType", + box: Box = ROUNDED, + *, + title: Optional[TextType] = None, + title_align: AlignMethod = "center", + subtitle: Optional[TextType] = None, + subtitle_align: AlignMethod = "center", + safe_box: Optional[bool] = None, + style: StyleType = "none", + border_style: StyleType = "none", + width: Optional[int] = None, + padding: PaddingDimensions = (0, 1), + ) -> "Panel": + """An alternative constructor that sets expand=False.""" + return cls( + renderable, + box, + title=title, + title_align=title_align, + subtitle=subtitle, + subtitle_align=subtitle_align, + safe_box=safe_box, + style=style, + border_style=border_style, + width=width, + padding=padding, + expand=False, + ) + + @property + def _title(self) -> Optional[Text]: + if self.title: + title_text = ( + Text.from_markup(self.title) + if isinstance(self.title, str) + else self.title.copy() + ) + title_text.end = "" + title_text.plain = title_text.plain.replace("\n", " ") + title_text.no_wrap = True + title_text.expand_tabs() + title_text.pad(1) + return title_text + return None + + @property + def _subtitle(self) -> Optional[Text]: + if self.subtitle: + subtitle_text = ( + Text.from_markup(self.subtitle) + if isinstance(self.subtitle, str) + else self.subtitle.copy() + ) + subtitle_text.end = "" + subtitle_text.plain = subtitle_text.plain.replace("\n", " ") + subtitle_text.no_wrap = True + subtitle_text.expand_tabs() + subtitle_text.pad(1) + return subtitle_text + return None + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": + _padding = Padding.unpack(self.padding) + renderable = ( + Padding(self.renderable, _padding) if any(_padding) else self.renderable + ) + style = console.get_style(self.style) + border_style = style + console.get_style(self.border_style) + width = ( + options.max_width + if self.width is None + else min(options.max_width, self.width) + ) + + safe_box: bool = console.safe_box if self.safe_box is None else self.safe_box + box = self.box.substitute(options, safe=safe_box) + + title_text = self._title + if title_text is not None: + title_text.style = border_style + + child_width = ( + width - 2 + if self.expand + else console.measure( + renderable, options=options.update_width(width - 2) + ).maximum + ) + child_height = self.height or options.height or None + if child_height: + child_height -= 2 + if title_text is not None: + child_width = min( + options.max_width - 2, max(child_width, title_text.cell_len + 2) + ) + + width = child_width + 2 + child_options = options.update( + width=child_width, height=child_height, 
highlight=self.highlight + ) + lines = console.render_lines(renderable, child_options, style=style) + + line_start = Segment(box.mid_left, border_style) + line_end = Segment(f"{box.mid_right}", border_style) + new_line = Segment.line() + if title_text is None or width <= 4: + yield Segment(box.get_top([width - 2]), border_style) + else: + title_text.align(self.title_align, width - 4, character=box.top) + yield Segment(box.top_left + box.top, border_style) + yield from console.render(title_text) + yield Segment(box.top + box.top_right, border_style) + + yield new_line + for line in lines: + yield line_start + yield from line + yield line_end + yield new_line + + subtitle_text = self._subtitle + if subtitle_text is not None: + subtitle_text.style = border_style + + if subtitle_text is None or width <= 4: + yield Segment(box.get_bottom([width - 2]), border_style) + else: + subtitle_text.align(self.subtitle_align, width - 4, character=box.bottom) + yield Segment(box.bottom_left + box.bottom, border_style) + yield from console.render(subtitle_text) + yield Segment(box.bottom + box.bottom_right, border_style) + + yield new_line + + def __rich_measure__( + self, console: "Console", options: "ConsoleOptions" + ) -> "Measurement": + _title = self._title + _, right, _, left = Padding.unpack(self.padding) + padding = left + right + renderables = [self.renderable, _title] if _title else [self.renderable] + + if self.width is None: + width = ( + measure_renderables( + console, + options.update_width(options.max_width - padding - 2), + renderables, + ).maximum + + padding + + 2 + ) + else: + width = self.width + return Measurement(width, width) + + +if __name__ == "__main__": # pragma: no cover + from .console import Console + + c = Console() + + from .padding import Padding + from .box import ROUNDED, DOUBLE + + p = Panel( + "Hello, World!", + title="rich.Panel", + style="white on blue", + box=DOUBLE, + padding=1, + ) + + c.print() + c.print(p) diff --git a/pipenv/patched/notpip/_vendor/rich/pretty.py b/pipenv/patched/notpip/_vendor/rich/pretty.py new file mode 100644 index 0000000000..4cbaebd252 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/pretty.py @@ -0,0 +1,903 @@ +import builtins +import dataclasses +import inspect +import os +import re +import sys +from array import array +from collections import Counter, UserDict, UserList, defaultdict, deque +from dataclasses import dataclass, fields, is_dataclass +from inspect import isclass +from itertools import islice +from types import MappingProxyType +from typing import ( + TYPE_CHECKING, + Any, + Callable, + DefaultDict, + Dict, + Iterable, + List, + Optional, + Set, + Tuple, + Union, +) + +from pipenv.patched.notpip._vendor.rich.repr import RichReprResult + +try: + import pipenv.vendor.attr as _attr_module +except ImportError: # pragma: no cover + _attr_module = None # type: ignore + + +from . 
import get_console +from ._loop import loop_last +from ._pick import pick_bool +from .abc import RichRenderable +from .cells import cell_len +from .highlighter import ReprHighlighter +from .jupyter import JupyterMixin, JupyterRenderable +from .measure import Measurement +from .text import Text + +if TYPE_CHECKING: + from .console import ( + Console, + ConsoleOptions, + HighlighterType, + JustifyMethod, + OverflowMethod, + RenderResult, + ) + + +def _is_attr_object(obj: Any) -> bool: + """Check if an object was created with attrs module.""" + return _attr_module is not None and _attr_module.has(type(obj)) + + +def _get_attr_fields(obj: Any) -> Iterable["_attr_module.Attribute[Any]"]: + """Get fields for an attrs object.""" + return _attr_module.fields(type(obj)) if _attr_module is not None else [] + + +def _is_dataclass_repr(obj: object) -> bool: + """Check if an instance of a dataclass contains the default repr. + + Args: + obj (object): A dataclass instance. + + Returns: + bool: True if the default repr is used, False if there is a custom repr. + """ + # Digging in to a lot of internals here + # Catching all exceptions in case something is missing on a non CPython implementation + try: + return obj.__repr__.__code__.co_filename == dataclasses.__file__ + except Exception: # pragma: no coverage + return False + + +def _ipy_display_hook( + value: Any, + console: Optional["Console"] = None, + overflow: "OverflowMethod" = "ignore", + crop: bool = False, + indent_guides: bool = False, + max_length: Optional[int] = None, + max_string: Optional[int] = None, + expand_all: bool = False, +) -> None: + from .console import ConsoleRenderable # needed here to prevent circular import + + # always skip rich generated jupyter renderables or None values + if isinstance(value, JupyterRenderable) or value is None: + return + + console = console or get_console() + if console.is_jupyter: + # Delegate rendering to IPython if the object (and IPython) supports it + # https://ipython.readthedocs.io/en/stable/config/integrating.html#rich-display + ipython_repr_methods = [ + "_repr_html_", + "_repr_markdown_", + "_repr_json_", + "_repr_latex_", + "_repr_jpeg_", + "_repr_png_", + "_repr_svg_", + "_repr_mimebundle_", + ] + for repr_method in ipython_repr_methods: + method = getattr(value, repr_method, None) + if inspect.ismethod(method): + # Calling the method ourselves isn't ideal. The interface for the `_repr_*_` methods + # specifies that if they return None, then they should not be rendered + # by the notebook. + try: + repr_result = method() + except Exception: + continue # If the method raises, treat it as if it doesn't exist, try any others + if repr_result is not None: + return # Delegate rendering to IPython + + # certain renderables should start on a new line + if isinstance(value, ConsoleRenderable): + console.line() + + console.print( + value + if isinstance(value, RichRenderable) + else Pretty( + value, + overflow=overflow, + indent_guides=indent_guides, + max_length=max_length, + max_string=max_string, + expand_all=expand_all, + margin=12, + ), + crop=crop, + new_line_start=True, + ) + + +def install( + console: Optional["Console"] = None, + overflow: "OverflowMethod" = "ignore", + crop: bool = False, + indent_guides: bool = False, + max_length: Optional[int] = None, + max_string: Optional[int] = None, + expand_all: bool = False, +) -> None: + """Install automatic pretty printing in the Python REPL. + + Args: + console (Console, optional): Console instance or ``None`` to use global console. 
Defaults to None. + overflow (Optional[OverflowMethod], optional): Overflow method. Defaults to "ignore". + crop (Optional[bool], optional): Enable cropping of long lines. Defaults to False. + indent_guides (bool, optional): Enable indentation guides. Defaults to False. + max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation. + Defaults to None. + max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None. + expand_all (bool, optional): Expand all containers. Defaults to False. + max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100. + """ + from pipenv.patched.notpip._vendor.rich import get_console + + console = console or get_console() + assert console is not None + + def display_hook(value: Any) -> None: + """Replacement sys.displayhook which prettifies objects with Rich.""" + if value is not None: + assert console is not None + builtins._ = None # type: ignore + console.print( + value + if isinstance(value, RichRenderable) + else Pretty( + value, + overflow=overflow, + indent_guides=indent_guides, + max_length=max_length, + max_string=max_string, + expand_all=expand_all, + ), + crop=crop, + ) + builtins._ = value # type: ignore + + try: # pragma: no cover + ip = get_ipython() # type: ignore + from IPython.core.formatters import BaseFormatter + + class RichFormatter(BaseFormatter): # type: ignore + pprint: bool = True + + def __call__(self, value: Any) -> Any: + if self.pprint: + return _ipy_display_hook( + value, + console=get_console(), + overflow=overflow, + indent_guides=indent_guides, + max_length=max_length, + max_string=max_string, + expand_all=expand_all, + ) + else: + return repr(value) + + # replace plain text formatter with rich formatter + rich_formatter = RichFormatter() + ip.display_formatter.formatters["text/plain"] = rich_formatter + except Exception: + sys.displayhook = display_hook + + +class Pretty(JupyterMixin): + """A rich renderable that pretty prints an object. + + Args: + _object (Any): An object to pretty print. + highlighter (HighlighterType, optional): Highlighter object to apply to result, or None for ReprHighlighter. Defaults to None. + indent_size (int, optional): Number of spaces in indent. Defaults to 4. + justify (JustifyMethod, optional): Justify method, or None for default. Defaults to None. + overflow (OverflowMethod, optional): Overflow method, or None for default. Defaults to None. + no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to False. + indent_guides (bool, optional): Enable indentation guides. Defaults to False. + max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation. + Defaults to None. + max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None. + max_depth (int, optional): Maximum depth of nested data structures, or None for no maximum. Defaults to None. + expand_all (bool, optional): Expand all containers. Defaults to False. + margin (int, optional): Subtrace a margin from width to force containers to expand earlier. Defaults to 0. + insert_line (bool, optional): Insert a new line if the output has multiple new lines. Defaults to False. 
+ """ + + def __init__( + self, + _object: Any, + highlighter: Optional["HighlighterType"] = None, + *, + indent_size: int = 4, + justify: Optional["JustifyMethod"] = None, + overflow: Optional["OverflowMethod"] = None, + no_wrap: Optional[bool] = False, + indent_guides: bool = False, + max_length: Optional[int] = None, + max_string: Optional[int] = None, + max_depth: Optional[int] = None, + expand_all: bool = False, + margin: int = 0, + insert_line: bool = False, + ) -> None: + self._object = _object + self.highlighter = highlighter or ReprHighlighter() + self.indent_size = indent_size + self.justify: Optional["JustifyMethod"] = justify + self.overflow: Optional["OverflowMethod"] = overflow + self.no_wrap = no_wrap + self.indent_guides = indent_guides + self.max_length = max_length + self.max_string = max_string + self.max_depth = max_depth + self.expand_all = expand_all + self.margin = margin + self.insert_line = insert_line + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": + pretty_str = pretty_repr( + self._object, + max_width=options.max_width - self.margin, + indent_size=self.indent_size, + max_length=self.max_length, + max_string=self.max_string, + max_depth=self.max_depth, + expand_all=self.expand_all, + ) + pretty_text = Text( + pretty_str, + justify=self.justify or options.justify, + overflow=self.overflow or options.overflow, + no_wrap=pick_bool(self.no_wrap, options.no_wrap), + style="pretty", + ) + pretty_text = ( + self.highlighter(pretty_text) + if pretty_text + else Text( + f"{type(self._object)}.__repr__ returned empty string", + style="dim italic", + ) + ) + if self.indent_guides and not options.ascii_only: + pretty_text = pretty_text.with_indent_guides( + self.indent_size, style="repr.indent" + ) + if self.insert_line and "\n" in pretty_text: + yield "" + yield pretty_text + + def __rich_measure__( + self, console: "Console", options: "ConsoleOptions" + ) -> "Measurement": + pretty_str = pretty_repr( + self._object, + max_width=options.max_width, + indent_size=self.indent_size, + max_length=self.max_length, + max_string=self.max_string, + ) + text_width = ( + max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0 + ) + return Measurement(text_width, text_width) + + +def _get_braces_for_defaultdict(_object: DefaultDict[Any, Any]) -> Tuple[str, str, str]: + return ( + f"defaultdict({_object.default_factory!r}, {{", + "})", + f"defaultdict({_object.default_factory!r}, {{}})", + ) + + +def _get_braces_for_array(_object: "array[Any]") -> Tuple[str, str, str]: + return (f"array({_object.typecode!r}, [", "])", "array({_object.typecode!r})") + + +_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = { + os._Environ: lambda _object: ("environ({", "})", "environ({})"), + array: _get_braces_for_array, + defaultdict: _get_braces_for_defaultdict, + Counter: lambda _object: ("Counter({", "})", "Counter()"), + deque: lambda _object: ("deque([", "])", "deque()"), + dict: lambda _object: ("{", "}", "{}"), + UserDict: lambda _object: ("{", "}", "{}"), + frozenset: lambda _object: ("frozenset({", "})", "frozenset()"), + list: lambda _object: ("[", "]", "[]"), + UserList: lambda _object: ("[", "]", "[]"), + set: lambda _object: ("{", "}", "set()"), + tuple: lambda _object: ("(", ")", "()"), + MappingProxyType: lambda _object: ("mappingproxy({", "})", "mappingproxy({})"), +} +_CONTAINERS = tuple(_BRACES.keys()) +_MAPPING_CONTAINERS = (dict, os._Environ, MappingProxyType, UserDict) + + +def is_expandable(obj: 
Any) -> bool: + """Check if an object may be expanded by pretty print.""" + return ( + isinstance(obj, _CONTAINERS) + or (is_dataclass(obj)) + or (hasattr(obj, "__rich_repr__")) + or _is_attr_object(obj) + ) and not isclass(obj) + + +@dataclass +class Node: + """A node in a repr tree. May be atomic or a container.""" + + key_repr: str = "" + value_repr: str = "" + open_brace: str = "" + close_brace: str = "" + empty: str = "" + last: bool = False + is_tuple: bool = False + children: Optional[List["Node"]] = None + key_separator = ": " + separator: str = ", " + + def iter_tokens(self) -> Iterable[str]: + """Generate tokens for this node.""" + if self.key_repr: + yield self.key_repr + yield self.key_separator + if self.value_repr: + yield self.value_repr + elif self.children is not None: + if self.children: + yield self.open_brace + if self.is_tuple and len(self.children) == 1: + yield from self.children[0].iter_tokens() + yield "," + else: + for child in self.children: + yield from child.iter_tokens() + if not child.last: + yield self.separator + yield self.close_brace + else: + yield self.empty + + def check_length(self, start_length: int, max_length: int) -> bool: + """Check the length fits within a limit. + + Args: + start_length (int): Starting length of the line (indent, prefix, suffix). + max_length (int): Maximum length. + + Returns: + bool: True if the node can be rendered within max length, otherwise False. + """ + total_length = start_length + for token in self.iter_tokens(): + total_length += cell_len(token) + if total_length > max_length: + return False + return True + + def __str__(self) -> str: + repr_text = "".join(self.iter_tokens()) + return repr_text + + def render( + self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False + ) -> str: + """Render the node to a pretty repr. + + Args: + max_width (int, optional): Maximum width of the repr. Defaults to 80. + indent_size (int, optional): Size of indents. Defaults to 4. + expand_all (bool, optional): Expand all levels. Defaults to False. + + Returns: + str: A repr string of the original object. 
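+
+        Example (illustrative; ``traverse`` below builds the node tree):
+            >>> node = traverse({"name": "example"})
+            >>> print(node.render(max_width=20))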
+ """ + lines = [_Line(node=self, is_root=True)] + line_no = 0 + while line_no < len(lines): + line = lines[line_no] + if line.expandable and not line.expanded: + if expand_all or not line.check_length(max_width): + lines[line_no : line_no + 1] = line.expand(indent_size) + line_no += 1 + + repr_str = "\n".join(str(line) for line in lines) + return repr_str + + +@dataclass +class _Line: + """A line in repr output.""" + + parent: Optional["_Line"] = None + is_root: bool = False + node: Optional[Node] = None + text: str = "" + suffix: str = "" + whitespace: str = "" + expanded: bool = False + last: bool = False + + @property + def expandable(self) -> bool: + """Check if the line may be expanded.""" + return bool(self.node is not None and self.node.children) + + def check_length(self, max_length: int) -> bool: + """Check this line fits within a given number of cells.""" + start_length = ( + len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix) + ) + assert self.node is not None + return self.node.check_length(start_length, max_length) + + def expand(self, indent_size: int) -> Iterable["_Line"]: + """Expand this line by adding children on their own line.""" + node = self.node + assert node is not None + whitespace = self.whitespace + assert node.children + if node.key_repr: + new_line = yield _Line( + text=f"{node.key_repr}{node.key_separator}{node.open_brace}", + whitespace=whitespace, + ) + else: + new_line = yield _Line(text=node.open_brace, whitespace=whitespace) + child_whitespace = self.whitespace + " " * indent_size + tuple_of_one = node.is_tuple and len(node.children) == 1 + for last, child in loop_last(node.children): + separator = "," if tuple_of_one else node.separator + line = _Line( + parent=new_line, + node=child, + whitespace=child_whitespace, + suffix=separator, + last=last and not tuple_of_one, + ) + yield line + + yield _Line( + text=node.close_brace, + whitespace=whitespace, + suffix=self.suffix, + last=self.last, + ) + + def __str__(self) -> str: + if self.last: + return f"{self.whitespace}{self.text}{self.node or ''}" + else: + return ( + f"{self.whitespace}{self.text}{self.node or ''}{self.suffix.rstrip()}" + ) + + +def traverse( + _object: Any, + max_length: Optional[int] = None, + max_string: Optional[int] = None, + max_depth: Optional[int] = None, +) -> Node: + """Traverse object and generate a tree. + + Args: + _object (Any): Object to be traversed. + max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation. + Defaults to None. + max_string (int, optional): Maximum length of string before truncating, or None to disable truncating. + Defaults to None. + max_depth (int, optional): Maximum depth of data structures, or None for no maximum. + Defaults to None. + + Returns: + Node: The root of a tree structure which can be used to render a pretty repr. 
+ """ + + def to_repr(obj: Any) -> str: + """Get repr string for an object, but catch errors.""" + if ( + max_string is not None + and isinstance(obj, (bytes, str)) + and len(obj) > max_string + ): + truncated = len(obj) - max_string + obj_repr = f"{obj[:max_string]!r}+{truncated}" + else: + try: + obj_repr = repr(obj) + except Exception as error: + obj_repr = f"" + return obj_repr + + visited_ids: Set[int] = set() + push_visited = visited_ids.add + pop_visited = visited_ids.remove + + def _traverse(obj: Any, root: bool = False, depth: int = 0) -> Node: + """Walk the object depth first.""" + + obj_type = type(obj) + py_version = (sys.version_info.major, sys.version_info.minor) + children: List[Node] + reached_max_depth = max_depth is not None and depth >= max_depth + + def iter_rich_args(rich_args: Any) -> Iterable[Union[Any, Tuple[str, Any]]]: + for arg in rich_args: + if isinstance(arg, tuple): + if len(arg) == 3: + key, child, default = arg + if default == child: + continue + yield key, child + elif len(arg) == 2: + key, child = arg + yield key, child + elif len(arg) == 1: + yield arg[0] + else: + yield arg + + try: + fake_attributes = hasattr( + obj, "awehoi234_wdfjwljet234_234wdfoijsdfmmnxpi492" + ) + except Exception: + fake_attributes = False + + rich_repr_result: Optional[RichReprResult] = None + if not fake_attributes: + try: + if hasattr(obj, "__rich_repr__") and not isclass(obj): + rich_repr_result = obj.__rich_repr__() + except Exception: + pass + + if rich_repr_result is not None: + angular = getattr(obj.__rich_repr__, "angular", False) + args = list(iter_rich_args(rich_repr_result)) + class_name = obj.__class__.__name__ + + if args: + children = [] + append = children.append + + if reached_max_depth: + node = Node(value_repr=f"...") + else: + if angular: + node = Node( + open_brace=f"<{class_name} ", + close_brace=">", + children=children, + last=root, + separator=" ", + ) + else: + node = Node( + open_brace=f"{class_name}(", + close_brace=")", + children=children, + last=root, + ) + for last, arg in loop_last(args): + if isinstance(arg, tuple): + key, child = arg + child_node = _traverse(child, depth=depth + 1) + child_node.last = last + child_node.key_repr = key + child_node.key_separator = "=" + append(child_node) + else: + child_node = _traverse(arg, depth=depth + 1) + child_node.last = last + append(child_node) + else: + node = Node( + value_repr=f"<{class_name}>" if angular else f"{class_name}()", + children=[], + last=root, + ) + elif _is_attr_object(obj) and not fake_attributes: + children = [] + append = children.append + + attr_fields = _get_attr_fields(obj) + if attr_fields: + if reached_max_depth: + node = Node(value_repr=f"...") + else: + node = Node( + open_brace=f"{obj.__class__.__name__}(", + close_brace=")", + children=children, + last=root, + ) + + def iter_attrs() -> Iterable[ + Tuple[str, Any, Optional[Callable[[Any], str]]] + ]: + """Iterate over attr fields and values.""" + for attr in attr_fields: + if attr.repr: + try: + value = getattr(obj, attr.name) + except Exception as error: + # Can happen, albeit rarely + yield (attr.name, error, None) + else: + yield ( + attr.name, + value, + attr.repr if callable(attr.repr) else None, + ) + + for last, (name, value, repr_callable) in loop_last(iter_attrs()): + if repr_callable: + child_node = Node(value_repr=str(repr_callable(value))) + else: + child_node = _traverse(value, depth=depth + 1) + child_node.last = last + child_node.key_repr = name + child_node.key_separator = "=" + append(child_node) + else: + 
node = Node( + value_repr=f"{obj.__class__.__name__}()", children=[], last=root + ) + + elif ( + is_dataclass(obj) + and not isinstance(obj, type) + and not fake_attributes + and (_is_dataclass_repr(obj) or py_version == (3, 6)) + ): + obj_id = id(obj) + if obj_id in visited_ids: + # Recursion detected + return Node(value_repr="...") + push_visited(obj_id) + + children = [] + append = children.append + if reached_max_depth: + node = Node(value_repr=f"...") + else: + node = Node( + open_brace=f"{obj.__class__.__name__}(", + close_brace=")", + children=children, + last=root, + ) + + for last, field in loop_last( + field for field in fields(obj) if field.repr + ): + child_node = _traverse(getattr(obj, field.name), depth=depth + 1) + child_node.key_repr = field.name + child_node.last = last + child_node.key_separator = "=" + append(child_node) + + pop_visited(obj_id) + + elif isinstance(obj, _CONTAINERS): + for container_type in _CONTAINERS: + if isinstance(obj, container_type): + obj_type = container_type + break + + obj_id = id(obj) + if obj_id in visited_ids: + # Recursion detected + return Node(value_repr="...") + push_visited(obj_id) + + open_brace, close_brace, empty = _BRACES[obj_type](obj) + + if reached_max_depth: + node = Node(value_repr=f"...", last=root) + elif obj_type.__repr__ != type(obj).__repr__: + node = Node(value_repr=to_repr(obj), last=root) + elif obj: + children = [] + node = Node( + open_brace=open_brace, + close_brace=close_brace, + children=children, + last=root, + ) + append = children.append + num_items = len(obj) + last_item_index = num_items - 1 + + if isinstance(obj, _MAPPING_CONTAINERS): + iter_items = iter(obj.items()) + if max_length is not None: + iter_items = islice(iter_items, max_length) + for index, (key, child) in enumerate(iter_items): + child_node = _traverse(child, depth=depth + 1) + child_node.key_repr = to_repr(key) + child_node.last = index == last_item_index + append(child_node) + else: + iter_values = iter(obj) + if max_length is not None: + iter_values = islice(iter_values, max_length) + for index, child in enumerate(iter_values): + child_node = _traverse(child, depth=depth + 1) + child_node.last = index == last_item_index + append(child_node) + if max_length is not None and num_items > max_length: + append(Node(value_repr=f"... +{num_items-max_length}", last=True)) + else: + node = Node(empty=empty, children=[], last=root) + + pop_visited(obj_id) + else: + node = Node(value_repr=to_repr(obj), last=root) + node.is_tuple = isinstance(obj, tuple) + return node + + node = _traverse(_object, root=True) + return node + + +def pretty_repr( + _object: Any, + *, + max_width: int = 80, + indent_size: int = 4, + max_length: Optional[int] = None, + max_string: Optional[int] = None, + max_depth: Optional[int] = None, + expand_all: bool = False, +) -> str: + """Prettify repr string by expanding on to new lines to fit within a given width. + + Args: + _object (Any): Object to repr. + max_width (int, optional): Desired maximum width of repr string. Defaults to 80. + indent_size (int, optional): Number of spaces to indent. Defaults to 4. + max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation. + Defaults to None. + max_string (int, optional): Maximum length of string before truncating, or None to disable truncating. + Defaults to None. + max_depth (int, optional): Maximum depth of nested data structure, or None for no depth. + Defaults to None. 
+ expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False. + + Returns: + str: A possibly multi-line representation of the object. + """ + + if isinstance(_object, Node): + node = _object + else: + node = traverse( + _object, max_length=max_length, max_string=max_string, max_depth=max_depth + ) + repr_str = node.render( + max_width=max_width, indent_size=indent_size, expand_all=expand_all + ) + return repr_str + + +def pprint( + _object: Any, + *, + console: Optional["Console"] = None, + indent_guides: bool = True, + max_length: Optional[int] = None, + max_string: Optional[int] = None, + max_depth: Optional[int] = None, + expand_all: bool = False, +) -> None: + """A convenience function for pretty printing. + + Args: + _object (Any): Object to pretty print. + console (Console, optional): Console instance, or None to use default. Defaults to None. + max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation. + Defaults to None. + max_string (int, optional): Maximum length of strings before truncating, or None to disable. Defaults to None. + max_depth (int, optional): Maximum depth for nested data structures, or None for unlimited depth. Defaults to None. + indent_guides (bool, optional): Enable indentation guides. Defaults to True. + expand_all (bool, optional): Expand all containers. Defaults to False. + """ + _console = get_console() if console is None else console + _console.print( + Pretty( + _object, + max_length=max_length, + max_string=max_string, + max_depth=max_depth, + indent_guides=indent_guides, + expand_all=expand_all, + overflow="ignore", + ), + soft_wrap=True, + ) + + +if __name__ == "__main__": # pragma: no cover + + class BrokenRepr: + def __repr__(self) -> str: + 1 / 0 + return "this will fail" + + d = defaultdict(int) + d["foo"] = 5 + data = { + "foo": [ + 1, + "Hello World!", + 100.123, + 323.232, + 432324.0, + {5, 6, 7, (1, 2, 3, 4), 8}, + ], + "bar": frozenset({1, 2, 3}), + "defaultdict": defaultdict( + list, {"crumble": ["apple", "rhubarb", "butter", "sugar", "flour"]} + ), + "counter": Counter( + [ + "apple", + "orange", + "pear", + "kumquat", + "kumquat", + "durian" * 100, + ] + ), + "atomic": (False, True, None), + "Broken": BrokenRepr(), + } + data["foo"].append(data) # type: ignore + + from pipenv.patched.notpip._vendor.rich import print + + print(Pretty(data, indent_guides=True, max_string=20)) diff --git a/pipenv/patched/notpip/_vendor/rich/progress.py b/pipenv/patched/notpip/_vendor/rich/progress.py new file mode 100644 index 0000000000..1f670db438 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/progress.py @@ -0,0 +1,1036 @@ +from abc import ABC, abstractmethod +from collections import deque +from collections.abc import Sized +from dataclasses import dataclass, field +from datetime import timedelta +from math import ceil +from threading import Event, RLock, Thread +from types import TracebackType +from typing import ( + Any, + Callable, + Deque, + Dict, + Iterable, + List, + NamedTuple, + NewType, + Optional, + Sequence, + Tuple, + Type, + TypeVar, + Union, +) + +from . 
import filesize, get_console +from .console import Console, JustifyMethod, RenderableType, Group +from .highlighter import Highlighter +from .jupyter import JupyterMixin +from .live import Live +from .progress_bar import ProgressBar +from .spinner import Spinner +from .style import StyleType +from .table import Column, Table +from .text import Text, TextType + +TaskID = NewType("TaskID", int) + +ProgressType = TypeVar("ProgressType") + +GetTimeCallable = Callable[[], float] + + +class _TrackThread(Thread): + """A thread to periodically update progress.""" + + def __init__(self, progress: "Progress", task_id: "TaskID", update_period: float): + self.progress = progress + self.task_id = task_id + self.update_period = update_period + self.done = Event() + + self.completed = 0 + super().__init__() + + def run(self) -> None: + task_id = self.task_id + advance = self.progress.advance + update_period = self.update_period + last_completed = 0 + wait = self.done.wait + while not wait(update_period): + completed = self.completed + if last_completed != completed: + advance(task_id, completed - last_completed) + last_completed = completed + + self.progress.update(self.task_id, completed=self.completed, refresh=True) + + def __enter__(self) -> "_TrackThread": + self.start() + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + self.done.set() + self.join() + + +def track( + sequence: Union[Sequence[ProgressType], Iterable[ProgressType]], + description: str = "Working...", + total: Optional[float] = None, + auto_refresh: bool = True, + console: Optional[Console] = None, + transient: bool = False, + get_time: Optional[Callable[[], float]] = None, + refresh_per_second: float = 10, + style: StyleType = "bar.back", + complete_style: StyleType = "bar.complete", + finished_style: StyleType = "bar.finished", + pulse_style: StyleType = "bar.pulse", + update_period: float = 0.1, + disable: bool = False, +) -> Iterable[ProgressType]: + """Track progress by iterating over a sequence. + + Args: + sequence (Iterable[ProgressType]): A sequence (must support "len") you wish to iterate over. + description (str, optional): Description of task show next to progress bar. Defaults to "Working". + total: (float, optional): Total number of steps. Default is len(sequence). + auto_refresh (bool, optional): Automatic refresh, disable to force a refresh after each iteration. Default is True. + transient: (bool, optional): Clear the progress on exit. Defaults to False. + console (Console, optional): Console to write to. Default creates internal Console instance. + refresh_per_second (float): Number of times per second to refresh the progress information. Defaults to 10. + style (StyleType, optional): Style for the bar background. Defaults to "bar.back". + complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete". + finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.done". + pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse". + update_period (float, optional): Minimum time (in seconds) between calls to update(). Defaults to 0.1. + disable (bool, optional): Disable display of progress. + Returns: + Iterable[ProgressType]: An iterable of the values in the sequence. 
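+
+    Example (illustrative usage with a trivial loop body):
+        >>> import time
+        >>> for n in track(range(10), description="Processing..."):
+        ...     time.sleep(0.1)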
+ + """ + + columns: List["ProgressColumn"] = ( + [TextColumn("[progress.description]{task.description}")] if description else [] + ) + columns.extend( + ( + BarColumn( + style=style, + complete_style=complete_style, + finished_style=finished_style, + pulse_style=pulse_style, + ), + TextColumn("[progress.percentage]{task.percentage:>3.0f}%"), + TimeRemainingColumn(), + ) + ) + progress = Progress( + *columns, + auto_refresh=auto_refresh, + console=console, + transient=transient, + get_time=get_time, + refresh_per_second=refresh_per_second or 10, + disable=disable, + ) + + with progress: + yield from progress.track( + sequence, total=total, description=description, update_period=update_period + ) + + +class ProgressColumn(ABC): + """Base class for a widget to use in progress display.""" + + max_refresh: Optional[float] = None + + def __init__(self, table_column: Optional[Column] = None) -> None: + self._table_column = table_column + self._renderable_cache: Dict[TaskID, Tuple[float, RenderableType]] = {} + self._update_time: Optional[float] = None + + def get_table_column(self) -> Column: + """Get a table column, used to build tasks table.""" + return self._table_column or Column() + + def __call__(self, task: "Task") -> RenderableType: + """Called by the Progress object to return a renderable for the given task. + + Args: + task (Task): An object containing information regarding the task. + + Returns: + RenderableType: Anything renderable (including str). + """ + current_time = task.get_time() + if self.max_refresh is not None and not task.completed: + try: + timestamp, renderable = self._renderable_cache[task.id] + except KeyError: + pass + else: + if timestamp + self.max_refresh > current_time: + return renderable + + renderable = self.render(task) + self._renderable_cache[task.id] = (current_time, renderable) + return renderable + + @abstractmethod + def render(self, task: "Task") -> RenderableType: + """Should return a renderable object.""" + + +class RenderableColumn(ProgressColumn): + """A column to insert an arbitrary column. + + Args: + renderable (RenderableType, optional): Any renderable. Defaults to empty string. + """ + + def __init__( + self, renderable: RenderableType = "", *, table_column: Optional[Column] = None + ): + self.renderable = renderable + super().__init__(table_column=table_column) + + def render(self, task: "Task") -> RenderableType: + return self.renderable + + +class SpinnerColumn(ProgressColumn): + """A column with a 'spinner' animation. + + Args: + spinner_name (str, optional): Name of spinner animation. Defaults to "dots". + style (StyleType, optional): Style of spinner. Defaults to "progress.spinner". + speed (float, optional): Speed factor of spinner. Defaults to 1.0. + finished_text (TextType, optional): Text used when task is finished. Defaults to " ". + """ + + def __init__( + self, + spinner_name: str = "dots", + style: Optional[StyleType] = "progress.spinner", + speed: float = 1.0, + finished_text: TextType = " ", + table_column: Optional[Column] = None, + ): + self.spinner = Spinner(spinner_name, style=style, speed=speed) + self.finished_text = ( + Text.from_markup(finished_text) + if isinstance(finished_text, str) + else finished_text + ) + super().__init__(table_column=table_column) + + def set_spinner( + self, + spinner_name: str, + spinner_style: Optional[StyleType] = "progress.spinner", + speed: float = 1.0, + ) -> None: + """Set a new spinner. + + Args: + spinner_name (str): Spinner name, see python -m rich.spinner. 
spinner_style (Optional[StyleType], optional): Spinner style. Defaults to "progress.spinner".
+            speed (float, optional): Speed factor of spinner. Defaults to 1.0.
+        """
+        self.spinner = Spinner(spinner_name, style=spinner_style, speed=speed)
+
+    def render(self, task: "Task") -> RenderableType:
+        text = (
+            self.finished_text
+            if task.finished
+            else self.spinner.render(task.get_time())
+        )
+        return text
+
+
+class TextColumn(ProgressColumn):
+    """A column containing text."""
+
+    def __init__(
+        self,
+        text_format: str,
+        style: StyleType = "none",
+        justify: JustifyMethod = "left",
+        markup: bool = True,
+        highlighter: Optional[Highlighter] = None,
+        table_column: Optional[Column] = None,
+    ) -> None:
+        self.text_format = text_format
+        self.justify: JustifyMethod = justify
+        self.style = style
+        self.markup = markup
+        self.highlighter = highlighter
+        super().__init__(table_column=table_column or Column(no_wrap=True))
+
+    def render(self, task: "Task") -> Text:
+        _text = self.text_format.format(task=task)
+        if self.markup:
+            text = Text.from_markup(_text, style=self.style, justify=self.justify)
+        else:
+            text = Text(_text, style=self.style, justify=self.justify)
+        if self.highlighter:
+            self.highlighter.highlight(text)
+        return text
+
+
+class BarColumn(ProgressColumn):
+    """Renders a visual progress bar.
+
+    Args:
+        bar_width (Optional[int], optional): Width of bar or None for full width. Defaults to 40.
+        style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
+        complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
+        finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished".
+        pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
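+
+    Example (a minimal sketch; hand the column to ``Progress``):
+        >>> progress = Progress(BarColumn(bar_width=None), expand=True)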
+ """ + + def __init__( + self, + bar_width: Optional[int] = 40, + style: StyleType = "bar.back", + complete_style: StyleType = "bar.complete", + finished_style: StyleType = "bar.finished", + pulse_style: StyleType = "bar.pulse", + table_column: Optional[Column] = None, + ) -> None: + self.bar_width = bar_width + self.style = style + self.complete_style = complete_style + self.finished_style = finished_style + self.pulse_style = pulse_style + super().__init__(table_column=table_column) + + def render(self, task: "Task") -> ProgressBar: + """Gets a progress bar widget for a task.""" + return ProgressBar( + total=max(0, task.total), + completed=max(0, task.completed), + width=None if self.bar_width is None else max(1, self.bar_width), + pulse=not task.started, + animation_time=task.get_time(), + style=self.style, + complete_style=self.complete_style, + finished_style=self.finished_style, + pulse_style=self.pulse_style, + ) + + +class TimeElapsedColumn(ProgressColumn): + """Renders time elapsed.""" + + def render(self, task: "Task") -> Text: + """Show time remaining.""" + elapsed = task.finished_time if task.finished else task.elapsed + if elapsed is None: + return Text("-:--:--", style="progress.elapsed") + delta = timedelta(seconds=int(elapsed)) + return Text(str(delta), style="progress.elapsed") + + +class TimeRemainingColumn(ProgressColumn): + """Renders estimated time remaining.""" + + # Only refresh twice a second to prevent jitter + max_refresh = 0.5 + + def render(self, task: "Task") -> Text: + """Show time remaining.""" + remaining = task.time_remaining + if remaining is None: + return Text("-:--:--", style="progress.remaining") + remaining_delta = timedelta(seconds=int(remaining)) + return Text(str(remaining_delta), style="progress.remaining") + + +class FileSizeColumn(ProgressColumn): + """Renders completed filesize.""" + + def render(self, task: "Task") -> Text: + """Show data completed.""" + data_size = filesize.decimal(int(task.completed)) + return Text(data_size, style="progress.filesize") + + +class TotalFileSizeColumn(ProgressColumn): + """Renders total filesize.""" + + def render(self, task: "Task") -> Text: + """Show data completed.""" + data_size = filesize.decimal(int(task.total)) + return Text(data_size, style="progress.filesize.total") + + +class DownloadColumn(ProgressColumn): + """Renders file size downloaded and total, e.g. '0.5/2.3 GB'. + + Args: + binary_units (bool, optional): Use binary units, KiB, MiB etc. Defaults to False. 
+ """ + + def __init__( + self, binary_units: bool = False, table_column: Optional[Column] = None + ) -> None: + self.binary_units = binary_units + super().__init__(table_column=table_column) + + def render(self, task: "Task") -> Text: + """Calculate common unit for completed and total.""" + completed = int(task.completed) + total = int(task.total) + if self.binary_units: + unit, suffix = filesize.pick_unit_and_suffix( + total, + ["bytes", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"], + 1024, + ) + else: + unit, suffix = filesize.pick_unit_and_suffix( + total, ["bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"], 1000 + ) + completed_ratio = completed / unit + total_ratio = total / unit + precision = 0 if unit == 1 else 1 + completed_str = f"{completed_ratio:,.{precision}f}" + total_str = f"{total_ratio:,.{precision}f}" + download_status = f"{completed_str}/{total_str} {suffix}" + download_text = Text(download_status, style="progress.download") + return download_text + + +class TransferSpeedColumn(ProgressColumn): + """Renders human readable transfer speed.""" + + def render(self, task: "Task") -> Text: + """Show data transfer speed.""" + speed = task.finished_speed or task.speed + if speed is None: + return Text("?", style="progress.data.speed") + data_speed = filesize.decimal(int(speed)) + return Text(f"{data_speed}/s", style="progress.data.speed") + + +class ProgressSample(NamedTuple): + """Sample of progress for a given time.""" + + timestamp: float + """Timestamp of sample.""" + completed: float + """Number of steps completed.""" + + +@dataclass +class Task: + """Information regarding a progress task. + + This object should be considered read-only outside of the :class:`~Progress` class. + + """ + + id: TaskID + """Task ID associated with this task (used in Progress methods).""" + + description: str + """str: Description of the task.""" + + total: float + """str: Total number of steps in this task.""" + + completed: float + """float: Number of steps completed""" + + _get_time: GetTimeCallable + """Callable to get the current time.""" + + finished_time: Optional[float] = None + """float: Time task was finished.""" + + visible: bool = True + """bool: Indicates if this task is visible in the progress display.""" + + fields: Dict[str, Any] = field(default_factory=dict) + """dict: Arbitrary fields passed in via Progress.update.""" + + start_time: Optional[float] = field(default=None, init=False, repr=False) + """Optional[float]: Time this task was started, or None if not started.""" + + stop_time: Optional[float] = field(default=None, init=False, repr=False) + """Optional[float]: Time this task was stopped, or None if not stopped.""" + + finished_speed: Optional[float] = None + """Optional[float]: The last speed for a finished task.""" + + _progress: Deque[ProgressSample] = field( + default_factory=deque, init=False, repr=False + ) + + _lock: RLock = field(repr=False, default_factory=RLock) + """Thread lock.""" + + def get_time(self) -> float: + """float: Get the current time, in seconds.""" + return self._get_time() + + @property + def started(self) -> bool: + """bool: Check if the task as started.""" + return self.start_time is not None + + @property + def remaining(self) -> float: + """float: Get the number of steps remaining.""" + return self.total - self.completed + + @property + def elapsed(self) -> Optional[float]: + """Optional[float]: Time elapsed since task was started, or ``None`` if the task hasn't started.""" + if self.start_time is None: + return None + if 
self.stop_time is not None: + return self.stop_time - self.start_time + return self.get_time() - self.start_time + + @property + def finished(self) -> bool: + """Check if the task has finished.""" + return self.finished_time is not None + + @property + def percentage(self) -> float: + """float: Get progress of task as a percentage.""" + if not self.total: + return 0.0 + completed = (self.completed / self.total) * 100.0 + completed = min(100.0, max(0.0, completed)) + return completed + + @property + def speed(self) -> Optional[float]: + """Optional[float]: Get the estimated speed in steps per second.""" + if self.start_time is None: + return None + with self._lock: + progress = self._progress + if not progress: + return None + total_time = progress[-1].timestamp - progress[0].timestamp + if total_time == 0: + return None + iter_progress = iter(progress) + next(iter_progress) + total_completed = sum(sample.completed for sample in iter_progress) + speed = total_completed / total_time + return speed + + @property + def time_remaining(self) -> Optional[float]: + """Optional[float]: Get estimated time to completion, or ``None`` if no data.""" + if self.finished: + return 0.0 + speed = self.speed + if not speed: + return None + estimate = ceil(self.remaining / speed) + return estimate + + def _reset(self) -> None: + """Reset progress.""" + self._progress.clear() + self.finished_time = None + self.finished_speed = None + + +class Progress(JupyterMixin): + """Renders an auto-updating progress bar(s). + + Args: + console (Console, optional): Optional Console instance. Default will an internal Console instance writing to stdout. + auto_refresh (bool, optional): Enable auto refresh. If disabled, you will need to call `refresh()`. + refresh_per_second (Optional[float], optional): Number of times per second to refresh the progress information or None to use default (10). Defaults to None. + speed_estimate_period: (float, optional): Period (in seconds) used to calculate the speed estimate. Defaults to 30. + transient: (bool, optional): Clear the progress on exit. Defaults to False. + redirect_stdout: (bool, optional): Enable redirection of stdout, so ``print`` may be used. Defaults to True. + redirect_stderr: (bool, optional): Enable redirection of stderr. Defaults to True. + get_time: (Callable, optional): A callable that gets the current time, or None to use Console.get_time. Defaults to None. + disable (bool, optional): Disable progress display. Defaults to False + expand (bool, optional): Expand tasks table to fit width. Defaults to False. 
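+
+    Example (a minimal sketch of typical context-manager usage):
+        >>> with Progress() as progress:
+        ...     task_id = progress.add_task("Working...", total=100)
+        ...     for _ in range(100):
+        ...         progress.advance(task_id)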
+ """ + + def __init__( + self, + *columns: Union[str, ProgressColumn], + console: Optional[Console] = None, + auto_refresh: bool = True, + refresh_per_second: float = 10, + speed_estimate_period: float = 30.0, + transient: bool = False, + redirect_stdout: bool = True, + redirect_stderr: bool = True, + get_time: Optional[GetTimeCallable] = None, + disable: bool = False, + expand: bool = False, + ) -> None: + assert ( + refresh_per_second is None or refresh_per_second > 0 + ), "refresh_per_second must be > 0" + self._lock = RLock() + self.columns = columns or ( + TextColumn("[progress.description]{task.description}"), + BarColumn(), + TextColumn("[progress.percentage]{task.percentage:>3.0f}%"), + TimeRemainingColumn(), + ) + self.speed_estimate_period = speed_estimate_period + + self.disable = disable + self.expand = expand + self._tasks: Dict[TaskID, Task] = {} + self._task_index: TaskID = TaskID(0) + self.live = Live( + console=console or get_console(), + auto_refresh=auto_refresh, + refresh_per_second=refresh_per_second, + transient=transient, + redirect_stdout=redirect_stdout, + redirect_stderr=redirect_stderr, + get_renderable=self.get_renderable, + ) + self.get_time = get_time or self.console.get_time + self.print = self.console.print + self.log = self.console.log + + @property + def console(self) -> Console: + return self.live.console + + @property + def tasks(self) -> List[Task]: + """Get a list of Task instances.""" + with self._lock: + return list(self._tasks.values()) + + @property + def task_ids(self) -> List[TaskID]: + """A list of task IDs.""" + with self._lock: + return list(self._tasks.keys()) + + @property + def finished(self) -> bool: + """Check if all tasks have been completed.""" + with self._lock: + if not self._tasks: + return True + return all(task.finished for task in self._tasks.values()) + + def start(self) -> None: + """Start the progress display.""" + if not self.disable: + self.live.start(refresh=True) + + def stop(self) -> None: + """Stop the progress display.""" + self.live.stop() + if not self.console.is_interactive: + self.console.print() + + def __enter__(self) -> "Progress": + self.start() + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + self.stop() + + def track( + self, + sequence: Union[Iterable[ProgressType], Sequence[ProgressType]], + total: Optional[float] = None, + task_id: Optional[TaskID] = None, + description: str = "Working...", + update_period: float = 0.1, + ) -> Iterable[ProgressType]: + """Track progress by iterating over a sequence. + + Args: + sequence (Sequence[ProgressType]): A sequence of values you want to iterate over and track progress. + total: (float, optional): Total number of steps. Default is len(sequence). + task_id: (TaskID): Task to track. Default is new task. + description: (str, optional): Description of task, if new task is created. + update_period (float, optional): Minimum time (in seconds) between calls to update(). Defaults to 0.1. + + Returns: + Iterable[ProgressType]: An iterable of values taken from the provided sequence. 
+ """ + + if total is None: + if isinstance(sequence, Sized): + task_total = float(len(sequence)) + else: + raise ValueError( + f"unable to get size of {sequence!r}, please specify 'total'" + ) + else: + task_total = total + + if task_id is None: + task_id = self.add_task(description, total=task_total) + else: + self.update(task_id, total=task_total) + + if self.live.auto_refresh: + with _TrackThread(self, task_id, update_period) as track_thread: + for value in sequence: + yield value + track_thread.completed += 1 + else: + advance = self.advance + refresh = self.refresh + for value in sequence: + yield value + advance(task_id, 1) + refresh() + + def start_task(self, task_id: TaskID) -> None: + """Start a task. + + Starts a task (used when calculating elapsed time). You may need to call this manually, + if you called ``add_task`` with ``start=False``. + + Args: + task_id (TaskID): ID of task. + """ + with self._lock: + task = self._tasks[task_id] + if task.start_time is None: + task.start_time = self.get_time() + + def stop_task(self, task_id: TaskID) -> None: + """Stop a task. + + This will freeze the elapsed time on the task. + + Args: + task_id (TaskID): ID of task. + """ + with self._lock: + task = self._tasks[task_id] + current_time = self.get_time() + if task.start_time is None: + task.start_time = current_time + task.stop_time = current_time + + def update( + self, + task_id: TaskID, + *, + total: Optional[float] = None, + completed: Optional[float] = None, + advance: Optional[float] = None, + description: Optional[str] = None, + visible: Optional[bool] = None, + refresh: bool = False, + **fields: Any, + ) -> None: + """Update information associated with a task. + + Args: + task_id (TaskID): Task id (returned by add_task). + total (float, optional): Updates task.total if not None. + completed (float, optional): Updates task.completed if not None. + advance (float, optional): Add a value to task.completed if not None. + description (str, optional): Change task description if not None. + visible (bool, optional): Set visible flag if not None. + refresh (bool): Force a refresh of progress information. Default is False. + **fields (Any): Additional data fields required for rendering. + """ + with self._lock: + task = self._tasks[task_id] + completed_start = task.completed + + if total is not None and total != task.total: + task.total = total + task._reset() + if advance is not None: + task.completed += advance + if completed is not None: + task.completed = completed + if description is not None: + task.description = description + if visible is not None: + task.visible = visible + task.fields.update(fields) + update_completed = task.completed - completed_start + + current_time = self.get_time() + old_sample_time = current_time - self.speed_estimate_period + _progress = task._progress + + popleft = _progress.popleft + while _progress and _progress[0].timestamp < old_sample_time: + popleft() + while len(_progress) > 1000: + popleft() + if update_completed > 0: + _progress.append(ProgressSample(current_time, update_completed)) + if task.completed >= task.total and task.finished_time is None: + task.finished_time = task.elapsed + + if refresh: + self.refresh() + + def reset( + self, + task_id: TaskID, + *, + start: bool = True, + total: Optional[float] = None, + completed: int = 0, + visible: Optional[bool] = None, + description: Optional[str] = None, + **fields: Any, + ) -> None: + """Reset a task so completed is 0 and the clock is reset. + + Args: + task_id (TaskID): ID of task. 
+ start (bool, optional): Start the task after reset. Defaults to True. + total (float, optional): New total steps in task, or None to use current total. Defaults to None. + completed (int, optional): Number of steps completed. Defaults to 0. + **fields (str): Additional data fields required for rendering. + """ + current_time = self.get_time() + with self._lock: + task = self._tasks[task_id] + task._reset() + task.start_time = current_time if start else None + if total is not None: + task.total = total + task.completed = completed + if visible is not None: + task.visible = visible + if fields: + task.fields = fields + if description is not None: + task.description = description + task.finished_time = None + self.refresh() + + def advance(self, task_id: TaskID, advance: float = 1) -> None: + """Advance task by a number of steps. + + Args: + task_id (TaskID): ID of task. + advance (float): Number of steps to advance. Default is 1. + """ + current_time = self.get_time() + with self._lock: + task = self._tasks[task_id] + completed_start = task.completed + task.completed += advance + update_completed = task.completed - completed_start + old_sample_time = current_time - self.speed_estimate_period + _progress = task._progress + + popleft = _progress.popleft + while _progress and _progress[0].timestamp < old_sample_time: + popleft() + while len(_progress) > 1000: + popleft() + _progress.append(ProgressSample(current_time, update_completed)) + if task.completed >= task.total and task.finished_time is None: + task.finished_time = task.elapsed + task.finished_speed = task.speed + + def refresh(self) -> None: + """Refresh (render) the progress information.""" + if not self.disable and self.live.is_started: + self.live.refresh() + + def get_renderable(self) -> RenderableType: + """Get a renderable for the progress display.""" + renderable = Group(*self.get_renderables()) + return renderable + + def get_renderables(self) -> Iterable[RenderableType]: + """Get a number of renderables for the progress display.""" + table = self.make_tasks_table(self.tasks) + yield table + + def make_tasks_table(self, tasks: Iterable[Task]) -> Table: + """Get a table to render the Progress display. + + Args: + tasks (Iterable[Task]): An iterable of Task instances, one per row of the table. + + Returns: + Table: A table instance. + """ + table_columns = ( + ( + Column(no_wrap=True) + if isinstance(_column, str) + else _column.get_table_column().copy() + ) + for _column in self.columns + ) + table = Table.grid(*table_columns, padding=(0, 1), expand=self.expand) + + for task in tasks: + if task.visible: + table.add_row( + *( + ( + column.format(task=task) + if isinstance(column, str) + else column(task) + ) + for column in self.columns + ) + ) + return table + + def __rich__(self) -> RenderableType: + """Makes the Progress class itself renderable.""" + with self._lock: + return self.get_renderable() + + def add_task( + self, + description: str, + start: bool = True, + total: float = 100.0, + completed: int = 0, + visible: bool = True, + **fields: Any, + ) -> TaskID: + """Add a new 'task' to the Progress display. + + Args: + description (str): A description of the task. + start (bool, optional): Start the task immediately (to calculate elapsed time). If set to False, + you will need to call `start` manually. Defaults to True. + total (float, optional): Number of total steps in the progress if know. Defaults to 100. + completed (int, optional): Number of steps completed so far.. Defaults to 0. 
+ visible (bool, optional): Enable display of the task. Defaults to True. + **fields (str): Additional data fields required for rendering. + + Returns: + TaskID: An ID you can use when calling `update`. + """ + with self._lock: + task = Task( + self._task_index, + description, + total, + completed, + visible=visible, + fields=fields, + _get_time=self.get_time, + _lock=self._lock, + ) + self._tasks[self._task_index] = task + if start: + self.start_task(self._task_index) + new_task_index = self._task_index + self._task_index = TaskID(int(self._task_index) + 1) + self.refresh() + return new_task_index + + def remove_task(self, task_id: TaskID) -> None: + """Delete a task if it exists. + + Args: + task_id (TaskID): A task ID. + + """ + with self._lock: + del self._tasks[task_id] + + +if __name__ == "__main__": # pragma: no coverage + + import random + import time + + from .panel import Panel + from .rule import Rule + from .syntax import Syntax + from .table import Table + + syntax = Syntax( + '''def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]: + """Iterate and generate a tuple with a flag for last value.""" + iter_values = iter(values) + try: + previous_value = next(iter_values) + except StopIteration: + return + for value in iter_values: + yield False, previous_value + previous_value = value + yield True, previous_value''', + "python", + line_numbers=True, + ) + + table = Table("foo", "bar", "baz") + table.add_row("1", "2", "3") + + progress_renderables = [ + "Text may be printed while the progress bars are rendering.", + Panel("In fact, [i]any[/i] renderable will work"), + "Such as [magenta]tables[/]...", + table, + "Pretty printed structures...", + {"type": "example", "text": "Pretty printed"}, + "Syntax...", + syntax, + Rule("Give it a try!"), + ] + + from itertools import cycle + + examples = cycle(progress_renderables) + + console = Console(record=True) + + with Progress( + SpinnerColumn(), + TextColumn("[progress.description]{task.description}"), + BarColumn(), + TextColumn("[progress.percentage]{task.percentage:>3.0f}%"), + TimeRemainingColumn(), + TimeElapsedColumn(), + console=console, + transient=True, + ) as progress: + + task1 = progress.add_task("[red]Downloading", total=1000) + task2 = progress.add_task("[green]Processing", total=1000) + task3 = progress.add_task("[yellow]Thinking", total=1000, start=False) + + while not progress.finished: + progress.update(task1, advance=0.5) + progress.update(task2, advance=0.3) + time.sleep(0.01) + if random.randint(0, 100) < 1: + progress.log(next(examples)) diff --git a/pipenv/patched/notpip/_vendor/rich/progress_bar.py b/pipenv/patched/notpip/_vendor/rich/progress_bar.py new file mode 100644 index 0000000000..1797b5f786 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/progress_bar.py @@ -0,0 +1,216 @@ +import math +from functools import lru_cache +from time import monotonic +from typing import Iterable, List, Optional + +from .color import Color, blend_rgb +from .color_triplet import ColorTriplet +from .console import Console, ConsoleOptions, RenderResult +from .jupyter import JupyterMixin +from .measure import Measurement +from .segment import Segment +from .style import Style, StyleType + +# Number of characters before 'pulse' animation repeats +PULSE_SIZE = 20 + + +class ProgressBar(JupyterMixin): + """Renders a (progress) bar. Used by rich.progress. + + Args: + total (float, optional): Number of steps in the bar. Defaults to 100. + completed (float, optional): Number of steps completed. Defaults to 0. 
+        width (int, optional): Width of the bar, or ``None`` for maximum width. Defaults to None.
+        pulse (bool, optional): Enable pulse effect. Defaults to False.
+        style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
+        complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
+        finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished".
+        pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
+        animation_time (Optional[float], optional): Time in seconds to use for animation, or None to use system time.
+    """
+
+    def __init__(
+        self,
+        total: float = 100.0,
+        completed: float = 0,
+        width: Optional[int] = None,
+        pulse: bool = False,
+        style: StyleType = "bar.back",
+        complete_style: StyleType = "bar.complete",
+        finished_style: StyleType = "bar.finished",
+        pulse_style: StyleType = "bar.pulse",
+        animation_time: Optional[float] = None,
+    ):
+        self.total = total
+        self.completed = completed
+        self.width = width
+        self.pulse = pulse
+        self.style = style
+        self.complete_style = complete_style
+        self.finished_style = finished_style
+        self.pulse_style = pulse_style
+        self.animation_time = animation_time
+
+        self._pulse_segments: Optional[List[Segment]] = None
+
+    def __repr__(self) -> str:
+        return f"<Bar {self.completed!r} of {self.total!r}>"
+
+    @property
+    def percentage_completed(self) -> float:
+        """Calculate percentage complete."""
+        completed = (self.completed / self.total) * 100.0
+        completed = min(100, max(0.0, completed))
+        return completed
+
+    @lru_cache(maxsize=16)
+    def _get_pulse_segments(
+        self,
+        fore_style: Style,
+        back_style: Style,
+        color_system: str,
+        no_color: bool,
+        ascii: bool = False,
+    ) -> List[Segment]:
+        """Get a list of segments to render a pulse animation.
+
+        Returns:
+            List[Segment]: A list of segments, one segment per character.
+        """
+        bar = "-" if ascii else "━"
+        segments: List[Segment] = []
+        if color_system not in ("standard", "eight_bit", "truecolor") or no_color:
+            segments += [Segment(bar, fore_style)] * (PULSE_SIZE // 2)
+            segments += [Segment(" " if no_color else bar, back_style)] * (
+                PULSE_SIZE - (PULSE_SIZE // 2)
+            )
+            return segments
+
+        append = segments.append
+        fore_color = (
+            fore_style.color.get_truecolor()
+            if fore_style.color
+            else ColorTriplet(255, 0, 255)
+        )
+        back_color = (
+            back_style.color.get_truecolor()
+            if back_style.color
+            else ColorTriplet(0, 0, 0)
+        )
+        cos = math.cos
+        pi = math.pi
+        _Segment = Segment
+        _Style = Style
+        from_triplet = Color.from_triplet
+
+        for index in range(PULSE_SIZE):
+            position = index / PULSE_SIZE
+            fade = 0.5 + cos((position * pi * 2)) / 2.0
+            color = blend_rgb(fore_color, back_color, cross_fade=fade)
+            append(_Segment(bar, _Style(color=from_triplet(color))))
+        return segments
+
+    def update(self, completed: float, total: Optional[float] = None) -> None:
+        """Update progress with new values.
+
+        Args:
+            completed (float): Number of steps completed.
+            total (float, optional): Total number of steps, or ``None`` to not change. Defaults to None.
+        """
+        self.completed = completed
+        self.total = total if total is not None else self.total
+
+    def _render_pulse(
+        self, console: Console, width: int, ascii: bool = False
+    ) -> Iterable[Segment]:
+        """Renders the pulse animation.
+
+        Args:
+            console (Console): Console instance.
+            width (int): Width in characters of pulse animation.
+ + Returns: + RenderResult: [description] + + Yields: + Iterator[Segment]: Segments to render pulse + """ + fore_style = console.get_style(self.pulse_style, default="white") + back_style = console.get_style(self.style, default="black") + + pulse_segments = self._get_pulse_segments( + fore_style, back_style, console.color_system, console.no_color, ascii=ascii + ) + segment_count = len(pulse_segments) + current_time = ( + monotonic() if self.animation_time is None else self.animation_time + ) + segments = pulse_segments * (int(width / segment_count) + 2) + offset = int(-current_time * 15) % segment_count + segments = segments[offset : offset + width] + yield from segments + + def __rich_console__( + self, console: Console, options: ConsoleOptions + ) -> RenderResult: + + width = min(self.width or options.max_width, options.max_width) + ascii = options.legacy_windows or options.ascii_only + if self.pulse: + yield from self._render_pulse(console, width, ascii=ascii) + return + + completed = min(self.total, max(0, self.completed)) + + bar = "-" if ascii else "━" + half_bar_right = " " if ascii else "╸" + half_bar_left = " " if ascii else "╺" + complete_halves = ( + int(width * 2 * completed / self.total) if self.total else width * 2 + ) + bar_count = complete_halves // 2 + half_bar_count = complete_halves % 2 + style = console.get_style(self.style) + complete_style = console.get_style( + self.complete_style if self.completed < self.total else self.finished_style + ) + _Segment = Segment + if bar_count: + yield _Segment(bar * bar_count, complete_style) + if half_bar_count: + yield _Segment(half_bar_right * half_bar_count, complete_style) + + if not console.no_color: + remaining_bars = width - bar_count - half_bar_count + if remaining_bars and console.color_system is not None: + if not half_bar_count and bar_count: + yield _Segment(half_bar_left, style) + remaining_bars -= 1 + if remaining_bars: + yield _Segment(bar * remaining_bars, style) + + def __rich_measure__( + self, console: Console, options: ConsoleOptions + ) -> Measurement: + return ( + Measurement(self.width, self.width) + if self.width is not None + else Measurement(4, options.max_width) + ) + + +if __name__ == "__main__": # pragma: no cover + console = Console() + bar = ProgressBar(width=50, total=100) + + import time + + console.show_cursor(False) + for n in range(0, 101, 1): + bar.update(n) + console.print(bar) + console.file.write("\r") + time.sleep(0.05) + console.show_cursor(True) + console.print() diff --git a/pipenv/patched/notpip/_vendor/rich/prompt.py b/pipenv/patched/notpip/_vendor/rich/prompt.py new file mode 100644 index 0000000000..db688480fa --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/prompt.py @@ -0,0 +1,376 @@ +from typing import Any, Generic, List, Optional, TextIO, TypeVar, Union, overload + +from . import get_console +from .console import Console +from .text import Text, TextType + +PromptType = TypeVar("PromptType") +DefaultType = TypeVar("DefaultType") + + +class PromptError(Exception): + """Exception base class for prompt related errors.""" + + +class InvalidResponse(PromptError): + """Exception to indicate a response was invalid. Raise this within process_response() to indicate an error + and provide an error message. + + Args: + message (Union[str, Text]): Error message. 
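+
+    Example (a sketch of overriding ``process_response`` in a PromptBase
+    subclass; the emptiness check is a hypothetical validator):
+        >>> def process_response(self, value: str) -> str:
+        ...     if not value.strip():
+        ...         raise InvalidResponse("[prompt.invalid]Value may not be empty")
+        ...     return value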
+ """ + + def __init__(self, message: TextType) -> None: + self.message = message + + def __rich__(self) -> TextType: + return self.message + + +class PromptBase(Generic[PromptType]): + """Ask the user for input until a valid response is received. This is the base class, see one of + the concrete classes for examples. + + Args: + prompt (TextType, optional): Prompt text. Defaults to "". + console (Console, optional): A Console instance or None to use global console. Defaults to None. + password (bool, optional): Enable password input. Defaults to False. + choices (List[str], optional): A list of valid choices. Defaults to None. + show_default (bool, optional): Show default in prompt. Defaults to True. + show_choices (bool, optional): Show choices in prompt. Defaults to True. + """ + + response_type: type = str + + validate_error_message = "[prompt.invalid]Please enter a valid value" + illegal_choice_message = ( + "[prompt.invalid.choice]Please select one of the available options" + ) + prompt_suffix = ": " + + choices: Optional[List[str]] = None + + def __init__( + self, + prompt: TextType = "", + *, + console: Optional[Console] = None, + password: bool = False, + choices: Optional[List[str]] = None, + show_default: bool = True, + show_choices: bool = True, + ) -> None: + self.console = console or get_console() + self.prompt = ( + Text.from_markup(prompt, style="prompt") + if isinstance(prompt, str) + else prompt + ) + self.password = password + if choices is not None: + self.choices = choices + self.show_default = show_default + self.show_choices = show_choices + + @classmethod + @overload + def ask( + cls, + prompt: TextType = "", + *, + console: Optional[Console] = None, + password: bool = False, + choices: Optional[List[str]] = None, + show_default: bool = True, + show_choices: bool = True, + default: DefaultType, + stream: Optional[TextIO] = None, + ) -> Union[DefaultType, PromptType]: + ... + + @classmethod + @overload + def ask( + cls, + prompt: TextType = "", + *, + console: Optional[Console] = None, + password: bool = False, + choices: Optional[List[str]] = None, + show_default: bool = True, + show_choices: bool = True, + stream: Optional[TextIO] = None, + ) -> PromptType: + ... + + @classmethod + def ask( + cls, + prompt: TextType = "", + *, + console: Optional[Console] = None, + password: bool = False, + choices: Optional[List[str]] = None, + show_default: bool = True, + show_choices: bool = True, + default: Any = ..., + stream: Optional[TextIO] = None, + ) -> Any: + """Shortcut to construct and run a prompt loop and return the result. + + Example: + >>> filename = Prompt.ask("Enter a filename") + + Args: + prompt (TextType, optional): Prompt text. Defaults to "". + console (Console, optional): A Console instance or None to use global console. Defaults to None. + password (bool, optional): Enable password input. Defaults to False. + choices (List[str], optional): A list of valid choices. Defaults to None. + show_default (bool, optional): Show default in prompt. Defaults to True. + show_choices (bool, optional): Show choices in prompt. Defaults to True. + stream (TextIO, optional): Optional text file open for reading to get input. Defaults to None. + """ + _prompt = cls( + prompt, + console=console, + password=password, + choices=choices, + show_default=show_default, + show_choices=show_choices, + ) + return _prompt(default=default, stream=stream) + + def render_default(self, default: DefaultType) -> Text: + """Turn the supplied default in to a Text instance. 
+ + Args: + default (DefaultType): Default value. + + Returns: + Text: Text containing rendering of default value. + """ + return Text(f"({default})", "prompt.default") + + def make_prompt(self, default: DefaultType) -> Text: + """Make prompt text. + + Args: + default (DefaultType): Default value. + + Returns: + Text: Text to display in prompt. + """ + prompt = self.prompt.copy() + prompt.end = "" + + if self.show_choices and self.choices: + _choices = "/".join(self.choices) + choices = f"[{_choices}]" + prompt.append(" ") + prompt.append(choices, "prompt.choices") + + if ( + default != ... + and self.show_default + and isinstance(default, (str, self.response_type)) + ): + prompt.append(" ") + _default = self.render_default(default) + prompt.append(_default) + + prompt.append(self.prompt_suffix) + + return prompt + + @classmethod + def get_input( + cls, + console: Console, + prompt: TextType, + password: bool, + stream: Optional[TextIO] = None, + ) -> str: + """Get input from user. + + Args: + console (Console): Console instance. + prompt (TextType): Prompt text. + password (bool): Enable password entry. + + Returns: + str: String from user. + """ + return console.input(prompt, password=password, stream=stream) + + def check_choice(self, value: str) -> bool: + """Check value is in the list of valid choices. + + Args: + value (str): Value entered by user. + + Returns: + bool: True if choice was valid, otherwise False. + """ + assert self.choices is not None + return value.strip() in self.choices + + def process_response(self, value: str) -> PromptType: + """Process response from user, convert to prompt type. + + Args: + value (str): String typed by user. + + Raises: + InvalidResponse: If ``value`` is invalid. + + Returns: + PromptType: The value to be returned from ask method. + """ + value = value.strip() + try: + return_value = self.response_type(value) + except ValueError: + raise InvalidResponse(self.validate_error_message) + + if self.choices is not None and not self.check_choice(value): + raise InvalidResponse(self.illegal_choice_message) + + return return_value # type: ignore + + def on_validate_error(self, value: str, error: InvalidResponse) -> None: + """Called to handle validation error. + + Args: + value (str): String entered by user. + error (InvalidResponse): Exception instance the initiated the error. + """ + self.console.print(error) + + def pre_prompt(self) -> None: + """Hook to display something before the prompt.""" + + @overload + def __call__(self, *, stream: Optional[TextIO] = None) -> PromptType: + ... + + @overload + def __call__( + self, *, default: DefaultType, stream: Optional[TextIO] = None + ) -> Union[PromptType, DefaultType]: + ... + + def __call__(self, *, default: Any = ..., stream: Optional[TextIO] = None) -> Any: + """Run the prompt loop. + + Args: + default (Any, optional): Optional default value. + + Returns: + PromptType: Processed value. + """ + while True: + self.pre_prompt() + prompt = self.make_prompt(default) + value = self.get_input(self.console, prompt, self.password, stream=stream) + if value == "" and default != ...: + return default + try: + return_value = self.process_response(value) + except InvalidResponse as error: + self.on_validate_error(value, error) + continue + else: + return return_value + + +class Prompt(PromptBase[str]): + """A prompt that returns a str. + + Example: + >>> name = Prompt.ask("Enter your name") + + + """ + + response_type = str + + +class IntPrompt(PromptBase[int]): + """A prompt that returns an integer. 
+
+    Example:
+        >>> burrito_count = IntPrompt.ask("How many burritos do you want to order")
+
+    """
+
+    response_type = int
+    validate_error_message = "[prompt.invalid]Please enter a valid integer number"
+
+
+class FloatPrompt(PromptBase[float]):
+    """A prompt that returns a float.
+
+    Example:
+        >>> temperature = FloatPrompt.ask("Enter desired temperature")
+
+    """
+
+    response_type = float
+    validate_error_message = "[prompt.invalid]Please enter a number"
+
+
+class Confirm(PromptBase[bool]):
+    """A yes / no confirmation prompt.
+
+    Example:
+        >>> if Confirm.ask("Continue"):
+        ...     run_job()
+
+    """
+
+    response_type = bool
+    validate_error_message = "[prompt.invalid]Please enter Y or N"
+    choices: List[str] = ["y", "n"]
+
+    def render_default(self, default: DefaultType) -> Text:
+        """Render the default as (y) or (n) rather than True/False."""
+        yes, no = self.choices
+        return Text(f"({yes})" if default else f"({no})", style="prompt.default")
+
+    def process_response(self, value: str) -> bool:
+        """Convert choices to a bool."""
+        value = value.strip().lower()
+        if value not in self.choices:
+            raise InvalidResponse(self.validate_error_message)
+        return value == self.choices[0]
+
+
+if __name__ == "__main__":  # pragma: no cover
+
+    from pipenv.patched.notpip._vendor.rich import print
+
+    if Confirm.ask("Run [i]prompt[/i] tests?", default=True):
+        while True:
+            result = IntPrompt.ask(
+                ":rocket: Enter a number between [b]1[/b] and [b]10[/b]", default=5
+            )
+            if result >= 1 and result <= 10:
+                break
+            print(":pile_of_poo: [prompt.invalid]Number must be between 1 and 10")
+        print(f"number={result}")
+
+        while True:
+            password = Prompt.ask(
+                "Please enter a password [cyan](must be at least 5 characters)",
+                password=True,
+            )
+            if len(password) >= 5:
+                break
+            print("[prompt.invalid]password too short")
+        print(f"password={password!r}")
+
+        fruit = Prompt.ask("Enter a fruit", choices=["apple", "orange", "pear"])
+        print(f"fruit={fruit!r}")
+
+    else:
+        print("[b]OK :loudly_crying_face:")
diff --git a/pipenv/patched/notpip/_vendor/rich/protocol.py b/pipenv/patched/notpip/_vendor/rich/protocol.py
new file mode 100644
index 0000000000..f1f56fd65a
--- /dev/null
+++ b/pipenv/patched/notpip/_vendor/rich/protocol.py
@@ -0,0 +1,42 @@
+from typing import Any, Callable, cast, Set, TYPE_CHECKING
+from inspect import isclass
+
+if TYPE_CHECKING:
+    from pipenv.patched.notpip._vendor.rich.console import RenderableType
+
+_GIBBERISH = """aihwerij235234ljsdnp34ksodfipwoe234234jlskjdf"""
+
+
+def is_renderable(check_object: Any) -> bool:
+    """Check if an object may be rendered by Rich."""
+    return (
+        isinstance(check_object, str)
+        or hasattr(check_object, "__rich__")
+        or hasattr(check_object, "__rich_console__")
+    )
+
+
+def rich_cast(renderable: object) -> "RenderableType":
+    """Cast an object to a renderable by calling __rich__ if present.
+
+    Args:
+        renderable (object): A potentially renderable object.
+
+    Returns:
+        object: The result of recursively calling __rich__.
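+
+    Example (illustrative sketch; assumes a class whose ``__rich__`` returns a plain string)::
+
+        >>> class Chirp:
+        ...     def __rich__(self) -> str:
+        ...         return ":bird:"
+        >>> rich_cast(Chirp())
+        ':bird:'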
+ """ + from pipenv.patched.notpip._vendor.rich.console import RenderableType + + rich_visited_set: Set[type] = set() # Prevent potential infinite loop + while hasattr(renderable, "__rich__") and not isclass(renderable): + # Detect object which claim to have all the attributes + if hasattr(renderable, _GIBBERISH): + return repr(renderable) + cast_method = getattr(renderable, "__rich__") + renderable = cast_method() + renderable_type = type(renderable) + if renderable_type in rich_visited_set: + break + rich_visited_set.add(renderable_type) + + return cast(RenderableType, renderable) diff --git a/pipenv/patched/notpip/_vendor/rich/region.py b/pipenv/patched/notpip/_vendor/rich/region.py new file mode 100644 index 0000000000..75b3631c38 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/region.py @@ -0,0 +1,10 @@ +from typing import NamedTuple + + +class Region(NamedTuple): + """Defines a rectangular region of the screen.""" + + x: int + y: int + width: int + height: int diff --git a/pipenv/patched/notpip/_vendor/rich/repr.py b/pipenv/patched/notpip/_vendor/rich/repr.py new file mode 100644 index 0000000000..6facd32335 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/repr.py @@ -0,0 +1,151 @@ +from functools import partial +import inspect + +from typing import ( + Any, + Callable, + Iterable, + List, + Optional, + overload, + Union, + Tuple, + Type, + TypeVar, +) + + +T = TypeVar("T") + + +Result = Iterable[Union[Any, Tuple[Any], Tuple[str, Any], Tuple[str, Any, Any]]] +RichReprResult = Result + + +class ReprError(Exception): + """An error occurred when attempting to build a repr.""" + + +@overload +def auto(cls: Optional[T]) -> T: + ... + + +@overload +def auto(*, angular: bool = False) -> Callable[[T], T]: + ... + + +def auto( + cls: Optional[T] = None, *, angular: Optional[bool] = None +) -> Union[T, Callable[[T], T]]: + """Class decorator to create __repr__ from __rich_repr__""" + + def do_replace(cls: Type[T], angular: Optional[bool] = None) -> Type[T]: + def auto_repr(self: Type[T]) -> str: + """Create repr string from __rich_repr__""" + repr_str: List[str] = [] + append = repr_str.append + + angular = getattr(self.__rich_repr__, "angular", False) # type: ignore + for arg in self.__rich_repr__(): # type: ignore + if isinstance(arg, tuple): + if len(arg) == 1: + append(repr(arg[0])) + else: + key, value, *default = arg + if key is None: + append(repr(value)) + else: + if len(default) and default[0] == value: + continue + append(f"{key}={value!r}") + else: + append(repr(arg)) + if angular: + return f"<{self.__class__.__name__} {' '.join(repr_str)}>" + else: + return f"{self.__class__.__name__}({', '.join(repr_str)})" + + def auto_rich_repr(self: Type[T]) -> Result: + """Auto generate __rich_rep__ from signature of __init__""" + try: + signature = inspect.signature(self.__init__) ## type: ignore + for name, param in signature.parameters.items(): + if param.kind == param.POSITIONAL_ONLY: + yield getattr(self, name) + elif param.kind in ( + param.POSITIONAL_OR_KEYWORD, + param.KEYWORD_ONLY, + ): + if param.default == param.empty: + yield getattr(self, param.name) + else: + yield param.name, getattr(self, param.name), param.default + except Exception as error: + raise ReprError( + f"Failed to auto generate __rich_repr__; {error}" + ) from None + + if not hasattr(cls, "__rich_repr__"): + auto_rich_repr.__doc__ = "Build a rich repr" + cls.__rich_repr__ = auto_rich_repr # type: ignore + + auto_repr.__doc__ = "Return repr(self)" + cls.__repr__ = auto_repr # type: ignore + if angular is 
not None: + cls.__rich_repr__.angular = angular # type: ignore + return cls + + if cls is None: + return partial(do_replace, angular=angular) # type: ignore + else: + return do_replace(cls, angular=angular) # type: ignore + + +@overload +def rich_repr(cls: Optional[T]) -> T: + ... + + +@overload +def rich_repr(*, angular: bool = False) -> Callable[[T], T]: + ... + + +def rich_repr( + cls: Optional[T] = None, *, angular: bool = False +) -> Union[T, Callable[[T], T]]: + if cls is None: + return auto(angular=angular) + else: + return auto(cls) + + +if __name__ == "__main__": + + @auto + class Foo: + def __rich_repr__(self) -> Result: + yield "foo" + yield "bar", {"shopping": ["eggs", "ham", "pineapple"]} + yield "buy", "hand sanitizer" + + foo = Foo() + from pipenv.patched.notpip._vendor.rich.console import Console + + console = Console() + + console.rule("Standard repr") + console.print(foo) + + console.print(foo, width=60) + console.print(foo, width=30) + + console.rule("Angular repr") + Foo.__rich_repr__.angular = True # type: ignore + + console.print(foo) + + console.print(foo, width=60) + console.print(foo, width=30) diff --git a/pipenv/patched/notpip/_vendor/rich/rule.py b/pipenv/patched/notpip/_vendor/rich/rule.py new file mode 100644 index 0000000000..29ee897440 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/rule.py @@ -0,0 +1,115 @@ +from typing import Union + +from .align import AlignMethod +from .cells import cell_len, set_cell_size +from .console import Console, ConsoleOptions, RenderResult +from .jupyter import JupyterMixin +from .style import Style +from .text import Text + + +class Rule(JupyterMixin): + """A console renderable to draw a horizontal rule (line). + + Args: + title (Union[str, Text], optional): Text to render in the rule. Defaults to "". + characters (str, optional): Character(s) used to draw the line. Defaults to "─". + style (StyleType, optional): Style of Rule. Defaults to "rule.line". + end (str, optional): Character at end of Rule. defaults to "\\\\n" + align (str, optional): How to align the title, one of "left", "center", or "right". Defaults to "center". 
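+
+    Example (sketch)::
+
+        >>> from pipenv.patched.notpip._vendor.rich.console import Console
+        >>> Console().print(Rule("[bold blue]Chapter One"))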
+ """ + + def __init__( + self, + title: Union[str, Text] = "", + *, + characters: str = "─", + style: Union[str, Style] = "rule.line", + end: str = "\n", + align: AlignMethod = "center", + ) -> None: + if cell_len(characters) < 1: + raise ValueError( + "'characters' argument must have a cell width of at least 1" + ) + if align not in ("left", "center", "right"): + raise ValueError( + f'invalid value for align, expected "left", "center", "right" (not {align!r})' + ) + self.title = title + self.characters = characters + self.style = style + self.end = end + self.align = align + + def __repr__(self) -> str: + return f"Rule({self.title!r}, {self.characters!r})" + + def __rich_console__( + self, console: Console, options: ConsoleOptions + ) -> RenderResult: + width = options.max_width + + # Python3.6 doesn't have an isascii method on str + isascii = getattr(str, "isascii", None) or ( + lambda s: all(ord(c) < 128 for c in s) + ) + characters = ( + "-" + if (options.ascii_only and not isascii(self.characters)) + else self.characters + ) + + chars_len = cell_len(characters) + if not self.title: + rule_text = Text(characters * ((width // chars_len) + 1), self.style) + rule_text.truncate(width) + rule_text.plain = set_cell_size(rule_text.plain, width) + yield rule_text + return + + if isinstance(self.title, Text): + title_text = self.title + else: + title_text = console.render_str(self.title, style="rule.text") + + title_text.plain = title_text.plain.replace("\n", " ") + title_text.expand_tabs() + rule_text = Text(end=self.end) + + if self.align == "center": + title_text.truncate(width - 4, overflow="ellipsis") + side_width = (width - cell_len(title_text.plain)) // 2 + left = Text(characters * (side_width // chars_len + 1)) + left.truncate(side_width - 1) + right_length = width - cell_len(left.plain) - cell_len(title_text.plain) + right = Text(characters * (side_width // chars_len + 1)) + right.truncate(right_length) + rule_text.append(left.plain + " ", self.style) + rule_text.append(title_text) + rule_text.append(" " + right.plain, self.style) + elif self.align == "left": + title_text.truncate(width - 2, overflow="ellipsis") + rule_text.append(title_text) + rule_text.append(" ") + rule_text.append(characters * (width - rule_text.cell_len), self.style) + elif self.align == "right": + title_text.truncate(width - 2, overflow="ellipsis") + rule_text.append(characters * (width - title_text.cell_len - 1), self.style) + rule_text.append(" ") + rule_text.append(title_text) + + rule_text.plain = set_cell_size(rule_text.plain, width) + yield rule_text + + +if __name__ == "__main__": # pragma: no cover + from pipenv.patched.notpip._vendor.rich.console import Console + import sys + + try: + text = sys.argv[1] + except IndexError: + text = "Hello, World" + console = Console() + console.print(Rule(title=text)) diff --git a/pipenv/patched/notpip/_vendor/rich/scope.py b/pipenv/patched/notpip/_vendor/rich/scope.py new file mode 100644 index 0000000000..145d5faa21 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/scope.py @@ -0,0 +1,86 @@ +from collections.abc import Mapping +from typing import TYPE_CHECKING, Any, Optional, Tuple + +from .highlighter import ReprHighlighter +from .panel import Panel +from .pretty import Pretty +from .table import Table +from .text import Text, TextType + +if TYPE_CHECKING: + from .console import ConsoleRenderable + + +def render_scope( + scope: "Mapping[str, Any]", + *, + title: Optional[TextType] = None, + sort_keys: bool = True, + indent_guides: bool = False, + max_length: 
Optional[int] = None, + max_string: Optional[int] = None, +) -> "ConsoleRenderable": + """Render python variables in a given scope. + + Args: + scope (Mapping): A mapping containing variable names and values. + title (str, optional): Optional title. Defaults to None. + sort_keys (bool, optional): Enable sorting of items. Defaults to True. + indent_guides (bool, optional): Enable indentaton guides. Defaults to False. + max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation. + Defaults to None. + max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None. + + Returns: + ConsoleRenderable: A renderable object. + """ + highlighter = ReprHighlighter() + items_table = Table.grid(padding=(0, 1), expand=False) + items_table.add_column(justify="right") + + def sort_items(item: Tuple[str, Any]) -> Tuple[bool, str]: + """Sort special variables first, then alphabetically.""" + key, _ = item + return (not key.startswith("__"), key.lower()) + + items = sorted(scope.items(), key=sort_items) if sort_keys else scope.items() + for key, value in items: + key_text = Text.assemble( + (key, "scope.key.special" if key.startswith("__") else "scope.key"), + (" =", "scope.equals"), + ) + items_table.add_row( + key_text, + Pretty( + value, + highlighter=highlighter, + indent_guides=indent_guides, + max_length=max_length, + max_string=max_string, + ), + ) + return Panel.fit( + items_table, + title=title, + border_style="scope.border", + padding=(0, 1), + ) + + +if __name__ == "__main__": # pragma: no cover + from pipenv.patched.notpip._vendor.rich import print + + print() + + def test(foo: float, bar: float) -> None: + list_of_things = [1, 2, 3, None, 4, True, False, "Hello World"] + dict_of_things = { + "version": "1.1", + "method": "confirmFruitPurchase", + "params": [["apple", "orange", "mangoes", "pomelo"], 1.123], + "id": "194521489", + } + print(render_scope(locals(), title="[i]locals", sort_keys=False)) + + test(20.3423, 3.1427) + print() diff --git a/pipenv/patched/notpip/_vendor/rich/screen.py b/pipenv/patched/notpip/_vendor/rich/screen.py new file mode 100644 index 0000000000..2783ff17a0 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/screen.py @@ -0,0 +1,54 @@ +from typing import Optional, TYPE_CHECKING + +from .segment import Segment +from .style import StyleType +from ._loop import loop_last + + +if TYPE_CHECKING: + from .console import ( + Console, + ConsoleOptions, + RenderResult, + RenderableType, + Group, + ) + + +class Screen: + """A renderable that fills the terminal screen and crops excess. + + Args: + renderable (RenderableType): Child renderable. + style (StyleType, optional): Optional background style. Defaults to None. 
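+        application_mode (bool, optional): Separate lines with "\n\r" instead of "\n". Defaults to False.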
+ """ + + renderable: "RenderableType" + + def __init__( + self, + *renderables: "RenderableType", + style: Optional[StyleType] = None, + application_mode: bool = False, + ) -> None: + from pipenv.patched.notpip._vendor.rich.console import Group + + self.renderable = Group(*renderables) + self.style = style + self.application_mode = application_mode + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": + width, height = options.size + style = console.get_style(self.style) if self.style else None + render_options = options.update(width=width, height=height) + lines = console.render_lines( + self.renderable or "", render_options, style=style, pad=True + ) + lines = Segment.set_shape(lines, width, height, style=style) + new_line = Segment("\n\r") if self.application_mode else Segment.line() + for last, line in loop_last(lines): + yield from line + if not last: + yield new_line diff --git a/pipenv/patched/notpip/_vendor/rich/segment.py b/pipenv/patched/notpip/_vendor/rich/segment.py new file mode 100644 index 0000000000..0c8a3ba08a --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/segment.py @@ -0,0 +1,720 @@ +from enum import IntEnum +from functools import lru_cache +from itertools import filterfalse +from logging import getLogger +from operator import attrgetter +from typing import ( + TYPE_CHECKING, + Dict, + Iterable, + List, + NamedTuple, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from .cells import ( + _is_single_cell_widths, + cell_len, + get_character_cell_size, + set_cell_size, +) +from .repr import Result, rich_repr +from .style import Style + +if TYPE_CHECKING: + from .console import Console, ConsoleOptions, RenderResult + +log = getLogger("rich") + + +class ControlType(IntEnum): + """Non-printable control codes which typically translate to ANSI codes.""" + + BELL = 1 + CARRIAGE_RETURN = 2 + HOME = 3 + CLEAR = 4 + SHOW_CURSOR = 5 + HIDE_CURSOR = 6 + ENABLE_ALT_SCREEN = 7 + DISABLE_ALT_SCREEN = 8 + CURSOR_UP = 9 + CURSOR_DOWN = 10 + CURSOR_FORWARD = 11 + CURSOR_BACKWARD = 12 + CURSOR_MOVE_TO_COLUMN = 13 + CURSOR_MOVE_TO = 14 + ERASE_IN_LINE = 15 + + +ControlCode = Union[ + Tuple[ControlType], Tuple[ControlType, int], Tuple[ControlType, int, int] +] + + +@rich_repr() +class Segment(NamedTuple): + """A piece of text with associated style. Segments are produced by the Console render process and + are ultimately converted in to strings to be written to the terminal. + + Args: + text (str): A piece of text. + style (:class:`~rich.style.Style`, optional): An optional style to apply to the text. + control (Tuple[ControlCode..], optional): Optional sequence of control codes. 
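+
+    Example (sketch)::
+
+        from pipenv.patched.notpip._vendor.rich.style import Style
+
+        segments = [Segment("Hello, "), Segment("World!", Style(bold=True))]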
+ """ + + text: str = "" + """Raw text.""" + style: Optional[Style] = None + """An optional style.""" + control: Optional[Sequence[ControlCode]] = None + """Optional sequence of control codes.""" + + def __rich_repr__(self) -> Result: + yield self.text + if self.control is None: + if self.style is not None: + yield self.style + else: + yield self.style + yield self.control + + def __bool__(self) -> bool: + """Check if the segment contains text.""" + return bool(self.text) + + @property + def cell_length(self) -> int: + """Get cell length of segment.""" + return 0 if self.control else cell_len(self.text) + + @property + def is_control(self) -> bool: + """Check if the segment contains control codes.""" + return self.control is not None + + @classmethod + @lru_cache(1024 * 16) + def _split_cells(cls, segment: "Segment", cut: int) -> Tuple["Segment", "Segment"]: # type: ignore + + text, style, control = segment + _Segment = Segment + + cell_length = segment.cell_length + if cut >= cell_length: + return segment, _Segment("", style, control) + + cell_size = get_character_cell_size + + pos = int((cut / cell_length) * len(text)) + + before = text[:pos] + cell_pos = cell_len(before) + if cell_pos == cut: + return ( + _Segment(before, style, control), + _Segment(text[pos:], style, control), + ) + while pos < len(text): + char = text[pos] + pos += 1 + cell_pos += cell_size(char) + before = text[:pos] + if cell_pos == cut: + return ( + _Segment(before, style, control), + _Segment(text[pos:], style, control), + ) + if cell_pos > cut: + return ( + _Segment(before[: pos - 1] + " ", style, control), + _Segment(" " + text[pos:], style, control), + ) + + def split_cells(self, cut: int) -> Tuple["Segment", "Segment"]: + """Split segment in to two segments at the specified column. + + If the cut point falls in the middle of a 2-cell wide character then it is replaced + by two spaces, to preserve the display width of the parent segment. + + Returns: + Tuple[Segment, Segment]: Two segments. + """ + text, style, control = self + + if _is_single_cell_widths(text): + # Fast path with all 1 cell characters + if cut >= len(text): + return self, Segment("", style, control) + return ( + Segment(text[:cut], style, control), + Segment(text[cut:], style, control), + ) + + return self._split_cells(self, cut) + + @classmethod + def line(cls) -> "Segment": + """Make a new line segment.""" + return cls("\n") + + @classmethod + def apply_style( + cls, + segments: Iterable["Segment"], + style: Optional[Style] = None, + post_style: Optional[Style] = None, + ) -> Iterable["Segment"]: + """Apply style(s) to an iterable of segments. + + Returns an iterable of segments where the style is replaced by ``style + segment.style + post_style``. + + Args: + segments (Iterable[Segment]): Segments to process. + style (Style, optional): Base style. Defaults to None. + post_style (Style, optional): Style to apply on top of segment style. Defaults to None. + + Returns: + Iterable[Segments]: A new iterable of segments (possibly the same iterable). 
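+
+        Example (sketch; ``segments`` is any iterable of Segment)::
+
+            bold = Style(bold=True)
+            emboldened = Segment.apply_style(segments, style=bold)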
+ """ + result_segments = segments + if style: + apply = style.__add__ + result_segments = ( + cls(text, None if control else apply(_style), control) + for text, _style, control in result_segments + ) + if post_style: + result_segments = ( + cls( + text, + ( + None + if control + else (_style + post_style if _style else post_style) + ), + control, + ) + for text, _style, control in result_segments + ) + return result_segments + + @classmethod + def filter_control( + cls, segments: Iterable["Segment"], is_control: bool = False + ) -> Iterable["Segment"]: + """Filter segments by ``is_control`` attribute. + + Args: + segments (Iterable[Segment]): An iterable of Segment instances. + is_control (bool, optional): is_control flag to match in search. + + Returns: + Iterable[Segment]: And iterable of Segment instances. + + """ + if is_control: + return filter(attrgetter("control"), segments) + else: + return filterfalse(attrgetter("control"), segments) + + @classmethod + def split_lines(cls, segments: Iterable["Segment"]) -> Iterable[List["Segment"]]: + """Split a sequence of segments in to a list of lines. + + Args: + segments (Iterable[Segment]): Segments potentially containing line feeds. + + Yields: + Iterable[List[Segment]]: Iterable of segment lists, one per line. + """ + line: List[Segment] = [] + append = line.append + + for segment in segments: + if "\n" in segment.text and not segment.control: + text, style, _ = segment + while text: + _text, new_line, text = text.partition("\n") + if _text: + append(cls(_text, style)) + if new_line: + yield line + line = [] + append = line.append + else: + append(segment) + if line: + yield line + + @classmethod + def split_and_crop_lines( + cls, + segments: Iterable["Segment"], + length: int, + style: Optional[Style] = None, + pad: bool = True, + include_new_lines: bool = True, + ) -> Iterable[List["Segment"]]: + """Split segments in to lines, and crop lines greater than a given length. + + Args: + segments (Iterable[Segment]): An iterable of segments, probably + generated from console.render. + length (int): Desired line length. + style (Style, optional): Style to use for any padding. + pad (bool): Enable padding of lines that are less than `length`. + + Returns: + Iterable[List[Segment]]: An iterable of lines of segments. + """ + line: List[Segment] = [] + append = line.append + + adjust_line_length = cls.adjust_line_length + new_line_segment = cls("\n") + + for segment in segments: + if "\n" in segment.text and not segment.control: + text, style, _ = segment + while text: + _text, new_line, text = text.partition("\n") + if _text: + append(cls(_text, style)) + if new_line: + cropped_line = adjust_line_length( + line, length, style=style, pad=pad + ) + if include_new_lines: + cropped_line.append(new_line_segment) + yield cropped_line + del line[:] + else: + append(segment) + if line: + yield adjust_line_length(line, length, style=style, pad=pad) + + @classmethod + def adjust_line_length( + cls, + line: List["Segment"], + length: int, + style: Optional[Style] = None, + pad: bool = True, + ) -> List["Segment"]: + """Adjust a line to a given width (cropping or padding as required). + + Args: + segments (Iterable[Segment]): A list of segments in a single line. + length (int): The desired width of the line. + style (Style, optional): The style of padding if used (space on the end). Defaults to None. + pad (bool, optional): Pad lines with spaces if they are shorter than `length`. Defaults to True. 
+ + Returns: + List[Segment]: A line of segments with the desired length. + """ + line_length = sum(segment.cell_length for segment in line) + new_line: List[Segment] + + if line_length < length: + if pad: + new_line = line + [cls(" " * (length - line_length), style)] + else: + new_line = line[:] + elif line_length > length: + new_line = [] + append = new_line.append + line_length = 0 + for segment in line: + segment_length = segment.cell_length + if line_length + segment_length < length or segment.control: + append(segment) + line_length += segment_length + else: + text, segment_style, _ = segment + text = set_cell_size(text, length - line_length) + append(cls(text, segment_style)) + break + else: + new_line = line[:] + return new_line + + @classmethod + def get_line_length(cls, line: List["Segment"]) -> int: + """Get the length of list of segments. + + Args: + line (List[Segment]): A line encoded as a list of Segments (assumes no '\\\\n' characters), + + Returns: + int: The length of the line. + """ + _cell_len = cell_len + return sum(_cell_len(segment.text) for segment in line) + + @classmethod + def get_shape(cls, lines: List[List["Segment"]]) -> Tuple[int, int]: + """Get the shape (enclosing rectangle) of a list of lines. + + Args: + lines (List[List[Segment]]): A list of lines (no '\\\\n' characters). + + Returns: + Tuple[int, int]: Width and height in characters. + """ + get_line_length = cls.get_line_length + max_width = max(get_line_length(line) for line in lines) if lines else 0 + return (max_width, len(lines)) + + @classmethod + def set_shape( + cls, + lines: List[List["Segment"]], + width: int, + height: Optional[int] = None, + style: Optional[Style] = None, + new_lines: bool = False, + ) -> List[List["Segment"]]: + """Set the shape of a list of lines (enclosing rectangle). + + Args: + lines (List[List[Segment]]): A list of lines. + width (int): Desired width. + height (int, optional): Desired height or None for no change. + style (Style, optional): Style of any padding added. + new_lines (bool, optional): Padded lines should include "\n". Defaults to False. + + Returns: + List[List[Segment]]: New list of lines. + """ + _height = height or len(lines) + + blank = ( + [cls(" " * width + "\n", style)] if new_lines else [cls(" " * width, style)] + ) + + adjust_line_length = cls.adjust_line_length + shaped_lines = lines[:_height] + shaped_lines[:] = [ + adjust_line_length(line, width, style=style) for line in lines + ] + if len(shaped_lines) < _height: + shaped_lines.extend([blank] * (_height - len(shaped_lines))) + return shaped_lines + + @classmethod + def align_top( + cls: Type["Segment"], + lines: List[List["Segment"]], + width: int, + height: int, + style: Style, + new_lines: bool = False, + ) -> List[List["Segment"]]: + """Aligns lines to top (adds extra lines to bottom as required). + + Args: + lines (List[List[Segment]]): A list of lines. + width (int): Desired width. + height (int, optional): Desired height or None for no change. + style (Style): Style of any padding added. + new_lines (bool, optional): Padded lines should include "\n". Defaults to False. + + Returns: + List[List[Segment]]: New list of lines. 
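+
+        Example (sketch)::
+
+            lines = [[Segment("hello")]]
+            padded = Segment.align_top(lines, width=5, height=3, style=Style())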
+ """ + extra_lines = height - len(lines) + if not extra_lines: + return lines[:] + lines = lines[:height] + blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style) + lines = lines + [[blank]] * extra_lines + return lines + + @classmethod + def align_bottom( + cls: Type["Segment"], + lines: List[List["Segment"]], + width: int, + height: int, + style: Style, + new_lines: bool = False, + ) -> List[List["Segment"]]: + """Aligns render to bottom (adds extra lines above as required). + + Args: + lines (List[List[Segment]]): A list of lines. + width (int): Desired width. + height (int, optional): Desired height or None for no change. + style (Style): Style of any padding added. Defaults to None. + new_lines (bool, optional): Padded lines should include "\n". Defaults to False. + + Returns: + List[List[Segment]]: New list of lines. + """ + extra_lines = height - len(lines) + if not extra_lines: + return lines[:] + lines = lines[:height] + blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style) + lines = [[blank]] * extra_lines + lines + return lines + + @classmethod + def align_middle( + cls: Type["Segment"], + lines: List[List["Segment"]], + width: int, + height: int, + style: Style, + new_lines: bool = False, + ) -> List[List["Segment"]]: + """Aligns lines to middle (adds extra lines to above and below as required). + + Args: + lines (List[List[Segment]]): A list of lines. + width (int): Desired width. + height (int, optional): Desired height or None for no change. + style (Style): Style of any padding added. + new_lines (bool, optional): Padded lines should include "\n". Defaults to False. + + Returns: + List[List[Segment]]: New list of lines. + """ + extra_lines = height - len(lines) + if not extra_lines: + return lines[:] + lines = lines[:height] + blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style) + top_lines = extra_lines // 2 + bottom_lines = extra_lines - top_lines + lines = [[blank]] * top_lines + lines + [[blank]] * bottom_lines + return lines + + @classmethod + def simplify(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]: + """Simplify an iterable of segments by combining contiguous segments with the same style. + + Args: + segments (Iterable[Segment]): An iterable of segments. + + Returns: + Iterable[Segment]: A possibly smaller iterable of segments that will render the same way. + """ + iter_segments = iter(segments) + try: + last_segment = next(iter_segments) + except StopIteration: + return + + _Segment = Segment + for segment in iter_segments: + if last_segment.style == segment.style and not segment.control: + last_segment = _Segment( + last_segment.text + segment.text, last_segment.style + ) + else: + yield last_segment + last_segment = segment + yield last_segment + + @classmethod + def strip_links(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]: + """Remove all links from an iterable of styles. + + Args: + segments (Iterable[Segment]): An iterable segments. + + Yields: + Segment: Segments with link removed. + """ + for segment in segments: + if segment.control or segment.style is None: + yield segment + else: + text, style, _control = segment + yield cls(text, style.update_link(None) if style else None) + + @classmethod + def strip_styles(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]: + """Remove all styles from an iterable of segments. + + Args: + segments (Iterable[Segment]): An iterable segments. 
+ + Yields: + Segment: Segments with styles replace with None + """ + for text, _style, control in segments: + yield cls(text, None, control) + + @classmethod + def remove_color(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]: + """Remove all color from an iterable of segments. + + Args: + segments (Iterable[Segment]): An iterable segments. + + Yields: + Segment: Segments with colorless style. + """ + + cache: Dict[Style, Style] = {} + for text, style, control in segments: + if style: + colorless_style = cache.get(style) + if colorless_style is None: + colorless_style = style.without_color + cache[style] = colorless_style + yield cls(text, colorless_style, control) + else: + yield cls(text, None, control) + + @classmethod + def divide( + cls, segments: Iterable["Segment"], cuts: Iterable[int] + ) -> Iterable[List["Segment"]]: + """Divides an iterable of segments in to portions. + + Args: + cuts (Iterable[int]): Cell positions where to divide. + + Yields: + [Iterable[List[Segment]]]: An iterable of Segments in List. + """ + split_segments: List["Segment"] = [] + add_segment = split_segments.append + + iter_cuts = iter(cuts) + + while True: + try: + cut = next(iter_cuts) + except StopIteration: + return [] + if cut != 0: + break + yield [] + pos = 0 + + for segment in segments: + while segment.text: + end_pos = pos + segment.cell_length + if end_pos < cut: + add_segment(segment) + pos = end_pos + break + + try: + if end_pos == cut: + add_segment(segment) + yield split_segments[:] + del split_segments[:] + pos = end_pos + break + else: + before, segment = segment.split_cells(cut - pos) + add_segment(before) + yield split_segments[:] + del split_segments[:] + pos = cut + finally: + try: + cut = next(iter_cuts) + except StopIteration: + if split_segments: + yield split_segments[:] + return + yield split_segments[:] + + +class Segments: + """A simple renderable to render an iterable of segments. This class may be useful if + you want to print segments outside of a __rich_console__ method. + + Args: + segments (Iterable[Segment]): An iterable of segments. + new_lines (bool, optional): Add new lines between segments. Defaults to False. + """ + + def __init__(self, segments: Iterable[Segment], new_lines: bool = False) -> None: + self.segments = list(segments) + self.new_lines = new_lines + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": + if self.new_lines: + line = Segment.line() + for segment in self.segments: + yield segment + yield line + else: + yield from self.segments + + +class SegmentLines: + def __init__(self, lines: Iterable[List[Segment]], new_lines: bool = False) -> None: + """A simple renderable containing a number of lines of segments. May be used as an intermediate + in rendering process. + + Args: + lines (Iterable[List[Segment]]): Lists of segments forming lines. + new_lines (bool, optional): Insert new lines after each line. Defaults to False. 
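+
+        Example (sketch; assumes an existing ``Console`` instance named ``console``)::
+
+            lines = [[Segment("hello")], [Segment("world")]]
+            console.print(SegmentLines(lines, new_lines=True))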
+        """
+        self.lines = list(lines)
+        self.new_lines = new_lines
+
+    def __rich_console__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> "RenderResult":
+        if self.new_lines:
+            new_line = Segment.line()
+            for line in self.lines:
+                yield from line
+                yield new_line
+        else:
+            for line in self.lines:
+                yield from line
+
+
+if __name__ == "__main__":  # pragma: no cover
+    from pipenv.patched.notpip._vendor.rich.console import Console
+    from pipenv.patched.notpip._vendor.rich.syntax import Syntax
+    from pipenv.patched.notpip._vendor.rich.text import Text
+
+    code = """from rich.console import Console
+console = Console()
+text = Text.from_markup("Hello, [bold magenta]World[/]!")
+console.print(text)"""
+
+    text = Text.from_markup("Hello, [bold magenta]World[/]!")
+
+    console = Console()
+
+    console.rule("rich.Segment")
+    console.print(
+        "A Segment is the last step in the Rich render process before generating text with ANSI codes."
+    )
+    console.print("\nConsider the following code:\n")
+    console.print(Syntax(code, "python", line_numbers=True))
+    console.print()
+    console.print(
+        "When you call [b]print()[/b], Rich [i]renders[/i] the object into the following:\n"
+    )
+    fragments = list(console.render(text))
+    console.print(fragments)
+    console.print()
+    console.print(
+        "The Segments are then processed to produce the following output:\n"
+    )
+    console.print(text)
+    console.print(
+        "\nYou will only need to know this if you are implementing your own Rich renderables."
+    )
diff --git a/pipenv/patched/notpip/_vendor/rich/spinner.py b/pipenv/patched/notpip/_vendor/rich/spinner.py
new file mode 100644
index 0000000000..5b13b1e9ba
--- /dev/null
+++ b/pipenv/patched/notpip/_vendor/rich/spinner.py
@@ -0,0 +1,134 @@
+from typing import cast, List, Optional, TYPE_CHECKING
+
+from ._spinners import SPINNERS
+from .measure import Measurement
+from .table import Table
+from .text import Text
+
+if TYPE_CHECKING:
+    from .console import Console, ConsoleOptions, RenderResult, RenderableType
+    from .style import StyleType
+
+
+class Spinner:
+    def __init__(
+        self,
+        name: str,
+        text: "RenderableType" = "",
+        *,
+        style: Optional["StyleType"] = None,
+        speed: float = 1.0,
+    ) -> None:
+        """A spinner animation.
+
+        Args:
+            name (str): Name of spinner (run python -m rich.spinner).
+            text (RenderableType, optional): A renderable to display at the right of the spinner (str or Text typically). Defaults to "".
+            style (StyleType, optional): Style for spinner animation. Defaults to None.
+            speed (float, optional): Speed factor for animation. Defaults to 1.0.
+
+        Raises:
+            KeyError: If name isn't one of the supported spinner animations.
+        """
+        try:
+            spinner = SPINNERS[name]
+        except KeyError:
+            raise KeyError(f"no spinner called {name!r}")
+        self.text = Text.from_markup(text) if isinstance(text, str) else text
+        self.frames = cast(List[str], spinner["frames"])[:]
+        self.interval = cast(float, spinner["interval"])
+        self.start_time: Optional[float] = None
+        self.style = style
+        self.speed = speed
+        self.frame_no_offset: float = 0.0
+        self._update_speed = 0.0
+
+    def __rich_console__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> "RenderResult":
+        yield self.render(console.get_time())
+
+    def __rich_measure__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> Measurement:
+        text = self.render(0)
+        return Measurement.get(console, options, text)
+
+    def render(self, time: float) -> "RenderableType":
+        """Render the spinner for a given time.
+ + Args: + time (float): Time in seconds. + + Returns: + RenderableType: A renderable containing animation frame. + """ + if self.start_time is None: + self.start_time = time + + frame_no = ((time - self.start_time) * self.speed) / ( + self.interval / 1000.0 + ) + self.frame_no_offset + frame = Text( + self.frames[int(frame_no) % len(self.frames)], style=self.style or "" + ) + + if self._update_speed: + self.frame_no_offset = frame_no + self.start_time = time + self.speed = self._update_speed + self._update_speed = 0.0 + + if not self.text: + return frame + elif isinstance(self.text, (str, Text)): + return Text.assemble(frame, " ", self.text) + else: + table = Table.grid(padding=1) + table.add_row(frame, self.text) + return table + + def update( + self, + *, + text: "RenderableType" = "", + style: Optional["StyleType"] = None, + speed: Optional[float] = None, + ) -> None: + """Updates attributes of a spinner after it has been started. + + Args: + text (RenderableType, optional): A renderable to display at the right of the spinner (str or Text typically). Defaults to "". + style (StyleType, optional): Style for spinner animation. Defaults to None. + speed (float, optional): Speed factor for animation. Defaults to None. + """ + if text: + self.text = Text.from_markup(text) if isinstance(text, str) else text + if style: + self.style = style + if speed: + self._update_speed = speed + + +if __name__ == "__main__": # pragma: no cover + from time import sleep + + from .columns import Columns + from .panel import Panel + from .live import Live + + all_spinners = Columns( + [ + Spinner(spinner_name, text=Text(repr(spinner_name), style="green")) + for spinner_name in sorted(SPINNERS.keys()) + ], + column_first=True, + expand=True, + ) + + with Live( + Panel(all_spinners, title="Spinners", border_style="blue"), + refresh_per_second=20, + ) as live: + while True: + sleep(0.1) diff --git a/pipenv/patched/notpip/_vendor/rich/status.py b/pipenv/patched/notpip/_vendor/rich/status.py new file mode 100644 index 0000000000..09eff405ec --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/status.py @@ -0,0 +1,132 @@ +from types import TracebackType +from typing import Optional, Type + +from .console import Console, RenderableType +from .jupyter import JupyterMixin +from .live import Live +from .spinner import Spinner +from .style import StyleType + + +class Status(JupyterMixin): + """Displays a status indicator with a 'spinner' animation. + + Args: + status (RenderableType): A status renderable (str or Text typically). + console (Console, optional): Console instance to use, or None for global console. Defaults to None. + spinner (str, optional): Name of spinner animation (see python -m rich.spinner). Defaults to "dots". + spinner_style (StyleType, optional): Style of spinner. Defaults to "status.spinner". + speed (float, optional): Speed factor for spinner animation. Defaults to 1.0. + refresh_per_second (float, optional): Number of refreshes per second. Defaults to 12.5. 
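+
+    Example (sketch; ``do_work`` stands in for any long-running call)::
+
+        with Status("Working...") as status:
+            do_work()
+            status.update("Still working...")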
+ """ + + def __init__( + self, + status: RenderableType, + *, + console: Optional[Console] = None, + spinner: str = "dots", + spinner_style: StyleType = "status.spinner", + speed: float = 1.0, + refresh_per_second: float = 12.5, + ): + self.status = status + self.spinner_style = spinner_style + self.speed = speed + self._spinner = Spinner(spinner, text=status, style=spinner_style, speed=speed) + self._live = Live( + self.renderable, + console=console, + refresh_per_second=refresh_per_second, + transient=True, + ) + + @property + def renderable(self) -> Spinner: + return self._spinner + + @property + def console(self) -> "Console": + """Get the Console used by the Status objects.""" + return self._live.console + + def update( + self, + status: Optional[RenderableType] = None, + *, + spinner: Optional[str] = None, + spinner_style: Optional[StyleType] = None, + speed: Optional[float] = None, + ) -> None: + """Update status. + + Args: + status (Optional[RenderableType], optional): New status renderable or None for no change. Defaults to None. + spinner (Optional[str], optional): New spinner or None for no change. Defaults to None. + spinner_style (Optional[StyleType], optional): New spinner style or None for no change. Defaults to None. + speed (Optional[float], optional): Speed factor for spinner animation or None for no change. Defaults to None. + """ + if status is not None: + self.status = status + if spinner_style is not None: + self.spinner_style = spinner_style + if speed is not None: + self.speed = speed + if spinner is not None: + self._spinner = Spinner( + spinner, text=self.status, style=self.spinner_style, speed=self.speed + ) + self._live.update(self.renderable, refresh=True) + else: + self._spinner.update( + text=self.status, style=self.spinner_style, speed=self.speed + ) + + def start(self) -> None: + """Start the status animation.""" + self._live.start() + + def stop(self) -> None: + """Stop the spinner animation.""" + self._live.stop() + + def __rich__(self) -> RenderableType: + return self.renderable + + def __enter__(self) -> "Status": + self.start() + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + self.stop() + + +if __name__ == "__main__": # pragma: no cover + + from time import sleep + + from .console import Console + + console = Console() + with console.status("[magenta]Covid detector booting up") as status: + sleep(3) + console.log("Importing advanced AI") + sleep(3) + console.log("Advanced Covid AI Ready") + sleep(3) + status.update(status="[bold blue] Scanning for Covid", spinner="earth") + sleep(3) + console.log("Found 10,000,000,000 copies of Covid32.exe") + sleep(3) + status.update( + status="[bold red]Moving Covid32.exe to Trash", + spinner="bouncingBall", + spinner_style="yellow", + ) + sleep(5) + console.print("[bold green]Covid deleted successfully") diff --git a/pipenv/patched/notpip/_vendor/rich/style.py b/pipenv/patched/notpip/_vendor/rich/style.py new file mode 100644 index 0000000000..0787c33147 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/style.py @@ -0,0 +1,785 @@ +import sys +from functools import lru_cache +from marshal import loads, dumps +from random import randint +from typing import Any, cast, Dict, Iterable, List, Optional, Type, Union + +from . 
import errors +from .color import Color, ColorParseError, ColorSystem, blend_rgb +from .repr import rich_repr, Result +from .terminal_theme import DEFAULT_TERMINAL_THEME, TerminalTheme + + +# Style instances and style definitions are often interchangeable +StyleType = Union[str, "Style"] + + +class _Bit: + """A descriptor to get/set a style attribute bit.""" + + __slots__ = ["bit"] + + def __init__(self, bit_no: int) -> None: + self.bit = 1 << bit_no + + def __get__(self, obj: "Style", objtype: Type["Style"]) -> Optional[bool]: + if obj._set_attributes & self.bit: + return obj._attributes & self.bit != 0 + return None + + +@rich_repr +class Style: + """A terminal style. + + A terminal style consists of a color (`color`), a background color (`bgcolor`), and a number of attributes, such + as bold, italic etc. The attributes have 3 states: they can either be on + (``True``), off (``False``), or not set (``None``). + + Args: + color (Union[Color, str], optional): Color of terminal text. Defaults to None. + bgcolor (Union[Color, str], optional): Color of terminal background. Defaults to None. + bold (bool, optional): Enable bold text. Defaults to None. + dim (bool, optional): Enable dim text. Defaults to None. + italic (bool, optional): Enable italic text. Defaults to None. + underline (bool, optional): Enable underlined text. Defaults to None. + blink (bool, optional): Enabled blinking text. Defaults to None. + blink2 (bool, optional): Enable fast blinking text. Defaults to None. + reverse (bool, optional): Enabled reverse text. Defaults to None. + conceal (bool, optional): Enable concealed text. Defaults to None. + strike (bool, optional): Enable strikethrough text. Defaults to None. + underline2 (bool, optional): Enable doubly underlined text. Defaults to None. + frame (bool, optional): Enable framed text. Defaults to None. + encircle (bool, optional): Enable encircled text. Defaults to None. + overline (bool, optional): Enable overlined text. Defaults to None. + link (str, link): Link URL. Defaults to None. 
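+
+    Example (sketch)::
+
+        >>> style = Style(color="magenta", bold=True)
+        >>> str(style)
+        'bold magenta'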
+ + """ + + _color: Optional[Color] + _bgcolor: Optional[Color] + _attributes: int + _set_attributes: int + _hash: int + _null: bool + _meta: Optional[bytes] + + __slots__ = [ + "_color", + "_bgcolor", + "_attributes", + "_set_attributes", + "_link", + "_link_id", + "_ansi", + "_style_definition", + "_hash", + "_null", + "_meta", + ] + + # maps bits on to SGR parameter + _style_map = { + 0: "1", + 1: "2", + 2: "3", + 3: "4", + 4: "5", + 5: "6", + 6: "7", + 7: "8", + 8: "9", + 9: "21", + 10: "51", + 11: "52", + 12: "53", + } + + STYLE_ATTRIBUTES = { + "dim": "dim", + "d": "dim", + "bold": "bold", + "b": "bold", + "italic": "italic", + "i": "italic", + "underline": "underline", + "u": "underline", + "blink": "blink", + "blink2": "blink2", + "reverse": "reverse", + "r": "reverse", + "conceal": "conceal", + "c": "conceal", + "strike": "strike", + "s": "strike", + "underline2": "underline2", + "uu": "underline2", + "frame": "frame", + "encircle": "encircle", + "overline": "overline", + "o": "overline", + } + + def __init__( + self, + *, + color: Optional[Union[Color, str]] = None, + bgcolor: Optional[Union[Color, str]] = None, + bold: Optional[bool] = None, + dim: Optional[bool] = None, + italic: Optional[bool] = None, + underline: Optional[bool] = None, + blink: Optional[bool] = None, + blink2: Optional[bool] = None, + reverse: Optional[bool] = None, + conceal: Optional[bool] = None, + strike: Optional[bool] = None, + underline2: Optional[bool] = None, + frame: Optional[bool] = None, + encircle: Optional[bool] = None, + overline: Optional[bool] = None, + link: Optional[str] = None, + meta: Optional[Dict[str, Any]] = None, + ): + self._ansi: Optional[str] = None + self._style_definition: Optional[str] = None + + def _make_color(color: Union[Color, str]) -> Color: + return color if isinstance(color, Color) else Color.parse(color) + + self._color = None if color is None else _make_color(color) + self._bgcolor = None if bgcolor is None else _make_color(bgcolor) + self._set_attributes = sum( + ( + bold is not None, + dim is not None and 2, + italic is not None and 4, + underline is not None and 8, + blink is not None and 16, + blink2 is not None and 32, + reverse is not None and 64, + conceal is not None and 128, + strike is not None and 256, + underline2 is not None and 512, + frame is not None and 1024, + encircle is not None and 2048, + overline is not None and 4096, + ) + ) + self._attributes = ( + sum( + ( + bold and 1 or 0, + dim and 2 or 0, + italic and 4 or 0, + underline and 8 or 0, + blink and 16 or 0, + blink2 and 32 or 0, + reverse and 64 or 0, + conceal and 128 or 0, + strike and 256 or 0, + underline2 and 512 or 0, + frame and 1024 or 0, + encircle and 2048 or 0, + overline and 4096 or 0, + ) + ) + if self._set_attributes + else 0 + ) + + self._link = link + self._link_id = f"{randint(0, 999999)}" if link else "" + self._meta = None if meta is None else dumps(meta) + self._hash = hash( + ( + self._color, + self._bgcolor, + self._attributes, + self._set_attributes, + link, + self._meta, + ) + ) + self._null = not (self._set_attributes or color or bgcolor or link or meta) + + @classmethod + def null(cls) -> "Style": + """Create an 'null' style, equivalent to Style(), but more performant.""" + return NULL_STYLE + + @classmethod + def from_color( + cls, color: Optional[Color] = None, bgcolor: Optional[Color] = None + ) -> "Style": + """Create a new style with colors and no attributes. + + Returns: + color (Optional[Color]): A (foreground) color, or None for no color. Defaults to None. 
+ bgcolor (Optional[Color]): A (background) color, or None for no color. Defaults to None. + """ + style: Style = cls.__new__(Style) + style._ansi = None + style._style_definition = None + style._color = color + style._bgcolor = bgcolor + style._set_attributes = 0 + style._attributes = 0 + style._link = None + style._link_id = "" + style._meta = None + style._hash = hash( + ( + color, + bgcolor, + None, + None, + None, + None, + ) + ) + style._null = not (color or bgcolor) + return style + + @classmethod + def from_meta(cls, meta: Optional[Dict[str, Any]]) -> "Style": + """Create a new style with meta data. + + Returns: + meta (Optional[Dict[str, Any]]): A dictionary of meta data. Defaults to None. + """ + style: Style = cls.__new__(Style) + style._ansi = None + style._style_definition = None + style._color = None + style._bgcolor = None + style._set_attributes = 0 + style._attributes = 0 + style._link = None + style._link_id = "" + style._meta = dumps(meta) + style._hash = hash( + ( + None, + None, + None, + None, + None, + style._meta, + ) + ) + style._null = not (meta) + return style + + @classmethod + def on(cls, meta: Optional[Dict[str, Any]] = None, **handlers: Any) -> "Style": + """Create a blank style with meta information. + + Example: + style = Style.on(click=self.on_click) + + Args: + meta (Optiona[Dict[str, Any]], optional): An optional dict of meta information. + **handlers (Any): Keyword arguments are translated in to handlers. + + Returns: + Style: A Style with meta information attached. + """ + meta = {} if meta is None else meta + meta.update({f"@{key}": value for key, value in handlers.items()}) + return cls.from_meta(meta) + + bold = _Bit(0) + dim = _Bit(1) + italic = _Bit(2) + underline = _Bit(3) + blink = _Bit(4) + blink2 = _Bit(5) + reverse = _Bit(6) + conceal = _Bit(7) + strike = _Bit(8) + underline2 = _Bit(9) + frame = _Bit(10) + encircle = _Bit(11) + overline = _Bit(12) + + @property + def link_id(self) -> str: + """Get a link id, used in ansi code for links.""" + return self._link_id + + def __str__(self) -> str: + """Re-generate style definition from attributes.""" + if self._style_definition is None: + attributes: List[str] = [] + append = attributes.append + bits = self._set_attributes + if bits & 0b0000000001111: + if bits & 1: + append("bold" if self.bold else "not bold") + if bits & (1 << 1): + append("dim" if self.dim else "not dim") + if bits & (1 << 2): + append("italic" if self.italic else "not italic") + if bits & (1 << 3): + append("underline" if self.underline else "not underline") + if bits & 0b0000111110000: + if bits & (1 << 4): + append("blink" if self.blink else "not blink") + if bits & (1 << 5): + append("blink2" if self.blink2 else "not blink2") + if bits & (1 << 6): + append("reverse" if self.reverse else "not reverse") + if bits & (1 << 7): + append("conceal" if self.conceal else "not conceal") + if bits & (1 << 8): + append("strike" if self.strike else "not strike") + if bits & 0b1111000000000: + if bits & (1 << 9): + append("underline2" if self.underline2 else "not underline2") + if bits & (1 << 10): + append("frame" if self.frame else "not frame") + if bits & (1 << 11): + append("encircle" if self.encircle else "not encircle") + if bits & (1 << 12): + append("overline" if self.overline else "not overline") + if self._color is not None: + append(self._color.name) + if self._bgcolor is not None: + append("on") + append(self._bgcolor.name) + if self._link: + append("link") + append(self._link) + self._style_definition = " ".join(attributes) 
or "none" + return self._style_definition + + def __bool__(self) -> bool: + """A Style is false if it has no attributes, colors, or links.""" + return not self._null + + def _make_ansi_codes(self, color_system: ColorSystem) -> str: + """Generate ANSI codes for this style. + + Args: + color_system (ColorSystem): Color system. + + Returns: + str: String containing codes. + """ + if self._ansi is None: + sgr: List[str] = [] + append = sgr.append + _style_map = self._style_map + attributes = self._attributes & self._set_attributes + if attributes: + if attributes & 1: + append(_style_map[0]) + if attributes & 2: + append(_style_map[1]) + if attributes & 4: + append(_style_map[2]) + if attributes & 8: + append(_style_map[3]) + if attributes & 0b0000111110000: + for bit in range(4, 9): + if attributes & (1 << bit): + append(_style_map[bit]) + if attributes & 0b1111000000000: + for bit in range(9, 13): + if attributes & (1 << bit): + append(_style_map[bit]) + if self._color is not None: + sgr.extend(self._color.downgrade(color_system).get_ansi_codes()) + if self._bgcolor is not None: + sgr.extend( + self._bgcolor.downgrade(color_system).get_ansi_codes( + foreground=False + ) + ) + self._ansi = ";".join(sgr) + return self._ansi + + @classmethod + @lru_cache(maxsize=1024) + def normalize(cls, style: str) -> str: + """Normalize a style definition so that styles with the same effect have the same string + representation. + + Args: + style (str): A style definition. + + Returns: + str: Normal form of style definition. + """ + try: + return str(cls.parse(style)) + except errors.StyleSyntaxError: + return style.strip().lower() + + @classmethod + def pick_first(cls, *values: Optional[StyleType]) -> StyleType: + """Pick first non-None style.""" + for value in values: + if value is not None: + return value + raise ValueError("expected at least one non-None style") + + def __rich_repr__(self) -> Result: + yield "color", self.color, None + yield "bgcolor", self.bgcolor, None + yield "bold", self.bold, None, + yield "dim", self.dim, None, + yield "italic", self.italic, None + yield "underline", self.underline, None, + yield "blink", self.blink, None + yield "blink2", self.blink2, None + yield "reverse", self.reverse, None + yield "conceal", self.conceal, None + yield "strike", self.strike, None + yield "underline2", self.underline2, None + yield "frame", self.frame, None + yield "encircle", self.encircle, None + yield "link", self.link, None + if self._meta: + yield "meta", self.meta + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, Style): + return NotImplemented + return ( + self._color == other._color + and self._bgcolor == other._bgcolor + and self._set_attributes == other._set_attributes + and self._attributes == other._attributes + and self._link == other._link + and self._meta == other._meta + ) + + def __hash__(self) -> int: + return self._hash + + @property + def color(self) -> Optional[Color]: + """The foreground color or None if it is not set.""" + return self._color + + @property + def bgcolor(self) -> Optional[Color]: + """The background color or None if it is not set.""" + return self._bgcolor + + @property + def link(self) -> Optional[str]: + """Link text, if set.""" + return self._link + + @property + def transparent_background(self) -> bool: + """Check if the style specified a transparent background.""" + return self.bgcolor is None or self.bgcolor.is_default + + @property + def background_style(self) -> "Style": + """A Style with background only.""" + return 
+
+    @property
+    def meta(self) -> Dict[str, Any]:
+        """Get meta information (cannot be changed after construction)."""
+        return {} if self._meta is None else cast(Dict[str, Any], loads(self._meta))
+
+    @property
+    def without_color(self) -> "Style":
+        """Get a copy of the style with color removed."""
+        if self._null:
+            return NULL_STYLE
+        style: Style = self.__new__(Style)
+        style._ansi = None
+        style._style_definition = None
+        style._color = None
+        style._bgcolor = None
+        style._attributes = self._attributes
+        style._set_attributes = self._set_attributes
+        style._link = self._link
+        style._link_id = f"{randint(0, 999999)}" if self._link else ""
+        style._hash = self._hash
+        style._null = False
+        style._meta = None
+        return style
+
+    @classmethod
+    @lru_cache(maxsize=4096)
+    def parse(cls, style_definition: str) -> "Style":
+        """Parse a style definition.
+
+        Args:
+            style_definition (str): A string containing a style.
+
+        Raises:
+            errors.StyleSyntaxError: If the style definition syntax is invalid.
+
+        Returns:
+            `Style`: A Style instance.
+        """
+        if style_definition.strip() == "none" or not style_definition:
+            return cls.null()
+
+        STYLE_ATTRIBUTES = cls.STYLE_ATTRIBUTES
+        color: Optional[str] = None
+        bgcolor: Optional[str] = None
+        attributes: Dict[str, Optional[Any]] = {}
+        link: Optional[str] = None
+
+        words = iter(style_definition.split())
+        for original_word in words:
+            word = original_word.lower()
+            if word == "on":
+                word = next(words, "")
+                if not word:
+                    raise errors.StyleSyntaxError("color expected after 'on'")
+                try:
+                    Color.parse(word)
+                except ColorParseError as error:
+                    raise errors.StyleSyntaxError(
+                        f"unable to parse {word!r} as background color; {error}"
+                    ) from None
+                bgcolor = word
+
+            elif word == "not":
+                word = next(words, "")
+                attribute = STYLE_ATTRIBUTES.get(word)
+                if attribute is None:
+                    raise errors.StyleSyntaxError(
+                        f"expected style attribute after 'not', found {word!r}"
+                    )
+                attributes[attribute] = False
+
+            elif word == "link":
+                word = next(words, "")
+                if not word:
+                    raise errors.StyleSyntaxError("URL expected after 'link'")
+                link = word
+
+            elif word in STYLE_ATTRIBUTES:
+                attributes[STYLE_ATTRIBUTES[word]] = True
+
+            else:
+                try:
+                    Color.parse(word)
+                except ColorParseError as error:
+                    raise errors.StyleSyntaxError(
+                        f"unable to parse {word!r} as color; {error}"
+                    ) from None
+                color = word
+        style = Style(color=color, bgcolor=bgcolor, link=link, **attributes)
+        return style
+
+    @lru_cache(maxsize=1024)
+    def get_html_style(self, theme: Optional[TerminalTheme] = None) -> str:
+        """Get a CSS style rule."""
+        theme = theme or DEFAULT_TERMINAL_THEME
+        css: List[str] = []
+        append = css.append
+
+        color = self.color
+        bgcolor = self.bgcolor
+        if self.reverse:
+            color, bgcolor = bgcolor, color
+        if self.dim:
+            foreground_color = (
+                theme.foreground_color if color is None else color.get_truecolor(theme)
+            )
+            color = Color.from_triplet(
+                blend_rgb(foreground_color, theme.background_color, 0.5)
+            )
+        if color is not None:
+            theme_color = color.get_truecolor(theme)
+            append(f"color: {theme_color.hex}")
+            append(f"text-decoration-color: {theme_color.hex}")
+        if bgcolor is not None:
+            theme_color = bgcolor.get_truecolor(theme, foreground=False)
+            append(f"background-color: {theme_color.hex}")
+        if self.bold:
+            append("font-weight: bold")
+        if self.italic:
+            append("font-style: italic")
+        if self.underline:
+            append("text-decoration: underline")
+        if self.strike:
+            append("text-decoration: line-through")
+        if self.overline:
+            append("text-decoration: overline")
+        return "; ".join(css)
+
+    @classmethod
+    def combine(cls, styles: Iterable["Style"]) -> "Style":
+        """Combine styles and get result.
+
+        Args:
+            styles (Iterable[Style]): Styles to combine.
+
+        Returns:
+            Style: A new style instance.
+        """
+        iter_styles = iter(styles)
+        return sum(iter_styles, next(iter_styles))
+
+    @classmethod
+    def chain(cls, *styles: "Style") -> "Style":
+        """Combine styles from positional arguments into a single style.
+
+        Args:
+            *styles (Style): Styles to combine.
+
+        Returns:
+            Style: A new style instance.
+        """
+        iter_styles = iter(styles)
+        return sum(iter_styles, next(iter_styles))
+
+    def copy(self) -> "Style":
+        """Get a copy of this style.
+
+        Returns:
+            Style: A new Style instance with identical attributes.
+        """
+        if self._null:
+            return NULL_STYLE
+        style: Style = self.__new__(Style)
+        style._ansi = self._ansi
+        style._style_definition = self._style_definition
+        style._color = self._color
+        style._bgcolor = self._bgcolor
+        style._attributes = self._attributes
+        style._set_attributes = self._set_attributes
+        style._link = self._link
+        style._link_id = f"{randint(0, 999999)}" if self._link else ""
+        style._hash = self._hash
+        style._null = False
+        style._meta = self._meta
+        return style
+
+    def update_link(self, link: Optional[str] = None) -> "Style":
+        """Get a copy with a different value for link.
+
+        Args:
+            link (str, optional): New value for link. Defaults to None.
+
+        Returns:
+            Style: A new Style instance.
+        """
+        style: Style = self.__new__(Style)
+        style._ansi = self._ansi
+        style._style_definition = self._style_definition
+        style._color = self._color
+        style._bgcolor = self._bgcolor
+        style._attributes = self._attributes
+        style._set_attributes = self._set_attributes
+        style._link = link
+        style._link_id = f"{randint(0, 999999)}" if link else ""
+        style._hash = self._hash
+        style._null = False
+        style._meta = self._meta
+        return style
+
+    def render(
+        self,
+        text: str = "",
+        *,
+        color_system: Optional[ColorSystem] = ColorSystem.TRUECOLOR,
+        legacy_windows: bool = False,
+    ) -> str:
+        """Render the ANSI codes for the style.
+
+        Args:
+            text (str, optional): A string to style. Defaults to "".
+            color_system (Optional[ColorSystem], optional): Color system to render to. Defaults to ColorSystem.TRUECOLOR.
+            legacy_windows (bool, optional): Disable hyperlink escape codes (for legacy Windows terminals). Defaults to False.
+
+        Returns:
+            str: A string containing ANSI style codes.
+        """
+        if not text or color_system is None:
+            return text
+        attrs = self._make_ansi_codes(color_system)
+        rendered = f"\x1b[{attrs}m{text}\x1b[0m" if attrs else text
+        if self._link and not legacy_windows:
+            rendered = (
+                f"\x1b]8;id={self._link_id};{self._link}\x1b\\{rendered}\x1b]8;;\x1b\\"
+            )
+        return rendered
+
+    def test(self, text: Optional[str] = None) -> None:
+        """Write text with style directly to terminal.
+
+        This method is for testing purposes only.
+
+        Args:
+            text (Optional[str], optional): Text to style or None for style name. Defaults to None.
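+
+        Example (an illustrative call, not part of the vendored source):
+            Style(color="magenta", bold=True).test("Hello, World!")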
+ + """ + text = text or str(self) + sys.stdout.write(f"{self.render(text)}\n") + + def __add__(self, style: Optional["Style"]) -> "Style": + if not (isinstance(style, Style) or style is None): + return NotImplemented + if style is None or style._null: + return self + if self._null: + return style + new_style: Style = self.__new__(Style) + new_style._ansi = None + new_style._style_definition = None + new_style._color = style._color or self._color + new_style._bgcolor = style._bgcolor or self._bgcolor + new_style._attributes = (self._attributes & ~style._set_attributes) | ( + style._attributes & style._set_attributes + ) + new_style._set_attributes = self._set_attributes | style._set_attributes + new_style._link = style._link or self._link + new_style._link_id = style._link_id or self._link_id + new_style._hash = style._hash + new_style._null = self._null or style._null + if self._meta and style._meta: + new_style._meta = dumps({**self.meta, **style.meta}) + else: + new_style._meta = self._meta or style._meta + return new_style + + +NULL_STYLE = Style() + + +class StyleStack: + """A stack of styles.""" + + __slots__ = ["_stack"] + + def __init__(self, default_style: "Style") -> None: + self._stack: List[Style] = [default_style] + + def __repr__(self) -> str: + return f"" + + @property + def current(self) -> Style: + """Get the Style at the top of the stack.""" + return self._stack[-1] + + def push(self, style: Style) -> None: + """Push a new style on to the stack. + + Args: + style (Style): New style to combine with current style. + """ + self._stack.append(self._stack[-1] + style) + + def pop(self) -> Style: + """Pop last style and discard. + + Returns: + Style: New current style (also available as stack.current) + """ + self._stack.pop() + return self._stack[-1] diff --git a/pipenv/patched/notpip/_vendor/rich/styled.py b/pipenv/patched/notpip/_vendor/rich/styled.py new file mode 100644 index 0000000000..c577a527ca --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/styled.py @@ -0,0 +1,42 @@ +from typing import TYPE_CHECKING + +from .measure import Measurement +from .segment import Segment +from .style import StyleType + +if TYPE_CHECKING: + from .console import Console, ConsoleOptions, RenderResult, RenderableType + + +class Styled: + """Apply a style to a renderable. + + Args: + renderable (RenderableType): Any renderable. + style (StyleType): A style to apply across the entire renderable. 
+ """ + + def __init__(self, renderable: "RenderableType", style: "StyleType") -> None: + self.renderable = renderable + self.style = style + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": + style = console.get_style(self.style) + rendered_segments = console.render(self.renderable, options) + segments = Segment.apply_style(rendered_segments, style) + return segments + + def __rich_measure__( + self, console: "Console", options: "ConsoleOptions" + ) -> Measurement: + return Measurement.get(console, options, self.renderable) + + +if __name__ == "__main__": # pragma: no cover + from pipenv.patched.notpip._vendor.rich import print + from pipenv.patched.notpip._vendor.rich.panel import Panel + + panel = Styled(Panel("hello"), "on blue") + print(panel) diff --git a/pipenv/patched/notpip/_vendor/rich/syntax.py b/pipenv/patched/notpip/_vendor/rich/syntax.py new file mode 100644 index 0000000000..41b9c3b58d --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/syntax.py @@ -0,0 +1,735 @@ +import os.path +import platform +from pipenv.patched.notpip._vendor.rich.containers import Lines +import textwrap +from abc import ABC, abstractmethod +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Type, Union + +from pipenv.patched.notpip._vendor.pygments.lexer import Lexer +from pipenv.patched.notpip._vendor.pygments.lexers import get_lexer_by_name, guess_lexer_for_filename +from pipenv.patched.notpip._vendor.pygments.style import Style as PygmentsStyle +from pipenv.patched.notpip._vendor.pygments.styles import get_style_by_name +from pipenv.patched.notpip._vendor.pygments.token import ( + Comment, + Error, + Generic, + Keyword, + Name, + Number, + Operator, + String, + Token, + Whitespace, +) +from pipenv.patched.notpip._vendor.pygments.util import ClassNotFound + +from ._loop import loop_first +from .color import Color, blend_rgb +from .console import Console, ConsoleOptions, JustifyMethod, RenderResult +from .jupyter import JupyterMixin +from .measure import Measurement +from .segment import Segment +from .style import Style +from .text import Text + +TokenType = Tuple[str, ...] 
+ +WINDOWS = platform.system() == "Windows" +DEFAULT_THEME = "monokai" + +# The following styles are based on https://github.com/pygments/pygments/blob/master/pygments/formatters/terminal.py +# A few modifications were made + +ANSI_LIGHT: Dict[TokenType, Style] = { + Token: Style(), + Whitespace: Style(color="white"), + Comment: Style(dim=True), + Comment.Preproc: Style(color="cyan"), + Keyword: Style(color="blue"), + Keyword.Type: Style(color="cyan"), + Operator.Word: Style(color="magenta"), + Name.Builtin: Style(color="cyan"), + Name.Function: Style(color="green"), + Name.Namespace: Style(color="cyan", underline=True), + Name.Class: Style(color="green", underline=True), + Name.Exception: Style(color="cyan"), + Name.Decorator: Style(color="magenta", bold=True), + Name.Variable: Style(color="red"), + Name.Constant: Style(color="red"), + Name.Attribute: Style(color="cyan"), + Name.Tag: Style(color="bright_blue"), + String: Style(color="yellow"), + Number: Style(color="blue"), + Generic.Deleted: Style(color="bright_red"), + Generic.Inserted: Style(color="green"), + Generic.Heading: Style(bold=True), + Generic.Subheading: Style(color="magenta", bold=True), + Generic.Prompt: Style(bold=True), + Generic.Error: Style(color="bright_red"), + Error: Style(color="red", underline=True), +} + +ANSI_DARK: Dict[TokenType, Style] = { + Token: Style(), + Whitespace: Style(color="bright_black"), + Comment: Style(dim=True), + Comment.Preproc: Style(color="bright_cyan"), + Keyword: Style(color="bright_blue"), + Keyword.Type: Style(color="bright_cyan"), + Operator.Word: Style(color="bright_magenta"), + Name.Builtin: Style(color="bright_cyan"), + Name.Function: Style(color="bright_green"), + Name.Namespace: Style(color="bright_cyan", underline=True), + Name.Class: Style(color="bright_green", underline=True), + Name.Exception: Style(color="bright_cyan"), + Name.Decorator: Style(color="bright_magenta", bold=True), + Name.Variable: Style(color="bright_red"), + Name.Constant: Style(color="bright_red"), + Name.Attribute: Style(color="bright_cyan"), + Name.Tag: Style(color="bright_blue"), + String: Style(color="yellow"), + Number: Style(color="bright_blue"), + Generic.Deleted: Style(color="bright_red"), + Generic.Inserted: Style(color="bright_green"), + Generic.Heading: Style(bold=True), + Generic.Subheading: Style(color="bright_magenta", bold=True), + Generic.Prompt: Style(bold=True), + Generic.Error: Style(color="bright_red"), + Error: Style(color="red", underline=True), +} + +RICH_SYNTAX_THEMES = {"ansi_light": ANSI_LIGHT, "ansi_dark": ANSI_DARK} + + +class SyntaxTheme(ABC): + """Base class for a syntax theme.""" + + @abstractmethod + def get_style_for_token(self, token_type: TokenType) -> Style: + """Get a style for a given Pygments token.""" + raise NotImplementedError # pragma: no cover + + @abstractmethod + def get_background_style(self) -> Style: + """Get the background color.""" + raise NotImplementedError # pragma: no cover + + +class PygmentsSyntaxTheme(SyntaxTheme): + """Syntax theme that delegates to Pygments theme.""" + + def __init__(self, theme: Union[str, Type[PygmentsStyle]]) -> None: + self._style_cache: Dict[TokenType, Style] = {} + if isinstance(theme, str): + try: + self._pygments_style_class = get_style_by_name(theme) + except ClassNotFound: + self._pygments_style_class = get_style_by_name("default") + else: + self._pygments_style_class = theme + + self._background_color = self._pygments_style_class.background_color + self._background_style = Style(bgcolor=self._background_color) + + def 
get_style_for_token(self, token_type: TokenType) -> Style: + """Get a style from a Pygments class.""" + try: + return self._style_cache[token_type] + except KeyError: + try: + pygments_style = self._pygments_style_class.style_for_token(token_type) + except KeyError: + style = Style.null() + else: + color = pygments_style["color"] + bgcolor = pygments_style["bgcolor"] + style = Style( + color="#" + color if color else "#000000", + bgcolor="#" + bgcolor if bgcolor else self._background_color, + bold=pygments_style["bold"], + italic=pygments_style["italic"], + underline=pygments_style["underline"], + ) + self._style_cache[token_type] = style + return style + + def get_background_style(self) -> Style: + return self._background_style + + +class ANSISyntaxTheme(SyntaxTheme): + """Syntax theme to use standard colors.""" + + def __init__(self, style_map: Dict[TokenType, Style]) -> None: + self.style_map = style_map + self._missing_style = Style.null() + self._background_style = Style.null() + self._style_cache: Dict[TokenType, Style] = {} + + def get_style_for_token(self, token_type: TokenType) -> Style: + """Look up style in the style map.""" + try: + return self._style_cache[token_type] + except KeyError: + # Styles form a hierarchy + # We need to go from most to least specific + # e.g. ("foo", "bar", "baz") to ("foo", "bar") to ("foo",) + get_style = self.style_map.get + token = tuple(token_type) + style = self._missing_style + while token: + _style = get_style(token) + if _style is not None: + style = _style + break + token = token[:-1] + self._style_cache[token_type] = style + return style + + def get_background_style(self) -> Style: + return self._background_style + + +class Syntax(JupyterMixin): + """Construct a Syntax object to render syntax highlighted code. + + Args: + code (str): Code to highlight. + lexer (Lexer | str): Lexer to use (see https://pygments.org/docs/lexers/) + theme (str, optional): Color theme, aka Pygments style (see https://pygments.org/docs/styles/#getting-a-list-of-available-styles). Defaults to "monokai". + dedent (bool, optional): Enable stripping of initial whitespace. Defaults to False. + line_numbers (bool, optional): Enable rendering of line numbers. Defaults to False. + start_line (int, optional): Starting number for line numbers. Defaults to 1. + line_range (Tuple[int, int], optional): If given should be a tuple of the start and end line to render. + highlight_lines (Set[int]): A set of line numbers to highlight. + code_width: Width of code to render (not including line numbers), or ``None`` to use all available width. + tab_size (int, optional): Size of tabs. Defaults to 4. + word_wrap (bool, optional): Enable word wrapping. + background_color (str, optional): Optional background color, or None to use theme color. Defaults to None. + indent_guides (bool, optional): Show indent guides. Defaults to False. 
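+
+    Example:
+        # Illustrative usage (a sketch, not from the vendored source):
+        syntax = Syntax("print('hello')\n", "python", theme="monokai", line_numbers=True)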
+ """ + + _pygments_style_class: Type[PygmentsStyle] + _theme: SyntaxTheme + + @classmethod + def get_theme(cls, name: Union[str, SyntaxTheme]) -> SyntaxTheme: + """Get a syntax theme instance.""" + if isinstance(name, SyntaxTheme): + return name + theme: SyntaxTheme + if name in RICH_SYNTAX_THEMES: + theme = ANSISyntaxTheme(RICH_SYNTAX_THEMES[name]) + else: + theme = PygmentsSyntaxTheme(name) + return theme + + def __init__( + self, + code: str, + lexer: Union[Lexer, str], + *, + theme: Union[str, SyntaxTheme] = DEFAULT_THEME, + dedent: bool = False, + line_numbers: bool = False, + start_line: int = 1, + line_range: Optional[Tuple[int, int]] = None, + highlight_lines: Optional[Set[int]] = None, + code_width: Optional[int] = None, + tab_size: int = 4, + word_wrap: bool = False, + background_color: Optional[str] = None, + indent_guides: bool = False, + ) -> None: + self.code = code + self._lexer = lexer + self.dedent = dedent + self.line_numbers = line_numbers + self.start_line = start_line + self.line_range = line_range + self.highlight_lines = highlight_lines or set() + self.code_width = code_width + self.tab_size = tab_size + self.word_wrap = word_wrap + self.background_color = background_color + self.background_style = ( + Style(bgcolor=background_color) if background_color else Style() + ) + self.indent_guides = indent_guides + + self._theme = self.get_theme(theme) + + @classmethod + def from_path( + cls, + path: str, + encoding: str = "utf-8", + theme: Union[str, SyntaxTheme] = DEFAULT_THEME, + dedent: bool = False, + line_numbers: bool = False, + line_range: Optional[Tuple[int, int]] = None, + start_line: int = 1, + highlight_lines: Optional[Set[int]] = None, + code_width: Optional[int] = None, + tab_size: int = 4, + word_wrap: bool = False, + background_color: Optional[str] = None, + indent_guides: bool = False, + ) -> "Syntax": + """Construct a Syntax object from a file. + + Args: + path (str): Path to file to highlight. + encoding (str): Encoding of file. + theme (str, optional): Color theme, aka Pygments style (see https://pygments.org/docs/styles/#getting-a-list-of-available-styles). Defaults to "emacs". + dedent (bool, optional): Enable stripping of initial whitespace. Defaults to True. + line_numbers (bool, optional): Enable rendering of line numbers. Defaults to False. + start_line (int, optional): Starting number for line numbers. Defaults to 1. + line_range (Tuple[int, int], optional): If given should be a tuple of the start and end line to render. + highlight_lines (Set[int]): A set of line numbers to highlight. + code_width: Width of code to render (not including line numbers), or ``None`` to use all available width. + tab_size (int, optional): Size of tabs. Defaults to 4. + word_wrap (bool, optional): Enable word wrapping of code. + background_color (str, optional): Optional background color, or None to use theme color. Defaults to None. + indent_guides (bool, optional): Show indent guides. Defaults to False. 
+ + Returns: + [Syntax]: A Syntax object that may be printed to the console + """ + with open(path, "rt", encoding=encoding) as code_file: + code = code_file.read() + + lexer = None + lexer_name = "default" + try: + _, ext = os.path.splitext(path) + if ext: + extension = ext.lstrip(".").lower() + lexer = get_lexer_by_name(extension) + lexer_name = lexer.name + except ClassNotFound: + pass + + if lexer is None: + try: + lexer_name = guess_lexer_for_filename(path, code).name + except ClassNotFound: + pass + + return cls( + code, + lexer_name, + theme=theme, + dedent=dedent, + line_numbers=line_numbers, + line_range=line_range, + start_line=start_line, + highlight_lines=highlight_lines, + code_width=code_width, + tab_size=tab_size, + word_wrap=word_wrap, + background_color=background_color, + indent_guides=indent_guides, + ) + + def _get_base_style(self) -> Style: + """Get the base style.""" + default_style = self._theme.get_background_style() + self.background_style + return default_style + + def _get_token_color(self, token_type: TokenType) -> Optional[Color]: + """Get a color (if any) for the given token. + + Args: + token_type (TokenType): A token type tuple from Pygments. + + Returns: + Optional[Color]: Color from theme, or None for no color. + """ + style = self._theme.get_style_for_token(token_type) + return style.color + + @property + def lexer(self) -> Optional[Lexer]: + """The lexer for this syntax, or None if no lexer was found. + + Tries to find the lexer by name if a string was passed to the constructor. + """ + + if isinstance(self._lexer, Lexer): + return self._lexer + try: + return get_lexer_by_name( + self._lexer, + stripnl=False, + ensurenl=True, + tabsize=self.tab_size, + ) + except ClassNotFound: + return None + + def highlight( + self, code: str, line_range: Optional[Tuple[int, int]] = None + ) -> Text: + """Highlight code and return a Text instance. + + Args: + code (str): Code to highlight. + line_range(Tuple[int, int], optional): Optional line range to highlight. + + Returns: + Text: A text instance containing highlighted syntax. 
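+
+        Example (illustrative; this is also the method ``__rich_console__`` calls internally):
+            text = Syntax("", "python").highlight("x = 1\n")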
+ """ + + base_style = self._get_base_style() + justify: JustifyMethod = ( + "default" if base_style.transparent_background else "left" + ) + + text = Text( + justify=justify, + style=base_style, + tab_size=self.tab_size, + no_wrap=not self.word_wrap, + ) + _get_theme_style = self._theme.get_style_for_token + + lexer = self.lexer + + if lexer is None: + text.append(code) + else: + if line_range: + # More complicated path to only stylize a portion of the code + # This speeds up further operations as there are less spans to process + line_start, line_end = line_range + + def line_tokenize() -> Iterable[Tuple[Any, str]]: + """Split tokens to one per line.""" + assert lexer + + for token_type, token in lexer.get_tokens(code): + while token: + line_token, new_line, token = token.partition("\n") + yield token_type, line_token + new_line + + def tokens_to_spans() -> Iterable[Tuple[str, Optional[Style]]]: + """Convert tokens to spans.""" + tokens = iter(line_tokenize()) + line_no = 0 + _line_start = line_start - 1 + + # Skip over tokens until line start + while line_no < _line_start: + _token_type, token = next(tokens) + yield (token, None) + if token.endswith("\n"): + line_no += 1 + # Generate spans until line end + for token_type, token in tokens: + yield (token, _get_theme_style(token_type)) + if token.endswith("\n"): + line_no += 1 + if line_no >= line_end: + break + + text.append_tokens(tokens_to_spans()) + + else: + text.append_tokens( + (token, _get_theme_style(token_type)) + for token_type, token in lexer.get_tokens(code) + ) + if self.background_color is not None: + text.stylize(f"on {self.background_color}") + return text + + def _get_line_numbers_color(self, blend: float = 0.3) -> Color: + background_style = self._theme.get_background_style() + self.background_style + background_color = background_style.bgcolor + if background_color is None or background_color.is_system_defined: + return Color.default() + foreground_color = self._get_token_color(Token.Text) + if foreground_color is None or foreground_color.is_system_defined: + return foreground_color or Color.default() + new_color = blend_rgb( + background_color.get_truecolor(), + foreground_color.get_truecolor(), + cross_fade=blend, + ) + return Color.from_triplet(new_color) + + @property + def _numbers_column_width(self) -> int: + """Get the number of characters used to render the numbers column.""" + column_width = 0 + if self.line_numbers: + column_width = len(str(self.start_line + self.code.count("\n"))) + 2 + return column_width + + def _get_number_styles(self, console: Console) -> Tuple[Style, Style, Style]: + """Get background, number, and highlight styles for line numbers.""" + background_style = self._get_base_style() + if background_style.transparent_background: + return Style.null(), Style(dim=True), Style.null() + if console.color_system in ("256", "truecolor"): + number_style = Style.chain( + background_style, + self._theme.get_style_for_token(Token.Text), + Style(color=self._get_line_numbers_color()), + self.background_style, + ) + highlight_number_style = Style.chain( + background_style, + self._theme.get_style_for_token(Token.Text), + Style(bold=True, color=self._get_line_numbers_color(0.9)), + self.background_style, + ) + else: + number_style = background_style + Style(dim=True) + highlight_number_style = background_style + Style(dim=False) + return background_style, number_style, highlight_number_style + + def __rich_measure__( + self, console: "Console", options: "ConsoleOptions" + ) -> "Measurement": + if 
self.code_width is not None: + width = self.code_width + self._numbers_column_width + return Measurement(self._numbers_column_width, width) + return Measurement(self._numbers_column_width, options.max_width) + + def __rich_console__( + self, console: Console, options: ConsoleOptions + ) -> RenderResult: + + transparent_background = self._get_base_style().transparent_background + code_width = ( + ( + (options.max_width - self._numbers_column_width - 1) + if self.line_numbers + else options.max_width + ) + if self.code_width is None + else self.code_width + ) + + line_offset = 0 + if self.line_range: + start_line, end_line = self.line_range + line_offset = max(0, start_line - 1) + + ends_on_nl = self.code.endswith("\n") + code = self.code if ends_on_nl else self.code + "\n" + code = textwrap.dedent(code) if self.dedent else code + code = code.expandtabs(self.tab_size) + text = self.highlight(code, self.line_range) + + ( + background_style, + number_style, + highlight_number_style, + ) = self._get_number_styles(console) + + if not self.line_numbers and not self.word_wrap and not self.line_range: + if not ends_on_nl: + text.remove_suffix("\n") + # Simple case of just rendering text + style = ( + self._get_base_style() + + self._theme.get_style_for_token(Comment) + + Style(dim=True) + + self.background_style + ) + if self.indent_guides and not options.ascii_only: + text = text.with_indent_guides(self.tab_size, style=style) + text.overflow = "crop" + if style.transparent_background: + yield from console.render( + text, options=options.update(width=code_width) + ) + else: + syntax_lines = console.render_lines( + text, + options.update(width=code_width, height=None), + style=self.background_style, + pad=True, + new_lines=True, + ) + for syntax_line in syntax_lines: + yield from syntax_line + return + + lines: Union[List[Text], Lines] = text.split("\n", allow_blank=ends_on_nl) + if self.line_range: + lines = lines[line_offset:end_line] + + if self.indent_guides and not options.ascii_only: + style = ( + self._get_base_style() + + self._theme.get_style_for_token(Comment) + + Style(dim=True) + + self.background_style + ) + lines = ( + Text("\n") + .join(lines) + .with_indent_guides(self.tab_size, style=style) + .split("\n", allow_blank=True) + ) + + numbers_column_width = self._numbers_column_width + render_options = options.update(width=code_width) + + highlight_line = self.highlight_lines.__contains__ + _Segment = Segment + padding = _Segment(" " * numbers_column_width + " ", background_style) + new_line = _Segment("\n") + + line_pointer = "> " if options.legacy_windows else "❱ " + + for line_no, line in enumerate(lines, self.start_line + line_offset): + if self.word_wrap: + wrapped_lines = console.render_lines( + line, + render_options.update(height=None), + style=background_style, + pad=not transparent_background, + ) + + else: + segments = list(line.render(console, end="")) + if options.no_wrap: + wrapped_lines = [segments] + else: + wrapped_lines = [ + _Segment.adjust_line_length( + segments, + render_options.max_width, + style=background_style, + pad=not transparent_background, + ) + ] + if self.line_numbers: + for first, wrapped_line in loop_first(wrapped_lines): + if first: + line_column = str(line_no).rjust(numbers_column_width - 2) + " " + if highlight_line(line_no): + yield _Segment(line_pointer, Style(color="red")) + yield _Segment(line_column, highlight_number_style) + else: + yield _Segment(" ", highlight_number_style) + yield _Segment(line_column, number_style) + else: + yield padding 
+ yield from wrapped_line + yield new_line + else: + for wrapped_line in wrapped_lines: + yield from wrapped_line + yield new_line + + +if __name__ == "__main__": # pragma: no cover + + import argparse + import sys + + parser = argparse.ArgumentParser( + description="Render syntax to the console with Rich" + ) + parser.add_argument( + "path", + metavar="PATH", + help="path to file, or - for stdin", + ) + parser.add_argument( + "-c", + "--force-color", + dest="force_color", + action="store_true", + default=None, + help="force color for non-terminals", + ) + parser.add_argument( + "-i", + "--indent-guides", + dest="indent_guides", + action="store_true", + default=False, + help="display indent guides", + ) + parser.add_argument( + "-l", + "--line-numbers", + dest="line_numbers", + action="store_true", + help="render line numbers", + ) + parser.add_argument( + "-w", + "--width", + type=int, + dest="width", + default=None, + help="width of output (default will auto-detect)", + ) + parser.add_argument( + "-r", + "--wrap", + dest="word_wrap", + action="store_true", + default=False, + help="word wrap long lines", + ) + parser.add_argument( + "-s", + "--soft-wrap", + action="store_true", + dest="soft_wrap", + default=False, + help="enable soft wrapping mode", + ) + parser.add_argument( + "-t", "--theme", dest="theme", default="monokai", help="pygments theme" + ) + parser.add_argument( + "-b", + "--background-color", + dest="background_color", + default=None, + help="Override background color", + ) + parser.add_argument( + "-x", + "--lexer", + default="default", + dest="lexer_name", + help="Lexer name", + ) + args = parser.parse_args() + + from pipenv.patched.notpip._vendor.rich.console import Console + + console = Console(force_terminal=args.force_color, width=args.width) + + if args.path == "-": + code = sys.stdin.read() + syntax = Syntax( + code=code, + lexer=args.lexer_name, + line_numbers=args.line_numbers, + word_wrap=args.word_wrap, + theme=args.theme, + background_color=args.background_color, + indent_guides=args.indent_guides, + ) + else: + syntax = Syntax.from_path( + args.path, + line_numbers=args.line_numbers, + word_wrap=args.word_wrap, + theme=args.theme, + background_color=args.background_color, + indent_guides=args.indent_guides, + ) + console.print(syntax, soft_wrap=args.soft_wrap) diff --git a/pipenv/patched/notpip/_vendor/rich/table.py b/pipenv/patched/notpip/_vendor/rich/table.py new file mode 100644 index 0000000000..4f6063b424 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/table.py @@ -0,0 +1,968 @@ +from dataclasses import dataclass, field, replace +from typing import ( + TYPE_CHECKING, + Dict, + Iterable, + List, + NamedTuple, + Optional, + Sequence, + Tuple, + Union, +) + +from . 
import box, errors +from ._loop import loop_first_last, loop_last +from ._pick import pick_bool +from ._ratio import ratio_distribute, ratio_reduce +from .align import VerticalAlignMethod +from .jupyter import JupyterMixin +from .measure import Measurement +from .padding import Padding, PaddingDimensions +from .protocol import is_renderable +from .segment import Segment +from .style import Style, StyleType +from .text import Text, TextType + +if TYPE_CHECKING: + from .console import ( + Console, + ConsoleOptions, + JustifyMethod, + OverflowMethod, + RenderableType, + RenderResult, + ) + + +@dataclass +class Column: + """Defines a column in a table.""" + + header: "RenderableType" = "" + """RenderableType: Renderable for the header (typically a string)""" + + footer: "RenderableType" = "" + """RenderableType: Renderable for the footer (typically a string)""" + + header_style: StyleType = "" + """StyleType: The style of the header.""" + + footer_style: StyleType = "" + """StyleType: The style of the footer.""" + + style: StyleType = "" + """StyleType: The style of the column.""" + + justify: "JustifyMethod" = "left" + """str: How to justify text within the column ("left", "center", "right", or "full")""" + + vertical: "VerticalAlignMethod" = "top" + """str: How to vertically align content ("top", "middle", or "bottom")""" + + overflow: "OverflowMethod" = "ellipsis" + """str: Overflow method.""" + + width: Optional[int] = None + """Optional[int]: Width of the column, or ``None`` (default) to auto calculate width.""" + + min_width: Optional[int] = None + """Optional[int]: Minimum width of column, or ``None`` for no minimum. Defaults to None.""" + + max_width: Optional[int] = None + """Optional[int]: Maximum width of column, or ``None`` for no maximum. Defaults to None.""" + + ratio: Optional[int] = None + """Optional[int]: Ratio to use when calculating column width, or ``None`` (default) to adapt to column contents.""" + + no_wrap: bool = False + """bool: Prevent wrapping of text within the column. Defaults to ``False``.""" + + _index: int = 0 + """Index of column.""" + + _cells: List["RenderableType"] = field(default_factory=list) + + def copy(self) -> "Column": + """Return a copy of this Column.""" + return replace(self, _cells=[]) + + @property + def cells(self) -> Iterable["RenderableType"]: + """Get all cells in the column, not including header.""" + yield from self._cells + + @property + def flexible(self) -> bool: + """Check if this column is flexible.""" + return self.ratio is not None + + +@dataclass +class Row: + """Information regarding a row.""" + + style: Optional[StyleType] = None + """Style to apply to row.""" + + end_section: bool = False + """Indicated end of section, which will force a line beneath the row.""" + + +class _Cell(NamedTuple): + """A single cell in a table.""" + + style: StyleType + """Style to apply to cell.""" + renderable: "RenderableType" + """Cell renderable.""" + vertical: VerticalAlignMethod + """Cell vertical alignment.""" + + +class Table(JupyterMixin): + """A console renderable to draw a table. + + Args: + *headers (Union[Column, str]): Column headers, either as a string, or :class:`~rich.table.Column` instance. + title (Union[str, Text], optional): The title of the table rendered at the top. Defaults to None. + caption (Union[str, Text], optional): The table caption rendered below. Defaults to None. + width (int, optional): The width in characters of the table, or ``None`` to automatically fit. Defaults to None. 
+        min_width (Optional[int], optional): The minimum width of the table, or ``None`` for no minimum. Defaults to None.
+        box (box.Box, optional): One of the constants in box.py used to draw the edges (see :ref:`appendix_box`), or ``None`` for no box lines. Defaults to box.HEAVY_HEAD.
+        safe_box (Optional[bool], optional): Disable box characters that don't display on Windows legacy terminal with *raster* fonts. Defaults to True.
+        padding (PaddingDimensions, optional): Padding for cells (top, right, bottom, left). Defaults to (0, 1).
+        collapse_padding (bool, optional): Enable collapsing of padding around cells. Defaults to False.
+        pad_edge (bool, optional): Enable padding of edge cells. Defaults to True.
+        expand (bool, optional): Expand the table to fit the available space if ``True``, otherwise the table width will be auto-calculated. Defaults to False.
+        show_header (bool, optional): Show a header row. Defaults to True.
+        show_footer (bool, optional): Show a footer row. Defaults to False.
+        show_edge (bool, optional): Draw a box around the outside of the table. Defaults to True.
+        show_lines (bool, optional): Draw lines between every row. Defaults to False.
+        leading (int, optional): Number of blank lines between rows (precludes ``show_lines``). Defaults to 0.
+        style (Union[str, Style], optional): Default style for the table. Defaults to "none".
+        row_styles (List[Union[str, Style]], optional): Optional list of row styles; if more than one style is given, the styles will alternate. Defaults to None.
+        header_style (Union[str, Style], optional): Style of the header. Defaults to "table.header".
+        footer_style (Union[str, Style], optional): Style of the footer. Defaults to "table.footer".
+        border_style (Union[str, Style], optional): Style of the border. Defaults to None.
+        title_style (Union[str, Style], optional): Style of the title. Defaults to None.
+        caption_style (Union[str, Style], optional): Style of the caption. Defaults to None.
+        title_justify (str, optional): Justify method for title. Defaults to "center".
+        caption_justify (str, optional): Justify method for caption. Defaults to "center".
+        highlight (bool, optional): Highlight cell contents (if str). Defaults to False.
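+
+    Example:
+        # A short sketch based on the demo in this module's ``__main__`` block:
+        table = Table(title="Star Wars Movies")
+        table.add_column("Released", style="cyan", no_wrap=True)
+        table.add_column("Title", style="magenta")
+        table.add_row("Dec 20, 2019", "Star Wars: The Rise of Skywalker")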
+ """ + + columns: List[Column] + rows: List[Row] + + def __init__( + self, + *headers: Union[Column, str], + title: Optional[TextType] = None, + caption: Optional[TextType] = None, + width: Optional[int] = None, + min_width: Optional[int] = None, + box: Optional[box.Box] = box.HEAVY_HEAD, + safe_box: Optional[bool] = None, + padding: PaddingDimensions = (0, 1), + collapse_padding: bool = False, + pad_edge: bool = True, + expand: bool = False, + show_header: bool = True, + show_footer: bool = False, + show_edge: bool = True, + show_lines: bool = False, + leading: int = 0, + style: StyleType = "none", + row_styles: Optional[Iterable[StyleType]] = None, + header_style: Optional[StyleType] = "table.header", + footer_style: Optional[StyleType] = "table.footer", + border_style: Optional[StyleType] = None, + title_style: Optional[StyleType] = None, + caption_style: Optional[StyleType] = None, + title_justify: "JustifyMethod" = "center", + caption_justify: "JustifyMethod" = "center", + highlight: bool = False, + ) -> None: + + self.columns: List[Column] = [] + self.rows: List[Row] = [] + self.title = title + self.caption = caption + self.width = width + self.min_width = min_width + self.box = box + self.safe_box = safe_box + self._padding = Padding.unpack(padding) + self.pad_edge = pad_edge + self._expand = expand + self.show_header = show_header + self.show_footer = show_footer + self.show_edge = show_edge + self.show_lines = show_lines + self.leading = leading + self.collapse_padding = collapse_padding + self.style = style + self.header_style = header_style or "" + self.footer_style = footer_style or "" + self.border_style = border_style + self.title_style = title_style + self.caption_style = caption_style + self.title_justify: "JustifyMethod" = title_justify + self.caption_justify: "JustifyMethod" = caption_justify + self.highlight = highlight + self.row_styles: Sequence[StyleType] = list(row_styles or []) + append_column = self.columns.append + for header in headers: + if isinstance(header, str): + self.add_column(header=header) + else: + header._index = len(self.columns) + append_column(header) + + @classmethod + def grid( + cls, + *headers: Union[Column, str], + padding: PaddingDimensions = 0, + collapse_padding: bool = True, + pad_edge: bool = False, + expand: bool = False, + ) -> "Table": + """Get a table with no lines, headers, or footer. + + Args: + *headers (Union[Column, str]): Column headers, either as a string, or :class:`~rich.table.Column` instance. + padding (PaddingDimensions, optional): Get padding around cells. Defaults to 0. + collapse_padding (bool, optional): Enable collapsing of padding around cells. Defaults to True. + pad_edge (bool, optional): Enable padding around edges of table. Defaults to False. + expand (bool, optional): Expand the table to fit the available space if ``True``, otherwise the table width will be auto-calculated. Defaults to False. + + Returns: + Table: A table instance. 
+ """ + return cls( + *headers, + box=None, + padding=padding, + collapse_padding=collapse_padding, + show_header=False, + show_footer=False, + show_edge=False, + pad_edge=pad_edge, + expand=expand, + ) + + @property + def expand(self) -> bool: + """Setting a non-None self.width implies expand.""" + return self._expand or self.width is not None + + @expand.setter + def expand(self, expand: bool) -> None: + """Set expand.""" + self._expand = expand + + @property + def _extra_width(self) -> int: + """Get extra width to add to cell content.""" + width = 0 + if self.box and self.show_edge: + width += 2 + if self.box: + width += len(self.columns) - 1 + return width + + @property + def row_count(self) -> int: + """Get the current number of rows.""" + return len(self.rows) + + def get_row_style(self, console: "Console", index: int) -> StyleType: + """Get the current row style.""" + style = Style.null() + if self.row_styles: + style += console.get_style(self.row_styles[index % len(self.row_styles)]) + row_style = self.rows[index].style + if row_style is not None: + style += console.get_style(row_style) + return style + + def __rich_measure__( + self, console: "Console", options: "ConsoleOptions" + ) -> Measurement: + max_width = options.max_width + if self.width is not None: + max_width = self.width + if max_width < 0: + return Measurement(0, 0) + + extra_width = self._extra_width + max_width = sum( + self._calculate_column_widths( + console, options.update_width(max_width - extra_width) + ) + ) + _measure_column = self._measure_column + + measurements = [ + _measure_column(console, options.update_width(max_width), column) + for column in self.columns + ] + minimum_width = ( + sum(measurement.minimum for measurement in measurements) + extra_width + ) + maximum_width = ( + sum(measurement.maximum for measurement in measurements) + extra_width + if (self.width is None) + else self.width + ) + measurement = Measurement(minimum_width, maximum_width) + measurement = measurement.clamp(self.min_width) + return measurement + + @property + def padding(self) -> Tuple[int, int, int, int]: + """Get cell padding.""" + return self._padding + + @padding.setter + def padding(self, padding: PaddingDimensions) -> "Table": + """Set cell padding.""" + self._padding = Padding.unpack(padding) + return self + + def add_column( + self, + header: "RenderableType" = "", + footer: "RenderableType" = "", + *, + header_style: Optional[StyleType] = None, + footer_style: Optional[StyleType] = None, + style: Optional[StyleType] = None, + justify: "JustifyMethod" = "left", + vertical: "VerticalAlignMethod" = "top", + overflow: "OverflowMethod" = "ellipsis", + width: Optional[int] = None, + min_width: Optional[int] = None, + max_width: Optional[int] = None, + ratio: Optional[int] = None, + no_wrap: bool = False, + ) -> None: + """Add a column to the table. + + Args: + header (RenderableType, optional): Text or renderable for the header. + Defaults to "". + footer (RenderableType, optional): Text or renderable for the footer. + Defaults to "". + header_style (Union[str, Style], optional): Style for the header, or None for default. Defaults to None. + footer_style (Union[str, Style], optional): Style for the footer, or None for default. Defaults to None. + style (Union[str, Style], optional): Style for the column cells, or None for default. Defaults to None. + justify (JustifyMethod, optional): Alignment for cells. Defaults to "left". 
+ vertical (VerticalAlignMethod, optional): Vertical alignment, one of "top", "middle", or "bottom". Defaults to "top". + overflow (OverflowMethod): Overflow method: "crop", "fold", "ellipsis". Defaults to "ellipsis". + width (int, optional): Desired width of column in characters, or None to fit to contents. Defaults to None. + min_width (Optional[int], optional): Minimum width of column, or ``None`` for no minimum. Defaults to None. + max_width (Optional[int], optional): Maximum width of column, or ``None`` for no maximum. Defaults to None. + ratio (int, optional): Flexible ratio for the column (requires ``Table.expand`` or ``Table.width``). Defaults to None. + no_wrap (bool, optional): Set to ``True`` to disable wrapping of this column. + """ + + column = Column( + _index=len(self.columns), + header=header, + footer=footer, + header_style=header_style or "", + footer_style=footer_style or "", + style=style or "", + justify=justify, + vertical=vertical, + overflow=overflow, + width=width, + min_width=min_width, + max_width=max_width, + ratio=ratio, + no_wrap=no_wrap, + ) + self.columns.append(column) + + def add_row( + self, + *renderables: Optional["RenderableType"], + style: Optional[StyleType] = None, + end_section: bool = False, + ) -> None: + """Add a row of renderables. + + Args: + *renderables (None or renderable): Each cell in a row must be a renderable object (including str), + or ``None`` for a blank cell. + style (StyleType, optional): An optional style to apply to the entire row. Defaults to None. + end_section (bool, optional): End a section and draw a line. Defaults to False. + + Raises: + errors.NotRenderableError: If you add something that can't be rendered. + """ + + def add_cell(column: Column, renderable: "RenderableType") -> None: + column._cells.append(renderable) + + cell_renderables: List[Optional["RenderableType"]] = list(renderables) + + columns = self.columns + if len(cell_renderables) < len(columns): + cell_renderables = [ + *cell_renderables, + *[None] * (len(columns) - len(cell_renderables)), + ] + for index, renderable in enumerate(cell_renderables): + if index == len(columns): + column = Column(_index=index) + for _ in self.rows: + add_cell(column, Text("")) + self.columns.append(column) + else: + column = columns[index] + if renderable is None: + add_cell(column, "") + elif is_renderable(renderable): + add_cell(column, renderable) + else: + raise errors.NotRenderableError( + f"unable to render {type(renderable).__name__}; a string or other renderable object is required" + ) + self.rows.append(Row(style=style, end_section=end_section)) + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": + + if not self.columns: + yield Segment("\n") + return + + max_width = options.max_width + if self.width is not None: + max_width = self.width + + extra_width = self._extra_width + widths = self._calculate_column_widths( + console, options.update_width(max_width - extra_width) + ) + table_width = sum(widths) + extra_width + + render_options = options.update( + width=table_width, highlight=self.highlight, height=None + ) + + def render_annotation( + text: TextType, style: StyleType, justify: "JustifyMethod" = "center" + ) -> "RenderResult": + render_text = ( + console.render_str(text, style=style, highlight=False) + if isinstance(text, str) + else text + ) + return console.render( + render_text, options=render_options.update(justify=justify) + ) + + if self.title: + yield from render_annotation( + self.title, + 
style=Style.pick_first(self.title_style, "table.title"), + justify=self.title_justify, + ) + yield from self._render(console, render_options, widths) + if self.caption: + yield from render_annotation( + self.caption, + style=Style.pick_first(self.caption_style, "table.caption"), + justify=self.caption_justify, + ) + + def _calculate_column_widths( + self, console: "Console", options: "ConsoleOptions" + ) -> List[int]: + """Calculate the widths of each column, including padding, not including borders.""" + max_width = options.max_width + columns = self.columns + width_ranges = [ + self._measure_column(console, options, column) for column in columns + ] + widths = [_range.maximum or 1 for _range in width_ranges] + get_padding_width = self._get_padding_width + extra_width = self._extra_width + if self.expand: + ratios = [col.ratio or 0 for col in columns if col.flexible] + if any(ratios): + fixed_widths = [ + 0 if column.flexible else _range.maximum + for _range, column in zip(width_ranges, columns) + ] + flex_minimum = [ + (column.width or 1) + get_padding_width(column._index) + for column in columns + if column.flexible + ] + flexible_width = max_width - sum(fixed_widths) + flex_widths = ratio_distribute(flexible_width, ratios, flex_minimum) + iter_flex_widths = iter(flex_widths) + for index, column in enumerate(columns): + if column.flexible: + widths[index] = fixed_widths[index] + next(iter_flex_widths) + table_width = sum(widths) + + if table_width > max_width: + widths = self._collapse_widths( + widths, + [(column.width is None and not column.no_wrap) for column in columns], + max_width, + ) + table_width = sum(widths) + # last resort, reduce columns evenly + if table_width > max_width: + excess_width = table_width - max_width + widths = ratio_reduce(excess_width, [1] * len(widths), widths, widths) + table_width = sum(widths) + + width_ranges = [ + self._measure_column(console, options.update_width(width), column) + for width, column in zip(widths, columns) + ] + widths = [_range.maximum or 0 for _range in width_ranges] + + if (table_width < max_width and self.expand) or ( + self.min_width is not None and table_width < (self.min_width - extra_width) + ): + _max_width = ( + max_width + if self.min_width is None + else min(self.min_width - extra_width, max_width) + ) + pad_widths = ratio_distribute(_max_width - table_width, widths) + widths = [_width + pad for _width, pad in zip(widths, pad_widths)] + + return widths + + @classmethod + def _collapse_widths( + cls, widths: List[int], wrapable: List[bool], max_width: int + ) -> List[int]: + """Reduce widths so that the total is under max_width. + + Args: + widths (List[int]): List of widths. + wrapable (List[bool]): List of booleans that indicate if a column may shrink. + max_width (int): Maximum width to reduce to. + + Returns: + List[int]: A new list of widths. 
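+
+        Example:
+            # Illustrative values (not from the vendored source): the widest
+            # wrapable column is reduced towards the next widest, so, assuming
+            # ratio_reduce caps each reduction as in rich._ratio, this
+            # returns [10, 20, 20].
+            Table._collapse_widths([10, 20, 30], [True, True, True], 50)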
+ """ + total_width = sum(widths) + excess_width = total_width - max_width + if any(wrapable): + while total_width and excess_width > 0: + max_column = max( + width for width, allow_wrap in zip(widths, wrapable) if allow_wrap + ) + second_max_column = max( + width if allow_wrap and width != max_column else 0 + for width, allow_wrap in zip(widths, wrapable) + ) + column_difference = max_column - second_max_column + ratios = [ + (1 if (width == max_column and allow_wrap) else 0) + for width, allow_wrap in zip(widths, wrapable) + ] + if not any(ratios) or not column_difference: + break + max_reduce = [min(excess_width, column_difference)] * len(widths) + widths = ratio_reduce(excess_width, ratios, max_reduce, widths) + + total_width = sum(widths) + excess_width = total_width - max_width + return widths + + def _get_cells( + self, console: "Console", column_index: int, column: Column + ) -> Iterable[_Cell]: + """Get all the cells with padding and optional header.""" + + collapse_padding = self.collapse_padding + pad_edge = self.pad_edge + padding = self.padding + any_padding = any(padding) + + first_column = column_index == 0 + last_column = column_index == len(self.columns) - 1 + + _padding_cache: Dict[Tuple[bool, bool], Tuple[int, int, int, int]] = {} + + def get_padding(first_row: bool, last_row: bool) -> Tuple[int, int, int, int]: + cached = _padding_cache.get((first_row, last_row)) + if cached: + return cached + top, right, bottom, left = padding + + if collapse_padding: + if not first_column: + left = max(0, left - right) + if not last_row: + bottom = max(0, top - bottom) + + if not pad_edge: + if first_column: + left = 0 + if last_column: + right = 0 + if first_row: + top = 0 + if last_row: + bottom = 0 + _padding = (top, right, bottom, left) + _padding_cache[(first_row, last_row)] = _padding + return _padding + + raw_cells: List[Tuple[StyleType, "RenderableType"]] = [] + _append = raw_cells.append + get_style = console.get_style + if self.show_header: + header_style = get_style(self.header_style or "") + get_style( + column.header_style + ) + _append((header_style, column.header)) + cell_style = get_style(column.style or "") + for cell in column.cells: + _append((cell_style, cell)) + if self.show_footer: + footer_style = get_style(self.footer_style or "") + get_style( + column.footer_style + ) + _append((footer_style, column.footer)) + + if any_padding: + _Padding = Padding + for first, last, (style, renderable) in loop_first_last(raw_cells): + yield _Cell( + style, + _Padding(renderable, get_padding(first, last)), + getattr(renderable, "vertical", None) or column.vertical, + ) + else: + for (style, renderable) in raw_cells: + yield _Cell( + style, + renderable, + getattr(renderable, "vertical", None) or column.vertical, + ) + + def _get_padding_width(self, column_index: int) -> int: + """Get extra width from padding.""" + _, pad_right, _, pad_left = self.padding + if self.collapse_padding: + if column_index > 0: + pad_left = max(0, pad_left - pad_right) + return pad_left + pad_right + + def _measure_column( + self, + console: "Console", + options: "ConsoleOptions", + column: Column, + ) -> Measurement: + """Get the minimum and maximum width of the column.""" + + max_width = options.max_width + if max_width < 1: + return Measurement(0, 0) + + padding_width = self._get_padding_width(column._index) + + if column.width is not None: + # Fixed width column + return Measurement( + column.width + padding_width, column.width + padding_width + ).with_maximum(max_width) + # Flexible column, we 
need to measure contents + min_widths: List[int] = [] + max_widths: List[int] = [] + append_min = min_widths.append + append_max = max_widths.append + get_render_width = Measurement.get + for cell in self._get_cells(console, column._index, column): + _min, _max = get_render_width(console, options, cell.renderable) + append_min(_min) + append_max(_max) + + measurement = Measurement( + max(min_widths) if min_widths else 1, + max(max_widths) if max_widths else max_width, + ).with_maximum(max_width) + measurement = measurement.clamp( + None if column.min_width is None else column.min_width + padding_width, + None if column.max_width is None else column.max_width + padding_width, + ) + return measurement + + def _render( + self, console: "Console", options: "ConsoleOptions", widths: List[int] + ) -> "RenderResult": + table_style = console.get_style(self.style or "") + + border_style = table_style + console.get_style(self.border_style or "") + _column_cells = ( + self._get_cells(console, column_index, column) + for column_index, column in enumerate(self.columns) + ) + row_cells: List[Tuple[_Cell, ...]] = list(zip(*_column_cells)) + _box = ( + self.box.substitute( + options, safe=pick_bool(self.safe_box, console.safe_box) + ) + if self.box + else None + ) + + # _box = self.box + new_line = Segment.line() + + columns = self.columns + show_header = self.show_header + show_footer = self.show_footer + show_edge = self.show_edge + show_lines = self.show_lines + leading = self.leading + + _Segment = Segment + if _box: + box_segments = [ + ( + _Segment(_box.head_left, border_style), + _Segment(_box.head_right, border_style), + _Segment(_box.head_vertical, border_style), + ), + ( + _Segment(_box.foot_left, border_style), + _Segment(_box.foot_right, border_style), + _Segment(_box.foot_vertical, border_style), + ), + ( + _Segment(_box.mid_left, border_style), + _Segment(_box.mid_right, border_style), + _Segment(_box.mid_vertical, border_style), + ), + ] + if show_edge: + yield _Segment(_box.get_top(widths), border_style) + yield new_line + else: + box_segments = [] + + get_row_style = self.get_row_style + get_style = console.get_style + + for index, (first, last, row_cell) in enumerate(loop_first_last(row_cells)): + header_row = first and show_header + footer_row = last and show_footer + row = ( + self.rows[index - show_header] + if (not header_row and not footer_row) + else None + ) + max_height = 1 + cells: List[List[List[Segment]]] = [] + if header_row or footer_row: + row_style = Style.null() + else: + row_style = get_style( + get_row_style(console, index - 1 if show_header else index) + ) + for width, cell, column in zip(widths, row_cell, columns): + render_options = options.update( + width=width, + justify=column.justify, + no_wrap=column.no_wrap, + overflow=column.overflow, + height=None, + ) + lines = console.render_lines( + cell.renderable, + render_options, + style=get_style(cell.style) + row_style, + ) + max_height = max(max_height, len(lines)) + cells.append(lines) + + row_height = max(len(cell) for cell in cells) + + def align_cell( + cell: List[List[Segment]], + vertical: "VerticalAlignMethod", + width: int, + style: Style, + ) -> List[List[Segment]]: + if header_row: + vertical = "bottom" + elif footer_row: + vertical = "top" + + if vertical == "top": + return _Segment.align_top(cell, width, row_height, style) + elif vertical == "middle": + return _Segment.align_middle(cell, width, row_height, style) + return _Segment.align_bottom(cell, width, row_height, style) + + cells[:] = [ + 
_Segment.set_shape( + align_cell( + cell, + _cell.vertical, + width, + get_style(_cell.style) + row_style, + ), + width, + max_height, + ) + for width, _cell, cell, column in zip(widths, row_cell, cells, columns) + ] + + if _box: + if last and show_footer: + yield _Segment( + _box.get_row(widths, "foot", edge=show_edge), border_style + ) + yield new_line + left, right, _divider = box_segments[0 if first else (2 if last else 1)] + + # If the column divider is whitespace also style it with the row background + divider = ( + _divider + if _divider.text.strip() + else _Segment( + _divider.text, row_style.background_style + _divider.style + ) + ) + for line_no in range(max_height): + if show_edge: + yield left + for last_cell, rendered_cell in loop_last(cells): + yield from rendered_cell[line_no] + if not last_cell: + yield divider + if show_edge: + yield right + yield new_line + else: + for line_no in range(max_height): + for rendered_cell in cells: + yield from rendered_cell[line_no] + yield new_line + if _box and first and show_header: + yield _Segment( + _box.get_row(widths, "head", edge=show_edge), border_style + ) + yield new_line + end_section = row and row.end_section + if _box and (show_lines or leading or end_section): + if ( + not last + and not (show_footer and index >= len(row_cells) - 2) + and not (show_header and header_row) + ): + if leading: + yield _Segment( + _box.get_row(widths, "mid", edge=show_edge) * leading, + border_style, + ) + else: + yield _Segment( + _box.get_row(widths, "row", edge=show_edge), border_style + ) + yield new_line + + if _box and show_edge: + yield _Segment(_box.get_bottom(widths), border_style) + yield new_line + + +if __name__ == "__main__": # pragma: no cover + from pipenv.patched.notpip._vendor.rich.console import Console + from pipenv.patched.notpip._vendor.rich.highlighter import ReprHighlighter + from pipenv.patched.notpip._vendor.rich.table import Table as Table + + from ._timer import timer + + with timer("Table render"): + table = Table( + title="Star Wars Movies", + caption="Rich example table", + caption_justify="right", + ) + + table.add_column( + "Released", header_style="bright_cyan", style="cyan", no_wrap=True + ) + table.add_column("Title", style="magenta") + table.add_column("Box Office", justify="right", style="green") + + table.add_row( + "Dec 20, 2019", + "Star Wars: The Rise of Skywalker", + "$952,110,690", + ) + table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347") + table.add_row( + "Dec 15, 2017", + "Star Wars Ep. 
V111: The Last Jedi", + "$1,332,539,889", + style="on black", + end_section=True, + ) + table.add_row( + "Dec 16, 2016", + "Rogue One: A Star Wars Story", + "$1,332,439,889", + ) + + def header(text: str) -> None: + console.print() + console.rule(highlight(text)) + console.print() + + console = Console() + highlight = ReprHighlighter() + header("Example Table") + console.print(table, justify="center") + + table.expand = True + header("expand=True") + console.print(table) + + table.width = 50 + header("width=50") + + console.print(table, justify="center") + + table.width = None + table.expand = False + table.row_styles = ["dim", "none"] + header("row_styles=['dim', 'none']") + + console.print(table, justify="center") + + table.width = None + table.expand = False + table.row_styles = ["dim", "none"] + table.leading = 1 + header("leading=1, row_styles=['dim', 'none']") + console.print(table, justify="center") + + table.width = None + table.expand = False + table.row_styles = ["dim", "none"] + table.show_lines = True + table.leading = 0 + header("show_lines=True, row_styles=['dim', 'none']") + console.print(table, justify="center") diff --git a/pipenv/patched/notpip/_vendor/rich/tabulate.py b/pipenv/patched/notpip/_vendor/rich/tabulate.py new file mode 100644 index 0000000000..9384ec4301 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/tabulate.py @@ -0,0 +1,51 @@ +from collections.abc import Mapping +from typing import Any, Optional +import warnings + +from pipenv.patched.notpip._vendor.rich.console import JustifyMethod + +from . import box +from .highlighter import ReprHighlighter +from .pretty import Pretty +from .table import Table + + +def tabulate_mapping( + mapping: "Mapping[Any, Any]", + title: Optional[str] = None, + caption: Optional[str] = None, + title_justify: Optional[JustifyMethod] = None, + caption_justify: Optional[JustifyMethod] = None, +) -> Table: + """Generate a simple table from a mapping. + + Args: + mapping (Mapping): A mapping object (e.g. a dict); + title (str, optional): Optional title to be displayed over the table. + caption (str, optional): Optional caption to be displayed below the table. + title_justify (str, optional): Justify method for title. Defaults to None. + caption_justify (str, optional): Justify method for caption. Defaults to None. + + Returns: + Table: A table instance which may be rendered by the Console. + """ + warnings.warn("tabulate_mapping will be deprecated in Rich v11", DeprecationWarning) + table = Table( + show_header=False, + title=title, + caption=caption, + box=box.ROUNDED, + border_style="blue", + ) + table.title = title + table.caption = caption + if title_justify is not None: + table.title_justify = title_justify + if caption_justify is not None: + table.caption_justify = caption_justify + highlighter = ReprHighlighter() + for key, value in mapping.items(): + table.add_row( + Pretty(key, highlighter=highlighter), Pretty(value, highlighter=highlighter) + ) + return table diff --git a/pipenv/patched/notpip/_vendor/rich/terminal_theme.py b/pipenv/patched/notpip/_vendor/rich/terminal_theme.py new file mode 100644 index 0000000000..801ac0b7b8 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/terminal_theme.py @@ -0,0 +1,55 @@ +from typing import List, Optional, Tuple + +from .color_triplet import ColorTriplet +from .palette import Palette + +_ColorTuple = Tuple[int, int, int] + + +class TerminalTheme: + """A color theme used when exporting console content. + + Args: + background (Tuple[int, int, int]): The background color. 
+ foreground (Tuple[int, int, int]): The foreground (text) color. + normal (List[Tuple[int, int, int]]): A list of 8 normal intensity colors. + bright (List[Tuple[int, int, int]], optional): A list of 8 bright colors, or None + to repeat normal intensity. Defaults to None. + """ + + def __init__( + self, + background: _ColorTuple, + foreground: _ColorTuple, + normal: List[_ColorTuple], + bright: Optional[List[_ColorTuple]] = None, + ) -> None: + self.background_color = ColorTriplet(*background) + self.foreground_color = ColorTriplet(*foreground) + self.ansi_colors = Palette(normal + (bright or normal)) + + +DEFAULT_TERMINAL_THEME = TerminalTheme( + (255, 255, 255), + (0, 0, 0), + [ + (0, 0, 0), + (128, 0, 0), + (0, 128, 0), + (128, 128, 0), + (0, 0, 128), + (128, 0, 128), + (0, 128, 128), + (192, 192, 192), + ], + [ + (128, 128, 128), + (255, 0, 0), + (0, 255, 0), + (255, 255, 0), + (0, 0, 255), + (255, 0, 255), + (0, 255, 255), + (255, 255, 255), + ], +) diff --git a/pipenv/patched/notpip/_vendor/rich/text.py b/pipenv/patched/notpip/_vendor/rich/text.py new file mode 100644 index 0000000000..c8b3ded50d --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/text.py @@ -0,0 +1,1282 @@ +import re +from functools import partial, reduce +from math import gcd +from operator import itemgetter +from pipenv.patched.notpip._vendor.rich.emoji import EmojiVariant +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterable, + List, + NamedTuple, + Optional, + Tuple, + Union, +) + +from ._loop import loop_last +from ._pick import pick_bool +from ._wrap import divide_line +from .align import AlignMethod +from .cells import cell_len, set_cell_size +from .containers import Lines +from .control import strip_control_codes +from .emoji import EmojiVariant +from .jupyter import JupyterMixin +from .measure import Measurement +from .segment import Segment +from .style import Style, StyleType + +if TYPE_CHECKING: # pragma: no cover + from .console import Console, ConsoleOptions, JustifyMethod, OverflowMethod + +DEFAULT_JUSTIFY: "JustifyMethod" = "default" +DEFAULT_OVERFLOW: "OverflowMethod" = "fold" + + +_re_whitespace = re.compile(r"\s+$") + +TextType = Union[str, "Text"] + +GetStyleCallable = Callable[[str], Optional[StyleType]] + + +class Span(NamedTuple): + """A marked up region in some text.""" + + start: int + """Span start index.""" + end: int + """Span end index.""" + style: Union[str, Style] + """Style associated with the span.""" + + def __repr__(self) -> str: + return ( + f"Span({self.start}, {self.end}, {self.style!r})" + if (isinstance(self.style, Style) and self.style._meta) + else f"Span({self.start}, {self.end}, {repr(self.style)})" + ) + + def __bool__(self) -> bool: + return self.end > self.start + + def split(self, offset: int) -> Tuple["Span", Optional["Span"]]: + """Split a span in to 2 from a given offset.""" + + if offset < self.start: + return self, None + if offset >= self.end: + return self, None + + start, end, style = self + span1 = Span(start, min(end, offset), style) + span2 = Span(span1.end, end, style) + return span1, span2 + + def move(self, offset: int) -> "Span": + """Move start and end by a given offset. + + Args: + offset (int): Number of characters to add to start and end. + + Returns: + TextSpan: A new TextSpan with adjusted position. + """ + start, end, style = self + return Span(start + offset, end + offset, style) + + def right_crop(self, offset: int) -> "Span": + """Crop the span at the given offset. 
+ + Args: + offset (int): A value between start and end. + + Returns: + Span: A new (possibly smaller) span. + """ + start, end, style = self + if offset >= end: + return self + return Span(start, min(offset, end), style) + + +class Text(JupyterMixin): + """Text with color / style. + + Args: + text (str, optional): Default unstyled text. Defaults to "". + style (Union[str, Style], optional): Base style for text. Defaults to "". + justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None. + overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None. + no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None. + end (str, optional): Character to end text with. Defaults to "\\\\n". + tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to 8. + spans (List[Span], optional). A list of predefined style spans. Defaults to None. + """ + + __slots__ = [ + "_text", + "style", + "justify", + "overflow", + "no_wrap", + "end", + "tab_size", + "_spans", + "_length", + ] + + def __init__( + self, + text: str = "", + style: Union[str, Style] = "", + *, + justify: Optional["JustifyMethod"] = None, + overflow: Optional["OverflowMethod"] = None, + no_wrap: Optional[bool] = None, + end: str = "\n", + tab_size: Optional[int] = 8, + spans: Optional[List[Span]] = None, + ) -> None: + self._text = [strip_control_codes(text)] + self.style = style + self.justify: Optional["JustifyMethod"] = justify + self.overflow: Optional["OverflowMethod"] = overflow + self.no_wrap = no_wrap + self.end = end + self.tab_size = tab_size + self._spans: List[Span] = spans or [] + self._length: int = len(text) + + def __len__(self) -> int: + return self._length + + def __bool__(self) -> bool: + return bool(self._length) + + def __str__(self) -> str: + return self.plain + + def __repr__(self) -> str: + return f"" + + def __add__(self, other: Any) -> "Text": + if isinstance(other, (str, Text)): + result = self.copy() + result.append(other) + return result + return NotImplemented + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Text): + return NotImplemented + return self.plain == other.plain and self._spans == other._spans + + def __contains__(self, other: object) -> bool: + if isinstance(other, str): + return other in self.plain + elif isinstance(other, Text): + return other.plain in self.plain + return False + + def __getitem__(self, slice: Union[int, slice]) -> "Text": + def get_text_at(offset: int) -> "Text": + _Span = Span + text = Text( + self.plain[offset], + spans=[ + _Span(0, 1, style) + for start, end, style in self._spans + if end > offset >= start + ], + end="", + ) + return text + + if isinstance(slice, int): + return get_text_at(slice) + else: + start, stop, step = slice.indices(len(self.plain)) + if step == 1: + lines = self.divide([start, stop]) + return lines[1] + else: + # This would be a bit of work to implement efficiently + # For now, its not required + raise TypeError("slices with step!=1 are not supported") + + @property + def cell_len(self) -> int: + """Get the number of cells required to render this text.""" + return cell_len(self.plain) + + @property + def markup(self) -> str: + """Get console markup to render this Text. + + Returns: + str: A string potentially creating markup tags. 
+ """ + from .markup import escape + + output: List[str] = [] + + plain = self.plain + markup_spans = [ + (0, False, self.style), + *((span.start, False, span.style) for span in self._spans), + *((span.end, True, span.style) for span in self._spans), + (len(plain), True, self.style), + ] + markup_spans.sort(key=itemgetter(0, 1)) + position = 0 + append = output.append + for offset, closing, style in markup_spans: + if offset > position: + append(escape(plain[position:offset])) + position = offset + if style: + append(f"[/{style}]" if closing else f"[{style}]") + markup = "".join(output) + return markup + + @classmethod + def from_markup( + cls, + text: str, + *, + style: Union[str, Style] = "", + emoji: bool = True, + emoji_variant: Optional[EmojiVariant] = None, + justify: Optional["JustifyMethod"] = None, + overflow: Optional["OverflowMethod"] = None, + ) -> "Text": + """Create Text instance from markup. + + Args: + text (str): A string containing console markup. + emoji (bool, optional): Also render emoji code. Defaults to True. + justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None. + overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None. + + Returns: + Text: A Text instance with markup rendered. + """ + from .markup import render + + rendered_text = render(text, style, emoji=emoji, emoji_variant=emoji_variant) + rendered_text.justify = justify + rendered_text.overflow = overflow + return rendered_text + + @classmethod + def from_ansi( + cls, + text: str, + *, + style: Union[str, Style] = "", + justify: Optional["JustifyMethod"] = None, + overflow: Optional["OverflowMethod"] = None, + no_wrap: Optional[bool] = None, + end: str = "\n", + tab_size: Optional[int] = 8, + ) -> "Text": + """Create a Text object from a string containing ANSI escape codes. + + Args: + text (str): A string containing escape codes. + style (Union[str, Style], optional): Base style for text. Defaults to "". + justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None. + overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None. + no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None. + end (str, optional): Character to end text with. Defaults to "\\\\n". + tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to 8. + """ + from .ansi import AnsiDecoder + + joiner = Text( + "\n", + justify=justify, + overflow=overflow, + no_wrap=no_wrap, + end=end, + tab_size=tab_size, + style=style, + ) + decoder = AnsiDecoder() + result = joiner.join(line for line in decoder.decode(text)) + return result + + @classmethod + def styled( + cls, + text: str, + style: StyleType = "", + *, + justify: Optional["JustifyMethod"] = None, + overflow: Optional["OverflowMethod"] = None, + ) -> "Text": + """Construct a Text instance with a pre-applied styled. A style applied in this way won't be used + to pad the text when it is justified. + + Args: + text (str): A string containing console markup. + style (Union[str, Style]): Style to apply to the text. Defaults to "". + justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None. + overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None. + + Returns: + Text: A text instance with a style applied to the entire string. 
+ """ + styled_text = cls(text, justify=justify, overflow=overflow) + styled_text.stylize(style) + return styled_text + + @classmethod + def assemble( + cls, + *parts: Union[str, "Text", Tuple[str, StyleType]], + style: Union[str, Style] = "", + justify: Optional["JustifyMethod"] = None, + overflow: Optional["OverflowMethod"] = None, + no_wrap: Optional[bool] = None, + end: str = "\n", + tab_size: int = 8, + meta: Optional[Dict[str, Any]] = None, + ) -> "Text": + """Construct a text instance by combining a sequence of strings with optional styles. + The positional arguments should be either strings, or a tuple of string + style. + + Args: + style (Union[str, Style], optional): Base style for text. Defaults to "". + justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None. + overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None. + end (str, optional): Character to end text with. Defaults to "\\\\n". + tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to 8. + meta (Dict[str, Any], optional). Meta data to apply to text, or None for no meta data. Default to None + + Returns: + Text: A new text instance. + """ + text = cls( + style=style, + justify=justify, + overflow=overflow, + no_wrap=no_wrap, + end=end, + tab_size=tab_size, + ) + append = text.append + _Text = Text + for part in parts: + if isinstance(part, (_Text, str)): + append(part) + else: + append(*part) + if meta: + text.apply_meta(meta) + return text + + @property + def plain(self) -> str: + """Get the text as a single string.""" + if len(self._text) != 1: + self._text[:] = ["".join(self._text)] + return self._text[0] + + @plain.setter + def plain(self, new_text: str) -> None: + """Set the text to a new value.""" + if new_text != self.plain: + self._text[:] = [new_text] + old_length = self._length + self._length = len(new_text) + if old_length > self._length: + self._trim_spans() + + @property + def spans(self) -> List[Span]: + """Get a reference to the internal list of spans.""" + return self._spans + + @spans.setter + def spans(self, spans: List[Span]) -> None: + """Set spans.""" + self._spans = spans[:] + + def blank_copy(self, plain: str = "") -> "Text": + """Return a new Text instance with copied meta data (but not the string or spans).""" + copy_self = Text( + plain, + style=self.style, + justify=self.justify, + overflow=self.overflow, + no_wrap=self.no_wrap, + end=self.end, + tab_size=self.tab_size, + ) + return copy_self + + def copy(self) -> "Text": + """Return a copy of this instance.""" + copy_self = Text( + self.plain, + style=self.style, + justify=self.justify, + overflow=self.overflow, + no_wrap=self.no_wrap, + end=self.end, + tab_size=self.tab_size, + ) + copy_self._spans[:] = self._spans + return copy_self + + def stylize( + self, + style: Union[str, Style], + start: int = 0, + end: Optional[int] = None, + ) -> None: + """Apply a style to the text, or a portion of the text. + + Args: + style (Union[str, Style]): Style instance or style definition to apply. + start (int): Start offset (negative indexing is supported). Defaults to 0. + end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None. 
+ + """ + if style: + length = len(self) + if start < 0: + start = length + start + if end is None: + end = length + if end < 0: + end = length + end + if start >= length or end <= start: + # Span not in text or not valid + return + self._spans.append(Span(start, min(length, end), style)) + + def apply_meta( + self, meta: Dict[str, Any], start: int = 0, end: Optional[int] = None + ) -> None: + """Apply meta data to the text, or a portion of the text. + + Args: + meta (Dict[str, Any]): A dict of meta information. + start (int): Start offset (negative indexing is supported). Defaults to 0. + end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None. + + """ + style = Style.from_meta(meta) + self.stylize(style, start=start, end=end) + + def on(self, meta: Optional[Dict[str, Any]] = None, **handlers: Any) -> "Text": + """Apply event handlers (used by Textual project). + + Example: + >>> from rich.text import Text + >>> text = Text("hello world") + >>> text.on(click="view.toggle('world')") + + Args: + meta (Dict[str, Any]): Mapping of meta information. + **handlers: Keyword args are prefixed with "@" to defined handlers. + + Returns: + Text: Self is returned to method may be chained. + """ + meta = {} if meta is None else meta + meta.update({f"@{key}": value for key, value in handlers.items()}) + self.stylize(Style.from_meta(meta)) + return self + + def remove_suffix(self, suffix: str) -> None: + """Remove a suffix if it exists. + + Args: + suffix (str): Suffix to remove. + """ + if self.plain.endswith(suffix): + self.right_crop(len(suffix)) + + def get_style_at_offset(self, console: "Console", offset: int) -> Style: + """Get the style of a character at give offset. + + Args: + console (~Console): Console where text will be rendered. + offset (int): Offset in to text (negative indexing supported) + + Returns: + Style: A Style instance. + """ + # TODO: This is a little inefficient, it is only used by full justify + if offset < 0: + offset = len(self) + offset + get_style = console.get_style + style = get_style(self.style).copy() + for start, end, span_style in self._spans: + if end > offset >= start: + style += get_style(span_style, default="") + return style + + def highlight_regex( + self, + re_highlight: str, + style: Optional[Union[GetStyleCallable, StyleType]] = None, + *, + style_prefix: str = "", + ) -> int: + """Highlight text with a regular expression, where group names are + translated to styles. + + Args: + re_highlight (str): A regular expression. + style (Union[GetStyleCallable, StyleType]): Optional style to apply to whole match, or a callable + which accepts the matched text and returns a style. Defaults to None. + style_prefix (str, optional): Optional prefix to add to style group names. 
+ + Returns: + int: Number of regex matches + """ + count = 0 + append_span = self._spans.append + _Span = Span + plain = self.plain + for match in re.finditer(re_highlight, plain): + get_span = match.span + if style: + start, end = get_span() + match_style = style(plain[start:end]) if callable(style) else style + if match_style is not None and end > start: + append_span(_Span(start, end, match_style)) + + count += 1 + for name in match.groupdict().keys(): + start, end = get_span(name) + if start != -1 and end > start: + append_span(_Span(start, end, f"{style_prefix}{name}")) + return count + + def highlight_words( + self, + words: Iterable[str], + style: Union[str, Style], + *, + case_sensitive: bool = True, + ) -> int: + """Highlight words with a style. + + Args: + words (Iterable[str]): Worlds to highlight. + style (Union[str, Style]): Style to apply. + case_sensitive (bool, optional): Enable case sensitive matchings. Defaults to True. + + Returns: + int: Number of words highlighted. + """ + re_words = "|".join(re.escape(word) for word in words) + add_span = self._spans.append + count = 0 + _Span = Span + for match in re.finditer( + re_words, self.plain, flags=0 if case_sensitive else re.IGNORECASE + ): + start, end = match.span(0) + add_span(_Span(start, end, style)) + count += 1 + return count + + def rstrip(self) -> None: + """Strip whitespace from end of text.""" + self.plain = self.plain.rstrip() + + def rstrip_end(self, size: int) -> None: + """Remove whitespace beyond a certain width at the end of the text. + + Args: + size (int): The desired size of the text. + """ + text_length = len(self) + if text_length > size: + excess = text_length - size + whitespace_match = _re_whitespace.search(self.plain) + if whitespace_match is not None: + whitespace_count = len(whitespace_match.group(0)) + self.right_crop(min(whitespace_count, excess)) + + def set_length(self, new_length: int) -> None: + """Set new length of the text, clipping or padding is required.""" + length = len(self) + if length != new_length: + if length < new_length: + self.pad_right(new_length - length) + else: + self.right_crop(length - new_length) + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> Iterable[Segment]: + tab_size: int = console.tab_size or self.tab_size or 8 + justify = self.justify or options.justify or DEFAULT_JUSTIFY + + overflow = self.overflow or options.overflow or DEFAULT_OVERFLOW + + lines = self.wrap( + console, + options.max_width, + justify=justify, + overflow=overflow, + tab_size=tab_size or 8, + no_wrap=pick_bool(self.no_wrap, options.no_wrap, False), + ) + all_lines = Text("\n").join(lines) + yield from all_lines.render(console, end=self.end) + + def __rich_measure__( + self, console: "Console", options: "ConsoleOptions" + ) -> Measurement: + text = self.plain + lines = text.splitlines() + max_text_width = max(cell_len(line) for line in lines) if lines else 0 + words = text.split() + min_text_width = ( + max(cell_len(word) for word in words) if words else max_text_width + ) + return Measurement(min_text_width, max_text_width) + + def render(self, console: "Console", end: str = "") -> Iterable["Segment"]: + """Render the text as Segments. + + Args: + console (Console): Console instance. + end (Optional[str], optional): Optional end character. + + Returns: + Iterable[Segment]: Result of render that may be written to the console. 
+ """ + _Segment = Segment + text = self.plain + if not self._spans: + yield Segment(text) + if end: + yield _Segment(end) + return + get_style = partial(console.get_style, default=Style.null()) + + enumerated_spans = list(enumerate(self._spans, 1)) + style_map = {index: get_style(span.style) for index, span in enumerated_spans} + style_map[0] = get_style(self.style) + + spans = [ + (0, False, 0), + *((span.start, False, index) for index, span in enumerated_spans), + *((span.end, True, index) for index, span in enumerated_spans), + (len(text), True, 0), + ] + spans.sort(key=itemgetter(0, 1)) + + stack: List[int] = [] + stack_append = stack.append + stack_pop = stack.remove + + style_cache: Dict[Tuple[Style, ...], Style] = {} + style_cache_get = style_cache.get + combine = Style.combine + + def get_current_style() -> Style: + """Construct current style from stack.""" + styles = tuple(style_map[_style_id] for _style_id in sorted(stack)) + cached_style = style_cache_get(styles) + if cached_style is not None: + return cached_style + current_style = combine(styles) + style_cache[styles] = current_style + return current_style + + for (offset, leaving, style_id), (next_offset, _, _) in zip(spans, spans[1:]): + if leaving: + stack_pop(style_id) + else: + stack_append(style_id) + if next_offset > offset: + yield _Segment(text[offset:next_offset], get_current_style()) + if end: + yield _Segment(end) + + def join(self, lines: Iterable["Text"]) -> "Text": + """Join text together with this instance as the separator. + + Args: + lines (Iterable[Text]): An iterable of Text instances to join. + + Returns: + Text: A new text instance containing join text. + """ + + new_text = self.blank_copy() + + def iter_text() -> Iterable["Text"]: + if self.plain: + for last, line in loop_last(lines): + yield line + if not last: + yield self + else: + yield from lines + + extend_text = new_text._text.extend + append_span = new_text._spans.append + extend_spans = new_text._spans.extend + offset = 0 + _Span = Span + + for text in iter_text(): + extend_text(text._text) + if text.style: + append_span(_Span(offset, offset + len(text), text.style)) + extend_spans( + _Span(offset + start, offset + end, style) + for start, end, style in text._spans + ) + offset += len(text) + new_text._length = offset + return new_text + + def expand_tabs(self, tab_size: Optional[int] = None) -> None: + """Converts tabs to spaces. + + Args: + tab_size (int, optional): Size of tabs. Defaults to 8. + + """ + if "\t" not in self.plain: + return + pos = 0 + if tab_size is None: + tab_size = self.tab_size + assert tab_size is not None + result = self.blank_copy() + append = result.append + + _style = self.style + for line in self.split("\n", include_separator=True): + parts = line.split("\t", include_separator=True) + for part in parts: + if part.plain.endswith("\t"): + part._text = [part.plain[:-1] + " "] + append(part) + pos += len(part) + spaces = tab_size - ((pos - 1) % tab_size) - 1 + if spaces: + append(" " * spaces, _style) + pos += spaces + else: + append(part) + self._text = [result.plain] + self._length = len(self.plain) + self._spans[:] = result._spans + + def truncate( + self, + max_width: int, + *, + overflow: Optional["OverflowMethod"] = None, + pad: bool = False, + ) -> None: + """Truncate text if it is longer that a given width. + + Args: + max_width (int): Maximum number of characters in text. + overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to None, to use self.overflow. 
+ pad (bool, optional): Pad with spaces if the length is less than max_width. Defaults to False. + """ + _overflow = overflow or self.overflow or DEFAULT_OVERFLOW + if _overflow != "ignore": + length = cell_len(self.plain) + if length > max_width: + if _overflow == "ellipsis": + self.plain = set_cell_size(self.plain, max_width - 1) + "…" + else: + self.plain = set_cell_size(self.plain, max_width) + if pad and length < max_width: + spaces = max_width - length + self._text = [f"{self.plain}{' ' * spaces}"] + self._length = len(self.plain) + + def _trim_spans(self) -> None: + """Remove or modify any spans that are over the end of the text.""" + max_offset = len(self.plain) + _Span = Span + self._spans[:] = [ + ( + span + if span.end < max_offset + else _Span(span.start, min(max_offset, span.end), span.style) + ) + for span in self._spans + if span.start < max_offset + ] + + def pad(self, count: int, character: str = " ") -> None: + """Pad left and right with a given number of characters. + + Args: + count (int): Width of padding. + """ + assert len(character) == 1, "Character must be a string of length 1" + if count: + pad_characters = character * count + self.plain = f"{pad_characters}{self.plain}{pad_characters}" + _Span = Span + self._spans[:] = [ + _Span(start + count, end + count, style) + for start, end, style in self._spans + ] + + def pad_left(self, count: int, character: str = " ") -> None: + """Pad the left with a given character. + + Args: + count (int): Number of characters to pad. + character (str, optional): Character to pad with. Defaults to " ". + """ + assert len(character) == 1, "Character must be a string of length 1" + if count: + self.plain = f"{character * count}{self.plain}" + _Span = Span + self._spans[:] = [ + _Span(start + count, end + count, style) + for start, end, style in self._spans + ] + + def pad_right(self, count: int, character: str = " ") -> None: + """Pad the right with a given character. + + Args: + count (int): Number of characters to pad. + character (str, optional): Character to pad with. Defaults to " ". + """ + assert len(character) == 1, "Character must be a string of length 1" + if count: + self.plain = f"{self.plain}{character * count}" + + def align(self, align: AlignMethod, width: int, character: str = " ") -> None: + """Align text to a given width. + + Args: + align (AlignMethod): One of "left", "center", or "right". + width (int): Desired width. + character (str, optional): Character to pad with. Defaults to " ". + """ + self.truncate(width) + excess_space = width - cell_len(self.plain) + if excess_space: + if align == "left": + self.pad_right(excess_space, character) + elif align == "center": + left = excess_space // 2 + self.pad_left(left, character) + self.pad_right(excess_space - left, character) + else: + self.pad_left(excess_space, character) + + def append( + self, text: Union["Text", str], style: Optional[Union[str, "Style"]] = None + ) -> "Text": + """Add text with an optional style. + + Args: + text (Union[Text, str]): A str or Text to append. + style (str, optional): A style name. Defaults to None. + + Returns: + Text: Returns self for chaining. 
+ """ + + if not isinstance(text, (str, Text)): + raise TypeError("Only str or Text can be appended to Text") + + if len(text): + if isinstance(text, str): + text = strip_control_codes(text) + self._text.append(text) + offset = len(self) + text_length = len(text) + if style is not None: + self._spans.append(Span(offset, offset + text_length, style)) + self._length += text_length + elif isinstance(text, Text): + _Span = Span + if style is not None: + raise ValueError( + "style must not be set when appending Text instance" + ) + text_length = self._length + if text.style is not None: + self._spans.append( + _Span(text_length, text_length + len(text), text.style) + ) + self._text.append(text.plain) + self._spans.extend( + _Span(start + text_length, end + text_length, style) + for start, end, style in text._spans + ) + self._length += len(text) + return self + + def append_text(self, text: "Text") -> "Text": + """Append another Text instance. This method is more performant that Text.append, but + only works for Text. + + Returns: + Text: Returns self for chaining. + """ + _Span = Span + text_length = self._length + if text.style is not None: + self._spans.append(_Span(text_length, text_length + len(text), text.style)) + self._text.append(text.plain) + self._spans.extend( + _Span(start + text_length, end + text_length, style) + for start, end, style in text._spans + ) + self._length += len(text) + return self + + def append_tokens( + self, tokens: Iterable[Tuple[str, Optional[StyleType]]] + ) -> "Text": + """Append iterable of str and style. Style may be a Style instance or a str style definition. + + Args: + pairs (Iterable[Tuple[str, Optional[StyleType]]]): An iterable of tuples containing str content and style. + + Returns: + Text: Returns self for chaining. + """ + append_text = self._text.append + append_span = self._spans.append + _Span = Span + offset = len(self) + for content, style in tokens: + append_text(content) + if style is not None: + append_span(_Span(offset, offset + len(content), style)) + offset += len(content) + self._length = offset + return self + + def copy_styles(self, text: "Text") -> None: + """Copy styles from another Text instance. + + Args: + text (Text): A Text instance to copy styles from, must be the same length. + """ + self._spans.extend(text._spans) + + def split( + self, + separator: str = "\n", + *, + include_separator: bool = False, + allow_blank: bool = False, + ) -> Lines: + """Split rich text in to lines, preserving styles. + + Args: + separator (str, optional): String to split on. Defaults to "\\\\n". + include_separator (bool, optional): Include the separator in the lines. Defaults to False. + allow_blank (bool, optional): Return a blank line if the text ends with a separator. Defaults to False. + + Returns: + List[RichText]: A list of rich text, one per line of the original. 
+ """ + assert separator, "separator must not be empty" + + text = self.plain + if separator not in text: + return Lines([self.copy()]) + + if include_separator: + lines = self.divide( + match.end() for match in re.finditer(re.escape(separator), text) + ) + else: + + def flatten_spans() -> Iterable[int]: + for match in re.finditer(re.escape(separator), text): + start, end = match.span() + yield start + yield end + + lines = Lines( + line for line in self.divide(flatten_spans()) if line.plain != separator + ) + + if not allow_blank and text.endswith(separator): + lines.pop() + + return lines + + def divide(self, offsets: Iterable[int]) -> Lines: + """Divide text in to a number of lines at given offsets. + + Args: + offsets (Iterable[int]): Offsets used to divide text. + + Returns: + Lines: New RichText instances between offsets. + """ + _offsets = list(offsets) + + if not _offsets: + return Lines([self.copy()]) + + text = self.plain + text_length = len(text) + divide_offsets = [0, *_offsets, text_length] + line_ranges = list(zip(divide_offsets, divide_offsets[1:])) + + style = self.style + justify = self.justify + overflow = self.overflow + _Text = Text + new_lines = Lines( + _Text( + text[start:end], + style=style, + justify=justify, + overflow=overflow, + ) + for start, end in line_ranges + ) + if not self._spans: + return new_lines + + _line_appends = [line._spans.append for line in new_lines._lines] + line_count = len(line_ranges) + _Span = Span + + for span_start, span_end, style in self._spans: + + lower_bound = 0 + upper_bound = line_count + start_line_no = (lower_bound + upper_bound) // 2 + + while True: + line_start, line_end = line_ranges[start_line_no] + if span_start < line_start: + upper_bound = start_line_no - 1 + elif span_start > line_end: + lower_bound = start_line_no + 1 + else: + break + start_line_no = (lower_bound + upper_bound) // 2 + + if span_end < line_end: + end_line_no = start_line_no + else: + end_line_no = lower_bound = start_line_no + upper_bound = line_count + + while True: + line_start, line_end = line_ranges[end_line_no] + if span_end < line_start: + upper_bound = end_line_no - 1 + elif span_end > line_end: + lower_bound = end_line_no + 1 + else: + break + end_line_no = (lower_bound + upper_bound) // 2 + + for line_no in range(start_line_no, end_line_no + 1): + line_start, line_end = line_ranges[line_no] + new_start = max(0, span_start - line_start) + new_end = min(span_end - line_start, line_end - line_start) + if new_end > new_start: + _line_appends[line_no](_Span(new_start, new_end, style)) + + return new_lines + + def right_crop(self, amount: int = 1) -> None: + """Remove a number of characters from the end of the text.""" + max_offset = len(self.plain) - amount + _Span = Span + self._spans[:] = [ + ( + span + if span.end < max_offset + else _Span(span.start, min(max_offset, span.end), span.style) + ) + for span in self._spans + if span.start < max_offset + ] + self._text = [self.plain[:-amount]] + self._length -= amount + + def wrap( + self, + console: "Console", + width: int, + *, + justify: Optional["JustifyMethod"] = None, + overflow: Optional["OverflowMethod"] = None, + tab_size: int = 8, + no_wrap: Optional[bool] = None, + ) -> Lines: + """Word wrap the text. + + Args: + console (Console): Console instance. + width (int): Number of characters per line. + emoji (bool, optional): Also render emoji code. Defaults to True. + justify (str, optional): Justify method: "default", "left", "center", "full", "right". Defaults to "default". 
+ overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to None. + tab_size (int, optional): Default tab size. Defaults to 8. + no_wrap (bool, optional): Disable wrapping, Defaults to False. + + Returns: + Lines: Number of lines. + """ + wrap_justify = justify or self.justify or DEFAULT_JUSTIFY + wrap_overflow = overflow or self.overflow or DEFAULT_OVERFLOW + + no_wrap = pick_bool(no_wrap, self.no_wrap, False) or overflow == "ignore" + + lines = Lines() + for line in self.split(allow_blank=True): + if "\t" in line: + line.expand_tabs(tab_size) + if no_wrap: + new_lines = Lines([line]) + else: + offsets = divide_line(str(line), width, fold=wrap_overflow == "fold") + new_lines = line.divide(offsets) + for line in new_lines: + line.rstrip_end(width) + if wrap_justify: + new_lines.justify( + console, width, justify=wrap_justify, overflow=wrap_overflow + ) + for line in new_lines: + line.truncate(width, overflow=wrap_overflow) + lines.extend(new_lines) + return lines + + def fit(self, width: int) -> Lines: + """Fit the text in to given width by chopping in to lines. + + Args: + width (int): Maximum characters in a line. + + Returns: + Lines: List of lines. + """ + lines: Lines = Lines() + append = lines.append + for line in self.split(): + line.set_length(width) + append(line) + return lines + + def detect_indentation(self) -> int: + """Auto-detect indentation of code. + + Returns: + int: Number of spaces used to indent code. + """ + + _indentations = { + len(match.group(1)) + for match in re.finditer(r"^( *)(.*)$", self.plain, flags=re.MULTILINE) + } + + try: + indentation = ( + reduce(gcd, [indent for indent in _indentations if not indent % 2]) or 1 + ) + except TypeError: + indentation = 1 + + return indentation + + def with_indent_guides( + self, + indent_size: Optional[int] = None, + *, + character: str = "│", + style: StyleType = "dim green", + ) -> "Text": + """Adds indent guide lines to text. + + Args: + indent_size (Optional[int]): Size of indentation, or None to auto detect. Defaults to None. + character (str, optional): Character to use for indentation. Defaults to "│". + style (Union[Style, str], optional): Style of indent guides. + + Returns: + Text: New text with indentation guides. + """ + + _indent_size = self.detect_indentation() if indent_size is None else indent_size + + text = self.copy() + text.expand_tabs() + indent_line = f"{character}{' ' * (_indent_size - 1)}" + + re_indent = re.compile(r"^( *)(.*)$") + new_lines: List[Text] = [] + add_line = new_lines.append + blank_lines = 0 + for line in text.split(allow_blank=True): + match = re_indent.match(line.plain) + if not match or not match.group(2): + blank_lines += 1 + continue + indent = match.group(1) + full_indents, remaining_space = divmod(len(indent), _indent_size) + new_indent = f"{indent_line * full_indents}{' ' * remaining_space}" + line.plain = new_indent + line.plain[len(new_indent) :] + line.stylize(style, 0, len(new_indent)) + if blank_lines: + new_lines.extend([Text(new_indent, style=style)] * blank_lines) + blank_lines = 0 + add_line(line) + if blank_lines: + new_lines.extend([Text("", style=style)] * blank_lines) + + new_text = text.blank_copy("\n").join(new_lines) + return new_text + + +if __name__ == "__main__": # pragma: no cover + from pipenv.patched.notpip._vendor.rich.console import Console + + text = Text( + """\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. 
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n""" + ) + text.highlight_words(["Lorem"], "bold") + text.highlight_words(["ipsum"], "italic") + + console = Console() + + console.rule("justify='left'") + console.print(text, style="red") + console.print() + + console.rule("justify='center'") + console.print(text, style="green", justify="center") + console.print() + + console.rule("justify='right'") + console.print(text, style="blue", justify="right") + console.print() + + console.rule("justify='full'") + console.print(text, style="magenta", justify="full") + console.print() diff --git a/pipenv/patched/notpip/_vendor/rich/theme.py b/pipenv/patched/notpip/_vendor/rich/theme.py new file mode 100644 index 0000000000..bfb3c7f821 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/theme.py @@ -0,0 +1,112 @@ +import configparser +from typing import Dict, List, IO, Mapping, Optional + +from .default_styles import DEFAULT_STYLES +from .style import Style, StyleType + + +class Theme: + """A container for style information, used by :class:`~rich.console.Console`. + + Args: + styles (Dict[str, Style], optional): A mapping of style names on to styles. Defaults to None for a theme with no styles. + inherit (bool, optional): Inherit default styles. Defaults to True. + """ + + styles: Dict[str, Style] + + def __init__( + self, styles: Optional[Mapping[str, StyleType]] = None, inherit: bool = True + ): + self.styles = DEFAULT_STYLES.copy() if inherit else {} + if styles is not None: + self.styles.update( + { + name: style if isinstance(style, Style) else Style.parse(style) + for name, style in styles.items() + } + ) + + @property + def config(self) -> str: + """Get contents of a config file for this theme.""" + config = "[styles]\n" + "\n".join( + f"{name} = {style}" for name, style in sorted(self.styles.items()) + ) + return config + + @classmethod + def from_file( + cls, config_file: IO[str], source: Optional[str] = None, inherit: bool = True + ) -> "Theme": + """Load a theme from a text mode file. + + Args: + config_file (IO[str]): An open conf file. + source (str, optional): The filename of the open file. Defaults to None. + inherit (bool, optional): Inherit default styles. Defaults to True. + + Returns: + Theme: A New theme instance. + """ + config = configparser.ConfigParser() + config.read_file(config_file, source=source) + styles = {name: Style.parse(value) for name, value in config.items("styles")} + theme = Theme(styles, inherit=inherit) + return theme + + @classmethod + def read(cls, path: str, inherit: bool = True) -> "Theme": + """Read a theme from a path. + + Args: + path (str): Path to a config file readable by Python configparser module. + inherit (bool, optional): Inherit default styles. Defaults to True. + + Returns: + Theme: A new theme instance. + """ + with open(path, "rt") as config_file: + return cls.from_file(config_file, source=path, inherit=inherit) + + +class ThemeStackError(Exception): + """Base exception for errors related to the theme stack.""" + + +class ThemeStack: + """A stack of themes. 
+ + Args: + theme (Theme): A theme instance + """ + + def __init__(self, theme: Theme) -> None: + self._entries: List[Dict[str, Style]] = [theme.styles] + self.get = self._entries[-1].get + + def push_theme(self, theme: Theme, inherit: bool = True) -> None: + """Push a theme on the top of the stack. + + Args: + theme (Theme): A Theme instance. + inherit (boolean, optional): Inherit styles from current top of stack. + """ + styles: Dict[str, Style] + styles = ( + {**self._entries[-1], **theme.styles} if inherit else theme.styles.copy() + ) + self._entries.append(styles) + self.get = self._entries[-1].get + + def pop_theme(self) -> None: + """Pop (and discard) the top-most theme.""" + if len(self._entries) == 1: + raise ThemeStackError("Unable to pop base theme") + self._entries.pop() + self.get = self._entries[-1].get + + +if __name__ == "__main__": # pragma: no cover + theme = Theme() + print(theme.config) diff --git a/pipenv/patched/notpip/_vendor/rich/themes.py b/pipenv/patched/notpip/_vendor/rich/themes.py new file mode 100644 index 0000000000..bf6db104a2 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/themes.py @@ -0,0 +1,5 @@ +from .default_styles import DEFAULT_STYLES +from .theme import Theme + + +DEFAULT = Theme(DEFAULT_STYLES) diff --git a/pipenv/patched/notpip/_vendor/rich/traceback.py b/pipenv/patched/notpip/_vendor/rich/traceback.py new file mode 100644 index 0000000000..0f81848085 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/traceback.py @@ -0,0 +1,678 @@ +from __future__ import absolute_import + +import os +import platform +import sys +from dataclasses import dataclass, field +from traceback import walk_tb +from types import ModuleType, TracebackType +from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Type, Union + +from pipenv.patched.notpip._vendor.pygments.lexers import guess_lexer_for_filename +from pipenv.patched.notpip._vendor.pygments.token import Comment, Keyword, Name, Number, Operator, String +from pipenv.patched.notpip._vendor.pygments.token import Text as TextToken +from pipenv.patched.notpip._vendor.pygments.token import Token + +from . import pretty +from ._loop import loop_first, loop_last +from .columns import Columns +from .console import Console, ConsoleOptions, ConsoleRenderable, RenderResult, group +from .constrain import Constrain +from .highlighter import RegexHighlighter, ReprHighlighter +from .panel import Panel +from .scope import render_scope +from .style import Style +from .syntax import Syntax +from .text import Text +from .theme import Theme + +WINDOWS = platform.system() == "Windows" + +LOCALS_MAX_LENGTH = 10 +LOCALS_MAX_STRING = 80 + + +def install( + *, + console: Optional[Console] = None, + width: Optional[int] = 100, + extra_lines: int = 3, + theme: Optional[str] = None, + word_wrap: bool = False, + show_locals: bool = False, + indent_guides: bool = True, + suppress: Iterable[Union[str, ModuleType]] = (), + max_frames: int = 100, +) -> Callable[[Type[BaseException], BaseException, Optional[TracebackType]], Any]: + """Install a rich traceback handler. + + Once installed, any tracebacks will be printed with syntax highlighting and rich formatting. + + + Args: + console (Optional[Console], optional): Console to write exception to. Default uses internal Console instance. + width (Optional[int], optional): Width (in characters) of traceback. Defaults to 100. + extra_lines (int, optional): Extra lines of code. Defaults to 3. + theme (Optional[str], optional): Pygments theme to use in traceback. 
Defaults to ``None`` which will pick + a theme appropriate for the platform. + word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False. + show_locals (bool, optional): Enable display of local variables. Defaults to False. + indent_guides (bool, optional): Enable indent guides in code and locals. Defaults to True. + suppress (Sequence[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback. + + Returns: + Callable: The previous exception handler that was replaced. + + """ + traceback_console = Console(file=sys.stderr) if console is None else console + + def excepthook( + type_: Type[BaseException], + value: BaseException, + traceback: Optional[TracebackType], + ) -> None: + traceback_console.print( + Traceback.from_exception( + type_, + value, + traceback, + width=width, + extra_lines=extra_lines, + theme=theme, + word_wrap=word_wrap, + show_locals=show_locals, + indent_guides=indent_guides, + suppress=suppress, + max_frames=max_frames, + ) + ) + + def ipy_excepthook_closure(ip: Any) -> None: # pragma: no cover + tb_data = {} # store information about showtraceback call + default_showtraceback = ip.showtraceback # keep reference of default traceback + + def ipy_show_traceback(*args: Any, **kwargs: Any) -> None: + """wrap the default ip.showtraceback to store info for ip._showtraceback""" + nonlocal tb_data + tb_data = kwargs + default_showtraceback(*args, **kwargs) + + def ipy_display_traceback( + *args: Any, is_syntax: bool = False, **kwargs: Any + ) -> None: + """Internally called traceback from ip._showtraceback""" + nonlocal tb_data + exc_tuple = ip._get_exc_info() + + # do not display trace on syntax error + tb: Optional[TracebackType] = None if is_syntax else exc_tuple[2] + + # determine correct tb_offset + compiled = tb_data.get("running_compiled_code", False) + tb_offset = tb_data.get("tb_offset", 1 if compiled else 0) + # remove ipython internal frames from trace with tb_offset + for _ in range(tb_offset): + if tb is None: + break + tb = tb.tb_next + + excepthook(exc_tuple[0], exc_tuple[1], tb) + tb_data = {} # clear data upon usage + + # replace _showtraceback instead of showtraceback to allow ipython features such as debugging to work + # this is also what the ipython docs recommends to modify when subclassing InteractiveShell + ip._showtraceback = ipy_display_traceback + # add wrapper to capture tb_data + ip.showtraceback = ipy_show_traceback + ip.showsyntaxerror = lambda *args, **kwargs: ipy_display_traceback( + *args, is_syntax=True, **kwargs + ) + + try: # pragma: no cover + # if within ipython, use customized traceback + ip = get_ipython() # type: ignore + ipy_excepthook_closure(ip) + return sys.excepthook + except Exception: + # otherwise use default system hook + old_excepthook = sys.excepthook + sys.excepthook = excepthook + return old_excepthook + + +@dataclass +class Frame: + filename: str + lineno: int + name: str + line: str = "" + locals: Optional[Dict[str, pretty.Node]] = None + + +@dataclass +class _SyntaxError: + offset: int + filename: str + line: str + lineno: int + msg: str + + +@dataclass +class Stack: + exc_type: str + exc_value: str + syntax_error: Optional[_SyntaxError] = None + is_cause: bool = False + frames: List[Frame] = field(default_factory=list) + + +@dataclass +class Trace: + stacks: List[Stack] + + +class PathHighlighter(RegexHighlighter): + highlights = [r"(?P.*/)(?P.+)"] + + +class Traceback: + """A Console renderable that renders a traceback. 
+ + Args: + trace (Trace, optional): A `Trace` object produced from `extract`. Defaults to None, which uses + the last exception. + width (Optional[int], optional): Number of characters used to traceback. Defaults to 100. + extra_lines (int, optional): Additional lines of code to render. Defaults to 3. + theme (str, optional): Override pygments theme used in traceback. + word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False. + show_locals (bool, optional): Enable display of local variables. Defaults to False. + indent_guides (bool, optional): Enable indent guides in code and locals. Defaults to True. + locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation. + Defaults to 10. + locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80. + suppress (Sequence[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback. + max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100. + + """ + + LEXERS = { + "": "text", + ".py": "python", + ".pxd": "cython", + ".pyx": "cython", + ".pxi": "pyrex", + } + + def __init__( + self, + trace: Optional[Trace] = None, + width: Optional[int] = 100, + extra_lines: int = 3, + theme: Optional[str] = None, + word_wrap: bool = False, + show_locals: bool = False, + indent_guides: bool = True, + locals_max_length: int = LOCALS_MAX_LENGTH, + locals_max_string: int = LOCALS_MAX_STRING, + suppress: Iterable[Union[str, ModuleType]] = (), + max_frames: int = 100, + ): + if trace is None: + exc_type, exc_value, traceback = sys.exc_info() + if exc_type is None or exc_value is None or traceback is None: + raise ValueError( + "Value for 'trace' required if not called in except: block" + ) + trace = self.extract( + exc_type, exc_value, traceback, show_locals=show_locals + ) + self.trace = trace + self.width = width + self.extra_lines = extra_lines + self.theme = Syntax.get_theme(theme or "ansi_dark") + self.word_wrap = word_wrap + self.show_locals = show_locals + self.indent_guides = indent_guides + self.locals_max_length = locals_max_length + self.locals_max_string = locals_max_string + + self.suppress: Sequence[str] = [] + for suppress_entity in suppress: + if not isinstance(suppress_entity, str): + assert ( + suppress_entity.__file__ is not None + ), f"{suppress_entity!r} must be a module with '__file__' attribute" + path = os.path.dirname(suppress_entity.__file__) + else: + path = suppress_entity + path = os.path.normpath(os.path.abspath(path)) + self.suppress.append(path) + self.max_frames = max(4, max_frames) if max_frames > 0 else 0 + + @classmethod + def from_exception( + cls, + exc_type: Type[Any], + exc_value: BaseException, + traceback: Optional[TracebackType], + width: Optional[int] = 100, + extra_lines: int = 3, + theme: Optional[str] = None, + word_wrap: bool = False, + show_locals: bool = False, + indent_guides: bool = True, + locals_max_length: int = LOCALS_MAX_LENGTH, + locals_max_string: int = LOCALS_MAX_STRING, + suppress: Iterable[Union[str, ModuleType]] = (), + max_frames: int = 100, + ) -> "Traceback": + """Create a traceback from exception info + + Args: + exc_type (Type[BaseException]): Exception type. + exc_value (BaseException): Exception value. + traceback (TracebackType): Python Traceback object. + width (Optional[int], optional): Number of characters used to traceback. Defaults to 100. 
+ extra_lines (int, optional): Additional lines of code to render. Defaults to 3. + theme (str, optional): Override pygments theme used in traceback. + word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False. + show_locals (bool, optional): Enable display of local variables. Defaults to False. + indent_guides (bool, optional): Enable indent guides in code and locals. Defaults to True. + locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation. + Defaults to 10. + locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80. + suppress (Iterable[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback. + max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100. + + Returns: + Traceback: A Traceback instance that may be printed. + """ + rich_traceback = cls.extract( + exc_type, exc_value, traceback, show_locals=show_locals + ) + return cls( + rich_traceback, + width=width, + extra_lines=extra_lines, + theme=theme, + word_wrap=word_wrap, + show_locals=show_locals, + indent_guides=indent_guides, + locals_max_length=locals_max_length, + locals_max_string=locals_max_string, + suppress=suppress, + max_frames=max_frames, + ) + + @classmethod + def extract( + cls, + exc_type: Type[BaseException], + exc_value: BaseException, + traceback: Optional[TracebackType], + show_locals: bool = False, + locals_max_length: int = LOCALS_MAX_LENGTH, + locals_max_string: int = LOCALS_MAX_STRING, + ) -> Trace: + """Extract traceback information. + + Args: + exc_type (Type[BaseException]): Exception type. + exc_value (BaseException): Exception value. + traceback (TracebackType): Python Traceback object. + show_locals (bool, optional): Enable display of local variables. Defaults to False. + locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation. + Defaults to 10. + locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80. + + Returns: + Trace: A Trace instance which you can use to construct a `Traceback`. 
+ """ + + stacks: List[Stack] = [] + is_cause = False + + from pipenv.patched.notpip._vendor.rich import _IMPORT_CWD + + def safe_str(_object: Any) -> str: + """Don't allow exceptions from __str__ to propegate.""" + try: + return str(_object) + except Exception: + return "" + + while True: + stack = Stack( + exc_type=safe_str(exc_type.__name__), + exc_value=safe_str(exc_value), + is_cause=is_cause, + ) + + if isinstance(exc_value, SyntaxError): + stack.syntax_error = _SyntaxError( + offset=exc_value.offset or 0, + filename=exc_value.filename or "?", + lineno=exc_value.lineno or 0, + line=exc_value.text or "", + msg=exc_value.msg, + ) + + stacks.append(stack) + append = stack.frames.append + + for frame_summary, line_no in walk_tb(traceback): + filename = frame_summary.f_code.co_filename + if filename and not filename.startswith("<"): + if not os.path.isabs(filename): + filename = os.path.join(_IMPORT_CWD, filename) + frame = Frame( + filename=filename or "?", + lineno=line_no, + name=frame_summary.f_code.co_name, + locals={ + key: pretty.traverse( + value, + max_length=locals_max_length, + max_string=locals_max_string, + ) + for key, value in frame_summary.f_locals.items() + } + if show_locals + else None, + ) + append(frame) + if "_rich_traceback_guard" in frame_summary.f_locals: + del stack.frames[:] + + cause = getattr(exc_value, "__cause__", None) + if cause and cause.__traceback__: + exc_type = cause.__class__ + exc_value = cause + traceback = cause.__traceback__ + if traceback: + is_cause = True + continue + + cause = exc_value.__context__ + if ( + cause + and cause.__traceback__ + and not getattr(exc_value, "__suppress_context__", False) + ): + exc_type = cause.__class__ + exc_value = cause + traceback = cause.__traceback__ + if traceback: + is_cause = False + continue + # No cover, code is reached but coverage doesn't recognize it. 
+ break # pragma: no cover + + trace = Trace(stacks=stacks) + return trace + + def __rich_console__( + self, console: Console, options: ConsoleOptions + ) -> RenderResult: + theme = self.theme + background_style = theme.get_background_style() + token_style = theme.get_style_for_token + + traceback_theme = Theme( + { + "pretty": token_style(TextToken), + "pygments.text": token_style(Token), + "pygments.string": token_style(String), + "pygments.function": token_style(Name.Function), + "pygments.number": token_style(Number), + "repr.indent": token_style(Comment) + Style(dim=True), + "repr.str": token_style(String), + "repr.brace": token_style(TextToken) + Style(bold=True), + "repr.number": token_style(Number), + "repr.bool_true": token_style(Keyword.Constant), + "repr.bool_false": token_style(Keyword.Constant), + "repr.none": token_style(Keyword.Constant), + "scope.border": token_style(String.Delimiter), + "scope.equals": token_style(Operator), + "scope.key": token_style(Name), + "scope.key.special": token_style(Name.Constant) + Style(dim=True), + }, + inherit=False, + ) + + highlighter = ReprHighlighter() + for last, stack in loop_last(reversed(self.trace.stacks)): + if stack.frames: + stack_renderable: ConsoleRenderable = Panel( + self._render_stack(stack), + title="[traceback.title]Traceback [dim](most recent call last)", + style=background_style, + border_style="traceback.border", + expand=True, + padding=(0, 1), + ) + stack_renderable = Constrain(stack_renderable, self.width) + with console.use_theme(traceback_theme): + yield stack_renderable + if stack.syntax_error is not None: + with console.use_theme(traceback_theme): + yield Constrain( + Panel( + self._render_syntax_error(stack.syntax_error), + style=background_style, + border_style="traceback.border.syntax_error", + expand=True, + padding=(0, 1), + width=self.width, + ), + self.width, + ) + yield Text.assemble( + (f"{stack.exc_type}: ", "traceback.exc_type"), + highlighter(stack.syntax_error.msg), + ) + elif stack.exc_value: + yield Text.assemble( + (f"{stack.exc_type}: ", "traceback.exc_type"), + highlighter(stack.exc_value), + ) + else: + yield Text.assemble((f"{stack.exc_type}", "traceback.exc_type")) + + if not last: + if stack.is_cause: + yield Text.from_markup( + "\n[i]The above exception was the direct cause of the following exception:\n", + ) + else: + yield Text.from_markup( + "\n[i]During handling of the above exception, another exception occurred:\n", + ) + + @group() + def _render_syntax_error(self, syntax_error: _SyntaxError) -> RenderResult: + highlighter = ReprHighlighter() + path_highlighter = PathHighlighter() + if syntax_error.filename != "": + text = Text.assemble( + (f" {syntax_error.filename}", "pygments.string"), + (":", "pygments.text"), + (str(syntax_error.lineno), "pygments.number"), + style="pygments.text", + ) + yield path_highlighter(text) + syntax_error_text = highlighter(syntax_error.line.rstrip()) + syntax_error_text.no_wrap = True + offset = min(syntax_error.offset - 1, len(syntax_error_text)) + syntax_error_text.stylize("bold underline", offset, offset) + syntax_error_text += Text.from_markup( + "\n" + " " * offset + "[traceback.offset]▲[/]", + style="pygments.text", + ) + yield syntax_error_text + + @classmethod + def _guess_lexer(cls, filename: str, code: str) -> str: + ext = os.path.splitext(filename)[-1] + if not ext: + # No extension, look at first line to see if it is a hashbang + # Note, this is an educated guess and not a guarantee + # If it fails, the only downside is that the code is 
highlighted strangely + new_line_index = code.find("\n") + first_line = code[:new_line_index] if new_line_index != -1 else code + if first_line.startswith("#!") and "python" in first_line.lower(): + return "python" + lexer_name = ( + cls.LEXERS.get(ext) or guess_lexer_for_filename(filename, code).name + ) + return lexer_name + + @group() + def _render_stack(self, stack: Stack) -> RenderResult: + path_highlighter = PathHighlighter() + theme = self.theme + code_cache: Dict[str, str] = {} + + def read_code(filename: str) -> str: + """Read files, and cache results on filename. + + Args: + filename (str): Filename to read + + Returns: + str: Contents of file + """ + code = code_cache.get(filename) + if code is None: + with open( + filename, "rt", encoding="utf-8", errors="replace" + ) as code_file: + code = code_file.read() + code_cache[filename] = code + return code + + def render_locals(frame: Frame) -> Iterable[ConsoleRenderable]: + if frame.locals: + yield render_scope( + frame.locals, + title="locals", + indent_guides=self.indent_guides, + max_length=self.locals_max_length, + max_string=self.locals_max_string, + ) + + exclude_frames: Optional[range] = None + if self.max_frames != 0: + exclude_frames = range( + self.max_frames // 2, + len(stack.frames) - self.max_frames // 2, + ) + + excluded = False + for frame_index, frame in enumerate(stack.frames): + + if exclude_frames and frame_index in exclude_frames: + excluded = True + continue + + if excluded: + assert exclude_frames is not None + yield Text( + f"\n... {len(exclude_frames)} frames hidden ...", + justify="center", + style="traceback.error", + ) + excluded = False + + first = frame_index == 0 + frame_filename = frame.filename + suppressed = any(frame_filename.startswith(path) for path in self.suppress) + + text = Text.assemble( + path_highlighter(Text(frame.filename, style="pygments.string")), + (":", "pygments.text"), + (str(frame.lineno), "pygments.number"), + " in ", + (frame.name, "pygments.function"), + style="pygments.text", + ) + if not frame.filename.startswith("<") and not first: + yield "" + yield text + if frame.filename.startswith("<"): + yield from render_locals(frame) + continue + if not suppressed: + try: + code = read_code(frame.filename) + lexer_name = self._guess_lexer(frame.filename, code) + syntax = Syntax( + code, + lexer_name, + theme=theme, + line_numbers=True, + line_range=( + frame.lineno - self.extra_lines, + frame.lineno + self.extra_lines, + ), + highlight_lines={frame.lineno}, + word_wrap=self.word_wrap, + code_width=88, + indent_guides=self.indent_guides, + dedent=False, + ) + yield "" + except Exception as error: + yield Text.assemble( + (f"\n{error}", "traceback.error"), + ) + else: + yield ( + Columns( + [ + syntax, + *render_locals(frame), + ], + padding=1, + ) + if frame.locals + else syntax + ) + + +if __name__ == "__main__": # pragma: no cover + + from .console import Console + + console = Console() + import sys + + def bar(a: Any) -> None: # 这是对亚洲语言支持的测试。面对模棱两可的想法,拒绝猜测的诱惑 + one = 1 + print(one / a) + + def foo(a: Any) -> None: + _rich_traceback_guard = True + zed = { + "characters": { + "Paul Atreides", + "Vladimir Harkonnen", + "Thufir Hawat", + "Duncan Idaho", + }, + "atomic_types": (None, False, True), + } + bar(a) + + def error() -> None: + + try: + try: + foo(0) + except: + slfkjsldkfj # type: ignore + except: + console.print_exception(show_locals=True) + + error() diff --git a/pipenv/patched/notpip/_vendor/rich/tree.py b/pipenv/patched/notpip/_vendor/rich/tree.py new file mode 100644 
index 0000000000..7c306960f6 --- /dev/null +++ b/pipenv/patched/notpip/_vendor/rich/tree.py @@ -0,0 +1,249 @@ +from typing import Iterator, List, Optional, Tuple + +from ._loop import loop_first, loop_last +from .console import Console, ConsoleOptions, RenderableType, RenderResult +from .jupyter import JupyterMixin +from .measure import Measurement +from .segment import Segment +from .style import Style, StyleStack, StyleType +from .styled import Styled + + +class Tree(JupyterMixin): + """A renderable for a tree structure. + + Args: + label (RenderableType): The renderable or str for the tree label. + style (StyleType, optional): Style of this tree. Defaults to "tree". + guide_style (StyleType, optional): Style of the guide lines. Defaults to "tree.line". + expanded (bool, optional): Also display children. Defaults to True. + highlight (bool, optional): Highlight renderable (if str). Defaults to False. + """ + + def __init__( + self, + label: RenderableType, + *, + style: StyleType = "tree", + guide_style: StyleType = "tree.line", + expanded: bool = True, + highlight: bool = False, + hide_root: bool = False, + ) -> None: + self.label = label + self.style = style + self.guide_style = guide_style + self.children: List[Tree] = [] + self.expanded = expanded + self.highlight = highlight + self.hide_root = hide_root + + def add( + self, + label: RenderableType, + *, + style: Optional[StyleType] = None, + guide_style: Optional[StyleType] = None, + expanded: bool = True, + highlight: Optional[bool] = False, + ) -> "Tree": + """Add a child tree. + + Args: + label (RenderableType): The renderable or str for the tree label. + style (StyleType, optional): Style of this tree. Defaults to "tree". + guide_style (StyleType, optional): Style of the guide lines. Defaults to "tree.line". + expanded (bool, optional): Also display children. Defaults to True. + highlight (Optional[bool], optional): Highlight renderable (if str). Defaults to False. + + Returns: + Tree: A new child Tree, which may be further modified. 
+ """ + node = Tree( + label, + style=self.style if style is None else style, + guide_style=self.guide_style if guide_style is None else guide_style, + expanded=expanded, + highlight=self.highlight if highlight is None else highlight, + ) + self.children.append(node) + return node + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": + + stack: List[Iterator[Tuple[bool, Tree]]] = [] + pop = stack.pop + push = stack.append + new_line = Segment.line() + + get_style = console.get_style + null_style = Style.null() + guide_style = get_style(self.guide_style, default="") or null_style + SPACE, CONTINUE, FORK, END = range(4) + + ASCII_GUIDES = (" ", "| ", "+-- ", "`-- ") + TREE_GUIDES = [ + (" ", "│ ", "├── ", "└── "), + (" ", "┃ ", "┣━━ ", "┗━━ "), + (" ", "║ ", "╠══ ", "╚══ "), + ] + _Segment = Segment + + def make_guide(index: int, style: Style) -> Segment: + """Make a Segment for a level of the guide lines.""" + if options.ascii_only: + line = ASCII_GUIDES[index] + else: + guide = 1 if style.bold else (2 if style.underline2 else 0) + line = TREE_GUIDES[0 if options.legacy_windows else guide][index] + return _Segment(line, style) + + levels: List[Segment] = [make_guide(CONTINUE, guide_style)] + push(iter(loop_last([self]))) + + guide_style_stack = StyleStack(get_style(self.guide_style)) + style_stack = StyleStack(get_style(self.style)) + remove_guide_styles = Style(bold=False, underline2=False) + + depth = 0 + + while stack: + stack_node = pop() + try: + last, node = next(stack_node) + except StopIteration: + levels.pop() + if levels: + guide_style = levels[-1].style or null_style + levels[-1] = make_guide(FORK, guide_style) + guide_style_stack.pop() + style_stack.pop() + continue + push(stack_node) + if last: + levels[-1] = make_guide(END, levels[-1].style or null_style) + + guide_style = guide_style_stack.current + get_style(node.guide_style) + style = style_stack.current + get_style(node.style) + prefix = levels[(2 if self.hide_root else 1) :] + renderable_lines = console.render_lines( + Styled(node.label, style), + options.update( + width=options.max_width + - sum(level.cell_length for level in prefix), + highlight=self.highlight, + height=None, + ), + ) + + if not (depth == 0 and self.hide_root): + for first, line in loop_first(renderable_lines): + if prefix: + yield from _Segment.apply_style( + prefix, + style.background_style, + post_style=remove_guide_styles, + ) + yield from line + yield new_line + if first and prefix: + prefix[-1] = make_guide( + SPACE if last else CONTINUE, prefix[-1].style or null_style + ) + + if node.expanded and node.children: + levels[-1] = make_guide( + SPACE if last else CONTINUE, levels[-1].style or null_style + ) + levels.append( + make_guide(END if len(node.children) == 1 else FORK, guide_style) + ) + style_stack.push(get_style(node.style)) + guide_style_stack.push(get_style(node.guide_style)) + push(iter(loop_last(node.children))) + depth += 1 + + def __rich_measure__( + self, console: "Console", options: "ConsoleOptions" + ) -> "Measurement": + stack: List[Iterator[Tree]] = [iter([self])] + pop = stack.pop + push = stack.append + minimum = 0 + maximum = 0 + measure = Measurement.get + level = 0 + while stack: + iter_tree = pop() + try: + tree = next(iter_tree) + except StopIteration: + level -= 1 + continue + push(iter_tree) + min_measure, max_measure = measure(console, options, tree.label) + indent = level * 4 + minimum = max(min_measure + indent, minimum) + maximum = max(max_measure + indent, maximum) + 
if tree.expanded and tree.children: + push(iter(tree.children)) + level += 1 + return Measurement(minimum, maximum) + + +if __name__ == "__main__": # pragma: no cover + + from pipenv.patched.notpip._vendor.rich.console import Group + from pipenv.patched.notpip._vendor.rich.markdown import Markdown + from pipenv.patched.notpip._vendor.rich.panel import Panel + from pipenv.patched.notpip._vendor.rich.syntax import Syntax + from pipenv.patched.notpip._vendor.rich.table import Table + + table = Table(row_styles=["", "dim"]) + + table.add_column("Released", style="cyan", no_wrap=True) + table.add_column("Title", style="magenta") + table.add_column("Box Office", justify="right", style="green") + + table.add_row("Dec 20, 2019", "Star Wars: The Rise of Skywalker", "$952,110,690") + table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347") + table.add_row("Dec 15, 2017", "Star Wars Ep. V111: The Last Jedi", "$1,332,539,889") + table.add_row("Dec 16, 2016", "Rogue One: A Star Wars Story", "$1,332,439,889") + + code = """\ +class Segment(NamedTuple): + text: str = "" + style: Optional[Style] = None + is_control: bool = False +""" + syntax = Syntax(code, "python", theme="monokai", line_numbers=True) + + markdown = Markdown( + """\ +### example.md +> Hello, World! +> +> Markdown _all_ the things +""" + ) + + root = Tree("🌲 [b green]Rich Tree", highlight=True, hide_root=True) + + node = root.add(":file_folder: Renderables", guide_style="red") + simple_node = node.add(":file_folder: [bold yellow]Atomic", guide_style="uu green") + simple_node.add(Group("📄 Syntax", syntax)) + simple_node.add(Group("📄 Markdown", Panel(markdown, border_style="green"))) + + containers_node = node.add( + ":file_folder: [bold magenta]Containers", guide_style="bold magenta" + ) + containers_node.expanded = True + panel = Panel.fit("Just a panel", border_style="red") + containers_node.add(Group("📄 Panels", panel)) + + containers_node.add(Group("📄 [b magenta]Table", table)) + + console = Console() + console.print(root) diff --git a/pipenv/patched/notpip/_vendor/tomli/LICENSE b/pipenv/patched/notpip/_vendor/tomli/LICENSE deleted file mode 100644 index e859590f88..0000000000 --- a/pipenv/patched/notpip/_vendor/tomli/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2021 Taneli Hukkinen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
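As a quick orientation for reviewers, here is a minimal sketch of how the Traceback renderable vendored in traceback.py above is typically driven. It is illustrative only and not part of this patch; the import path simply follows the vendored layout introduced in this diff:

    from pipenv.patched.notpip._vendor.rich.console import Console
    from pipenv.patched.notpip._vendor.rich.traceback import Traceback

    console = Console()
    try:
        1 / 0
    except Exception:
        # Constructed inside an except block, Traceback() captures
        # sys.exc_info() itself; show_locals adds a panel of each frame's
        # local variables, and max_frames elides the middle of deep stacks.
        console.print(Traceback(show_locals=True, max_frames=20))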
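Likewise, the new tree.py module renders nested labels with guide lines. A small illustrative sketch, not part of the patch, using only the API shown in the diff (Tree(), Tree.add(), Console.print()):

    from pipenv.patched.notpip._vendor.rich.console import Console
    from pipenv.patched.notpip._vendor.rich.tree import Tree

    root = Tree("Pipfile")                          # root label
    packages = root.add("packages", guide_style="bold")
    packages.add("requests")
    packages.add("rich")
    root.add("dev-packages").add("pytest")          # add() returns the child node

    Console().print(root)                           # draws labels plus guide lines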
diff --git a/pipenv/patched/notpip/_vendor/typing_extensions.py b/pipenv/patched/notpip/_vendor/typing_extensions.py new file mode 100644 index 0000000000..9f1c7aa31e --- /dev/null +++ b/pipenv/patched/notpip/_vendor/typing_extensions.py @@ -0,0 +1,2296 @@ +import abc +import collections +import collections.abc +import operator +import sys +import typing + +# After PEP 560, internal typing API was substantially reworked. +# This is especially important for Protocol class which uses internal APIs +# quite extensively. +PEP_560 = sys.version_info[:3] >= (3, 7, 0) + +if PEP_560: + GenericMeta = type +else: + # 3.6 + from typing import GenericMeta, _type_vars # noqa + +# The two functions below are copies of typing internal helpers. +# They are needed by _ProtocolMeta + + +def _no_slots_copy(dct): + dict_copy = dict(dct) + if '__slots__' in dict_copy: + for slot in dict_copy['__slots__']: + dict_copy.pop(slot, None) + return dict_copy + + +def _check_generic(cls, parameters): + if not cls.__parameters__: + raise TypeError(f"{cls} is not a generic class") + alen = len(parameters) + elen = len(cls.__parameters__) + if alen != elen: + raise TypeError(f"Too {'many' if alen > elen else 'few'} arguments for {cls};" + f" actual {alen}, expected {elen}") + + +# Please keep __all__ alphabetized within each category. +__all__ = [ + # Super-special typing primitives. + 'ClassVar', + 'Concatenate', + 'Final', + 'ParamSpec', + 'Self', + 'Type', + + # ABCs (from collections.abc). + 'Awaitable', + 'AsyncIterator', + 'AsyncIterable', + 'Coroutine', + 'AsyncGenerator', + 'AsyncContextManager', + 'ChainMap', + + # Concrete collection types. + 'ContextManager', + 'Counter', + 'Deque', + 'DefaultDict', + 'OrderedDict', + 'TypedDict', + + # Structural checks, a.k.a. protocols. + 'SupportsIndex', + + # One-off things. + 'Annotated', + 'final', + 'IntVar', + 'Literal', + 'NewType', + 'overload', + 'Protocol', + 'runtime', + 'runtime_checkable', + 'Text', + 'TypeAlias', + 'TypeGuard', + 'TYPE_CHECKING', +] + +if PEP_560: + __all__.extend(["get_args", "get_origin", "get_type_hints"]) + +# 3.6.2+ +if hasattr(typing, 'NoReturn'): + NoReturn = typing.NoReturn +# 3.6.0-3.6.1 +else: + class _NoReturn(typing._FinalTypingBase, _root=True): + """Special type indicating functions that never return. + Example:: + + from typing import NoReturn + + def stop() -> NoReturn: + raise Exception('no way') + + This type is invalid in other positions, e.g., ``List[NoReturn]`` + will fail in static type checkers. + """ + __slots__ = () + + def __instancecheck__(self, obj): + raise TypeError("NoReturn cannot be used with isinstance().") + + def __subclasscheck__(self, cls): + raise TypeError("NoReturn cannot be used with issubclass().") + + NoReturn = _NoReturn(_root=True) + +# Some unconstrained type variables. These are used by the container types. +# (These are not for export.) +T = typing.TypeVar('T') # Any type. +KT = typing.TypeVar('KT') # Key type. +VT = typing.TypeVar('VT') # Value type. +T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers. +T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant. + +ClassVar = typing.ClassVar + +# On older versions of typing there is an internal class named "Final". +# 3.8+ +if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7): + Final = typing.Final +# 3.7 +elif sys.version_info[:2] >= (3, 7): + class _FinalForm(typing._SpecialForm, _root=True): + + def __repr__(self): + return 'typing_extensions.' 
+ self._name + + def __getitem__(self, parameters): + item = typing._type_check(parameters, + f'{self._name} accepts only single type') + return typing._GenericAlias(self, (item,)) + + Final = _FinalForm('Final', + doc="""A special typing construct to indicate that a name + cannot be re-assigned or overridden in a subclass. + For example: + + MAX_SIZE: Final = 9000 + MAX_SIZE += 1 # Error reported by type checker + + class Connection: + TIMEOUT: Final[int] = 10 + class FastConnector(Connection): + TIMEOUT = 1 # Error reported by type checker + + There is no runtime checking of these properties.""") +# 3.6 +else: + class _Final(typing._FinalTypingBase, _root=True): + """A special typing construct to indicate that a name + cannot be re-assigned or overridden in a subclass. + For example: + + MAX_SIZE: Final = 9000 + MAX_SIZE += 1 # Error reported by type checker + + class Connection: + TIMEOUT: Final[int] = 10 + class FastConnector(Connection): + TIMEOUT = 1 # Error reported by type checker + + There is no runtime checking of these properties. + """ + + __slots__ = ('__type__',) + + def __init__(self, tp=None, **kwds): + self.__type__ = tp + + def __getitem__(self, item): + cls = type(self) + if self.__type__ is None: + return cls(typing._type_check(item, + f'{cls.__name__[1:]} accepts only single type.'), + _root=True) + raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted') + + def _eval_type(self, globalns, localns): + new_tp = typing._eval_type(self.__type__, globalns, localns) + if new_tp == self.__type__: + return self + return type(self)(new_tp, _root=True) + + def __repr__(self): + r = super().__repr__() + if self.__type__ is not None: + r += f'[{typing._type_repr(self.__type__)}]' + return r + + def __hash__(self): + return hash((type(self).__name__, self.__type__)) + + def __eq__(self, other): + if not isinstance(other, _Final): + return NotImplemented + if self.__type__ is not None: + return self.__type__ == other.__type__ + return self is other + + Final = _Final(_root=True) + + +# 3.8+ +if hasattr(typing, 'final'): + final = typing.final +# 3.6-3.7 +else: + def final(f): + """This decorator can be used to indicate to type checkers that + the decorated method cannot be overridden, and decorated class + cannot be subclassed. For example: + + class Base: + @final + def done(self) -> None: + ... + class Sub(Base): + def done(self) -> None: # Error reported by type checker + ... + @final + class Leaf: + ... + class Other(Leaf): # Error reported by type checker + ... + + There is no runtime checking of these properties. + """ + return f + + +def IntVar(name): + return typing.TypeVar(name) + + +# 3.8+: +if hasattr(typing, 'Literal'): + Literal = typing.Literal +# 3.7: +elif sys.version_info[:2] >= (3, 7): + class _LiteralForm(typing._SpecialForm, _root=True): + + def __repr__(self): + return 'typing_extensions.' + self._name + + def __getitem__(self, parameters): + return typing._GenericAlias(self, parameters) + + Literal = _LiteralForm('Literal', + doc="""A type that can be used to indicate to type checkers + that the corresponding value has a value literally equivalent + to the provided parameter. For example: + + var: Literal[4] = 4 + + The type checker understands that 'var' is literally equal to + the value 4 and no other value. + + Literal[...] cannot be subclassed. 
There is no runtime + checking verifying that the parameter is actually a value + instead of a type.""") +# 3.6: +else: + class _Literal(typing._FinalTypingBase, _root=True): + """A type that can be used to indicate to type checkers that the + corresponding value has a value literally equivalent to the + provided parameter. For example: + + var: Literal[4] = 4 + + The type checker understands that 'var' is literally equal to the + value 4 and no other value. + + Literal[...] cannot be subclassed. There is no runtime checking + verifying that the parameter is actually a value instead of a type. + """ + + __slots__ = ('__values__',) + + def __init__(self, values=None, **kwds): + self.__values__ = values + + def __getitem__(self, values): + cls = type(self) + if self.__values__ is None: + if not isinstance(values, tuple): + values = (values,) + return cls(values, _root=True) + raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted') + + def _eval_type(self, globalns, localns): + return self + + def __repr__(self): + r = super().__repr__() + if self.__values__ is not None: + r += f'[{", ".join(map(typing._type_repr, self.__values__))}]' + return r + + def __hash__(self): + return hash((type(self).__name__, self.__values__)) + + def __eq__(self, other): + if not isinstance(other, _Literal): + return NotImplemented + if self.__values__ is not None: + return self.__values__ == other.__values__ + return self is other + + Literal = _Literal(_root=True) + + +_overload_dummy = typing._overload_dummy # noqa +overload = typing.overload + + +# This is not a real generic class. Don't use outside annotations. +Type = typing.Type + +# Various ABCs mimicking those in collections.abc. +# A few are simply re-exported for completeness. + + +class _ExtensionsGenericMeta(GenericMeta): + def __subclasscheck__(self, subclass): + """This mimics a more modern GenericMeta.__subclasscheck__() logic + (that does not have problems with recursion) to work around interactions + between collections, typing, and typing_extensions on older + versions of Python, see https://github.com/python/typing/issues/501. 
+ """ + if self.__origin__ is not None: + if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']: + raise TypeError("Parameterized generics cannot be used with class " + "or instance checks") + return False + if not self.__extra__: + return super().__subclasscheck__(subclass) + res = self.__extra__.__subclasshook__(subclass) + if res is not NotImplemented: + return res + if self.__extra__ in subclass.__mro__: + return True + for scls in self.__extra__.__subclasses__(): + if isinstance(scls, GenericMeta): + continue + if issubclass(subclass, scls): + return True + return False + + +Awaitable = typing.Awaitable +Coroutine = typing.Coroutine +AsyncIterable = typing.AsyncIterable +AsyncIterator = typing.AsyncIterator + +# 3.6.1+ +if hasattr(typing, 'Deque'): + Deque = typing.Deque +# 3.6.0 +else: + class Deque(collections.deque, typing.MutableSequence[T], + metaclass=_ExtensionsGenericMeta, + extra=collections.deque): + __slots__ = () + + def __new__(cls, *args, **kwds): + if cls._gorg is Deque: + return collections.deque(*args, **kwds) + return typing._generic_new(collections.deque, cls, *args, **kwds) + +ContextManager = typing.ContextManager +# 3.6.2+ +if hasattr(typing, 'AsyncContextManager'): + AsyncContextManager = typing.AsyncContextManager +# 3.6.0-3.6.1 +else: + from _collections_abc import _check_methods as _check_methods_in_mro # noqa + + class AsyncContextManager(typing.Generic[T_co]): + __slots__ = () + + async def __aenter__(self): + return self + + @abc.abstractmethod + async def __aexit__(self, exc_type, exc_value, traceback): + return None + + @classmethod + def __subclasshook__(cls, C): + if cls is AsyncContextManager: + return _check_methods_in_mro(C, "__aenter__", "__aexit__") + return NotImplemented + +DefaultDict = typing.DefaultDict + +# 3.7.2+ +if hasattr(typing, 'OrderedDict'): + OrderedDict = typing.OrderedDict +# 3.7.0-3.7.2 +elif (3, 7, 0) <= sys.version_info[:3] < (3, 7, 2): + OrderedDict = typing._alias(collections.OrderedDict, (KT, VT)) +# 3.6 +else: + class OrderedDict(collections.OrderedDict, typing.MutableMapping[KT, VT], + metaclass=_ExtensionsGenericMeta, + extra=collections.OrderedDict): + + __slots__ = () + + def __new__(cls, *args, **kwds): + if cls._gorg is OrderedDict: + return collections.OrderedDict(*args, **kwds) + return typing._generic_new(collections.OrderedDict, cls, *args, **kwds) + +# 3.6.2+ +if hasattr(typing, 'Counter'): + Counter = typing.Counter +# 3.6.0-3.6.1 +else: + class Counter(collections.Counter, + typing.Dict[T, int], + metaclass=_ExtensionsGenericMeta, extra=collections.Counter): + + __slots__ = () + + def __new__(cls, *args, **kwds): + if cls._gorg is Counter: + return collections.Counter(*args, **kwds) + return typing._generic_new(collections.Counter, cls, *args, **kwds) + +# 3.6.1+ +if hasattr(typing, 'ChainMap'): + ChainMap = typing.ChainMap +elif hasattr(collections, 'ChainMap'): + class ChainMap(collections.ChainMap, typing.MutableMapping[KT, VT], + metaclass=_ExtensionsGenericMeta, + extra=collections.ChainMap): + + __slots__ = () + + def __new__(cls, *args, **kwds): + if cls._gorg is ChainMap: + return collections.ChainMap(*args, **kwds) + return typing._generic_new(collections.ChainMap, cls, *args, **kwds) + +# 3.6.1+ +if hasattr(typing, 'AsyncGenerator'): + AsyncGenerator = typing.AsyncGenerator +# 3.6.0 +else: + class AsyncGenerator(AsyncIterator[T_co], typing.Generic[T_co, T_contra], + metaclass=_ExtensionsGenericMeta, + extra=collections.abc.AsyncGenerator): + __slots__ = () + +NewType = typing.NewType 
+Text = typing.Text +TYPE_CHECKING = typing.TYPE_CHECKING + + +def _gorg(cls): + """This function exists for compatibility with old typing versions.""" + assert isinstance(cls, GenericMeta) + if hasattr(cls, '_gorg'): + return cls._gorg + while cls.__origin__ is not None: + cls = cls.__origin__ + return cls + + +_PROTO_WHITELIST = ['Callable', 'Awaitable', + 'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator', + 'Hashable', 'Sized', 'Container', 'Collection', 'Reversible', + 'ContextManager', 'AsyncContextManager'] + + +def _get_protocol_attrs(cls): + attrs = set() + for base in cls.__mro__[:-1]: # without object + if base.__name__ in ('Protocol', 'Generic'): + continue + annotations = getattr(base, '__annotations__', {}) + for attr in list(base.__dict__.keys()) + list(annotations.keys()): + if (not attr.startswith('_abc_') and attr not in ( + '__abstractmethods__', '__annotations__', '__weakref__', + '_is_protocol', '_is_runtime_protocol', '__dict__', + '__args__', '__slots__', + '__next_in_mro__', '__parameters__', '__origin__', + '__orig_bases__', '__extra__', '__tree_hash__', + '__doc__', '__subclasshook__', '__init__', '__new__', + '__module__', '_MutableMapping__marker', '_gorg')): + attrs.add(attr) + return attrs + + +def _is_callable_members_only(cls): + return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls)) + + +# 3.8+ +if hasattr(typing, 'Protocol'): + Protocol = typing.Protocol +# 3.7 +elif PEP_560: + from typing import _collect_type_vars # noqa + + def _no_init(self, *args, **kwargs): + if type(self)._is_protocol: + raise TypeError('Protocols cannot be instantiated') + + class _ProtocolMeta(abc.ABCMeta): + # This metaclass is a bit unfortunate and exists only because of the lack + # of __instancehook__. + def __instancecheck__(cls, instance): + # We need this method for situations where attributes are + # assigned in __init__. + if ((not getattr(cls, '_is_protocol', False) or + _is_callable_members_only(cls)) and + issubclass(instance.__class__, cls)): + return True + if cls._is_protocol: + if all(hasattr(instance, attr) and + (not callable(getattr(cls, attr, None)) or + getattr(instance, attr) is not None) + for attr in _get_protocol_attrs(cls)): + return True + return super().__instancecheck__(instance) + + class Protocol(metaclass=_ProtocolMeta): + # There is quite a lot of overlapping code with typing.Generic. + # Unfortunately it is hard to avoid this while these live in two different + # modules. The duplicated code will be removed when Protocol is moved to typing. + """Base class for protocol classes. Protocol classes are defined as:: + + class Proto(Protocol): + def meth(self) -> int: + ... + + Such classes are primarily used with static type checkers that recognize + structural subtyping (static duck-typing), for example:: + + class C: + def meth(self) -> int: + return 0 + + def func(x: Proto) -> int: + return x.meth() + + func(C()) # Passes static type check + + See PEP 544 for details. Protocol classes decorated with + @typing_extensions.runtime act as simple-minded runtime protocol that checks + only the presence of given attributes, ignoring their type signatures. + + Protocol classes can be generic, they are defined as:: + + class GenProto(Protocol[T]): + def meth(self) -> T: + ... 
+ """ + __slots__ = () + _is_protocol = True + + def __new__(cls, *args, **kwds): + if cls is Protocol: + raise TypeError("Type Protocol cannot be instantiated; " + "it can only be used as a base class") + return super().__new__(cls) + + @typing._tp_cache + def __class_getitem__(cls, params): + if not isinstance(params, tuple): + params = (params,) + if not params and cls is not typing.Tuple: + raise TypeError( + f"Parameter list to {cls.__qualname__}[...] cannot be empty") + msg = "Parameters to generic types must be types." + params = tuple(typing._type_check(p, msg) for p in params) # noqa + if cls is Protocol: + # Generic can only be subscripted with unique type variables. + if not all(isinstance(p, typing.TypeVar) for p in params): + i = 0 + while isinstance(params[i], typing.TypeVar): + i += 1 + raise TypeError( + "Parameters to Protocol[...] must all be type variables." + f" Parameter {i + 1} is {params[i]}") + if len(set(params)) != len(params): + raise TypeError( + "Parameters to Protocol[...] must all be unique") + else: + # Subscripting a regular Generic subclass. + _check_generic(cls, params) + return typing._GenericAlias(cls, params) + + def __init_subclass__(cls, *args, **kwargs): + tvars = [] + if '__orig_bases__' in cls.__dict__: + error = typing.Generic in cls.__orig_bases__ + else: + error = typing.Generic in cls.__bases__ + if error: + raise TypeError("Cannot inherit from plain Generic") + if '__orig_bases__' in cls.__dict__: + tvars = _collect_type_vars(cls.__orig_bases__) + # Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn]. + # If found, tvars must be a subset of it. + # If not found, tvars is it. + # Also check for and reject plain Generic, + # and reject multiple Generic[...] and/or Protocol[...]. + gvars = None + for base in cls.__orig_bases__: + if (isinstance(base, typing._GenericAlias) and + base.__origin__ in (typing.Generic, Protocol)): + # for error messages + the_base = base.__origin__.__name__ + if gvars is not None: + raise TypeError( + "Cannot inherit from Generic[...]" + " and/or Protocol[...] multiple types.") + gvars = base.__parameters__ + if gvars is None: + gvars = tvars + else: + tvarset = set(tvars) + gvarset = set(gvars) + if not tvarset <= gvarset: + s_vars = ', '.join(str(t) for t in tvars if t not in gvarset) + s_args = ', '.join(str(g) for g in gvars) + raise TypeError(f"Some type variables ({s_vars}) are" + f" not listed in {the_base}[{s_args}]") + tvars = gvars + cls.__parameters__ = tuple(tvars) + + # Determine if this is a protocol or a concrete subclass. + if not cls.__dict__.get('_is_protocol', None): + cls._is_protocol = any(b is Protocol for b in cls.__bases__) + + # Set (or override) the protocol subclass hook. 
+ def _proto_hook(other): + if not cls.__dict__.get('_is_protocol', None): + return NotImplemented + if not getattr(cls, '_is_runtime_protocol', False): + if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']: + return NotImplemented + raise TypeError("Instance and class checks can only be used with" + " @runtime protocols") + if not _is_callable_members_only(cls): + if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']: + return NotImplemented + raise TypeError("Protocols with non-method members" + " don't support issubclass()") + if not isinstance(other, type): + # Same error as for issubclass(1, int) + raise TypeError('issubclass() arg 1 must be a class') + for attr in _get_protocol_attrs(cls): + for base in other.__mro__: + if attr in base.__dict__: + if base.__dict__[attr] is None: + return NotImplemented + break + annotations = getattr(base, '__annotations__', {}) + if (isinstance(annotations, typing.Mapping) and + attr in annotations and + isinstance(other, _ProtocolMeta) and + other._is_protocol): + break + else: + return NotImplemented + return True + if '__subclasshook__' not in cls.__dict__: + cls.__subclasshook__ = _proto_hook + + # We have nothing more to do for non-protocols. + if not cls._is_protocol: + return + + # Check consistency of bases. + for base in cls.__bases__: + if not (base in (object, typing.Generic) or + base.__module__ == 'collections.abc' and + base.__name__ in _PROTO_WHITELIST or + isinstance(base, _ProtocolMeta) and base._is_protocol): + raise TypeError('Protocols can only inherit from other' + f' protocols, got {repr(base)}') + cls.__init__ = _no_init +# 3.6 +else: + from typing import _next_in_mro, _type_check # noqa + + def _no_init(self, *args, **kwargs): + if type(self)._is_protocol: + raise TypeError('Protocols cannot be instantiated') + + class _ProtocolMeta(GenericMeta): + """Internal metaclass for Protocol. + + This exists so Protocol classes can be generic without deriving + from Generic. + """ + def __new__(cls, name, bases, namespace, + tvars=None, args=None, origin=None, extra=None, orig_bases=None): + # This is just a version copied from GenericMeta.__new__ that + # includes "Protocol" special treatment. (Comments removed for brevity.) + assert extra is None # Protocols should not have extra + if tvars is not None: + assert origin is not None + assert all(isinstance(t, typing.TypeVar) for t in tvars), tvars + else: + tvars = _type_vars(bases) + gvars = None + for base in bases: + if base is typing.Generic: + raise TypeError("Cannot inherit from plain Generic") + if (isinstance(base, GenericMeta) and + base.__origin__ in (typing.Generic, Protocol)): + if gvars is not None: + raise TypeError( + "Cannot inherit from Generic[...] or" + " Protocol[...] 
multiple times.") + gvars = base.__parameters__ + if gvars is None: + gvars = tvars + else: + tvarset = set(tvars) + gvarset = set(gvars) + if not tvarset <= gvarset: + s_vars = ", ".join(str(t) for t in tvars if t not in gvarset) + s_args = ", ".join(str(g) for g in gvars) + cls_name = "Generic" if any(b.__origin__ is typing.Generic + for b in bases) else "Protocol" + raise TypeError(f"Some type variables ({s_vars}) are" + f" not listed in {cls_name}[{s_args}]") + tvars = gvars + + initial_bases = bases + if (extra is not None and type(extra) is abc.ABCMeta and + extra not in bases): + bases = (extra,) + bases + bases = tuple(_gorg(b) if isinstance(b, GenericMeta) else b + for b in bases) + if any(isinstance(b, GenericMeta) and b is not typing.Generic for b in bases): + bases = tuple(b for b in bases if b is not typing.Generic) + namespace.update({'__origin__': origin, '__extra__': extra}) + self = super(GenericMeta, cls).__new__(cls, name, bases, namespace, + _root=True) + super(GenericMeta, self).__setattr__('_gorg', + self if not origin else + _gorg(origin)) + self.__parameters__ = tvars + self.__args__ = tuple(... if a is typing._TypingEllipsis else + () if a is typing._TypingEmpty else + a for a in args) if args else None + self.__next_in_mro__ = _next_in_mro(self) + if orig_bases is None: + self.__orig_bases__ = initial_bases + elif origin is not None: + self._abc_registry = origin._abc_registry + self._abc_cache = origin._abc_cache + if hasattr(self, '_subs_tree'): + self.__tree_hash__ = (hash(self._subs_tree()) if origin else + super(GenericMeta, self).__hash__()) + return self + + def __init__(cls, *args, **kwargs): + super().__init__(*args, **kwargs) + if not cls.__dict__.get('_is_protocol', None): + cls._is_protocol = any(b is Protocol or + isinstance(b, _ProtocolMeta) and + b.__origin__ is Protocol + for b in cls.__bases__) + if cls._is_protocol: + for base in cls.__mro__[1:]: + if not (base in (object, typing.Generic) or + base.__module__ == 'collections.abc' and + base.__name__ in _PROTO_WHITELIST or + isinstance(base, typing.TypingMeta) and base._is_protocol or + isinstance(base, GenericMeta) and + base.__origin__ is typing.Generic): + raise TypeError(f'Protocols can only inherit from other' + f' protocols, got {repr(base)}') + + cls.__init__ = _no_init + + def _proto_hook(other): + if not cls.__dict__.get('_is_protocol', None): + return NotImplemented + if not isinstance(other, type): + # Same error as for issubclass(1, int) + raise TypeError('issubclass() arg 1 must be a class') + for attr in _get_protocol_attrs(cls): + for base in other.__mro__: + if attr in base.__dict__: + if base.__dict__[attr] is None: + return NotImplemented + break + annotations = getattr(base, '__annotations__', {}) + if (isinstance(annotations, typing.Mapping) and + attr in annotations and + isinstance(other, _ProtocolMeta) and + other._is_protocol): + break + else: + return NotImplemented + return True + if '__subclasshook__' not in cls.__dict__: + cls.__subclasshook__ = _proto_hook + + def __instancecheck__(self, instance): + # We need this method for situations where attributes are + # assigned in __init__. 
+ if ((not getattr(self, '_is_protocol', False) or + _is_callable_members_only(self)) and + issubclass(instance.__class__, self)): + return True + if self._is_protocol: + if all(hasattr(instance, attr) and + (not callable(getattr(self, attr, None)) or + getattr(instance, attr) is not None) + for attr in _get_protocol_attrs(self)): + return True + return super(GenericMeta, self).__instancecheck__(instance) + + def __subclasscheck__(self, cls): + if self.__origin__ is not None: + if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']: + raise TypeError("Parameterized generics cannot be used with class " + "or instance checks") + return False + if (self.__dict__.get('_is_protocol', None) and + not self.__dict__.get('_is_runtime_protocol', None)): + if sys._getframe(1).f_globals['__name__'] in ['abc', + 'functools', + 'typing']: + return False + raise TypeError("Instance and class checks can only be used with" + " @runtime protocols") + if (self.__dict__.get('_is_runtime_protocol', None) and + not _is_callable_members_only(self)): + if sys._getframe(1).f_globals['__name__'] in ['abc', + 'functools', + 'typing']: + return super(GenericMeta, self).__subclasscheck__(cls) + raise TypeError("Protocols with non-method members" + " don't support issubclass()") + return super(GenericMeta, self).__subclasscheck__(cls) + + @typing._tp_cache + def __getitem__(self, params): + # We also need to copy this from GenericMeta.__getitem__ to get + # special treatment of "Protocol". (Comments removed for brevity.) + if not isinstance(params, tuple): + params = (params,) + if not params and _gorg(self) is not typing.Tuple: + raise TypeError( + f"Parameter list to {self.__qualname__}[...] cannot be empty") + msg = "Parameters to generic types must be types." + params = tuple(_type_check(p, msg) for p in params) + if self in (typing.Generic, Protocol): + if not all(isinstance(p, typing.TypeVar) for p in params): + raise TypeError( + f"Parameters to {repr(self)}[...] must all be type variables") + if len(set(params)) != len(params): + raise TypeError( + f"Parameters to {repr(self)}[...] must all be unique") + tvars = params + args = params + elif self in (typing.Tuple, typing.Callable): + tvars = _type_vars(params) + args = params + elif self.__origin__ in (typing.Generic, Protocol): + raise TypeError(f"Cannot subscript already-subscripted {repr(self)}") + else: + _check_generic(self, params) + tvars = _type_vars(params) + args = params + + prepend = (self,) if self.__origin__ is None else () + return self.__class__(self.__name__, + prepend + self.__bases__, + _no_slots_copy(self.__dict__), + tvars=tvars, + args=args, + origin=self, + extra=self.__extra__, + orig_bases=self.__orig_bases__) + + class Protocol(metaclass=_ProtocolMeta): + """Base class for protocol classes. Protocol classes are defined as:: + + class Proto(Protocol): + def meth(self) -> int: + ... + + Such classes are primarily used with static type checkers that recognize + structural subtyping (static duck-typing), for example:: + + class C: + def meth(self) -> int: + return 0 + + def func(x: Proto) -> int: + return x.meth() + + func(C()) # Passes static type check + + See PEP 544 for details. Protocol classes decorated with + @typing_extensions.runtime act as simple-minded runtime protocol that checks + only the presence of given attributes, ignoring their type signatures. + + Protocol classes can be generic, they are defined as:: + + class GenProto(Protocol[T]): + def meth(self) -> T: + ... 
+ """ + __slots__ = () + _is_protocol = True + + def __new__(cls, *args, **kwds): + if _gorg(cls) is Protocol: + raise TypeError("Type Protocol cannot be instantiated; " + "it can be used only as a base class") + return typing._generic_new(cls.__next_in_mro__, cls, *args, **kwds) + + +# 3.8+ +if hasattr(typing, 'runtime_checkable'): + runtime_checkable = typing.runtime_checkable +# 3.6-3.7 +else: + def runtime_checkable(cls): + """Mark a protocol class as a runtime protocol, so that it + can be used with isinstance() and issubclass(). Raise TypeError + if applied to a non-protocol class. + + This allows a simple-minded structural check very similar to the + one-offs in collections.abc such as Hashable. + """ + if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol: + raise TypeError('@runtime_checkable can be only applied to protocol classes,' + f' got {cls!r}') + cls._is_runtime_protocol = True + return cls + + +# Exists for backwards compatibility. +runtime = runtime_checkable + + +# 3.8+ +if hasattr(typing, 'SupportsIndex'): + SupportsIndex = typing.SupportsIndex +# 3.6-3.7 +else: + @runtime_checkable + class SupportsIndex(Protocol): + __slots__ = () + + @abc.abstractmethod + def __index__(self) -> int: + pass + + +if sys.version_info >= (3, 9, 2): + # The standard library TypedDict in Python 3.8 does not store runtime information + # about which (if any) keys are optional. See https://bugs.python.org/issue38834 + # The standard library TypedDict in Python 3.9.0/1 does not honour the "total" + # keyword with old-style TypedDict(). See https://bugs.python.org/issue42059 + TypedDict = typing.TypedDict +else: + def _check_fails(cls, other): + try: + if sys._getframe(1).f_globals['__name__'] not in ['abc', + 'functools', + 'typing']: + # Typed dicts are only for static structural subtyping. 
+ raise TypeError('TypedDict does not support instance and class checks') + except (AttributeError, ValueError): + pass + return False + + def _dict_new(*args, **kwargs): + if not args: + raise TypeError('TypedDict.__new__(): not enough arguments') + _, args = args[0], args[1:] # allow the "cls" keyword be passed + return dict(*args, **kwargs) + + _dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)' + + def _typeddict_new(*args, total=True, **kwargs): + if not args: + raise TypeError('TypedDict.__new__(): not enough arguments') + _, args = args[0], args[1:] # allow the "cls" keyword be passed + if args: + typename, args = args[0], args[1:] # allow the "_typename" keyword be passed + elif '_typename' in kwargs: + typename = kwargs.pop('_typename') + import warnings + warnings.warn("Passing '_typename' as keyword argument is deprecated", + DeprecationWarning, stacklevel=2) + else: + raise TypeError("TypedDict.__new__() missing 1 required positional " + "argument: '_typename'") + if args: + try: + fields, = args # allow the "_fields" keyword be passed + except ValueError: + raise TypeError('TypedDict.__new__() takes from 2 to 3 ' + f'positional arguments but {len(args) + 2} ' + 'were given') + elif '_fields' in kwargs and len(kwargs) == 1: + fields = kwargs.pop('_fields') + import warnings + warnings.warn("Passing '_fields' as keyword argument is deprecated", + DeprecationWarning, stacklevel=2) + else: + fields = None + + if fields is None: + fields = kwargs + elif kwargs: + raise TypeError("TypedDict takes either a dict or keyword arguments," + " but not both") + + ns = {'__annotations__': dict(fields)} + try: + # Setting correct module is necessary to make typed dict classes pickleable. + ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + pass + + return _TypedDictMeta(typename, (), ns, total=total) + + _typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,' + ' /, *, total=True, **kwargs)') + + class _TypedDictMeta(type): + def __init__(cls, name, bases, ns, total=True): + super().__init__(name, bases, ns) + + def __new__(cls, name, bases, ns, total=True): + # Create new typed dict class object. + # This method is called directly when TypedDict is subclassed, + # or via _typeddict_new when TypedDict is instantiated. This way + # TypedDict supports all three syntaxes described in its docstring. + # Subclasses and instances of TypedDict return actual dictionaries + # via _dict_new. 
+ ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new + tp_dict = super().__new__(cls, name, (dict,), ns) + + annotations = {} + own_annotations = ns.get('__annotations__', {}) + own_annotation_keys = set(own_annotations.keys()) + msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type" + own_annotations = { + n: typing._type_check(tp, msg) for n, tp in own_annotations.items() + } + required_keys = set() + optional_keys = set() + + for base in bases: + annotations.update(base.__dict__.get('__annotations__', {})) + required_keys.update(base.__dict__.get('__required_keys__', ())) + optional_keys.update(base.__dict__.get('__optional_keys__', ())) + + annotations.update(own_annotations) + if total: + required_keys.update(own_annotation_keys) + else: + optional_keys.update(own_annotation_keys) + + tp_dict.__annotations__ = annotations + tp_dict.__required_keys__ = frozenset(required_keys) + tp_dict.__optional_keys__ = frozenset(optional_keys) + if not hasattr(tp_dict, '__total__'): + tp_dict.__total__ = total + return tp_dict + + __instancecheck__ = __subclasscheck__ = _check_fails + + TypedDict = _TypedDictMeta('TypedDict', (dict,), {}) + TypedDict.__module__ = __name__ + TypedDict.__doc__ = \ + """A simple typed name space. At runtime it is equivalent to a plain dict. + + TypedDict creates a dictionary type that expects all of its + instances to have a certain set of keys, with each key + associated with a value of a consistent type. This expectation + is not checked at runtime but is only enforced by type checkers. + Usage:: + + class Point2D(TypedDict): + x: int + y: int + label: str + + a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK + b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check + + assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first') + + The type info can be accessed via the Point2D.__annotations__ dict, and + the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets. + TypedDict supports two additional equivalent forms:: + + Point2D = TypedDict('Point2D', x=int, y=int, label=str) + Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str}) + + The class syntax is only supported in Python 3.6+, while two other + syntax forms work for Python 2.7 and 3.2+ + """ + + +# Python 3.9+ has PEP 593 (Annotated and modified get_type_hints) +if hasattr(typing, 'Annotated'): + Annotated = typing.Annotated + get_type_hints = typing.get_type_hints + # Not exported and not a public API, but needed for get_origin() and get_args() + # to work. + _AnnotatedAlias = typing._AnnotatedAlias +# 3.7-3.8 +elif PEP_560: + class _AnnotatedAlias(typing._GenericAlias, _root=True): + """Runtime representation of an annotated type. + + At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't' + with extra annotations. The alias behaves like a normal typing alias, + instantiating is the same as instantiating the underlying type, binding + it to types is also the same. 
+ """ + def __init__(self, origin, metadata): + if isinstance(origin, _AnnotatedAlias): + metadata = origin.__metadata__ + metadata + origin = origin.__origin__ + super().__init__(origin, origin) + self.__metadata__ = metadata + + def copy_with(self, params): + assert len(params) == 1 + new_type = params[0] + return _AnnotatedAlias(new_type, self.__metadata__) + + def __repr__(self): + return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, " + f"{', '.join(repr(a) for a in self.__metadata__)}]") + + def __reduce__(self): + return operator.getitem, ( + Annotated, (self.__origin__,) + self.__metadata__ + ) + + def __eq__(self, other): + if not isinstance(other, _AnnotatedAlias): + return NotImplemented + if self.__origin__ != other.__origin__: + return False + return self.__metadata__ == other.__metadata__ + + def __hash__(self): + return hash((self.__origin__, self.__metadata__)) + + class Annotated: + """Add context specific metadata to a type. + + Example: Annotated[int, runtime_check.Unsigned] indicates to the + hypothetical runtime_check module that this type is an unsigned int. + Every other consumer of this type can ignore this metadata and treat + this type as int. + + The first argument to Annotated must be a valid type (and will be in + the __origin__ field), the remaining arguments are kept as a tuple in + the __extra__ field. + + Details: + + - It's an error to call `Annotated` with less than two arguments. + - Nested Annotated are flattened:: + + Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3] + + - Instantiating an annotated type is equivalent to instantiating the + underlying type:: + + Annotated[C, Ann1](5) == C(5) + + - Annotated can be used as a generic type alias:: + + Optimized = Annotated[T, runtime.Optimize()] + Optimized[int] == Annotated[int, runtime.Optimize()] + + OptimizedList = Annotated[List[T], runtime.Optimize()] + OptimizedList[int] == Annotated[List[int], runtime.Optimize()] + """ + + __slots__ = () + + def __new__(cls, *args, **kwargs): + raise TypeError("Type Annotated cannot be instantiated.") + + @typing._tp_cache + def __class_getitem__(cls, params): + if not isinstance(params, tuple) or len(params) < 2: + raise TypeError("Annotated[...] should be used " + "with at least two arguments (a type and an " + "annotation).") + msg = "Annotated[t, ...]: t must be a type." + origin = typing._type_check(params[0], msg) + metadata = tuple(params[1:]) + return _AnnotatedAlias(origin, metadata) + + def __init_subclass__(cls, *args, **kwargs): + raise TypeError( + f"Cannot subclass {cls.__module__}.Annotated" + ) + + def _strip_annotations(t): + """Strips the annotations from a given type. + """ + if isinstance(t, _AnnotatedAlias): + return _strip_annotations(t.__origin__) + if isinstance(t, typing._GenericAlias): + stripped_args = tuple(_strip_annotations(a) for a in t.__args__) + if stripped_args == t.__args__: + return t + res = t.copy_with(stripped_args) + res._special = t._special + return res + return t + + def get_type_hints(obj, globalns=None, localns=None, include_extras=False): + """Return type hints for an object. + + This is often the same as obj.__annotations__, but it handles + forward references encoded as string literals, adds Optional[t] if a + default value equal to None is set and recursively replaces all + 'Annotated[T, ...]' with 'T' (unless 'include_extras=True'). + + The argument may be a module, class, method, or function. The annotations + are returned as a dictionary. 
For classes, annotations include also + inherited members. + + TypeError is raised if the argument is not of a type that can contain + annotations, and an empty dictionary is returned if no annotations are + present. + + BEWARE -- the behavior of globalns and localns is counterintuitive + (unless you are familiar with how eval() and exec() work). The + search order is locals first, then globals. + + - If no dict arguments are passed, an attempt is made to use the + globals from obj (or the respective module's globals for classes), + and these are also used as the locals. If the object does not appear + to have globals, an empty dictionary is used. + + - If one dict argument is passed, it is used for both globals and + locals. + + - If two dict arguments are passed, they specify globals and + locals, respectively. + """ + hint = typing.get_type_hints(obj, globalns=globalns, localns=localns) + if include_extras: + return hint + return {k: _strip_annotations(t) for k, t in hint.items()} +# 3.6 +else: + + def _is_dunder(name): + """Returns True if name is a __dunder_variable_name__.""" + return len(name) > 4 and name.startswith('__') and name.endswith('__') + + # Prior to Python 3.7 types did not have `copy_with`. A lot of the equality + # checks, argument expansion etc. are done on the _subs_tre. As a result we + # can't provide a get_type_hints function that strips out annotations. + + class AnnotatedMeta(typing.GenericMeta): + """Metaclass for Annotated""" + + def __new__(cls, name, bases, namespace, **kwargs): + if any(b is not object for b in bases): + raise TypeError("Cannot subclass " + str(Annotated)) + return super().__new__(cls, name, bases, namespace, **kwargs) + + @property + def __metadata__(self): + return self._subs_tree()[2] + + def _tree_repr(self, tree): + cls, origin, metadata = tree + if not isinstance(origin, tuple): + tp_repr = typing._type_repr(origin) + else: + tp_repr = origin[0]._tree_repr(origin) + metadata_reprs = ", ".join(repr(arg) for arg in metadata) + return f'{cls}[{tp_repr}, {metadata_reprs}]' + + def _subs_tree(self, tvars=None, args=None): # noqa + if self is Annotated: + return Annotated + res = super()._subs_tree(tvars=tvars, args=args) + # Flatten nested Annotated + if isinstance(res[1], tuple) and res[1][0] is Annotated: + sub_tp = res[1][1] + sub_annot = res[1][2] + return (Annotated, sub_tp, sub_annot + res[2]) + return res + + def _get_cons(self): + """Return the class used to create instance of this type.""" + if self.__origin__ is None: + raise TypeError("Cannot get the underlying type of a " + "non-specialized Annotated type.") + tree = self._subs_tree() + while isinstance(tree, tuple) and tree[0] is Annotated: + tree = tree[1] + if isinstance(tree, tuple): + return tree[0] + else: + return tree + + @typing._tp_cache + def __getitem__(self, params): + if not isinstance(params, tuple): + params = (params,) + if self.__origin__ is not None: # specializing an instantiated type + return super().__getitem__(params) + elif not isinstance(params, tuple) or len(params) < 2: + raise TypeError("Annotated[...] should be instantiated " + "with at least two arguments (a type and an " + "annotation).") + else: + msg = "Annotated[t, ...]: t must be a type." + tp = typing._type_check(params[0], msg) + metadata = tuple(params[1:]) + return self.__class__( + self.__name__, + self.__bases__, + _no_slots_copy(self.__dict__), + tvars=_type_vars((tp,)), + # Metadata is a tuple so it won't be touched by _replace_args et al. 
+ args=(tp, metadata), + origin=self, + ) + + def __call__(self, *args, **kwargs): + cons = self._get_cons() + result = cons(*args, **kwargs) + try: + result.__orig_class__ = self + except AttributeError: + pass + return result + + def __getattr__(self, attr): + # For simplicity we just don't relay all dunder names + if self.__origin__ is not None and not _is_dunder(attr): + return getattr(self._get_cons(), attr) + raise AttributeError(attr) + + def __setattr__(self, attr, value): + if _is_dunder(attr) or attr.startswith('_abc_'): + super().__setattr__(attr, value) + elif self.__origin__ is None: + raise AttributeError(attr) + else: + setattr(self._get_cons(), attr, value) + + def __instancecheck__(self, obj): + raise TypeError("Annotated cannot be used with isinstance().") + + def __subclasscheck__(self, cls): + raise TypeError("Annotated cannot be used with issubclass().") + + class Annotated(metaclass=AnnotatedMeta): + """Add context specific metadata to a type. + + Example: Annotated[int, runtime_check.Unsigned] indicates to the + hypothetical runtime_check module that this type is an unsigned int. + Every other consumer of this type can ignore this metadata and treat + this type as int. + + The first argument to Annotated must be a valid type, the remaining + arguments are kept as a tuple in the __metadata__ field. + + Details: + + - It's an error to call `Annotated` with less than two arguments. + - Nested Annotated are flattened:: + + Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3] + + - Instantiating an annotated type is equivalent to instantiating the + underlying type:: + + Annotated[C, Ann1](5) == C(5) + + - Annotated can be used as a generic type alias:: + + Optimized = Annotated[T, runtime.Optimize()] + Optimized[int] == Annotated[int, runtime.Optimize()] + + OptimizedList = Annotated[List[T], runtime.Optimize()] + OptimizedList[int] == Annotated[List[int], runtime.Optimize()] + """ + +# Python 3.8 has get_origin() and get_args() but those implementations aren't +# Annotated-aware, so we can't use those. Python 3.9's versions don't support +# ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do. +if sys.version_info[:2] >= (3, 10): + get_origin = typing.get_origin + get_args = typing.get_args +# 3.7-3.9 +elif PEP_560: + try: + # 3.9+ + from typing import _BaseGenericAlias + except ImportError: + _BaseGenericAlias = typing._GenericAlias + try: + # 3.9+ + from typing import GenericAlias + except ImportError: + GenericAlias = typing._GenericAlias + + def get_origin(tp): + """Get the unsubscripted version of a type. + + This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar + and Annotated. Return None for unsupported types. Examples:: + + get_origin(Literal[42]) is Literal + get_origin(int) is None + get_origin(ClassVar[int]) is ClassVar + get_origin(Generic) is Generic + get_origin(Generic[T]) is Generic + get_origin(Union[T, int]) is Union + get_origin(List[Tuple[T, T]][int]) == list + get_origin(P.args) is P + """ + if isinstance(tp, _AnnotatedAlias): + return Annotated + if isinstance(tp, (typing._GenericAlias, GenericAlias, _BaseGenericAlias, + ParamSpecArgs, ParamSpecKwargs)): + return tp.__origin__ + if tp is typing.Generic: + return typing.Generic + return None + + def get_args(tp): + """Get type arguments with all substitutions performed. + + For unions, basic simplifications used by Union constructor are performed. 
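An aside, not part of the patch: a minimal runnable sketch of the ``Annotated`` branch these two helpers implement, assuming the module is importable from its vendored location::

    from pipenv.patched.notpip._vendor.typing_extensions import (
        Annotated, get_args, get_origin,
    )

    UserId = Annotated[int, "unique"]
    assert get_origin(UserId) is Annotated        # the _AnnotatedAlias branch
    assert get_args(UserId) == (int, "unique")    # (__origin__,) + __metadata__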
+ Examples:: + get_args(Dict[str, int]) == (str, int) + get_args(int) == () + get_args(Union[int, Union[T, int], str][int]) == (int, str) + get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int]) + get_args(Callable[[], T][int]) == ([], int) + """ + if isinstance(tp, _AnnotatedAlias): + return (tp.__origin__,) + tp.__metadata__ + if isinstance(tp, (typing._GenericAlias, GenericAlias)): + if getattr(tp, "_special", False): + return () + res = tp.__args__ + if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis: + res = (list(res[:-1]), res[-1]) + return res + return () + + +# 3.10+ +if hasattr(typing, 'TypeAlias'): + TypeAlias = typing.TypeAlias +# 3.9 +elif sys.version_info[:2] >= (3, 9): + class _TypeAliasForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' + self._name + + @_TypeAliasForm + def TypeAlias(self, parameters): + """Special marker indicating that an assignment should + be recognized as a proper type alias definition by type + checkers. + + For example:: + + Predicate: TypeAlias = Callable[..., bool] + + It's invalid when used anywhere except as in the example above. + """ + raise TypeError(f"{self} is not subscriptable") +# 3.7-3.8 +elif sys.version_info[:2] >= (3, 7): + class _TypeAliasForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' + self._name + + TypeAlias = _TypeAliasForm('TypeAlias', + doc="""Special marker indicating that an assignment should + be recognized as a proper type alias definition by type + checkers. + + For example:: + + Predicate: TypeAlias = Callable[..., bool] + + It's invalid when used anywhere except as in the example + above.""") +# 3.6 +else: + class _TypeAliasMeta(typing.TypingMeta): + """Metaclass for TypeAlias""" + + def __repr__(self): + return 'typing_extensions.TypeAlias' + + class _TypeAliasBase(typing._FinalTypingBase, metaclass=_TypeAliasMeta, _root=True): + """Special marker indicating that an assignment should + be recognized as a proper type alias definition by type + checkers. + + For example:: + + Predicate: TypeAlias = Callable[..., bool] + + It's invalid when used anywhere except as in the example above. + """ + __slots__ = () + + def __instancecheck__(self, obj): + raise TypeError("TypeAlias cannot be used with isinstance().") + + def __subclasscheck__(self, cls): + raise TypeError("TypeAlias cannot be used with issubclass().") + + def __repr__(self): + return 'typing_extensions.TypeAlias' + + TypeAlias = _TypeAliasBase(_root=True) + + +# Python 3.10+ has PEP 612 +if hasattr(typing, 'ParamSpecArgs'): + ParamSpecArgs = typing.ParamSpecArgs + ParamSpecKwargs = typing.ParamSpecKwargs +# 3.6-3.9 +else: + class _Immutable: + """Mixin to indicate that object should not be copied.""" + __slots__ = () + + def __copy__(self): + return self + + def __deepcopy__(self, memo): + return self + + class ParamSpecArgs(_Immutable): + """The args for a ParamSpec object. + + Given a ParamSpec object P, P.args is an instance of ParamSpecArgs. + + ParamSpecArgs objects have a reference back to their ParamSpec: + + P.args.__origin__ is P + + This type is meant for runtime introspection and has no special meaning to + static type checkers. + """ + def __init__(self, origin): + self.__origin__ = origin + + def __repr__(self): + return f"{self.__origin__.__name__}.args" + + class ParamSpecKwargs(_Immutable): + """The kwargs for a ParamSpec object. + + Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs. 
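An aside, not part of the patch: the runtime introspection these docstrings describe can be sketched as follows, assuming the vendored import path::

    from pipenv.patched.notpip._vendor.typing_extensions import ParamSpec

    P = ParamSpec("P")
    assert repr(P.args) == "P.args"        # a ParamSpecArgs instance
    assert repr(P.kwargs) == "P.kwargs"    # a ParamSpecKwargs instance
    assert P.args.__origin__ is P          # back-reference to the ParamSpec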
+
+        ParamSpecKwargs objects have a reference back to their ParamSpec:
+
+            P.kwargs.__origin__ is P
+
+        This type is meant for runtime introspection and has no special meaning to
+        static type checkers.
+        """
+        def __init__(self, origin):
+            self.__origin__ = origin
+
+        def __repr__(self):
+            return f"{self.__origin__.__name__}.kwargs"
+
+# 3.10+
+if hasattr(typing, 'ParamSpec'):
+    ParamSpec = typing.ParamSpec
+# 3.6-3.9
+else:
+
+    # Inherits from list as a workaround for Callable checks in Python < 3.9.2.
+    class ParamSpec(list):
+        """Parameter specification variable.
+
+        Usage::
+
+           P = ParamSpec('P')
+
+        Parameter specification variables exist primarily for the benefit of static
+        type checkers. They are used to forward the parameter types of one
+        callable to another callable, a pattern commonly found in higher order
+        functions and decorators. They are only valid when used in ``Concatenate``,
+        or as the first argument to ``Callable``. In Python 3.10 and higher,
+        they are also supported in user-defined Generics at runtime.
+        See class Generic for more information on generic types. An
+        example for annotating a decorator::
+
+           T = TypeVar('T')
+           P = ParamSpec('P')
+
+           def add_logging(f: Callable[P, T]) -> Callable[P, T]:
+               '''A type-safe decorator to add logging to a function.'''
+               def inner(*args: P.args, **kwargs: P.kwargs) -> T:
+                   logging.info(f'{f.__name__} was called')
+                   return f(*args, **kwargs)
+               return inner
+
+           @add_logging
+           def add_two(x: float, y: float) -> float:
+               '''Add two numbers together.'''
+               return x + y
+
+        Parameter specification variables defined with covariant=True or
+        contravariant=True can be used to declare covariant or contravariant
+        generic types. These keyword arguments are valid, but their actual semantics
+        are yet to be decided. See PEP 612 for details.
+
+        Parameter specification variables can be introspected. e.g.:
+
+           P.__name__ == 'P'
+           P.__bound__ == None
+           P.__covariant__ == False
+           P.__contravariant__ == False
+
+        Note that only parameter specification variables defined in global scope can
+        be pickled.
+        """
+
+        # Trick Generic __parameters__.
+        __class__ = typing.TypeVar
+
+        @property
+        def args(self):
+            return ParamSpecArgs(self)
+
+        @property
+        def kwargs(self):
+            return ParamSpecKwargs(self)
+
+        def __init__(self, name, *, bound=None, covariant=False, contravariant=False):
+            super().__init__([self])
+            self.__name__ = name
+            self.__covariant__ = bool(covariant)
+            self.__contravariant__ = bool(contravariant)
+            if bound:
+                self.__bound__ = typing._type_check(bound, 'Bound must be a type.')
+            else:
+                self.__bound__ = None
+
+            # for pickling:
+            try:
+                def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
+            except (AttributeError, ValueError):
+                def_mod = None
+            if def_mod != 'typing_extensions':
+                self.__module__ = def_mod
+
+        def __repr__(self):
+            if self.__covariant__:
+                prefix = '+'
+            elif self.__contravariant__:
+                prefix = '-'
+            else:
+                prefix = '~'
+            return prefix + self.__name__
+
+        def __hash__(self):
+            return object.__hash__(self)
+
+        def __eq__(self, other):
+            return self is other
+
+        def __reduce__(self):
+            return self.__name__
+
+        # Hack to get typing._type_check to pass.
+        def __call__(self, *args, **kwargs):
+            pass
+
+        if not PEP_560:
+            # Only needed in 3.6.
+            def _get_type_vars(self, tvars):
+                if self not in tvars:
+                    tvars.append(self)
+
+
+# 3.6-3.9
+if not hasattr(typing, 'Concatenate'):
+    # Inherits from list as a workaround for Callable checks in Python < 3.9.2.
+ class _ConcatenateGenericAlias(list): + + # Trick Generic into looking into this for __parameters__. + if PEP_560: + __class__ = typing._GenericAlias + else: + __class__ = typing._TypingBase + + # Flag in 3.8. + _special = False + # Attribute in 3.6 and earlier. + _gorg = typing.Generic + + def __init__(self, origin, args): + super().__init__(args) + self.__origin__ = origin + self.__args__ = args + + def __repr__(self): + _type_repr = typing._type_repr + return (f'{_type_repr(self.__origin__)}' + f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]') + + def __hash__(self): + return hash((self.__origin__, self.__args__)) + + # Hack to get typing._type_check to pass in Generic. + def __call__(self, *args, **kwargs): + pass + + @property + def __parameters__(self): + return tuple( + tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec)) + ) + + if not PEP_560: + # Only required in 3.6. + def _get_type_vars(self, tvars): + if self.__origin__ and self.__parameters__: + typing._get_type_vars(self.__parameters__, tvars) + + +# 3.6-3.9 +@typing._tp_cache +def _concatenate_getitem(self, parameters): + if parameters == (): + raise TypeError("Cannot take a Concatenate of no types.") + if not isinstance(parameters, tuple): + parameters = (parameters,) + if not isinstance(parameters[-1], ParamSpec): + raise TypeError("The last parameter to Concatenate should be a " + "ParamSpec variable.") + msg = "Concatenate[arg, ...]: each arg must be a type." + parameters = tuple(typing._type_check(p, msg) for p in parameters) + return _ConcatenateGenericAlias(self, parameters) + + +# 3.10+ +if hasattr(typing, 'Concatenate'): + Concatenate = typing.Concatenate + _ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa +# 3.9 +elif sys.version_info[:2] >= (3, 9): + @_TypeAliasForm + def Concatenate(self, parameters): + """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a + higher order function which adds, removes or transforms parameters of a + callable. + + For example:: + + Callable[Concatenate[int, P], int] + + See PEP 612 for detailed information. + """ + return _concatenate_getitem(self, parameters) +# 3.7-8 +elif sys.version_info[:2] >= (3, 7): + class _ConcatenateForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' + self._name + + def __getitem__(self, parameters): + return _concatenate_getitem(self, parameters) + + Concatenate = _ConcatenateForm( + 'Concatenate', + doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a + higher order function which adds, removes or transforms parameters of a + callable. + + For example:: + + Callable[Concatenate[int, P], int] + + See PEP 612 for detailed information. + """) +# 3.6 +else: + class _ConcatenateAliasMeta(typing.TypingMeta): + """Metaclass for Concatenate.""" + + def __repr__(self): + return 'typing_extensions.Concatenate' + + class _ConcatenateAliasBase(typing._FinalTypingBase, + metaclass=_ConcatenateAliasMeta, + _root=True): + """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a + higher order function which adds, removes or transforms parameters of a + callable. + + For example:: + + Callable[Concatenate[int, P], int] + + See PEP 612 for detailed information. 
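An aside, not part of the patch: the docstrings show only ``Callable[Concatenate[int, P], int]``, so here is a slightly fuller sketch of ``Concatenate`` in a decorator, assuming the vendored import path; the injected value ``42`` is an arbitrary demo value::

    from typing import Callable, TypeVar
    from pipenv.patched.notpip._vendor.typing_extensions import Concatenate, ParamSpec

    P = ParamSpec("P")
    R = TypeVar("R")

    # Strip a leading int argument: the wrapped callable needs it,
    # the returned callable no longer does.
    def bind_first(f: Callable[Concatenate[int, P], R]) -> Callable[P, R]:
        def inner(*args: P.args, **kwargs: P.kwargs) -> R:
            return f(42, *args, **kwargs)  # 42 stands in for the removed argument
        return inner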
+ """ + __slots__ = () + + def __instancecheck__(self, obj): + raise TypeError("Concatenate cannot be used with isinstance().") + + def __subclasscheck__(self, cls): + raise TypeError("Concatenate cannot be used with issubclass().") + + def __repr__(self): + return 'typing_extensions.Concatenate' + + def __getitem__(self, parameters): + return _concatenate_getitem(self, parameters) + + Concatenate = _ConcatenateAliasBase(_root=True) + +# 3.10+ +if hasattr(typing, 'TypeGuard'): + TypeGuard = typing.TypeGuard +# 3.9 +elif sys.version_info[:2] >= (3, 9): + class _TypeGuardForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' + self._name + + @_TypeGuardForm + def TypeGuard(self, parameters): + """Special typing form used to annotate the return type of a user-defined + type guard function. ``TypeGuard`` only accepts a single type argument. + At runtime, functions marked this way should return a boolean. + + ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static + type checkers to determine a more precise type of an expression within a + program's code flow. Usually type narrowing is done by analyzing + conditional code flow and applying the narrowing to a block of code. The + conditional expression here is sometimes referred to as a "type guard". + + Sometimes it would be convenient to use a user-defined boolean function + as a type guard. Such a function should use ``TypeGuard[...]`` as its + return type to alert static type checkers to this intention. + + Using ``-> TypeGuard`` tells the static type checker that for a given + function: + + 1. The return value is a boolean. + 2. If the return value is ``True``, the type of its argument + is the type inside ``TypeGuard``. + + For example:: + + def is_str(val: Union[str, float]): + # "isinstance" type guard + if isinstance(val, str): + # Type of ``val`` is narrowed to ``str`` + ... + else: + # Else, type of ``val`` is narrowed to ``float``. + ... + + Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower + form of ``TypeA`` (it can even be a wider form) and this may lead to + type-unsafe results. The main reason is to allow for things like + narrowing ``List[object]`` to ``List[str]`` even though the latter is not + a subtype of the former, since ``List`` is invariant. The responsibility of + writing type-safe type guards is left to the user. + + ``TypeGuard`` also works with type variables. For more information, see + PEP 647 (User-Defined Type Guards). + """ + item = typing._type_check(parameters, f'{self} accepts only single type.') + return typing._GenericAlias(self, (item,)) +# 3.7-3.8 +elif sys.version_info[:2] >= (3, 7): + class _TypeGuardForm(typing._SpecialForm, _root=True): + + def __repr__(self): + return 'typing_extensions.' + self._name + + def __getitem__(self, parameters): + item = typing._type_check(parameters, + f'{self._name} accepts only a single type') + return typing._GenericAlias(self, (item,)) + + TypeGuard = _TypeGuardForm( + 'TypeGuard', + doc="""Special typing form used to annotate the return type of a user-defined + type guard function. ``TypeGuard`` only accepts a single type argument. + At runtime, functions marked this way should return a boolean. + + ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static + type checkers to determine a more precise type of an expression within a + program's code flow. Usually type narrowing is done by analyzing + conditional code flow and applying the narrowing to a block of code. 
The + conditional expression here is sometimes referred to as a "type guard". + + Sometimes it would be convenient to use a user-defined boolean function + as a type guard. Such a function should use ``TypeGuard[...]`` as its + return type to alert static type checkers to this intention. + + Using ``-> TypeGuard`` tells the static type checker that for a given + function: + + 1. The return value is a boolean. + 2. If the return value is ``True``, the type of its argument + is the type inside ``TypeGuard``. + + For example:: + + def is_str(val: Union[str, float]): + # "isinstance" type guard + if isinstance(val, str): + # Type of ``val`` is narrowed to ``str`` + ... + else: + # Else, type of ``val`` is narrowed to ``float``. + ... + + Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower + form of ``TypeA`` (it can even be a wider form) and this may lead to + type-unsafe results. The main reason is to allow for things like + narrowing ``List[object]`` to ``List[str]`` even though the latter is not + a subtype of the former, since ``List`` is invariant. The responsibility of + writing type-safe type guards is left to the user. + + ``TypeGuard`` also works with type variables. For more information, see + PEP 647 (User-Defined Type Guards). + """) +# 3.6 +else: + class _TypeGuard(typing._FinalTypingBase, _root=True): + """Special typing form used to annotate the return type of a user-defined + type guard function. ``TypeGuard`` only accepts a single type argument. + At runtime, functions marked this way should return a boolean. + + ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static + type checkers to determine a more precise type of an expression within a + program's code flow. Usually type narrowing is done by analyzing + conditional code flow and applying the narrowing to a block of code. The + conditional expression here is sometimes referred to as a "type guard". + + Sometimes it would be convenient to use a user-defined boolean function + as a type guard. Such a function should use ``TypeGuard[...]`` as its + return type to alert static type checkers to this intention. + + Using ``-> TypeGuard`` tells the static type checker that for a given + function: + + 1. The return value is a boolean. + 2. If the return value is ``True``, the type of its argument + is the type inside ``TypeGuard``. + + For example:: + + def is_str(val: Union[str, float]): + # "isinstance" type guard + if isinstance(val, str): + # Type of ``val`` is narrowed to ``str`` + ... + else: + # Else, type of ``val`` is narrowed to ``float``. + ... + + Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower + form of ``TypeA`` (it can even be a wider form) and this may lead to + type-unsafe results. The main reason is to allow for things like + narrowing ``List[object]`` to ``List[str]`` even though the latter is not + a subtype of the former, since ``List`` is invariant. The responsibility of + writing type-safe type guards is left to the user. + + ``TypeGuard`` also works with type variables. For more information, see + PEP 647 (User-Defined Type Guards). 
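An aside, not part of the patch: the ``is_str`` example in these docstrings shows isinstance narrowing; a user-defined guard that actually returns ``TypeGuard[...]`` looks like this sketch (vendored import path assumed)::

    from typing import List, Union
    from pipenv.patched.notpip._vendor.typing_extensions import TypeGuard

    def is_str_list(val: List[Union[str, int]]) -> TypeGuard[List[str]]:
        # A True result tells a static checker that ``val`` is List[str] here.
        return all(isinstance(x, str) for x in val)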
+        """
+
+        __slots__ = ('__type__',)
+
+        def __init__(self, tp=None, **kwds):
+            self.__type__ = tp
+
+        def __getitem__(self, item):
+            cls = type(self)
+            if self.__type__ is None:
+                return cls(typing._type_check(item,
+                           f'{cls.__name__[1:]} accepts only a single type.'),
+                           _root=True)
+            raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted')
+
+        def _eval_type(self, globalns, localns):
+            new_tp = typing._eval_type(self.__type__, globalns, localns)
+            if new_tp == self.__type__:
+                return self
+            return type(self)(new_tp, _root=True)
+
+        def __repr__(self):
+            r = super().__repr__()
+            if self.__type__ is not None:
+                r += f'[{typing._type_repr(self.__type__)}]'
+            return r
+
+        def __hash__(self):
+            return hash((type(self).__name__, self.__type__))
+
+        def __eq__(self, other):
+            if not isinstance(other, _TypeGuard):
+                return NotImplemented
+            if self.__type__ is not None:
+                return self.__type__ == other.__type__
+            return self is other
+
+    TypeGuard = _TypeGuard(_root=True)
+
+if hasattr(typing, "Self"):
+    Self = typing.Self
+elif sys.version_info[:2] >= (3, 7):
+    # Vendored from cpython typing._SpecialForm
+    class _SpecialForm(typing._Final, _root=True):
+        __slots__ = ('_name', '__doc__', '_getitem')
+
+        def __init__(self, getitem):
+            self._getitem = getitem
+            self._name = getitem.__name__
+            self.__doc__ = getitem.__doc__
+
+        def __getattr__(self, item):
+            if item in {'__name__', '__qualname__'}:
+                return self._name
+
+            raise AttributeError(item)
+
+        def __mro_entries__(self, bases):
+            raise TypeError(f"Cannot subclass {self!r}")
+
+        def __repr__(self):
+            return f'typing_extensions.{self._name}'
+
+        def __reduce__(self):
+            return self._name
+
+        def __call__(self, *args, **kwds):
+            raise TypeError(f"Cannot instantiate {self!r}")
+
+        def __or__(self, other):
+            return typing.Union[self, other]
+
+        def __ror__(self, other):
+            return typing.Union[other, self]
+
+        def __instancecheck__(self, obj):
+            raise TypeError(f"{self} cannot be used with isinstance()")
+
+        def __subclasscheck__(self, cls):
+            raise TypeError(f"{self} cannot be used with issubclass()")
+
+        @typing._tp_cache
+        def __getitem__(self, parameters):
+            return self._getitem(self, parameters)
+
+    @_SpecialForm
+    def Self(self, params):
+        """Used to spell the type of "self" in classes.
+
+        Example::
+
+          from typing import Self
+
+          class ReturnsSelf:
+              def parse(self, data: bytes) -> Self:
+                  ...
+                  return self
+
+        """
+
+        raise TypeError(f"{self} is not subscriptable")
+else:
+    class _Self(typing._FinalTypingBase, _root=True):
+        """Used to spell the type of "self" in classes.
+
+        Example::
+
+          from typing import Self
+
+          class ReturnsSelf:
+              def parse(self, data: bytes) -> Self:
+                  ...
+                  return self
+
+        """
+
+        __slots__ = ()
+
+        def __instancecheck__(self, obj):
+            raise TypeError(f"{self} cannot be used with isinstance().")
+
+        def __subclasscheck__(self, cls):
+            raise TypeError(f"{self} cannot be used with issubclass().")
+
+    Self = _Self(_root=True)
+
+
+if hasattr(typing, 'Required'):
+    Required = typing.Required
+    NotRequired = typing.NotRequired
+elif sys.version_info[:2] >= (3, 9):
+    class _ExtensionsSpecialForm(typing._SpecialForm, _root=True):
+        def __repr__(self):
+            return 'typing_extensions.' + self._name
+
+    @_ExtensionsSpecialForm
+    def Required(self, parameters):
+        """A special typing construct to mark a key of a total=False TypedDict
+        as required.
For example: + + class Movie(TypedDict, total=False): + title: Required[str] + year: int + + m = Movie( + title='The Matrix', # typechecker error if key is omitted + year=1999, + ) + + There is no runtime checking that a required key is actually provided + when instantiating a related TypedDict. + """ + item = typing._type_check(parameters, f'{self._name} accepts only single type') + return typing._GenericAlias(self, (item,)) + + @_ExtensionsSpecialForm + def NotRequired(self, parameters): + """A special typing construct to mark a key of a TypedDict as + potentially missing. For example: + + class Movie(TypedDict): + title: str + year: NotRequired[int] + + m = Movie( + title='The Matrix', # typechecker error if key is omitted + year=1999, + ) + """ + item = typing._type_check(parameters, f'{self._name} accepts only single type') + return typing._GenericAlias(self, (item,)) + +elif sys.version_info[:2] >= (3, 7): + class _RequiredForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' + self._name + + def __getitem__(self, parameters): + item = typing._type_check(parameters, + '{} accepts only single type'.format(self._name)) + return typing._GenericAlias(self, (item,)) + + Required = _RequiredForm( + 'Required', + doc="""A special typing construct to mark a key of a total=False TypedDict + as required. For example: + + class Movie(TypedDict, total=False): + title: Required[str] + year: int + + m = Movie( + title='The Matrix', # typechecker error if key is omitted + year=1999, + ) + + There is no runtime checking that a required key is actually provided + when instantiating a related TypedDict. + """) + NotRequired = _RequiredForm( + 'NotRequired', + doc="""A special typing construct to mark a key of a TypedDict as + potentially missing. For example: + + class Movie(TypedDict): + title: str + year: NotRequired[int] + + m = Movie( + title='The Matrix', # typechecker error if key is omitted + year=1999, + ) + """) +else: + # NOTE: Modeled after _Final's implementation when _FinalTypingBase available + class _MaybeRequired(typing._FinalTypingBase, _root=True): + __slots__ = ('__type__',) + + def __init__(self, tp=None, **kwds): + self.__type__ = tp + + def __getitem__(self, item): + cls = type(self) + if self.__type__ is None: + return cls(typing._type_check(item, + '{} accepts only single type.'.format(cls.__name__[1:])), + _root=True) + raise TypeError('{} cannot be further subscripted' + .format(cls.__name__[1:])) + + def _eval_type(self, globalns, localns): + new_tp = typing._eval_type(self.__type__, globalns, localns) + if new_tp == self.__type__: + return self + return type(self)(new_tp, _root=True) + + def __repr__(self): + r = super().__repr__() + if self.__type__ is not None: + r += '[{}]'.format(typing._type_repr(self.__type__)) + return r + + def __hash__(self): + return hash((type(self).__name__, self.__type__)) + + def __eq__(self, other): + if not isinstance(other, type(self)): + return NotImplemented + if self.__type__ is not None: + return self.__type__ == other.__type__ + return self is other + + class _Required(_MaybeRequired, _root=True): + """A special typing construct to mark a key of a total=False TypedDict + as required. For example: + + class Movie(TypedDict, total=False): + title: Required[str] + year: int + + m = Movie( + title='The Matrix', # typechecker error if key is omitted + year=1999, + ) + + There is no runtime checking that a required key is actually provided + when instantiating a related TypedDict. 
+ """ + + class _NotRequired(_MaybeRequired, _root=True): + """A special typing construct to mark a key of a TypedDict as + potentially missing. For example: + + class Movie(TypedDict): + title: str + year: NotRequired[int] + + m = Movie( + title='The Matrix', # typechecker error if key is omitted + year=1999, + ) + """ + + Required = _Required(_root=True) + NotRequired = _NotRequired(_root=True) diff --git a/pipenv/patched/notpip/_vendor/urllib3/_version.py b/pipenv/patched/notpip/_vendor/urllib3/_version.py index e8ebee957f..fa8979d73e 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/_version.py +++ b/pipenv/patched/notpip/_vendor/urllib3/_version.py @@ -1,2 +1,2 @@ # This file is protected via CODEOWNERS -__version__ = "1.26.6" +__version__ = "1.26.8" diff --git a/pipenv/patched/notpip/_vendor/urllib3/connection.py b/pipenv/patched/notpip/_vendor/urllib3/connection.py index 4c996659c8..4d92ac6d2c 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/connection.py +++ b/pipenv/patched/notpip/_vendor/urllib3/connection.py @@ -51,15 +51,16 @@ class BrokenPipeError(Exception): SubjectAltNameWarning, SystemTimeWarning, ) -from .packages.ssl_match_hostname import CertificateError, match_hostname from .util import SKIP_HEADER, SKIPPABLE_HEADERS, connection from .util.ssl_ import ( assert_fingerprint, create_urllib3_context, + is_ipaddress, resolve_cert_reqs, resolve_ssl_version, ssl_wrap_socket, ) +from .util.ssl_match_hostname import CertificateError, match_hostname log = logging.getLogger(__name__) @@ -107,6 +108,10 @@ class HTTPConnection(_HTTPConnection, object): #: Whether this connection verifies the host's certificate. is_verified = False + #: Whether this proxy connection (if used) verifies the proxy host's + #: certificate. + proxy_is_verified = None + def __init__(self, *args, **kw): if not six.PY2: kw.pop("strict", None) @@ -490,14 +495,10 @@ def _connect_tls_proxy(self, hostname, conn): self.ca_cert_dir, self.ca_cert_data, ) - # By default urllib3's SSLContext disables `check_hostname` and uses - # a custom check. For proxies we're good with relying on the default - # verification. - ssl_context.check_hostname = True # If no cert was provided, use only the default options for server # certificate validation - return ssl_wrap_socket( + socket = ssl_wrap_socket( sock=conn, ca_certs=self.ca_certs, ca_cert_dir=self.ca_cert_dir, @@ -506,8 +507,37 @@ def _connect_tls_proxy(self, hostname, conn): ssl_context=ssl_context, ) + if ssl_context.verify_mode != ssl.CERT_NONE and not getattr( + ssl_context, "check_hostname", False + ): + # While urllib3 attempts to always turn off hostname matching from + # the TLS library, this cannot always be done. So we check whether + # the TLS Library still thinks it's matching hostnames. + cert = socket.getpeercert() + if not cert.get("subjectAltName", ()): + warnings.warn( + ( + "Certificate for {0} has no `subjectAltName`, falling back to check for a " + "`commonName` for now. This feature is being removed by major browsers and " + "deprecated by RFC 2818. (See https://github.com/urllib3/urllib3/issues/497 " + "for details.)".format(hostname) + ), + SubjectAltNameWarning, + ) + _match_hostname(cert, hostname) + + self.proxy_is_verified = ssl_context.verify_mode == ssl.CERT_REQUIRED + return socket + def _match_hostname(cert, asserted_hostname): + # Our upstream implementation of ssl.match_hostname() + # only applies this normalization to IP addresses so it doesn't + # match DNS SANs so we do the same thing! 
+ stripped_hostname = asserted_hostname.strip("u[]") + if is_ipaddress(stripped_hostname): + asserted_hostname = stripped_hostname + try: match_hostname(cert, asserted_hostname) except CertificateError as e: diff --git a/pipenv/patched/notpip/_vendor/urllib3/connectionpool.py b/pipenv/patched/notpip/_vendor/urllib3/connectionpool.py index 459bbe095b..15bffcb23a 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/connectionpool.py +++ b/pipenv/patched/notpip/_vendor/urllib3/connectionpool.py @@ -2,6 +2,7 @@ import errno import logging +import re import socket import sys import warnings @@ -35,7 +36,6 @@ ) from .packages import six from .packages.six.moves import queue -from .packages.ssl_match_hostname import CertificateError from .request import RequestMethods from .response import HTTPResponse from .util.connection import is_connection_dropped @@ -44,6 +44,7 @@ from .util.request import set_file_position from .util.response import assert_header_parsing from .util.retry import Retry +from .util.ssl_match_hostname import CertificateError from .util.timeout import Timeout from .util.url import Url, _encode_target from .util.url import _normalize_host as normalize_host @@ -301,8 +302,11 @@ def _put_conn(self, conn): pass except queue.Full: # This should never happen if self.block == True - log.warning("Connection pool is full, discarding connection: %s", self.host) - + log.warning( + "Connection pool is full, discarding connection: %s. Connection pool size: %s", + self.host, + self.pool.qsize(), + ) # Connection never got put back into the pool, close it. if conn: conn.close() @@ -745,7 +749,33 @@ def urlopen( # Discard the connection for these exceptions. It will be # replaced during the next _get_conn() call. clean_exit = False - if isinstance(e, (BaseSSLError, CertificateError)): + + def _is_ssl_error_message_from_http_proxy(ssl_error): + # We're trying to detect the message 'WRONG_VERSION_NUMBER' but + # SSLErrors are kinda all over the place when it comes to the message, + # so we try to cover our bases here! + message = " ".join(re.split("[^a-z]", str(ssl_error).lower())) + return ( + "wrong version number" in message or "unknown protocol" in message + ) + + # Try to detect a common user error with proxies which is to + # set an HTTP proxy to be HTTPS when it should be 'http://' + # (ie {'http': 'http://proxy', 'https': 'https://proxy'}) + # Instead we add a nice error message and point to a URL. + if ( + isinstance(e, BaseSSLError) + and self.proxy + and _is_ssl_error_message_from_http_proxy(e) + ): + e = ProxyError( + "Your proxy appears to only use HTTP and not HTTPS, " + "try changing your proxy URL to be HTTP. See: " + "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html" + "#https-proxy-error-http-proxy", + SSLError(e), + ) + elif isinstance(e, (BaseSSLError, CertificateError)): e = SSLError(e) elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy: e = ProxyError("Cannot connect to proxy.", e) @@ -1020,6 +1050,17 @@ def _validate_conn(self, conn): InsecureRequestWarning, ) + if getattr(conn, "proxy_is_verified", None) is False: + warnings.warn( + ( + "Unverified HTTPS connection done to an HTTPS proxy. " + "Adding certificate verification is strongly advised. 
See: " + "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html" + "#ssl-warnings" + ), + InsecureRequestWarning, + ) + def connection_from_url(url, **kw): """ diff --git a/pipenv/patched/notpip/_vendor/urllib3/contrib/_securetransport/bindings.py b/pipenv/patched/notpip/_vendor/urllib3/contrib/_securetransport/bindings.py index dd1fc0a2f7..264d564dbd 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/contrib/_securetransport/bindings.py +++ b/pipenv/patched/notpip/_vendor/urllib3/contrib/_securetransport/bindings.py @@ -48,7 +48,7 @@ ) from ctypes.util import find_library -from pipenv.patched.notpip._vendor.urllib3.packages.six import raise_from +from ...packages.six import raise_from if platform.system() != "Darwin": raise ImportError("Only macOS is supported") diff --git a/pipenv/patched/notpip/_vendor/urllib3/contrib/_securetransport/low_level.py b/pipenv/patched/notpip/_vendor/urllib3/contrib/_securetransport/low_level.py index ed8120190c..fa0b245d27 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/contrib/_securetransport/low_level.py +++ b/pipenv/patched/notpip/_vendor/urllib3/contrib/_securetransport/low_level.py @@ -188,6 +188,7 @@ def _cert_array_from_pem(pem_bundle): # We only want to do that if an error occurs: otherwise, the caller # should free. CoreFoundation.CFRelease(cert_array) + raise return cert_array diff --git a/pipenv/patched/notpip/_vendor/urllib3/contrib/pyopenssl.py b/pipenv/patched/notpip/_vendor/urllib3/contrib/pyopenssl.py index bebf30d6e8..57ec7255af 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/contrib/pyopenssl.py +++ b/pipenv/patched/notpip/_vendor/urllib3/contrib/pyopenssl.py @@ -28,8 +28,8 @@ .. code-block:: python try: - import urllib3.contrib.pyopenssl - urllib3.contrib.pyopenssl.inject_into_urllib3() + import pipenv.patched.notpip._vendor.urllib3.contrib.pyopenssl as pyopenssl + pyopenssl.inject_into_urllib3() except ImportError: pass diff --git a/pipenv/patched/notpip/_vendor/urllib3/contrib/securetransport.py b/pipenv/patched/notpip/_vendor/urllib3/contrib/securetransport.py index 4b1bca3d41..936ac7572e 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/contrib/securetransport.py +++ b/pipenv/patched/notpip/_vendor/urllib3/contrib/securetransport.py @@ -19,8 +19,8 @@ To use this module, simply import and inject it:: - import urllib3.contrib.securetransport - urllib3.contrib.securetransport.inject_into_urllib3() + import pipenv.patched.notpip._vendor.urllib3.contrib.securetransport as securetransport + securetransport.inject_into_urllib3() Happy TLSing! diff --git a/pipenv/patched/notpip/_vendor/urllib3/packages/__init__.py b/pipenv/patched/notpip/_vendor/urllib3/packages/__init__.py index fce4caa65d..e69de29bb2 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/packages/__init__.py +++ b/pipenv/patched/notpip/_vendor/urllib3/packages/__init__.py @@ -1,5 +0,0 @@ -from __future__ import absolute_import - -from . import ssl_match_hostname - -__all__ = ("ssl_match_hostname",) diff --git a/pipenv/patched/notpip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py b/pipenv/patched/notpip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py deleted file mode 100644 index ef3fde5206..0000000000 --- a/pipenv/patched/notpip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -import sys - -try: - # Our match_hostname function is the same as 3.10's, so we only want to - # import the match_hostname function if it's at least that good. 
- # We also fallback on Python 3.10+ because our code doesn't emit - # deprecation warnings and is the same as Python 3.10 otherwise. - if sys.version_info < (3, 5) or sys.version_info >= (3, 10): - raise ImportError("Fallback to vendored code") - - from ssl import CertificateError, match_hostname -except ImportError: - try: - # Backport of the function from a pypi module - from backports.ssl_match_hostname import ( # type: ignore - CertificateError, - match_hostname, - ) - except ImportError: - # Our vendored copy - from ._implementation import CertificateError, match_hostname # type: ignore - -# Not needed, but documenting what we provide. -__all__ = ("CertificateError", "match_hostname") diff --git a/pipenv/patched/notpip/_vendor/urllib3/util/connection.py b/pipenv/patched/notpip/_vendor/urllib3/util/connection.py index c4ca0e29bc..6af1138f26 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/util/connection.py +++ b/pipenv/patched/notpip/_vendor/urllib3/util/connection.py @@ -2,9 +2,8 @@ import socket -from pipenv.patched.notpip._vendor.urllib3.exceptions import LocationParseError - from ..contrib import _appengine_environ +from ..exceptions import LocationParseError from ..packages import six from .wait import NoWayToWaitForSocketError, wait_for_read diff --git a/pipenv/patched/notpip/_vendor/urllib3/util/proxy.py b/pipenv/patched/notpip/_vendor/urllib3/util/proxy.py index 34f884d5b3..2199cc7b7f 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/util/proxy.py +++ b/pipenv/patched/notpip/_vendor/urllib3/util/proxy.py @@ -45,6 +45,7 @@ def create_proxy_ssl_context( ssl_version=resolve_ssl_version(ssl_version), cert_reqs=resolve_cert_reqs(cert_reqs), ) + if ( not ca_certs and not ca_cert_dir diff --git a/pipenv/patched/notpip/_vendor/urllib3/util/retry.py b/pipenv/patched/notpip/_vendor/urllib3/util/retry.py index c7dc42f1d6..3398323fd7 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/util/retry.py +++ b/pipenv/patched/notpip/_vendor/urllib3/util/retry.py @@ -69,6 +69,24 @@ def DEFAULT_REDIRECT_HEADERS_BLACKLIST(cls, value): ) cls.DEFAULT_REMOVE_HEADERS_ON_REDIRECT = value + @property + def BACKOFF_MAX(cls): + warnings.warn( + "Using 'Retry.BACKOFF_MAX' is deprecated and " + "will be removed in v2.0. Use 'Retry.DEFAULT_BACKOFF_MAX' instead", + DeprecationWarning, + ) + return cls.DEFAULT_BACKOFF_MAX + + @BACKOFF_MAX.setter + def BACKOFF_MAX(cls, value): + warnings.warn( + "Using 'Retry.BACKOFF_MAX' is deprecated and " + "will be removed in v2.0. Use 'Retry.DEFAULT_BACKOFF_MAX' instead", + DeprecationWarning, + ) + cls.DEFAULT_BACKOFF_MAX = value + @six.add_metaclass(_RetryMeta) class Retry(object): @@ -181,7 +199,7 @@ class Retry(object): seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer - than :attr:`Retry.BACKOFF_MAX`. + than :attr:`Retry.DEFAULT_BACKOFF_MAX`. By default, backoff is disabled (set to 0). @@ -220,7 +238,7 @@ class Retry(object): DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(["Authorization"]) #: Maximum backoff time. 
- BACKOFF_MAX = 120 + DEFAULT_BACKOFF_MAX = 120 def __init__( self, @@ -348,7 +366,7 @@ def get_backoff_time(self): return 0 backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1)) - return min(self.BACKOFF_MAX, backoff_value) + return min(self.DEFAULT_BACKOFF_MAX, backoff_value) def parse_retry_after(self, retry_after): # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4 diff --git a/pipenv/patched/notpip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py b/pipenv/patched/notpip/_vendor/urllib3/util/ssl_match_hostname.py similarity index 96% rename from pipenv/patched/notpip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py rename to pipenv/patched/notpip/_vendor/urllib3/util/ssl_match_hostname.py index 689208d3c6..a4b4a569cb 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py +++ b/pipenv/patched/notpip/_vendor/urllib3/util/ssl_match_hostname.py @@ -9,7 +9,7 @@ # ipaddress has been backported to 2.6+ in pypi. If it is installed on the # system, use it to handle IPAddress ServerAltnames (this was added in # python-3.5) otherwise only do DNS matching. This allows -# backports.ssl_match_hostname to continue to be used in Python 2.7. +# util.ssl_match_hostname to continue to be used in Python 2.7. try: import ipaddress except ImportError: @@ -78,7 +78,8 @@ def _dnsname_match(dn, hostname, max_wildcards=1): def _to_unicode(obj): if isinstance(obj, str) and sys.version_info < (3,): - obj = unicode(obj, encoding="ascii", errors="strict") + # ignored flake8 # F821 to support python 2.7 function + obj = unicode(obj, encoding="ascii", errors="strict") # noqa: F821 return obj diff --git a/pipenv/patched/notpip/_vendor/urllib3/util/ssltransport.py b/pipenv/patched/notpip/_vendor/urllib3/util/ssltransport.py index b45c0208ed..4a7105d179 100644 --- a/pipenv/patched/notpip/_vendor/urllib3/util/ssltransport.py +++ b/pipenv/patched/notpip/_vendor/urllib3/util/ssltransport.py @@ -2,8 +2,8 @@ import socket import ssl -from pipenv.patched.notpip._vendor.urllib3.exceptions import ProxySchemeUnsupported -from pipenv.patched.notpip._vendor.urllib3.packages import six +from ..exceptions import ProxySchemeUnsupported +from ..packages import six SSL_BLOCKSIZE = 16384 diff --git a/pipenv/patched/notpip/_vendor/vendor.txt b/pipenv/patched/notpip/_vendor/vendor.txt index 73e493434e..2c93c0f8f2 100644 --- a/pipenv/patched/notpip/_vendor/vendor.txt +++ b/pipenv/patched/notpip/_vendor/vendor.txt @@ -1,20 +1,23 @@ -appdirs==1.4.4 -CacheControl==0.12.6 +CacheControl==0.12.10 # Make sure to update the license in pyproject.toml for this. 
colorama==0.4.4 -distlib==0.3.2 -distro==1.5.0 +distlib==0.3.3 +distro==1.6.0 html5lib==1.1 -msgpack==1.0.2 -packaging==21.0 -pep517==0.11.0 -progress==1.5 -pyparsing==2.4.7 -requests==2.26.0 - certifi==2021.05.30 +msgpack==1.0.3 +packaging==21.3 +pep517==0.12.0 +platformdirs==2.4.1 +progress==1.6 +pyparsing==3.0.7 +requests==2.27.1 + certifi==2021.10.08 chardet==4.0.0 - idna==3.2 - urllib3==1.26.6 -resolvelib==0.7.1 + idna==3.3 + urllib3==1.26.8 +rich==11.0.0 + pygments==2.11.2 + typing_extensions==4.0.1 +resolvelib==0.8.1 setuptools==44.0.0 six==1.16.0 tenacity==8.0.1 diff --git a/pipenv/patched/notpip/_vendor/distro.LICENSE b/pipenv/patched/notpip/distro.LICENSE similarity index 100% rename from pipenv/patched/notpip/_vendor/distro.LICENSE rename to pipenv/patched/notpip/distro.LICENSE diff --git a/pipenv/patched/notpip/_vendor/six.LICENSE b/pipenv/patched/notpip/six.LICENSE similarity index 100% rename from pipenv/patched/notpip/_vendor/six.LICENSE rename to pipenv/patched/notpip/six.LICENSE diff --git a/pipenv/patched/notpip/_vendor/distlib/LICENSE.txt b/pipenv/patched/notpip/typing_extensions.LICENSE similarity index 86% rename from pipenv/patched/notpip/_vendor/distlib/LICENSE.txt rename to pipenv/patched/notpip/typing_extensions.LICENSE index c31ac56d77..583f9f6e61 100644 --- a/pipenv/patched/notpip/_vendor/distlib/LICENSE.txt +++ b/pipenv/patched/notpip/typing_extensions.LICENSE @@ -36,39 +36,9 @@ the various releases. 2.1 2.0+1.6.1 2001 PSF no 2.0.1 2.0+1.6.1 2001 PSF yes 2.1.1 2.1+2.0.1 2001 PSF yes - 2.2 2.1.1 2001 PSF yes 2.1.2 2.1.1 2002 PSF yes 2.1.3 2.1.2 2002 PSF yes - 2.2.1 2.2 2002 PSF yes - 2.2.2 2.2.1 2002 PSF yes - 2.2.3 2.2.2 2003 PSF yes - 2.3 2.2.2 2002-2003 PSF yes - 2.3.1 2.3 2002-2003 PSF yes - 2.3.2 2.3.1 2002-2003 PSF yes - 2.3.3 2.3.2 2002-2003 PSF yes - 2.3.4 2.3.3 2004 PSF yes - 2.3.5 2.3.4 2005 PSF yes - 2.4 2.3 2004 PSF yes - 2.4.1 2.4 2005 PSF yes - 2.4.2 2.4.1 2005 PSF yes - 2.4.3 2.4.2 2006 PSF yes - 2.4.4 2.4.3 2006 PSF yes - 2.5 2.4 2006 PSF yes - 2.5.1 2.5 2007 PSF yes - 2.5.2 2.5.1 2008 PSF yes - 2.5.3 2.5.2 2008 PSF yes - 2.6 2.5 2008 PSF yes - 2.6.1 2.6 2008 PSF yes - 2.6.2 2.6.1 2009 PSF yes - 2.6.3 2.6.2 2009 PSF yes - 2.6.4 2.6.3 2009 PSF yes - 2.6.5 2.6.4 2010 PSF yes - 3.0 2.6 2008 PSF yes - 3.0.1 3.0 2009 PSF yes - 3.1 3.0.1 2009 PSF yes - 3.1.1 3.1 2009 PSF yes - 3.1.2 3.1 2010 PSF yes - 3.2 3.1 2010 PSF yes + 2.2 and above 2.1.1 2001-now PSF yes Footnotes: @@ -103,9 +73,9 @@ grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use Python alone or in any derivative version, provided, however, that PSF's License Agreement and PSF's notice of copyright, -i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 -Python Software Foundation; All Rights Reserved" are retained in Python alone or -in any derivative version prepared by Licensee. +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved" are +retained in Python alone or in any derivative version prepared by Licensee. 3. 
In the event Licensee prepares a derivative work that is based on or incorporates Python or any part thereof, and wants to make diff --git a/pipenv/patched/patched.txt b/pipenv/patched/patched.txt index 3735025166..d6b9d2d2a8 100644 --- a/pipenv/patched/patched.txt +++ b/pipenv/patched/patched.txt @@ -1,4 +1,4 @@ crayons==0.1.2 -pip==21.2.2 +pip==22.0.4 pipfile==0.0.2 safety==1.10.3 diff --git a/pipenv/patched/yaml3/__init__.py b/pipenv/patched/yaml3/__init__.py index 86d07b5525..465041dce2 100644 --- a/pipenv/patched/yaml3/__init__.py +++ b/pipenv/patched/yaml3/__init__.py @@ -8,7 +8,7 @@ from .loader import * from .dumper import * -__version__ = '5.4.1' +__version__ = '6.0' try: from .cyaml import * __with_libyaml__ = True @@ -18,41 +18,12 @@ import io #------------------------------------------------------------------------------ -# Warnings control +# XXX "Warnings control" is now deprecated. Leaving in the API function to not +# break code that uses it. #------------------------------------------------------------------------------ - -# 'Global' warnings state: -_warnings_enabled = { - 'YAMLLoadWarning': True, -} - -# Get or set global warnings' state def warnings(settings=None): if settings is None: - return _warnings_enabled - - if type(settings) is dict: - for key in settings: - if key in _warnings_enabled: - _warnings_enabled[key] = settings[key] - -# Warn when load() is called without Loader=... -class YAMLLoadWarning(RuntimeWarning): - pass - -def load_warning(method): - if _warnings_enabled['YAMLLoadWarning'] is False: - return - - import warnings - - message = ( - "calling yaml.%s() without Loader=... is deprecated, as the " - "default Loader is unsafe. Please read " - "https://msg.pyyaml.org/load for full details." - ) % method - - warnings.warn(message, YAMLLoadWarning, stacklevel=3) + return {} #------------------------------------------------------------------------------ def scan(stream, Loader=Loader): @@ -100,30 +71,22 @@ def compose_all(stream, Loader=Loader): finally: loader.dispose() -def load(stream, Loader=None): +def load(stream, Loader): """ Parse the first YAML document in a stream and produce the corresponding Python object. """ - if Loader is None: - load_warning('load') - Loader = FullLoader - loader = Loader(stream) try: return loader.get_single_data() finally: loader.dispose() -def load_all(stream, Loader=None): +def load_all(stream, Loader): """ Parse all YAML documents in a stream and produce corresponding Python objects. """ - if Loader is None: - load_warning('load_all') - Loader = FullLoader - loader = Loader(stream) try: while loader.check_data(): diff --git a/pipenv/patched/yaml3/representer.py b/pipenv/patched/yaml3/representer.py index 3b0b192ef3..808ca06dfb 100644 --- a/pipenv/patched/yaml3/representer.py +++ b/pipenv/patched/yaml3/representer.py @@ -369,7 +369,7 @@ def represent_ordered_dict(self, data): Representer.add_representer(tuple, Representer.represent_tuple) -Representer.add_representer(type, +Representer.add_multi_representer(type, Representer.represent_name) Representer.add_representer(collections.OrderedDict, diff --git a/pipenv/patched/yaml3/resolver.py b/pipenv/patched/yaml3/resolver.py index 013896d2f1..3522bdaaf6 100644 --- a/pipenv/patched/yaml3/resolver.py +++ b/pipenv/patched/yaml3/resolver.py @@ -177,7 +177,7 @@ class Resolver(BaseResolver): Resolver.add_implicit_resolver( 'tag:yaml.org,2002:float', re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)? - |\.[0-9_]+(?:[eE][-+][0-9]+)? 
+ |\.[0-9][0-9_]*(?:[eE][-+][0-9]+)? |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* |[-+]?\.(?:inf|Inf|INF) |\.(?:nan|NaN|NAN))$''', re.X), diff --git a/pipenv/patched/yaml3/scanner.py b/pipenv/patched/yaml3/scanner.py index 7437ede1c6..de925b07f1 100644 --- a/pipenv/patched/yaml3/scanner.py +++ b/pipenv/patched/yaml3/scanner.py @@ -1211,7 +1211,7 @@ def scan_flow_scalar_non_spaces(self, double, start_mark): for k in range(length): if self.peek(k) not in '0123456789ABCDEFabcdef': raise ScannerError("while scanning a double-quoted scalar", start_mark, - "expected escape sequence of %d hexdecimal numbers, but found %r" % + "expected escape sequence of %d hexadecimal numbers, but found %r" % (length, self.peek(k)), self.get_mark()) code = int(self.prefix(length), 16) chunks.append(chr(code)) @@ -1403,7 +1403,7 @@ def scan_uri_escapes(self, name, start_mark): for k in range(2): if self.peek(k) not in '0123456789ABCDEFabcdef': raise ScannerError("while scanning a %s" % name, start_mark, - "expected URI escape sequence of 2 hexdecimal numbers, but found %r" + "expected URI escape sequence of 2 hexadecimal numbers, but found %r" % self.peek(k), self.get_mark()) codes.append(int(self.prefix(2), 16)) self.forward(2) diff --git a/pipenv/patched/notpip/_vendor/requests/LICENSE b/pipenv/vendor/packaging/LICENSE.APACHE similarity index 99% rename from pipenv/patched/notpip/_vendor/requests/LICENSE rename to pipenv/vendor/packaging/LICENSE.APACHE index 67db858821..f433b1a53f 100644 --- a/pipenv/patched/notpip/_vendor/requests/LICENSE +++ b/pipenv/vendor/packaging/LICENSE.APACHE @@ -173,3 +173,5 @@ defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/pipenv/patched/notpip/_vendor/colorama/LICENSE.txt b/pipenv/vendor/packaging/LICENSE.BSD similarity index 61% rename from pipenv/patched/notpip/_vendor/colorama/LICENSE.txt rename to pipenv/vendor/packaging/LICENSE.BSD index 3105888ec1..42ce7b75c9 100644 --- a/pipenv/patched/notpip/_vendor/colorama/LICENSE.txt +++ b/pipenv/vendor/packaging/LICENSE.BSD @@ -1,19 +1,15 @@ -Copyright (c) 2010 Jonathan Hartley +Copyright (c) Donald Stufft and individual contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of the copyright holders, nor those of its contributors - may be used to endorse or promote products derived from this software without - specific prior written permission. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED diff --git a/pipenv/vendor/packaging/__about__.py b/pipenv/vendor/packaging/__about__.py index e70d692c85..3551bc2d29 100644 --- a/pipenv/vendor/packaging/__about__.py +++ b/pipenv/vendor/packaging/__about__.py @@ -17,7 +17,7 @@ __summary__ = "Core utilities for Python packages" __uri__ = "https://github.com/pypa/packaging" -__version__ = "21.0" +__version__ = "21.3" __author__ = "Donald Stufft and individual contributors" __email__ = "donald@stufft.io" diff --git a/pipenv/vendor/packaging/_musllinux.py b/pipenv/vendor/packaging/_musllinux.py index 85450fafa3..8ac3059ba3 100644 --- a/pipenv/vendor/packaging/_musllinux.py +++ b/pipenv/vendor/packaging/_musllinux.py @@ -98,7 +98,7 @@ def _get_musl_version(executable: str) -> Optional[_MuslVersion]: with contextlib.ExitStack() as stack: try: f = stack.enter_context(open(executable, "rb")) - except IOError: + except OSError: return None ld = _parse_ld_musl_from_elf(f) if not ld: diff --git a/pipenv/vendor/packaging/_structures.py b/pipenv/vendor/packaging/_structures.py index 951549753a..90a6465f96 100644 --- a/pipenv/vendor/packaging/_structures.py +++ b/pipenv/vendor/packaging/_structures.py @@ -19,9 +19,6 @@ def __le__(self, other: object) -> bool: def __eq__(self, other: object) -> bool: return isinstance(other, self.__class__) - def __ne__(self, other: object) -> bool: - return not isinstance(other, self.__class__) - def __gt__(self, other: object) -> bool: return True @@ -51,9 +48,6 @@ def __le__(self, other: object) -> bool: def __eq__(self, other: object) -> bool: return isinstance(other, self.__class__) - def __ne__(self, other: object) -> bool: - return not isinstance(other, self.__class__) - def __gt__(self, other: object) -> bool: return False diff --git a/pipenv/vendor/packaging/specifiers.py b/pipenv/vendor/packaging/specifiers.py index ce66bd4add..0e218a6f9f 100644 --- a/pipenv/vendor/packaging/specifiers.py +++ b/pipenv/vendor/packaging/specifiers.py @@ -57,13 +57,6 @@ def __eq__(self, other: object) -> bool: objects are equal. """ - @abc.abstractmethod - def __ne__(self, other: object) -> bool: - """ - Returns a boolean representing whether or not the two Specifier like - objects are not equal. 
- """ - @abc.abstractproperty def prereleases(self) -> Optional[bool]: """ @@ -119,7 +112,7 @@ def __repr__(self) -> str: else "" ) - return "<{}({!r}{})>".format(self.__class__.__name__, str(self), pre) + return f"<{self.__class__.__name__}({str(self)!r}{pre})>" def __str__(self) -> str: return "{}{}".format(*self._spec) @@ -142,17 +135,6 @@ def __eq__(self, other: object) -> bool: return self._canonical_spec == other._canonical_spec - def __ne__(self, other: object) -> bool: - if isinstance(other, str): - try: - other = self.__class__(str(other)) - except InvalidSpecifier: - return NotImplemented - elif not isinstance(other, self.__class__): - return NotImplemented - - return self._spec != other._spec - def _get_operator(self, op: str) -> CallableOperator: operator_callable: CallableOperator = getattr( self, f"_compare_{self._operators[op]}" @@ -667,7 +649,7 @@ def __repr__(self) -> str: else "" ) - return "".format(str(self), pre) + return f"" def __str__(self) -> str: return ",".join(sorted(str(s) for s in self._specs)) @@ -706,14 +688,6 @@ def __eq__(self, other: object) -> bool: return self._specs == other._specs - def __ne__(self, other: object) -> bool: - if isinstance(other, (str, _IndividualSpecifier)): - other = SpecifierSet(str(other)) - elif not isinstance(other, SpecifierSet): - return NotImplemented - - return self._specs != other._specs - def __len__(self) -> int: return len(self._specs) diff --git a/pipenv/vendor/packaging/tags.py b/pipenv/vendor/packaging/tags.py index 82a47cdae8..9a3d25a71c 100644 --- a/pipenv/vendor/packaging/tags.py +++ b/pipenv/vendor/packaging/tags.py @@ -90,7 +90,7 @@ def __str__(self) -> str: return f"{self._interpreter}-{self._abi}-{self._platform}" def __repr__(self) -> str: - return "<{self} @ {self_id}>".format(self=self, self_id=id(self)) + return f"<{self} @ {id(self)}>" def parse_tag(tag: str) -> FrozenSet[Tag]: @@ -192,7 +192,7 @@ def cpython_tags( if not python_version: python_version = sys.version_info[:2] - interpreter = "cp{}".format(_version_nodot(python_version[:2])) + interpreter = f"cp{_version_nodot(python_version[:2])}" if abis is None: if len(python_version) > 1: @@ -207,7 +207,7 @@ def cpython_tags( except ValueError: pass - platforms = list(platforms or _platform_tags()) + platforms = list(platforms or platform_tags()) for abi in abis: for platform_ in platforms: yield Tag(interpreter, abi, platform_) @@ -251,7 +251,7 @@ def generic_tags( interpreter = "".join([interp_name, interp_version]) if abis is None: abis = _generic_abi() - platforms = list(platforms or _platform_tags()) + platforms = list(platforms or platform_tags()) abis = list(abis) if "none" not in abis: abis.append("none") @@ -268,11 +268,11 @@ def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]: all previous versions of that major version. 
""" if len(py_version) > 1: - yield "py{version}".format(version=_version_nodot(py_version[:2])) - yield "py{major}".format(major=py_version[0]) + yield f"py{_version_nodot(py_version[:2])}" + yield f"py{py_version[0]}" if len(py_version) > 1: for minor in range(py_version[1] - 1, -1, -1): - yield "py{version}".format(version=_version_nodot((py_version[0], minor))) + yield f"py{_version_nodot((py_version[0], minor))}" def compatible_tags( @@ -290,7 +290,7 @@ def compatible_tags( """ if not python_version: python_version = sys.version_info[:2] - platforms = list(platforms or _platform_tags()) + platforms = list(platforms or platform_tags()) for version in _py_interpreter_range(python_version): for platform_ in platforms: yield Tag(version, "none", platform_) @@ -431,7 +431,7 @@ def _generic_platforms() -> Iterator[str]: yield _normalize_string(sysconfig.get_platform()) -def _platform_tags() -> Iterator[str]: +def platform_tags() -> Iterator[str]: """ Provides the platform tags for this installation. """ @@ -481,4 +481,7 @@ def sys_tags(*, warn: bool = False) -> Iterator[Tag]: else: yield from generic_tags() - yield from compatible_tags() + if interp_name == "pp": + yield from compatible_tags(interpreter="pp3") + else: + yield from compatible_tags() diff --git a/pipenv/vendor/pip_shims/compat.py b/pipenv/vendor/pip_shims/compat.py index 4dd596ea7b..a57822f795 100644 --- a/pipenv/vendor/pip_shims/compat.py +++ b/pipenv/vendor/pip_shims/compat.py @@ -709,6 +709,7 @@ def shim_unpack( only_download=None, # type: Optional[bool] downloader_provider=None, # type: Optional[TShimmedFunc] session=None, # type: Optional[Any] + verbosity=0, ): # (...) -> None """ @@ -743,7 +744,7 @@ def shim_unpack( downloader_provider = resolve_possible_shim(downloader_provider) tempdir_manager_provider = resolve_possible_shim(tempdir_manager_provider) required_args = inspect.getargs(unpack_fn.__code__).args # type: ignore - unpack_kwargs = {"download_dir": download_dir} + unpack_kwargs = {"download_dir": download_dir, "verbosity": verbosity} with tempdir_manager_provider(): if ireq: if not link and ireq.link: diff --git a/pipenv/vendor/pipdeptree.py b/pipenv/vendor/pipdeptree.py index e939db37aa..62d3fe1e9f 100644 --- a/pipenv/vendor/pipdeptree.py +++ b/pipenv/vendor/pipdeptree.py @@ -6,7 +6,6 @@ from itertools import chain from collections import defaultdict, deque import argparse -from operator import attrgetter import json from importlib import import_module import tempfile @@ -21,16 +20,18 @@ except ImportError: from collections import Mapping +import pkg_resources + pardir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) sys.path.append(pardir) from pipenv.vendor.pip_shims.shims import get_installed_distributions, FrozenRequirement -import pkg_resources # inline: -# from graphviz import backend, Digraph +# from graphviz import Digraph +# from graphviz import parameters -__version__ = '2.0.0' +__version__ = '2.2.1' flatten = chain.from_iterable @@ -48,9 +49,7 @@ def sorted_tree(tree): :rtype: collections.OrderedDict """ - return OrderedDict(sorted([(k, sorted(v, key=attrgetter('key'))) - for k, v in tree.items()], - key=lambda kv: kv[0].key)) + return OrderedDict([(k, sorted(v)) for k, v in sorted(tree.items())]) def guess_version(pkg_key, default='?'): @@ -75,6 +74,23 @@ def guess_version(pkg_key, default='?'): def frozen_req_from_dist(dist): + # The `pipenv.patched.notpip._internal.metadata` modules were introduced in 21.1.1 + # and the 
`pipenv.patched.notpip._internal.operations.freeze.FrozenRequirement`
+    # class now expects dist to be a subclass of
+    # `pipenv.patched.notpip._internal.metadata.BaseDistribution`, however the
+    # `pipenv.patched.notpip._internal.utils.misc.get_installed_distributions` continues
+    # to return objects of type
+    # pipenv.patched.notpip._vendor.pkg_resources.DistInfoDistribution.
+    #
+    # This is a hacky backward compatible (with older versions of pip)
+    # fix.
+    try:
+        from pipenv.patched.notpip._internal import metadata
+    except ImportError:
+        pass
+    else:
+        dist = metadata.pkg_resources.Distribution(dist)
+
     try:
         return FrozenRequirement.from_dist(dist)
     except TypeError:
@@ -117,6 +133,9 @@ def __getattr__(self, key):
     def __repr__(self):
         return '<{0}("{1}")>'.format(self.__class__.__name__, self.key)

+    def __lt__(self, rhs):
+        return self.key < rhs.key
+

 class DistPackage(Package):
     """Wrapper class for pkg_resources.Distribution instances
@@ -522,6 +541,7 @@ def render_json(tree, indent):
     :rtype: str

     """
+    tree = sorted_tree(tree)
     return json.dumps([{'package': k.as_dict(),
                         'dependencies': [v.as_dict() for v in vs]}
                        for k, vs in tree.items()],
@@ -579,17 +599,28 @@ def dump_graphviz(tree, output_format='dot', is_reverse=False):
     """
     try:
-        from graphviz import backend, Digraph
+        from graphviz import Digraph
     except ImportError:
         print('graphviz is not available, but necessary for the output '
               'option. Please install it.', file=sys.stderr)
         sys.exit(1)

-    if output_format not in backend.FORMATS:
+    try:
+        from graphviz import parameters
+    except ImportError:
+        from graphviz import backend
+        valid_formats = backend.FORMATS
+        print('Deprecation warning! Please upgrade graphviz to version >=0.18.0. '
+              'Support for older versions will be removed in an upcoming release.',
+              file=sys.stderr)
+    else:
+        valid_formats = parameters.FORMATS
+
+    if output_format not in valid_formats:
         print('{0} is not a supported output format.'.format(output_format),
               file=sys.stderr)
         print('Supported formats are: {0}'.format(
-            ', '.join(sorted(backend.FORMATS))), file=sys.stderr)
+            ', '.join(sorted(valid_formats))), file=sys.stderr)
         sys.exit(1)

     graph = Digraph(format=output_format)
@@ -666,7 +697,7 @@ def render_conflicts_text(conflicts):
         print('Warning!!! Possibly conflicting dependencies found:',
               file=sys.stderr)
         # Enforce alphabetical order when listing conflicts
-        pkgs = sorted(conflicts.keys(), key=attrgetter('key'))
+        pkgs = sorted(conflicts.keys())
         for p in pkgs:
             pkg = p.render_as_root(False)
             print('* {}'.format(pkg), file=sys.stderr)
@@ -815,11 +846,33 @@ def handle_non_host_target(args):
     return None


+def get_installed_distributions(local_only=False, user_only=False):
+    try:
+        from pipenv.patched.notpip._internal.metadata import get_environment
+    except ImportError:
+        # For backward compatibility with python ver.
2.7 and pip + # version 20.3.4 (latest pip version that works with python + # version 2.7) + from pipenv.patched.notpip._internal.utils import misc + return misc.get_installed_distributions( + local_only=local_only, + user_only=user_only + ) + else: + dists = get_environment(None).iter_installed_distributions( + local_only=local_only, + skip=(), + user_only=user_only + ) + return [d._dist for d in dists] + + def main(): args = _get_args() result = handle_non_host_target(args) if result is not None: return result + pkgs = get_installed_distributions(local_only=args.local_only, user_only=args.user_only) diff --git a/pipenv/vendor/pyparsing.py b/pipenv/vendor/pyparsing.py deleted file mode 100644 index 961738f84c..0000000000 --- a/pipenv/vendor/pyparsing.py +++ /dev/null @@ -1,7107 +0,0 @@ -# -*- coding: utf-8 -*- -# module pyparsing.py -# -# Copyright (c) 2003-2019 Paul T. McGuire -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# - -__doc__ = \ -""" -pyparsing module - Classes and methods to define and execute parsing grammars -============================================================================= - -The pyparsing module is an alternative approach to creating and -executing simple grammars, vs. the traditional lex/yacc approach, or the -use of regular expressions. With pyparsing, you don't need to learn -a new syntax for defining grammars or matching expressions - the parsing -module provides a library of classes that you use to construct the -grammar directly in Python. - -Here is a program to parse "Hello, World!" (or any greeting of the form -``", !"``), built up using :class:`Word`, -:class:`Literal`, and :class:`And` elements -(the :class:`'+'` operators create :class:`And` expressions, -and the strings are auto-converted to :class:`Literal` expressions):: - - from pipenv.vendor.pyparsing import Word, alphas - - # define grammar of a greeting - greet = Word(alphas) + "," + Word(alphas) + "!" - - hello = "Hello, World!" - print (hello, "->", greet.parseString(hello)) - -The program outputs the following:: - - Hello, World! -> ['Hello', ',', 'World', '!'] - -The Python representation of the grammar is quite readable, owing to the -self-explanatory class names, and the use of '+', '|' and '^' operators. - -The :class:`ParseResults` object returned from -:class:`ParserElement.parseString` can be -accessed as a nested list, a dictionary, or an object with named -attributes. 
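The access modes just described are the heart of the `ParseResults` API; for reviewers skimming this removal, a compact illustration against the released pyparsing 2.x package (not this vendored copy):

    from pyparsing import Word, nums

    integer = Word(nums)
    date_str = integer("year") + "/" + integer("month") + "/" + integer("day")

    result = date_str.parseString("1999/12/31")
    print(list(result))     # list view      -> ['1999', '/', '12', '/', '31']
    print(result["month"])  # dict view      -> '12'
    print(result.day)       # attribute view -> '31'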
- -The pyparsing module handles some of the problems that are typically -vexing when writing text parsers: - - - extra or missing whitespace (the above program will also handle - "Hello,World!", "Hello , World !", etc.) - - quoted strings - - embedded comments - - -Getting Started - ------------------ -Visit the classes :class:`ParserElement` and :class:`ParseResults` to -see the base classes that most other pyparsing -classes inherit from. Use the docstrings for examples of how to: - - - construct literal match expressions from :class:`Literal` and - :class:`CaselessLiteral` classes - - construct character word-group expressions using the :class:`Word` - class - - see how to create repetitive expressions using :class:`ZeroOrMore` - and :class:`OneOrMore` classes - - use :class:`'+'`, :class:`'|'`, :class:`'^'`, - and :class:`'&'` operators to combine simple expressions into - more complex ones - - associate names with your parsed results using - :class:`ParserElement.setResultsName` - - access the parsed data, which is returned as a :class:`ParseResults` - object - - find some helpful expression short-cuts like :class:`delimitedList` - and :class:`oneOf` - - find more useful common expressions in the :class:`pyparsing_common` - namespace class -""" - -__version__ = "2.4.7" -__versionTime__ = "30 Mar 2020 00:43 UTC" -__author__ = "Paul McGuire " - -import string -from weakref import ref as wkref -import copy -import sys -import warnings -import re -import sre_constants -import collections -import pprint -import traceback -import types -from datetime import datetime -from operator import itemgetter -import itertools -from functools import wraps -from contextlib import contextmanager - -try: - # Python 3 - from itertools import filterfalse -except ImportError: - from itertools import ifilterfalse as filterfalse - -try: - from _thread import RLock -except ImportError: - from threading import RLock - -try: - # Python 3 - from collections.abc import Iterable - from collections.abc import MutableMapping, Mapping -except ImportError: - # Python 2.7 - from collections import Iterable - from collections import MutableMapping, Mapping - -try: - from collections import OrderedDict as _OrderedDict -except ImportError: - try: - from ordereddict import OrderedDict as _OrderedDict - except ImportError: - _OrderedDict = None - -try: - from types import SimpleNamespace -except ImportError: - class SimpleNamespace: pass - -# version compatibility configuration -__compat__ = SimpleNamespace() -__compat__.__doc__ = """ - A cross-version compatibility configuration for pyparsing features that will be - released in a future version. By setting values in this configuration to True, - those features can be enabled in prior versions for compatibility development - and testing. 
- - - collect_all_And_tokens - flag to enable fix for Issue #63 that fixes erroneous grouping - of results names when an And expression is nested within an Or or MatchFirst; set to - True to enable bugfix released in pyparsing 2.3.0, or False to preserve - pre-2.3.0 handling of named results -""" -__compat__.collect_all_And_tokens = True - -__diag__ = SimpleNamespace() -__diag__.__doc__ = """ -Diagnostic configuration (all default to False) - - warn_multiple_tokens_in_named_alternation - flag to enable warnings when a results - name is defined on a MatchFirst or Or expression with one or more And subexpressions - (only warns if __compat__.collect_all_And_tokens is False) - - warn_ungrouped_named_tokens_in_collection - flag to enable warnings when a results - name is defined on a containing expression with ungrouped subexpressions that also - have results names - - warn_name_set_on_empty_Forward - flag to enable warnings whan a Forward is defined - with a results name, but has no contents defined - - warn_on_multiple_string_args_to_oneof - flag to enable warnings whan oneOf is - incorrectly called with multiple str arguments - - enable_debug_on_named_expressions - flag to auto-enable debug on all subsequent - calls to ParserElement.setName() -""" -__diag__.warn_multiple_tokens_in_named_alternation = False -__diag__.warn_ungrouped_named_tokens_in_collection = False -__diag__.warn_name_set_on_empty_Forward = False -__diag__.warn_on_multiple_string_args_to_oneof = False -__diag__.enable_debug_on_named_expressions = False -__diag__._all_names = [nm for nm in vars(__diag__) if nm.startswith("enable_") or nm.startswith("warn_")] - -def _enable_all_warnings(): - __diag__.warn_multiple_tokens_in_named_alternation = True - __diag__.warn_ungrouped_named_tokens_in_collection = True - __diag__.warn_name_set_on_empty_Forward = True - __diag__.warn_on_multiple_string_args_to_oneof = True -__diag__.enable_all_warnings = _enable_all_warnings - - -__all__ = ['__version__', '__versionTime__', '__author__', '__compat__', '__diag__', - 'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty', - 'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal', - 'PrecededBy', 'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or', - 'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException', - 'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException', - 'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', - 'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', 'Char', - 'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col', - 'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString', - 'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums', - 'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno', - 'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral', - 'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables', - 'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', - 'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd', - 'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute', - 'indentedBlock', 'originalTextFor', 'ungroup', 
'infixNotation', 'locatedExpr', 'withClass', - 'CloseMatch', 'tokenMap', 'pyparsing_common', 'pyparsing_unicode', 'unicode_set', - 'conditionAsParseAction', 're', - ] - -system_version = tuple(sys.version_info)[:3] -PY_3 = system_version[0] == 3 -if PY_3: - _MAX_INT = sys.maxsize - basestring = str - unichr = chr - unicode = str - _ustr = str - - # build list of single arg builtins, that can be used as parse actions - singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max] - -else: - _MAX_INT = sys.maxint - range = xrange - - def _ustr(obj): - """Drop-in replacement for str(obj) that tries to be Unicode - friendly. It first tries str(obj). If that fails with - a UnicodeEncodeError, then it tries unicode(obj). It then - < returns the unicode object | encodes it with the default - encoding | ... >. - """ - if isinstance(obj, unicode): - return obj - - try: - # If this works, then _ustr(obj) has the same behaviour as str(obj), so - # it won't break any existing code. - return str(obj) - - except UnicodeEncodeError: - # Else encode it - ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace') - xmlcharref = Regex(r'&#\d+;') - xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:]) - return xmlcharref.transformString(ret) - - # build list of single arg builtins, tolerant of Python version, that can be used as parse actions - singleArgBuiltins = [] - import __builtin__ - - for fname in "sum len sorted reversed list tuple set any all min max".split(): - try: - singleArgBuiltins.append(getattr(__builtin__, fname)) - except AttributeError: - continue - -_generatorType = type((y for y in range(1))) - -def _xml_escape(data): - """Escape &, <, >, ", ', etc. in a string of data.""" - - # ampersand must be replaced first - from_symbols = '&><"\'' - to_symbols = ('&' + s + ';' for s in "amp gt lt quot apos".split()) - for from_, to_ in zip(from_symbols, to_symbols): - data = data.replace(from_, to_) - return data - -alphas = string.ascii_uppercase + string.ascii_lowercase -nums = "0123456789" -hexnums = nums + "ABCDEFabcdef" -alphanums = alphas + nums -_bslash = chr(92) -printables = "".join(c for c in string.printable if c not in string.whitespace) - - -def conditionAsParseAction(fn, message=None, fatal=False): - msg = message if message is not None else "failed user-defined condition" - exc_type = ParseFatalException if fatal else ParseException - fn = _trim_arity(fn) - - @wraps(fn) - def pa(s, l, t): - if not bool(fn(s, l, t)): - raise exc_type(s, l, msg) - - return pa - -class ParseBaseException(Exception): - """base exception class for all parsing runtime exceptions""" - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__(self, pstr, loc=0, msg=None, elem=None): - self.loc = loc - if msg is None: - self.msg = pstr - self.pstr = "" - else: - self.msg = msg - self.pstr = pstr - self.parserElement = elem - self.args = (pstr, loc, msg) - - @classmethod - def _from_exception(cls, pe): - """ - internal factory method to simplify creating one type of ParseException - from another - avoids having __init__ signature conflicts among subclasses - """ - return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement) - - def __getattr__(self, aname): - """supported attributes by name are: - - lineno - returns the line number of the exception text - - col - returns the column number of the exception text - - line - returns the line containing the exception text - """ - if aname == 
"lineno": - return lineno(self.loc, self.pstr) - elif aname in ("col", "column"): - return col(self.loc, self.pstr) - elif aname == "line": - return line(self.loc, self.pstr) - else: - raise AttributeError(aname) - - def __str__(self): - if self.pstr: - if self.loc >= len(self.pstr): - foundstr = ', found end of text' - else: - foundstr = (', found %r' % self.pstr[self.loc:self.loc + 1]).replace(r'\\', '\\') - else: - foundstr = '' - return ("%s%s (at char %d), (line:%d, col:%d)" % - (self.msg, foundstr, self.loc, self.lineno, self.column)) - def __repr__(self): - return _ustr(self) - def markInputline(self, markerString=">!<"): - """Extracts the exception line from the input string, and marks - the location of the exception with a special symbol. - """ - line_str = self.line - line_column = self.column - 1 - if markerString: - line_str = "".join((line_str[:line_column], - markerString, line_str[line_column:])) - return line_str.strip() - def __dir__(self): - return "lineno col line".split() + dir(type(self)) - -class ParseException(ParseBaseException): - """ - Exception thrown when parse expressions don't match class; - supported attributes by name are: - - lineno - returns the line number of the exception text - - col - returns the column number of the exception text - - line - returns the line containing the exception text - - Example:: - - try: - Word(nums).setName("integer").parseString("ABC") - except ParseException as pe: - print(pe) - print("column: {}".format(pe.col)) - - prints:: - - Expected integer (at char 0), (line:1, col:1) - column: 1 - - """ - - @staticmethod - def explain(exc, depth=16): - """ - Method to take an exception and translate the Python internal traceback into a list - of the pyparsing expressions that caused the exception to be raised. - - Parameters: - - - exc - exception raised during parsing (need not be a ParseException, in support - of Python exceptions that might be raised in a parse action) - - depth (default=16) - number of levels back in the stack trace to list expression - and function names; if None, the full stack trace names will be listed; if 0, only - the failing input line, marker, and exception string will be shown - - Returns a multi-line string listing the ParserElements and/or function names in the - exception's stack trace. - - Note: the diagnostic output will include string representations of the expressions - that failed to parse. These representations will be more helpful if you use `setName` to - give identifiable names to your expressions. Otherwise they will use the default string - forms, which may be cryptic to read. - - explain() is only supported under Python 3. 
- """ - import inspect - - if depth is None: - depth = sys.getrecursionlimit() - ret = [] - if isinstance(exc, ParseBaseException): - ret.append(exc.line) - ret.append(' ' * (exc.col - 1) + '^') - ret.append("{0}: {1}".format(type(exc).__name__, exc)) - - if depth > 0: - callers = inspect.getinnerframes(exc.__traceback__, context=depth) - seen = set() - for i, ff in enumerate(callers[-depth:]): - frm = ff[0] - - f_self = frm.f_locals.get('self', None) - if isinstance(f_self, ParserElement): - if frm.f_code.co_name not in ('parseImpl', '_parseNoCache'): - continue - if f_self in seen: - continue - seen.add(f_self) - - self_type = type(f_self) - ret.append("{0}.{1} - {2}".format(self_type.__module__, - self_type.__name__, - f_self)) - elif f_self is not None: - self_type = type(f_self) - ret.append("{0}.{1}".format(self_type.__module__, - self_type.__name__)) - else: - code = frm.f_code - if code.co_name in ('wrapper', ''): - continue - - ret.append("{0}".format(code.co_name)) - - depth -= 1 - if not depth: - break - - return '\n'.join(ret) - - -class ParseFatalException(ParseBaseException): - """user-throwable exception thrown when inconsistent parse content - is found; stops all parsing immediately""" - pass - -class ParseSyntaxException(ParseFatalException): - """just like :class:`ParseFatalException`, but thrown internally - when an :class:`ErrorStop` ('-' operator) indicates - that parsing is to stop immediately because an unbacktrackable - syntax error has been found. - """ - pass - -#~ class ReparseException(ParseBaseException): - #~ """Experimental class - parse actions can raise this exception to cause - #~ pyparsing to reparse the input string: - #~ - with a modified input string, and/or - #~ - with a modified start location - #~ Set the values of the ReparseException in the constructor, and raise the - #~ exception in a parse action to cause pyparsing to use the new string/location. - #~ Setting the values as None causes no change to be made. - #~ """ - #~ def __init_( self, newstring, restartLoc ): - #~ self.newParseText = newstring - #~ self.reparseLoc = restartLoc - -class RecursiveGrammarException(Exception): - """exception thrown by :class:`ParserElement.validate` if the - grammar could be improperly recursive - """ - def __init__(self, parseElementList): - self.parseElementTrace = parseElementList - - def __str__(self): - return "RecursiveGrammarException: %s" % self.parseElementTrace - -class _ParseResultsWithOffset(object): - def __init__(self, p1, p2): - self.tup = (p1, p2) - def __getitem__(self, i): - return self.tup[i] - def __repr__(self): - return repr(self.tup[0]) - def setOffset(self, i): - self.tup = (self.tup[0], i) - -class ParseResults(object): - """Structured parse results, to provide multiple means of access to - the parsed data: - - - as a list (``len(results)``) - - by list index (``results[0], results[1]``, etc.) 
- - by attribute (``results.`` - see :class:`ParserElement.setResultsName`) - - Example:: - - integer = Word(nums) - date_str = (integer.setResultsName("year") + '/' - + integer.setResultsName("month") + '/' - + integer.setResultsName("day")) - # equivalent form: - # date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - # parseString returns a ParseResults object - result = date_str.parseString("1999/12/31") - - def test(s, fn=repr): - print("%s -> %s" % (s, fn(eval(s)))) - test("list(result)") - test("result[0]") - test("result['month']") - test("result.day") - test("'month' in result") - test("'minutes' in result") - test("result.dump()", str) - - prints:: - - list(result) -> ['1999', '/', '12', '/', '31'] - result[0] -> '1999' - result['month'] -> '12' - result.day -> '31' - 'month' in result -> True - 'minutes' in result -> False - result.dump() -> ['1999', '/', '12', '/', '31'] - - day: 31 - - month: 12 - - year: 1999 - """ - def __new__(cls, toklist=None, name=None, asList=True, modal=True): - if isinstance(toklist, cls): - return toklist - retobj = object.__new__(cls) - retobj.__doinit = True - return retobj - - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__(self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance): - if self.__doinit: - self.__doinit = False - self.__name = None - self.__parent = None - self.__accumNames = {} - self.__asList = asList - self.__modal = modal - if toklist is None: - toklist = [] - if isinstance(toklist, list): - self.__toklist = toklist[:] - elif isinstance(toklist, _generatorType): - self.__toklist = list(toklist) - else: - self.__toklist = [toklist] - self.__tokdict = dict() - - if name is not None and name: - if not modal: - self.__accumNames[name] = 0 - if isinstance(name, int): - name = _ustr(name) # will always return a str, but use _ustr for consistency - self.__name = name - if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None, '', [])): - if isinstance(toklist, basestring): - toklist = [toklist] - if asList: - if isinstance(toklist, ParseResults): - self[name] = _ParseResultsWithOffset(ParseResults(toklist.__toklist), 0) - else: - self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]), 0) - self[name].__name = name - else: - try: - self[name] = toklist[0] - except (KeyError, TypeError, IndexError): - self[name] = toklist - - def __getitem__(self, i): - if isinstance(i, (int, slice)): - return self.__toklist[i] - else: - if i not in self.__accumNames: - return self.__tokdict[i][-1][0] - else: - return ParseResults([v[0] for v in self.__tokdict[i]]) - - def __setitem__(self, k, v, isinstance=isinstance): - if isinstance(v, _ParseResultsWithOffset): - self.__tokdict[k] = self.__tokdict.get(k, list()) + [v] - sub = v[0] - elif isinstance(k, (int, slice)): - self.__toklist[k] = v - sub = v - else: - self.__tokdict[k] = self.__tokdict.get(k, list()) + [_ParseResultsWithOffset(v, 0)] - sub = v - if isinstance(sub, ParseResults): - sub.__parent = wkref(self) - - def __delitem__(self, i): - if isinstance(i, (int, slice)): - mylen = len(self.__toklist) - del self.__toklist[i] - - # convert int to slice - if isinstance(i, int): - if i < 0: - i += mylen - i = slice(i, i + 1) - # get removed indices - removed = list(range(*i.indices(mylen))) - removed.reverse() - # fixup indices in token dictionary - for name, occurrences in self.__tokdict.items(): - for j in removed: - for k, (value, position) in 
enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset(value, position - (position > j)) - else: - del self.__tokdict[i] - - def __contains__(self, k): - return k in self.__tokdict - - def __len__(self): - return len(self.__toklist) - - def __bool__(self): - return (not not self.__toklist) - __nonzero__ = __bool__ - - def __iter__(self): - return iter(self.__toklist) - - def __reversed__(self): - return iter(self.__toklist[::-1]) - - def _iterkeys(self): - if hasattr(self.__tokdict, "iterkeys"): - return self.__tokdict.iterkeys() - else: - return iter(self.__tokdict) - - def _itervalues(self): - return (self[k] for k in self._iterkeys()) - - def _iteritems(self): - return ((k, self[k]) for k in self._iterkeys()) - - if PY_3: - keys = _iterkeys - """Returns an iterator of all named result keys.""" - - values = _itervalues - """Returns an iterator of all named result values.""" - - items = _iteritems - """Returns an iterator of all named result key-value tuples.""" - - else: - iterkeys = _iterkeys - """Returns an iterator of all named result keys (Python 2.x only).""" - - itervalues = _itervalues - """Returns an iterator of all named result values (Python 2.x only).""" - - iteritems = _iteritems - """Returns an iterator of all named result key-value tuples (Python 2.x only).""" - - def keys(self): - """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x).""" - return list(self.iterkeys()) - - def values(self): - """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x).""" - return list(self.itervalues()) - - def items(self): - """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x).""" - return list(self.iteritems()) - - def haskeys(self): - """Since keys() returns an iterator, this method is helpful in bypassing - code that looks for the existence of any defined results names.""" - return bool(self.__tokdict) - - def pop(self, *args, **kwargs): - """ - Removes and returns item at specified index (default= ``last``). - Supports both ``list`` and ``dict`` semantics for ``pop()``. If - passed no argument or an integer argument, it will use ``list`` - semantics and pop tokens from the list of parsed tokens. If passed - a non-integer argument (most likely a string), it will use ``dict`` - semantics and pop the corresponding value from any defined results - names. A second default return value argument is supported, just as in - ``dict.pop()``. 
- - Example:: - - def remove_first(tokens): - tokens.pop(0) - print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] - print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321'] - - label = Word(alphas) - patt = label("LABEL") + OneOrMore(Word(nums)) - print(patt.parseString("AAB 123 321").dump()) - - # Use pop() in a parse action to remove named result (note that corresponding value is not - # removed from list form of results) - def remove_LABEL(tokens): - tokens.pop("LABEL") - return tokens - patt.addParseAction(remove_LABEL) - print(patt.parseString("AAB 123 321").dump()) - - prints:: - - ['AAB', '123', '321'] - - LABEL: AAB - - ['AAB', '123', '321'] - """ - if not args: - args = [-1] - for k, v in kwargs.items(): - if k == 'default': - args = (args[0], v) - else: - raise TypeError("pop() got an unexpected keyword argument '%s'" % k) - if (isinstance(args[0], int) - or len(args) == 1 - or args[0] in self): - index = args[0] - ret = self[index] - del self[index] - return ret - else: - defaultvalue = args[1] - return defaultvalue - - def get(self, key, defaultValue=None): - """ - Returns named result matching the given key, or if there is no - such name, then returns the given ``defaultValue`` or ``None`` if no - ``defaultValue`` is specified. - - Similar to ``dict.get()``. - - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parseString("1999/12/31") - print(result.get("year")) # -> '1999' - print(result.get("hour", "not specified")) # -> 'not specified' - print(result.get("hour")) # -> None - """ - if key in self: - return self[key] - else: - return defaultValue - - def insert(self, index, insStr): - """ - Inserts new element at location index in the list of parsed tokens. - - Similar to ``list.insert()``. - - Example:: - - print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] - - # use a parse action to insert the parse location in the front of the parsed results - def insert_locn(locn, tokens): - tokens.insert(0, locn) - print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321'] - """ - self.__toklist.insert(index, insStr) - # fixup indices in token dictionary - for name, occurrences in self.__tokdict.items(): - for k, (value, position) in enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset(value, position + (position > index)) - - def append(self, item): - """ - Add single element to end of ParseResults list of elements. - - Example:: - - print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] - - # use a parse action to compute the sum of the parsed integers, and add it to the end - def append_sum(tokens): - tokens.append(sum(map(int, tokens))) - print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444] - """ - self.__toklist.append(item) - - def extend(self, itemseq): - """ - Add sequence of elements to end of ParseResults list of elements. 
- - Example:: - - patt = OneOrMore(Word(alphas)) - - # use a parse action to append the reverse of the matched strings, to make a palindrome - def make_palindrome(tokens): - tokens.extend(reversed([t[::-1] for t in tokens])) - return ''.join(tokens) - print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' - """ - if isinstance(itemseq, ParseResults): - self.__iadd__(itemseq) - else: - self.__toklist.extend(itemseq) - - def clear(self): - """ - Clear all elements and results names. - """ - del self.__toklist[:] - self.__tokdict.clear() - - def __getattr__(self, name): - try: - return self[name] - except KeyError: - return "" - - def __add__(self, other): - ret = self.copy() - ret += other - return ret - - def __iadd__(self, other): - if other.__tokdict: - offset = len(self.__toklist) - addoffset = lambda a: offset if a < 0 else a + offset - otheritems = other.__tokdict.items() - otherdictitems = [(k, _ParseResultsWithOffset(v[0], addoffset(v[1]))) - for k, vlist in otheritems for v in vlist] - for k, v in otherdictitems: - self[k] = v - if isinstance(v[0], ParseResults): - v[0].__parent = wkref(self) - - self.__toklist += other.__toklist - self.__accumNames.update(other.__accumNames) - return self - - def __radd__(self, other): - if isinstance(other, int) and other == 0: - # useful for merging many ParseResults using sum() builtin - return self.copy() - else: - # this may raise a TypeError - so be it - return other + self - - def __repr__(self): - return "(%s, %s)" % (repr(self.__toklist), repr(self.__tokdict)) - - def __str__(self): - return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']' - - def _asStringList(self, sep=''): - out = [] - for item in self.__toklist: - if out and sep: - out.append(sep) - if isinstance(item, ParseResults): - out += item._asStringList() - else: - out.append(_ustr(item)) - return out - - def asList(self): - """ - Returns the parse results as a nested list of matching tokens, all converted to strings. - - Example:: - - patt = OneOrMore(Word(alphas)) - result = patt.parseString("sldkj lsdkj sldkj") - # even though the result prints in string-like form, it is actually a pyparsing ParseResults - print(type(result), result) # -> ['sldkj', 'lsdkj', 'sldkj'] - - # Use asList() to create an actual list - result_list = result.asList() - print(type(result_list), result_list) # -> ['sldkj', 'lsdkj', 'sldkj'] - """ - return [res.asList() if isinstance(res, ParseResults) else res for res in self.__toklist] - - def asDict(self): - """ - Returns the named parse results as a nested dictionary. - - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parseString('12/31/1999') - print(type(result), repr(result)) # -> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) - - result_dict = result.asDict() - print(type(result_dict), repr(result_dict)) # -> {'day': '1999', 'year': '12', 'month': '31'} - - # even though a ParseResults supports dict-like access, sometime you just need to have a dict - import json - print(json.dumps(result)) # -> Exception: TypeError: ... 
is not JSON serializable - print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"} - """ - if PY_3: - item_fn = self.items - else: - item_fn = self.iteritems - - def toItem(obj): - if isinstance(obj, ParseResults): - if obj.haskeys(): - return obj.asDict() - else: - return [toItem(v) for v in obj] - else: - return obj - - return dict((k, toItem(v)) for k, v in item_fn()) - - def copy(self): - """ - Returns a new copy of a :class:`ParseResults` object. - """ - ret = ParseResults(self.__toklist) - ret.__tokdict = dict(self.__tokdict.items()) - ret.__parent = self.__parent - ret.__accumNames.update(self.__accumNames) - ret.__name = self.__name - return ret - - def asXML(self, doctag=None, namedItemsOnly=False, indent="", formatted=True): - """ - (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names. - """ - nl = "\n" - out = [] - namedItems = dict((v[1], k) for (k, vlist) in self.__tokdict.items() - for v in vlist) - nextLevelIndent = indent + " " - - # collapse out indents if formatting is not desired - if not formatted: - indent = "" - nextLevelIndent = "" - nl = "" - - selfTag = None - if doctag is not None: - selfTag = doctag - else: - if self.__name: - selfTag = self.__name - - if not selfTag: - if namedItemsOnly: - return "" - else: - selfTag = "ITEM" - - out += [nl, indent, "<", selfTag, ">"] - - for i, res in enumerate(self.__toklist): - if isinstance(res, ParseResults): - if i in namedItems: - out += [res.asXML(namedItems[i], - namedItemsOnly and doctag is None, - nextLevelIndent, - formatted)] - else: - out += [res.asXML(None, - namedItemsOnly and doctag is None, - nextLevelIndent, - formatted)] - else: - # individual token, see if there is a name for it - resTag = None - if i in namedItems: - resTag = namedItems[i] - if not resTag: - if namedItemsOnly: - continue - else: - resTag = "ITEM" - xmlBodyText = _xml_escape(_ustr(res)) - out += [nl, nextLevelIndent, "<", resTag, ">", - xmlBodyText, - ""] - - out += [nl, indent, ""] - return "".join(out) - - def __lookup(self, sub): - for k, vlist in self.__tokdict.items(): - for v, loc in vlist: - if sub is v: - return k - return None - - def getName(self): - r""" - Returns the results name for this token expression. Useful when several - different expressions might match at a particular location. - - Example:: - - integer = Word(nums) - ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d") - house_number_expr = Suppress('#') + Word(nums, alphanums) - user_data = (Group(house_number_expr)("house_number") - | Group(ssn_expr)("ssn") - | Group(integer)("age")) - user_info = OneOrMore(user_data) - - result = user_info.parseString("22 111-22-3333 #221B") - for item in result: - print(item.getName(), ':', item[0]) - - prints:: - - age : 22 - ssn : 111-22-3333 - house_number : 221B - """ - if self.__name: - return self.__name - elif self.__parent: - par = self.__parent() - if par: - return par.__lookup(self) - else: - return None - elif (len(self) == 1 - and len(self.__tokdict) == 1 - and next(iter(self.__tokdict.values()))[0][1] in (0, -1)): - return next(iter(self.__tokdict.keys())) - else: - return None - - def dump(self, indent='', full=True, include_list=True, _depth=0): - """ - Diagnostic method for listing out the contents of - a :class:`ParseResults`. Accepts an optional ``indent`` argument so - that this string can be embedded in a nested display of other data. 
- - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parseString('12/31/1999') - print(result.dump()) - - prints:: - - ['12', '/', '31', '/', '1999'] - - day: 1999 - - month: 31 - - year: 12 - """ - out = [] - NL = '\n' - if include_list: - out.append(indent + _ustr(self.asList())) - else: - out.append('') - - if full: - if self.haskeys(): - items = sorted((str(k), v) for k, v in self.items()) - for k, v in items: - if out: - out.append(NL) - out.append("%s%s- %s: " % (indent, (' ' * _depth), k)) - if isinstance(v, ParseResults): - if v: - out.append(v.dump(indent=indent, full=full, include_list=include_list, _depth=_depth + 1)) - else: - out.append(_ustr(v)) - else: - out.append(repr(v)) - elif any(isinstance(vv, ParseResults) for vv in self): - v = self - for i, vv in enumerate(v): - if isinstance(vv, ParseResults): - out.append("\n%s%s[%d]:\n%s%s%s" % (indent, - (' ' * (_depth)), - i, - indent, - (' ' * (_depth + 1)), - vv.dump(indent=indent, - full=full, - include_list=include_list, - _depth=_depth + 1))) - else: - out.append("\n%s%s[%d]:\n%s%s%s" % (indent, - (' ' * (_depth)), - i, - indent, - (' ' * (_depth + 1)), - _ustr(vv))) - - return "".join(out) - - def pprint(self, *args, **kwargs): - """ - Pretty-printer for parsed results as a list, using the - `pprint `_ module. - Accepts additional positional or keyword args as defined for - `pprint.pprint `_ . - - Example:: - - ident = Word(alphas, alphanums) - num = Word(nums) - func = Forward() - term = ident | num | Group('(' + func + ')') - func <<= ident + Group(Optional(delimitedList(term))) - result = func.parseString("fna a,b,(fnb c,d,200),100") - result.pprint(width=40) - - prints:: - - ['fna', - ['a', - 'b', - ['(', 'fnb', ['c', 'd', '200'], ')'], - '100']] - """ - pprint.pprint(self.asList(), *args, **kwargs) - - # add support for pickle protocol - def __getstate__(self): - return (self.__toklist, - (self.__tokdict.copy(), - self.__parent is not None and self.__parent() or None, - self.__accumNames, - self.__name)) - - def __setstate__(self, state): - self.__toklist = state[0] - self.__tokdict, par, inAccumNames, self.__name = state[1] - self.__accumNames = {} - self.__accumNames.update(inAccumNames) - if par is not None: - self.__parent = wkref(par) - else: - self.__parent = None - - def __getnewargs__(self): - return self.__toklist, self.__name, self.__asList, self.__modal - - def __dir__(self): - return dir(type(self)) + list(self.keys()) - - @classmethod - def from_dict(cls, other, name=None): - """ - Helper classmethod to construct a ParseResults from a dict, preserving the - name-value relations as results names. If an optional 'name' argument is - given, a nested ParseResults will be returned - """ - def is_iterable(obj): - try: - iter(obj) - except Exception: - return False - else: - if PY_3: - return not isinstance(obj, (str, bytes)) - else: - return not isinstance(obj, basestring) - - ret = cls([]) - for k, v in other.items(): - if isinstance(v, Mapping): - ret += cls.from_dict(v, name=k) - else: - ret += cls([v], name=k, asList=is_iterable(v)) - if name is not None: - ret = cls([ret], name=name) - return ret - -MutableMapping.register(ParseResults) - -def col (loc, strg): - """Returns current column within a string, counting newlines as line separators. - The first column is number 1. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. 
See - :class:`ParserElement.parseString` for more - information on parsing strings containing ```` s, and suggested - methods to maintain a consistent view of the parsed string, the parse - location, and line and column positions within the parsed string. - """ - s = strg - return 1 if 0 < loc < len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc) - -def lineno(loc, strg): - """Returns current line number within a string, counting newlines as line separators. - The first line is number 1. - - Note - the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See :class:`ParserElement.parseString` - for more information on parsing strings containing ```` s, and - suggested methods to maintain a consistent view of the parsed string, the - parse location, and line and column positions within the parsed string. - """ - return strg.count("\n", 0, loc) + 1 - -def line(loc, strg): - """Returns the line of text containing loc within a string, counting newlines as line separators. - """ - lastCR = strg.rfind("\n", 0, loc) - nextCR = strg.find("\n", loc) - if nextCR >= 0: - return strg[lastCR + 1:nextCR] - else: - return strg[lastCR + 1:] - -def _defaultStartDebugAction(instring, loc, expr): - print(("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % (lineno(loc, instring), col(loc, instring)))) - -def _defaultSuccessDebugAction(instring, startloc, endloc, expr, toks): - print("Matched " + _ustr(expr) + " -> " + str(toks.asList())) - -def _defaultExceptionDebugAction(instring, loc, expr, exc): - print("Exception raised:" + _ustr(exc)) - -def nullDebugAction(*args): - """'Do-nothing' debug action, to suppress debugging output during parsing.""" - pass - -# Only works on Python 3.x - nonlocal is toxic to Python 2 installs -#~ 'decorator to trim function calls to match the arity of the target' -#~ def _trim_arity(func, maxargs=3): - #~ if func in singleArgBuiltins: - #~ return lambda s,l,t: func(t) - #~ limit = 0 - #~ foundArity = False - #~ def wrapper(*args): - #~ nonlocal limit,foundArity - #~ while 1: - #~ try: - #~ ret = func(*args[limit:]) - #~ foundArity = True - #~ return ret - #~ except TypeError: - #~ if limit == maxargs or foundArity: - #~ raise - #~ limit += 1 - #~ continue - #~ return wrapper - -# this version is Python 2.x-3.x cross-compatible -'decorator to trim function calls to match the arity of the target' -def _trim_arity(func, maxargs=2): - if func in singleArgBuiltins: - return lambda s, l, t: func(t) - limit = [0] - foundArity = [False] - - # traceback return data structure changed in Py3.5 - normalize back to plain tuples - if system_version[:2] >= (3, 5): - def extract_stack(limit=0): - # special handling for Python 3.5.0 - extra deep call stack by 1 - offset = -3 if system_version == (3, 5, 0) else -2 - frame_summary = traceback.extract_stack(limit=-offset + limit - 1)[offset] - return [frame_summary[:2]] - def extract_tb(tb, limit=0): - frames = traceback.extract_tb(tb, limit=limit) - frame_summary = frames[-1] - return [frame_summary[:2]] - else: - extract_stack = traceback.extract_stack - extract_tb = traceback.extract_tb - - # synthesize what would be returned by traceback.extract_stack at the call to - # user's parse action 'func', so that we don't incur call penalty at parse time - - LINE_DIFF = 6 - # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND - # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! 
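The arity-trimming trick above is dense because of the traceback bookkeeping; stripped of that, the core idea is to retry the call while dropping leading arguments until the callable's signature fits, then remember the offset. A simplified sketch (it omits the traceback check the real code uses to avoid swallowing TypeErrors raised inside the user's function):

    def trim_arity(func, maxargs=2):
        state = {"limit": 0, "found": False}

        def wrapper(*args):
            while True:
                try:
                    ret = func(*args[state["limit"]:])
                    state["found"] = True
                    return ret
                except TypeError:
                    if state["found"] or state["limit"] > maxargs:
                        raise
                    state["limit"] += 1

        return wrapper

    # A one-argument parse action still works when invoked as fn(s, loc, toks):
    to_int = trim_arity(lambda toks: int(toks[0]))
    print(to_int("99 bottles", 0, ["99"]))  # -> 99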
- this_line = extract_stack(limit=2)[-1] - pa_call_line_synth = (this_line[0], this_line[1] + LINE_DIFF) - - def wrapper(*args): - while 1: - try: - ret = func(*args[limit[0]:]) - foundArity[0] = True - return ret - except TypeError: - # re-raise TypeErrors if they did not come from our arity testing - if foundArity[0]: - raise - else: - try: - tb = sys.exc_info()[-1] - if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth: - raise - finally: - try: - del tb - except NameError: - pass - - if limit[0] <= maxargs: - limit[0] += 1 - continue - raise - - # copy func name to wrapper for sensible debug output - func_name = "" - try: - func_name = getattr(func, '__name__', - getattr(func, '__class__').__name__) - except Exception: - func_name = str(func) - wrapper.__name__ = func_name - - return wrapper - - -class ParserElement(object): - """Abstract base level parser element class.""" - DEFAULT_WHITE_CHARS = " \n\t\r" - verbose_stacktrace = False - - @staticmethod - def setDefaultWhitespaceChars(chars): - r""" - Overrides the default whitespace chars - - Example:: - - # default whitespace chars are space, and newline - OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] - - # change to just treat newline as significant - ParserElement.setDefaultWhitespaceChars(" \t") - OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def'] - """ - ParserElement.DEFAULT_WHITE_CHARS = chars - - @staticmethod - def inlineLiteralsUsing(cls): - """ - Set class to be used for inclusion of string literals into a parser. - - Example:: - - # default literal class used is Literal - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] - - - # change to Suppress - ParserElement.inlineLiteralsUsing(Suppress) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - date_str.parseString("1999/12/31") # -> ['1999', '12', '31'] - """ - ParserElement._literalStringClass = cls - - @classmethod - def _trim_traceback(cls, tb): - while tb.tb_next: - tb = tb.tb_next - return tb - - def __init__(self, savelist=False): - self.parseAction = list() - self.failAction = None - # ~ self.name = "" # don't define self.name, let subclasses try/except upcall - self.strRepr = None - self.resultsName = None - self.saveAsList = savelist - self.skipWhitespace = True - self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) - self.copyDefaultWhiteChars = True - self.mayReturnEmpty = False # used when checking for left-recursion - self.keepTabs = False - self.ignoreExprs = list() - self.debug = False - self.streamlined = False - self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index - self.errmsg = "" - self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all) - self.debugActions = (None, None, None) # custom debug actions - self.re = None - self.callPreparse = True # used to avoid redundant calls to preParse - self.callDuringTry = False - - def copy(self): - """ - Make a copy of this :class:`ParserElement`. Useful for defining - different parse actions for the same parsing pattern, using copies of - the original parse element. 
- - Example:: - - integer = Word(nums).setParseAction(lambda toks: int(toks[0])) - integerK = integer.copy().addParseAction(lambda toks: toks[0] * 1024) + Suppress("K") - integerM = integer.copy().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") - - print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M")) - - prints:: - - [5120, 100, 655360, 268435456] - - Equivalent form of ``expr.copy()`` is just ``expr()``:: - - integerM = integer().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") - """ - cpy = copy.copy(self) - cpy.parseAction = self.parseAction[:] - cpy.ignoreExprs = self.ignoreExprs[:] - if self.copyDefaultWhiteChars: - cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS - return cpy - - def setName(self, name): - """ - Define name for this expression, makes debugging and exception messages clearer. - - Example:: - - Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1) - Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) - """ - self.name = name - self.errmsg = "Expected " + self.name - if __diag__.enable_debug_on_named_expressions: - self.setDebug() - return self - - def setResultsName(self, name, listAllMatches=False): - """ - Define name for referencing matching tokens as a nested attribute - of the returned parse results. - NOTE: this returns a *copy* of the original :class:`ParserElement` object; - this is so that the client can define a basic element, such as an - integer, and reference it in multiple places with different names. - - You can also set results names using the abbreviated syntax, - ``expr("name")`` in place of ``expr.setResultsName("name")`` - - see :class:`__call__`. - - Example:: - - date_str = (integer.setResultsName("year") + '/' - + integer.setResultsName("month") + '/' - + integer.setResultsName("day")) - - # equivalent form: - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - """ - return self._setResultsName(name, listAllMatches) - - def _setResultsName(self, name, listAllMatches=False): - newself = self.copy() - if name.endswith("*"): - name = name[:-1] - listAllMatches = True - newself.resultsName = name - newself.modalResults = not listAllMatches - return newself - - def setBreak(self, breakFlag=True): - """Method to invoke the Python pdb debugger when this element is - about to be parsed. Set ``breakFlag`` to True to enable, False to - disable. - """ - if breakFlag: - _parseMethod = self._parse - def breaker(instring, loc, doActions=True, callPreParse=True): - import pdb - # this call to pdb.set_trace() is intentional, not a checkin error - pdb.set_trace() - return _parseMethod(instring, loc, doActions, callPreParse) - breaker._originalParseMethod = _parseMethod - self._parse = breaker - else: - if hasattr(self._parse, "_originalParseMethod"): - self._parse = self._parse._originalParseMethod - return self - - def setParseAction(self, *fns, **kwargs): - """ - Define one or more actions to perform when successfully matching parse element definition. 
- Parse action fn is a callable method with 0-3 arguments, called as ``fn(s, loc, toks)`` , - ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where: - - - s = the original string being parsed (see note below) - - loc = the location of the matching substring - - toks = a list of the matched tokens, packaged as a :class:`ParseResults` object - - If the functions in fns modify the tokens, they can return them as the return - value from fn, and the modified list of tokens will replace the original. - Otherwise, fn does not need to return any value. - - If None is passed as the parse action, all previously added parse actions for this - expression are cleared. - - Optional keyword arguments: - - callDuringTry = (default= ``False``) indicate if parse action should be run during lookaheads and alternate testing - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See :class:`parseString for more - information on parsing strings containing ```` s, and suggested - methods to maintain a consistent view of the parsed string, the parse - location, and line and column positions within the parsed string. - - Example:: - - integer = Word(nums) - date_str = integer + '/' + integer + '/' + integer - - date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] - - # use parse action to convert to ints at parse time - integer = Word(nums).setParseAction(lambda toks: int(toks[0])) - date_str = integer + '/' + integer + '/' + integer - - # note that integer fields are now ints, not strings - date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31] - """ - if list(fns) == [None,]: - self.parseAction = [] - else: - if not all(callable(fn) for fn in fns): - raise TypeError("parse actions must be callable") - self.parseAction = list(map(_trim_arity, list(fns))) - self.callDuringTry = kwargs.get("callDuringTry", False) - return self - - def addParseAction(self, *fns, **kwargs): - """ - Add one or more parse actions to expression's list of parse actions. See :class:`setParseAction`. - - See examples in :class:`copy`. - """ - self.parseAction += list(map(_trim_arity, list(fns))) - self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) - return self - - def addCondition(self, *fns, **kwargs): - """Add a boolean predicate function to expression's list of parse actions. See - :class:`setParseAction` for function call signatures. Unlike ``setParseAction``, - functions passed to ``addCondition`` need to return boolean success/fail of the condition. - - Optional keyword arguments: - - message = define a custom message to be used in the raised exception - - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException - - Example:: - - integer = Word(nums).setParseAction(lambda toks: int(toks[0])) - year_int = integer.copy() - year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") - date_str = year_int + '/' + integer + '/' + integer - - result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1) - """ - for fn in fns: - self.parseAction.append(conditionAsParseAction(fn, message=kwargs.get('message'), - fatal=kwargs.get('fatal', False))) - - self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) - return self - - def setFailAction(self, fn): - """Define action to perform if parsing fails at this expression. 
- Fail acton fn is a callable function that takes the arguments - ``fn(s, loc, expr, err)`` where: - - s = string being parsed - - loc = location where expression match was attempted and failed - - expr = the parse expression that failed - - err = the exception thrown - The function returns no value. It may throw :class:`ParseFatalException` - if it is desired to stop parsing immediately.""" - self.failAction = fn - return self - - def _skipIgnorables(self, instring, loc): - exprsFound = True - while exprsFound: - exprsFound = False - for e in self.ignoreExprs: - try: - while 1: - loc, dummy = e._parse(instring, loc) - exprsFound = True - except ParseException: - pass - return loc - - def preParse(self, instring, loc): - if self.ignoreExprs: - loc = self._skipIgnorables(instring, loc) - - if self.skipWhitespace: - wt = self.whiteChars - instrlen = len(instring) - while loc < instrlen and instring[loc] in wt: - loc += 1 - - return loc - - def parseImpl(self, instring, loc, doActions=True): - return loc, [] - - def postParse(self, instring, loc, tokenlist): - return tokenlist - - # ~ @profile - def _parseNoCache(self, instring, loc, doActions=True, callPreParse=True): - TRY, MATCH, FAIL = 0, 1, 2 - debugging = (self.debug) # and doActions) - - if debugging or self.failAction: - # ~ print ("Match", self, "at loc", loc, "(%d, %d)" % (lineno(loc, instring), col(loc, instring))) - if self.debugActions[TRY]: - self.debugActions[TRY](instring, loc, self) - try: - if callPreParse and self.callPreparse: - preloc = self.preParse(instring, loc) - else: - preloc = loc - tokensStart = preloc - if self.mayIndexError or preloc >= len(instring): - try: - loc, tokens = self.parseImpl(instring, preloc, doActions) - except IndexError: - raise ParseException(instring, len(instring), self.errmsg, self) - else: - loc, tokens = self.parseImpl(instring, preloc, doActions) - except Exception as err: - # ~ print ("Exception raised:", err) - if self.debugActions[FAIL]: - self.debugActions[FAIL](instring, tokensStart, self, err) - if self.failAction: - self.failAction(instring, tokensStart, self, err) - raise - else: - if callPreParse and self.callPreparse: - preloc = self.preParse(instring, loc) - else: - preloc = loc - tokensStart = preloc - if self.mayIndexError or preloc >= len(instring): - try: - loc, tokens = self.parseImpl(instring, preloc, doActions) - except IndexError: - raise ParseException(instring, len(instring), self.errmsg, self) - else: - loc, tokens = self.parseImpl(instring, preloc, doActions) - - tokens = self.postParse(instring, loc, tokens) - - retTokens = ParseResults(tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults) - if self.parseAction and (doActions or self.callDuringTry): - if debugging: - try: - for fn in self.parseAction: - try: - tokens = fn(instring, tokensStart, retTokens) - except IndexError as parse_action_exc: - exc = ParseException("exception raised in parse action") - exc.__cause__ = parse_action_exc - raise exc - - if tokens is not None and tokens is not retTokens: - retTokens = ParseResults(tokens, - self.resultsName, - asList=self.saveAsList and isinstance(tokens, (ParseResults, list)), - modal=self.modalResults) - except Exception as err: - # ~ print "Exception raised in user parse action:", err - if self.debugActions[FAIL]: - self.debugActions[FAIL](instring, tokensStart, self, err) - raise - else: - for fn in self.parseAction: - try: - tokens = fn(instring, tokensStart, retTokens) - except IndexError as parse_action_exc: - exc = 
ParseException("exception raised in parse action") - exc.__cause__ = parse_action_exc - raise exc - - if tokens is not None and tokens is not retTokens: - retTokens = ParseResults(tokens, - self.resultsName, - asList=self.saveAsList and isinstance(tokens, (ParseResults, list)), - modal=self.modalResults) - if debugging: - # ~ print ("Matched", self, "->", retTokens.asList()) - if self.debugActions[MATCH]: - self.debugActions[MATCH](instring, tokensStart, loc, self, retTokens) - - return loc, retTokens - - def tryParse(self, instring, loc): - try: - return self._parse(instring, loc, doActions=False)[0] - except ParseFatalException: - raise ParseException(instring, loc, self.errmsg, self) - - def canParseNext(self, instring, loc): - try: - self.tryParse(instring, loc) - except (ParseException, IndexError): - return False - else: - return True - - class _UnboundedCache(object): - def __init__(self): - cache = {} - self.not_in_cache = not_in_cache = object() - - def get(self, key): - return cache.get(key, not_in_cache) - - def set(self, key, value): - cache[key] = value - - def clear(self): - cache.clear() - - def cache_len(self): - return len(cache) - - self.get = types.MethodType(get, self) - self.set = types.MethodType(set, self) - self.clear = types.MethodType(clear, self) - self.__len__ = types.MethodType(cache_len, self) - - if _OrderedDict is not None: - class _FifoCache(object): - def __init__(self, size): - self.not_in_cache = not_in_cache = object() - - cache = _OrderedDict() - - def get(self, key): - return cache.get(key, not_in_cache) - - def set(self, key, value): - cache[key] = value - while len(cache) > size: - try: - cache.popitem(False) - except KeyError: - pass - - def clear(self): - cache.clear() - - def cache_len(self): - return len(cache) - - self.get = types.MethodType(get, self) - self.set = types.MethodType(set, self) - self.clear = types.MethodType(clear, self) - self.__len__ = types.MethodType(cache_len, self) - - else: - class _FifoCache(object): - def __init__(self, size): - self.not_in_cache = not_in_cache = object() - - cache = {} - key_fifo = collections.deque([], size) - - def get(self, key): - return cache.get(key, not_in_cache) - - def set(self, key, value): - cache[key] = value - while len(key_fifo) > size: - cache.pop(key_fifo.popleft(), None) - key_fifo.append(key) - - def clear(self): - cache.clear() - key_fifo.clear() - - def cache_len(self): - return len(cache) - - self.get = types.MethodType(get, self) - self.set = types.MethodType(set, self) - self.clear = types.MethodType(clear, self) - self.__len__ = types.MethodType(cache_len, self) - - # argument cache for optimizing repeated calls when backtracking through recursive expressions - packrat_cache = {} # this is set later by enabledPackrat(); this is here so that resetCache() doesn't fail - packrat_cache_lock = RLock() - packrat_cache_stats = [0, 0] - - # this method gets repeatedly called during backtracking with the same arguments - - # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression - def _parseCache(self, instring, loc, doActions=True, callPreParse=True): - HIT, MISS = 0, 1 - lookup = (self, instring, loc, callPreParse, doActions) - with ParserElement.packrat_cache_lock: - cache = ParserElement.packrat_cache - value = cache.get(lookup) - if value is cache.not_in_cache: - ParserElement.packrat_cache_stats[MISS] += 1 - try: - value = self._parseNoCache(instring, loc, doActions, callPreParse) - except ParseBaseException as pe: - # cache a copy of the 
exception, without the traceback - cache.set(lookup, pe.__class__(*pe.args)) - raise - else: - cache.set(lookup, (value[0], value[1].copy())) - return value - else: - ParserElement.packrat_cache_stats[HIT] += 1 - if isinstance(value, Exception): - raise value - return value[0], value[1].copy() - - _parse = _parseNoCache - - @staticmethod - def resetCache(): - ParserElement.packrat_cache.clear() - ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats) - - _packratEnabled = False - @staticmethod - def enablePackrat(cache_size_limit=128): - """Enables "packrat" parsing, which adds memoizing to the parsing logic. - Repeated parse attempts at the same string location (which happens - often in many complex grammars) can immediately return a cached value, - instead of re-executing parsing/validating code. Memoizing is done of - both valid results and parsing exceptions. - - Parameters: - - - cache_size_limit - (default= ``128``) - if an integer value is provided - will limit the size of the packrat cache; if None is passed, then - the cache size will be unbounded; if 0 is passed, the cache will - be effectively disabled. - - This speedup may break existing programs that use parse actions that - have side-effects. For this reason, packrat parsing is disabled when - you first import pyparsing. To activate the packrat feature, your - program must call the class method :class:`ParserElement.enablePackrat`. - For best results, call ``enablePackrat()`` immediately after - importing pyparsing. - - Example:: - - import pipenv.vendor.pyparsing as pyparsing - pyparsing.ParserElement.enablePackrat() - """ - if not ParserElement._packratEnabled: - ParserElement._packratEnabled = True - if cache_size_limit is None: - ParserElement.packrat_cache = ParserElement._UnboundedCache() - else: - ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit) - ParserElement._parse = ParserElement._parseCache - - def parseString(self, instring, parseAll=False): - """ - Execute the parse expression with the given string. - This is the main interface to the client code, once the complete - expression has been built. - - Returns the parsed data as a :class:`ParseResults` object, which may be - accessed as a list, or as a dict or object with attributes if the given parser - includes results names. - - If you want the grammar to require that the entire input string be - successfully parsed, then set ``parseAll`` to True (equivalent to ending - the grammar with ``StringEnd()``). - - Note: ``parseString`` implicitly calls ``expandtabs()`` on the input string, - in order to report proper column numbers in parse actions. 
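# Illustrative sketch (not from the vendored file): the packrat + parseString
# workflow described above, using the vendored import path from the docstring.
import pipenv.vendor.pyparsing as pp

pp.ParserElement.enablePackrat()      # memoize repeated parse attempts
integer = pp.Word(pp.nums).setParseAction(lambda t: int(t[0]))
date_str = integer + "/" + integer + "/" + integer
print(date_str.parseString("1999/12/31"))                 # -> [1999, '/', 12, '/', 31]
print(date_str.parseString("1999/12/31", parseAll=True))  # must consume the whole input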
- If the input string contains tabs and - the grammar uses parse actions that use the ``loc`` argument to index into the - string being parsed, you can ensure you have a consistent view of the input - string by: - - - calling ``parseWithTabs`` on your grammar before calling ``parseString`` - (see :class:`parseWithTabs`) - - define your parse action using the full ``(s, loc, toks)`` signature, and - reference the input string using the parse action's ``s`` argument - - explictly expand the tabs in your input string before calling - ``parseString`` - - Example:: - - Word('a').parseString('aaaaabaaa') # -> ['aaaaa'] - Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text - """ - ParserElement.resetCache() - if not self.streamlined: - self.streamline() - # ~ self.saveAsList = True - for e in self.ignoreExprs: - e.streamline() - if not self.keepTabs: - instring = instring.expandtabs() - try: - loc, tokens = self._parse(instring, 0) - if parseAll: - loc = self.preParse(instring, loc) - se = Empty() + StringEnd() - se._parse(instring, loc) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clearing out pyparsing internal stack trace - if getattr(exc, '__traceback__', None) is not None: - exc.__traceback__ = self._trim_traceback(exc.__traceback__) - raise exc - else: - return tokens - - def scanString(self, instring, maxMatches=_MAX_INT, overlap=False): - """ - Scan the input string for expression matches. Each match will return the - matching tokens, start location, and end location. May be called with optional - ``maxMatches`` argument, to clip scanning after 'n' matches are found. If - ``overlap`` is specified, then overlapping matches will be reported. - - Note that the start and end locations are reported relative to the string - being parsed. See :class:`parseString` for more information on parsing - strings with embedded tabs. - - Example:: - - source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" - print(source) - for tokens, start, end in Word(alphas).scanString(source): - print(' '*start + '^'*(end-start)) - print(' '*start + tokens[0]) - - prints:: - - sldjf123lsdjjkf345sldkjf879lkjsfd987 - ^^^^^ - sldjf - ^^^^^^^ - lsdjjkf - ^^^^^^ - sldkjf - ^^^^^^ - lkjsfd - """ - if not self.streamlined: - self.streamline() - for e in self.ignoreExprs: - e.streamline() - - if not self.keepTabs: - instring = _ustr(instring).expandtabs() - instrlen = len(instring) - loc = 0 - preparseFn = self.preParse - parseFn = self._parse - ParserElement.resetCache() - matches = 0 - try: - while loc <= instrlen and matches < maxMatches: - try: - preloc = preparseFn(instring, loc) - nextLoc, tokens = parseFn(instring, preloc, callPreParse=False) - except ParseException: - loc = preloc + 1 - else: - if nextLoc > loc: - matches += 1 - yield tokens, preloc, nextLoc - if overlap: - nextloc = preparseFn(instring, loc) - if nextloc > loc: - loc = nextLoc - else: - loc += 1 - else: - loc = nextLoc - else: - loc = preloc + 1 - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clearing out pyparsing internal stack trace - if getattr(exc, '__traceback__', None) is not None: - exc.__traceback__ = self._trim_traceback(exc.__traceback__) - raise exc - - def transformString(self, instring): - """ - Extension to :class:`scanString`, to modify matching text with modified tokens that may - be returned from a parse action. 
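# Illustrative sketch (not from the vendored file): scanString yields each
# match together with its start/end offsets, as described above.
import pipenv.vendor.pyparsing as pp

number = pp.Word(pp.nums)
for tokens, start, end in number.scanString("abc 123 def 4567"):
    print(tokens[0], start, end)   # -> "123 4 7", then "4567 12 16"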
To use ``transformString``, define a grammar and - attach a parse action to it that modifies the returned token list. - Invoking ``transformString()`` on a target string will then scan for matches, - and replace the matched text patterns according to the logic in the parse - action. ``transformString()`` returns the resulting transformed string. - - Example:: - - wd = Word(alphas) - wd.setParseAction(lambda toks: toks[0].title()) - - print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york.")) - - prints:: - - Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. - """ - out = [] - lastE = 0 - # force preservation of s, to minimize unwanted transformation of string, and to - # keep string locs straight between transformString and scanString - self.keepTabs = True - try: - for t, s, e in self.scanString(instring): - out.append(instring[lastE:s]) - if t: - if isinstance(t, ParseResults): - out += t.asList() - elif isinstance(t, list): - out += t - else: - out.append(t) - lastE = e - out.append(instring[lastE:]) - out = [o for o in out if o] - return "".join(map(_ustr, _flatten(out))) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clearing out pyparsing internal stack trace - if getattr(exc, '__traceback__', None) is not None: - exc.__traceback__ = self._trim_traceback(exc.__traceback__) - raise exc - - def searchString(self, instring, maxMatches=_MAX_INT): - """ - Another extension to :class:`scanString`, simplifying the access to the tokens found - to match the given parse expression. May be called with optional - ``maxMatches`` argument, to clip searching after 'n' matches are found. - - Example:: - - # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters - cap_word = Word(alphas.upper(), alphas.lower()) - - print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")) - - # the sum() builtin can be used to merge results into a single ParseResults object - print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))) - - prints:: - - [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']] - ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] - """ - try: - return ParseResults([t for t, s, e in self.scanString(instring, maxMatches)]) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clearing out pyparsing internal stack trace - if getattr(exc, '__traceback__', None) is not None: - exc.__traceback__ = self._trim_traceback(exc.__traceback__) - raise exc - - def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False): - """ - Generator method to split a string using the given expression as a separator. - May be called with optional ``maxsplit`` argument, to limit the number of splits; - and the optional ``includeSeparators`` argument (default= ``False``), if the separating - matching text should be included in the split results. 
- - Example:: - - punc = oneOf(list(".,;:/-!?")) - print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) - - prints:: - - ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] - """ - splits = 0 - last = 0 - for t, s, e in self.scanString(instring, maxMatches=maxsplit): - yield instring[last:s] - if includeSeparators: - yield t[0] - last = e - yield instring[last:] - - def __add__(self, other): - """ - Implementation of + operator - returns :class:`And`. Adding strings to a ParserElement - converts them to :class:`Literal`s by default. - - Example:: - - greet = Word(alphas) + "," + Word(alphas) + "!" - hello = "Hello, World!" - print (hello, "->", greet.parseString(hello)) - - prints:: - - Hello, World! -> ['Hello', ',', 'World', '!'] - - ``...`` may be used as a parse expression as a short form of :class:`SkipTo`. - - Literal('start') + ... + Literal('end') - - is equivalent to: - - Literal('start') + SkipTo('end')("_skipped*") + Literal('end') - - Note that the skipped text is returned with '_skipped' as a results name, - and to support having multiple skips in the same parser, the value returned is - a list of all skipped text. - """ - if other is Ellipsis: - return _PendingSkip(self) - - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return And([self, other]) - - def __radd__(self, other): - """ - Implementation of + operator when left operand is not a :class:`ParserElement` - """ - if other is Ellipsis: - return SkipTo(self)("_skipped*") + self - - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other + self - - def __sub__(self, other): - """ - Implementation of - operator, returns :class:`And` with error stop - """ - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return self + And._ErrorStop() + other - - def __rsub__(self, other): - """ - Implementation of - operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other - self - - def __mul__(self, other): - """ - Implementation of * operator, allows use of ``expr * 3`` in place of - ``expr + expr + expr``. Expressions may also me multiplied by a 2-integer - tuple, similar to ``{min, max}`` multipliers in regular expressions. 
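# Illustrative sketch (not from the vendored file): "+" composition and the
# "..." skip shorthand described above; the exact skipped text is an
# assumption based on the docstring.
import pipenv.vendor.pyparsing as pp

greet = pp.Word(pp.alphas) + "," + pp.Word(pp.alphas) + "!"
print(greet.parseString("Hello, World!"))   # -> ['Hello', ',', 'World', '!']

span = pp.Literal("start") + ... + pp.Literal("end")
result = span.parseString("start anything at all end")
print(result["_skipped"])   # text consumed by the implicit SkipTo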
Tuples - may also include ``None`` as in: - - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent - to ``expr*n + ZeroOrMore(expr)`` - (read as "at least n instances of ``expr``") - - ``expr*(None, n)`` is equivalent to ``expr*(0, n)`` - (read as "0 to n instances of ``expr``") - - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)`` - - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)`` - - Note that ``expr*(None, n)`` does not raise an exception if - more than n exprs exist in the input stream; that is, - ``expr*(None, n)`` does not enforce a maximum number of expr - occurrences. If this behavior is desired, then write - ``expr*(None, n) + ~expr`` - """ - if other is Ellipsis: - other = (0, None) - elif isinstance(other, tuple) and other[:1] == (Ellipsis,): - other = ((0, ) + other[1:] + (None,))[:2] - - if isinstance(other, int): - minElements, optElements = other, 0 - elif isinstance(other, tuple): - other = tuple(o if o is not Ellipsis else None for o in other) - other = (other + (None, None))[:2] - if other[0] is None: - other = (0, other[1]) - if isinstance(other[0], int) and other[1] is None: - if other[0] == 0: - return ZeroOrMore(self) - if other[0] == 1: - return OneOrMore(self) - else: - return self * other[0] + ZeroOrMore(self) - elif isinstance(other[0], int) and isinstance(other[1], int): - minElements, optElements = other - optElements -= minElements - else: - raise TypeError("cannot multiply 'ParserElement' and ('%s', '%s') objects", type(other[0]), type(other[1])) - else: - raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other)) - - if minElements < 0: - raise ValueError("cannot multiply ParserElement by negative value") - if optElements < 0: - raise ValueError("second tuple value must be greater or equal to first tuple value") - if minElements == optElements == 0: - raise ValueError("cannot multiply ParserElement by 0 or (0, 0)") - - if optElements: - def makeOptionalList(n): - if n > 1: - return Optional(self + makeOptionalList(n - 1)) - else: - return Optional(self) - if minElements: - if minElements == 1: - ret = self + makeOptionalList(optElements) - else: - ret = And([self] * minElements) + makeOptionalList(optElements) - else: - ret = makeOptionalList(optElements) - else: - if minElements == 1: - ret = self - else: - ret = And([self] * minElements) - return ret - - def __rmul__(self, other): - return self.__mul__(other) - - def __or__(self, other): - """ - Implementation of | operator - returns :class:`MatchFirst` - """ - if other is Ellipsis: - return _PendingSkip(self, must_skip=True) - - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return MatchFirst([self, other]) - - def __ror__(self, other): - """ - Implementation of | operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other | self - - def __xor__(self, other): - """ - Implementation of ^ operator - returns :class:`Or` - """ - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with 
ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return Or([self, other]) - - def __rxor__(self, other): - """ - Implementation of ^ operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other ^ self - - def __and__(self, other): - """ - Implementation of & operator - returns :class:`Each` - """ - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return Each([self, other]) - - def __rand__(self, other): - """ - Implementation of & operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other & self - - def __invert__(self): - """ - Implementation of ~ operator - returns :class:`NotAny` - """ - return NotAny(self) - - def __iter__(self): - # must implement __iter__ to override legacy use of sequential access to __getitem__ to - # iterate over a sequence - raise TypeError('%r object is not iterable' % self.__class__.__name__) - - def __getitem__(self, key): - """ - use ``[]`` indexing notation as a short form for expression repetition: - - ``expr[n]`` is equivalent to ``expr*n`` - - ``expr[m, n]`` is equivalent to ``expr*(m, n)`` - - ``expr[n, ...]`` or ``expr[n,]`` is equivalent - to ``expr*n + ZeroOrMore(expr)`` - (read as "at least n instances of ``expr``") - - ``expr[..., n]`` is equivalent to ``expr*(0, n)`` - (read as "0 to n instances of ``expr``") - - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)`` - - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)`` - ``None`` may be used in place of ``...``. - - Note that ``expr[..., n]`` and ``expr[m, n]``do not raise an exception - if more than ``n`` ``expr``s exist in the input stream. If this behavior is - desired, then write ``expr[..., n] + ~expr``. - """ - - # convert single arg keys to tuples - try: - if isinstance(key, str): - key = (key,) - iter(key) - except TypeError: - key = (key, key) - - if len(key) > 2: - warnings.warn("only 1 or 2 index arguments supported ({0}{1})".format(key[:5], - '... [{0}]'.format(len(key)) - if len(key) > 5 else '')) - - # clip to 2 elements - ret = self * tuple(key[:2]) - return ret - - def __call__(self, name=None): - """ - Shortcut for :class:`setResultsName`, with ``listAllMatches=False``. - - If ``name`` is given with a trailing ``'*'`` character, then ``listAllMatches`` will be - passed as ``True``. - - If ``name` is omitted, same as calling :class:`copy`. - - Example:: - - # these are equivalent - userdata = Word(alphas).setResultsName("name") + Word(nums + "-").setResultsName("socsecno") - userdata = Word(alphas)("name") + Word(nums + "-")("socsecno") - """ - if name is not None: - return self._setResultsName(name) - else: - return self.copy() - - def suppress(self): - """ - Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from - cluttering up returned output. 
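# Illustrative sketch (not from the vendored file): "[]" repetition and
# Suppress, per the descriptions above.
import pipenv.vendor.pyparsing as pp

wd = pp.Word(pp.alphas)
pair = wd + pp.Suppress(",") + wd
print(pair.parseString("hello, world"))      # -> ['hello', 'world']

three_words = wd[3]                          # same as wd * 3
print(three_words.parseString("a bb ccc"))   # -> ['a', 'bb', 'ccc']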
- """ - return Suppress(self) - - def leaveWhitespace(self): - """ - Disables the skipping of whitespace before matching the characters in the - :class:`ParserElement`'s defined pattern. This is normally only used internally by - the pyparsing module, but may be needed in some whitespace-sensitive grammars. - """ - self.skipWhitespace = False - return self - - def setWhitespaceChars(self, chars): - """ - Overrides the default whitespace chars - """ - self.skipWhitespace = True - self.whiteChars = chars - self.copyDefaultWhiteChars = False - return self - - def parseWithTabs(self): - """ - Overrides default behavior to expand ````s to spaces before parsing the input string. - Must be called before ``parseString`` when the input grammar contains elements that - match ```` characters. - """ - self.keepTabs = True - return self - - def ignore(self, other): - """ - Define expression to be ignored (e.g., comments) while doing pattern - matching; may be called repeatedly, to define multiple comment or other - ignorable patterns. - - Example:: - - patt = OneOrMore(Word(alphas)) - patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj'] - - patt.ignore(cStyleComment) - patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd'] - """ - if isinstance(other, basestring): - other = Suppress(other) - - if isinstance(other, Suppress): - if other not in self.ignoreExprs: - self.ignoreExprs.append(other) - else: - self.ignoreExprs.append(Suppress(other.copy())) - return self - - def setDebugActions(self, startAction, successAction, exceptionAction): - """ - Enable display of debugging messages while doing pattern matching. - """ - self.debugActions = (startAction or _defaultStartDebugAction, - successAction or _defaultSuccessDebugAction, - exceptionAction or _defaultExceptionDebugAction) - self.debug = True - return self - - def setDebug(self, flag=True): - """ - Enable display of debugging messages while doing pattern matching. - Set ``flag`` to True to enable, False to disable. - - Example:: - - wd = Word(alphas).setName("alphaword") - integer = Word(nums).setName("numword") - term = wd | integer - - # turn on debugging for wd - wd.setDebug() - - OneOrMore(term).parseString("abc 123 xyz 890") - - prints:: - - Match alphaword at loc 0(1,1) - Matched alphaword -> ['abc'] - Match alphaword at loc 3(1,4) - Exception raised:Expected alphaword (at char 4), (line:1, col:5) - Match alphaword at loc 7(1,8) - Matched alphaword -> ['xyz'] - Match alphaword at loc 11(1,12) - Exception raised:Expected alphaword (at char 12), (line:1, col:13) - Match alphaword at loc 15(1,16) - Exception raised:Expected alphaword (at char 15), (line:1, col:16) - - The output shown is that produced by the default debug actions - custom debug actions can be - specified using :class:`setDebugActions`. Prior to attempting - to match the ``wd`` expression, the debugging message ``"Match at loc (,)"`` - is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"`` - message is shown. Also note the use of :class:`setName` to assign a human-readable name to the expression, - which makes debugging and exception messages easier to understand - for instance, the default - name created for the :class:`Word` expression without calling ``setName`` is ``"W:(ABCD...)"``. 
- """ - if flag: - self.setDebugActions(_defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction) - else: - self.debug = False - return self - - def __str__(self): - return self.name - - def __repr__(self): - return _ustr(self) - - def streamline(self): - self.streamlined = True - self.strRepr = None - return self - - def checkRecursion(self, parseElementList): - pass - - def validate(self, validateTrace=None): - """ - Check defined expressions for valid structure, check for infinite recursive definitions. - """ - self.checkRecursion([]) - - def parseFile(self, file_or_filename, parseAll=False): - """ - Execute the parse expression on the given file or filename. - If a filename is specified (instead of a file object), - the entire file is opened, read, and closed before parsing. - """ - try: - file_contents = file_or_filename.read() - except AttributeError: - with open(file_or_filename, "r") as f: - file_contents = f.read() - try: - return self.parseString(file_contents, parseAll) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clearing out pyparsing internal stack trace - if getattr(exc, '__traceback__', None) is not None: - exc.__traceback__ = self._trim_traceback(exc.__traceback__) - raise exc - - def __eq__(self, other): - if self is other: - return True - elif isinstance(other, basestring): - return self.matches(other) - elif isinstance(other, ParserElement): - return vars(self) == vars(other) - return False - - def __ne__(self, other): - return not (self == other) - - def __hash__(self): - return id(self) - - def __req__(self, other): - return self == other - - def __rne__(self, other): - return not (self == other) - - def matches(self, testString, parseAll=True): - """ - Method for quick testing of a parser against a test string. Good for simple - inline microtests of sub expressions while building up larger parser. - - Parameters: - - testString - to test against this expression for a match - - parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests - - Example:: - - expr = Word(nums) - assert expr.matches("100") - """ - try: - self.parseString(_ustr(testString), parseAll=parseAll) - return True - except ParseBaseException: - return False - - def runTests(self, tests, parseAll=True, comment='#', - fullDump=True, printResults=True, failureTests=False, postParse=None, - file=None): - """ - Execute the parse expression on a series of test strings, showing each - test, the parsed results or where the parse failed. Quick and easy way to - run a parse expression against a list of sample strings. 
- - Parameters: - - tests - a list of separate test strings, or a multiline string of test strings - - parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests - - comment - (default= ``'#'``) - expression for indicating embedded comments in the test - string; pass None to disable comment filtering - - fullDump - (default= ``True``) - dump results as list followed by results names in nested outline; - if False, only dump nested list - - printResults - (default= ``True``) prints test output to stdout - - failureTests - (default= ``False``) indicates if these tests are expected to fail parsing - - postParse - (default= ``None``) optional callback for successful parse results; called as - `fn(test_string, parse_results)` and returns a string to be added to the test output - - file - (default=``None``) optional file-like object to which test output will be written; - if None, will default to ``sys.stdout`` - - Returns: a (success, results) tuple, where success indicates that all tests succeeded - (or failed if ``failureTests`` is True), and the results contain a list of lines of each - test's output - - Example:: - - number_expr = pyparsing_common.number.copy() - - result = number_expr.runTests(''' - # unsigned integer - 100 - # negative integer - -100 - # float with scientific notation - 6.02e23 - # integer with scientific notation - 1e-12 - ''') - print("Success" if result[0] else "Failed!") - - result = number_expr.runTests(''' - # stray character - 100Z - # missing leading digit before '.' - -.100 - # too many '.' - 3.14.159 - ''', failureTests=True) - print("Success" if result[0] else "Failed!") - - prints:: - - # unsigned integer - 100 - [100] - - # negative integer - -100 - [-100] - - # float with scientific notation - 6.02e23 - [6.02e+23] - - # integer with scientific notation - 1e-12 - [1e-12] - - Success - - # stray character - 100Z - ^ - FAIL: Expected end of text (at char 3), (line:1, col:4) - - # missing leading digit before '.' - -.100 - ^ - FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1) - - # too many '.' - 3.14.159 - ^ - FAIL: Expected end of text (at char 4), (line:1, col:5) - - Success - - Each test string must be on a single line. If you want to test a string that spans multiple - lines, create a test like this:: - - expr.runTest(r"this is a test\\n of strings that spans \\n 3 lines") - - (Note that this is a raw string literal, you must include the leading 'r'.) 
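# Illustrative sketch (not from the vendored file): runTests drives a parser
# over a batch of one-per-line inputs with '#' comments, as described above.
import pipenv.vendor.pyparsing as pp

integer = pp.Word(pp.nums).setName("integer")
success, results = integer.runTests("""
    # a valid integer
    100
    # not an integer at all
    abc
""")
print("all passed" if success else "some tests failed")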
- """ - if isinstance(tests, basestring): - tests = list(map(str.strip, tests.rstrip().splitlines())) - if isinstance(comment, basestring): - comment = Literal(comment) - if file is None: - file = sys.stdout - print_ = file.write - - allResults = [] - comments = [] - success = True - NL = Literal(r'\n').addParseAction(replaceWith('\n')).ignore(quotedString) - BOM = u'\ufeff' - for t in tests: - if comment is not None and comment.matches(t, False) or comments and not t: - comments.append(t) - continue - if not t: - continue - out = ['\n' + '\n'.join(comments) if comments else '', t] - comments = [] - try: - # convert newline marks to actual newlines, and strip leading BOM if present - t = NL.transformString(t.lstrip(BOM)) - result = self.parseString(t, parseAll=parseAll) - except ParseBaseException as pe: - fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" - if '\n' in t: - out.append(line(pe.loc, t)) - out.append(' ' * (col(pe.loc, t) - 1) + '^' + fatal) - else: - out.append(' ' * pe.loc + '^' + fatal) - out.append("FAIL: " + str(pe)) - success = success and failureTests - result = pe - except Exception as exc: - out.append("FAIL-EXCEPTION: " + str(exc)) - success = success and failureTests - result = exc - else: - success = success and not failureTests - if postParse is not None: - try: - pp_value = postParse(t, result) - if pp_value is not None: - if isinstance(pp_value, ParseResults): - out.append(pp_value.dump()) - else: - out.append(str(pp_value)) - else: - out.append(result.dump()) - except Exception as e: - out.append(result.dump(full=fullDump)) - out.append("{0} failed: {1}: {2}".format(postParse.__name__, type(e).__name__, e)) - else: - out.append(result.dump(full=fullDump)) - - if printResults: - if fullDump: - out.append('') - print_('\n'.join(out)) - - allResults.append((t, result)) - - return success, allResults - - -class _PendingSkip(ParserElement): - # internal placeholder class to hold a place were '...' is added to a parser element, - # once another ParserElement is added, this placeholder will be replaced with a SkipTo - def __init__(self, expr, must_skip=False): - super(_PendingSkip, self).__init__() - self.strRepr = str(expr + Empty()).replace('Empty', '...') - self.name = self.strRepr - self.anchor = expr - self.must_skip = must_skip - - def __add__(self, other): - skipper = SkipTo(other).setName("...")("_skipped*") - if self.must_skip: - def must_skip(t): - if not t._skipped or t._skipped.asList() == ['']: - del t[0] - t.pop("_skipped", None) - def show_skip(t): - if t._skipped.asList()[-1:] == ['']: - skipped = t.pop('_skipped') - t['_skipped'] = 'missing <' + repr(self.anchor) + '>' - return (self.anchor + skipper().addParseAction(must_skip) - | skipper().addParseAction(show_skip)) + other - - return self.anchor + skipper + other - - def __repr__(self): - return self.strRepr - - def parseImpl(self, *args): - raise Exception("use of `...` expression without following SkipTo target expression") - - -class Token(ParserElement): - """Abstract :class:`ParserElement` subclass, for defining atomic - matching patterns. - """ - def __init__(self): - super(Token, self).__init__(savelist=False) - - -class Empty(Token): - """An empty token, will always match. - """ - def __init__(self): - super(Empty, self).__init__() - self.name = "Empty" - self.mayReturnEmpty = True - self.mayIndexError = False - - -class NoMatch(Token): - """A token that will never match. 
- """ - def __init__(self): - super(NoMatch, self).__init__() - self.name = "NoMatch" - self.mayReturnEmpty = True - self.mayIndexError = False - self.errmsg = "Unmatchable token" - - def parseImpl(self, instring, loc, doActions=True): - raise ParseException(instring, loc, self.errmsg, self) - - -class Literal(Token): - """Token to exactly match a specified string. - - Example:: - - Literal('blah').parseString('blah') # -> ['blah'] - Literal('blah').parseString('blahfooblah') # -> ['blah'] - Literal('blah').parseString('bla') # -> Exception: Expected "blah" - - For case-insensitive matching, use :class:`CaselessLiteral`. - - For keyword matching (force word break before and after the matched string), - use :class:`Keyword` or :class:`CaselessKeyword`. - """ - def __init__(self, matchString): - super(Literal, self).__init__() - self.match = matchString - self.matchLen = len(matchString) - try: - self.firstMatchChar = matchString[0] - except IndexError: - warnings.warn("null string passed to Literal; use Empty() instead", - SyntaxWarning, stacklevel=2) - self.__class__ = Empty - self.name = '"%s"' % _ustr(self.match) - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = False - self.mayIndexError = False - - # Performance tuning: modify __class__ to select - # a parseImpl optimized for single-character check - if self.matchLen == 1 and type(self) is Literal: - self.__class__ = _SingleCharLiteral - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] == self.firstMatchChar and instring.startswith(self.match, loc): - return loc + self.matchLen, self.match - raise ParseException(instring, loc, self.errmsg, self) - -class _SingleCharLiteral(Literal): - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] == self.firstMatchChar: - return loc + 1, self.match - raise ParseException(instring, loc, self.errmsg, self) - -_L = Literal -ParserElement._literalStringClass = Literal - -class Keyword(Token): - """Token to exactly match a specified string as a keyword, that is, - it must be immediately followed by a non-keyword character. Compare - with :class:`Literal`: - - - ``Literal("if")`` will match the leading ``'if'`` in - ``'ifAndOnlyIf'``. - - ``Keyword("if")`` will not; it will only match the leading - ``'if'`` in ``'if x=1'``, or ``'if(y==2)'`` - - Accepts two optional constructor arguments in addition to the - keyword string: - - - ``identChars`` is a string of characters that would be valid - identifier characters, defaulting to all alphanumerics + "_" and - "$" - - ``caseless`` allows case-insensitive matching, default is ``False``. - - Example:: - - Keyword("start").parseString("start") # -> ['start'] - Keyword("start").parseString("starting") # -> Exception - - For case-insensitive matching, use :class:`CaselessKeyword`. 
- """ - DEFAULT_KEYWORD_CHARS = alphanums + "_$" - - def __init__(self, matchString, identChars=None, caseless=False): - super(Keyword, self).__init__() - if identChars is None: - identChars = Keyword.DEFAULT_KEYWORD_CHARS - self.match = matchString - self.matchLen = len(matchString) - try: - self.firstMatchChar = matchString[0] - except IndexError: - warnings.warn("null string passed to Keyword; use Empty() instead", - SyntaxWarning, stacklevel=2) - self.name = '"%s"' % self.match - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = False - self.mayIndexError = False - self.caseless = caseless - if caseless: - self.caselessmatch = matchString.upper() - identChars = identChars.upper() - self.identChars = set(identChars) - - def parseImpl(self, instring, loc, doActions=True): - if self.caseless: - if ((instring[loc:loc + self.matchLen].upper() == self.caselessmatch) - and (loc >= len(instring) - self.matchLen - or instring[loc + self.matchLen].upper() not in self.identChars) - and (loc == 0 - or instring[loc - 1].upper() not in self.identChars)): - return loc + self.matchLen, self.match - - else: - if instring[loc] == self.firstMatchChar: - if ((self.matchLen == 1 or instring.startswith(self.match, loc)) - and (loc >= len(instring) - self.matchLen - or instring[loc + self.matchLen] not in self.identChars) - and (loc == 0 or instring[loc - 1] not in self.identChars)): - return loc + self.matchLen, self.match - - raise ParseException(instring, loc, self.errmsg, self) - - def copy(self): - c = super(Keyword, self).copy() - c.identChars = Keyword.DEFAULT_KEYWORD_CHARS - return c - - @staticmethod - def setDefaultKeywordChars(chars): - """Overrides the default Keyword chars - """ - Keyword.DEFAULT_KEYWORD_CHARS = chars - -class CaselessLiteral(Literal): - """Token to match a specified string, ignoring case of letters. - Note: the matched results will always be in the case of the given - match string, NOT the case of the input text. - - Example:: - - OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD'] - - (Contrast with example for :class:`CaselessKeyword`.) - """ - def __init__(self, matchString): - super(CaselessLiteral, self).__init__(matchString.upper()) - # Preserve the defining literal. - self.returnString = matchString - self.name = "'%s'" % self.returnString - self.errmsg = "Expected " + self.name - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc:loc + self.matchLen].upper() == self.match: - return loc + self.matchLen, self.returnString - raise ParseException(instring, loc, self.errmsg, self) - -class CaselessKeyword(Keyword): - """ - Caseless version of :class:`Keyword`. - - Example:: - - OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD'] - - (Contrast with example for :class:`CaselessLiteral`.) - """ - def __init__(self, matchString, identChars=None): - super(CaselessKeyword, self).__init__(matchString, identChars, caseless=True) - -class CloseMatch(Token): - """A variation on :class:`Literal` which matches "close" matches, - that is, strings with at most 'n' mismatching characters. 
- :class:`CloseMatch` takes parameters: - - - ``match_string`` - string to be matched - - ``maxMismatches`` - (``default=1``) maximum number of - mismatches allowed to count as a match - - The results from a successful parse will contain the matched text - from the input string and the following named results: - - - ``mismatches`` - a list of the positions within the - match_string where mismatches were found - - ``original`` - the original match_string used to compare - against the input string - - If ``mismatches`` is an empty list, then the match was an exact - match. - - Example:: - - patt = CloseMatch("ATCATCGAATGGA") - patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) - patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) - - # exact match - patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) - - # close match allowing up to 2 mismatches - patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2) - patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) - """ - def __init__(self, match_string, maxMismatches=1): - super(CloseMatch, self).__init__() - self.name = match_string - self.match_string = match_string - self.maxMismatches = maxMismatches - self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches) - self.mayIndexError = False - self.mayReturnEmpty = False - - def parseImpl(self, instring, loc, doActions=True): - start = loc - instrlen = len(instring) - maxloc = start + len(self.match_string) - - if maxloc <= instrlen: - match_string = self.match_string - match_stringloc = 0 - mismatches = [] - maxMismatches = self.maxMismatches - - for match_stringloc, s_m in enumerate(zip(instring[loc:maxloc], match_string)): - src, mat = s_m - if src != mat: - mismatches.append(match_stringloc) - if len(mismatches) > maxMismatches: - break - else: - loc = match_stringloc + 1 - results = ParseResults([instring[start:loc]]) - results['original'] = match_string - results['mismatches'] = mismatches - return loc, results - - raise ParseException(instring, loc, self.errmsg, self) - - -class Word(Token): - """Token for matching words composed of allowed character sets. - Defined with string containing all allowed initial characters, an - optional string containing allowed body characters (if omitted, - defaults to the initial character set), and an optional minimum, - maximum, and/or exact length. The default value for ``min`` is - 1 (a minimum value < 1 is not valid); the default values for - ``max`` and ``exact`` are 0, meaning no maximum or exact - length restriction. An optional ``excludeChars`` parameter can - list characters that might be found in the input ``bodyChars`` - string; useful to define a word of all printables except for one or - two characters, for instance. - - :class:`srange` is useful for defining custom character set strings - for defining ``Word`` expressions, using range notation from - regular expression character sets. - - A common mistake is to use :class:`Word` to match a specific literal - string, as in ``Word("Address")``. Remember that :class:`Word` - uses the string argument to define *sets* of matchable characters. - This expression would match "Add", "AAA", "dAred", or any other word - made up of the characters 'A', 'd', 'r', 'e', and 's'. 
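# Illustrative sketch (not from the vendored file): Word matches runs drawn
# from character *sets*, as the caution above explains.
import pipenv.vendor.pyparsing as pp

hostname = pp.Word(pp.alphas, pp.alphanums + "-")
print(hostname.parseString("my-host-01"))       # -> ['my-host-01']

csv_value = pp.Word(pp.printables, excludeChars=",")
print(csv_value.parseString("abc,def"))         # -> ['abc']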
To match an - exact literal string, use :class:`Literal` or :class:`Keyword`. - - pyparsing includes helper strings for building Words: - - - :class:`alphas` - - :class:`nums` - - :class:`alphanums` - - :class:`hexnums` - - :class:`alphas8bit` (alphabetic characters in ASCII range 128-255 - - accented, tilded, umlauted, etc.) - - :class:`punc8bit` (non-alphabetic characters in ASCII range - 128-255 - currency, symbols, superscripts, diacriticals, etc.) - - :class:`printables` (any non-whitespace character) - - Example:: - - # a word composed of digits - integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) - - # a word with a leading capital, and zero or more lowercase - capital_word = Word(alphas.upper(), alphas.lower()) - - # hostnames are alphanumeric, with leading alpha, and '-' - hostname = Word(alphas, alphanums + '-') - - # roman numeral (not a strict parser, accepts invalid mix of characters) - roman = Word("IVXLCDM") - - # any string of non-whitespace characters, except for ',' - csv_value = Word(printables, excludeChars=",") - """ - def __init__(self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None): - super(Word, self).__init__() - if excludeChars: - excludeChars = set(excludeChars) - initChars = ''.join(c for c in initChars if c not in excludeChars) - if bodyChars: - bodyChars = ''.join(c for c in bodyChars if c not in excludeChars) - self.initCharsOrig = initChars - self.initChars = set(initChars) - if bodyChars: - self.bodyCharsOrig = bodyChars - self.bodyChars = set(bodyChars) - else: - self.bodyCharsOrig = initChars - self.bodyChars = set(initChars) - - self.maxSpecified = max > 0 - - if min < 1: - raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted") - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.asKeyword = asKeyword - - if ' ' not in self.initCharsOrig + self.bodyCharsOrig and (min == 1 and max == 0 and exact == 0): - if self.bodyCharsOrig == self.initCharsOrig: - self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig) - elif len(self.initCharsOrig) == 1: - self.reString = "%s[%s]*" % (re.escape(self.initCharsOrig), - _escapeRegexRangeChars(self.bodyCharsOrig),) - else: - self.reString = "[%s][%s]*" % (_escapeRegexRangeChars(self.initCharsOrig), - _escapeRegexRangeChars(self.bodyCharsOrig),) - if self.asKeyword: - self.reString = r"\b" + self.reString + r"\b" - - try: - self.re = re.compile(self.reString) - except Exception: - self.re = None - else: - self.re_match = self.re.match - self.__class__ = _WordRegex - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] not in self.initChars: - raise ParseException(instring, loc, self.errmsg, self) - - start = loc - loc += 1 - instrlen = len(instring) - bodychars = self.bodyChars - maxloc = start + self.maxLen - maxloc = min(maxloc, instrlen) - while loc < maxloc and instring[loc] in bodychars: - loc += 1 - - throwException = False - if loc - start < self.minLen: - throwException = True - elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars: - throwException = True - elif self.asKeyword: - if (start > 0 and instring[start - 1] in bodychars - or loc < instrlen and instring[loc] in bodychars): - throwException = True - - if throwException: - raise 
ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - def __str__(self): - try: - return super(Word, self).__str__() - except Exception: - pass - - if self.strRepr is None: - - def charsAsStr(s): - if len(s) > 4: - return s[:4] + "..." - else: - return s - - if self.initCharsOrig != self.bodyCharsOrig: - self.strRepr = "W:(%s, %s)" % (charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig)) - else: - self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig) - - return self.strRepr - -class _WordRegex(Word): - def parseImpl(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - return loc, result.group() - - -class Char(_WordRegex): - """A short-cut class for defining ``Word(characters, exact=1)``, - when defining a match of any single character in a string of - characters. - """ - def __init__(self, charset, asKeyword=False, excludeChars=None): - super(Char, self).__init__(charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars) - self.reString = "[%s]" % _escapeRegexRangeChars(''.join(self.initChars)) - if asKeyword: - self.reString = r"\b%s\b" % self.reString - self.re = re.compile(self.reString) - self.re_match = self.re.match - - -class Regex(Token): - r"""Token for matching strings that match a given regular - expression. Defined with string specifying the regular expression in - a form recognized by the stdlib Python `re module `_. - If the given regex contains named groups (defined using ``(?P...)``), - these will be preserved as named parse results. - - If instead of the Python stdlib re module you wish to use a different RE module - (such as the `regex` module), you can replace it by either building your - Regex object with a compiled RE that was compiled using regex: - - Example:: - - realnum = Regex(r"[+-]?\d+\.\d*") - date = Regex(r'(?P\d{4})-(?P\d\d?)-(?P\d\d?)') - # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression - roman = Regex(r"M{0,4}(CM|CD|D?{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") - - # use regex module instead of stdlib re module to construct a Regex using - # a compiled regular expression - import regex - parser = pp.Regex(regex.compile(r'[0-9]')) - - """ - def __init__(self, pattern, flags=0, asGroupList=False, asMatch=False): - """The parameters ``pattern`` and ``flags`` are passed - to the ``re.compile()`` function as-is. See the Python - `re module `_ module for an - explanation of the acceptable patterns and flags. 
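# Illustrative sketch (not from the vendored file): named groups in a Regex
# pattern become named parse results, as described above.
import pipenv.vendor.pyparsing as pp

date = pp.Regex(r"(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)")
result = date.parseString("2022-03-15")
print(result["year"], result["month"], result["day"])   # -> 2022 3 15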
- """ - super(Regex, self).__init__() - - if isinstance(pattern, basestring): - if not pattern: - warnings.warn("null string passed to Regex; use Empty() instead", - SyntaxWarning, stacklevel=2) - - self.pattern = pattern - self.flags = flags - - try: - self.re = re.compile(self.pattern, self.flags) - self.reString = self.pattern - except sre_constants.error: - warnings.warn("invalid pattern (%s) passed to Regex" % pattern, - SyntaxWarning, stacklevel=2) - raise - - elif hasattr(pattern, 'pattern') and hasattr(pattern, 'match'): - self.re = pattern - self.pattern = self.reString = pattern.pattern - self.flags = flags - - else: - raise TypeError("Regex may only be constructed with a string or a compiled RE object") - - self.re_match = self.re.match - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.mayReturnEmpty = self.re_match("") is not None - self.asGroupList = asGroupList - self.asMatch = asMatch - if self.asGroupList: - self.parseImpl = self.parseImplAsGroupList - if self.asMatch: - self.parseImpl = self.parseImplAsMatch - - def parseImpl(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = ParseResults(result.group()) - d = result.groupdict() - if d: - for k, v in d.items(): - ret[k] = v - return loc, ret - - def parseImplAsGroupList(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result.groups() - return loc, ret - - def parseImplAsMatch(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result - return loc, ret - - def __str__(self): - try: - return super(Regex, self).__str__() - except Exception: - pass - - if self.strRepr is None: - self.strRepr = "Re:(%s)" % repr(self.pattern) - - return self.strRepr - - def sub(self, repl): - r""" - Return Regex with an attached parse action to transform the parsed - result as if called using `re.sub(expr, repl, string) `_. - - Example:: - - make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2") - print(make_html.transformString("h1:main title:")) - # prints "
<h1>main title</h1>
" - """ - if self.asGroupList: - warnings.warn("cannot use sub() with Regex(asGroupList=True)", - SyntaxWarning, stacklevel=2) - raise SyntaxError() - - if self.asMatch and callable(repl): - warnings.warn("cannot use sub() with a callable with Regex(asMatch=True)", - SyntaxWarning, stacklevel=2) - raise SyntaxError() - - if self.asMatch: - def pa(tokens): - return tokens[0].expand(repl) - else: - def pa(tokens): - return self.re.sub(repl, tokens[0]) - return self.addParseAction(pa) - -class QuotedString(Token): - r""" - Token for matching strings that are delimited by quoting characters. - - Defined with the following parameters: - - - quoteChar - string of one or more characters defining the - quote delimiting string - - escChar - character to escape quotes, typically backslash - (default= ``None``) - - escQuote - special quote sequence to escape an embedded quote - string (such as SQL's ``""`` to escape an embedded ``"``) - (default= ``None``) - - multiline - boolean indicating whether quotes can span - multiple lines (default= ``False``) - - unquoteResults - boolean indicating whether the matched text - should be unquoted (default= ``True``) - - endQuoteChar - string of one or more characters defining the - end of the quote delimited string (default= ``None`` => same as - quoteChar) - - convertWhitespaceEscapes - convert escaped whitespace - (``'\t'``, ``'\n'``, etc.) to actual whitespace - (default= ``True``) - - Example:: - - qs = QuotedString('"') - print(qs.searchString('lsjdf "This is the quote" sldjf')) - complex_qs = QuotedString('{{', endQuoteChar='}}') - print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf')) - sql_qs = QuotedString('"', escQuote='""') - print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) - - prints:: - - [['This is the quote']] - [['This is the "quote"']] - [['This is the quote with "embedded" quotes']] - """ - def __init__(self, quoteChar, escChar=None, escQuote=None, multiline=False, - unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True): - super(QuotedString, self).__init__() - - # remove white space from quote chars - wont work anyway - quoteChar = quoteChar.strip() - if not quoteChar: - warnings.warn("quoteChar cannot be the empty string", SyntaxWarning, stacklevel=2) - raise SyntaxError() - - if endQuoteChar is None: - endQuoteChar = quoteChar - else: - endQuoteChar = endQuoteChar.strip() - if not endQuoteChar: - warnings.warn("endQuoteChar cannot be the empty string", SyntaxWarning, stacklevel=2) - raise SyntaxError() - - self.quoteChar = quoteChar - self.quoteCharLen = len(quoteChar) - self.firstQuoteChar = quoteChar[0] - self.endQuoteChar = endQuoteChar - self.endQuoteCharLen = len(endQuoteChar) - self.escChar = escChar - self.escQuote = escQuote - self.unquoteResults = unquoteResults - self.convertWhitespaceEscapes = convertWhitespaceEscapes - - if multiline: - self.flags = re.MULTILINE | re.DOTALL - self.pattern = r'%s(?:[^%s%s]' % (re.escape(self.quoteChar), - _escapeRegexRangeChars(self.endQuoteChar[0]), - (escChar is not None and _escapeRegexRangeChars(escChar) or '')) - else: - self.flags = 0 - self.pattern = r'%s(?:[^%s\n\r%s]' % (re.escape(self.quoteChar), - _escapeRegexRangeChars(self.endQuoteChar[0]), - (escChar is not None and _escapeRegexRangeChars(escChar) or '')) - if len(self.endQuoteChar) > 1: - self.pattern += ( - '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]), - _escapeRegexRangeChars(self.endQuoteChar[i])) - for i in range(len(self.endQuoteChar) 
- 1, 0, -1)) + ')') - - if escQuote: - self.pattern += (r'|(?:%s)' % re.escape(escQuote)) - if escChar: - self.pattern += (r'|(?:%s.)' % re.escape(escChar)) - self.escCharReplacePattern = re.escape(self.escChar) + "(.)" - self.pattern += (r')*%s' % re.escape(self.endQuoteChar)) - - try: - self.re = re.compile(self.pattern, self.flags) - self.reString = self.pattern - self.re_match = self.re.match - except sre_constants.error: - warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern, - SyntaxWarning, stacklevel=2) - raise - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - result = instring[loc] == self.firstQuoteChar and self.re_match(instring, loc) or None - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result.group() - - if self.unquoteResults: - - # strip off quotes - ret = ret[self.quoteCharLen: -self.endQuoteCharLen] - - if isinstance(ret, basestring): - # replace escaped whitespace - if '\\' in ret and self.convertWhitespaceEscapes: - ws_map = { - r'\t': '\t', - r'\n': '\n', - r'\f': '\f', - r'\r': '\r', - } - for wslit, wschar in ws_map.items(): - ret = ret.replace(wslit, wschar) - - # replace escaped characters - if self.escChar: - ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret) - - # replace escaped quotes - if self.escQuote: - ret = ret.replace(self.escQuote, self.endQuoteChar) - - return loc, ret - - def __str__(self): - try: - return super(QuotedString, self).__str__() - except Exception: - pass - - if self.strRepr is None: - self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar) - - return self.strRepr - - -class CharsNotIn(Token): - """Token for matching words composed of characters *not* in a given - set (will include whitespace in matched characters if not listed in - the provided exclusion set - see example). Defined with string - containing all disallowed characters, and an optional minimum, - maximum, and/or exact length. The default value for ``min`` is - 1 (a minimum value < 1 is not valid); the default values for - ``max`` and ``exact`` are 0, meaning no maximum or exact - length restriction. 
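(Illustrative sketch, not part of the removed file, assuming the pyparsing 2.4.x API defined in this module: ``CharsNotIn`` as a negated character set)::

    from pyparsing import CharsNotIn

    # everything up to the first ';' is matched; whitespace is kept,
    # since only the listed characters are excluded
    stmt = CharsNotIn(';')
    print(stmt.parseString('drop table x; commit'))  # -> ['drop table x']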
- - Example:: - - # define a comma-separated-value as anything that is not a ',' - csv_value = CharsNotIn(',') - print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213")) - - prints:: - - ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] - """ - def __init__(self, notChars, min=1, max=0, exact=0): - super(CharsNotIn, self).__init__() - self.skipWhitespace = False - self.notChars = notChars - - if min < 1: - raise ValueError("cannot specify a minimum length < 1; use " - "Optional(CharsNotIn()) if zero-length char group is permitted") - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = (self.minLen == 0) - self.mayIndexError = False - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] in self.notChars: - raise ParseException(instring, loc, self.errmsg, self) - - start = loc - loc += 1 - notchars = self.notChars - maxlen = min(start + self.maxLen, len(instring)) - while loc < maxlen and instring[loc] not in notchars: - loc += 1 - - if loc - start < self.minLen: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - def __str__(self): - try: - return super(CharsNotIn, self).__str__() - except Exception: - pass - - if self.strRepr is None: - if len(self.notChars) > 4: - self.strRepr = "!W:(%s...)" % self.notChars[:4] - else: - self.strRepr = "!W:(%s)" % self.notChars - - return self.strRepr - -class White(Token): - """Special matching class for matching whitespace. Normally, - whitespace is ignored by pyparsing grammars. This class is included - when some whitespace structures are significant. Define with - a string containing the whitespace characters to be matched; default - is ``" \\t\\r\\n"``. Also takes optional ``min``, - ``max``, and ``exact`` arguments, as defined for the - :class:`Word` class. 
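(Illustrative sketch, not part of the removed file, assuming the API in this module: making a leading tab significant)::

    from pyparsing import White, Word, alphas

    # Makefile-style recipe lines: the tab is matched explicitly
    # instead of being skipped as ordinary whitespace
    recipe_line = White('\t') + Word(alphas)
    print(recipe_line.parseString('\tbuild'))  # -> ['\t', 'build']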
- """ - whiteStrs = { - ' ' : '', - '\t': '', - '\n': '', - '\r': '', - '\f': '', - u'\u00A0': '', - u'\u1680': '', - u'\u180E': '', - u'\u2000': '', - u'\u2001': '', - u'\u2002': '', - u'\u2003': '', - u'\u2004': '', - u'\u2005': '', - u'\u2006': '', - u'\u2007': '', - u'\u2008': '', - u'\u2009': '', - u'\u200A': '', - u'\u200B': '', - u'\u202F': '', - u'\u205F': '', - u'\u3000': '', - } - def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0): - super(White, self).__init__() - self.matchWhite = ws - self.setWhitespaceChars("".join(c for c in self.whiteChars if c not in self.matchWhite)) - # ~ self.leaveWhitespace() - self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite)) - self.mayReturnEmpty = True - self.errmsg = "Expected " + self.name - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] not in self.matchWhite: - raise ParseException(instring, loc, self.errmsg, self) - start = loc - loc += 1 - maxloc = start + self.maxLen - maxloc = min(maxloc, len(instring)) - while loc < maxloc and instring[loc] in self.matchWhite: - loc += 1 - - if loc - start < self.minLen: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - -class _PositionToken(Token): - def __init__(self): - super(_PositionToken, self).__init__() - self.name = self.__class__.__name__ - self.mayReturnEmpty = True - self.mayIndexError = False - -class GoToColumn(_PositionToken): - """Token to advance to a specific column of input text; useful for - tabular report scraping. - """ - def __init__(self, colno): - super(GoToColumn, self).__init__() - self.col = colno - - def preParse(self, instring, loc): - if col(loc, instring) != self.col: - instrlen = len(instring) - if self.ignoreExprs: - loc = self._skipIgnorables(instring, loc) - while loc < instrlen and instring[loc].isspace() and col(loc, instring) != self.col: - loc += 1 - return loc - - def parseImpl(self, instring, loc, doActions=True): - thiscol = col(loc, instring) - if thiscol > self.col: - raise ParseException(instring, loc, "Text not in expected column", self) - newloc = loc + self.col - thiscol - ret = instring[loc: newloc] - return newloc, ret - - -class LineStart(_PositionToken): - r"""Matches if current position is at the beginning of a line within - the parse string - - Example:: - - test = '''\ - AAA this line - AAA and this line - AAA but not this one - B AAA and definitely not this one - ''' - - for t in (LineStart() + 'AAA' + restOfLine).searchString(test): - print(t) - - prints:: - - ['AAA', ' this line'] - ['AAA', ' and this line'] - - """ - def __init__(self): - super(LineStart, self).__init__() - self.errmsg = "Expected start of line" - - def parseImpl(self, instring, loc, doActions=True): - if col(loc, instring) == 1: - return loc, [] - raise ParseException(instring, loc, self.errmsg, self) - -class LineEnd(_PositionToken): - """Matches if current position is at the end of a line within the - parse string - """ - def __init__(self): - super(LineEnd, self).__init__() - self.setWhitespaceChars(ParserElement.DEFAULT_WHITE_CHARS.replace("\n", "")) - self.errmsg = "Expected end of line" - - def parseImpl(self, instring, loc, doActions=True): - if loc < len(instring): - if instring[loc] == "\n": - return loc + 1, "\n" - else: - raise ParseException(instring, loc, self.errmsg, self) - elif loc == len(instring): - return loc + 1, [] - 
else: - raise ParseException(instring, loc, self.errmsg, self) - -class StringStart(_PositionToken): - """Matches if current position is at the beginning of the parse - string - """ - def __init__(self): - super(StringStart, self).__init__() - self.errmsg = "Expected start of text" - - def parseImpl(self, instring, loc, doActions=True): - if loc != 0: - # see if entire string up to here is just whitespace and ignoreables - if loc != self.preParse(instring, 0): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - -class StringEnd(_PositionToken): - """Matches if current position is at the end of the parse string - """ - def __init__(self): - super(StringEnd, self).__init__() - self.errmsg = "Expected end of text" - - def parseImpl(self, instring, loc, doActions=True): - if loc < len(instring): - raise ParseException(instring, loc, self.errmsg, self) - elif loc == len(instring): - return loc + 1, [] - elif loc > len(instring): - return loc, [] - else: - raise ParseException(instring, loc, self.errmsg, self) - -class WordStart(_PositionToken): - """Matches if the current position is at the beginning of a Word, - and is not preceded by any character in a given set of - ``wordChars`` (default= ``printables``). To emulate the - ``\b`` behavior of regular expressions, use - ``WordStart(alphanums)``. ``WordStart`` will also match at - the beginning of the string being parsed, or at the beginning of - a line. - """ - def __init__(self, wordChars=printables): - super(WordStart, self).__init__() - self.wordChars = set(wordChars) - self.errmsg = "Not at the start of a word" - - def parseImpl(self, instring, loc, doActions=True): - if loc != 0: - if (instring[loc - 1] in self.wordChars - or instring[loc] not in self.wordChars): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - -class WordEnd(_PositionToken): - """Matches if the current position is at the end of a Word, and is - not followed by any character in a given set of ``wordChars`` - (default= ``printables``). To emulate the ``\b`` behavior of - regular expressions, use ``WordEnd(alphanums)``. ``WordEnd`` - will also match at the end of the string being parsed, or at the end - of a line. - """ - def __init__(self, wordChars=printables): - super(WordEnd, self).__init__() - self.wordChars = set(wordChars) - self.skipWhitespace = False - self.errmsg = "Not at the end of a word" - - def parseImpl(self, instring, loc, doActions=True): - instrlen = len(instring) - if instrlen > 0 and loc < instrlen: - if (instring[loc] in self.wordChars or - instring[loc - 1] not in self.wordChars): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - -class ParseExpression(ParserElement): - """Abstract subclass of ParserElement, for combining and - post-processing parsed tokens. 
- """ - def __init__(self, exprs, savelist=False): - super(ParseExpression, self).__init__(savelist) - if isinstance(exprs, _generatorType): - exprs = list(exprs) - - if isinstance(exprs, basestring): - self.exprs = [self._literalStringClass(exprs)] - elif isinstance(exprs, ParserElement): - self.exprs = [exprs] - elif isinstance(exprs, Iterable): - exprs = list(exprs) - # if sequence of strings provided, wrap with Literal - if any(isinstance(expr, basestring) for expr in exprs): - exprs = (self._literalStringClass(e) if isinstance(e, basestring) else e for e in exprs) - self.exprs = list(exprs) - else: - try: - self.exprs = list(exprs) - except TypeError: - self.exprs = [exprs] - self.callPreparse = False - - def append(self, other): - self.exprs.append(other) - self.strRepr = None - return self - - def leaveWhitespace(self): - """Extends ``leaveWhitespace`` defined in base class, and also invokes ``leaveWhitespace`` on - all contained expressions.""" - self.skipWhitespace = False - self.exprs = [e.copy() for e in self.exprs] - for e in self.exprs: - e.leaveWhitespace() - return self - - def ignore(self, other): - if isinstance(other, Suppress): - if other not in self.ignoreExprs: - super(ParseExpression, self).ignore(other) - for e in self.exprs: - e.ignore(self.ignoreExprs[-1]) - else: - super(ParseExpression, self).ignore(other) - for e in self.exprs: - e.ignore(self.ignoreExprs[-1]) - return self - - def __str__(self): - try: - return super(ParseExpression, self).__str__() - except Exception: - pass - - if self.strRepr is None: - self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.exprs)) - return self.strRepr - - def streamline(self): - super(ParseExpression, self).streamline() - - for e in self.exprs: - e.streamline() - - # collapse nested And's of the form And(And(And(a, b), c), d) to And(a, b, c, d) - # but only if there are no parse actions or resultsNames on the nested And's - # (likewise for Or's and MatchFirst's) - if len(self.exprs) == 2: - other = self.exprs[0] - if (isinstance(other, self.__class__) - and not other.parseAction - and other.resultsName is None - and not other.debug): - self.exprs = other.exprs[:] + [self.exprs[1]] - self.strRepr = None - self.mayReturnEmpty |= other.mayReturnEmpty - self.mayIndexError |= other.mayIndexError - - other = self.exprs[-1] - if (isinstance(other, self.__class__) - and not other.parseAction - and other.resultsName is None - and not other.debug): - self.exprs = self.exprs[:-1] + other.exprs[:] - self.strRepr = None - self.mayReturnEmpty |= other.mayReturnEmpty - self.mayIndexError |= other.mayIndexError - - self.errmsg = "Expected " + _ustr(self) - - return self - - def validate(self, validateTrace=None): - tmp = (validateTrace if validateTrace is not None else [])[:] + [self] - for e in self.exprs: - e.validate(tmp) - self.checkRecursion([]) - - def copy(self): - ret = super(ParseExpression, self).copy() - ret.exprs = [e.copy() for e in self.exprs] - return ret - - def _setResultsName(self, name, listAllMatches=False): - if __diag__.warn_ungrouped_named_tokens_in_collection: - for e in self.exprs: - if isinstance(e, ParserElement) and e.resultsName: - warnings.warn("{0}: setting results name {1!r} on {2} expression " - "collides with {3!r} on contained expression".format("warn_ungrouped_named_tokens_in_collection", - name, - type(self).__name__, - e.resultsName), - stacklevel=3) - - return super(ParseExpression, self)._setResultsName(name, listAllMatches) - - -class And(ParseExpression): - """ - Requires all given 
:class:`ParseExpression` s to be found in the given order. - Expressions may be separated by whitespace. - May be constructed using the ``'+'`` operator. - May also be constructed using the ``'-'`` operator, which will - suppress backtracking. - - Example:: - - integer = Word(nums) - name_expr = OneOrMore(Word(alphas)) - - expr = And([integer("id"), name_expr("name"), integer("age")]) - # more easily written as: - expr = integer("id") + name_expr("name") + integer("age") - """ - - class _ErrorStop(Empty): - def __init__(self, *args, **kwargs): - super(And._ErrorStop, self).__init__(*args, **kwargs) - self.name = '-' - self.leaveWhitespace() - - def __init__(self, exprs, savelist=True): - exprs = list(exprs) - if exprs and Ellipsis in exprs: - tmp = [] - for i, expr in enumerate(exprs): - if expr is Ellipsis: - if i < len(exprs) - 1: - skipto_arg = (Empty() + exprs[i + 1]).exprs[-1] - tmp.append(SkipTo(skipto_arg)("_skipped*")) - else: - raise Exception("cannot construct And with sequence ending in ...") - else: - tmp.append(expr) - exprs[:] = tmp - super(And, self).__init__(exprs, savelist) - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - self.setWhitespaceChars(self.exprs[0].whiteChars) - self.skipWhitespace = self.exprs[0].skipWhitespace - self.callPreparse = True - - def streamline(self): - # collapse any _PendingSkip's - if self.exprs: - if any(isinstance(e, ParseExpression) and e.exprs and isinstance(e.exprs[-1], _PendingSkip) - for e in self.exprs[:-1]): - for i, e in enumerate(self.exprs[:-1]): - if e is None: - continue - if (isinstance(e, ParseExpression) - and e.exprs and isinstance(e.exprs[-1], _PendingSkip)): - e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1] - self.exprs[i + 1] = None - self.exprs = [e for e in self.exprs if e is not None] - - super(And, self).streamline() - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - return self - - def parseImpl(self, instring, loc, doActions=True): - # pass False as last arg to _parse for first element, since we already - # pre-parsed the string as part of our And pre-parsing - loc, resultlist = self.exprs[0]._parse(instring, loc, doActions, callPreParse=False) - errorStop = False - for e in self.exprs[1:]: - if isinstance(e, And._ErrorStop): - errorStop = True - continue - if errorStop: - try: - loc, exprtokens = e._parse(instring, loc, doActions) - except ParseSyntaxException: - raise - except ParseBaseException as pe: - pe.__traceback__ = None - raise ParseSyntaxException._from_exception(pe) - except IndexError: - raise ParseSyntaxException(instring, len(instring), self.errmsg, self) - else: - loc, exprtokens = e._parse(instring, loc, doActions) - if exprtokens or exprtokens.haskeys(): - resultlist += exprtokens - return loc, resultlist - - def __iadd__(self, other): - if isinstance(other, basestring): - other = self._literalStringClass(other) - return self.append(other) # And([self, other]) - - def checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.exprs: - e.checkRecursion(subRecCheckList) - if not e.mayReturnEmpty: - break - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - -class Or(ParseExpression): - """Requires that at least one :class:`ParseExpression` is found. If - two expressions match, the expression that matches the longest - string will be used. May be constructed using the ``'^'`` - operator. 
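(Illustrative sketch of the longest-match rule, not part of the removed file, assuming the API in this module)::

    from pyparsing import Word, Combine, nums

    real = Combine(Word(nums) + '.' + Word(nums))
    # '^' (Or) evaluates all alternatives and keeps the longest match,
    # so the full '3.1416' wins even though Word(nums) matches its '3'
    number = Word(nums) ^ real
    print(number.parseString('3.1416'))  # -> ['3.1416']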
- - Example:: - - # construct Or using '^' operator - - number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums)) - print(number.searchString("123 3.1416 789")) - - prints:: - - [['123'], ['3.1416'], ['789']] - """ - def __init__(self, exprs, savelist=False): - super(Or, self).__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - else: - self.mayReturnEmpty = True - - def streamline(self): - super(Or, self).streamline() - if __compat__.collect_all_And_tokens: - self.saveAsList = any(e.saveAsList for e in self.exprs) - return self - - def parseImpl(self, instring, loc, doActions=True): - maxExcLoc = -1 - maxException = None - matches = [] - for e in self.exprs: - try: - loc2 = e.tryParse(instring, loc) - except ParseException as err: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException(instring, len(instring), e.errmsg, self) - maxExcLoc = len(instring) - else: - # save match among all matches, to retry longest to shortest - matches.append((loc2, e)) - - if matches: - # re-evaluate all matches in descending order of length of match, in case attached actions - # might change whether or how much they match of the input. - matches.sort(key=itemgetter(0), reverse=True) - - if not doActions: - # no further conditions or parse actions to change the selection of - # alternative, so the first match will be the best match - best_expr = matches[0][1] - return best_expr._parse(instring, loc, doActions) - - longest = -1, None - for loc1, expr1 in matches: - if loc1 <= longest[0]: - # already have a longer match than this one will deliver, we are done - return longest - - try: - loc2, toks = expr1._parse(instring, loc, doActions) - except ParseException as err: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - else: - if loc2 >= loc1: - return loc2, toks - # didn't match as much as before - elif loc2 > longest[0]: - longest = loc2, toks - - if longest != (-1, None): - return longest - - if maxException is not None: - maxException.msg = self.errmsg - raise maxException - else: - raise ParseException(instring, loc, "no defined alternatives to match", self) - - - def __ixor__(self, other): - if isinstance(other, basestring): - other = self._literalStringClass(other) - return self.append(other) # Or([self, other]) - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - def checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.exprs: - e.checkRecursion(subRecCheckList) - - def _setResultsName(self, name, listAllMatches=False): - if (not __compat__.collect_all_And_tokens - and __diag__.warn_multiple_tokens_in_named_alternation): - if any(isinstance(e, And) for e in self.exprs): - warnings.warn("{0}: setting results name {1!r} on {2} expression " - "may only return a single token for an And alternative, " - "in future will return the full list of tokens".format( - "warn_multiple_tokens_in_named_alternation", name, type(self).__name__), - stacklevel=3) - - return super(Or, self)._setResultsName(name, listAllMatches) - - -class MatchFirst(ParseExpression): - """Requires that at least one :class:`ParseExpression` is found. 
If - two expressions match, the first one listed is the one that will - match. May be constructed using the ``'|'`` operator. - - Example:: - - # construct MatchFirst using '|' operator - - # watch the order of expressions to match - number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) - print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] - - # put more selective expression first - number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums) - print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] - """ - def __init__(self, exprs, savelist=False): - super(MatchFirst, self).__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - else: - self.mayReturnEmpty = True - - def streamline(self): - super(MatchFirst, self).streamline() - if __compat__.collect_all_And_tokens: - self.saveAsList = any(e.saveAsList for e in self.exprs) - return self - - def parseImpl(self, instring, loc, doActions=True): - maxExcLoc = -1 - maxException = None - for e in self.exprs: - try: - ret = e._parse(instring, loc, doActions) - return ret - except ParseException as err: - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException(instring, len(instring), e.errmsg, self) - maxExcLoc = len(instring) - - # only got here if no expression matched, raise exception for match that made it the furthest - else: - if maxException is not None: - maxException.msg = self.errmsg - raise maxException - else: - raise ParseException(instring, loc, "no defined alternatives to match", self) - - def __ior__(self, other): - if isinstance(other, basestring): - other = self._literalStringClass(other) - return self.append(other) # MatchFirst([self, other]) - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - def checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.exprs: - e.checkRecursion(subRecCheckList) - - def _setResultsName(self, name, listAllMatches=False): - if (not __compat__.collect_all_And_tokens - and __diag__.warn_multiple_tokens_in_named_alternation): - if any(isinstance(e, And) for e in self.exprs): - warnings.warn("{0}: setting results name {1!r} on {2} expression " - "may only return a single token for an And alternative, " - "in future will return the full list of tokens".format( - "warn_multiple_tokens_in_named_alternation", name, type(self).__name__), - stacklevel=3) - - return super(MatchFirst, self)._setResultsName(name, listAllMatches) - - -class Each(ParseExpression): - """Requires all given :class:`ParseExpression` s to be found, but in - any order. Expressions may be separated by whitespace. - - May be constructed using the ``'&'`` operator. 
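(Illustrative sketch of any-order matching, not part of the removed file, assuming the API in this module)::

    from pyparsing import Word, nums

    # '&' builds an Each: both attributes are required, in either order
    expr = ('x:' + Word(nums)('x')) & ('y:' + Word(nums)('y'))
    result = expr.parseString('y:2 x:1')
    print(result['x'], result['y'])  # -> 1 2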
- - Example:: - - color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") - shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") - integer = Word(nums) - shape_attr = "shape:" + shape_type("shape") - posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") - color_attr = "color:" + color("color") - size_attr = "size:" + integer("size") - - # use Each (using operator '&') to accept attributes in any order - # (shape and posn are required, color and size are optional) - shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr) - - shape_spec.runTests(''' - shape: SQUARE color: BLACK posn: 100, 120 - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - color:GREEN size:20 shape:TRIANGLE posn:20,40 - ''' - ) - - prints:: - - shape: SQUARE color: BLACK posn: 100, 120 - ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] - - color: BLACK - - posn: ['100', ',', '120'] - - x: 100 - - y: 120 - - shape: SQUARE - - - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] - - color: BLUE - - posn: ['50', ',', '80'] - - x: 50 - - y: 80 - - shape: CIRCLE - - size: 50 - - - color: GREEN size: 20 shape: TRIANGLE posn: 20,40 - ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] - - color: GREEN - - posn: ['20', ',', '40'] - - x: 20 - - y: 40 - - shape: TRIANGLE - - size: 20 - """ - def __init__(self, exprs, savelist=True): - super(Each, self).__init__(exprs, savelist) - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - self.skipWhitespace = True - self.initExprGroups = True - self.saveAsList = True - - def streamline(self): - super(Each, self).streamline() - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - return self - - def parseImpl(self, instring, loc, doActions=True): - if self.initExprGroups: - self.opt1map = dict((id(e.expr), e) for e in self.exprs if isinstance(e, Optional)) - opt1 = [e.expr for e in self.exprs if isinstance(e, Optional)] - opt2 = [e for e in self.exprs if e.mayReturnEmpty and not isinstance(e, (Optional, Regex))] - self.optionals = opt1 + opt2 - self.multioptionals = [e.expr for e in self.exprs if isinstance(e, ZeroOrMore)] - self.multirequired = [e.expr for e in self.exprs if isinstance(e, OneOrMore)] - self.required = [e for e in self.exprs if not isinstance(e, (Optional, ZeroOrMore, OneOrMore))] - self.required += self.multirequired - self.initExprGroups = False - tmpLoc = loc - tmpReqd = self.required[:] - tmpOpt = self.optionals[:] - matchOrder = [] - - keepMatching = True - while keepMatching: - tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired - failed = [] - for e in tmpExprs: - try: - tmpLoc = e.tryParse(instring, tmpLoc) - except ParseException: - failed.append(e) - else: - matchOrder.append(self.opt1map.get(id(e), e)) - if e in tmpReqd: - tmpReqd.remove(e) - elif e in tmpOpt: - tmpOpt.remove(e) - if len(failed) == len(tmpExprs): - keepMatching = False - - if tmpReqd: - missing = ", ".join(_ustr(e) for e in tmpReqd) - raise ParseException(instring, loc, "Missing one or more required elements (%s)" % missing) - - # add any unmatched Optionals, in case they have default values defined - matchOrder += [e for e in self.exprs if isinstance(e, Optional) and e.expr in tmpOpt] - - resultlist = [] - for e in matchOrder: - loc, results = e._parse(instring, loc, doActions) - resultlist.append(results) - - finalResults = sum(resultlist, 
ParseResults([])) - return loc, finalResults - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - def checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.exprs: - e.checkRecursion(subRecCheckList) - - -class ParseElementEnhance(ParserElement): - """Abstract subclass of :class:`ParserElement`, for combining and - post-processing parsed tokens. - """ - def __init__(self, expr, savelist=False): - super(ParseElementEnhance, self).__init__(savelist) - if isinstance(expr, basestring): - if issubclass(self._literalStringClass, Token): - expr = self._literalStringClass(expr) - else: - expr = self._literalStringClass(Literal(expr)) - self.expr = expr - self.strRepr = None - if expr is not None: - self.mayIndexError = expr.mayIndexError - self.mayReturnEmpty = expr.mayReturnEmpty - self.setWhitespaceChars(expr.whiteChars) - self.skipWhitespace = expr.skipWhitespace - self.saveAsList = expr.saveAsList - self.callPreparse = expr.callPreparse - self.ignoreExprs.extend(expr.ignoreExprs) - - def parseImpl(self, instring, loc, doActions=True): - if self.expr is not None: - return self.expr._parse(instring, loc, doActions, callPreParse=False) - else: - raise ParseException("", loc, self.errmsg, self) - - def leaveWhitespace(self): - self.skipWhitespace = False - self.expr = self.expr.copy() - if self.expr is not None: - self.expr.leaveWhitespace() - return self - - def ignore(self, other): - if isinstance(other, Suppress): - if other not in self.ignoreExprs: - super(ParseElementEnhance, self).ignore(other) - if self.expr is not None: - self.expr.ignore(self.ignoreExprs[-1]) - else: - super(ParseElementEnhance, self).ignore(other) - if self.expr is not None: - self.expr.ignore(self.ignoreExprs[-1]) - return self - - def streamline(self): - super(ParseElementEnhance, self).streamline() - if self.expr is not None: - self.expr.streamline() - return self - - def checkRecursion(self, parseElementList): - if self in parseElementList: - raise RecursiveGrammarException(parseElementList + [self]) - subRecCheckList = parseElementList[:] + [self] - if self.expr is not None: - self.expr.checkRecursion(subRecCheckList) - - def validate(self, validateTrace=None): - if validateTrace is None: - validateTrace = [] - tmp = validateTrace[:] + [self] - if self.expr is not None: - self.expr.validate(tmp) - self.checkRecursion([]) - - def __str__(self): - try: - return super(ParseElementEnhance, self).__str__() - except Exception: - pass - - if self.strRepr is None and self.expr is not None: - self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.expr)) - return self.strRepr - - -class FollowedBy(ParseElementEnhance): - """Lookahead matching of the given parse expression. - ``FollowedBy`` does *not* advance the parsing position within - the input string, it only verifies that the specified parse - expression matches at the current position. ``FollowedBy`` - always returns a null token list. If any results names are defined - in the lookahead expression, those *will* be returned for access by - name. 
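(Illustrative sketch of the zero-width lookahead, not part of the removed file, assuming the API in this module)::

    from pyparsing import FollowedBy, Word, alphas

    # match a word only when a ':' comes right after it; the ':' is
    # checked but not consumed
    label = Word(alphas) + FollowedBy(':')
    print(label.parseString('shape: SQUARE'))  # -> ['shape']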
- - Example:: - - # use FollowedBy to match a label only if it is followed by a ':' - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) - - OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint() - - prints:: - - [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] - """ - def __init__(self, expr): - super(FollowedBy, self).__init__(expr) - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - # by using self._expr.parse and deleting the contents of the returned ParseResults list - # we keep any named results that were defined in the FollowedBy expression - _, ret = self.expr._parse(instring, loc, doActions=doActions) - del ret[:] - - return loc, ret - - -class PrecededBy(ParseElementEnhance): - """Lookbehind matching of the given parse expression. - ``PrecededBy`` does not advance the parsing position within the - input string, it only verifies that the specified parse expression - matches prior to the current position. ``PrecededBy`` always - returns a null token list, but if a results name is defined on the - given expression, it is returned. - - Parameters: - - - expr - expression that must match prior to the current parse - location - - retreat - (default= ``None``) - (int) maximum number of characters - to lookbehind prior to the current parse location - - If the lookbehind expression is a string, Literal, Keyword, or - a Word or CharsNotIn with a specified exact or maximum length, then - the retreat parameter is not required. Otherwise, retreat must be - specified to give a maximum number of characters to look back from - the current parse position for a lookbehind match. 
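(Illustrative sketch of an exact-width lookbehind, not part of the removed file, assuming the API in this module; the retreat of 1 is inferred from the one-character literal)::

    from pyparsing import PrecededBy, Word, nums

    # digits count as a price only when immediately preceded by '$';
    # the '$' itself is neither consumed nor returned
    price = PrecededBy('$') + Word(nums)
    print(price.searchString('$100 and 200'))  # -> [['100']]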
- - Example:: - - # VB-style variable names with type prefixes - int_var = PrecededBy("#") + pyparsing_common.identifier - str_var = PrecededBy("$") + pyparsing_common.identifier - - """ - def __init__(self, expr, retreat=None): - super(PrecededBy, self).__init__(expr) - self.expr = self.expr().leaveWhitespace() - self.mayReturnEmpty = True - self.mayIndexError = False - self.exact = False - if isinstance(expr, str): - retreat = len(expr) - self.exact = True - elif isinstance(expr, (Literal, Keyword)): - retreat = expr.matchLen - self.exact = True - elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT: - retreat = expr.maxLen - self.exact = True - elif isinstance(expr, _PositionToken): - retreat = 0 - self.exact = True - self.retreat = retreat - self.errmsg = "not preceded by " + str(expr) - self.skipWhitespace = False - self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None))) - - def parseImpl(self, instring, loc=0, doActions=True): - if self.exact: - if loc < self.retreat: - raise ParseException(instring, loc, self.errmsg) - start = loc - self.retreat - _, ret = self.expr._parse(instring, start) - else: - # retreat specified a maximum lookbehind window, iterate - test_expr = self.expr + StringEnd() - instring_slice = instring[max(0, loc - self.retreat):loc] - last_expr = ParseException(instring, loc, self.errmsg) - for offset in range(1, min(loc, self.retreat + 1)+1): - try: - # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:])) - _, ret = test_expr._parse(instring_slice, len(instring_slice) - offset) - except ParseBaseException as pbe: - last_expr = pbe - else: - break - else: - raise last_expr - return loc, ret - - -class NotAny(ParseElementEnhance): - """Lookahead to disallow matching with the given parse expression. - ``NotAny`` does *not* advance the parsing position within the - input string, it only verifies that the specified parse expression - does *not* match at the current position. Also, ``NotAny`` does - *not* skip over leading whitespace. ``NotAny`` always returns - a null token list. May be constructed using the '~' operator. - - Example:: - - AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split()) - - # take care not to mistake keywords for identifiers - ident = ~(AND | OR | NOT) + Word(alphas) - boolean_term = Optional(NOT) + ident - - # very crude boolean expression - to support parenthesis groups and - # operation hierarchy, use infixNotation - boolean_expr = boolean_term + ZeroOrMore((AND | OR) + boolean_term) - - # integers that are followed by "." 
are actually floats - integer = Word(nums) + ~Char(".") - """ - def __init__(self, expr): - super(NotAny, self).__init__(expr) - # ~ self.leaveWhitespace() - self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs - self.mayReturnEmpty = True - self.errmsg = "Found unwanted token, " + _ustr(self.expr) - - def parseImpl(self, instring, loc, doActions=True): - if self.expr.canParseNext(instring, loc): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "~{" + _ustr(self.expr) + "}" - - return self.strRepr - -class _MultipleMatch(ParseElementEnhance): - def __init__(self, expr, stopOn=None): - super(_MultipleMatch, self).__init__(expr) - self.saveAsList = True - ender = stopOn - if isinstance(ender, basestring): - ender = self._literalStringClass(ender) - self.stopOn(ender) - - def stopOn(self, ender): - if isinstance(ender, basestring): - ender = self._literalStringClass(ender) - self.not_ender = ~ender if ender is not None else None - return self - - def parseImpl(self, instring, loc, doActions=True): - self_expr_parse = self.expr._parse - self_skip_ignorables = self._skipIgnorables - check_ender = self.not_ender is not None - if check_ender: - try_not_ender = self.not_ender.tryParse - - # must be at least one (but first see if we are the stopOn sentinel; - # if so, fail) - if check_ender: - try_not_ender(instring, loc) - loc, tokens = self_expr_parse(instring, loc, doActions, callPreParse=False) - try: - hasIgnoreExprs = (not not self.ignoreExprs) - while 1: - if check_ender: - try_not_ender(instring, loc) - if hasIgnoreExprs: - preloc = self_skip_ignorables(instring, loc) - else: - preloc = loc - loc, tmptokens = self_expr_parse(instring, preloc, doActions) - if tmptokens or tmptokens.haskeys(): - tokens += tmptokens - except (ParseException, IndexError): - pass - - return loc, tokens - - def _setResultsName(self, name, listAllMatches=False): - if __diag__.warn_ungrouped_named_tokens_in_collection: - for e in [self.expr] + getattr(self.expr, 'exprs', []): - if isinstance(e, ParserElement) and e.resultsName: - warnings.warn("{0}: setting results name {1!r} on {2} expression " - "collides with {3!r} on contained expression".format("warn_ungrouped_named_tokens_in_collection", - name, - type(self).__name__, - e.resultsName), - stacklevel=3) - - return super(_MultipleMatch, self)._setResultsName(name, listAllMatches) - - -class OneOrMore(_MultipleMatch): - """Repetition of one or more of the given expression. - - Parameters: - - expr - expression that must match one or more times - - stopOn - (default= ``None``) - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) - - Example:: - - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) - - text = "shape: SQUARE posn: upper left color: BLACK" - OneOrMore(attr_expr).parseString(text).pprint() # Fail! 
read 'color' as data instead of next label -> [['shape', 'SQUARE color']] - - # use stopOn attribute for OneOrMore to avoid reading label string as part of the data - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) - OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] - - # could also be written as - (attr_expr * (1,)).parseString(text).pprint() - """ - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + _ustr(self.expr) + "}..." - - return self.strRepr - -class ZeroOrMore(_MultipleMatch): - """Optional repetition of zero or more of the given expression. - - Parameters: - - expr - expression that must match zero or more times - - stopOn - (default= ``None``) - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) - - Example: similar to :class:`OneOrMore` - """ - def __init__(self, expr, stopOn=None): - super(ZeroOrMore, self).__init__(expr, stopOn=stopOn) - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - try: - return super(ZeroOrMore, self).parseImpl(instring, loc, doActions) - except (ParseException, IndexError): - return loc, [] - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "[" + _ustr(self.expr) + "]..." - - return self.strRepr - - -class _NullToken(object): - def __bool__(self): - return False - __nonzero__ = __bool__ - def __str__(self): - return "" - -class Optional(ParseElementEnhance): - """Optional matching of the given expression. - - Parameters: - - expr - expression that must match zero or more times - - default (optional) - value to be returned if the optional expression is not found. - - Example:: - - # US postal code can be a 5-digit zip, plus optional 4-digit qualifier - zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4))) - zip.runTests(''' - # traditional ZIP code - 12345 - - # ZIP+4 form - 12101-0001 - - # invalid ZIP - 98765- - ''') - - prints:: - - # traditional ZIP code - 12345 - ['12345'] - - # ZIP+4 form - 12101-0001 - ['12101-0001'] - - # invalid ZIP - 98765- - ^ - FAIL: Expected end of text (at char 5), (line:1, col:6) - """ - __optionalNotMatched = _NullToken() - - def __init__(self, expr, default=__optionalNotMatched): - super(Optional, self).__init__(expr, savelist=False) - self.saveAsList = self.expr.saveAsList - self.defaultValue = default - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - try: - loc, tokens = self.expr._parse(instring, loc, doActions, callPreParse=False) - except (ParseException, IndexError): - if self.defaultValue is not self.__optionalNotMatched: - if self.expr.resultsName: - tokens = ParseResults([self.defaultValue]) - tokens[self.expr.resultsName] = self.defaultValue - else: - tokens = [self.defaultValue] - else: - tokens = [] - return loc, tokens - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "[" + _ustr(self.expr) + "]" - - return self.strRepr - -class SkipTo(ParseElementEnhance): - """Token for skipping over all undefined text until the matched - expression is found. 
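(Illustrative sketch, not part of the removed file, assuming the API in this module: skip to a delimiter and keep only what follows)::

    from pyparsing import SkipTo, Suppress, Word, nums

    # consume everything up to and including '=' and discard it
    setting = Suppress(SkipTo('=', include=True)) + Word(nums)
    print(setting.parseString('retry count (max) = 5'))  # -> ['5']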
- - Parameters: - - expr - target expression marking the end of the data to be skipped - - include - (default= ``False``) if True, the target expression is also parsed - (the skipped text and target expression are returned as a 2-element list). - - ignore - (default= ``None``) used to define grammars (typically quoted strings and - comments) that might contain false matches to the target expression - - failOn - (default= ``None``) define expressions that are not allowed to be - included in the skipped test; if found before the target expression is found, - the SkipTo is not a match - - Example:: - - report = ''' - Outstanding Issues Report - 1 Jan 2000 - - # | Severity | Description | Days Open - -----+----------+-------------------------------------------+----------- - 101 | Critical | Intermittent system crash | 6 - 94 | Cosmetic | Spelling error on Login ('log|n') | 14 - 79 | Minor | System slow when running too many reports | 47 - ''' - integer = Word(nums) - SEP = Suppress('|') - # use SkipTo to simply match everything up until the next SEP - # - ignore quoted strings, so that a '|' character inside a quoted string does not match - # - parse action will call token.strip() for each matched token, i.e., the description body - string_data = SkipTo(SEP, ignore=quotedString) - string_data.setParseAction(tokenMap(str.strip)) - ticket_expr = (integer("issue_num") + SEP - + string_data("sev") + SEP - + string_data("desc") + SEP - + integer("days_open")) - - for tkt in ticket_expr.searchString(report): - print tkt.dump() - - prints:: - - ['101', 'Critical', 'Intermittent system crash', '6'] - - days_open: 6 - - desc: Intermittent system crash - - issue_num: 101 - - sev: Critical - ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14'] - - days_open: 14 - - desc: Spelling error on Login ('log|n') - - issue_num: 94 - - sev: Cosmetic - ['79', 'Minor', 'System slow when running too many reports', '47'] - - days_open: 47 - - desc: System slow when running too many reports - - issue_num: 79 - - sev: Minor - """ - def __init__(self, other, include=False, ignore=None, failOn=None): - super(SkipTo, self).__init__(other) - self.ignoreExpr = ignore - self.mayReturnEmpty = True - self.mayIndexError = False - self.includeMatch = include - self.saveAsList = False - if isinstance(failOn, basestring): - self.failOn = self._literalStringClass(failOn) - else: - self.failOn = failOn - self.errmsg = "No match found for " + _ustr(self.expr) - - def parseImpl(self, instring, loc, doActions=True): - startloc = loc - instrlen = len(instring) - expr = self.expr - expr_parse = self.expr._parse - self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None - self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None - - tmploc = loc - while tmploc <= instrlen: - if self_failOn_canParseNext is not None: - # break if failOn expression matches - if self_failOn_canParseNext(instring, tmploc): - break - - if self_ignoreExpr_tryParse is not None: - # advance past ignore expressions - while 1: - try: - tmploc = self_ignoreExpr_tryParse(instring, tmploc) - except ParseBaseException: - break - - try: - expr_parse(instring, tmploc, doActions=False, callPreParse=False) - except (ParseException, IndexError): - # no match, advance loc in string - tmploc += 1 - else: - # matched skipto expr, done - break - - else: - # ran off the end of the input string without matching skipto expr, fail - raise ParseException(instring, loc, self.errmsg, self) - - # build up return 
values - loc = tmploc - skiptext = instring[startloc:loc] - skipresult = ParseResults(skiptext) - - if self.includeMatch: - loc, mat = expr_parse(instring, loc, doActions, callPreParse=False) - skipresult += mat - - return loc, skipresult - -class Forward(ParseElementEnhance): - """Forward declaration of an expression to be defined later - - used for recursive grammars, such as algebraic infix notation. - When the expression is known, it is assigned to the ``Forward`` - variable using the '<<' operator. - - Note: take care when assigning to ``Forward`` not to overlook - precedence of operators. - - Specifically, '|' has a lower precedence than '<<', so that:: - - fwdExpr << a | b | c - - will actually be evaluated as:: - - (fwdExpr << a) | b | c - - thereby leaving b and c out as parseable alternatives. It is recommended that you - explicitly group the values inserted into the ``Forward``:: - - fwdExpr << (a | b | c) - - Converting to use the '<<=' operator instead will avoid this problem. - - See :class:`ParseResults.pprint` for an example of a recursive - parser created using ``Forward``. - """ - def __init__(self, other=None): - super(Forward, self).__init__(other, savelist=False) - - def __lshift__(self, other): - if isinstance(other, basestring): - other = self._literalStringClass(other) - self.expr = other - self.strRepr = None - self.mayIndexError = self.expr.mayIndexError - self.mayReturnEmpty = self.expr.mayReturnEmpty - self.setWhitespaceChars(self.expr.whiteChars) - self.skipWhitespace = self.expr.skipWhitespace - self.saveAsList = self.expr.saveAsList - self.ignoreExprs.extend(self.expr.ignoreExprs) - return self - - def __ilshift__(self, other): - return self << other - - def leaveWhitespace(self): - self.skipWhitespace = False - return self - - def streamline(self): - if not self.streamlined: - self.streamlined = True - if self.expr is not None: - self.expr.streamline() - return self - - def validate(self, validateTrace=None): - if validateTrace is None: - validateTrace = [] - - if self not in validateTrace: - tmp = validateTrace[:] + [self] - if self.expr is not None: - self.expr.validate(tmp) - self.checkRecursion([]) - - def __str__(self): - if hasattr(self, "name"): - return self.name - if self.strRepr is not None: - return self.strRepr - - # Avoid infinite recursion by setting a temporary strRepr - self.strRepr = ": ..." - - # Use the string representation of main expression. - retString = '...' - try: - if self.expr is not None: - retString = _ustr(self.expr)[:1000] - else: - retString = "None" - finally: - self.strRepr = self.__class__.__name__ + ": " + retString - return self.strRepr - - def copy(self): - if self.expr is not None: - return super(Forward, self).copy() - else: - ret = Forward() - ret <<= self - return ret - - def _setResultsName(self, name, listAllMatches=False): - if __diag__.warn_name_set_on_empty_Forward: - if self.expr is None: - warnings.warn("{0}: setting results name {0!r} on {1} expression " - "that has no contained expression".format("warn_name_set_on_empty_Forward", - name, - type(self).__name__), - stacklevel=3) - - return super(Forward, self)._setResultsName(name, listAllMatches) - -class TokenConverter(ParseElementEnhance): - """ - Abstract subclass of :class:`ParseExpression`, for converting parsed results. 
- """ - def __init__(self, expr, savelist=False): - super(TokenConverter, self).__init__(expr) # , savelist) - self.saveAsList = False - -class Combine(TokenConverter): - """Converter to concatenate all matching tokens to a single string. - By default, the matching patterns must also be contiguous in the - input string; this can be disabled by specifying - ``'adjacent=False'`` in the constructor. - - Example:: - - real = Word(nums) + '.' + Word(nums) - print(real.parseString('3.1416')) # -> ['3', '.', '1416'] - # will also erroneously match the following - print(real.parseString('3. 1416')) # -> ['3', '.', '1416'] - - real = Combine(Word(nums) + '.' + Word(nums)) - print(real.parseString('3.1416')) # -> ['3.1416'] - # no match when there are internal spaces - print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...) - """ - def __init__(self, expr, joinString="", adjacent=True): - super(Combine, self).__init__(expr) - # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself - if adjacent: - self.leaveWhitespace() - self.adjacent = adjacent - self.skipWhitespace = True - self.joinString = joinString - self.callPreparse = True - - def ignore(self, other): - if self.adjacent: - ParserElement.ignore(self, other) - else: - super(Combine, self).ignore(other) - return self - - def postParse(self, instring, loc, tokenlist): - retToks = tokenlist.copy() - del retToks[:] - retToks += ParseResults(["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults) - - if self.resultsName and retToks.haskeys(): - return [retToks] - else: - return retToks - -class Group(TokenConverter): - """Converter to return the matched tokens as a list - useful for - returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions. - - Example:: - - ident = Word(alphas) - num = Word(nums) - term = ident | num - func = ident + Optional(delimitedList(term)) - print(func.parseString("fn a, b, 100")) # -> ['fn', 'a', 'b', '100'] - - func = ident + Group(Optional(delimitedList(term))) - print(func.parseString("fn a, b, 100")) # -> ['fn', ['a', 'b', '100']] - """ - def __init__(self, expr): - super(Group, self).__init__(expr) - self.saveAsList = True - - def postParse(self, instring, loc, tokenlist): - return [tokenlist] - -class Dict(TokenConverter): - """Converter to return a repetitive expression as a list, but also - as a dictionary. Each element can also be referenced using the first - token in the expression as its key. Useful for tabular report - scraping when the first column can be used as a item key. 
- - Example:: - - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) - - text = "shape: SQUARE posn: upper left color: light blue texture: burlap" - attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) - - # print attributes as plain groups - print(OneOrMore(attr_expr).parseString(text).dump()) - - # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names - result = Dict(OneOrMore(Group(attr_expr))).parseString(text) - print(result.dump()) - - # access named fields as dict entries, or output as dict - print(result['shape']) - print(result.asDict()) - - prints:: - - ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] - [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - - color: light blue - - posn: upper left - - shape: SQUARE - - texture: burlap - SQUARE - {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'} - - See more examples at :class:`ParseResults` of accessing fields by results name. - """ - def __init__(self, expr): - super(Dict, self).__init__(expr) - self.saveAsList = True - - def postParse(self, instring, loc, tokenlist): - for i, tok in enumerate(tokenlist): - if len(tok) == 0: - continue - ikey = tok[0] - if isinstance(ikey, int): - ikey = _ustr(tok[0]).strip() - if len(tok) == 1: - tokenlist[ikey] = _ParseResultsWithOffset("", i) - elif len(tok) == 2 and not isinstance(tok[1], ParseResults): - tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i) - else: - dictvalue = tok.copy() # ParseResults(i) - del dictvalue[0] - if len(dictvalue) != 1 or (isinstance(dictvalue, ParseResults) and dictvalue.haskeys()): - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i) - else: - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i) - - if self.resultsName: - return [tokenlist] - else: - return tokenlist - - -class Suppress(TokenConverter): - """Converter for ignoring the results of a parsed expression. - - Example:: - - source = "a, b, c,d" - wd = Word(alphas) - wd_list1 = wd + ZeroOrMore(',' + wd) - print(wd_list1.parseString(source)) - - # often, delimiters that are useful during parsing are just in the - # way afterward - use Suppress to keep them out of the parsed output - wd_list2 = wd + ZeroOrMore(Suppress(',') + wd) - print(wd_list2.parseString(source)) - - prints:: - - ['a', ',', 'b', ',', 'c', ',', 'd'] - ['a', 'b', 'c', 'd'] - - (See also :class:`delimitedList`.) - """ - def postParse(self, instring, loc, tokenlist): - return [] - - def suppress(self): - return self - - -class OnlyOnce(object): - """Wrapper for parse actions, to ensure they are only called once. - """ - def __init__(self, methodCall): - self.callable = _trim_arity(methodCall) - self.called = False - def __call__(self, s, l, t): - if not self.called: - results = self.callable(s, l, t) - self.called = True - return results - raise ParseException(s, l, "") - def reset(self): - self.called = False - -def traceParseAction(f): - """Decorator for debugging parse actions. - - When the parse action is called, this decorator will print - ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``. - When the parse action completes, the decorator will print - ``"<<"`` followed by the returned value, or any exception that the parse action raised.
- - Example:: - - wd = Word(alphas) - - @traceParseAction - def remove_duplicate_chars(tokens): - return ''.join(sorted(set(''.join(tokens)))) - - wds = OneOrMore(wd).setParseAction(remove_duplicate_chars) - print(wds.parseString("slkdjs sld sldd sdlf sdljf")) - - prints:: - - >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) - <<leaving remove_duplicate_chars (ret: 'dfjkls') - ['dfjkls'] - """ - f = _trim_arity(f) - def z(*paArgs): - thisFunc = f.__name__ - s, l, t = paArgs[-3:] - if len(paArgs) > 3: - thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc - sys.stderr.write(">>entering %s(line: '%s', %d, %r)\n" % (thisFunc, line(l, s), l, t)) - try: - ret = f(*paArgs) - except Exception as exc: - sys.stderr.write("<<leaving %s (exception: %s)\n" % (thisFunc, exc)) - raise - sys.stderr.write("<<leaving %s (ret: %r)\n" % (thisFunc, ret)) - return ret - try: - z.__name__ = f.__name__ - except AttributeError: - pass - return z - -# -# global helpers -# -def delimitedList(expr, delim=",", combine=False): - """Helper to define a delimited list of expressions - the delimiter - defaults to ','. By default, the list elements and delimiters can - have intervening whitespace, and comments, but this can be - overridden by passing 'combine=True' in the constructor. If - ``combine`` is set to ``True``, the matching tokens are - returned as a single token string, with the delimiters included; - otherwise, the matching tokens are returned as a list of tokens, - with the delimiters suppressed. - - Example:: - - delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc'] - delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] - """ - dlName = _ustr(expr) + " [" + _ustr(delim) + " " + _ustr(expr) + "]..." - if combine: - return Combine(expr + ZeroOrMore(delim + expr)).setName(dlName) - else: - return (expr + ZeroOrMore(Suppress(delim) + expr)).setName(dlName) - -def countedArray(expr, intExpr=None): - """Helper to define a counted list of expressions. - - This helper defines a pattern of the form:: - - integer expr expr expr... - - where the leading integer tells how many expr expressions follow. - The matched tokens returns the array of expr tokens as a list - the - leading count token is suppressed. - - If ``intExpr`` is specified, it should be a pyparsing expression - that produces an integer value. - - Example:: - - countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd'] - - # in this parser, the leading integer value is given in binary, - # '10' indicating that 2 values are in the array - binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2)) - countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd'] - """ - arrayExpr = Forward() - def countFieldParseAction(s, l, t): - n = t[0] - arrayExpr << (n and Group(And([expr] * n)) or Group(empty)) - return [] - if intExpr is None: - intExpr = Word(nums).setParseAction(lambda t: int(t[0])) - else: - intExpr = intExpr.copy() - intExpr.setName("arrayLen") - intExpr.addParseAction(countFieldParseAction, callDuringTry=True) - return (intExpr + arrayExpr).setName('(len) ' + _ustr(expr) + '...') - -def _flatten(L): - ret = [] - for i in L: - if isinstance(i, list): - ret.extend(_flatten(i)) - else: - ret.append(i) - return ret - -def matchPreviousLiteral(expr): - """Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it looks for - a 'repeat' of a previous expression. For example:: - - first = Word(nums) - second = matchPreviousLiteral(first) - matchExpr = first + ":" + second - - will match ``"1:1"``, but not ``"1:2"``. Because this - matches a previous literal, will also match the leading - ``"1:1"`` in ``"1:10"``. If this is not desired, use - :class:`matchPreviousExpr`. Do *not* use with packrat parsing - enabled. - """ - rep = Forward() - def copyTokenToRepeater(s, l, t): - if t: - if len(t) == 1: - rep << t[0] - else: - # flatten t tokens - tflat = _flatten(t.asList()) - rep << And(Literal(tt) for tt in tflat) - else: - rep << Empty() - expr.addParseAction(copyTokenToRepeater, callDuringTry=True) - rep.setName('(prev) ' + _ustr(expr)) - return rep - -def matchPreviousExpr(expr): - """Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it looks for - a 'repeat' of a previous expression.
For example:: - - first = Word(nums) - second = matchPreviousExpr(first) - matchExpr = first + ":" + second - - will match ``"1:1"``, but not ``"1:2"``. Because this - matches by expressions, will *not* match the leading ``"1:1"`` - in ``"1:10"``; the expressions are evaluated first, and then - compared, so ``"1"`` is compared with ``"10"``. Do *not* use - with packrat parsing enabled. - """ - rep = Forward() - e2 = expr.copy() - rep <<= e2 - def copyTokenToRepeater(s, l, t): - matchTokens = _flatten(t.asList()) - def mustMatchTheseTokens(s, l, t): - theseTokens = _flatten(t.asList()) - if theseTokens != matchTokens: - raise ParseException('', 0, '') - rep.setParseAction(mustMatchTheseTokens, callDuringTry=True) - expr.addParseAction(copyTokenToRepeater, callDuringTry=True) - rep.setName('(prev) ' + _ustr(expr)) - return rep - -def _escapeRegexRangeChars(s): - # ~ escape these chars: ^-[] - for c in r"\^-[]": - s = s.replace(c, _bslash + c) - s = s.replace("\n", r"\n") - s = s.replace("\t", r"\t") - return _ustr(s) - -def oneOf(strs, caseless=False, useRegex=True, asKeyword=False): - """Helper to quickly define a set of alternative Literals, and makes - sure to do longest-first testing when there is a conflict, - regardless of the input order, but returns - a :class:`MatchFirst` for best performance. - - Parameters: - - - strs - a string of space-delimited literals, or a collection of - string literals - - caseless - (default= ``False``) - treat all literals as - caseless - - useRegex - (default= ``True``) - as an optimization, will - generate a Regex object; otherwise, will generate - a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if - creating a :class:`Regex` raises an exception) - - asKeyword - (default=``False``) - enforce Keyword-style matching on the - generated expressions - - Example:: - - comp_oper = oneOf("< = > <= >= !=") - var = Word(alphas) - number = Word(nums) - term = var | number - comparison_expr = term + comp_oper + term - print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12")) - - prints:: - - [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] - """ - if isinstance(caseless, basestring): - warnings.warn("More than one string argument passed to oneOf, pass " - "choices as a list or space-delimited string", stacklevel=2) - - if caseless: - isequal = (lambda a, b: a.upper() == b.upper()) - masks = (lambda a, b: b.upper().startswith(a.upper())) - parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral - else: - isequal = (lambda a, b: a == b) - masks = (lambda a, b: b.startswith(a)) - parseElementClass = Keyword if asKeyword else Literal - - symbols = [] - if isinstance(strs, basestring): - symbols = strs.split() - elif isinstance(strs, Iterable): - symbols = list(strs) - else: - warnings.warn("Invalid argument to oneOf, expected string or iterable", - SyntaxWarning, stacklevel=2) - if not symbols: - return NoMatch() - - if not asKeyword: - # if not producing keywords, need to reorder to take care to avoid masking - # longer choices with shorter ones - i = 0 - while i < len(symbols) - 1: - cur = symbols[i] - for j, other in enumerate(symbols[i + 1:]): - if isequal(other, cur): - del symbols[i + j + 1] - break - elif masks(cur, other): - del symbols[i + j + 1] - symbols.insert(i, other) - break - else: - i += 1 - - if not (caseless or asKeyword) and useRegex: - # ~ print (strs, "->", "|".join([_escapeRegexChars(sym) for sym in symbols])) - try: - if len(symbols) == len("".join(symbols)): - return 
Regex("[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols)).setName(' | '.join(symbols)) - else: - return Regex("|".join(re.escape(sym) for sym in symbols)).setName(' | '.join(symbols)) - except Exception: - warnings.warn("Exception creating Regex for oneOf, building MatchFirst", - SyntaxWarning, stacklevel=2) - - # last resort, just use MatchFirst - return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols)) - -def dictOf(key, value): - """Helper to easily and clearly define a dictionary by specifying - the respective patterns for the key and value. Takes care of - defining the :class:`Dict`, :class:`ZeroOrMore`, and - :class:`Group` tokens in the proper order. The key pattern - can include delimiting markers or punctuation, as long as they are - suppressed, thereby leaving the significant key text. The value - pattern can include named results, so that the :class:`Dict` results - can include named token fields. - - Example:: - - text = "shape: SQUARE posn: upper left color: light blue texture: burlap" - attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) - print(OneOrMore(attr_expr).parseString(text).dump()) - - attr_label = label - attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join) - - # similar to Dict, but simpler call format - result = dictOf(attr_label, attr_value).parseString(text) - print(result.dump()) - print(result['shape']) - print(result.shape) # object attribute access works too - print(result.asDict()) - - prints:: - - [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - - color: light blue - - posn: upper left - - shape: SQUARE - - texture: burlap - SQUARE - SQUARE - {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} - """ - return Dict(OneOrMore(Group(key + value))) - -def originalTextFor(expr, asString=True): - """Helper to return the original, untokenized text for a given - expression. Useful to restore the parsed fields of an HTML start - tag into the raw tag text itself, or to revert separate tokens with - intervening whitespace back to the original matching input text. By - default, returns astring containing the original parsed text. - - If the optional ``asString`` argument is passed as - ``False``, then the return value is - a :class:`ParseResults` containing any results names that - were originally matched, and a single token containing the original - matched text from the input string. So if the expression passed to - :class:`originalTextFor` contains expressions with defined - results names, you must set ``asString`` to ``False`` if you - want to preserve those results name values. 
- - Example:: - - src = "this is test bold text normal text " - for tag in ("b", "i"): - opener, closer = makeHTMLTags(tag) - patt = originalTextFor(opener + SkipTo(closer) + closer) - print(patt.searchString(src)[0]) - - prints:: - - [' bold text '] - ['text'] - """ - locMarker = Empty().setParseAction(lambda s, loc, t: loc) - endlocMarker = locMarker.copy() - endlocMarker.callPreparse = False - matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") - if asString: - extractText = lambda s, l, t: s[t._original_start: t._original_end] - else: - def extractText(s, l, t): - t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]] - matchExpr.setParseAction(extractText) - matchExpr.ignoreExprs = expr.ignoreExprs - return matchExpr - -def ungroup(expr): - """Helper to undo pyparsing's default grouping of And expressions, - even if all but one are non-empty. - """ - return TokenConverter(expr).addParseAction(lambda t: t[0]) - -def locatedExpr(expr): - """Helper to decorate a returned token with its starting and ending - locations in the input string. - - This helper adds the following results names: - - - locn_start = location where matched expression begins - - locn_end = location where matched expression ends - - value = the actual parsed results - - Be careful if the input text contains ```` characters, you - may want to call :class:`ParserElement.parseWithTabs` - - Example:: - - wd = Word(alphas) - for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"): - print(match) - - prints:: - - [[0, 'ljsdf', 5]] - [[8, 'lksdjjf', 15]] - [[18, 'lkkjj', 23]] - """ - locator = Empty().setParseAction(lambda s, l, t: l) - return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end")) - - -# convenience constants for positional expressions -empty = Empty().setName("empty") -lineStart = LineStart().setName("lineStart") -lineEnd = LineEnd().setName("lineEnd") -stringStart = StringStart().setName("stringStart") -stringEnd = StringEnd().setName("stringEnd") - -_escapedPunc = Word(_bslash, r"\[]-*.$+^?()~ ", exact=2).setParseAction(lambda s, l, t: t[0][1]) -_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s, l, t: unichr(int(t[0].lstrip(r'\0x'), 16))) -_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s, l, t: unichr(int(t[0][1:], 8))) -_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1) -_charRange = Group(_singleChar + Suppress("-") + _singleChar) -_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group(OneOrMore(_charRange | _singleChar)).setResultsName("body") + "]" - -def srange(s): - r"""Helper to easily define string ranges for use in Word - construction. Borrows syntax from regexp '[]' string range - definitions:: - - srange("[0-9]") -> "0123456789" - srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" - srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" - - The input string must be enclosed in []'s, and the returned string - is the expanded character set joined into a single string. 
The - values enclosed in the []'s may be: - - - a single character - - an escaped character with a leading backslash (such as ``\-`` - or ``\]``) - - an escaped hex character with a leading ``'\x'`` - (``\x21``, which is a ``'!'`` character) (``\0x##`` - is also supported for backwards compatibility) - - an escaped octal character with a leading ``'\0'`` - (``\041``, which is a ``'!'`` character) - - a range of any of the above, separated by a dash (``'a-z'``, - etc.) - - any combination of the above (``'aeiouy'``, - ``'a-zA-Z0-9_$'``, etc.) - """ - _expanded = lambda p: p if not isinstance(p, ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]), ord(p[1]) + 1)) - try: - return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body) - except Exception: - return "" - -def matchOnlyAtCol(n): - """Helper method for defining parse actions that require matching at - a specific column in the input text. - """ - def verifyCol(strg, locn, toks): - if col(locn, strg) != n: - raise ParseException(strg, locn, "matched token not at column %d" % n) - return verifyCol - -def replaceWith(replStr): - """Helper method for common parse actions that simply return - a literal value. Especially useful when used with - :class:`transformString` (). - - Example:: - - num = Word(nums).setParseAction(lambda toks: int(toks[0])) - na = oneOf("N/A NA").setParseAction(replaceWith(math.nan)) - term = na | num - - OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234] - """ - return lambda s, l, t: [replStr] - -def removeQuotes(s, l, t): - """Helper parse action for removing quotation marks from parsed - quoted strings. - - Example:: - - # by default, quotation marks are included in parsed results - quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"] - - # use removeQuotes to strip quotation marks from parsed results - quotedString.setParseAction(removeQuotes) - quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"] - """ - return t[0][1:-1] - -def tokenMap(func, *args): - """Helper to define a parse action by mapping a function to all - elements of a ParseResults list. If any additional args are passed, - they are forwarded to the given function as additional arguments - after the token, as in - ``hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))``, - which will convert the parsed data to an integer using base 16. 
- - Example (compare the last to example in :class:`ParserElement.transformString`:: - - hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16)) - hex_ints.runTests(''' - 00 11 22 aa FF 0a 0d 1a - ''') - - upperword = Word(alphas).setParseAction(tokenMap(str.upper)) - OneOrMore(upperword).runTests(''' - my kingdom for a horse - ''') - - wd = Word(alphas).setParseAction(tokenMap(str.title)) - OneOrMore(wd).setParseAction(' '.join).runTests(''' - now is the winter of our discontent made glorious summer by this sun of york - ''') - - prints:: - - 00 11 22 aa FF 0a 0d 1a - [0, 17, 34, 170, 255, 10, 13, 26] - - my kingdom for a horse - ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE'] - - now is the winter of our discontent made glorious summer by this sun of york - ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] - """ - def pa(s, l, t): - return [func(tokn, *args) for tokn in t] - - try: - func_name = getattr(func, '__name__', - getattr(func, '__class__').__name__) - except Exception: - func_name = str(func) - pa.__name__ = func_name - - return pa - -upcaseTokens = tokenMap(lambda t: _ustr(t).upper()) -"""(Deprecated) Helper parse action to convert tokens to upper case. -Deprecated in favor of :class:`pyparsing_common.upcaseTokens`""" - -downcaseTokens = tokenMap(lambda t: _ustr(t).lower()) -"""(Deprecated) Helper parse action to convert tokens to lower case. -Deprecated in favor of :class:`pyparsing_common.downcaseTokens`""" - -def _makeTags(tagStr, xml, - suppress_LT=Suppress("<"), - suppress_GT=Suppress(">")): - """Internal helper to construct opening and closing tag expressions, given a tag name""" - if isinstance(tagStr, basestring): - resname = tagStr - tagStr = Keyword(tagStr, caseless=not xml) - else: - resname = tagStr.name - - tagAttrName = Word(alphas, alphanums + "_-:") - if xml: - tagAttrValue = dblQuotedString.copy().setParseAction(removeQuotes) - openTag = (suppress_LT - + tagStr("tag") - + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue))) - + Optional("/", default=[False])("empty").setParseAction(lambda s, l, t: t[0] == '/') - + suppress_GT) - else: - tagAttrValue = quotedString.copy().setParseAction(removeQuotes) | Word(printables, excludeChars=">") - openTag = (suppress_LT - + tagStr("tag") - + Dict(ZeroOrMore(Group(tagAttrName.setParseAction(downcaseTokens) - + Optional(Suppress("=") + tagAttrValue)))) - + Optional("/", default=[False])("empty").setParseAction(lambda s, l, t: t[0] == '/') - + suppress_GT) - closeTag = Combine(_L("", adjacent=False) - - openTag.setName("<%s>" % resname) - # add start results name in parse action now that ungrouped names are not reported at two levels - openTag.addParseAction(lambda t: t.__setitem__("start" + "".join(resname.replace(":", " ").title().split()), t.copy())) - closeTag = closeTag("end" + "".join(resname.replace(":", " ").title().split())).setName("" % resname) - openTag.tag = resname - closeTag.tag = resname - openTag.tag_body = SkipTo(closeTag()) - return openTag, closeTag - -def makeHTMLTags(tagStr): - """Helper to construct opening and closing tag expressions for HTML, - given a tag name. Matches tags in either upper or lower case, - attributes with namespaces and with quoted or unquoted values. 
- - Example:: - - text = 'More info at the pyparsing wiki page' - # makeHTMLTags returns pyparsing expressions for the opening and - # closing tags as a 2-tuple - a, a_end = makeHTMLTags("A") - link_expr = a + SkipTo(a_end)("link_text") + a_end - - for link in link_expr.searchString(text): - # attributes in the tag (like "href" shown here) are - # also accessible as named results - print(link.link_text, '->', link.href) - - prints:: - - pyparsing -> https://github.com/pyparsing/pyparsing/wiki - """ - return _makeTags(tagStr, False) - -def makeXMLTags(tagStr): - """Helper to construct opening and closing tag expressions for XML, - given a tag name. Matches tags only in the given upper/lower case. - - Example: similar to :class:`makeHTMLTags` - """ - return _makeTags(tagStr, True) - -def withAttribute(*args, **attrDict): - """Helper to create a validating parse action to be used with start - tags created with :class:`makeXMLTags` or - :class:`makeHTMLTags`. Use ``withAttribute`` to qualify - a starting tag with a required attribute value, to avoid false - matches on common tags such as ```` or ``
``. - - Call ``withAttribute`` with a series of attribute names and - values. Specify the list of filter attributes names and values as: - - - keyword arguments, as in ``(align="right")``, or - - as an explicit dict with ``**`` operator, when an attribute - name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}`` - - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))`` - - For attribute names with a namespace prefix, you must use the second - form. Attribute names are matched insensitive to upper/lower case. - - If just testing for ``class`` (with or without a namespace), use - :class:`withClass`. - - To verify that the attribute exists, but without specifying a value, - pass ``withAttribute.ANY_VALUE`` as the value. - - Example:: - - html = ''' -
- Some text -
1 4 0 1 0
-
1,3 2,3 1,1
-
this has no type
-
- - ''' - div,div_end = makeHTMLTags("div") - - # only match div tag having a type attribute with value "grid" - div_grid = div().setParseAction(withAttribute(type="grid")) - grid_expr = div_grid + SkipTo(div | div_end)("body") - for grid_header in grid_expr.searchString(html): - print(grid_header.body) - - # construct a match with any div tag having a type attribute, regardless of the value - div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE)) - div_expr = div_any_type + SkipTo(div | div_end)("body") - for div_header in div_expr.searchString(html): - print(div_header.body) - - prints:: - - 1 4 0 1 0 - - 1 4 0 1 0 - 1,3 2,3 1,1 - """ - if args: - attrs = args[:] - else: - attrs = attrDict.items() - attrs = [(k, v) for k, v in attrs] - def pa(s, l, tokens): - for attrName, attrValue in attrs: - if attrName not in tokens: - raise ParseException(s, l, "no matching attribute " + attrName) - if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue: - raise ParseException(s, l, "attribute '%s' has value '%s', must be '%s'" % - (attrName, tokens[attrName], attrValue)) - return pa -withAttribute.ANY_VALUE = object() - -def withClass(classname, namespace=''): - """Simplified version of :class:`withAttribute` when - matching on a div class - made difficult because ``class`` is - a reserved word in Python. - - Example:: - - html = ''' -
- Some text -
1 4 0 1 0
-
1,3 2,3 1,1
-
this <div> has no class
-
- - ''' - div,div_end = makeHTMLTags("div") - div_grid = div().setParseAction(withClass("grid")) - - grid_expr = div_grid + SkipTo(div | div_end)("body") - for grid_header in grid_expr.searchString(html): - print(grid_header.body) - - div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE)) - div_expr = div_any_type + SkipTo(div | div_end)("body") - for div_header in div_expr.searchString(html): - print(div_header.body) - - prints:: - - 1 4 0 1 0 - - 1 4 0 1 0 - 1,3 2,3 1,1 - """ - classattr = "%s:class" % namespace if namespace else "class" - return withAttribute(**{classattr: classname}) - -opAssoc = SimpleNamespace() -opAssoc.LEFT = object() -opAssoc.RIGHT = object() - -def infixNotation(baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')')): - """Helper method for constructing grammars of expressions made up of - operators working in a precedence hierarchy. Operators may be unary - or binary, left- or right-associative. Parse actions can also be - attached to operator expressions. The generated parser will also - recognize the use of parentheses to override operator precedences - (see example below). - - Note: if you define a deep operator list, you may see performance - issues when using infixNotation. See - :class:`ParserElement.enablePackrat` for a mechanism to potentially - improve your parser performance. - - Parameters: - - baseExpr - expression representing the most basic element for the - nested - - opList - list of tuples, one for each operator precedence level - in the expression grammar; each tuple is of the form ``(opExpr, - numTerms, rightLeftAssoc, parseAction)``, where: - - - opExpr is the pyparsing expression for the operator; may also - be a string, which will be converted to a Literal; if numTerms - is 3, opExpr is a tuple of two expressions, for the two - operators separating the 3 terms - - numTerms is the number of terms for this operator (must be 1, - 2, or 3) - - rightLeftAssoc is the indicator whether the operator is right - or left associative, using the pyparsing-defined constants - ``opAssoc.RIGHT`` and ``opAssoc.LEFT``. 
- - parseAction is the parse action to be associated with - expressions matching this operator expression (the parse action - tuple member may be omitted); if the parse action is passed - a tuple or list of functions, this is equivalent to calling - ``setParseAction(*fn)`` - (:class:`ParserElement.setParseAction`) - - lpar - expression for matching left-parentheses - (default= ``Suppress('(')``) - - rpar - expression for matching right-parentheses - (default= ``Suppress(')')``) - - Example:: - - # simple example of four-function arithmetic with ints and - # variable names - integer = pyparsing_common.signed_integer - varname = pyparsing_common.identifier - - arith_expr = infixNotation(integer | varname, - [ - ('-', 1, opAssoc.RIGHT), - (oneOf('* /'), 2, opAssoc.LEFT), - (oneOf('+ -'), 2, opAssoc.LEFT), - ]) - - arith_expr.runTests(''' - 5+3*6 - (5+3)*6 - -2--11 - ''', fullDump=False) - - prints:: - - 5+3*6 - [[5, '+', [3, '*', 6]]] - - (5+3)*6 - [[[5, '+', 3], '*', 6]] - - -2--11 - [[['-', 2], '-', ['-', 11]]] - """ - # captive version of FollowedBy that does not do parse actions or capture results names - class _FB(FollowedBy): - def parseImpl(self, instring, loc, doActions=True): - self.expr.tryParse(instring, loc) - return loc, [] - - ret = Forward() - lastExpr = baseExpr | (lpar + ret + rpar) - for i, operDef in enumerate(opList): - opExpr, arity, rightLeftAssoc, pa = (operDef + (None, ))[:4] - termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr - if arity == 3: - if opExpr is None or len(opExpr) != 2: - raise ValueError( - "if numterms=3, opExpr must be a tuple or list of two expressions") - opExpr1, opExpr2 = opExpr - thisExpr = Forward().setName(termName) - if rightLeftAssoc == opAssoc.LEFT: - if arity == 1: - matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + OneOrMore(opExpr)) - elif arity == 2: - if opExpr is not None: - matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group(lastExpr + OneOrMore(opExpr + lastExpr)) - else: - matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr + OneOrMore(lastExpr)) - elif arity == 3: - matchExpr = (_FB(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) - + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr))) - else: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - elif rightLeftAssoc == opAssoc.RIGHT: - if arity == 1: - # try to avoid LR with this extra test - if not isinstance(opExpr, Optional): - opExpr = Optional(opExpr) - matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr) - elif arity == 2: - if opExpr is not None: - matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group(lastExpr + OneOrMore(opExpr + thisExpr)) - else: - matchExpr = _FB(lastExpr + thisExpr) + Group(lastExpr + OneOrMore(thisExpr)) - elif arity == 3: - matchExpr = (_FB(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) - + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)) - else: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - else: - raise ValueError("operator must indicate right or left associativity") - if pa: - if isinstance(pa, (tuple, list)): - matchExpr.setParseAction(*pa) - else: - matchExpr.setParseAction(pa) - thisExpr <<= (matchExpr.setName(termName) | lastExpr) - lastExpr = thisExpr - ret <<= lastExpr - return ret - -operatorPrecedence = infixNotation -"""(Deprecated) Former name of :class:`infixNotation`, will be -dropped in a future release.""" - -dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + 
'"').setName("string enclosed in double quotes") -sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").setName("string enclosed in single quotes") -quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' - | Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").setName("quotedString using single or double quotes") -unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal") - -def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()): - """Helper method for defining nested lists enclosed in opening and - closing delimiters ("(" and ")" are the default). - - Parameters: - - opener - opening character for a nested list - (default= ``"("``); can also be a pyparsing expression - - closer - closing character for a nested list - (default= ``")"``); can also be a pyparsing expression - - content - expression for items within the nested lists - (default= ``None``) - - ignoreExpr - expression for ignoring opening and closing - delimiters (default= :class:`quotedString`) - - If an expression is not provided for the content argument, the - nested expression will capture all whitespace-delimited content - between delimiters as a list of separate values. - - Use the ``ignoreExpr`` argument to define expressions that may - contain opening or closing characters that should not be treated as - opening or closing characters for nesting, such as quotedString or - a comment expression. Specify multiple expressions using an - :class:`Or` or :class:`MatchFirst`. The default is - :class:`quotedString`, but if no expressions are to be ignored, then - pass ``None`` for this argument. - - Example:: - - data_type = oneOf("void int short long char float double") - decl_data_type = Combine(data_type + Optional(Word('*'))) - ident = Word(alphas+'_', alphanums+'_') - number = pyparsing_common.number - arg = Group(decl_data_type + ident) - LPAR, RPAR = map(Suppress, "()") - - code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment)) - - c_function = (decl_data_type("type") - + ident("name") - + LPAR + Optional(delimitedList(arg), [])("args") + RPAR - + code_body("body")) - c_function.ignore(cStyleComment) - - source_code = ''' - int is_odd(int x) { - return (x%2); - } - - int dec_to_hex(char hchar) { - if (hchar >= '0' && hchar <= '9') { - return (ord(hchar)-ord('0')); - } else { - return (10+ord(hchar)-ord('A')); - } - } - ''' - for func in c_function.searchString(source_code): - print("%(name)s (%(type)s) args: %(args)s" % func) - - - prints:: - - is_odd (int) args: [['int', 'x']] - dec_to_hex (int) args: [['char', 'hchar']] - """ - if opener == closer: - raise ValueError("opening and closing strings cannot be the same") - if content is None: - if isinstance(opener, basestring) and isinstance(closer, basestring): - if len(opener) == 1 and len(closer) == 1: - if ignoreExpr is not None: - content = (Combine(OneOrMore(~ignoreExpr - + CharsNotIn(opener - + closer - + ParserElement.DEFAULT_WHITE_CHARS, exact=1) - ) - ).setParseAction(lambda t: t[0].strip())) - else: - content = (empty.copy() + CharsNotIn(opener - + closer - + ParserElement.DEFAULT_WHITE_CHARS - ).setParseAction(lambda t: t[0].strip())) - else: - if ignoreExpr is not None: - content = (Combine(OneOrMore(~ignoreExpr - + ~Literal(opener) - + ~Literal(closer) - + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)) - ).setParseAction(lambda t: t[0].strip())) - else: - content = 
(Combine(OneOrMore(~Literal(opener) - + ~Literal(closer) - + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)) - ).setParseAction(lambda t: t[0].strip())) - else: - raise ValueError("opening and closing arguments must be strings if no content expression is given") - ret = Forward() - if ignoreExpr is not None: - ret <<= Group(Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)) - else: - ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer)) - ret.setName('nested %s%s expression' % (opener, closer)) - return ret - -def indentedBlock(blockStatementExpr, indentStack, indent=True): - """Helper method for defining space-delimited indentation blocks, - such as those used to define block statements in Python source code. - - Parameters: - - - blockStatementExpr - expression defining syntax of statement that - is repeated within the indented block - - indentStack - list created by caller to manage indentation stack - (multiple statementWithIndentedBlock expressions within a single - grammar should share a common indentStack) - - indent - boolean indicating whether block must be indented beyond - the current level; set to False for block of left-most - statements (default= ``True``) - - A valid block must contain at least one ``blockStatement``. - - Example:: - - data = ''' - def A(z): - A1 - B = 100 - G = A2 - A2 - A3 - B - def BB(a,b,c): - BB1 - def BBA(): - bba1 - bba2 - bba3 - C - D - def spam(x,y): - def eggs(z): - pass - ''' - - - indentStack = [1] - stmt = Forward() - - identifier = Word(alphas, alphanums) - funcDecl = ("def" + identifier + Group("(" + Optional(delimitedList(identifier)) + ")") + ":") - func_body = indentedBlock(stmt, indentStack) - funcDef = Group(funcDecl + func_body) - - rvalue = Forward() - funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") - rvalue << (funcCall | identifier | Word(nums)) - assignment = Group(identifier + "=" + rvalue) - stmt << (funcDef | assignment | identifier) - - module_body = OneOrMore(stmt) - - parseTree = module_body.parseString(data) - parseTree.pprint() - - prints:: - - [['def', - 'A', - ['(', 'z', ')'], - ':', - [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], - 'B', - ['def', - 'BB', - ['(', 'a', 'b', 'c', ')'], - ':', - [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], - 'C', - 'D', - ['def', - 'spam', - ['(', 'x', 'y', ')'], - ':', - [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] - """ - backup_stack = indentStack[:] - - def reset_stack(): - indentStack[:] = backup_stack - - def checkPeerIndent(s, l, t): - if l >= len(s): return - curCol = col(l, s) - if curCol != indentStack[-1]: - if curCol > indentStack[-1]: - raise ParseException(s, l, "illegal nesting") - raise ParseException(s, l, "not a peer entry") - - def checkSubIndent(s, l, t): - curCol = col(l, s) - if curCol > indentStack[-1]: - indentStack.append(curCol) - else: - raise ParseException(s, l, "not a subentry") - - def checkUnindent(s, l, t): - if l >= len(s): return - curCol = col(l, s) - if not(indentStack and curCol in indentStack): - raise ParseException(s, l, "not an unindent") - if curCol < indentStack[-1]: - indentStack.pop() - - NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress(), stopOn=StringEnd()) - INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT') - PEER = Empty().setParseAction(checkPeerIndent).setName('') - UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT') - if indent: - smExpr 
= Group(Optional(NL) - + INDENT - + OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL), stopOn=StringEnd()) - + UNDENT) - else: - smExpr = Group(Optional(NL) - + OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL), stopOn=StringEnd()) - + UNDENT) - smExpr.setFailAction(lambda a, b, c, d: reset_stack()) - blockStatementExpr.ignore(_bslash + LineEnd()) - return smExpr.setName('indented block') - -alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") -punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") - -anyOpenTag, anyCloseTag = makeHTMLTags(Word(alphas, alphanums + "_:").setName('any tag')) -_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(), '><& "\'')) -commonHTMLEntity = Regex('&(?P' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity") -def replaceHTMLEntity(t): - """Helper parser action to replace common HTML entities with their special characters""" - return _htmlEntityMap.get(t.entity) - -# it's easy to get these comment structures wrong - they're very common, so may as well make them available -cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment") -"Comment of the form ``/* ... */``" - -htmlComment = Regex(r"").setName("HTML comment") -"Comment of the form ````" - -restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line") -dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment") -"Comment of the form ``// ... (to end of line)``" - -cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/' | dblSlashComment).setName("C++ style comment") -"Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`" - -javaStyleComment = cppStyleComment -"Same as :class:`cppStyleComment`" - -pythonStyleComment = Regex(r"#.*").setName("Python style comment") -"Comment of the form ``# ... (to end of line)``" - -_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') - + Optional(Word(" \t") - + ~Literal(",") + ~LineEnd()))).streamline().setName("commaItem") -commaSeparatedList = delimitedList(Optional(quotedString.copy() | _commasepitem, default="")).setName("commaSeparatedList") -"""(Deprecated) Predefined expression of 1 or more printable words or -quoted strings, separated by commas. - -This expression is deprecated in favor of :class:`pyparsing_common.comma_separated_list`. 
-""" - -# some other useful expressions - using lower-case class name since we are really using this as a namespace -class pyparsing_common: - """Here are some common low-level expressions that may be useful in - jump-starting parser development: - - - numeric forms (:class:`integers`, :class:`reals`, - :class:`scientific notation`) - - common :class:`programming identifiers` - - network addresses (:class:`MAC`, - :class:`IPv4`, :class:`IPv6`) - - ISO8601 :class:`dates` and - :class:`datetime` - - :class:`UUID` - - :class:`comma-separated list` - - Parse actions: - - - :class:`convertToInteger` - - :class:`convertToFloat` - - :class:`convertToDate` - - :class:`convertToDatetime` - - :class:`stripHTMLTags` - - :class:`upcaseTokens` - - :class:`downcaseTokens` - - Example:: - - pyparsing_common.number.runTests(''' - # any int or real number, returned as the appropriate type - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.fnumber.runTests(''' - # any int or real number, returned as float - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.hex_integer.runTests(''' - # hex numbers - 100 - FF - ''') - - pyparsing_common.fraction.runTests(''' - # fractions - 1/2 - -3/4 - ''') - - pyparsing_common.mixed_integer.runTests(''' - # mixed fractions - 1 - 1/2 - -3/4 - 1-3/4 - ''') - - import uuid - pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) - pyparsing_common.uuid.runTests(''' - # uuid - 12345678-1234-5678-1234-567812345678 - ''') - - prints:: - - # any int or real number, returned as the appropriate type - 100 - [100] - - -100 - [-100] - - +100 - [100] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # any int or real number, returned as float - 100 - [100.0] - - -100 - [-100.0] - - +100 - [100.0] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # hex numbers - 100 - [256] - - FF - [255] - - # fractions - 1/2 - [0.5] - - -3/4 - [-0.75] - - # mixed fractions - 1 - [1] - - 1/2 - [0.5] - - -3/4 - [-0.75] - - 1-3/4 - [1.75] - - # uuid - 12345678-1234-5678-1234-567812345678 - [UUID('12345678-1234-5678-1234-567812345678')] - """ - - convertToInteger = tokenMap(int) - """ - Parse action for converting parsed integers to Python int - """ - - convertToFloat = tokenMap(float) - """ - Parse action for converting parsed numbers to Python float - """ - - integer = Word(nums).setName("integer").setParseAction(convertToInteger) - """expression that parses an unsigned integer, returns an int""" - - hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int, 16)) - """expression that parses a hexadecimal integer, returns an int""" - - signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger) - """expression that parses an integer with optional leading sign, returns an int""" - - fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction") - """fractional expression of an integer divided by an integer, returns a float""" - fraction.addParseAction(lambda t: t[0]/t[-1]) - - mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction") - """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" - mixed_integer.addParseAction(sum) - - real = Regex(r'[+-]?(?:\d+\.\d*|\.\d+)').setName("real number").setParseAction(convertToFloat) - """expression that parses a floating 
point number and returns a float""" - - sci_real = Regex(r'[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat) - """expression that parses a floating point number with optional - scientific notation and returns a float""" - - # streamlining this expression makes the docs nicer-looking - number = (sci_real | real | signed_integer).streamline() - """any numeric expression, returns the corresponding Python type""" - - fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat) - """any int or real number, returned as float""" - - identifier = Word(alphas + '_', alphanums + '_').setName("identifier") - """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" - - ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address") - "IPv4 address (``0.0.0.0 - 255.255.255.255``)" - - _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer") - _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part) * 7).setName("full IPv6 address") - _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part) * (0, 6)) - + "::" - + Optional(_ipv6_part + (':' + _ipv6_part) * (0, 6)) - ).setName("short IPv6 address") - _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8) - _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address") - ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address") - "IPv6 address (long, short, or mixed form)" - - mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address") - "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" - - @staticmethod - def convertToDate(fmt="%Y-%m-%d"): - """ - Helper to create a parse action for converting parsed date string to Python datetime.date - - Params - - - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``) - - Example:: - - date_expr = pyparsing_common.iso8601_date.copy() - date_expr.setParseAction(pyparsing_common.convertToDate()) - print(date_expr.parseString("1999-12-31")) - - prints:: - - [datetime.date(1999, 12, 31)] - """ - def cvt_fn(s, l, t): - try: - return datetime.strptime(t[0], fmt).date() - except ValueError as ve: - raise ParseException(s, l, str(ve)) - return cvt_fn - - @staticmethod - def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"): - """Helper to create a parse action for converting parsed - datetime string to Python datetime.datetime - - Params - - - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``) - - Example:: - - dt_expr = pyparsing_common.iso8601_datetime.copy() - dt_expr.setParseAction(pyparsing_common.convertToDatetime()) - print(dt_expr.parseString("1999-12-31T23:59:59.999")) - - prints:: - - [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] - """ - def cvt_fn(s, l, t): - try: - return datetime.strptime(t[0], fmt) - except ValueError as ve: - raise ParseException(s, l, str(ve)) - return cvt_fn - - iso8601_date = Regex(r'(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?').setName("ISO8601 date") - "ISO8601 date (``yyyy-mm-dd``)" - - iso8601_datetime = Regex(r'(?P\d{4})-(?P\d\d)-(?P\d\d)[T ](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime") - "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``" - - uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID") - "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)" - - _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress() - @staticmethod - def stripHTMLTags(s, l, tokens): - """Parse action to remove HTML tags from web page HTML source - - Example:: - - # strip HTML links from normal text - text = 'More info at the
pyparsing wiki page' - td, td_end = makeHTMLTags("TD") - table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end - print(table_text.parseString(text).body) - - Prints:: - - More info at the pyparsing wiki page - """ - return pyparsing_common._html_stripper.transformString(tokens[0]) - - _commasepitem = Combine(OneOrMore(~Literal(",") - + ~LineEnd() - + Word(printables, excludeChars=',') - + Optional(White(" \t")))).streamline().setName("commaItem") - comma_separated_list = delimitedList(Optional(quotedString.copy() - | _commasepitem, default='') - ).setName("comma separated list") - """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" - - upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper())) - """Parse action to convert tokens to upper case.""" - - downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower())) - """Parse action to convert tokens to lower case.""" - - -class _lazyclassproperty(object): - def __init__(self, fn): - self.fn = fn - self.__doc__ = fn.__doc__ - self.__name__ = fn.__name__ - - def __get__(self, obj, cls): - if cls is None: - cls = type(obj) - if not hasattr(cls, '_intern') or any(cls._intern is getattr(superclass, '_intern', []) - for superclass in cls.__mro__[1:]): - cls._intern = {} - attrname = self.fn.__name__ - if attrname not in cls._intern: - cls._intern[attrname] = self.fn(cls) - return cls._intern[attrname] - - -class unicode_set(object): - """ - A set of Unicode characters, for language-specific strings for - ``alphas``, ``nums``, ``alphanums``, and ``printables``. - A unicode_set is defined by a list of ranges in the Unicode character - set, in a class attribute ``_ranges``, such as:: - - _ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),] - - A unicode set can also be defined using multiple inheritance of other unicode sets:: - - class CJK(Chinese, Japanese, Korean): - pass - """ - _ranges = [] - - @classmethod - def _get_chars_for_ranges(cls): - ret = [] - for cc in cls.__mro__: - if cc is unicode_set: - break - for rr in cc._ranges: - ret.extend(range(rr[0], rr[-1] + 1)) - return [unichr(c) for c in sorted(set(ret))] - - @_lazyclassproperty - def printables(cls): - "all non-whitespace characters in this range" - return u''.join(filterfalse(unicode.isspace, cls._get_chars_for_ranges())) - - @_lazyclassproperty - def alphas(cls): - "all alphabetic characters in this range" - return u''.join(filter(unicode.isalpha, cls._get_chars_for_ranges())) - - @_lazyclassproperty - def nums(cls): - "all numeric digit characters in this range" - return u''.join(filter(unicode.isdigit, cls._get_chars_for_ranges())) - - @_lazyclassproperty - def alphanums(cls): - "all alphanumeric characters in this range" - return cls.alphas + cls.nums - - -class pyparsing_unicode(unicode_set): - """ - A namespace class for defining common language unicode_sets. 
- """ - _ranges = [(32, sys.maxunicode)] - - class Latin1(unicode_set): - "Unicode set for Latin-1 Unicode Character Range" - _ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),] - - class LatinA(unicode_set): - "Unicode set for Latin-A Unicode Character Range" - _ranges = [(0x0100, 0x017f),] - - class LatinB(unicode_set): - "Unicode set for Latin-B Unicode Character Range" - _ranges = [(0x0180, 0x024f),] - - class Greek(unicode_set): - "Unicode set for Greek Unicode Character Ranges" - _ranges = [ - (0x0370, 0x03ff), (0x1f00, 0x1f15), (0x1f18, 0x1f1d), (0x1f20, 0x1f45), (0x1f48, 0x1f4d), - (0x1f50, 0x1f57), (0x1f59,), (0x1f5b,), (0x1f5d,), (0x1f5f, 0x1f7d), (0x1f80, 0x1fb4), (0x1fb6, 0x1fc4), - (0x1fc6, 0x1fd3), (0x1fd6, 0x1fdb), (0x1fdd, 0x1fef), (0x1ff2, 0x1ff4), (0x1ff6, 0x1ffe), - ] - - class Cyrillic(unicode_set): - "Unicode set for Cyrillic Unicode Character Range" - _ranges = [(0x0400, 0x04ff)] - - class Chinese(unicode_set): - "Unicode set for Chinese Unicode Character Range" - _ranges = [(0x4e00, 0x9fff), (0x3000, 0x303f),] - - class Japanese(unicode_set): - "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges" - _ranges = [] - - class Kanji(unicode_set): - "Unicode set for Kanji Unicode Character Range" - _ranges = [(0x4E00, 0x9Fbf), (0x3000, 0x303f),] - - class Hiragana(unicode_set): - "Unicode set for Hiragana Unicode Character Range" - _ranges = [(0x3040, 0x309f),] - - class Katakana(unicode_set): - "Unicode set for Katakana Unicode Character Range" - _ranges = [(0x30a0, 0x30ff),] - - class Korean(unicode_set): - "Unicode set for Korean Unicode Character Range" - _ranges = [(0xac00, 0xd7af), (0x1100, 0x11ff), (0x3130, 0x318f), (0xa960, 0xa97f), (0xd7b0, 0xd7ff), (0x3000, 0x303f),] - - class CJK(Chinese, Japanese, Korean): - "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range" - pass - - class Thai(unicode_set): - "Unicode set for Thai Unicode Character Range" - _ranges = [(0x0e01, 0x0e3a), (0x0e3f, 0x0e5b),] - - class Arabic(unicode_set): - "Unicode set for Arabic Unicode Character Range" - _ranges = [(0x0600, 0x061b), (0x061e, 0x06ff), (0x0700, 0x077f),] - - class Hebrew(unicode_set): - "Unicode set for Hebrew Unicode Character Range" - _ranges = [(0x0590, 0x05ff),] - - class Devanagari(unicode_set): - "Unicode set for Devanagari Unicode Character Range" - _ranges = [(0x0900, 0x097f), (0xa8e0, 0xa8ff)] - -pyparsing_unicode.Japanese._ranges = (pyparsing_unicode.Japanese.Kanji._ranges - + pyparsing_unicode.Japanese.Hiragana._ranges - + pyparsing_unicode.Japanese.Katakana._ranges) - -# define ranges in language character sets -if PY_3: - setattr(pyparsing_unicode, u"العربية", pyparsing_unicode.Arabic) - setattr(pyparsing_unicode, u"中文", pyparsing_unicode.Chinese) - setattr(pyparsing_unicode, u"кириллица", pyparsing_unicode.Cyrillic) - setattr(pyparsing_unicode, u"Ελληνικά", pyparsing_unicode.Greek) - setattr(pyparsing_unicode, u"עִברִית", pyparsing_unicode.Hebrew) - setattr(pyparsing_unicode, u"日本語", pyparsing_unicode.Japanese) - setattr(pyparsing_unicode.Japanese, u"漢字", pyparsing_unicode.Japanese.Kanji) - setattr(pyparsing_unicode.Japanese, u"カタカナ", pyparsing_unicode.Japanese.Katakana) - setattr(pyparsing_unicode.Japanese, u"ひらがな", pyparsing_unicode.Japanese.Hiragana) - setattr(pyparsing_unicode, u"한국어", pyparsing_unicode.Korean) - setattr(pyparsing_unicode, u"ไทย", pyparsing_unicode.Thai) - setattr(pyparsing_unicode, u"देवनागरी", pyparsing_unicode.Devanagari) - - -class pyparsing_test: - """ - 
namespace class for classes useful in writing unit tests - """ - - class reset_pyparsing_context: - """ - Context manager to be used when writing unit tests that modify pyparsing config values: - - packrat parsing - - default whitespace characters. - - default keyword characters - - literal string auto-conversion class - - __diag__ settings - - Example: - with reset_pyparsing_context(): - # test that literals used to construct a grammar are automatically suppressed - ParserElement.inlineLiteralsUsing(Suppress) - - term = Word(alphas) | Word(nums) - group = Group('(' + term[...] + ')') - - # assert that the '()' characters are not included in the parsed tokens - self.assertParseAndCheckLisst(group, "(abc 123 def)", ['abc', '123', 'def']) - - # after exiting context manager, literals are converted to Literal expressions again - """ - - def __init__(self): - self._save_context = {} - - def save(self): - self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS - self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS - self._save_context[ - "literal_string_class" - ] = ParserElement._literalStringClass - self._save_context["packrat_enabled"] = ParserElement._packratEnabled - self._save_context["packrat_parse"] = ParserElement._parse - self._save_context["__diag__"] = { - name: getattr(__diag__, name) for name in __diag__._all_names - } - self._save_context["__compat__"] = { - "collect_all_And_tokens": __compat__.collect_all_And_tokens - } - return self - - def restore(self): - # reset pyparsing global state - if ( - ParserElement.DEFAULT_WHITE_CHARS - != self._save_context["default_whitespace"] - ): - ParserElement.setDefaultWhitespaceChars( - self._save_context["default_whitespace"] - ) - Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"] - ParserElement.inlineLiteralsUsing( - self._save_context["literal_string_class"] - ) - for name, value in self._save_context["__diag__"].items(): - setattr(__diag__, name, value) - ParserElement._packratEnabled = self._save_context["packrat_enabled"] - ParserElement._parse = self._save_context["packrat_parse"] - __compat__.collect_all_And_tokens = self._save_context["__compat__"] - - def __enter__(self): - return self.save() - - def __exit__(self, *args): - return self.restore() - - class TestParseResultsAsserts: - """ - A mixin class to add parse results assertion methods to normal unittest.TestCase classes. - """ - def assertParseResultsEquals( - self, result, expected_list=None, expected_dict=None, msg=None - ): - """ - Unit test assertion to compare a ParseResults object with an optional expected_list, - and compare any defined results names with an optional expected_dict. - """ - if expected_list is not None: - self.assertEqual(expected_list, result.asList(), msg=msg) - if expected_dict is not None: - self.assertEqual(expected_dict, result.asDict(), msg=msg) - - def assertParseAndCheckList( - self, expr, test_string, expected_list, msg=None, verbose=True - ): - """ - Convenience wrapper assert to test a parser element and input string, and assert that - the resulting ParseResults.asList() is equal to the expected_list. 
- """ - result = expr.parseString(test_string, parseAll=True) - if verbose: - print(result.dump()) - self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg) - - def assertParseAndCheckDict( - self, expr, test_string, expected_dict, msg=None, verbose=True - ): - """ - Convenience wrapper assert to test a parser element and input string, and assert that - the resulting ParseResults.asDict() is equal to the expected_dict. - """ - result = expr.parseString(test_string, parseAll=True) - if verbose: - print(result.dump()) - self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg) - - def assertRunTestResults( - self, run_tests_report, expected_parse_results=None, msg=None - ): - """ - Unit test assertion to evaluate output of ParserElement.runTests(). If a list of - list-dict tuples is given as the expected_parse_results argument, then these are zipped - with the report tuples returned by runTests and evaluated using assertParseResultsEquals. - Finally, asserts that the overall runTests() success value is True. - - :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests - :param expected_parse_results (optional): [tuple(str, list, dict, Exception)] - """ - run_test_success, run_test_results = run_tests_report - - if expected_parse_results is not None: - merged = [ - (rpt[0], rpt[1], expected) - for rpt, expected in zip(run_test_results, expected_parse_results) - ] - for test_string, result, expected in merged: - # expected should be a tuple containing a list and/or a dict or an exception, - # and optional failure message string - # an empty tuple will skip any result validation - fail_msg = next( - (exp for exp in expected if isinstance(exp, str)), None - ) - expected_exception = next( - ( - exp - for exp in expected - if isinstance(exp, type) and issubclass(exp, Exception) - ), - None, - ) - if expected_exception is not None: - with self.assertRaises( - expected_exception=expected_exception, msg=fail_msg or msg - ): - if isinstance(result, Exception): - raise result - else: - expected_list = next( - (exp for exp in expected if isinstance(exp, list)), None - ) - expected_dict = next( - (exp for exp in expected if isinstance(exp, dict)), None - ) - if (expected_list, expected_dict) != (None, None): - self.assertParseResultsEquals( - result, - expected_list=expected_list, - expected_dict=expected_dict, - msg=fail_msg or msg, - ) - else: - # warning here maybe? 
- print("no validation for {!r}".format(test_string)) - - # do this last, in case some specific test results can be reported instead - self.assertTrue( - run_test_success, msg=msg if msg is not None else "failed runTests" - ) - - @contextmanager - def assertRaisesParseException(self, exc_type=ParseException, msg=None): - with self.assertRaises(exc_type, msg=msg): - yield - - -if __name__ == "__main__": - - selectToken = CaselessLiteral("select") - fromToken = CaselessLiteral("from") - - ident = Word(alphas, alphanums + "_$") - - columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) - columnNameList = Group(delimitedList(columnName)).setName("columns") - columnSpec = ('*' | columnNameList) - - tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) - tableNameList = Group(delimitedList(tableName)).setName("tables") - - simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables") - - # demo runTests method, including embedded comments in test string - simpleSQL.runTests(""" - # '*' as column list and dotted table name - select * from SYS.XYZZY - - # caseless match on "SELECT", and casts back to "select" - SELECT * from XYZZY, ABC - - # list of column names, and mixed case SELECT keyword - Select AA,BB,CC from Sys.dual - - # multiple tables - Select A, B, C from Sys.dual, Table2 - - # invalid SELECT keyword - should fail - Xelect A, B, C from Sys.dual - - # incomplete command - should fail - Select - - # invalid column name - should fail - Select ^^^ frox Sys.dual - - """) - - pyparsing_common.number.runTests(""" - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - """) - - # any int or real number, returned as float - pyparsing_common.fnumber.runTests(""" - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - """) - - pyparsing_common.hex_integer.runTests(""" - 100 - FF - """) - - import uuid - pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) - pyparsing_common.uuid.runTests(""" - 12345678-1234-5678-1234-567812345678 - """) diff --git a/pipenv/patched/notpip/_vendor/pyparsing.LICENSE b/pipenv/vendor/pyparsing/LICENSE similarity index 100% rename from pipenv/patched/notpip/_vendor/pyparsing.LICENSE rename to pipenv/vendor/pyparsing/LICENSE diff --git a/pipenv/vendor/pyparsing/__init__.py b/pipenv/vendor/pyparsing/__init__.py new file mode 100644 index 0000000000..36c4601263 --- /dev/null +++ b/pipenv/vendor/pyparsing/__init__.py @@ -0,0 +1,328 @@ +# module pyparsing.py +# +# Copyright (c) 2003-2021 Paul T. McGuire +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# + +__doc__ = """ +pyparsing module - Classes and methods to define and execute parsing grammars +============================================================================= + +The pyparsing module is an alternative approach to creating and +executing simple grammars, vs. the traditional lex/yacc approach, or the +use of regular expressions. With pyparsing, you don't need to learn +a new syntax for defining grammars or matching expressions - the parsing +module provides a library of classes that you use to construct the +grammar directly in Python. + +Here is a program to parse "Hello, World!" (or any greeting of the form +``", !"``), built up using :class:`Word`, +:class:`Literal`, and :class:`And` elements +(the :meth:`'+'` operators create :class:`And` expressions, +and the strings are auto-converted to :class:`Literal` expressions):: + + from pipenv.vendor.pyparsing import Word, alphas + + # define grammar of a greeting + greet = Word(alphas) + "," + Word(alphas) + "!" + + hello = "Hello, World!" + print(hello, "->", greet.parse_string(hello)) + +The program outputs the following:: + + Hello, World! -> ['Hello', ',', 'World', '!'] + +The Python representation of the grammar is quite readable, owing to the +self-explanatory class names, and the use of :class:`'+'`, +:class:`'|'`, :class:`'^'` and :class:`'&'` operators. + +The :class:`ParseResults` object returned from +:class:`ParserElement.parseString` can be +accessed as a nested list, a dictionary, or an object with named +attributes. + +The pyparsing module handles some of the problems that are typically +vexing when writing text parsers: + + - extra or missing whitespace (the above program will also handle + "Hello,World!", "Hello , World !", etc.) + - quoted strings + - embedded comments + + +Getting Started - +----------------- +Visit the classes :class:`ParserElement` and :class:`ParseResults` to +see the base classes that most other pyparsing +classes inherit from. 
Use the docstrings for examples of how to: + + - construct literal match expressions from :class:`Literal` and + :class:`CaselessLiteral` classes + - construct character word-group expressions using the :class:`Word` + class + - see how to create repetitive expressions using :class:`ZeroOrMore` + and :class:`OneOrMore` classes + - use :class:`'+'`, :class:`'|'`, :class:`'^'`, + and :class:`'&'` operators to combine simple expressions into + more complex ones + - associate names with your parsed results using + :class:`ParserElement.setResultsName` + - access the parsed data, which is returned as a :class:`ParseResults` + object + - find some helpful expression short-cuts like :class:`delimitedList` + and :class:`oneOf` + - find more useful common expressions in the :class:`pyparsing_common` + namespace class +""" +from typing import NamedTuple + + +class version_info(NamedTuple): + major: int + minor: int + micro: int + releaselevel: str + serial: int + + @property + def __version__(self): + return "{}.{}.{}".format(self.major, self.minor, self.micro) + ( + "{}{}{}".format( + "r" if self.releaselevel[0] == "c" else "", + self.releaselevel[0], + self.serial, + ), + "", + )[self.releaselevel == "final"] + + def __str__(self): + return "{} {} / {}".format(__name__, self.__version__, __version_time__) + + def __repr__(self): + return "{}.{}({})".format( + __name__, + type(self).__name__, + ", ".join("{}={!r}".format(*nv) for nv in zip(self._fields, self)), + ) + + +__version_info__ = version_info(3, 0, 7, "final", 0) +__version_time__ = "15 Jan 2022 04:10 UTC" +__version__ = __version_info__.__version__ +__versionTime__ = __version_time__ +__author__ = "Paul McGuire " + +from .util import * +from .exceptions import * +from .actions import * +from .core import __diag__, __compat__ +from .results import * +from .core import * +from .core import _builtin_exprs as core_builtin_exprs +from .helpers import * +from .helpers import _builtin_exprs as helper_builtin_exprs + +from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode +from .testing import pyparsing_test as testing +from .common import ( + pyparsing_common as common, + _builtin_exprs as common_builtin_exprs, +) + +# define backward compat synonyms +if "pyparsing_unicode" not in globals(): + pyparsing_unicode = unicode +if "pyparsing_common" not in globals(): + pyparsing_common = common +if "pyparsing_test" not in globals(): + pyparsing_test = testing + +core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs + + +__all__ = [ + "__version__", + "__version_time__", + "__author__", + "__compat__", + "__diag__", + "And", + "AtLineStart", + "AtStringStart", + "CaselessKeyword", + "CaselessLiteral", + "CharsNotIn", + "Combine", + "Dict", + "Each", + "Empty", + "FollowedBy", + "Forward", + "GoToColumn", + "Group", + "IndentedBlock", + "Keyword", + "LineEnd", + "LineStart", + "Literal", + "Located", + "PrecededBy", + "MatchFirst", + "NoMatch", + "NotAny", + "OneOrMore", + "OnlyOnce", + "OpAssoc", + "Opt", + "Optional", + "Or", + "ParseBaseException", + "ParseElementEnhance", + "ParseException", + "ParseExpression", + "ParseFatalException", + "ParseResults", + "ParseSyntaxException", + "ParserElement", + "PositionToken", + "QuotedString", + "RecursiveGrammarException", + "Regex", + "SkipTo", + "StringEnd", + "StringStart", + "Suppress", + "Token", + "TokenConverter", + "White", + "Word", + "WordEnd", + "WordStart", + "ZeroOrMore", + "Char", + "alphanums", + "alphas", + "alphas8bit", + "any_close_tag", + 
"any_open_tag", + "c_style_comment", + "col", + "common_html_entity", + "counted_array", + "cpp_style_comment", + "dbl_quoted_string", + "dbl_slash_comment", + "delimited_list", + "dict_of", + "empty", + "hexnums", + "html_comment", + "identchars", + "identbodychars", + "java_style_comment", + "line", + "line_end", + "line_start", + "lineno", + "make_html_tags", + "make_xml_tags", + "match_only_at_col", + "match_previous_expr", + "match_previous_literal", + "nested_expr", + "null_debug_action", + "nums", + "one_of", + "printables", + "punc8bit", + "python_style_comment", + "quoted_string", + "remove_quotes", + "replace_with", + "replace_html_entity", + "rest_of_line", + "sgl_quoted_string", + "srange", + "string_end", + "string_start", + "trace_parse_action", + "unicode_string", + "with_attribute", + "indentedBlock", + "original_text_for", + "ungroup", + "infix_notation", + "locatedExpr", + "with_class", + "CloseMatch", + "token_map", + "pyparsing_common", + "pyparsing_unicode", + "unicode_set", + "condition_as_parse_action", + "pyparsing_test", + # pre-PEP8 compatibility names + "__versionTime__", + "anyCloseTag", + "anyOpenTag", + "cStyleComment", + "commonHTMLEntity", + "countedArray", + "cppStyleComment", + "dblQuotedString", + "dblSlashComment", + "delimitedList", + "dictOf", + "htmlComment", + "javaStyleComment", + "lineEnd", + "lineStart", + "makeHTMLTags", + "makeXMLTags", + "matchOnlyAtCol", + "matchPreviousExpr", + "matchPreviousLiteral", + "nestedExpr", + "nullDebugAction", + "oneOf", + "opAssoc", + "pythonStyleComment", + "quotedString", + "removeQuotes", + "replaceHTMLEntity", + "replaceWith", + "restOfLine", + "sglQuotedString", + "stringEnd", + "stringStart", + "traceParseAction", + "unicodeString", + "withAttribute", + "indentedBlock", + "originalTextFor", + "infixNotation", + "locatedExpr", + "withClass", + "tokenMap", + "conditionAsParseAction", + "autoname_elements", +] diff --git a/pipenv/vendor/pyparsing/actions.py b/pipenv/vendor/pyparsing/actions.py new file mode 100644 index 0000000000..2bcc5502b0 --- /dev/null +++ b/pipenv/vendor/pyparsing/actions.py @@ -0,0 +1,207 @@ +# actions.py + +from .exceptions import ParseException +from .util import col + + +class OnlyOnce: + """ + Wrapper for parse actions, to ensure they are only called once. + """ + + def __init__(self, method_call): + from .core import _trim_arity + + self.callable = _trim_arity(method_call) + self.called = False + + def __call__(self, s, l, t): + if not self.called: + results = self.callable(s, l, t) + self.called = True + return results + raise ParseException(s, l, "OnlyOnce obj called multiple times w/out reset") + + def reset(self): + """ + Allow the associated parse action to be called once more. + """ + + self.called = False + + +def match_only_at_col(n): + """ + Helper method for defining parse actions that require matching at + a specific column in the input text. + """ + + def verify_col(strg, locn, toks): + if col(locn, strg) != n: + raise ParseException(strg, locn, "matched token not at column {}".format(n)) + + return verify_col + + +def replace_with(repl_str): + """ + Helper method for common parse actions that simply return + a literal value. Especially useful when used with + :class:`transform_string` (). 
+
+    Example::
+
+        num = Word(nums).set_parse_action(lambda toks: int(toks[0]))
+        na = one_of("N/A NA").set_parse_action(replace_with(math.nan))
+        term = na | num
+
+        OneOrMore(term).parse_string("324 234 N/A 234")  # -> [324, 234, nan, 234]
+    """
+    return lambda s, l, t: [repl_str]
+
+
+def remove_quotes(s, l, t):
+    """
+    Helper parse action for removing quotation marks from parsed
+    quoted strings.
+
+    Example::
+
+        # by default, quotation marks are included in parsed results
+        quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
+
+        # use remove_quotes to strip quotation marks from parsed results
+        quoted_string.set_parse_action(remove_quotes)
+        quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
+    """
+    return t[0][1:-1]
+
+
+def with_attribute(*args, **attr_dict):
+    """
+    Helper to create a validating parse action to be used with start
+    tags created with :class:`make_xml_tags` or
+    :class:`make_html_tags`. Use ``with_attribute`` to qualify
+    a starting tag with a required attribute value, to avoid false
+    matches on common tags such as ``<TD>`` or ``<DIV>``.
+
+    Call ``with_attribute`` with a series of attribute names and
+    values. Specify the list of filter attribute names and values as:
+
+    - keyword arguments, as in ``(align="right")``, or
+    - as an explicit dict with ``**`` operator, when an attribute
+      name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``
+    - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))``
+
+    For attribute names with a namespace prefix, you must use the second
+    form. Attribute names are matched insensitive to upper/lower case.
+
+    If just testing for ``class`` (with or without a namespace), use
+    :class:`with_class`.
+
+    To verify that the attribute exists, but without specifying a value,
+    pass ``with_attribute.ANY_VALUE`` as the value.
+
+    Example::
+
+        html = '''
+            <div>
+            Some text
+            <div type="grid">1 4 0 1 0</div>
+            <div type="graph">1,3 2,3 1,1</div>
+            <div>this has no type</div>
+            </div>
+
+        '''
+        div,div_end = make_html_tags("div")
+
+        # only match div tag having a type attribute with value "grid"
+        div_grid = div().set_parse_action(with_attribute(type="grid"))
+        grid_expr = div_grid + SkipTo(div | div_end)("body")
+        for grid_header in grid_expr.search_string(html):
+            print(grid_header.body)
+
+        # construct a match with any div tag having a type attribute, regardless of the value
+        div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE))
+        div_expr = div_any_type + SkipTo(div | div_end)("body")
+        for div_header in div_expr.search_string(html):
+            print(div_header.body)
+
+    prints::
+
+        1 4 0 1 0
+
+        1 4 0 1 0
+        1,3 2,3 1,1
+    """
+    if args:
+        attrs = args[:]
+    else:
+        attrs = attr_dict.items()
+    attrs = [(k, v) for k, v in attrs]
+
+    def pa(s, l, tokens):
+        for attrName, attrValue in attrs:
+            if attrName not in tokens:
+                raise ParseException(s, l, "no matching attribute " + attrName)
+            if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue:
+                raise ParseException(
+                    s,
+                    l,
+                    "attribute {!r} has value {!r}, must be {!r}".format(
+                        attrName, tokens[attrName], attrValue
+                    ),
+                )
+
+    return pa
+
+
+with_attribute.ANY_VALUE = object()
+
+
+def with_class(classname, namespace=""):
+    """
+    Simplified version of :class:`with_attribute` when
+    matching on a div class - made difficult because ``class`` is
+    a reserved word in Python.
+
+    Example::
+
+        html = '''
+            <div>
+            Some text
+            <div class="grid">1 4 0 1 0</div>
+            <div class="graph">1,3 2,3 1,1</div>
+            <div>this &lt;div&gt; has no class</div>
+            </div>
+ + ''' + div,div_end = make_html_tags("div") + div_grid = div().set_parse_action(with_class("grid")) + + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.search_string(html): + print(grid_header.body) + + div_any_type = div().set_parse_action(with_class(withAttribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.search_string(html): + print(div_header.body) + + prints:: + + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + classattr = "{}:class".format(namespace) if namespace else "class" + return with_attribute(**{classattr: classname}) + + +# pre-PEP8 compatibility symbols +replaceWith = replace_with +removeQuotes = remove_quotes +withAttribute = with_attribute +withClass = with_class +matchOnlyAtCol = match_only_at_col diff --git a/pipenv/vendor/pyparsing/common.py b/pipenv/vendor/pyparsing/common.py new file mode 100644 index 0000000000..1859fb79cc --- /dev/null +++ b/pipenv/vendor/pyparsing/common.py @@ -0,0 +1,424 @@ +# common.py +from .core import * +from .helpers import delimited_list, any_open_tag, any_close_tag +from datetime import datetime + + +# some other useful expressions - using lower-case class name since we are really using this as a namespace +class pyparsing_common: + """Here are some common low-level expressions that may be useful in + jump-starting parser development: + + - numeric forms (:class:`integers`, :class:`reals`, + :class:`scientific notation`) + - common :class:`programming identifiers` + - network addresses (:class:`MAC`, + :class:`IPv4`, :class:`IPv6`) + - ISO8601 :class:`dates` and + :class:`datetime` + - :class:`UUID` + - :class:`comma-separated list` + - :class:`url` + + Parse actions: + + - :class:`convertToInteger` + - :class:`convertToFloat` + - :class:`convertToDate` + - :class:`convertToDatetime` + - :class:`stripHTMLTags` + - :class:`upcaseTokens` + - :class:`downcaseTokens` + + Example:: + + pyparsing_common.number.runTests(''' + # any int or real number, returned as the appropriate type + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.fnumber.runTests(''' + # any int or real number, returned as float + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.hex_integer.runTests(''' + # hex numbers + 100 + FF + ''') + + pyparsing_common.fraction.runTests(''' + # fractions + 1/2 + -3/4 + ''') + + pyparsing_common.mixed_integer.runTests(''' + # mixed fractions + 1 + 1/2 + -3/4 + 1-3/4 + ''') + + import uuid + pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) + pyparsing_common.uuid.runTests(''' + # uuid + 12345678-1234-5678-1234-567812345678 + ''') + + prints:: + + # any int or real number, returned as the appropriate type + 100 + [100] + + -100 + [-100] + + +100 + [100] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # any int or real number, returned as float + 100 + [100.0] + + -100 + [-100.0] + + +100 + [100.0] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # hex numbers + 100 + [256] + + FF + [255] + + # fractions + 1/2 + [0.5] + + -3/4 + [-0.75] + + # mixed fractions + 1 + [1] + + 1/2 + [0.5] + + -3/4 + [-0.75] + + 1-3/4 + [1.75] + + # uuid + 12345678-1234-5678-1234-567812345678 + [UUID('12345678-1234-5678-1234-567812345678')] + """ + + convert_to_integer = token_map(int) + """ + Parse action for converting parsed integers to Python int + """ + + convert_to_float = token_map(float) + """ + Parse action for converting parsed numbers to Python float + """ + 
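A minimal sketch of how these ``token_map``-based converters behave at parse time, written against a stock ``pyparsing`` 3.x install rather than this vendored copy (which imports as ``pipenv.vendor.pyparsing``)::

    import pyparsing as pp

    # pyparsing_common.integer carries token_map(int) as its parse action,
    # so the matched text arrives in the results already converted to int
    res = pp.pyparsing_common.integer.parse_string("1234")
    assert res[0] == 1234 and isinstance(res[0], int)

    # the same wrapper can attach any callable (plus extra args) to an expression
    hex_byte = pp.Word(pp.hexnums, exact=2).set_parse_action(pp.token_map(int, 16))
    print(hex_byte.parse_string("ff"))  # -> [255]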
+ integer = Word(nums).set_name("integer").set_parse_action(convert_to_integer) + """expression that parses an unsigned integer, returns an int""" + + hex_integer = ( + Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16)) + ) + """expression that parses a hexadecimal integer, returns an int""" + + signed_integer = ( + Regex(r"[+-]?\d+") + .set_name("signed integer") + .set_parse_action(convert_to_integer) + ) + """expression that parses an integer with optional leading sign, returns an int""" + + fraction = ( + signed_integer().set_parse_action(convert_to_float) + + "/" + + signed_integer().set_parse_action(convert_to_float) + ).set_name("fraction") + """fractional expression of an integer divided by an integer, returns a float""" + fraction.add_parse_action(lambda tt: tt[0] / tt[-1]) + + mixed_integer = ( + fraction | signed_integer + Opt(Opt("-").suppress() + fraction) + ).set_name("fraction or mixed integer-fraction") + """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" + mixed_integer.add_parse_action(sum) + + real = ( + Regex(r"[+-]?(?:\d+\.\d*|\.\d+)") + .set_name("real number") + .set_parse_action(convert_to_float) + ) + """expression that parses a floating point number and returns a float""" + + sci_real = ( + Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)") + .set_name("real number with scientific notation") + .set_parse_action(convert_to_float) + ) + """expression that parses a floating point number with optional + scientific notation and returns a float""" + + # streamlining this expression makes the docs nicer-looking + number = (sci_real | real | signed_integer).setName("number").streamline() + """any numeric expression, returns the corresponding Python type""" + + fnumber = ( + Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?") + .set_name("fnumber") + .set_parse_action(convert_to_float) + ) + """any int or real number, returned as float""" + + identifier = Word(identchars, identbodychars).set_name("identifier") + """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" + + ipv4_address = Regex( + r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}" + ).set_name("IPv4 address") + "IPv4 address (``0.0.0.0 - 255.255.255.255``)" + + _ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer") + _full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name( + "full IPv6 address" + ) + _short_ipv6_address = ( + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6)) + + "::" + + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6)) + ).set_name("short IPv6 address") + _short_ipv6_address.add_condition( + lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8 + ) + _mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address") + ipv6_address = Combine( + (_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name( + "IPv6 address" + ) + ).set_name("IPv6 address") + "IPv6 address (long, short, or mixed form)" + + mac_address = Regex( + r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}" + ).set_name("MAC address") + "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" + + @staticmethod + def convert_to_date(fmt: str = "%Y-%m-%d"): + """ + Helper to create a parse action for converting parsed date string to Python datetime.date + + Params - + - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``) + + Example:: + + date_expr = pyparsing_common.iso8601_date.copy() + date_expr.setParseAction(pyparsing_common.convertToDate()) + print(date_expr.parseString("1999-12-31")) + + prints:: + + [datetime.date(1999, 12, 31)] + """ + + def cvt_fn(ss, ll, tt): + try: + return datetime.strptime(tt[0], fmt).date() + except ValueError as ve: + raise ParseException(ss, ll, str(ve)) + + return cvt_fn + + @staticmethod + def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"): + """Helper to create a parse action for converting parsed + datetime string to Python datetime.datetime + + Params - + - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``) + + Example:: + + dt_expr = pyparsing_common.iso8601_datetime.copy() + dt_expr.setParseAction(pyparsing_common.convertToDatetime()) + print(dt_expr.parseString("1999-12-31T23:59:59.999")) + + prints:: + + [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] + """ + + def cvt_fn(s, l, t): + try: + return datetime.strptime(t[0], fmt) + except ValueError as ve: + raise ParseException(s, l, str(ve)) + + return cvt_fn + + iso8601_date = Regex( + r"(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?" + ).set_name("ISO8601 date") + "ISO8601 date (``yyyy-mm-dd``)" + + iso8601_datetime = Regex( + r"(?P\d{4})-(?P\d\d)-(?P\d\d)[T ](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?" + ).set_name("ISO8601 datetime") + "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``" + + uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID") + "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)" + + _html_stripper = any_open_tag.suppress() | any_close_tag.suppress() + + @staticmethod + def strip_html_tags(s: str, l: int, tokens: ParseResults): + """Parse action to remove HTML tags from web page HTML source + + Example:: + + # strip HTML links from normal text + text = 'More info at the pyparsing wiki page' + td, td_end = makeHTMLTags("TD") + table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end + print(table_text.parseString(text).body) + + Prints:: + + More info at the pyparsing wiki page + """ + return pyparsing_common._html_stripper.transform_string(tokens[0]) + + _commasepitem = ( + Combine( + OneOrMore( + ~Literal(",") + + ~LineEnd() + + Word(printables, exclude_chars=",") + + Opt(White(" \t") + ~FollowedBy(LineEnd() | ",")) + ) + ) + .streamline() + .set_name("commaItem") + ) + comma_separated_list = delimited_list( + Opt(quoted_string.copy() | _commasepitem, default="") + ).set_name("comma separated list") + """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" + + upcase_tokens = staticmethod(token_map(lambda t: t.upper())) + """Parse action to convert tokens to upper case.""" + + downcase_tokens = staticmethod(token_map(lambda t: t.lower())) + """Parse action to convert tokens to lower case.""" + + # fmt: off + url = Regex( + # https://mathiasbynens.be/demo/url-regex + # https://gist.github.com/dperini/729294 + r"^" + + # protocol identifier (optional) + # short syntax // still required + r"(?:(?:(?Phttps?|ftp):)?\/\/)" + + # user:pass BasicAuth (optional) + 
r"(?:(?P\S+(?::\S*)?)@)?" + + r"(?P" + + # IP address exclusion + # private & local networks + r"(?!(?:10|127)(?:\.\d{1,3}){3})" + + r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" + + r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" + + # IP address dotted notation octets + # excludes loopback network 0.0.0.0 + # excludes reserved space >= 224.0.0.0 + # excludes network & broadcast addresses + # (first & last IP address of each class) + r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" + + r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" + + r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" + + r"|" + + # host & domain names, may end with dot + # can be replaced by a shortest alternative + # (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+ + r"(?:" + + r"(?:" + + r"[a-z0-9\u00a1-\uffff]" + + r"[a-z0-9\u00a1-\uffff_-]{0,62}" + + r")?" + + r"[a-z0-9\u00a1-\uffff]\." + + r")+" + + # TLD identifier name, may end with dot + r"(?:[a-z\u00a1-\uffff]{2,}\.?)" + + r")" + + # port number (optional) + r"(:(?P\d{2,5}))?" + + # resource path (optional) + r"(?P\/[^?# ]*)?" + + # query string (optional) + r"(\?(?P[^#]*))?" + + # fragment (optional) + r"(#(?P\S*))?" + + r"$" + ).set_name("url") + # fmt: on + + # pre-PEP8 compatibility names + convertToInteger = convert_to_integer + convertToFloat = convert_to_float + convertToDate = convert_to_date + convertToDatetime = convert_to_datetime + stripHTMLTags = strip_html_tags + upcaseTokens = upcase_tokens + downcaseTokens = downcase_tokens + + +_builtin_exprs = [ + v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement) +] diff --git a/pipenv/vendor/pyparsing/core.py b/pipenv/vendor/pyparsing/core.py new file mode 100644 index 0000000000..e8d9ea9c60 --- /dev/null +++ b/pipenv/vendor/pyparsing/core.py @@ -0,0 +1,5789 @@ +# +# core.py +# +import os +from typing import ( + Optional as OptionalType, + Iterable as IterableType, + NamedTuple, + Union, + Callable, + Any, + Generator, + Tuple, + List, + TextIO, + Set, + Dict as DictType, + Sequence, +) +from abc import ABC, abstractmethod +from enum import Enum +import string +import copy +import warnings +import re +import sre_constants +import sys +from collections.abc import Iterable +import traceback +import types +from operator import itemgetter +from functools import wraps +from threading import RLock +from pathlib import Path + +from .util import ( + _FifoCache, + _UnboundedCache, + __config_flags, + _collapse_string_to_ranges, + _escape_regex_range_chars, + _bslash, + _flatten, + LRUMemo as _LRUMemo, + UnboundedMemo as _UnboundedMemo, +) +from .exceptions import * +from .actions import * +from .results import ParseResults, _ParseResultsWithOffset +from .unicode import pyparsing_unicode + +_MAX_INT = sys.maxsize +str_type: Tuple[type, ...] = (str, bytes) + +# +# Copyright (c) 2003-2021 Paul T. McGuire +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# + + +class __compat__(__config_flags): + """ + A cross-version compatibility configuration for pyparsing features that will be + released in a future version. By setting values in this configuration to True, + those features can be enabled in prior versions for compatibility development + and testing. + + - ``collect_all_And_tokens`` - flag to enable fix for Issue #63 that fixes erroneous grouping + of results names when an :class:`And` expression is nested within an :class:`Or` or :class:`MatchFirst`; + maintained for compatibility, but setting to ``False`` no longer restores pre-2.3.1 + behavior + """ + + _type_desc = "compatibility" + + collect_all_And_tokens = True + + _all_names = [__ for __ in locals() if not __.startswith("_")] + _fixed_names = """ + collect_all_And_tokens + """.split() + + +class __diag__(__config_flags): + _type_desc = "diagnostic" + + warn_multiple_tokens_in_named_alternation = False + warn_ungrouped_named_tokens_in_collection = False + warn_name_set_on_empty_Forward = False + warn_on_parse_using_empty_Forward = False + warn_on_assignment_to_Forward = False + warn_on_multiple_string_args_to_oneof = False + warn_on_match_first_with_lshift_operator = False + enable_debug_on_named_expressions = False + + _all_names = [__ for __ in locals() if not __.startswith("_")] + _warning_names = [name for name in _all_names if name.startswith("warn")] + _debug_names = [name for name in _all_names if name.startswith("enable_debug")] + + @classmethod + def enable_all_warnings(cls) -> None: + for name in cls._warning_names: + cls.enable(name) + + +class Diagnostics(Enum): + """ + Diagnostic configuration (all default to disabled) + - ``warn_multiple_tokens_in_named_alternation`` - flag to enable warnings when a results + name is defined on a :class:`MatchFirst` or :class:`Or` expression with one or more :class:`And` subexpressions + - ``warn_ungrouped_named_tokens_in_collection`` - flag to enable warnings when a results + name is defined on a containing expression with ungrouped subexpressions that also + have results names + - ``warn_name_set_on_empty_Forward`` - flag to enable warnings when a :class:`Forward` is defined + with a results name, but has no contents defined + - ``warn_on_parse_using_empty_Forward`` - flag to enable warnings when a :class:`Forward` is + defined in a grammar but has never had an expression attached to it + - ``warn_on_assignment_to_Forward`` - flag to enable warnings when a :class:`Forward` is defined + but is overwritten by assigning using ``'='`` instead of ``'<<='`` or ``'<<'`` + - ``warn_on_multiple_string_args_to_oneof`` - flag to enable warnings when :class:`one_of` is + incorrectly called with multiple str arguments + - ``enable_debug_on_named_expressions`` - flag to auto-enable debug on all subsequent + calls to :class:`ParserElement.set_name` + + Diagnostics are enabled/disabled by calling :class:`enable_diag` and :class:`disable_diag`. + All warnings can be enabled by calling :class:`enable_all_warnings`. 
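These flags are plain on/off switches; a brief sketch of turning one on via the ``enable_diag``/``Diagnostics`` pair defined below, assuming a stock ``pyparsing`` import::

    import pyparsing as pp

    # warn when a results name is set on a Forward that has no contents yet
    pp.enable_diag(pp.Diagnostics.warn_name_set_on_empty_Forward)

    placeholder = pp.Forward()
    named = placeholder("value")  # should now emit a UserWarning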
+ """ + + warn_multiple_tokens_in_named_alternation = 0 + warn_ungrouped_named_tokens_in_collection = 1 + warn_name_set_on_empty_Forward = 2 + warn_on_parse_using_empty_Forward = 3 + warn_on_assignment_to_Forward = 4 + warn_on_multiple_string_args_to_oneof = 5 + warn_on_match_first_with_lshift_operator = 6 + enable_debug_on_named_expressions = 7 + + +def enable_diag(diag_enum: Diagnostics) -> None: + """ + Enable a global pyparsing diagnostic flag (see :class:`Diagnostics`). + """ + __diag__.enable(diag_enum.name) + + +def disable_diag(diag_enum: Diagnostics) -> None: + """ + Disable a global pyparsing diagnostic flag (see :class:`Diagnostics`). + """ + __diag__.disable(diag_enum.name) + + +def enable_all_warnings() -> None: + """ + Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`). + """ + __diag__.enable_all_warnings() + + +# hide abstract class +del __config_flags + + +def _should_enable_warnings( + cmd_line_warn_options: IterableType[str], warn_env_var: OptionalType[str] +) -> bool: + enable = bool(warn_env_var) + for warn_opt in cmd_line_warn_options: + w_action, w_message, w_category, w_module, w_line = (warn_opt + "::::").split( + ":" + )[:5] + if not w_action.lower().startswith("i") and ( + not (w_message or w_category or w_module) or w_module == "pyparsing" + ): + enable = True + elif w_action.lower().startswith("i") and w_module in ("pyparsing", ""): + enable = False + return enable + + +if _should_enable_warnings( + sys.warnoptions, os.environ.get("PYPARSINGENABLEALLWARNINGS") +): + enable_all_warnings() + + +# build list of single arg builtins, that can be used as parse actions +_single_arg_builtins = { + sum, + len, + sorted, + reversed, + list, + tuple, + set, + any, + all, + min, + max, +} + +_generatorType = types.GeneratorType +ParseAction = Union[ + Callable[[], Any], + Callable[[ParseResults], Any], + Callable[[int, ParseResults], Any], + Callable[[str, int, ParseResults], Any], +] +ParseCondition = Union[ + Callable[[], bool], + Callable[[ParseResults], bool], + Callable[[int, ParseResults], bool], + Callable[[str, int, ParseResults], bool], +] +ParseFailAction = Callable[[str, int, "ParserElement", Exception], None] +DebugStartAction = Callable[[str, int, "ParserElement", bool], None] +DebugSuccessAction = Callable[ + [str, int, int, "ParserElement", ParseResults, bool], None +] +DebugExceptionAction = Callable[[str, int, "ParserElement", Exception, bool], None] + + +alphas = string.ascii_uppercase + string.ascii_lowercase +identchars = pyparsing_unicode.Latin1.identchars +identbodychars = pyparsing_unicode.Latin1.identbodychars +nums = "0123456789" +hexnums = nums + "ABCDEFabcdef" +alphanums = alphas + nums +printables = "".join([c for c in string.printable if c not in string.whitespace]) + +_trim_arity_call_line = None + + +def _trim_arity(func, maxargs=2): + """decorator to trim function calls to match the arity of the target""" + global _trim_arity_call_line + + if func in _single_arg_builtins: + return lambda s, l, t: func(t) + + limit = 0 + found_arity = False + + def extract_tb(tb, limit=0): + frames = traceback.extract_tb(tb, limit=limit) + frame_summary = frames[-1] + return [frame_summary[:2]] + + # synthesize what would be returned by traceback.extract_stack at the call to + # user's parse action 'func', so that we don't incur call penalty at parse time + + LINE_DIFF = 11 + # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND + # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! 
+ _trim_arity_call_line = ( + _trim_arity_call_line or traceback.extract_stack(limit=2)[-1] + ) + pa_call_line_synth = ( + _trim_arity_call_line[0], + _trim_arity_call_line[1] + LINE_DIFF, + ) + + def wrapper(*args): + nonlocal found_arity, limit + while 1: + try: + ret = func(*args[limit:]) + found_arity = True + return ret + except TypeError as te: + # re-raise TypeErrors if they did not come from our arity testing + if found_arity: + raise + else: + tb = te.__traceback__ + trim_arity_type_error = ( + extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth + ) + del tb + + if trim_arity_type_error: + if limit <= maxargs: + limit += 1 + continue + + raise + + # copy func name to wrapper for sensible debug output + # (can't use functools.wraps, since that messes with function signature) + func_name = getattr(func, "__name__", getattr(func, "__class__").__name__) + wrapper.__name__ = func_name + + return wrapper + + +def condition_as_parse_action( + fn: ParseCondition, message: str = None, fatal: bool = False +) -> ParseAction: + """ + Function to convert a simple predicate function that returns ``True`` or ``False`` + into a parse action. Can be used in places when a parse action is required + and :class:`ParserElement.add_condition` cannot be used (such as when adding a condition + to an operator level in :class:`infix_notation`). + + Optional keyword arguments: + + - ``message`` - define a custom message to be used in the raised exception + - ``fatal`` - if True, will raise :class:`ParseFatalException` to stop parsing immediately; + otherwise will raise :class:`ParseException` + + """ + msg = message if message is not None else "failed user-defined condition" + exc_type = ParseFatalException if fatal else ParseException + fn = _trim_arity(fn) + + @wraps(fn) + def pa(s, l, t): + if not bool(fn(s, l, t)): + raise exc_type(s, l, msg) + + return pa + + +def _default_start_debug_action( + instring: str, loc: int, expr: "ParserElement", cache_hit: bool = False +): + cache_hit_str = "*" if cache_hit else "" + print( + ( + "{}Match {} at loc {}({},{})\n {}\n {}^".format( + cache_hit_str, + expr, + loc, + lineno(loc, instring), + col(loc, instring), + line(loc, instring), + " " * (col(loc, instring) - 1), + ) + ) + ) + + +def _default_success_debug_action( + instring: str, + startloc: int, + endloc: int, + expr: "ParserElement", + toks: ParseResults, + cache_hit: bool = False, +): + cache_hit_str = "*" if cache_hit else "" + print("{}Matched {} -> {}".format(cache_hit_str, expr, toks.as_list())) + + +def _default_exception_debug_action( + instring: str, + loc: int, + expr: "ParserElement", + exc: Exception, + cache_hit: bool = False, +): + cache_hit_str = "*" if cache_hit else "" + print( + "{}Match {} failed, {} raised: {}".format( + cache_hit_str, expr, type(exc).__name__, exc + ) + ) + + +def null_debug_action(*args): + """'Do-nothing' debug action, to suppress debugging output during parsing.""" + + +class ParserElement(ABC): + """Abstract base level parser element class.""" + + DEFAULT_WHITE_CHARS: str = " \n\t\r" + verbose_stacktrace: bool = False + _literalStringClass: OptionalType[type] = None + + @staticmethod + def set_default_whitespace_chars(chars: str) -> None: + r""" + Overrides the default whitespace chars + + Example:: + + # default whitespace chars are space, and newline + OneOrMore(Word(alphas)).parse_string("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] + + # change to just treat newline as significant + ParserElement.set_default_whitespace_chars(" \t") + 
OneOrMore(Word(alphas)).parse_string("abc def\nghi jkl") # -> ['abc', 'def'] + """ + ParserElement.DEFAULT_WHITE_CHARS = chars + + # update whitespace all parse expressions defined in this module + for expr in _builtin_exprs: + if expr.copyDefaultWhiteChars: + expr.whiteChars = set(chars) + + @staticmethod + def inline_literals_using(cls: type) -> None: + """ + Set class to be used for inclusion of string literals into a parser. + + Example:: + + # default literal class used is Literal + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + date_str.parse_string("1999/12/31") # -> ['1999', '/', '12', '/', '31'] + + + # change to Suppress + ParserElement.inline_literals_using(Suppress) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + date_str.parse_string("1999/12/31") # -> ['1999', '12', '31'] + """ + ParserElement._literalStringClass = cls + + class DebugActions(NamedTuple): + debug_try: OptionalType[DebugStartAction] + debug_match: OptionalType[DebugSuccessAction] + debug_fail: OptionalType[DebugExceptionAction] + + def __init__(self, savelist: bool = False): + self.parseAction: List[ParseAction] = list() + self.failAction: OptionalType[ParseFailAction] = None + self.customName = None + self._defaultName = None + self.resultsName = None + self.saveAsList = savelist + self.skipWhitespace = True + self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) + self.copyDefaultWhiteChars = True + # used when checking for left-recursion + self.mayReturnEmpty = False + self.keepTabs = False + self.ignoreExprs: List["ParserElement"] = list() + self.debug = False + self.streamlined = False + # optimize exception handling for subclasses that don't advance parse index + self.mayIndexError = True + self.errmsg = "" + # mark results names as modal (report only last) or cumulative (list all) + self.modalResults = True + # custom debug actions + self.debugActions = self.DebugActions(None, None, None) + self.re = None + # avoid redundant calls to preParse + self.callPreparse = True + self.callDuringTry = False + self.suppress_warnings_: List[Diagnostics] = [] + + def suppress_warning(self, warning_type: Diagnostics) -> "ParserElement": + """ + Suppress warnings emitted for a particular diagnostic on this expression. + + Example:: + + base = pp.Forward() + base.suppress_warning(Diagnostics.warn_on_parse_using_empty_Forward) + + # statement would normally raise a warning, but is now suppressed + print(base.parseString("x")) + + """ + self.suppress_warnings_.append(warning_type) + return self + + def copy(self) -> "ParserElement": + """ + Make a copy of this :class:`ParserElement`. Useful for defining + different parse actions for the same parsing pattern, using copies of + the original parse element. 
+ + Example:: + + integer = Word(nums).set_parse_action(lambda toks: int(toks[0])) + integerK = integer.copy().add_parse_action(lambda toks: toks[0] * 1024) + Suppress("K") + integerM = integer.copy().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") + + print(OneOrMore(integerK | integerM | integer).parse_string("5K 100 640K 256M")) + + prints:: + + [5120, 100, 655360, 268435456] + + Equivalent form of ``expr.copy()`` is just ``expr()``:: + + integerM = integer().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") + """ + cpy = copy.copy(self) + cpy.parseAction = self.parseAction[:] + cpy.ignoreExprs = self.ignoreExprs[:] + if self.copyDefaultWhiteChars: + cpy.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) + return cpy + + def set_results_name( + self, name: str, list_all_matches: bool = False, *, listAllMatches: bool = False + ) -> "ParserElement": + """ + Define name for referencing matching tokens as a nested attribute + of the returned parse results. + + Normally, results names are assigned as you would assign keys in a dict: + any existing value is overwritten by later values. If it is necessary to + keep all values captured for a particular results name, call ``set_results_name`` + with ``list_all_matches`` = True. + + NOTE: ``set_results_name`` returns a *copy* of the original :class:`ParserElement` object; + this is so that the client can define a basic element, such as an + integer, and reference it in multiple places with different names. + + You can also set results names using the abbreviated syntax, + ``expr("name")`` in place of ``expr.set_results_name("name")`` + - see :class:`__call__`. If ``list_all_matches`` is required, use + ``expr("name*")``. + + Example:: + + date_str = (integer.set_results_name("year") + '/' + + integer.set_results_name("month") + '/' + + integer.set_results_name("day")) + + # equivalent form: + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + """ + listAllMatches = listAllMatches or list_all_matches + return self._setResultsName(name, listAllMatches) + + def _setResultsName(self, name, listAllMatches=False): + if name is None: + return self + newself = self.copy() + if name.endswith("*"): + name = name[:-1] + listAllMatches = True + newself.resultsName = name + newself.modalResults = not listAllMatches + return newself + + def set_break(self, break_flag: bool = True) -> "ParserElement": + """ + Method to invoke the Python pdb debugger when this element is + about to be parsed. Set ``break_flag`` to ``True`` to enable, ``False`` to + disable. + """ + if break_flag: + _parseMethod = self._parse + + def breaker(instring, loc, doActions=True, callPreParse=True): + import pdb + + # this call to pdb.set_trace() is intentional, not a checkin error + pdb.set_trace() + return _parseMethod(instring, loc, doActions, callPreParse) + + breaker._originalParseMethod = _parseMethod + self._parse = breaker + else: + if hasattr(self._parse, "_originalParseMethod"): + self._parse = self._parse._originalParseMethod + return self + + def set_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement": + """ + Define one or more actions to perform when successfully matching parse element definition. + + Parse actions can be called to perform data conversions, do extra validation, + update external data structures, or enhance or replace the parsed tokens. 
+ Each parse action ``fn`` is a callable method with 0-3 arguments, called as + ``fn(s, loc, toks)`` , ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where: + + - s = the original string being parsed (see note below) + - loc = the location of the matching substring + - toks = a list of the matched tokens, packaged as a :class:`ParseResults` object + + The parsed tokens are passed to the parse action as ParseResults. They can be + modified in place using list-style append, extend, and pop operations to update + the parsed list elements; and with dictionary-style item set and del operations + to add, update, or remove any named results. If the tokens are modified in place, + it is not necessary to return them with a return statement. + + Parse actions can also completely replace the given tokens, with another ``ParseResults`` + object, or with some entirely different object (common for parse actions that perform data + conversions). A convenient way to build a new parse result is to define the values + using a dict, and then create the return value using :class:`ParseResults.from_dict`. + + If None is passed as the ``fn`` parse action, all previously added parse actions for this + expression are cleared. + + Optional keyword arguments: + + - call_during_try = (default= ``False``) indicate if parse action should be run during + lookaheads and alternate testing. For parse actions that have side effects, it is + important to only call the parse action once it is determined that it is being + called as part of a successful parse. For parse actions that perform additional + validation, then call_during_try should be passed as True, so that the validation + code is included in the preliminary "try" parses. + + Note: the default parsing behavior is to expand tabs in the input string + before starting the parsing process. See :class:`parse_string` for more + information on parsing strings containing ```` s, and suggested + methods to maintain a consistent view of the parsed string, the parse + location, and line and column positions within the parsed string. + + Example:: + + # parse dates in the form YYYY/MM/DD + + # use parse action to convert toks from str to int at parse time + def convert_to_int(toks): + return int(toks[0]) + + # use a parse action to verify that the date is a valid date + def is_valid_date(instring, loc, toks): + from datetime import date + year, month, day = toks[::2] + try: + date(year, month, day) + except ValueError: + raise ParseException(instring, loc, "invalid date given") + + integer = Word(nums) + date_str = integer + '/' + integer + '/' + integer + + # add parse actions + integer.set_parse_action(convert_to_int) + date_str.set_parse_action(is_valid_date) + + # note that integer fields are now ints, not strings + date_str.run_tests(''' + # successful parse - note that integer fields were converted to ints + 1999/12/31 + + # fail - invalid date + 1999/13/31 + ''') + """ + if list(fns) == [None]: + self.parseAction = [] + else: + if not all(callable(fn) for fn in fns): + raise TypeError("parse actions must be callable") + self.parseAction = [_trim_arity(fn) for fn in fns] + self.callDuringTry = kwargs.get( + "call_during_try", kwargs.get("callDuringTry", False) + ) + return self + + def add_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement": + """ + Add one or more parse actions to expression's list of parse actions. See :class:`set_parse_action`. + + See examples in :class:`copy`. 
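Since each parse action may declare zero to three of the ``(s, loc, toks)`` arguments, all of the following signatures are accepted; a minimal sketch against a stock ``pyparsing`` 3.x install (the vendored copy imports as ``pipenv.vendor.pyparsing``)::

    import pyparsing as pp

    integer = pp.Word(pp.nums)

    # pyparsing trims the call to fit whichever arity the callable declares
    integer.set_parse_action(lambda toks: int(toks[0]))    # fn(toks)
    integer.add_parse_action(lambda loc, toks: toks)       # fn(loc, toks)
    integer.add_parse_action(lambda s, loc, toks: toks)    # fn(s, loc, toks)

    print(integer.parse_string("42"))  # -> [42]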
+ """ + self.parseAction += [_trim_arity(fn) for fn in fns] + self.callDuringTry = self.callDuringTry or kwargs.get( + "call_during_try", kwargs.get("callDuringTry", False) + ) + return self + + def add_condition(self, *fns: ParseCondition, **kwargs) -> "ParserElement": + """Add a boolean predicate function to expression's list of parse actions. See + :class:`set_parse_action` for function call signatures. Unlike ``set_parse_action``, + functions passed to ``add_condition`` need to return boolean success/fail of the condition. + + Optional keyword arguments: + + - message = define a custom message to be used in the raised exception + - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise + ParseException + - call_during_try = boolean to indicate if this method should be called during internal tryParse calls, + default=False + + Example:: + + integer = Word(nums).set_parse_action(lambda toks: int(toks[0])) + year_int = integer.copy() + year_int.add_condition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") + date_str = year_int + '/' + integer + '/' + integer + + result = date_str.parse_string("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), + (line:1, col:1) + """ + for fn in fns: + self.parseAction.append( + condition_as_parse_action( + fn, message=kwargs.get("message"), fatal=kwargs.get("fatal", False) + ) + ) + + self.callDuringTry = self.callDuringTry or kwargs.get( + "call_during_try", kwargs.get("callDuringTry", False) + ) + return self + + def set_fail_action(self, fn: ParseFailAction) -> "ParserElement": + """ + Define action to perform if parsing fails at this expression. + Fail acton fn is a callable function that takes the arguments + ``fn(s, loc, expr, err)`` where: + + - s = string being parsed + - loc = location where expression match was attempted and failed + - expr = the parse expression that failed + - err = the exception thrown + + The function returns no value. 
It may throw :class:`ParseFatalException` + if it is desired to stop parsing immediately.""" + self.failAction = fn + return self + + def _skipIgnorables(self, instring, loc): + exprsFound = True + while exprsFound: + exprsFound = False + for e in self.ignoreExprs: + try: + while 1: + loc, dummy = e._parse(instring, loc) + exprsFound = True + except ParseException: + pass + return loc + + def preParse(self, instring, loc): + if self.ignoreExprs: + loc = self._skipIgnorables(instring, loc) + + if self.skipWhitespace: + instrlen = len(instring) + white_chars = self.whiteChars + while loc < instrlen and instring[loc] in white_chars: + loc += 1 + + return loc + + def parseImpl(self, instring, loc, doActions=True): + return loc, [] + + def postParse(self, instring, loc, tokenlist): + return tokenlist + + # @profile + def _parseNoCache( + self, instring, loc, doActions=True, callPreParse=True + ) -> Tuple[int, ParseResults]: + TRY, MATCH, FAIL = 0, 1, 2 + debugging = self.debug # and doActions) + len_instring = len(instring) + + if debugging or self.failAction: + # print("Match {} at loc {}({}, {})".format(self, loc, lineno(loc, instring), col(loc, instring))) + try: + if callPreParse and self.callPreparse: + pre_loc = self.preParse(instring, loc) + else: + pre_loc = loc + tokens_start = pre_loc + if self.debugActions.debug_try: + self.debugActions.debug_try(instring, tokens_start, self, False) + if self.mayIndexError or pre_loc >= len_instring: + try: + loc, tokens = self.parseImpl(instring, pre_loc, doActions) + except IndexError: + raise ParseException(instring, len_instring, self.errmsg, self) + else: + loc, tokens = self.parseImpl(instring, pre_loc, doActions) + except Exception as err: + # print("Exception raised:", err) + if self.debugActions.debug_fail: + self.debugActions.debug_fail( + instring, tokens_start, self, err, False + ) + if self.failAction: + self.failAction(instring, tokens_start, self, err) + raise + else: + if callPreParse and self.callPreparse: + pre_loc = self.preParse(instring, loc) + else: + pre_loc = loc + tokens_start = pre_loc + if self.mayIndexError or pre_loc >= len_instring: + try: + loc, tokens = self.parseImpl(instring, pre_loc, doActions) + except IndexError: + raise ParseException(instring, len_instring, self.errmsg, self) + else: + loc, tokens = self.parseImpl(instring, pre_loc, doActions) + + tokens = self.postParse(instring, loc, tokens) + + ret_tokens = ParseResults( + tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults + ) + if self.parseAction and (doActions or self.callDuringTry): + if debugging: + try: + for fn in self.parseAction: + try: + tokens = fn(instring, tokens_start, ret_tokens) + except IndexError as parse_action_exc: + exc = ParseException("exception raised in parse action") + raise exc from parse_action_exc + + if tokens is not None and tokens is not ret_tokens: + ret_tokens = ParseResults( + tokens, + self.resultsName, + asList=self.saveAsList + and isinstance(tokens, (ParseResults, list)), + modal=self.modalResults, + ) + except Exception as err: + # print "Exception raised in user parse action:", err + if self.debugActions.debug_fail: + self.debugActions.debug_fail( + instring, tokens_start, self, err, False + ) + raise + else: + for fn in self.parseAction: + try: + tokens = fn(instring, tokens_start, ret_tokens) + except IndexError as parse_action_exc: + exc = ParseException("exception raised in parse action") + raise exc from parse_action_exc + + if tokens is not None and tokens is not ret_tokens: + ret_tokens = 
ParseResults( + tokens, + self.resultsName, + asList=self.saveAsList + and isinstance(tokens, (ParseResults, list)), + modal=self.modalResults, + ) + if debugging: + # print("Matched", self, "->", ret_tokens.as_list()) + if self.debugActions.debug_match: + self.debugActions.debug_match( + instring, tokens_start, loc, self, ret_tokens, False + ) + + return loc, ret_tokens + + def try_parse(self, instring: str, loc: int, raise_fatal: bool = False) -> int: + try: + return self._parse(instring, loc, doActions=False)[0] + except ParseFatalException: + if raise_fatal: + raise + raise ParseException(instring, loc, self.errmsg, self) + + def can_parse_next(self, instring: str, loc: int) -> bool: + try: + self.try_parse(instring, loc) + except (ParseException, IndexError): + return False + else: + return True + + # cache for left-recursion in Forward references + recursion_lock = RLock() + recursion_memos: DictType[ + Tuple[int, "Forward", bool], Tuple[int, Union[ParseResults, Exception]] + ] = {} + + # argument cache for optimizing repeated calls when backtracking through recursive expressions + packrat_cache = ( + {} + ) # this is set later by enabled_packrat(); this is here so that reset_cache() doesn't fail + packrat_cache_lock = RLock() + packrat_cache_stats = [0, 0] + + # this method gets repeatedly called during backtracking with the same arguments - + # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression + def _parseCache( + self, instring, loc, doActions=True, callPreParse=True + ) -> Tuple[int, ParseResults]: + HIT, MISS = 0, 1 + TRY, MATCH, FAIL = 0, 1, 2 + lookup = (self, instring, loc, callPreParse, doActions) + with ParserElement.packrat_cache_lock: + cache = ParserElement.packrat_cache + value = cache.get(lookup) + if value is cache.not_in_cache: + ParserElement.packrat_cache_stats[MISS] += 1 + try: + value = self._parseNoCache(instring, loc, doActions, callPreParse) + except ParseBaseException as pe: + # cache a copy of the exception, without the traceback + cache.set(lookup, pe.__class__(*pe.args)) + raise + else: + cache.set(lookup, (value[0], value[1].copy(), loc)) + return value + else: + ParserElement.packrat_cache_stats[HIT] += 1 + if self.debug and self.debugActions.debug_try: + try: + self.debugActions.debug_try(instring, loc, self, cache_hit=True) + except TypeError: + pass + if isinstance(value, Exception): + if self.debug and self.debugActions.debug_fail: + try: + self.debugActions.debug_fail( + instring, loc, self, value, cache_hit=True + ) + except TypeError: + pass + raise value + + loc_, result, endloc = value[0], value[1].copy(), value[2] + if self.debug and self.debugActions.debug_match: + try: + self.debugActions.debug_match( + instring, loc_, endloc, self, result, cache_hit=True + ) + except TypeError: + pass + + return loc_, result + + _parse = _parseNoCache + + @staticmethod + def reset_cache() -> None: + ParserElement.packrat_cache.clear() + ParserElement.packrat_cache_stats[:] = [0] * len( + ParserElement.packrat_cache_stats + ) + ParserElement.recursion_memos.clear() + + _packratEnabled = False + _left_recursion_enabled = False + + @staticmethod + def disable_memoization() -> None: + """ + Disables active Packrat or Left Recursion parsing and their memoization + + This method also works if neither Packrat nor Left Recursion are enabled. + This makes it safe to call before activating Packrat nor Left Recursion + to clear any previous settings. 
+ """ + ParserElement.reset_cache() + ParserElement._left_recursion_enabled = False + ParserElement._packratEnabled = False + ParserElement._parse = ParserElement._parseNoCache + + @staticmethod + def enable_left_recursion( + cache_size_limit: OptionalType[int] = None, *, force=False + ) -> None: + """ + Enables "bounded recursion" parsing, which allows for both direct and indirect + left-recursion. During parsing, left-recursive :class:`Forward` elements are + repeatedly matched with a fixed recursion depth that is gradually increased + until finding the longest match. + + Example:: + + import pipenv.vendor.pyparsing as pp + pp.ParserElement.enable_left_recursion() + + E = pp.Forward("E") + num = pp.Word(pp.nums) + # match `num`, or `num '+' num`, or `num '+' num '+' num`, ... + E <<= E + '+' - num | num + + print(E.parse_string("1+2+3")) + + Recursion search naturally memoizes matches of ``Forward`` elements and may + thus skip reevaluation of parse actions during backtracking. This may break + programs with parse actions which rely on strict ordering of side-effects. + + Parameters: + + - cache_size_limit - (default=``None``) - memoize at most this many + ``Forward`` elements during matching; if ``None`` (the default), + memoize all ``Forward`` elements. + + Bounded Recursion parsing works similar but not identical to Packrat parsing, + thus the two cannot be used together. Use ``force=True`` to disable any + previous, conflicting settings. + """ + if force: + ParserElement.disable_memoization() + elif ParserElement._packratEnabled: + raise RuntimeError("Packrat and Bounded Recursion are not compatible") + if cache_size_limit is None: + ParserElement.recursion_memos = _UnboundedMemo() + elif cache_size_limit > 0: + ParserElement.recursion_memos = _LRUMemo(capacity=cache_size_limit) + else: + raise NotImplementedError("Memo size of %s" % cache_size_limit) + ParserElement._left_recursion_enabled = True + + @staticmethod + def enable_packrat(cache_size_limit: int = 128, *, force: bool = False) -> None: + """ + Enables "packrat" parsing, which adds memoizing to the parsing logic. + Repeated parse attempts at the same string location (which happens + often in many complex grammars) can immediately return a cached value, + instead of re-executing parsing/validating code. Memoizing is done of + both valid results and parsing exceptions. + + Parameters: + + - cache_size_limit - (default= ``128``) - if an integer value is provided + will limit the size of the packrat cache; if None is passed, then + the cache size will be unbounded; if 0 is passed, the cache will + be effectively disabled. + + This speedup may break existing programs that use parse actions that + have side-effects. For this reason, packrat parsing is disabled when + you first import pyparsing. To activate the packrat feature, your + program must call the class method :class:`ParserElement.enable_packrat`. + For best results, call ``enable_packrat()`` immediately after + importing pyparsing. + + Example:: + + import pipenv.vendor.pyparsing as pyparsing + pyparsing.ParserElement.enable_packrat() + + Packrat parsing works similar but not identical to Bounded Recursion parsing, + thus the two cannot be used together. Use ``force=True`` to disable any + previous, conflicting settings. 
+ """ + if force: + ParserElement.disable_memoization() + elif ParserElement._left_recursion_enabled: + raise RuntimeError("Packrat and Bounded Recursion are not compatible") + if not ParserElement._packratEnabled: + ParserElement._packratEnabled = True + if cache_size_limit is None: + ParserElement.packrat_cache = _UnboundedCache() + else: + ParserElement.packrat_cache = _FifoCache(cache_size_limit) + ParserElement._parse = ParserElement._parseCache + + def parse_string( + self, instring: str, parse_all: bool = False, *, parseAll: bool = False + ) -> ParseResults: + """ + Parse a string with respect to the parser definition. This function is intended as the primary interface to the + client code. + + :param instring: The input string to be parsed. + :param parse_all: If set, the entire input string must match the grammar. + :param parseAll: retained for pre-PEP8 compatibility, will be removed in a future release. + :raises ParseException: Raised if ``parse_all`` is set and the input string does not match the whole grammar. + :returns: the parsed data as a :class:`ParseResults` object, which may be accessed as a `list`, a `dict`, or + an object with attributes if the given parser includes results names. + + If the input string is required to match the entire grammar, ``parse_all`` flag must be set to ``True``. This + is also equivalent to ending the grammar with :class:`StringEnd`(). + + To report proper column numbers, ``parse_string`` operates on a copy of the input string where all tabs are + converted to spaces (8 spaces per tab, as per the default in ``string.expandtabs``). If the input string + contains tabs and the grammar uses parse actions that use the ``loc`` argument to index into the string + being parsed, one can ensure a consistent view of the input string by doing one of the following: + + - calling ``parse_with_tabs`` on your grammar before calling ``parse_string`` (see :class:`parse_with_tabs`), + - define your parse action using the full ``(s,loc,toks)`` signature, and reference the input string using the + parse action's ``s`` argument, or + - explicitly expand the tabs in your input string before calling ``parse_string``. + + Examples: + + By default, partial matches are OK. + + >>> res = Word('a').parse_string('aaaaabaaa') + >>> print(res) + ['aaaaa'] + + The parsing behavior varies by the inheriting class of this abstract class. Please refer to the children + directly to see more examples. + + It raises an exception if parse_all flag is set and instring does not match the whole grammar. + + >>> res = Word('a').parse_string('aaaaabaaa', parse_all=True) + Traceback (most recent call last): + ... 
+ pyparsing.ParseException: Expected end of text, found 'b' (at char 5), (line:1, col:6) + """ + parseAll = parse_all or parseAll + + ParserElement.reset_cache() + if not self.streamlined: + self.streamline() + for e in self.ignoreExprs: + e.streamline() + if not self.keepTabs: + instring = instring.expandtabs() + try: + loc, tokens = self._parse(instring, 0) + if parseAll: + loc = self.preParse(instring, loc) + se = Empty() + StringEnd() + se._parse(instring, loc) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clearing out pyparsing internal stack trace + raise exc.with_traceback(None) + else: + return tokens + + def scan_string( + self, + instring: str, + max_matches: int = _MAX_INT, + overlap: bool = False, + *, + debug: bool = False, + maxMatches: int = _MAX_INT, + ) -> Generator[Tuple[ParseResults, int, int], None, None]: + """ + Scan the input string for expression matches. Each match will return the + matching tokens, start location, and end location. May be called with optional + ``max_matches`` argument, to clip scanning after 'n' matches are found. If + ``overlap`` is specified, then overlapping matches will be reported. + + Note that the start and end locations are reported relative to the string + being parsed. See :class:`parse_string` for more information on parsing + strings with embedded tabs. + + Example:: + + source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" + print(source) + for tokens, start, end in Word(alphas).scan_string(source): + print(' '*start + '^'*(end-start)) + print(' '*start + tokens[0]) + + prints:: + + sldjf123lsdjjkf345sldkjf879lkjsfd987 + ^^^^^ + sldjf + ^^^^^^^ + lsdjjkf + ^^^^^^ + sldkjf + ^^^^^^ + lkjsfd + """ + maxMatches = min(maxMatches, max_matches) + if not self.streamlined: + self.streamline() + for e in self.ignoreExprs: + e.streamline() + + if not self.keepTabs: + instring = str(instring).expandtabs() + instrlen = len(instring) + loc = 0 + preparseFn = self.preParse + parseFn = self._parse + ParserElement.resetCache() + matches = 0 + try: + while loc <= instrlen and matches < maxMatches: + try: + preloc = preparseFn(instring, loc) + nextLoc, tokens = parseFn(instring, preloc, callPreParse=False) + except ParseException: + loc = preloc + 1 + else: + if nextLoc > loc: + matches += 1 + if debug: + print( + { + "tokens": tokens.asList(), + "start": preloc, + "end": nextLoc, + } + ) + yield tokens, preloc, nextLoc + if overlap: + nextloc = preparseFn(instring, loc) + if nextloc > loc: + loc = nextloc + else: + loc += 1 + else: + loc = nextLoc + else: + loc = preloc + 1 + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) + + def transform_string(self, instring: str, *, debug: bool = False) -> str: + """ + Extension to :class:`scan_string`, to modify matching text with modified tokens that may + be returned from a parse action. To use ``transform_string``, define a grammar and + attach a parse action to it that modifies the returned token list. + Invoking ``transform_string()`` on a target string will then scan for matches, + and replace the matched text patterns according to the logic in the parse + action. ``transform_string()`` returns the resulting transformed string.
+ + Example:: + + wd = Word(alphas) + wd.set_parse_action(lambda toks: toks[0].title()) + + print(wd.transform_string("now is the winter of our discontent made glorious summer by this sun of york.")) + + prints:: + + Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. + """ + out: List[str] = [] + lastE = 0 + # force preservation of s, to minimize unwanted transformation of string, and to + # keep string locs straight between transform_string and scan_string + self.keepTabs = True + try: + for t, s, e in self.scan_string(instring, debug=debug): + out.append(instring[lastE:s]) + if t: + if isinstance(t, ParseResults): + out += t.as_list() + elif isinstance(t, Iterable) and not isinstance(t, str_type): + out.extend(t) + else: + out.append(t) + lastE = e + out.append(instring[lastE:]) + out = [o for o in out if o] + return "".join([str(s) for s in _flatten(out)]) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) + + def search_string( + self, + instring: str, + max_matches: int = _MAX_INT, + *, + debug: bool = False, + maxMatches: int = _MAX_INT, + ) -> ParseResults: + """ + Another extension to :class:`scan_string`, simplifying the access to the tokens found + to match the given parse expression. May be called with optional + ``max_matches`` argument, to clip searching after 'n' matches are found. + + Example:: + + # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters + cap_word = Word(alphas.upper(), alphas.lower()) + + print(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity")) + + # the sum() builtin can be used to merge results into a single ParseResults object + print(sum(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity"))) + + prints:: + + [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']] + ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] + """ + maxMatches = min(maxMatches, max_matches) + try: + return ParseResults( + [t for t, s, e in self.scan_string(instring, maxMatches, debug=debug)] + ) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) + + def split( + self, + instring: str, + maxsplit: int = _MAX_INT, + include_separators: bool = False, + *, + includeSeparators=False, + ) -> Generator[str, None, None]: + """ + Generator method to split a string using the given expression as a separator. + May be called with optional ``maxsplit`` argument, to limit the number of splits; + and the optional ``include_separators`` argument (default= ``False``), if the separating + matching text should be included in the split results. + + Example:: + + punc = one_of(list(".,;:/-!?")) + print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) + + prints:: + + ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] + """ + includeSeparators = includeSeparators or include_separators + last = 0 + for t, s, e in self.scan_string(instring, max_matches=maxsplit): + yield instring[last:s] + if includeSeparators: + yield t[0] + last = e + yield instring[last:] + + def __add__(self, other): + """ + Implementation of ``+`` operator - returns :class:`And`. 
Adding strings to a :class:`ParserElement` + converts them to :class:`Literal`s by default. + + Example:: + + greet = Word(alphas) + "," + Word(alphas) + "!" + hello = "Hello, World!" + print(hello, "->", greet.parse_string(hello)) + + prints:: + + Hello, World! -> ['Hello', ',', 'World', '!'] + + ``...`` may be used as a parse expression as a short form of :class:`SkipTo`. + + Literal('start') + ... + Literal('end') + + is equivalent to: + + Literal('start') + SkipTo('end')("_skipped*") + Literal('end') + + Note that the skipped text is returned with '_skipped' as a results name, + and to support having multiple skips in the same parser, the value returned is + a list of all skipped text. + """ + if other is Ellipsis: + return _PendingSkip(self) + + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return And([self, other]) + + def __radd__(self, other): + """ + Implementation of ``+`` operator when left operand is not a :class:`ParserElement` + """ + if other is Ellipsis: + return SkipTo(self)("_skipped*") + self + + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return other + self + + def __sub__(self, other): + """ + Implementation of ``-`` operator, returns :class:`And` with error stop + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return self + And._ErrorStop() + other + + def __rsub__(self, other): + """ + Implementation of ``-`` operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return other - self + + def __mul__(self, other): + """ + Implementation of ``*`` operator, allows use of ``expr * 3`` in place of + ``expr + expr + expr``. Expressions may also be multiplied by a 2-integer + tuple, similar to ``{min, max}`` multipliers in regular expressions. Tuples + may also include ``None`` as in: + - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent + to ``expr*n + ZeroOrMore(expr)`` + (read as "at least n instances of ``expr``") + - ``expr*(None, n)`` is equivalent to ``expr*(0, n)`` + (read as "0 to n instances of ``expr``") + - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)`` + - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)`` + + Note that ``expr*(None, n)`` does not raise an exception if + more than n exprs exist in the input stream; that is, + ``expr*(None, n)`` does not enforce a maximum number of expr + occurrences. 
If this behavior is desired, then write + ``expr*(None, n) + ~expr`` + """ + if other is Ellipsis: + other = (0, None) + elif isinstance(other, tuple) and other[:1] == (Ellipsis,): + other = ((0,) + other[1:] + (None,))[:2] + + if isinstance(other, int): + minElements, optElements = other, 0 + elif isinstance(other, tuple): + other = tuple(o if o is not Ellipsis else None for o in other) + other = (other + (None, None))[:2] + if other[0] is None: + other = (0, other[1]) + if isinstance(other[0], int) and other[1] is None: + if other[0] == 0: + return ZeroOrMore(self) + if other[0] == 1: + return OneOrMore(self) + else: + return self * other[0] + ZeroOrMore(self) + elif isinstance(other[0], int) and isinstance(other[1], int): + minElements, optElements = other + optElements -= minElements + else: + raise TypeError( + "cannot multiply ParserElement and ({}) objects".format( + ",".join(type(item).__name__ for item in other) + ) + ) + else: + raise TypeError( + "cannot multiply ParserElement and {} objects".format( + type(other).__name__ + ) + ) + + if minElements < 0: + raise ValueError("cannot multiply ParserElement by negative value") + if optElements < 0: + raise ValueError( + "second tuple value must be greater or equal to first tuple value" + ) + if minElements == optElements == 0: + return And([]) + + if optElements: + + def makeOptionalList(n): + if n > 1: + return Opt(self + makeOptionalList(n - 1)) + else: + return Opt(self) + + if minElements: + if minElements == 1: + ret = self + makeOptionalList(optElements) + else: + ret = And([self] * minElements) + makeOptionalList(optElements) + else: + ret = makeOptionalList(optElements) + else: + if minElements == 1: + ret = self + else: + ret = And([self] * minElements) + return ret + + def __rmul__(self, other): + return self.__mul__(other) + + def __or__(self, other): + """ + Implementation of ``|`` operator - returns :class:`MatchFirst` + """ + if other is Ellipsis: + return _PendingSkip(self, must_skip=True) + + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return MatchFirst([self, other]) + + def __ror__(self, other): + """ + Implementation of ``|`` operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return other | self + + def __xor__(self, other): + """ + Implementation of ``^`` operator - returns :class:`Or` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return Or([self, other]) + + def __rxor__(self, other): + """ + Implementation of ``^`` operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return other ^ self + + def __and__(self, other): + """ + Implementation of ``&`` operator - returns :class:`Each` + """ + if isinstance(other, str_type): + other = 
self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return Each([self, other]) + + def __rand__(self, other): + """ + Implementation of ``&`` operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return other & self + + def __invert__(self): + """ + Implementation of ``~`` operator - returns :class:`NotAny` + """ + return NotAny(self) + + # disable __iter__ to override legacy use of sequential access to __getitem__ to + # iterate over a sequence + __iter__ = None + + def __getitem__(self, key): + """ + use ``[]`` indexing notation as a short form for expression repetition: + + - ``expr[n]`` is equivalent to ``expr*n`` + - ``expr[m, n]`` is equivalent to ``expr*(m, n)`` + - ``expr[n, ...]`` or ``expr[n,]`` is equivalent + to ``expr*n + ZeroOrMore(expr)`` + (read as "at least n instances of ``expr``") + - ``expr[..., n]`` is equivalent to ``expr*(0, n)`` + (read as "0 to n instances of ``expr``") + - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)`` + - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)`` + + ``None`` may be used in place of ``...``. + + Note that ``expr[..., n]`` and ``expr[m, n]`` do not raise an exception + if more than ``n`` ``expr``s exist in the input stream. If this behavior is + desired, then write ``expr[..., n] + ~expr``. + """ + + # convert single arg keys to tuples + try: + if isinstance(key, str_type): + key = (key,) + iter(key) + except TypeError: + key = (key, key) + + if len(key) > 2: + raise TypeError( + "only 1 or 2 index arguments supported ({}{})".format( + key[:5], "... [{}]".format(len(key)) if len(key) > 5 else "" + ) + ) + + # clip to 2 elements + ret = self * tuple(key[:2]) + return ret + + def __call__(self, name: str = None): + """ + Shortcut for :class:`set_results_name`, with ``list_all_matches=False``. + + If ``name`` is given with a trailing ``'*'`` character, then ``list_all_matches`` will be + passed as ``True``. + + If ``name`` is omitted, same as calling :class:`copy`. + + Example:: + + # these are equivalent + userdata = Word(alphas).set_results_name("name") + Word(nums + "-").set_results_name("socsecno") + userdata = Word(alphas)("name") + Word(nums + "-")("socsecno") + """ + if name is not None: + return self._setResultsName(name) + else: + return self.copy() + + def suppress(self) -> "ParserElement": + """ + Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from + cluttering up returned output. + """ + return Suppress(self) + + def ignore_whitespace(self, recursive: bool = True) -> "ParserElement": + """ + Enables the skipping of whitespace before matching the characters in the + :class:`ParserElement`'s defined pattern. + + :param recursive: If ``True`` (the default), also enable whitespace skipping in child elements (if any) + """ + self.skipWhitespace = True + return self + + def leave_whitespace(self, recursive: bool = True) -> "ParserElement": + """ + Disables the skipping of whitespace before matching the characters in the + :class:`ParserElement`'s defined pattern. This is normally only used internally by + the pyparsing module, but may be needed in some whitespace-sensitive grammars.
+ + :param recursive: If ``True`` (the default), also disable whitespace skipping in child elements (if any) + """ + self.skipWhitespace = False + return self + + def set_whitespace_chars( + self, chars: Union[Set[str], str], copy_defaults: bool = False + ) -> "ParserElement": + """ + Overrides the default whitespace chars + """ + self.skipWhitespace = True + self.whiteChars = set(chars) + self.copyDefaultWhiteChars = copy_defaults + return self + + def parse_with_tabs(self) -> "ParserElement": + """ + Overrides default behavior to expand ``<TAB>`` s to spaces before parsing the input string. + Must be called before ``parse_string`` when the input grammar contains elements that + match ``<TAB>`` characters. + """ + self.keepTabs = True + return self + + def ignore(self, other: "ParserElement") -> "ParserElement": + """ + Define expression to be ignored (e.g., comments) while doing pattern + matching; may be called repeatedly, to define multiple comment or other + ignorable patterns. + + Example:: + + patt = OneOrMore(Word(alphas)) + patt.parse_string('ablaj /* comment */ lskjd') + # -> ['ablaj'] + + patt.ignore(c_style_comment) + patt.parse_string('ablaj /* comment */ lskjd') + # -> ['ablaj', 'lskjd'] + """ + import typing + + if isinstance(other, str_type): + other = Suppress(other) + + if isinstance(other, Suppress): + if other not in self.ignoreExprs: + self.ignoreExprs.append(other) + else: + self.ignoreExprs.append(Suppress(other.copy())) + return self + + def set_debug_actions( + self, + start_action: DebugStartAction, + success_action: DebugSuccessAction, + exception_action: DebugExceptionAction, + ) -> "ParserElement": + """ + Customize display of debugging messages while doing pattern matching: + + - ``start_action`` - method to be called when an expression is about to be parsed; + should have the signature ``fn(input_string: str, location: int, expression: ParserElement, cache_hit: bool)`` + + - ``success_action`` - method to be called when an expression has successfully parsed; + should have the signature ``fn(input_string: str, start_location: int, end_location: int, expression: ParserElement, parsed_tokens: ParseResults, cache_hit: bool)`` + + - ``exception_action`` - method to be called when expression fails to parse; + should have the signature ``fn(input_string: str, location: int, expression: ParserElement, exception: Exception, cache_hit: bool)`` + """ + self.debugActions = self.DebugActions( + start_action or _default_start_debug_action, + success_action or _default_success_debug_action, + exception_action or _default_exception_debug_action, + ) + self.debug = True + return self + + def set_debug(self, flag: bool = True) -> "ParserElement": + """ + Enable display of debugging messages while doing pattern matching. + Set ``flag`` to ``True`` to enable, ``False`` to disable.
+ + Example:: + + wd = Word(alphas).set_name("alphaword") + integer = Word(nums).set_name("numword") + term = wd | integer + + # turn on debugging for wd + wd.set_debug() + + OneOrMore(term).parse_string("abc 123 xyz 890") + + prints:: + + Match alphaword at loc 0(1,1) + Matched alphaword -> ['abc'] + Match alphaword at loc 3(1,4) + Exception raised:Expected alphaword (at char 4), (line:1, col:5) + Match alphaword at loc 7(1,8) + Matched alphaword -> ['xyz'] + Match alphaword at loc 11(1,12) + Exception raised:Expected alphaword (at char 12), (line:1, col:13) + Match alphaword at loc 15(1,16) + Exception raised:Expected alphaword (at char 15), (line:1, col:16) + + The output shown is that produced by the default debug actions - custom debug actions can be + specified using :class:`set_debug_actions`. Prior to attempting + to match the ``wd`` expression, the debugging message ``"Match <exprname> at loc <n>(<line>,<col>)"`` + is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"`` + message is shown. Also note the use of :class:`set_name` to assign a human-readable name to the expression, + which makes debugging and exception messages easier to understand - for instance, the default + name created for the :class:`Word` expression without calling ``set_name`` is ``"W:(A-Za-z)"``. + """ + if flag: + self.set_debug_actions( + _default_start_debug_action, + _default_success_debug_action, + _default_exception_debug_action, + ) + else: + self.debug = False + return self + + @property + def default_name(self) -> str: + if self._defaultName is None: + self._defaultName = self._generateDefaultName() + return self._defaultName + + @abstractmethod + def _generateDefaultName(self): + """ + Child classes must define this method, which defines how the ``default_name`` is set. + """ + + def set_name(self, name: str) -> "ParserElement": + """ + Define name for this expression, makes debugging and exception messages clearer. + Example:: + Word(nums).parse_string("ABC") # -> Exception: Expected W:(0-9) (at char 0), (line:1, col:1) + Word(nums).set_name("integer").parse_string("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) + """ + self.customName = name + self.errmsg = "Expected " + self.name + if __diag__.enable_debug_on_named_expressions: + self.set_debug() + return self + + @property + def name(self) -> str: + # This will use a user-defined name if available, but otherwise defaults back to the auto-generated name + return self.customName if self.customName is not None else self.default_name + + def __str__(self) -> str: + return self.name + + def __repr__(self) -> str: + return str(self) + + def streamline(self) -> "ParserElement": + self.streamlined = True + self._defaultName = None + return self + + def recurse(self) -> Sequence["ParserElement"]: + return [] + + def _checkRecursion(self, parseElementList): + subRecCheckList = parseElementList[:] + [self] + for e in self.recurse(): + e._checkRecursion(subRecCheckList) + + def validate(self, validateTrace=None) -> None: + """ + Check defined expressions for valid structure, check for infinite recursive definitions. + """ + self._checkRecursion([]) + + def parse_file( + self, + file_or_filename: Union[str, Path, TextIO], + encoding: str = "utf-8", + parse_all: bool = False, + *, + parseAll: bool = False, + ) -> ParseResults: + """ + Execute the parse expression on the given file or filename. + If a filename is specified (instead of a file object), + the entire file is opened, read, and closed before parsing.
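As a usage sketch of ``parse_file`` (the filename and grammar here are hypothetical)::

    import pipenv.vendor.pyparsing as pp

    words = pp.OneOrMore(pp.Word(pp.alphas))
    # reads and parses the whole file; parse_all=True additionally requires
    # that the grammar consume the entire file contents
    result = words.parse_file("notes.txt", parse_all=True)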
+ """ + parseAll = parseAll or parse_all + try: + file_contents = file_or_filename.read() + except AttributeError: + with open(file_or_filename, "r", encoding=encoding) as f: + file_contents = f.read() + try: + return self.parse_string(file_contents, parseAll) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) + + def __eq__(self, other): + if self is other: + return True + elif isinstance(other, str_type): + return self.matches(other, parse_all=True) + elif isinstance(other, ParserElement): + return vars(self) == vars(other) + return False + + def __hash__(self): + return id(self) + + def matches( + self, test_string: str, parse_all: bool = True, *, parseAll: bool = True + ) -> bool: + """ + Method for quick testing of a parser against a test string. Good for simple + inline microtests of sub expressions while building up larger parser. + + Parameters: + - ``test_string`` - to test against this expression for a match + - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests + + Example:: + + expr = Word(nums) + assert expr.matches("100") + """ + parseAll = parseAll and parse_all + try: + self.parse_string(str(test_string), parse_all=parseAll) + return True + except ParseBaseException: + return False + + def run_tests( + self, + tests: Union[str, List[str]], + parse_all: bool = True, + comment: OptionalType[Union["ParserElement", str]] = "#", + full_dump: bool = True, + print_results: bool = True, + failure_tests: bool = False, + post_parse: Callable[[str, ParseResults], str] = None, + file: OptionalType[TextIO] = None, + with_line_numbers: bool = False, + *, + parseAll: bool = True, + fullDump: bool = True, + printResults: bool = True, + failureTests: bool = False, + postParse: Callable[[str, ParseResults], str] = None, + ) -> Tuple[bool, List[Tuple[str, Union[ParseResults, Exception]]]]: + """ + Execute the parse expression on a series of test strings, showing each + test, the parsed results or where the parse failed. Quick and easy way to + run a parse expression against a list of sample strings. 
+ + Parameters: + - ``tests`` - a list of separate test strings, or a multiline string of test strings + - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests + - ``comment`` - (default= ``'#'``) - expression for indicating embedded comments in the test + string; pass None to disable comment filtering + - ``full_dump`` - (default= ``True``) - dump results as list followed by results names in nested outline; + if False, only dump nested list + - ``print_results`` - (default= ``True``) prints test output to stdout + - ``failure_tests`` - (default= ``False``) indicates if these tests are expected to fail parsing + - ``post_parse`` - (default= ``None``) optional callback for successful parse results; called as + `fn(test_string, parse_results)` and returns a string to be added to the test output + - ``file`` - (default= ``None``) optional file-like object to which test output will be written; + if None, will default to ``sys.stdout`` + - ``with_line_numbers`` - (default= ``False``) show test strings with line and column numbers + + Returns: a (success, results) tuple, where success indicates that all tests succeeded + (or failed if ``failure_tests`` is True), and the results contain a list of lines of each + test's output + + Example:: + + number_expr = pyparsing_common.number.copy() + + result = number_expr.run_tests(''' + # unsigned integer + 100 + # negative integer + -100 + # float with scientific notation + 6.02e23 + # integer with scientific notation + 1e-12 + ''') + print("Success" if result[0] else "Failed!") + + result = number_expr.run_tests(''' + # stray character + 100Z + # missing leading digit before '.' + -.100 + # too many '.' + 3.14.159 + ''', failure_tests=True) + print("Success" if result[0] else "Failed!") + + prints:: + + # unsigned integer + 100 + [100] + + # negative integer + -100 + [-100] + + # float with scientific notation + 6.02e23 + [6.02e+23] + + # integer with scientific notation + 1e-12 + [1e-12] + + Success + + # stray character + 100Z + ^ + FAIL: Expected end of text (at char 3), (line:1, col:4) + + # missing leading digit before '.' + -.100 + ^ + FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1) + + # too many '.' + 3.14.159 + ^ + FAIL: Expected end of text (at char 4), (line:1, col:5) + + Success + + Each test string must be on a single line. If you want to test a string that spans multiple + lines, create a test like this:: + + expr.run_tests(r"this is a test\\n of strings that spans \\n 3 lines") + + (Note that this is a raw string literal, you must include the leading ``'r'``.)
+ """ + from .testing import pyparsing_test + + parseAll = parseAll and parse_all + fullDump = fullDump and full_dump + printResults = printResults and print_results + failureTests = failureTests or failure_tests + postParse = postParse or post_parse + if isinstance(tests, str_type): + line_strip = type(tests).strip + tests = [line_strip(test_line) for test_line in tests.rstrip().splitlines()] + if isinstance(comment, str_type): + comment = Literal(comment) + if file is None: + file = sys.stdout + print_ = file.write + + result: Union[ParseResults, Exception] + allResults = [] + comments = [] + success = True + NL = Literal(r"\n").add_parse_action(replace_with("\n")).ignore(quoted_string) + BOM = "\ufeff" + for t in tests: + if comment is not None and comment.matches(t, False) or comments and not t: + comments.append( + pyparsing_test.with_line_numbers(t) if with_line_numbers else t + ) + continue + if not t: + continue + out = [ + "\n" + "\n".join(comments) if comments else "", + pyparsing_test.with_line_numbers(t) if with_line_numbers else t, + ] + comments = [] + try: + # convert newline marks to actual newlines, and strip leading BOM if present + t = NL.transform_string(t.lstrip(BOM)) + result = self.parse_string(t, parse_all=parseAll) + except ParseBaseException as pe: + fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" + out.append(pe.explain()) + out.append("FAIL: " + str(pe)) + if ParserElement.verbose_stacktrace: + out.extend(traceback.format_tb(pe.__traceback__)) + success = success and failureTests + result = pe + except Exception as exc: + out.append("FAIL-EXCEPTION: {}: {}".format(type(exc).__name__, exc)) + if ParserElement.verbose_stacktrace: + out.extend(traceback.format_tb(exc.__traceback__)) + success = success and failureTests + result = exc + else: + success = success and not failureTests + if postParse is not None: + try: + pp_value = postParse(t, result) + if pp_value is not None: + if isinstance(pp_value, ParseResults): + out.append(pp_value.dump()) + else: + out.append(str(pp_value)) + else: + out.append(result.dump()) + except Exception as e: + out.append(result.dump(full=fullDump)) + out.append( + "{} failed: {}: {}".format( + postParse.__name__, type(e).__name__, e + ) + ) + else: + out.append(result.dump(full=fullDump)) + out.append("") + + if printResults: + print_("\n".join(out)) + + allResults.append((t, result)) + + return success, allResults + + def create_diagram( + self, + output_html: Union[TextIO, Path, str], + vertical: int = 3, + show_results_names: bool = False, + **kwargs, + ) -> None: + """ + Create a railroad diagram for the parser. + + Parameters: + - output_html (str or file-like object) - output target for generated + diagram HTML + - vertical (int) - threshold for formatting multiple alternatives vertically + instead of horizontally (default=3) + - show_results_names - bool flag whether diagram should show annotations for + defined results names + + Additional diagram-formatting keyword arguments can also be included; + see railroad.Diagram class. 
+ """ + + try: + from .diagram import to_railroad, railroad_to_html + except ImportError as ie: + raise Exception( + "must ``pip install pyparsing[diagrams]`` to generate parser railroad diagrams" + ) from ie + + self.streamline() + + railroad = to_railroad( + self, + vertical=vertical, + show_results_names=show_results_names, + diagram_kwargs=kwargs, + ) + if isinstance(output_html, (str, Path)): + with open(output_html, "w", encoding="utf-8") as diag_file: + diag_file.write(railroad_to_html(railroad)) + else: + # we were passed a file-like object, just write to it + output_html.write(railroad_to_html(railroad)) + + setDefaultWhitespaceChars = set_default_whitespace_chars + inlineLiteralsUsing = inline_literals_using + setResultsName = set_results_name + setBreak = set_break + setParseAction = set_parse_action + addParseAction = add_parse_action + addCondition = add_condition + setFailAction = set_fail_action + tryParse = try_parse + canParseNext = can_parse_next + resetCache = reset_cache + enableLeftRecursion = enable_left_recursion + enablePackrat = enable_packrat + parseString = parse_string + scanString = scan_string + searchString = search_string + transformString = transform_string + setWhitespaceChars = set_whitespace_chars + parseWithTabs = parse_with_tabs + setDebugActions = set_debug_actions + setDebug = set_debug + defaultName = default_name + setName = set_name + parseFile = parse_file + runTests = run_tests + ignoreWhitespace = ignore_whitespace + leaveWhitespace = leave_whitespace + + +class _PendingSkip(ParserElement): + # internal placeholder class to hold a place were '...' is added to a parser element, + # once another ParserElement is added, this placeholder will be replaced with a SkipTo + def __init__(self, expr: ParserElement, must_skip: bool = False): + super().__init__() + self.anchor = expr + self.must_skip = must_skip + + def _generateDefaultName(self): + return str(self.anchor + Empty()).replace("Empty", "...") + + def __add__(self, other): + skipper = SkipTo(other).set_name("...")("_skipped*") + if self.must_skip: + + def must_skip(t): + if not t._skipped or t._skipped.as_list() == [""]: + del t[0] + t.pop("_skipped", None) + + def show_skip(t): + if t._skipped.as_list()[-1:] == [""]: + t.pop("_skipped") + t["_skipped"] = "missing <" + repr(self.anchor) + ">" + + return ( + self.anchor + skipper().add_parse_action(must_skip) + | skipper().add_parse_action(show_skip) + ) + other + + return self.anchor + skipper + other + + def __repr__(self): + return self.defaultName + + def parseImpl(self, *args): + raise Exception( + "use of `...` expression without following SkipTo target expression" + ) + + +class Token(ParserElement): + """Abstract :class:`ParserElement` subclass, for defining atomic + matching patterns. + """ + + def __init__(self): + super().__init__(savelist=False) + + def _generateDefaultName(self): + return type(self).__name__ + + +class Empty(Token): + """ + An empty token, will always match. + """ + + def __init__(self): + super().__init__() + self.mayReturnEmpty = True + self.mayIndexError = False + + +class NoMatch(Token): + """ + A token that will never match. + """ + + def __init__(self): + super().__init__() + self.mayReturnEmpty = True + self.mayIndexError = False + self.errmsg = "Unmatchable token" + + def parseImpl(self, instring, loc, doActions=True): + raise ParseException(instring, loc, self.errmsg, self) + + +class Literal(Token): + """ + Token to exactly match a specified string. 
+ + Example:: + + Literal('blah').parse_string('blah') # -> ['blah'] + Literal('blah').parse_string('blahfooblah') # -> ['blah'] + Literal('blah').parse_string('bla') # -> Exception: Expected "blah" + + For case-insensitive matching, use :class:`CaselessLiteral`. + + For keyword matching (force word break before and after the matched string), + use :class:`Keyword` or :class:`CaselessKeyword`. + """ + + def __init__(self, match_string: str = "", *, matchString: str = ""): + super().__init__() + match_string = matchString or match_string + self.match = match_string + self.matchLen = len(match_string) + try: + self.firstMatchChar = match_string[0] + except IndexError: + raise ValueError("null string passed to Literal; use Empty() instead") + self.errmsg = "Expected " + self.name + self.mayReturnEmpty = False + self.mayIndexError = False + + # Performance tuning: modify __class__ to select + # a parseImpl optimized for single-character check + if self.matchLen == 1 and type(self) is Literal: + self.__class__ = _SingleCharLiteral + + def _generateDefaultName(self): + return repr(self.match) + + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] == self.firstMatchChar and instring.startswith( + self.match, loc + ): + return loc + self.matchLen, self.match + raise ParseException(instring, loc, self.errmsg, self) + + +class _SingleCharLiteral(Literal): + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] == self.firstMatchChar: + return loc + 1, self.match + raise ParseException(instring, loc, self.errmsg, self) + + +ParserElement._literalStringClass = Literal + + +class Keyword(Token): + """ + Token to exactly match a specified string as a keyword, that is, + it must be immediately followed by a non-keyword character. Compare + with :class:`Literal`: + + - ``Literal("if")`` will match the leading ``'if'`` in + ``'ifAndOnlyIf'``. + - ``Keyword("if")`` will not; it will only match the leading + ``'if'`` in ``'if x=1'``, or ``'if(y==2)'`` + + Accepts two optional constructor arguments in addition to the + keyword string: + + - ``identChars`` is a string of characters that would be valid + identifier characters, defaulting to all alphanumerics + "_" and + "$" + - ``caseless`` allows case-insensitive matching, default is ``False``. + + Example:: + + Keyword("start").parse_string("start") # -> ['start'] + Keyword("start").parse_string("starting") # -> Exception + + For case-insensitive matching, use :class:`CaselessKeyword`. 
+ """ + + DEFAULT_KEYWORD_CHARS = alphanums + "_$" + + def __init__( + self, + match_string: str = "", + ident_chars: OptionalType[str] = None, + caseless: bool = False, + *, + matchString: str = "", + identChars: OptionalType[str] = None, + ): + super().__init__() + identChars = identChars or ident_chars + if identChars is None: + identChars = Keyword.DEFAULT_KEYWORD_CHARS + match_string = matchString or match_string + self.match = match_string + self.matchLen = len(match_string) + try: + self.firstMatchChar = match_string[0] + except IndexError: + raise ValueError("null string passed to Keyword; use Empty() instead") + self.errmsg = "Expected {} {}".format(type(self).__name__, self.name) + self.mayReturnEmpty = False + self.mayIndexError = False + self.caseless = caseless + if caseless: + self.caselessmatch = match_string.upper() + identChars = identChars.upper() + self.identChars = set(identChars) + + def _generateDefaultName(self): + return repr(self.match) + + def parseImpl(self, instring, loc, doActions=True): + errmsg = self.errmsg + errloc = loc + if self.caseless: + if instring[loc : loc + self.matchLen].upper() == self.caselessmatch: + if loc == 0 or instring[loc - 1].upper() not in self.identChars: + if ( + loc >= len(instring) - self.matchLen + or instring[loc + self.matchLen].upper() not in self.identChars + ): + return loc + self.matchLen, self.match + else: + # followed by keyword char + errmsg += ", was immediately followed by keyword character" + errloc = loc + self.matchLen + else: + # preceded by keyword char + errmsg += ", keyword was immediately preceded by keyword character" + errloc = loc - 1 + # else no match just raise plain exception + + else: + if ( + instring[loc] == self.firstMatchChar + and self.matchLen == 1 + or instring.startswith(self.match, loc) + ): + if loc == 0 or instring[loc - 1] not in self.identChars: + if ( + loc >= len(instring) - self.matchLen + or instring[loc + self.matchLen] not in self.identChars + ): + return loc + self.matchLen, self.match + else: + # followed by keyword char + errmsg += ( + ", keyword was immediately followed by keyword character" + ) + errloc = loc + self.matchLen + else: + # preceded by keyword char + errmsg += ", keyword was immediately preceded by keyword character" + errloc = loc - 1 + # else no match just raise plain exception + + raise ParseException(instring, errloc, errmsg, self) + + @staticmethod + def set_default_keyword_chars(chars) -> None: + """ + Overrides the default characters used by :class:`Keyword` expressions. + """ + Keyword.DEFAULT_KEYWORD_CHARS = chars + + setDefaultKeywordChars = set_default_keyword_chars + + +class CaselessLiteral(Literal): + """ + Token to match a specified string, ignoring case of letters. + Note: the matched results will always be in the case of the given + match string, NOT the case of the input text. + + Example:: + + OneOrMore(CaselessLiteral("CMD")).parse_string("cmd CMD Cmd10") + # -> ['CMD', 'CMD', 'CMD'] + + (Contrast with example for :class:`CaselessKeyword`.) + """ + + def __init__(self, match_string: str = "", *, matchString: str = ""): + match_string = matchString or match_string + super().__init__(match_string.upper()) + # Preserve the defining literal. 
+ self.returnString = match_string + self.errmsg = "Expected " + self.name + + def parseImpl(self, instring, loc, doActions=True): + if instring[loc : loc + self.matchLen].upper() == self.match: + return loc + self.matchLen, self.returnString + raise ParseException(instring, loc, self.errmsg, self) + + +class CaselessKeyword(Keyword): + """ + Caseless version of :class:`Keyword`. + + Example:: + + OneOrMore(CaselessKeyword("CMD")).parse_string("cmd CMD Cmd10") + # -> ['CMD', 'CMD'] + + (Contrast with example for :class:`CaselessLiteral`.) + """ + + def __init__( + self, + match_string: str = "", + ident_chars: OptionalType[str] = None, + *, + matchString: str = "", + identChars: OptionalType[str] = None, + ): + identChars = identChars or ident_chars + match_string = matchString or match_string + super().__init__(match_string, identChars, caseless=True) + + +class CloseMatch(Token): + """A variation on :class:`Literal` which matches "close" matches, + that is, strings with at most 'n' mismatching characters. + :class:`CloseMatch` takes parameters: + + - ``match_string`` - string to be matched + - ``caseless`` - a boolean indicating whether to ignore casing when comparing characters + - ``max_mismatches`` - (``default=1``) maximum number of + mismatches allowed to count as a match + + The results from a successful parse will contain the matched text + from the input string and the following named results: + + - ``mismatches`` - a list of the positions within the + match_string where mismatches were found + - ``original`` - the original match_string used to compare + against the input string + + If ``mismatches`` is an empty list, then the match was an exact + match. + + Example:: + + patt = CloseMatch("ATCATCGAATGGA") + patt.parse_string("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) + patt.parse_string("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) + + # exact match + patt.parse_string("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) + + # close match allowing up to 2 mismatches + patt = CloseMatch("ATCATCGAATGGA", max_mismatches=2) + patt.parse_string("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) + """ + + def __init__( + self, + match_string: str, + max_mismatches: int = None, + *, + maxMismatches: int = 1, + caseless=False, + ): + maxMismatches = max_mismatches if max_mismatches is not None else maxMismatches + super().__init__() + self.match_string = match_string + self.maxMismatches = maxMismatches + self.errmsg = "Expected {!r} (with up to {} mismatches)".format( + self.match_string, self.maxMismatches + ) + self.caseless = caseless + self.mayIndexError = False + self.mayReturnEmpty = False + + def _generateDefaultName(self): + return "{}:{!r}".format(type(self).__name__, self.match_string) + + def parseImpl(self, instring, loc, doActions=True): + start = loc + instrlen = len(instring) + maxloc = start + len(self.match_string) + + if maxloc <= instrlen: + match_string = self.match_string + match_stringloc = 0 + mismatches = [] + maxMismatches = self.maxMismatches + + for match_stringloc, s_m in enumerate( + zip(instring[loc:maxloc], match_string) + ): + src, mat = s_m + if self.caseless: + src, mat = src.lower(), mat.lower() + + if src != mat: + mismatches.append(match_stringloc) + if len(mismatches) > maxMismatches: + break + else: + loc = start + match_stringloc + 1 + 
results = ParseResults([instring[start:loc]]) + results["original"] = match_string + results["mismatches"] = mismatches + return loc, results + + raise ParseException(instring, loc, self.errmsg, self) + + +class Word(Token): + """Token for matching words composed of allowed character sets. + Parameters: + - ``init_chars`` - string of all characters that should be used to + match as a word; "ABC" will match "AAA", "ABAB", "CBAC", etc.; + if ``body_chars`` is also specified, then this is the string of + initial characters + - ``body_chars`` - string of characters that + can be used for matching after a matched initial character as + given in ``init_chars``; if omitted, same as the initial characters + (default=``None``) + - ``min`` - minimum number of characters to match (default=1) + - ``max`` - maximum number of characters to match (default=0) + - ``exact`` - exact number of characters to match (default=0) + - ``as_keyword`` - match as a keyword (default=``False``) + - ``exclude_chars`` - characters that might be + found in the input ``body_chars`` string but which should not be + accepted for matching; useful to define a word of all + printables except for one or two characters, for instance + (default=``None``) + + :class:`srange` is useful for defining custom character set strings + for defining :class:`Word` expressions, using range notation from + regular expression character sets. + + A common mistake is to use :class:`Word` to match a specific literal + string, as in ``Word("Address")``. Remember that :class:`Word` + uses the string argument to define *sets* of matchable characters. + This expression would match "Add", "AAA", "dAred", or any other word + made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an + exact literal string, use :class:`Literal` or :class:`Keyword`. + + pyparsing includes helper strings for building Words: + + - :class:`alphas` + - :class:`nums` + - :class:`alphanums` + - :class:`hexnums` + - :class:`alphas8bit` (alphabetic characters in ASCII range 128-255 + - accented, tilded, umlauted, etc.) + - :class:`punc8bit` (non-alphabetic characters in ASCII range + 128-255 - currency, symbols, superscripts, diacriticals, etc.) + - :class:`printables` (any non-whitespace character) + + ``alphas``, ``nums``, and ``printables`` are also defined in several + Unicode sets - see :class:`pyparsing_unicode`.
+ + Example:: + + # a word composed of digits + integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) + + # a word with a leading capital, and zero or more lowercase + capital_word = Word(alphas.upper(), alphas.lower()) + + # hostnames are alphanumeric, with leading alpha, and '-' + hostname = Word(alphas, alphanums + '-') + + # roman numeral (not a strict parser, accepts invalid mix of characters) + roman = Word("IVXLCDM") + + # any string of non-whitespace characters, except for ',' + csv_value = Word(printables, exclude_chars=",") + """ + + def __init__( + self, + init_chars: str = "", + body_chars: OptionalType[str] = None, + min: int = 1, + max: int = 0, + exact: int = 0, + as_keyword: bool = False, + exclude_chars: OptionalType[str] = None, + *, + initChars: OptionalType[str] = None, + bodyChars: OptionalType[str] = None, + asKeyword: bool = False, + excludeChars: OptionalType[str] = None, + ): + initChars = initChars or init_chars + bodyChars = bodyChars or body_chars + asKeyword = asKeyword or as_keyword + excludeChars = excludeChars or exclude_chars + super().__init__() + if not initChars: + raise ValueError( + "invalid {}, initChars cannot be empty string".format( + type(self).__name__ + ) + ) + + initChars = set(initChars) + self.initChars = initChars + if excludeChars: + excludeChars = set(excludeChars) + initChars -= excludeChars + if bodyChars: + bodyChars = set(bodyChars) - excludeChars + self.initCharsOrig = "".join(sorted(initChars)) + + if bodyChars: + self.bodyCharsOrig = "".join(sorted(bodyChars)) + self.bodyChars = set(bodyChars) + else: + self.bodyCharsOrig = "".join(sorted(initChars)) + self.bodyChars = set(initChars) + + self.maxSpecified = max > 0 + + if min < 1: + raise ValueError( + "cannot specify a minimum length < 1; use Opt(Word()) if zero-length word is permitted" + ) + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + self.errmsg = "Expected " + self.name + self.mayIndexError = False + self.asKeyword = asKeyword + + # see if we can make a regex for this Word + if " " not in self.initChars | self.bodyChars and (min == 1 and exact == 0): + if self.bodyChars == self.initChars: + if max == 0: + repeat = "+" + elif max == 1: + repeat = "" + else: + repeat = "{{{},{}}}".format( + self.minLen, "" if self.maxLen == _MAX_INT else self.maxLen + ) + self.reString = "[{}]{}".format( + _collapse_string_to_ranges(self.initChars), + repeat, + ) + elif len(self.initChars) == 1: + if max == 0: + repeat = "*" + else: + repeat = "{{0,{}}}".format(max - 1) + self.reString = "{}[{}]{}".format( + re.escape(self.initCharsOrig), + _collapse_string_to_ranges(self.bodyChars), + repeat, + ) + else: + if max == 0: + repeat = "*" + elif max == 2: + repeat = "" + else: + repeat = "{{0,{}}}".format(max - 1) + self.reString = "[{}][{}]{}".format( + _collapse_string_to_ranges(self.initChars), + _collapse_string_to_ranges(self.bodyChars), + repeat, + ) + if self.asKeyword: + self.reString = r"\b" + self.reString + r"\b" + + try: + self.re = re.compile(self.reString) + except sre_constants.error: + self.re = None + else: + self.re_match = self.re.match + self.__class__ = _WordRegex + + def _generateDefaultName(self): + def charsAsStr(s): + max_repr_len = 16 + s = _collapse_string_to_ranges(s, re_escape=False) + if len(s) > max_repr_len: + return s[: max_repr_len - 3] + "..." 
+ else: + return s + + if self.initChars != self.bodyChars: + base = "W:({}, {})".format( + charsAsStr(self.initChars), charsAsStr(self.bodyChars) + ) + else: + base = "W:({})".format(charsAsStr(self.initChars)) + + # add length specification + if self.minLen > 1 or self.maxLen != _MAX_INT: + if self.minLen == self.maxLen: + if self.minLen == 1: + return base[2:] + else: + return base + "{{{}}}".format(self.minLen) + elif self.maxLen == _MAX_INT: + return base + "{{{},...}}".format(self.minLen) + else: + return base + "{{{},{}}}".format(self.minLen, self.maxLen) + return base + + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] not in self.initChars: + raise ParseException(instring, loc, self.errmsg, self) + + start = loc + loc += 1 + instrlen = len(instring) + bodychars = self.bodyChars + maxloc = start + self.maxLen + maxloc = min(maxloc, instrlen) + while loc < maxloc and instring[loc] in bodychars: + loc += 1 + + throwException = False + if loc - start < self.minLen: + throwException = True + elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars: + throwException = True + elif self.asKeyword: + if ( + start > 0 + and instring[start - 1] in bodychars + or loc < instrlen + and instring[loc] in bodychars + ): + throwException = True + + if throwException: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + +class _WordRegex(Word): + def parseImpl(self, instring, loc, doActions=True): + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + return loc, result.group() + + +class Char(_WordRegex): + """A short-cut class for defining :class:`Word` ``(characters, exact=1)``, + when defining a match of any single character in a string of + characters. + """ + + def __init__( + self, + charset: str, + as_keyword: bool = False, + exclude_chars: OptionalType[str] = None, + *, + asKeyword: bool = False, + excludeChars: OptionalType[str] = None, + ): + asKeyword = asKeyword or as_keyword + excludeChars = excludeChars or exclude_chars + super().__init__( + charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars + ) + self.reString = "[{}]".format(_collapse_string_to_ranges(self.initChars)) + if asKeyword: + self.reString = r"\b{}\b".format(self.reString) + self.re = re.compile(self.reString) + self.re_match = self.re.match + + +class Regex(Token): + r"""Token for matching strings that match a given regular + expression. Defined with string specifying the regular expression in + a form recognized by the stdlib Python `re module <https://docs.python.org/3/library/re.html>`_. + If the given regex contains named groups (defined using ``(?P<name>...)``), + these will be preserved as named :class:`ParseResults`. + + If instead of the Python stdlib ``re`` module you wish to use a different RE module + (such as the ``regex`` module), you can do so by building your ``Regex`` object with + a compiled RE that was compiled using ``regex``.
+ + Example:: + + realnum = Regex(r"[+-]?\d+\.\d*") + # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression + roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") + + # named fields in a regex will be returned as named results + date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)') + + # the Regex class will accept re's compiled using the regex module + import regex + parser = pp.Regex(regex.compile(r'[0-9]')) + """ + + def __init__( + self, + pattern: Any, + flags: Union[re.RegexFlag, int] = 0, + as_group_list: bool = False, + as_match: bool = False, + *, + asGroupList: bool = False, + asMatch: bool = False, + ): + """The parameters ``pattern`` and ``flags`` are passed + to the ``re.compile()`` function as-is. See the Python + `re module <https://docs.python.org/3/library/re.html>`_ module for an + explanation of the acceptable patterns and flags. + """ + super().__init__() + asGroupList = asGroupList or as_group_list + asMatch = asMatch or as_match + + if isinstance(pattern, str_type): + if not pattern: + raise ValueError("null string passed to Regex; use Empty() instead") + + self.pattern = pattern + self.flags = flags + + try: + self.re = re.compile(self.pattern, self.flags) + self.reString = self.pattern + except sre_constants.error: + raise ValueError( + "invalid pattern ({!r}) passed to Regex".format(pattern) + ) + + elif hasattr(pattern, "pattern") and hasattr(pattern, "match"): + self.re = pattern + self.pattern = self.reString = pattern.pattern + self.flags = flags + + else: + raise TypeError( + "Regex may only be constructed with a string or a compiled RE object" + ) + + self.re_match = self.re.match + + self.errmsg = "Expected " + self.name + self.mayIndexError = False + self.mayReturnEmpty = self.re_match("") is not None + self.asGroupList = asGroupList + self.asMatch = asMatch + if self.asGroupList: + self.parseImpl = self.parseImplAsGroupList + if self.asMatch: + self.parseImpl = self.parseImplAsMatch + + def _generateDefaultName(self): + return "Re:({})".format(repr(self.pattern).replace("\\\\", "\\")) + + def parseImpl(self, instring, loc, doActions=True): + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = ParseResults(result.group()) + d = result.groupdict() + if d: + for k, v in d.items(): + ret[k] = v + return loc, ret + + def parseImplAsGroupList(self, instring, loc, doActions=True): + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = result.groups() + return loc, ret + + def parseImplAsMatch(self, instring, loc, doActions=True): + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = result + return loc, ret + + def sub(self, repl: str) -> ParserElement: + r""" + Return :class:`Regex` with an attached parse action to transform the parsed + result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_. + + Example:: + + make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>") + print(make_html.transform_string("h1:main title:")) + # prints "
<h1>main title</h1>
" + """ + if self.asGroupList: + raise TypeError("cannot use sub() with Regex(asGroupList=True)") + + if self.asMatch and callable(repl): + raise TypeError("cannot use sub() with a callable with Regex(asMatch=True)") + + if self.asMatch: + + def pa(tokens): + return tokens[0].expand(repl) + + else: + + def pa(tokens): + return self.re.sub(repl, tokens[0]) + + return self.add_parse_action(pa) + + +class QuotedString(Token): + r""" + Token for matching strings that are delimited by quoting characters. + + Defined with the following parameters: + + - ``quote_char`` - string of one or more characters defining the + quote delimiting string + - ``esc_char`` - character to re_escape quotes, typically backslash + (default= ``None``) + - ``esc_quote`` - special quote sequence to re_escape an embedded quote + string (such as SQL's ``""`` to re_escape an embedded ``"``) + (default= ``None``) + - ``multiline`` - boolean indicating whether quotes can span + multiple lines (default= ``False``) + - ``unquote_results`` - boolean indicating whether the matched text + should be unquoted (default= ``True``) + - ``end_quote_char`` - string of one or more characters defining the + end of the quote delimited string (default= ``None`` => same as + quote_char) + - ``convert_whitespace_escapes`` - convert escaped whitespace + (``'\t'``, ``'\n'``, etc.) to actual whitespace + (default= ``True``) + + Example:: + + qs = QuotedString('"') + print(qs.search_string('lsjdf "This is the quote" sldjf')) + complex_qs = QuotedString('{{', end_quote_char='}}') + print(complex_qs.search_string('lsjdf {{This is the "quote"}} sldjf')) + sql_qs = QuotedString('"', esc_quote='""') + print(sql_qs.search_string('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) + + prints:: + + [['This is the quote']] + [['This is the "quote"']] + [['This is the quote with "embedded" quotes']] + """ + ws_map = ((r"\t", "\t"), (r"\n", "\n"), (r"\f", "\f"), (r"\r", "\r")) + + def __init__( + self, + quote_char: str = "", + esc_char: OptionalType[str] = None, + esc_quote: OptionalType[str] = None, + multiline: bool = False, + unquote_results: bool = True, + end_quote_char: OptionalType[str] = None, + convert_whitespace_escapes: bool = True, + *, + quoteChar: str = "", + escChar: OptionalType[str] = None, + escQuote: OptionalType[str] = None, + unquoteResults: bool = True, + endQuoteChar: OptionalType[str] = None, + convertWhitespaceEscapes: bool = True, + ): + super().__init__() + escChar = escChar or esc_char + escQuote = escQuote or esc_quote + unquoteResults = unquoteResults and unquote_results + endQuoteChar = endQuoteChar or end_quote_char + convertWhitespaceEscapes = ( + convertWhitespaceEscapes and convert_whitespace_escapes + ) + quote_char = quoteChar or quote_char + + # remove white space from quote chars - wont work anyway + quote_char = quote_char.strip() + if not quote_char: + raise ValueError("quote_char cannot be the empty string") + + if endQuoteChar is None: + endQuoteChar = quote_char + else: + endQuoteChar = endQuoteChar.strip() + if not endQuoteChar: + raise ValueError("endQuoteChar cannot be the empty string") + + self.quoteChar = quote_char + self.quoteCharLen = len(quote_char) + self.firstQuoteChar = quote_char[0] + self.endQuoteChar = endQuoteChar + self.endQuoteCharLen = len(endQuoteChar) + self.escChar = escChar + self.escQuote = escQuote + self.unquoteResults = unquoteResults + self.convertWhitespaceEscapes = convertWhitespaceEscapes + + sep = "" + inner_pattern = "" + + if escQuote: + inner_pattern += 
r"{}(?:{})".format(sep, re.escape(escQuote)) + sep = "|" + + if escChar: + inner_pattern += r"{}(?:{}.)".format(sep, re.escape(escChar)) + sep = "|" + self.escCharReplacePattern = re.escape(self.escChar) + "(.)" + + if len(self.endQuoteChar) > 1: + inner_pattern += ( + "{}(?:".format(sep) + + "|".join( + "(?:{}(?!{}))".format( + re.escape(self.endQuoteChar[:i]), + re.escape(self.endQuoteChar[i:]), + ) + for i in range(len(self.endQuoteChar) - 1, 0, -1) + ) + + ")" + ) + sep = "|" + + if multiline: + self.flags = re.MULTILINE | re.DOTALL + inner_pattern += r"{}(?:[^{}{}])".format( + sep, + _escape_regex_range_chars(self.endQuoteChar[0]), + (_escape_regex_range_chars(escChar) if escChar is not None else ""), + ) + else: + self.flags = 0 + inner_pattern += r"{}(?:[^{}\n\r{}])".format( + sep, + _escape_regex_range_chars(self.endQuoteChar[0]), + (_escape_regex_range_chars(escChar) if escChar is not None else ""), + ) + + self.pattern = "".join( + [ + re.escape(self.quoteChar), + "(?:", + inner_pattern, + ")*", + re.escape(self.endQuoteChar), + ] + ) + + try: + self.re = re.compile(self.pattern, self.flags) + self.reString = self.pattern + self.re_match = self.re.match + except sre_constants.error: + raise ValueError( + "invalid pattern {!r} passed to Regex".format(self.pattern) + ) + + self.errmsg = "Expected " + self.name + self.mayIndexError = False + self.mayReturnEmpty = True + + def _generateDefaultName(self): + if self.quoteChar == self.endQuoteChar and isinstance(self.quoteChar, str_type): + return "string enclosed in {!r}".format(self.quoteChar) + + return "quoted string, starting with {} ending with {}".format( + self.quoteChar, self.endQuoteChar + ) + + def parseImpl(self, instring, loc, doActions=True): + result = ( + instring[loc] == self.firstQuoteChar + and self.re_match(instring, loc) + or None + ) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = result.group() + + if self.unquoteResults: + + # strip off quotes + ret = ret[self.quoteCharLen : -self.endQuoteCharLen] + + if isinstance(ret, str_type): + # replace escaped whitespace + if "\\" in ret and self.convertWhitespaceEscapes: + for wslit, wschar in self.ws_map: + ret = ret.replace(wslit, wschar) + + # replace escaped characters + if self.escChar: + ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret) + + # replace escaped quotes + if self.escQuote: + ret = ret.replace(self.escQuote, self.endQuoteChar) + + return loc, ret + + +class CharsNotIn(Token): + """Token for matching words composed of characters *not* in a given + set (will include whitespace in matched characters if not listed in + the provided exclusion set - see example). Defined with string + containing all disallowed characters, and an optional minimum, + maximum, and/or exact length. The default value for ``min`` is + 1 (a minimum value < 1 is not valid); the default values for + ``max`` and ``exact`` are 0, meaning no maximum or exact + length restriction. 
+ + Example:: + + # define a comma-separated-value as anything that is not a ',' + csv_value = CharsNotIn(',') + print(delimited_list(csv_value).parse_string("dkls,lsdkjf,s12 34,@!#,213")) + + prints:: + + ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] + """ + + def __init__( + self, + not_chars: str = "", + min: int = 1, + max: int = 0, + exact: int = 0, + *, + notChars: str = "", + ): + super().__init__() + self.skipWhitespace = False + self.notChars = not_chars or notChars + self.notCharsSet = set(self.notChars) + + if min < 1: + raise ValueError( + "cannot specify a minimum length < 1; use " + "Opt(CharsNotIn()) if zero-length char group is permitted" + ) + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + self.errmsg = "Expected " + self.name + self.mayReturnEmpty = self.minLen == 0 + self.mayIndexError = False + + def _generateDefaultName(self): + not_chars_str = _collapse_string_to_ranges(self.notChars) + if len(not_chars_str) > 16: + return "!W:({}...)".format(self.notChars[: 16 - 3]) + else: + return "!W:({})".format(self.notChars) + + def parseImpl(self, instring, loc, doActions=True): + notchars = self.notCharsSet + if instring[loc] in notchars: + raise ParseException(instring, loc, self.errmsg, self) + + start = loc + loc += 1 + maxlen = min(start + self.maxLen, len(instring)) + while loc < maxlen and instring[loc] not in notchars: + loc += 1 + + if loc - start < self.minLen: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + +class White(Token): + """Special matching class for matching whitespace. Normally, + whitespace is ignored by pyparsing grammars. This class is included + when some whitespace structures are significant. Define with + a string containing the whitespace characters to be matched; default + is ``" \\t\\r\\n"``. Also takes optional ``min``, + ``max``, and ``exact`` arguments, as defined for the + :class:`Word` class. 
+ """ + + whiteStrs = { + " ": "", + "\t": "", + "\n": "", + "\r": "", + "\f": "", + "\u00A0": "", + "\u1680": "", + "\u180E": "", + "\u2000": "", + "\u2001": "", + "\u2002": "", + "\u2003": "", + "\u2004": "", + "\u2005": "", + "\u2006": "", + "\u2007": "", + "\u2008": "", + "\u2009": "", + "\u200A": "", + "\u200B": "", + "\u202F": "", + "\u205F": "", + "\u3000": "", + } + + def __init__(self, ws: str = " \t\r\n", min: int = 1, max: int = 0, exact: int = 0): + super().__init__() + self.matchWhite = ws + self.set_whitespace_chars( + "".join(c for c in self.whiteStrs if c not in self.matchWhite), + copy_defaults=True, + ) + # self.leave_whitespace() + self.mayReturnEmpty = True + self.errmsg = "Expected " + self.name + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + def _generateDefaultName(self): + return "".join(White.whiteStrs[c] for c in self.matchWhite) + + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] not in self.matchWhite: + raise ParseException(instring, loc, self.errmsg, self) + start = loc + loc += 1 + maxloc = start + self.maxLen + maxloc = min(maxloc, len(instring)) + while loc < maxloc and instring[loc] in self.matchWhite: + loc += 1 + + if loc - start < self.minLen: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + +class PositionToken(Token): + def __init__(self): + super().__init__() + self.mayReturnEmpty = True + self.mayIndexError = False + + +class GoToColumn(PositionToken): + """Token to advance to a specific column of input text; useful for + tabular report scraping. + """ + + def __init__(self, colno: int): + super().__init__() + self.col = colno + + def preParse(self, instring, loc): + if col(loc, instring) != self.col: + instrlen = len(instring) + if self.ignoreExprs: + loc = self._skipIgnorables(instring, loc) + while ( + loc < instrlen + and instring[loc].isspace() + and col(loc, instring) != self.col + ): + loc += 1 + return loc + + def parseImpl(self, instring, loc, doActions=True): + thiscol = col(loc, instring) + if thiscol > self.col: + raise ParseException(instring, loc, "Text not in expected column", self) + newloc = loc + self.col - thiscol + ret = instring[loc:newloc] + return newloc, ret + + +class LineStart(PositionToken): + r"""Matches if current position is at the beginning of a line within + the parse string + + Example:: + + test = '''\ + AAA this line + AAA and this line + AAA but not this one + B AAA and definitely not this one + ''' + + for t in (LineStart() + 'AAA' + restOfLine).search_string(test): + print(t) + + prints:: + + ['AAA', ' this line'] + ['AAA', ' and this line'] + + """ + + def __init__(self): + super().__init__() + self.leave_whitespace() + self.orig_whiteChars = set() | self.whiteChars + self.whiteChars.discard("\n") + self.skipper = Empty().set_whitespace_chars(self.whiteChars) + self.errmsg = "Expected start of line" + + def preParse(self, instring, loc): + if loc == 0: + return loc + else: + ret = self.skipper.preParse(instring, loc) + if "\n" in self.orig_whiteChars: + while instring[ret : ret + 1] == "\n": + ret = self.skipper.preParse(instring, ret + 1) + return ret + + def parseImpl(self, instring, loc, doActions=True): + if col(loc, instring) == 1: + return loc, [] + raise ParseException(instring, loc, self.errmsg, self) + + +class LineEnd(PositionToken): + """Matches if current position is at the end of a line within the + parse string + """ + + 
def __init__(self): + super().__init__() + self.whiteChars.discard("\n") + self.set_whitespace_chars(self.whiteChars, copy_defaults=False) + self.errmsg = "Expected end of line" + + def parseImpl(self, instring, loc, doActions=True): + if loc < len(instring): + if instring[loc] == "\n": + return loc + 1, "\n" + else: + raise ParseException(instring, loc, self.errmsg, self) + elif loc == len(instring): + return loc + 1, [] + else: + raise ParseException(instring, loc, self.errmsg, self) + + +class StringStart(PositionToken): + """Matches if current position is at the beginning of the parse + string + """ + + def __init__(self): + super().__init__() + self.errmsg = "Expected start of text" + + def parseImpl(self, instring, loc, doActions=True): + if loc != 0: + # see if entire string up to here is just whitespace and ignoreables + if loc != self.preParse(instring, 0): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + +class StringEnd(PositionToken): + """ + Matches if current position is at the end of the parse string + """ + + def __init__(self): + super().__init__() + self.errmsg = "Expected end of text" + + def parseImpl(self, instring, loc, doActions=True): + if loc < len(instring): + raise ParseException(instring, loc, self.errmsg, self) + elif loc == len(instring): + return loc + 1, [] + elif loc > len(instring): + return loc, [] + else: + raise ParseException(instring, loc, self.errmsg, self) + + +class WordStart(PositionToken): + """Matches if the current position is at the beginning of a + :class:`Word`, and is not preceded by any character in a given + set of ``word_chars`` (default= ``printables``). To emulate the + ``\b`` behavior of regular expressions, use + ``WordStart(alphanums)``. ``WordStart`` will also match at + the beginning of the string being parsed, or at the beginning of + a line. + """ + + def __init__(self, word_chars: str = printables, *, wordChars: str = printables): + wordChars = word_chars if wordChars == printables else wordChars + super().__init__() + self.wordChars = set(wordChars) + self.errmsg = "Not at the start of a word" + + def parseImpl(self, instring, loc, doActions=True): + if loc != 0: + if ( + instring[loc - 1] in self.wordChars + or instring[loc] not in self.wordChars + ): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + +class WordEnd(PositionToken): + """Matches if the current position is at the end of a :class:`Word`, + and is not followed by any character in a given set of ``word_chars`` + (default= ``printables``). To emulate the ``\b`` behavior of + regular expressions, use ``WordEnd(alphanums)``. ``WordEnd`` + will also match at the end of the string being parsed, or at the end + of a line. + """ + + def __init__(self, word_chars: str = printables, *, wordChars: str = printables): + wordChars = word_chars if wordChars == printables else wordChars + super().__init__() + self.wordChars = set(wordChars) + self.skipWhitespace = False + self.errmsg = "Not at the end of a word" + + def parseImpl(self, instring, loc, doActions=True): + instrlen = len(instring) + if instrlen > 0 and loc < instrlen: + if ( + instring[loc] in self.wordChars + or instring[loc - 1] not in self.wordChars + ): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + +class ParseExpression(ParserElement): + """Abstract subclass of ParserElement, for combining and + post-processing parsed tokens. 
+ """ + + def __init__(self, exprs: IterableType[ParserElement], savelist: bool = False): + super().__init__(savelist) + self.exprs: List[ParserElement] + if isinstance(exprs, _generatorType): + exprs = list(exprs) + + if isinstance(exprs, str_type): + self.exprs = [self._literalStringClass(exprs)] + elif isinstance(exprs, ParserElement): + self.exprs = [exprs] + elif isinstance(exprs, Iterable): + exprs = list(exprs) + # if sequence of strings provided, wrap with Literal + if any(isinstance(expr, str_type) for expr in exprs): + exprs = ( + self._literalStringClass(e) if isinstance(e, str_type) else e + for e in exprs + ) + self.exprs = list(exprs) + else: + try: + self.exprs = list(exprs) + except TypeError: + self.exprs = [exprs] + self.callPreparse = False + + def recurse(self) -> Sequence[ParserElement]: + return self.exprs[:] + + def append(self, other) -> ParserElement: + self.exprs.append(other) + self._defaultName = None + return self + + def leave_whitespace(self, recursive: bool = True) -> ParserElement: + """ + Extends ``leave_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on + all contained expressions. + """ + super().leave_whitespace(recursive) + + if recursive: + self.exprs = [e.copy() for e in self.exprs] + for e in self.exprs: + e.leave_whitespace(recursive) + return self + + def ignore_whitespace(self, recursive: bool = True) -> ParserElement: + """ + Extends ``ignore_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on + all contained expressions. + """ + super().ignore_whitespace(recursive) + if recursive: + self.exprs = [e.copy() for e in self.exprs] + for e in self.exprs: + e.ignore_whitespace(recursive) + return self + + def ignore(self, other) -> ParserElement: + if isinstance(other, Suppress): + if other not in self.ignoreExprs: + super().ignore(other) + for e in self.exprs: + e.ignore(self.ignoreExprs[-1]) + else: + super().ignore(other) + for e in self.exprs: + e.ignore(self.ignoreExprs[-1]) + return self + + def _generateDefaultName(self): + return "{}:({})".format(self.__class__.__name__, str(self.exprs)) + + def streamline(self) -> ParserElement: + if self.streamlined: + return self + + super().streamline() + + for e in self.exprs: + e.streamline() + + # collapse nested :class:`And`'s of the form ``And(And(And(a, b), c), d)`` to ``And(a, b, c, d)`` + # but only if there are no parse actions or resultsNames on the nested And's + # (likewise for :class:`Or`'s and :class:`MatchFirst`'s) + if len(self.exprs) == 2: + other = self.exprs[0] + if ( + isinstance(other, self.__class__) + and not other.parseAction + and other.resultsName is None + and not other.debug + ): + self.exprs = other.exprs[:] + [self.exprs[1]] + self._defaultName = None + self.mayReturnEmpty |= other.mayReturnEmpty + self.mayIndexError |= other.mayIndexError + + other = self.exprs[-1] + if ( + isinstance(other, self.__class__) + and not other.parseAction + and other.resultsName is None + and not other.debug + ): + self.exprs = self.exprs[:-1] + other.exprs[:] + self._defaultName = None + self.mayReturnEmpty |= other.mayReturnEmpty + self.mayIndexError |= other.mayIndexError + + self.errmsg = "Expected " + str(self) + + return self + + def validate(self, validateTrace=None) -> None: + tmp = (validateTrace if validateTrace is not None else [])[:] + [self] + for e in self.exprs: + e.validate(tmp) + self._checkRecursion([]) + + def copy(self) -> ParserElement: + ret = super().copy() + ret.exprs = [e.copy() for e in self.exprs] + return ret + + def 
_setResultsName(self, name, listAllMatches=False): + if ( + __diag__.warn_ungrouped_named_tokens_in_collection + and Diagnostics.warn_ungrouped_named_tokens_in_collection + not in self.suppress_warnings_ + ): + for e in self.exprs: + if ( + isinstance(e, ParserElement) + and e.resultsName + and Diagnostics.warn_ungrouped_named_tokens_in_collection + not in e.suppress_warnings_ + ): + warnings.warn( + "{}: setting results name {!r} on {} expression " + "collides with {!r} on contained expression".format( + "warn_ungrouped_named_tokens_in_collection", + name, + type(self).__name__, + e.resultsName, + ), + stacklevel=3, + ) + + return super()._setResultsName(name, listAllMatches) + + ignoreWhitespace = ignore_whitespace + leaveWhitespace = leave_whitespace + + +class And(ParseExpression): + """ + Requires all given :class:`ParseExpression` s to be found in the given order. + Expressions may be separated by whitespace. + May be constructed using the ``'+'`` operator. + May also be constructed using the ``'-'`` operator, which will + suppress backtracking. + + Example:: + + integer = Word(nums) + name_expr = OneOrMore(Word(alphas)) + + expr = And([integer("id"), name_expr("name"), integer("age")]) + # more easily written as: + expr = integer("id") + name_expr("name") + integer("age") + """ + + class _ErrorStop(Empty): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.leave_whitespace() + + def _generateDefaultName(self): + return "-" + + def __init__(self, exprs_arg: IterableType[ParserElement], savelist: bool = True): + exprs: List[ParserElement] = list(exprs_arg) + if exprs and Ellipsis in exprs: + tmp = [] + for i, expr in enumerate(exprs): + if expr is Ellipsis: + if i < len(exprs) - 1: + skipto_arg: ParserElement = (Empty() + exprs[i + 1]).exprs[-1] + tmp.append(SkipTo(skipto_arg)("_skipped*")) + else: + raise Exception( + "cannot construct And with sequence ending in ..." 
+ ) + else: + tmp.append(expr) + exprs[:] = tmp + super().__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + if not isinstance(self.exprs[0], White): + self.set_whitespace_chars( + self.exprs[0].whiteChars, + copy_defaults=self.exprs[0].copyDefaultWhiteChars, + ) + self.skipWhitespace = self.exprs[0].skipWhitespace + else: + self.skipWhitespace = False + else: + self.mayReturnEmpty = True + self.callPreparse = True + + def streamline(self) -> ParserElement: + # collapse any _PendingSkip's + if self.exprs: + if any( + isinstance(e, ParseExpression) + and e.exprs + and isinstance(e.exprs[-1], _PendingSkip) + for e in self.exprs[:-1] + ): + for i, e in enumerate(self.exprs[:-1]): + if e is None: + continue + if ( + isinstance(e, ParseExpression) + and e.exprs + and isinstance(e.exprs[-1], _PendingSkip) + ): + e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1] + self.exprs[i + 1] = None + self.exprs = [e for e in self.exprs if e is not None] + + super().streamline() + + # link any IndentedBlocks to the prior expression + for prev, cur in zip(self.exprs, self.exprs[1:]): + # traverse cur or any first embedded expr of cur looking for an IndentedBlock + # (but watch out for recursive grammar) + seen = set() + while cur: + if id(cur) in seen: + break + seen.add(id(cur)) + if isinstance(cur, IndentedBlock): + prev.add_parse_action( + lambda s, l, t, cur_=cur: setattr(cur_, "parent_anchor", col(l, s)) + ) + break + subs = cur.recurse() + cur = next(iter(subs), None) + + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + return self + + def parseImpl(self, instring, loc, doActions=True): + # pass False as callPreParse arg to _parse for first element, since we already + # pre-parsed the string as part of our And pre-parsing + loc, resultlist = self.exprs[0]._parse( + instring, loc, doActions, callPreParse=False + ) + errorStop = False + for e in self.exprs[1:]: + # if isinstance(e, And._ErrorStop): + if type(e) is And._ErrorStop: + errorStop = True + continue + if errorStop: + try: + loc, exprtokens = e._parse(instring, loc, doActions) + except ParseSyntaxException: + raise + except ParseBaseException as pe: + pe.__traceback__ = None + raise ParseSyntaxException._from_exception(pe) + except IndexError: + raise ParseSyntaxException( + instring, len(instring), self.errmsg, self + ) + else: + loc, exprtokens = e._parse(instring, loc, doActions) + if exprtokens or exprtokens.haskeys(): + resultlist += exprtokens + return loc, resultlist + + def __iadd__(self, other): + if isinstance(other, str_type): + other = self._literalStringClass(other) + return self.append(other) # And([self, other]) + + def _checkRecursion(self, parseElementList): + subRecCheckList = parseElementList[:] + [self] + for e in self.exprs: + e._checkRecursion(subRecCheckList) + if not e.mayReturnEmpty: + break + + def _generateDefaultName(self): + inner = " ".join(str(e) for e in self.exprs) + # strip off redundant inner {}'s + while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": + inner = inner[1:-1] + return "{" + inner + "}" + + +class Or(ParseExpression): + """Requires that at least one :class:`ParseExpression` is found. If + two expressions match, the expression that matches the longest + string will be used. May be constructed using the ``'^'`` + operator. + + Example:: + + # construct Or using '^' operator + + number = Word(nums) ^ Combine(Word(nums) + '.' 
+ Word(nums)) + print(number.search_string("123 3.1416 789")) + + prints:: + + [['123'], ['3.1416'], ['789']] + """ + + def __init__(self, exprs: IterableType[ParserElement], savelist: bool = False): + super().__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) + else: + self.mayReturnEmpty = True + + def streamline(self) -> ParserElement: + super().streamline() + if self.exprs: + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + self.saveAsList = any(e.saveAsList for e in self.exprs) + self.skipWhitespace = all( + e.skipWhitespace and not isinstance(e, White) for e in self.exprs + ) + else: + self.saveAsList = False + return self + + def parseImpl(self, instring, loc, doActions=True): + maxExcLoc = -1 + maxException = None + matches = [] + fatals = [] + if all(e.callPreparse for e in self.exprs): + loc = self.preParse(instring, loc) + for e in self.exprs: + try: + loc2 = e.try_parse(instring, loc, raise_fatal=True) + except ParseFatalException as pfe: + pfe.__traceback__ = None + pfe.parserElement = e + fatals.append(pfe) + maxException = None + maxExcLoc = -1 + except ParseException as err: + if not fatals: + err.__traceback__ = None + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + except IndexError: + if len(instring) > maxExcLoc: + maxException = ParseException( + instring, len(instring), e.errmsg, self + ) + maxExcLoc = len(instring) + else: + # save match among all matches, to retry longest to shortest + matches.append((loc2, e)) + + if matches: + # re-evaluate all matches in descending order of length of match, in case attached actions + # might change whether or how much they match of the input. 
+ matches.sort(key=itemgetter(0), reverse=True) + + if not doActions: + # no further conditions or parse actions to change the selection of + # alternative, so the first match will be the best match + best_expr = matches[0][1] + return best_expr._parse(instring, loc, doActions) + + longest = -1, None + for loc1, expr1 in matches: + if loc1 <= longest[0]: + # already have a longer match than this one will deliver, we are done + return longest + + try: + loc2, toks = expr1._parse(instring, loc, doActions) + except ParseException as err: + err.__traceback__ = None + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + else: + if loc2 >= loc1: + return loc2, toks + # didn't match as much as before + elif loc2 > longest[0]: + longest = loc2, toks + + if longest != (-1, None): + return longest + + if fatals: + if len(fatals) > 1: + fatals.sort(key=lambda e: -e.loc) + if fatals[0].loc == fatals[1].loc: + fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement)))) + max_fatal = fatals[0] + raise max_fatal + + if maxException is not None: + maxException.msg = self.errmsg + raise maxException + else: + raise ParseException( + instring, loc, "no defined alternatives to match", self + ) + + def __ixor__(self, other): + if isinstance(other, str_type): + other = self._literalStringClass(other) + return self.append(other) # Or([self, other]) + + def _generateDefaultName(self): + return "{" + " ^ ".join(str(e) for e in self.exprs) + "}" + + def _setResultsName(self, name, listAllMatches=False): + if ( + __diag__.warn_multiple_tokens_in_named_alternation + and Diagnostics.warn_multiple_tokens_in_named_alternation + not in self.suppress_warnings_ + ): + if any( + isinstance(e, And) + and Diagnostics.warn_multiple_tokens_in_named_alternation + not in e.suppress_warnings_ + for e in self.exprs + ): + warnings.warn( + "{}: setting results name {!r} on {} expression " + "will return a list of all parsed tokens in an And alternative, " + "in prior versions only the first token was returned; enclose " + "contained argument in Group".format( + "warn_multiple_tokens_in_named_alternation", + name, + type(self).__name__, + ), + stacklevel=3, + ) + + return super()._setResultsName(name, listAllMatches) + + +class MatchFirst(ParseExpression): + """Requires that at least one :class:`ParseExpression` is found. If + more than one expression matches, the first one listed is the one that will + match. May be constructed using the ``'|'`` operator. + + Example:: + + # construct MatchFirst using '|' operator + + # watch the order of expressions to match + number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) + print(number.search_string("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] + + # put more selective expression first + number = Combine(Word(nums) + '.' 
+ Word(nums)) | Word(nums) + print(number.search_string("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] + """ + + def __init__(self, exprs: IterableType[ParserElement], savelist: bool = False): + super().__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) + else: + self.mayReturnEmpty = True + + def streamline(self) -> ParserElement: + if self.streamlined: + return self + + super().streamline() + if self.exprs: + self.saveAsList = any(e.saveAsList for e in self.exprs) + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + self.skipWhitespace = all( + e.skipWhitespace and not isinstance(e, White) for e in self.exprs + ) + else: + self.saveAsList = False + self.mayReturnEmpty = True + return self + + def parseImpl(self, instring, loc, doActions=True): + maxExcLoc = -1 + maxException = None + + for e in self.exprs: + try: + return e._parse( + instring, + loc, + doActions, + ) + except ParseFatalException as pfe: + pfe.__traceback__ = None + pfe.parserElement = e + raise + except ParseException as err: + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + except IndexError: + if len(instring) > maxExcLoc: + maxException = ParseException( + instring, len(instring), e.errmsg, self + ) + maxExcLoc = len(instring) + + if maxException is not None: + maxException.msg = self.errmsg + raise maxException + else: + raise ParseException( + instring, loc, "no defined alternatives to match", self + ) + + def __ior__(self, other): + if isinstance(other, str_type): + other = self._literalStringClass(other) + return self.append(other) # MatchFirst([self, other]) + + def _generateDefaultName(self): + return "{" + " | ".join(str(e) for e in self.exprs) + "}" + + def _setResultsName(self, name, listAllMatches=False): + if ( + __diag__.warn_multiple_tokens_in_named_alternation + and Diagnostics.warn_multiple_tokens_in_named_alternation + not in self.suppress_warnings_ + ): + if any( + isinstance(e, And) + and Diagnostics.warn_multiple_tokens_in_named_alternation + not in e.suppress_warnings_ + for e in self.exprs + ): + warnings.warn( + "{}: setting results name {!r} on {} expression " + "will return a list of all parsed tokens in an And alternative, " + "in prior versions only the first token was returned; enclose " + "contained argument in Group".format( + "warn_multiple_tokens_in_named_alternation", + name, + type(self).__name__, + ), + stacklevel=3, + ) + + return super()._setResultsName(name, listAllMatches) + + +class Each(ParseExpression): + """Requires all given :class:`ParseExpression` s to be found, but in + any order. Expressions may be separated by whitespace. + + May be constructed using the ``'&'`` operator. 
+ + Example:: + + color = one_of("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") + shape_type = one_of("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") + integer = Word(nums) + shape_attr = "shape:" + shape_type("shape") + posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") + color_attr = "color:" + color("color") + size_attr = "size:" + integer("size") + + # use Each (using operator '&') to accept attributes in any order + # (shape and posn are required, color and size are optional) + shape_spec = shape_attr & posn_attr & Opt(color_attr) & Opt(size_attr) + + shape_spec.run_tests(''' + shape: SQUARE color: BLACK posn: 100, 120 + shape: CIRCLE size: 50 color: BLUE posn: 50,80 + color:GREEN size:20 shape:TRIANGLE posn:20,40 + ''' + ) + + prints:: + + shape: SQUARE color: BLACK posn: 100, 120 + ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] + - color: BLACK + - posn: ['100', ',', '120'] + - x: 100 + - y: 120 + - shape: SQUARE + + + shape: CIRCLE size: 50 color: BLUE posn: 50,80 + ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] + - color: BLUE + - posn: ['50', ',', '80'] + - x: 50 + - y: 80 + - shape: CIRCLE + - size: 50 + + + color: GREEN size: 20 shape: TRIANGLE posn: 20,40 + ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] + - color: GREEN + - posn: ['20', ',', '40'] + - x: 20 + - y: 40 + - shape: TRIANGLE + - size: 20 + """ + + def __init__(self, exprs: IterableType[ParserElement], savelist: bool = True): + super().__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + else: + self.mayReturnEmpty = True + self.skipWhitespace = True + self.initExprGroups = True + self.saveAsList = True + + def streamline(self) -> ParserElement: + super().streamline() + if self.exprs: + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + else: + self.mayReturnEmpty = True + return self + + def parseImpl(self, instring, loc, doActions=True): + if self.initExprGroups: + self.opt1map = dict( + (id(e.expr), e) for e in self.exprs if isinstance(e, Opt) + ) + opt1 = [e.expr for e in self.exprs if isinstance(e, Opt)] + opt2 = [ + e + for e in self.exprs + if e.mayReturnEmpty and not isinstance(e, (Opt, Regex, ZeroOrMore)) + ] + self.optionals = opt1 + opt2 + self.multioptionals = [ + e.expr.set_results_name(e.resultsName, list_all_matches=True) + for e in self.exprs + if isinstance(e, _MultipleMatch) + ] + self.multirequired = [ + e.expr.set_results_name(e.resultsName, list_all_matches=True) + for e in self.exprs + if isinstance(e, OneOrMore) + ] + self.required = [ + e for e in self.exprs if not isinstance(e, (Opt, ZeroOrMore, OneOrMore)) + ] + self.required += self.multirequired + self.initExprGroups = False + + tmpLoc = loc + tmpReqd = self.required[:] + tmpOpt = self.optionals[:] + multis = self.multioptionals[:] + matchOrder = [] + + keepMatching = True + failed = [] + fatals = [] + while keepMatching: + tmpExprs = tmpReqd + tmpOpt + multis + failed.clear() + fatals.clear() + for e in tmpExprs: + try: + tmpLoc = e.try_parse(instring, tmpLoc, raise_fatal=True) + except ParseFatalException as pfe: + pfe.__traceback__ = None + pfe.parserElement = e + fatals.append(pfe) + failed.append(e) + except ParseException: + failed.append(e) + else: + matchOrder.append(self.opt1map.get(id(e), e)) + if e in tmpReqd: + tmpReqd.remove(e) + elif e in tmpOpt: + tmpOpt.remove(e) + if len(failed) == len(tmpExprs): + keepMatching = False + + # 
look for any ParseFatalExceptions + if fatals: + if len(fatals) > 1: + fatals.sort(key=lambda e: -e.loc) + if fatals[0].loc == fatals[1].loc: + fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement)))) + max_fatal = fatals[0] + raise max_fatal + + if tmpReqd: + missing = ", ".join([str(e) for e in tmpReqd]) + raise ParseException( + instring, + loc, + "Missing one or more required elements ({})".format(missing), + ) + + # add any unmatched Opts, in case they have default values defined + matchOrder += [e for e in self.exprs if isinstance(e, Opt) and e.expr in tmpOpt] + + total_results = ParseResults([]) + for e in matchOrder: + loc, results = e._parse(instring, loc, doActions) + total_results += results + + return loc, total_results + + def _generateDefaultName(self): + return "{" + " & ".join(str(e) for e in self.exprs) + "}" + + +class ParseElementEnhance(ParserElement): + """Abstract subclass of :class:`ParserElement`, for combining and + post-processing parsed tokens. + """ + + def __init__(self, expr: Union[ParserElement, str], savelist: bool = False): + super().__init__(savelist) + if isinstance(expr, str_type): + if issubclass(self._literalStringClass, Token): + expr = self._literalStringClass(expr) + elif issubclass(type(self), self._literalStringClass): + expr = Literal(expr) + else: + expr = self._literalStringClass(Literal(expr)) + self.expr = expr + if expr is not None: + self.mayIndexError = expr.mayIndexError + self.mayReturnEmpty = expr.mayReturnEmpty + self.set_whitespace_chars( + expr.whiteChars, copy_defaults=expr.copyDefaultWhiteChars + ) + self.skipWhitespace = expr.skipWhitespace + self.saveAsList = expr.saveAsList + self.callPreparse = expr.callPreparse + self.ignoreExprs.extend(expr.ignoreExprs) + + def recurse(self) -> Sequence[ParserElement]: + return [self.expr] if self.expr is not None else [] + + def parseImpl(self, instring, loc, doActions=True): + if self.expr is not None: + return self.expr._parse(instring, loc, doActions, callPreParse=False) + else: + raise ParseException(instring, loc, "No expression defined", self) + + def leave_whitespace(self, recursive: bool = True) -> ParserElement: + super().leave_whitespace(recursive) + + if recursive: + self.expr = self.expr.copy() + if self.expr is not None: + self.expr.leave_whitespace(recursive) + return self + + def ignore_whitespace(self, recursive: bool = True) -> ParserElement: + super().ignore_whitespace(recursive) + + if recursive: + self.expr = self.expr.copy() + if self.expr is not None: + self.expr.ignore_whitespace(recursive) + return self + + def ignore(self, other) -> ParserElement: + if isinstance(other, Suppress): + if other not in self.ignoreExprs: + super().ignore(other) + if self.expr is not None: + self.expr.ignore(self.ignoreExprs[-1]) + else: + super().ignore(other) + if self.expr is not None: + self.expr.ignore(self.ignoreExprs[-1]) + return self + + def streamline(self) -> ParserElement: + super().streamline() + if self.expr is not None: + self.expr.streamline() + return self + + def _checkRecursion(self, parseElementList): + if self in parseElementList: + raise RecursiveGrammarException(parseElementList + [self]) + subRecCheckList = parseElementList[:] + [self] + if self.expr is not None: + self.expr._checkRecursion(subRecCheckList) + + def validate(self, validateTrace=None) -> None: + if validateTrace is None: + validateTrace = [] + tmp = validateTrace[:] + [self] + if self.expr is not None: + self.expr.validate(tmp) + self._checkRecursion([]) + + def _generateDefaultName(self): + 
return "{}:({})".format(self.__class__.__name__, str(self.expr)) + + ignoreWhitespace = ignore_whitespace + leaveWhitespace = leave_whitespace + + +class IndentedBlock(ParseElementEnhance): + """ + Expression to match one or more expressions at a given indentation level. + Useful for parsing text where structure is implied by indentation (like Python source code). + """ + + class _Indent(Empty): + def __init__(self, ref_col: int): + super().__init__() + self.errmsg = "expected indent at column {}".format(ref_col) + self.add_condition(lambda s, l, t: col(l, s) == ref_col) + + class _IndentGreater(Empty): + def __init__(self, ref_col: int): + super().__init__() + self.errmsg = "expected indent at column greater than {}".format(ref_col) + self.add_condition(lambda s, l, t: col(l, s) > ref_col) + + def __init__( + self, expr: ParserElement, *, recursive: bool = False, grouped: bool = True + ): + super().__init__(expr, savelist=True) + # if recursive: + # raise NotImplementedError("IndentedBlock with recursive is not implemented") + self._recursive = recursive + self._grouped = grouped + self.parent_anchor = 1 + + def parseImpl(self, instring, loc, doActions=True): + # advance parse position to non-whitespace by using an Empty() + # this should be the column to be used for all subsequent indented lines + anchor_loc = Empty().preParse(instring, loc) + + # see if self.expr matches at the current location - if not it will raise an exception + # and no further work is necessary + self.expr.try_parse(instring, anchor_loc, doActions) + + indent_col = col(anchor_loc, instring) + peer_detect_expr = self._Indent(indent_col) + + inner_expr = Empty() + peer_detect_expr + self.expr + if self._recursive: + sub_indent = self._IndentGreater(indent_col) + nested_block = IndentedBlock( + self.expr, recursive=self._recursive, grouped=self._grouped + ) + nested_block.set_debug(self.debug) + nested_block.parent_anchor = indent_col + inner_expr += Opt(sub_indent + nested_block) + + inner_expr.set_name(f"inner {hex(id(inner_expr))[-4:].upper()}@{indent_col}") + block = OneOrMore(inner_expr) + + trailing_undent = self._Indent(self.parent_anchor) | StringEnd() + + if self._grouped: + wrapper = Group + else: + wrapper = lambda expr: expr + return (wrapper(block) + Optional(trailing_undent)).parseImpl( + instring, anchor_loc, doActions + ) + + +class AtStringStart(ParseElementEnhance): + """Matches if expression matches at the beginning of the parse + string:: + + AtStringStart(Word(nums)).parse_string("123") + # prints ["123"] + + AtStringStart(Word(nums)).parse_string(" 123") + # raises ParseException + """ + + def __init__(self, expr: Union[ParserElement, str]): + super().__init__(expr) + self.callPreparse = False + + def parseImpl(self, instring, loc, doActions=True): + if loc != 0: + raise ParseException(instring, loc, "not found at string start") + return super().parseImpl(instring, loc, doActions) + + +class AtLineStart(ParseElementEnhance): + r"""Matches if an expression matches at the beginning of a line within + the parse string + + Example:: + + test = '''\ + AAA this line + AAA and this line + AAA but not this one + B AAA and definitely not this one + ''' + + for t in (AtLineStart('AAA') + restOfLine).search_string(test): + print(t) + + prints:: + + ['AAA', ' this line'] + ['AAA', ' and this line'] + + """ + + def __init__(self, expr: Union[ParserElement, str]): + super().__init__(expr) + self.callPreparse = False + + def parseImpl(self, instring, loc, doActions=True): + if col(loc, instring) != 1: + raise 
ParseException(instring, loc, "not found at line start") + return super().parseImpl(instring, loc, doActions) + + +class FollowedBy(ParseElementEnhance): + """Lookahead matching of the given parse expression. + ``FollowedBy`` does *not* advance the parsing position within + the input string, it only verifies that the specified parse + expression matches at the current position. ``FollowedBy`` + always returns a null token list. If any results names are defined + in the lookahead expression, those *will* be returned for access by + name. + + Example:: + + # use FollowedBy to match a label only if it is followed by a ':' + data_word = Word(alphas) + label = data_word + FollowedBy(':') + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) + + OneOrMore(attr_expr).parse_string("shape: SQUARE color: BLACK posn: upper left").pprint() + + prints:: + + [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] + """ + + def __init__(self, expr: Union[ParserElement, str]): + super().__init__(expr) + self.mayReturnEmpty = True + + def parseImpl(self, instring, loc, doActions=True): + # by using self._expr.parse and deleting the contents of the returned ParseResults list + # we keep any named results that were defined in the FollowedBy expression + _, ret = self.expr._parse(instring, loc, doActions=doActions) + del ret[:] + + return loc, ret + + +class PrecededBy(ParseElementEnhance): + """Lookbehind matching of the given parse expression. + ``PrecededBy`` does not advance the parsing position within the + input string, it only verifies that the specified parse expression + matches prior to the current position. ``PrecededBy`` always + returns a null token list, but if a results name is defined on the + given expression, it is returned. + + Parameters: + + - expr - expression that must match prior to the current parse + location + - retreat - (default= ``None``) - (int) maximum number of characters + to lookbehind prior to the current parse location + + If the lookbehind expression is a string, :class:`Literal`, + :class:`Keyword`, or a :class:`Word` or :class:`CharsNotIn` + with a specified exact or maximum length, then the retreat + parameter is not required. Otherwise, retreat must be specified to + give a maximum number of characters to look back from + the current parse position for a lookbehind match. 
+ + Example:: + + # VB-style variable names with type prefixes + int_var = PrecededBy("#") + pyparsing_common.identifier + str_var = PrecededBy("$") + pyparsing_common.identifier + + """ + + def __init__( + self, expr: Union[ParserElement, str], retreat: OptionalType[int] = None + ): + super().__init__(expr) + self.expr = self.expr().leave_whitespace() + self.mayReturnEmpty = True + self.mayIndexError = False + self.exact = False + if isinstance(expr, str_type): + retreat = len(expr) + self.exact = True + elif isinstance(expr, (Literal, Keyword)): + retreat = expr.matchLen + self.exact = True + elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT: + retreat = expr.maxLen + self.exact = True + elif isinstance(expr, PositionToken): + retreat = 0 + self.exact = True + self.retreat = retreat + self.errmsg = "not preceded by " + str(expr) + self.skipWhitespace = False + self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None))) + + def parseImpl(self, instring, loc=0, doActions=True): + if self.exact: + if loc < self.retreat: + raise ParseException(instring, loc, self.errmsg) + start = loc - self.retreat + _, ret = self.expr._parse(instring, start) + else: + # retreat specified a maximum lookbehind window, iterate + test_expr = self.expr + StringEnd() + instring_slice = instring[max(0, loc - self.retreat) : loc] + last_expr = ParseException(instring, loc, self.errmsg) + for offset in range(1, min(loc, self.retreat + 1) + 1): + try: + # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:])) + _, ret = test_expr._parse( + instring_slice, len(instring_slice) - offset + ) + except ParseBaseException as pbe: + last_expr = pbe + else: + break + else: + raise last_expr + return loc, ret + + +class Located(ParseElementEnhance): + """ + Decorates a returned token with its starting and ending + locations in the input string. + + This helper adds the following results names: + + - ``locn_start`` - location where matched expression begins + - ``locn_end`` - location where matched expression ends + - ``value`` - the actual parsed results + + Be careful if the input text contains ```` characters, you + may want to call :class:`ParserElement.parse_with_tabs` + + Example:: + + wd = Word(alphas) + for match in Located(wd).search_string("ljsdf123lksdjjf123lkkjj1222"): + print(match) + + prints:: + + [0, ['ljsdf'], 5] + [8, ['lksdjjf'], 15] + [18, ['lkkjj'], 23] + + """ + + def parseImpl(self, instring, loc, doActions=True): + start = loc + loc, tokens = self.expr._parse(instring, start, doActions, callPreParse=False) + ret_tokens = ParseResults([start, tokens, loc]) + ret_tokens["locn_start"] = start + ret_tokens["value"] = tokens + ret_tokens["locn_end"] = loc + if self.resultsName: + # must return as a list, so that the name will be attached to the complete group + return loc, [ret_tokens] + else: + return loc, ret_tokens + + +class NotAny(ParseElementEnhance): + """ + Lookahead to disallow matching with the given parse expression. + ``NotAny`` does *not* advance the parsing position within the + input string, it only verifies that the specified parse expression + does *not* match at the current position. Also, ``NotAny`` does + *not* skip over leading whitespace. ``NotAny`` always returns + a null token list. May be constructed using the ``'~'`` operator. 
+ + Example:: + + AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split()) + + # take care not to mistake keywords for identifiers + ident = ~(AND | OR | NOT) + Word(alphas) + boolean_term = Opt(NOT) + ident + + # very crude boolean expression - to support parenthesis groups and + # operation hierarchy, use infix_notation + boolean_expr = boolean_term + ZeroOrMore((AND | OR) + boolean_term) + + # integers that are followed by "." are actually floats + integer = Word(nums) + ~Char(".") + """ + + def __init__(self, expr: Union[ParserElement, str]): + super().__init__(expr) + # do NOT use self.leave_whitespace(), don't want to propagate to exprs + # self.leave_whitespace() + self.skipWhitespace = False + + self.mayReturnEmpty = True + self.errmsg = "Found unwanted token, " + str(self.expr) + + def parseImpl(self, instring, loc, doActions=True): + if self.expr.can_parse_next(instring, loc): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + def _generateDefaultName(self): + return "~{" + str(self.expr) + "}" + + +class _MultipleMatch(ParseElementEnhance): + def __init__( + self, + expr: ParserElement, + stop_on: OptionalType[Union[ParserElement, str]] = None, + *, + stopOn: OptionalType[Union[ParserElement, str]] = None, + ): + super().__init__(expr) + stopOn = stopOn or stop_on + self.saveAsList = True + ender = stopOn + if isinstance(ender, str_type): + ender = self._literalStringClass(ender) + self.stopOn(ender) + + def stopOn(self, ender) -> ParserElement: + if isinstance(ender, str_type): + ender = self._literalStringClass(ender) + self.not_ender = ~ender if ender is not None else None + return self + + def parseImpl(self, instring, loc, doActions=True): + self_expr_parse = self.expr._parse + self_skip_ignorables = self._skipIgnorables + check_ender = self.not_ender is not None + if check_ender: + try_not_ender = self.not_ender.tryParse + + # must be at least one (but first see if we are the stopOn sentinel; + # if so, fail) + if check_ender: + try_not_ender(instring, loc) + loc, tokens = self_expr_parse(instring, loc, doActions) + try: + hasIgnoreExprs = not not self.ignoreExprs + while 1: + if check_ender: + try_not_ender(instring, loc) + if hasIgnoreExprs: + preloc = self_skip_ignorables(instring, loc) + else: + preloc = loc + loc, tmptokens = self_expr_parse(instring, preloc, doActions) + if tmptokens or tmptokens.haskeys(): + tokens += tmptokens + except (ParseException, IndexError): + pass + + return loc, tokens + + def _setResultsName(self, name, listAllMatches=False): + if ( + __diag__.warn_ungrouped_named_tokens_in_collection + and Diagnostics.warn_ungrouped_named_tokens_in_collection + not in self.suppress_warnings_ + ): + for e in [self.expr] + self.expr.recurse(): + if ( + isinstance(e, ParserElement) + and e.resultsName + and Diagnostics.warn_ungrouped_named_tokens_in_collection + not in e.suppress_warnings_ + ): + warnings.warn( + "{}: setting results name {!r} on {} expression " + "collides with {!r} on contained expression".format( + "warn_ungrouped_named_tokens_in_collection", + name, + type(self).__name__, + e.resultsName, + ), + stacklevel=3, + ) + + return super()._setResultsName(name, listAllMatches) + + +class OneOrMore(_MultipleMatch): + """ + Repetition of one or more of the given expression. 
+ + Parameters: + - expr - expression that must match one or more times + - stop_on - (default= ``None``) - expression for a terminating sentinel + (only required if the sentinel would ordinarily match the repetition + expression) + + Example:: + + data_word = Word(alphas) + label = data_word + FollowedBy(':') + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).set_parse_action(' '.join)) + + text = "shape: SQUARE posn: upper left color: BLACK" + OneOrMore(attr_expr).parse_string(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']] + + # use stop_on attribute for OneOrMore to avoid reading label string as part of the data + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) + OneOrMore(attr_expr).parse_string(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] + + # could also be written as + (attr_expr * (1,)).parse_string(text).pprint() + """ + + def _generateDefaultName(self): + return "{" + str(self.expr) + "}..." + + +class ZeroOrMore(_MultipleMatch): + """ + Optional repetition of zero or more of the given expression. + + Parameters: + - ``expr`` - expression that must match zero or more times + - ``stop_on`` - expression for a terminating sentinel + (only required if the sentinel would ordinarily match the repetition + expression) - (default= ``None``) + + Example: similar to :class:`OneOrMore` + """ + + def __init__( + self, + expr: ParserElement, + stop_on: OptionalType[Union[ParserElement, str]] = None, + *, + stopOn: OptionalType[Union[ParserElement, str]] = None, + ): + super().__init__(expr, stopOn=stopOn or stop_on) + self.mayReturnEmpty = True + + def parseImpl(self, instring, loc, doActions=True): + try: + return super().parseImpl(instring, loc, doActions) + except (ParseException, IndexError): + return loc, ParseResults([], name=self.resultsName) + + def _generateDefaultName(self): + return "[" + str(self.expr) + "]..." + + +class _NullToken: + def __bool__(self): + return False + + def __str__(self): + return "" + + +class Opt(ParseElementEnhance): + """ + Optional matching of the given expression. + + Parameters: + - ``expr`` - expression that must match zero or more times + - ``default`` (optional) - value to be returned if the optional expression is not found. 
+ + Example:: + + # US postal code can be a 5-digit zip, plus optional 4-digit qualifier + zip = Combine(Word(nums, exact=5) + Opt('-' + Word(nums, exact=4))) + zip.run_tests(''' + # traditional ZIP code + 12345 + + # ZIP+4 form + 12101-0001 + + # invalid ZIP + 98765- + ''') + + prints:: + + # traditional ZIP code + 12345 + ['12345'] + + # ZIP+4 form + 12101-0001 + ['12101-0001'] + + # invalid ZIP + 98765- + ^ + FAIL: Expected end of text (at char 5), (line:1, col:6) + """ + + __optionalNotMatched = _NullToken() + + def __init__( + self, expr: Union[ParserElement, str], default: Any = __optionalNotMatched + ): + super().__init__(expr, savelist=False) + self.saveAsList = self.expr.saveAsList + self.defaultValue = default + self.mayReturnEmpty = True + + def parseImpl(self, instring, loc, doActions=True): + self_expr = self.expr + try: + loc, tokens = self_expr._parse(instring, loc, doActions, callPreParse=False) + except (ParseException, IndexError): + default_value = self.defaultValue + if default_value is not self.__optionalNotMatched: + if self_expr.resultsName: + tokens = ParseResults([default_value]) + tokens[self_expr.resultsName] = default_value + else: + tokens = [default_value] + else: + tokens = [] + return loc, tokens + + def _generateDefaultName(self): + inner = str(self.expr) + # strip off redundant inner {}'s + while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": + inner = inner[1:-1] + return "[" + inner + "]" + + +Optional = Opt + + +class SkipTo(ParseElementEnhance): + """ + Token for skipping over all undefined text until the matched + expression is found. + + Parameters: + - ``expr`` - target expression marking the end of the data to be skipped + - ``include`` - if ``True``, the target expression is also parsed + (the skipped text and target expression are returned as a 2-element + list) (default= ``False``). 
+ - ``ignore`` - (default= ``None``) used to define grammars (typically quoted strings and + comments) that might contain false matches to the target expression + - ``fail_on`` - (default= ``None``) define expressions that are not allowed to be + included in the skipped test; if found before the target expression is found, + the :class:`SkipTo` is not a match + + Example:: + + report = ''' + Outstanding Issues Report - 1 Jan 2000 + + # | Severity | Description | Days Open + -----+----------+-------------------------------------------+----------- + 101 | Critical | Intermittent system crash | 6 + 94 | Cosmetic | Spelling error on Login ('log|n') | 14 + 79 | Minor | System slow when running too many reports | 47 + ''' + integer = Word(nums) + SEP = Suppress('|') + # use SkipTo to simply match everything up until the next SEP + # - ignore quoted strings, so that a '|' character inside a quoted string does not match + # - parse action will call token.strip() for each matched token, i.e., the description body + string_data = SkipTo(SEP, ignore=quoted_string) + string_data.set_parse_action(token_map(str.strip)) + ticket_expr = (integer("issue_num") + SEP + + string_data("sev") + SEP + + string_data("desc") + SEP + + integer("days_open")) + + for tkt in ticket_expr.search_string(report): + print tkt.dump() + + prints:: + + ['101', 'Critical', 'Intermittent system crash', '6'] + - days_open: 6 + - desc: Intermittent system crash + - issue_num: 101 + - sev: Critical + ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14'] + - days_open: 14 + - desc: Spelling error on Login ('log|n') + - issue_num: 94 + - sev: Cosmetic + ['79', 'Minor', 'System slow when running too many reports', '47'] + - days_open: 47 + - desc: System slow when running too many reports + - issue_num: 79 + - sev: Minor + """ + + def __init__( + self, + other: Union[ParserElement, str], + include: bool = False, + ignore: bool = None, + fail_on: OptionalType[Union[ParserElement, str]] = None, + *, + failOn: Union[ParserElement, str] = None, + ): + super().__init__(other) + failOn = failOn or fail_on + self.ignoreExpr = ignore + self.mayReturnEmpty = True + self.mayIndexError = False + self.includeMatch = include + self.saveAsList = False + if isinstance(failOn, str_type): + self.failOn = self._literalStringClass(failOn) + else: + self.failOn = failOn + self.errmsg = "No match found for " + str(self.expr) + + def parseImpl(self, instring, loc, doActions=True): + startloc = loc + instrlen = len(instring) + self_expr_parse = self.expr._parse + self_failOn_canParseNext = ( + self.failOn.canParseNext if self.failOn is not None else None + ) + self_ignoreExpr_tryParse = ( + self.ignoreExpr.tryParse if self.ignoreExpr is not None else None + ) + + tmploc = loc + while tmploc <= instrlen: + if self_failOn_canParseNext is not None: + # break if failOn expression matches + if self_failOn_canParseNext(instring, tmploc): + break + + if self_ignoreExpr_tryParse is not None: + # advance past ignore expressions + while 1: + try: + tmploc = self_ignoreExpr_tryParse(instring, tmploc) + except ParseBaseException: + break + + try: + self_expr_parse(instring, tmploc, doActions=False, callPreParse=False) + except (ParseException, IndexError): + # no match, advance loc in string + tmploc += 1 + else: + # matched skipto expr, done + break + + else: + # ran off the end of the input string without matching skipto expr, fail + raise ParseException(instring, loc, self.errmsg, self) + + # build up return values + loc = tmploc + skiptext = 
instring[startloc:loc] + skipresult = ParseResults(skiptext) + + if self.includeMatch: + loc, mat = self_expr_parse(instring, loc, doActions, callPreParse=False) + skipresult += mat + + return loc, skipresult + + +class Forward(ParseElementEnhance): + """ + Forward declaration of an expression to be defined later - + used for recursive grammars, such as algebraic infix notation. + When the expression is known, it is assigned to the ``Forward`` + variable using the ``'<<'`` operator. + + Note: take care when assigning to ``Forward`` not to overlook + precedence of operators. + + Specifically, ``'|'`` has a lower precedence than ``'<<'``, so that:: + + fwd_expr << a | b | c + + will actually be evaluated as:: + + (fwd_expr << a) | b | c + + thereby leaving b and c out as parseable alternatives. It is recommended that you + explicitly group the values inserted into the ``Forward``:: + + fwd_expr << (a | b | c) + + Converting to use the ``'<<='`` operator instead will avoid this problem. + + See :class:`ParseResults.pprint` for an example of a recursive + parser created using ``Forward``. + """ + + def __init__(self, other: OptionalType[Union[ParserElement, str]] = None): + self.caller_frame = traceback.extract_stack(limit=2)[0] + super().__init__(other, savelist=False) + self.lshift_line = None + + def __lshift__(self, other): + if hasattr(self, "caller_frame"): + del self.caller_frame + if isinstance(other, str_type): + other = self._literalStringClass(other) + self.expr = other + self.mayIndexError = self.expr.mayIndexError + self.mayReturnEmpty = self.expr.mayReturnEmpty + self.set_whitespace_chars( + self.expr.whiteChars, copy_defaults=self.expr.copyDefaultWhiteChars + ) + self.skipWhitespace = self.expr.skipWhitespace + self.saveAsList = self.expr.saveAsList + self.ignoreExprs.extend(self.expr.ignoreExprs) + self.lshift_line = traceback.extract_stack(limit=2)[-2] + return self + + def __ilshift__(self, other): + return self << other + + def __or__(self, other): + caller_line = traceback.extract_stack(limit=2)[-2] + if ( + __diag__.warn_on_match_first_with_lshift_operator + and caller_line == self.lshift_line + and Diagnostics.warn_on_match_first_with_lshift_operator + not in self.suppress_warnings_ + ): + warnings.warn( + "using '<<' operator with '|' is probably an error, use '<<='", + stacklevel=2, + ) + ret = super().__or__(other) + return ret + + def __del__(self): + # see if we are getting dropped because of '=' reassignment of var instead of '<<=' or '<<' + if ( + self.expr is None + and __diag__.warn_on_assignment_to_Forward + and Diagnostics.warn_on_assignment_to_Forward not in self.suppress_warnings_ + ): + warnings.warn_explicit( + "Forward defined here but no expression attached later using '<<=' or '<<'", + UserWarning, + filename=self.caller_frame.filename, + lineno=self.caller_frame.lineno, + ) + + def parseImpl(self, instring, loc, doActions=True): + if ( + self.expr is None + and __diag__.warn_on_parse_using_empty_Forward + and Diagnostics.warn_on_parse_using_empty_Forward + not in self.suppress_warnings_ + ): + # walk stack until parse_string, scan_string, search_string, or transform_string is found + parse_fns = [ + "parse_string", + "scan_string", + "search_string", + "transform_string", + ] + tb = traceback.extract_stack(limit=200) + for i, frm in enumerate(reversed(tb), start=1): + if frm.name in parse_fns: + stacklevel = i + 1 + break + else: + stacklevel = 2 + warnings.warn( + "Forward expression was never assigned a value, will not parse any input", + 
stacklevel=stacklevel, + ) + if not ParserElement._left_recursion_enabled: + return super().parseImpl(instring, loc, doActions) + # ## Bounded Recursion algorithm ## + # Recursion only needs to be processed at ``Forward`` elements, since they are + # the only ones that can actually refer to themselves. The general idea is + # to handle recursion stepwise: We start at no recursion, then recurse once, + # recurse twice, ..., until more recursion offers no benefit (we hit the bound). + # + # The "trick" here is that each ``Forward`` gets evaluated in two contexts + # - to *match* a specific recursion level, and + # - to *search* the bounded recursion level + # and the two run concurrently. The *search* must *match* each recursion level + # to find the best possible match. This is handled by a memo table, which + # provides the previous match to the next level match attempt. + # + # See also "Left Recursion in Parsing Expression Grammars", Medeiros et al. + # + # There is a complication since we not only *parse* but also *transform* via + # actions: We do not want to run the actions too often while expanding. Thus, + # we expand using `doActions=False` and only run `doActions=True` if the next + # recursion level is acceptable. + with ParserElement.recursion_lock: + memo = ParserElement.recursion_memos + try: + # we are parsing at a specific recursion expansion - use it as-is + prev_loc, prev_result = memo[loc, self, doActions] + if isinstance(prev_result, Exception): + raise prev_result + return prev_loc, prev_result.copy() + except KeyError: + act_key = (loc, self, True) + peek_key = (loc, self, False) + # we are searching for the best recursion expansion - keep on improving + # both `doActions` cases must be tracked separately here! + prev_loc, prev_peek = memo[peek_key] = ( + loc - 1, + ParseException( + instring, loc, "Forward recursion without base case", self + ), + ) + if doActions: + memo[act_key] = memo[peek_key] + while True: + try: + new_loc, new_peek = super().parseImpl(instring, loc, False) + except ParseException: + # we failed before getting any match – do not hide the error + if isinstance(prev_peek, Exception): + raise + new_loc, new_peek = prev_loc, prev_peek + # the match did not get better: we are done + if new_loc <= prev_loc: + if doActions: + # replace the match for doActions=False as well, + # in case the action did backtrack + prev_loc, prev_result = memo[peek_key] = memo[act_key] + del memo[peek_key], memo[act_key] + return prev_loc, prev_result.copy() + del memo[peek_key] + return prev_loc, prev_peek.copy() + # the match did get better: see if we can improve further + else: + if doActions: + try: + memo[act_key] = super().parseImpl(instring, loc, True) + except ParseException as e: + memo[peek_key] = memo[act_key] = (new_loc, e) + raise + prev_loc, prev_peek = memo[peek_key] = new_loc, new_peek + + def leave_whitespace(self, recursive: bool = True) -> ParserElement: + self.skipWhitespace = False + return self + + def ignore_whitespace(self, recursive: bool = True) -> ParserElement: + self.skipWhitespace = True + return self + + def streamline(self) -> ParserElement: + if not self.streamlined: + self.streamlined = True + if self.expr is not None: + self.expr.streamline() + return self + + def validate(self, validateTrace=None) -> None: + if validateTrace is None: + validateTrace = [] + + if self not in validateTrace: + tmp = validateTrace[:] + [self] + if self.expr is not None: + self.expr.validate(tmp) + self._checkRecursion([]) + + def _generateDefaultName(self): + 
# Avoid infinite recursion by setting a temporary _defaultName + self._defaultName = ": ..." + + # Use the string representation of main expression. + retString = "..." + try: + if self.expr is not None: + retString = str(self.expr)[:1000] + else: + retString = "None" + finally: + return self.__class__.__name__ + ": " + retString + + def copy(self) -> ParserElement: + if self.expr is not None: + return super().copy() + else: + ret = Forward() + ret <<= self + return ret + + def _setResultsName(self, name, list_all_matches=False): + if ( + __diag__.warn_name_set_on_empty_Forward + and Diagnostics.warn_name_set_on_empty_Forward + not in self.suppress_warnings_ + ): + if self.expr is None: + warnings.warn( + "{}: setting results name {!r} on {} expression " + "that has no contained expression".format( + "warn_name_set_on_empty_Forward", name, type(self).__name__ + ), + stacklevel=3, + ) + + return super()._setResultsName(name, list_all_matches) + + ignoreWhitespace = ignore_whitespace + leaveWhitespace = leave_whitespace + + +class TokenConverter(ParseElementEnhance): + """ + Abstract subclass of :class:`ParseExpression`, for converting parsed results. + """ + + def __init__(self, expr: Union[ParserElement, str], savelist=False): + super().__init__(expr) # , savelist) + self.saveAsList = False + + +class Combine(TokenConverter): + """Converter to concatenate all matching tokens to a single string. + By default, the matching patterns must also be contiguous in the + input string; this can be disabled by specifying + ``'adjacent=False'`` in the constructor. + + Example:: + + real = Word(nums) + '.' + Word(nums) + print(real.parse_string('3.1416')) # -> ['3', '.', '1416'] + # will also erroneously match the following + print(real.parse_string('3. 1416')) # -> ['3', '.', '1416'] + + real = Combine(Word(nums) + '.' + Word(nums)) + print(real.parse_string('3.1416')) # -> ['3.1416'] + # no match when there are internal spaces + print(real.parse_string('3. 1416')) # -> Exception: Expected W:(0123...) + """ + + def __init__( + self, + expr: ParserElement, + join_string: str = "", + adjacent: bool = True, + *, + joinString: OptionalType[str] = None, + ): + super().__init__(expr) + joinString = joinString if joinString is not None else join_string + # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself + if adjacent: + self.leave_whitespace() + self.adjacent = adjacent + self.skipWhitespace = True + self.joinString = joinString + self.callPreparse = True + + def ignore(self, other) -> ParserElement: + if self.adjacent: + ParserElement.ignore(self, other) + else: + super().ignore(other) + return self + + def postParse(self, instring, loc, tokenlist): + retToks = tokenlist.copy() + del retToks[:] + retToks += ParseResults( + ["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults + ) + + if self.resultsName and retToks.haskeys(): + return [retToks] + else: + return retToks + + +class Group(TokenConverter): + """Converter to return the matched tokens as a list - useful for + returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions. + + The optional ``aslist`` argument when set to True will return the + parsed tokens as a Python list instead of a pyparsing ParseResults. 
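+
+    A short illustrative sketch of the ``aslist`` flag (editor's example, using
+    only standard pyparsing names)::
+
+        nums_grp = Group(Word(nums)[1, ...])
+        print(nums_grp.parse_string("1 2 3"))   # -> [['1', '2', '3']] (inner value is a ParseResults)
+
+        nums_grp = Group(Word(nums)[1, ...], aslist=True)
+        print(nums_grp.parse_string("1 2 3"))   # inner value is a plain Python list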
+ + Example:: + + ident = Word(alphas) + num = Word(nums) + term = ident | num + func = ident + Opt(delimited_list(term)) + print(func.parse_string("fn a, b, 100")) + # -> ['fn', 'a', 'b', '100'] + + func = ident + Group(Opt(delimited_list(term))) + print(func.parse_string("fn a, b, 100")) + # -> ['fn', ['a', 'b', '100']] + """ + + def __init__(self, expr: ParserElement, aslist: bool = False): + super().__init__(expr) + self.saveAsList = True + self._asPythonList = aslist + + def postParse(self, instring, loc, tokenlist): + if self._asPythonList: + return ParseResults.List( + tokenlist.asList() + if isinstance(tokenlist, ParseResults) + else list(tokenlist) + ) + else: + return [tokenlist] + + +class Dict(TokenConverter): + """Converter to return a repetitive expression as a list, but also + as a dictionary. Each element can also be referenced using the first + token in the expression as its key. Useful for tabular report + scraping when the first column can be used as a item key. + + The optional ``asdict`` argument when set to True will return the + parsed tokens as a Python dict instead of a pyparsing ParseResults. + + Example:: + + data_word = Word(alphas) + label = data_word + FollowedBy(':') + + text = "shape: SQUARE posn: upper left color: light blue texture: burlap" + attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) + + # print attributes as plain groups + print(OneOrMore(attr_expr).parse_string(text).dump()) + + # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names + result = Dict(OneOrMore(Group(attr_expr))).parse_string(text) + print(result.dump()) + + # access named fields as dict entries, or output as dict + print(result['shape']) + print(result.as_dict()) + + prints:: + + ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] + [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] + - color: light blue + - posn: upper left + - shape: SQUARE + - texture: burlap + SQUARE + {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'} + + See more examples at :class:`ParseResults` of accessing fields by results name. + """ + + def __init__(self, expr: ParserElement, asdict: bool = False): + super().__init__(expr) + self.saveAsList = True + self._asPythonDict = asdict + + def postParse(self, instring, loc, tokenlist): + for i, tok in enumerate(tokenlist): + if len(tok) == 0: + continue + + ikey = tok[0] + if isinstance(ikey, int): + ikey = str(ikey).strip() + + if len(tok) == 1: + tokenlist[ikey] = _ParseResultsWithOffset("", i) + + elif len(tok) == 2 and not isinstance(tok[1], ParseResults): + tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i) + + else: + try: + dictvalue = tok.copy() # ParseResults(i) + except Exception: + exc = TypeError( + "could not extract dict values from parsed results" + " - Dict expression must contain Grouped expressions" + ) + raise exc from None + + del dictvalue[0] + + if len(dictvalue) != 1 or ( + isinstance(dictvalue, ParseResults) and dictvalue.haskeys() + ): + tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i) + else: + tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i) + + if self._asPythonDict: + return [tokenlist.as_dict()] if self.resultsName else tokenlist.as_dict() + else: + return [tokenlist] if self.resultsName else tokenlist + + +class Suppress(TokenConverter): + """Converter for ignoring the results of a parsed expression. 
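+
+    A one-line illustrative sketch::
+
+        print((Suppress('(') + Word(nums) + Suppress(')')).parse_string("(123)"))  # -> ['123']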
+
+    Example::
+
+        source = "a, b, c,d"
+        wd = Word(alphas)
+        wd_list1 = wd + ZeroOrMore(',' + wd)
+        print(wd_list1.parse_string(source))
+
+        # often, delimiters that are useful during parsing are just in the
+        # way afterward - use Suppress to keep them out of the parsed output
+        wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
+        print(wd_list2.parse_string(source))
+
+        # Skipped text (using '...') can be suppressed as well
+        source = "lead in START relevant text END trailing text"
+        start_marker = Keyword("START")
+        end_marker = Keyword("END")
+        find_body = Suppress(...) + start_marker + ... + end_marker
+        print(find_body.parse_string(source))
+
+    prints::
+
+        ['a', ',', 'b', ',', 'c', ',', 'd']
+        ['a', 'b', 'c', 'd']
+        ['START', 'relevant text ', 'END']
+
+    (See also :class:`delimited_list`.)
+    """
+
+    def __init__(self, expr: Union[ParserElement, str], savelist: bool = False):
+        if expr is ...:
+            expr = _PendingSkip(NoMatch())
+        super().__init__(expr)
+
+    def __add__(self, other):
+        if isinstance(self.expr, _PendingSkip):
+            return Suppress(SkipTo(other)) + other
+        else:
+            return super().__add__(other)
+
+    def __sub__(self, other):
+        if isinstance(self.expr, _PendingSkip):
+            return Suppress(SkipTo(other)) - other
+        else:
+            return super().__sub__(other)
+
+    def postParse(self, instring, loc, tokenlist):
+        return []
+
+    def suppress(self) -> ParserElement:
+        return self
+
+
+def trace_parse_action(f: ParseAction) -> ParseAction:
+    """Decorator for debugging parse actions.
+
+    When the parse action is called, this decorator will print
+    ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``.
+    When the parse action completes, the decorator will print
+    ``"<<"`` followed by the returned value, or any exception that the parse action raised.
+
+    Example::
+
+        wd = Word(alphas)
+
+        @trace_parse_action
+        def remove_duplicate_chars(tokens):
+            return ''.join(sorted(set(''.join(tokens))))
+
+        wds = OneOrMore(wd).set_parse_action(remove_duplicate_chars)
+        print(wds.parse_string("slkdjs sld sldd sdlf sdljf"))
+
+    prints::
+
+        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
+        <<leaving remove_duplicate_chars (ret: 'dfjkls')
+        ['dfjkls']
+    """
+    f = _trim_arity(f)
+
+    def z(*paArgs):
+        thisFunc = f.__name__
+        s, l, t = paArgs[-3:]
+        if len(paArgs) > 3:
+            thisFunc = paArgs[0].__class__.__name__ + "." + thisFunc
+        sys.stderr.write(
+            ">>entering {}(line: {!r}, {}, {!r})\n".format(thisFunc, line(l, s), l, t)
+        )
+        try:
+            ret = f(*paArgs)
+        except Exception as exc:
+            sys.stderr.write("<<leaving {} (exception: {})\n".format(thisFunc, exc))
+            raise
+        sys.stderr.write("<<leaving {} (ret: {!r})\n".format(thisFunc, ret))
+        return ret
+
+    return z
+
+
+def srange(s: str) -> str:
+    r"""Helper to easily define string ranges for use in :class:`Word`
+    construction. Borrows syntax from regexp ``'[]'`` string range
+    definitions::
+
+        srange("[0-9]") -> "0123456789"
+        srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
+        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
+
+    The input string must be enclosed in []'s, and the returned string
+    is the expanded character set joined into a single string. The
+    values enclosed in the []'s may be:
+
+    - a single character
+    - an escaped character with a leading backslash (such as ``\-``
+      or ``\]``)
+    - an escaped hex character with a leading ``'\x'``
+      (``\x21``, which is a ``'!'`` character) (``\0x##``
+      is also supported for backwards compatibility)
+    - an escaped octal character with a leading ``'\0'``
+      (``\041``, which is a ``'!'`` character)
+    - a range of any of the above, separated by a dash (``'a-z'``,
+      etc.)
+    - any combination of the above (``'aeiouy'``,
+      ``'a-zA-Z0-9_$'``, etc.)
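+
+    For example (illustrative)::
+
+        hex_chars = srange(r"[0-9a-fA-F]")   # -> "0123456789abcdefABCDEF"
+        hex_word = Word(hex_chars)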
+ """ + _expanded = ( + lambda p: p + if not isinstance(p, ParseResults) + else "".join(chr(c) for c in range(ord(p[0]), ord(p[1]) + 1)) + ) + try: + return "".join(_expanded(part) for part in _reBracketExpr.parse_string(s).body) + except Exception: + return "" + + +def token_map(func, *args) -> ParseAction: + """Helper to define a parse action by mapping a function to all + elements of a :class:`ParseResults` list. If any additional args are passed, + they are forwarded to the given function as additional arguments + after the token, as in + ``hex_integer = Word(hexnums).set_parse_action(token_map(int, 16))``, + which will convert the parsed data to an integer using base 16. + + Example (compare the last to example in :class:`ParserElement.transform_string`:: + + hex_ints = OneOrMore(Word(hexnums)).set_parse_action(token_map(int, 16)) + hex_ints.run_tests(''' + 00 11 22 aa FF 0a 0d 1a + ''') + + upperword = Word(alphas).set_parse_action(token_map(str.upper)) + OneOrMore(upperword).run_tests(''' + my kingdom for a horse + ''') + + wd = Word(alphas).set_parse_action(token_map(str.title)) + OneOrMore(wd).set_parse_action(' '.join).run_tests(''' + now is the winter of our discontent made glorious summer by this sun of york + ''') + + prints:: + + 00 11 22 aa FF 0a 0d 1a + [0, 17, 34, 170, 255, 10, 13, 26] + + my kingdom for a horse + ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE'] + + now is the winter of our discontent made glorious summer by this sun of york + ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] + """ + + def pa(s, l, t): + return [func(tokn, *args) for tokn in t] + + func_name = getattr(func, "__name__", getattr(func, "__class__").__name__) + pa.__name__ = func_name + + return pa + + +def autoname_elements() -> None: + """ + Utility to simplify mass-naming of parser elements, for + generating railroad diagram with named subdiagrams. 
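+
+    A minimal illustrative sketch (variable names are arbitrary)::
+
+        integer = Word(nums)
+        identifier = Word(alphas)
+        autoname_elements()   # integer and identifier are now named after their variables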
+ """ + for name, var in sys._getframe().f_back.f_locals.items(): + if isinstance(var, ParserElement) and not var.customName: + var.set_name(name) + + +dbl_quoted_string = Combine( + Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' +).set_name("string enclosed in double quotes") + +sgl_quoted_string = Combine( + Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" +).set_name("string enclosed in single quotes") + +quoted_string = Combine( + Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' + | Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" +).set_name("quotedString using single or double quotes") + +unicode_string = Combine("u" + quoted_string.copy()).set_name("unicode string literal") + + +alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") +punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") + +# build list of built-in expressions, for future reference if a global default value +# gets updated +_builtin_exprs = [v for v in vars().values() if isinstance(v, ParserElement)] + +# backward compatibility names +tokenMap = token_map +conditionAsParseAction = condition_as_parse_action +nullDebugAction = null_debug_action +sglQuotedString = sgl_quoted_string +dblQuotedString = dbl_quoted_string +quotedString = quoted_string +unicodeString = unicode_string +lineStart = line_start +lineEnd = line_end +stringStart = string_start +stringEnd = string_end +traceParseAction = trace_parse_action diff --git a/pipenv/vendor/pyparsing/diagram/__init__.py b/pipenv/vendor/pyparsing/diagram/__init__.py new file mode 100644 index 0000000000..cfc33fd16d --- /dev/null +++ b/pipenv/vendor/pyparsing/diagram/__init__.py @@ -0,0 +1,593 @@ +import railroad +import pipenv.vendor.pyparsing as pyparsing +from pkg_resources import resource_filename +from typing import ( + List, + Optional, + NamedTuple, + Generic, + TypeVar, + Dict, + Callable, + Set, + Iterable, +) +from jinja2 import Template +from io import StringIO +import inspect + +with open(resource_filename(__name__, "template.jinja2"), encoding="utf-8") as fp: + template = Template(fp.read()) + +# Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet +NamedDiagram = NamedTuple( + "NamedDiagram", + [("name", str), ("diagram", Optional[railroad.DiagramItem]), ("index", int)], +) +""" +A simple structure for associating a name with a railroad diagram +""" + +T = TypeVar("T") + + +class EachItem(railroad.Group): + """ + Custom railroad item to compose a: + - Group containing a + - OneOrMore containing a + - Choice of the elements in the Each + with the group label indicating that all must be matched + """ + + all_label = "[ALL]" + + def __init__(self, *items): + choice_item = railroad.Choice(len(items) - 1, *items) + one_or_more_item = railroad.OneOrMore(item=choice_item) + super().__init__(one_or_more_item, label=self.all_label) + + +class AnnotatedItem(railroad.Group): + """ + Simple subclass of Group that creates an annotation label + """ + + def __init__(self, label: str, item): + super().__init__(item=item, label="[{}]".format(label)) + + +class EditablePartial(Generic[T]): + """ + Acts like a functools.partial, but can be edited. In other words, it represents a type that hasn't yet been + constructed. 
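+
+    A minimal illustrative sketch (using a stand-in function rather than a
+    railroad constructor)::
+
+        def make_pair(a, b):
+            return (a, b)
+
+        partial = EditablePartial.from_call(make_pair, 1, b=2)
+        partial.kwargs["b"] = 3   # edit the stored kwargs before construction
+        partial()                 # -> (1, 3)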
+ """ + + # We need this here because the railroad constructors actually transform the data, so can't be called until the + # entire tree is assembled + + def __init__(self, func: Callable[..., T], args: list, kwargs: dict): + self.func = func + self.args = args + self.kwargs = kwargs + + @classmethod + def from_call(cls, func: Callable[..., T], *args, **kwargs) -> "EditablePartial[T]": + """ + If you call this function in the same way that you would call the constructor, it will store the arguments + as you expect. For example EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3) + """ + return EditablePartial(func=func, args=list(args), kwargs=kwargs) + + @property + def name(self): + return self.kwargs["name"] + + def __call__(self) -> T: + """ + Evaluate the partial and return the result + """ + args = self.args.copy() + kwargs = self.kwargs.copy() + + # This is a helpful hack to allow you to specify varargs parameters (e.g. *args) as keyword args (e.g. + # args=['list', 'of', 'things']) + arg_spec = inspect.getfullargspec(self.func) + if arg_spec.varargs in self.kwargs: + args += kwargs.pop(arg_spec.varargs) + + return self.func(*args, **kwargs) + + +def railroad_to_html(diagrams: List[NamedDiagram], **kwargs) -> str: + """ + Given a list of NamedDiagram, produce a single HTML string that visualises those diagrams + :params kwargs: kwargs to be passed in to the template + """ + data = [] + for diagram in diagrams: + io = StringIO() + diagram.diagram.writeSvg(io.write) + title = diagram.name + if diagram.index == 0: + title += " (root)" + data.append({"title": title, "text": "", "svg": io.getvalue()}) + + return template.render(diagrams=data, **kwargs) + + +def resolve_partial(partial: "EditablePartial[T]") -> T: + """ + Recursively resolves a collection of Partials into whatever type they are + """ + if isinstance(partial, EditablePartial): + partial.args = resolve_partial(partial.args) + partial.kwargs = resolve_partial(partial.kwargs) + return partial() + elif isinstance(partial, list): + return [resolve_partial(x) for x in partial] + elif isinstance(partial, dict): + return {key: resolve_partial(x) for key, x in partial.items()} + else: + return partial + + +def to_railroad( + element: pyparsing.ParserElement, + diagram_kwargs: Optional[dict] = None, + vertical: int = 3, + show_results_names: bool = False, +) -> List[NamedDiagram]: + """ + Convert a pyparsing element tree into a list of diagrams. 
This is the recommended entrypoint to diagram + creation if you want to access the Railroad tree before it is converted to HTML + :param element: base element of the parser being diagrammed + :param diagram_kwargs: kwargs to pass to the Diagram() constructor + :param vertical: (optional) - int - limit at which number of alternatives should be + shown vertically instead of horizontally + :param show_results_names - bool to indicate whether results name annotations should be + included in the diagram + """ + # Convert the whole tree underneath the root + lookup = ConverterState(diagram_kwargs=diagram_kwargs or {}) + _to_diagram_element( + element, + lookup=lookup, + parent=None, + vertical=vertical, + show_results_names=show_results_names, + ) + + root_id = id(element) + # Convert the root if it hasn't been already + if root_id in lookup: + if not element.customName: + lookup[root_id].name = "" + lookup[root_id].mark_for_extraction(root_id, lookup, force=True) + + # Now that we're finished, we can convert from intermediate structures into Railroad elements + diags = list(lookup.diagrams.values()) + if len(diags) > 1: + # collapse out duplicate diags with the same name + seen = set() + deduped_diags = [] + for d in diags: + # don't extract SkipTo elements, they are uninformative as subdiagrams + if d.name == "...": + continue + if d.name is not None and d.name not in seen: + seen.add(d.name) + deduped_diags.append(d) + resolved = [resolve_partial(partial) for partial in deduped_diags] + else: + # special case - if just one diagram, always display it, even if + # it has no name + resolved = [resolve_partial(partial) for partial in diags] + return sorted(resolved, key=lambda diag: diag.index) + + +def _should_vertical( + specification: int, exprs: Iterable[pyparsing.ParserElement] +) -> bool: + """ + Returns true if we should return a vertical list of elements + """ + if specification is None: + return False + else: + return len(_visible_exprs(exprs)) >= specification + + +class ElementState: + """ + State recorded for an individual pyparsing Element + """ + + # Note: this should be a dataclass, but we have to support Python 3.5 + def __init__( + self, + element: pyparsing.ParserElement, + converted: EditablePartial, + parent: EditablePartial, + number: int, + name: str = None, + parent_index: Optional[int] = None, + ): + #: The pyparsing element that this represents + self.element: pyparsing.ParserElement = element + #: The name of the element + self.name: str = name + #: The output Railroad element in an unconverted state + self.converted: EditablePartial = converted + #: The parent Railroad element, which we store so that we can extract this if it's duplicated + self.parent: EditablePartial = parent + #: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram + self.number: int = number + #: The index of this inside its parent + self.parent_index: Optional[int] = parent_index + #: If true, we should extract this out into a subdiagram + self.extract: bool = False + #: If true, all of this element's children have been filled out + self.complete: bool = False + + def mark_for_extraction( + self, el_id: int, state: "ConverterState", name: str = None, force: bool = False + ): + """ + Called when this instance has been seen twice, and thus should eventually be extracted into a sub-diagram + :param el_id: id of the element + :param state: element/diagram state tracker + :param name: name to use for this element's text + :param force: If true, force 
extraction now, regardless of the state of this. Only useful for extracting the + root element when we know we're finished + """ + self.extract = True + + # Set the name + if not self.name: + if name: + # Allow forcing a custom name + self.name = name + elif self.element.customName: + self.name = self.element.customName + else: + self.name = "" + + # Just because this is marked for extraction doesn't mean we can do it yet. We may have to wait for children + # to be added + # Also, if this is just a string literal etc, don't bother extracting it + if force or (self.complete and _worth_extracting(self.element)): + state.extract_into_diagram(el_id) + + +class ConverterState: + """ + Stores some state that persists between recursions into the element tree + """ + + def __init__(self, diagram_kwargs: Optional[dict] = None): + #: A dictionary mapping ParserElements to state relating to them + self._element_diagram_states: Dict[int, ElementState] = {} + #: A dictionary mapping ParserElement IDs to subdiagrams generated from them + self.diagrams: Dict[int, EditablePartial[NamedDiagram]] = {} + #: The index of the next unnamed element + self.unnamed_index: int = 1 + #: The index of the next element. This is used for sorting + self.index: int = 0 + #: Shared kwargs that are used to customize the construction of diagrams + self.diagram_kwargs: dict = diagram_kwargs or {} + self.extracted_diagram_names: Set[str] = set() + + def __setitem__(self, key: int, value: ElementState): + self._element_diagram_states[key] = value + + def __getitem__(self, key: int) -> ElementState: + return self._element_diagram_states[key] + + def __delitem__(self, key: int): + del self._element_diagram_states[key] + + def __contains__(self, key: int): + return key in self._element_diagram_states + + def generate_unnamed(self) -> int: + """ + Generate a number used in the name of an otherwise unnamed diagram + """ + self.unnamed_index += 1 + return self.unnamed_index + + def generate_index(self) -> int: + """ + Generate a number used to index a diagram + """ + self.index += 1 + return self.index + + def extract_into_diagram(self, el_id: int): + """ + Used when we encounter the same token twice in the same tree. When this + happens, we replace all instances of that token with a terminal, and + create a new subdiagram for the token + """ + position = self[el_id] + + # Replace the original definition of this element with a regular block + if position.parent: + ret = EditablePartial.from_call(railroad.NonTerminal, text=position.name) + if "item" in position.parent.kwargs: + position.parent.kwargs["item"] = ret + elif "items" in position.parent.kwargs: + position.parent.kwargs["items"][position.parent_index] = ret + + # If the element we're extracting is a group, skip to its content but keep the title + if position.converted.func == railroad.Group: + content = position.converted.kwargs["item"] + else: + content = position.converted + + self.diagrams[el_id] = EditablePartial.from_call( + NamedDiagram, + name=position.name, + diagram=EditablePartial.from_call( + railroad.Diagram, content, **self.diagram_kwargs + ), + index=position.number, + ) + + del self[el_id] + + +def _worth_extracting(element: pyparsing.ParserElement) -> bool: + """ + Returns true if this element is worth having its own sub-diagram. 
Simply, if any of its children + themselves have children, then its complex enough to extract + """ + children = element.recurse() + return any(child.recurse() for child in children) + + +def _apply_diagram_item_enhancements(fn): + """ + decorator to ensure enhancements to a diagram item (such as results name annotations) + get applied on return from _to_diagram_element (we do this since there are several + returns in _to_diagram_element) + """ + + def _inner( + element: pyparsing.ParserElement, + parent: Optional[EditablePartial], + lookup: ConverterState = None, + vertical: int = None, + index: int = 0, + name_hint: str = None, + show_results_names: bool = False, + ) -> Optional[EditablePartial]: + + ret = fn( + element, + parent, + lookup, + vertical, + index, + name_hint, + show_results_names, + ) + + # apply annotation for results name, if present + if show_results_names and ret is not None: + element_results_name = element.resultsName + if element_results_name: + # add "*" to indicate if this is a "list all results" name + element_results_name += "" if element.modalResults else "*" + ret = EditablePartial.from_call( + railroad.Group, item=ret, label=element_results_name + ) + + return ret + + return _inner + + +def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]): + non_diagramming_exprs = ( + pyparsing.ParseElementEnhance, + pyparsing.PositionToken, + pyparsing.And._ErrorStop, + ) + return [ + e + for e in exprs + if not (e.customName or e.resultsName or isinstance(e, non_diagramming_exprs)) + ] + + +@_apply_diagram_item_enhancements +def _to_diagram_element( + element: pyparsing.ParserElement, + parent: Optional[EditablePartial], + lookup: ConverterState = None, + vertical: int = None, + index: int = 0, + name_hint: str = None, + show_results_names: bool = False, +) -> Optional[EditablePartial]: + """ + Recursively converts a PyParsing Element to a railroad Element + :param lookup: The shared converter state that keeps track of useful things + :param index: The index of this element within the parent + :param parent: The parent of this element in the output tree + :param vertical: Controls at what point we make a list of elements vertical. If this is an integer (the default), + it sets the threshold of the number of items before we go vertical. 
If True, always go vertical, if False, never + do so + :param name_hint: If provided, this will override the generated name + :param show_results_names: bool flag indicating whether to add annotations for results names + :returns: The converted version of the input element, but as a Partial that hasn't yet been constructed + """ + exprs = element.recurse() + name = name_hint or element.customName or element.__class__.__name__ + + # Python's id() is used to provide a unique identifier for elements + el_id = id(element) + + element_results_name = element.resultsName + + # Here we basically bypass processing certain wrapper elements if they contribute nothing to the diagram + if not element.customName: + if isinstance( + element, + ( + pyparsing.TokenConverter, + # pyparsing.Forward, + pyparsing.Located, + ), + ): + # However, if this element has a useful custom name, and its child does not, we can pass it on to the child + if exprs: + if not exprs[0].customName: + propagated_name = name + else: + propagated_name = None + + return _to_diagram_element( + element.expr, + parent=parent, + lookup=lookup, + vertical=vertical, + index=index, + name_hint=propagated_name, + show_results_names=show_results_names, + ) + + # If the element isn't worth extracting, we always treat it as the first time we say it + if _worth_extracting(element): + if el_id in lookup: + # If we've seen this element exactly once before, we are only just now finding out that it's a duplicate, + # so we have to extract it into a new diagram. + looked_up = lookup[el_id] + looked_up.mark_for_extraction(el_id, lookup, name=name_hint) + ret = EditablePartial.from_call(railroad.NonTerminal, text=looked_up.name) + return ret + + elif el_id in lookup.diagrams: + # If we have seen the element at least twice before, and have already extracted it into a subdiagram, we + # just put in a marker element that refers to the sub-diagram + ret = EditablePartial.from_call( + railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"] + ) + return ret + + # Recursively convert child elements + # Here we find the most relevant Railroad element for matching pyparsing Element + # We use ``items=[]`` here to hold the place for where the child elements will go once created + if isinstance(element, pyparsing.And): + # detect And's created with ``expr*N`` notation - for these use a OneOrMore with a repeat + # (all will have the same name, and resultsName) + if not exprs: + return None + if len(set((e.name, e.resultsName) for e in exprs)) == 1: + ret = EditablePartial.from_call( + railroad.OneOrMore, item="", repeat=str(len(exprs)) + ) + elif _should_vertical(vertical, exprs): + ret = EditablePartial.from_call(railroad.Stack, items=[]) + else: + ret = EditablePartial.from_call(railroad.Sequence, items=[]) + elif isinstance(element, (pyparsing.Or, pyparsing.MatchFirst)): + if not exprs: + return None + if _should_vertical(vertical, exprs): + ret = EditablePartial.from_call(railroad.Choice, 0, items=[]) + else: + ret = EditablePartial.from_call(railroad.HorizontalChoice, items=[]) + elif isinstance(element, pyparsing.Each): + if not exprs: + return None + ret = EditablePartial.from_call(EachItem, items=[]) + elif isinstance(element, pyparsing.NotAny): + ret = EditablePartial.from_call(AnnotatedItem, label="NOT", item="") + elif isinstance(element, pyparsing.FollowedBy): + ret = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD", item="") + elif isinstance(element, pyparsing.PrecededBy): + ret = EditablePartial.from_call(AnnotatedItem, 
label="LOOKBEHIND", item="") + elif isinstance(element, pyparsing.Opt): + ret = EditablePartial.from_call(railroad.Optional, item="") + elif isinstance(element, pyparsing.OneOrMore): + ret = EditablePartial.from_call(railroad.OneOrMore, item="") + elif isinstance(element, pyparsing.ZeroOrMore): + ret = EditablePartial.from_call(railroad.ZeroOrMore, item="") + elif isinstance(element, pyparsing.Group): + ret = EditablePartial.from_call( + railroad.Group, item=None, label=element_results_name + ) + elif isinstance(element, pyparsing.Empty) and not element.customName: + # Skip unnamed "Empty" elements + ret = None + elif len(exprs) > 1: + ret = EditablePartial.from_call(railroad.Sequence, items=[]) + elif len(exprs) > 0 and not element_results_name: + ret = EditablePartial.from_call(railroad.Group, item="", label=name) + else: + terminal = EditablePartial.from_call(railroad.Terminal, element.defaultName) + ret = terminal + + if ret is None: + return + + # Indicate this element's position in the tree so we can extract it if necessary + lookup[el_id] = ElementState( + element=element, + converted=ret, + parent=parent, + parent_index=index, + number=lookup.generate_index(), + ) + if element.customName: + lookup[el_id].mark_for_extraction(el_id, lookup, element.customName) + + i = 0 + for expr in exprs: + # Add a placeholder index in case we have to extract the child before we even add it to the parent + if "items" in ret.kwargs: + ret.kwargs["items"].insert(i, None) + + item = _to_diagram_element( + expr, + parent=ret, + lookup=lookup, + vertical=vertical, + index=i, + show_results_names=show_results_names, + ) + + # Some elements don't need to be shown in the diagram + if item is not None: + if "item" in ret.kwargs: + ret.kwargs["item"] = item + elif "items" in ret.kwargs: + # If we've already extracted the child, don't touch this index, since it's occupied by a nonterminal + ret.kwargs["items"][i] = item + i += 1 + elif "items" in ret.kwargs: + # If we're supposed to skip this element, remove it from the parent + del ret.kwargs["items"][i] + + # If all this items children are none, skip this item + if ret and ( + ("items" in ret.kwargs and len(ret.kwargs["items"]) == 0) + or ("item" in ret.kwargs and ret.kwargs["item"] is None) + ): + ret = EditablePartial.from_call(railroad.Terminal, name) + + # Mark this element as "complete", ie it has all of its children + if el_id in lookup: + lookup[el_id].complete = True + + if el_id in lookup and lookup[el_id].extract and lookup[el_id].complete: + lookup.extract_into_diagram(el_id) + if ret is not None: + ret = EditablePartial.from_call( + railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"] + ) + + return ret diff --git a/pipenv/vendor/pyparsing/diagram/template.jinja2 b/pipenv/vendor/pyparsing/diagram/template.jinja2 new file mode 100644 index 0000000000..d2219fb011 --- /dev/null +++ b/pipenv/vendor/pyparsing/diagram/template.jinja2 @@ -0,0 +1,26 @@ + + + + {% if not head %} + + {% else %} + {{ hear | safe }} + {% endif %} + + +{{ body | safe }} +{% for diagram in diagrams %} +
+    <div class="railroad-group">
+        <h1 class="railroad-heading">{{ diagram.title }}</h1>
+        <div class="railroad-description">{{ diagram.text }}</div>
+        <div class="railroad-svg">
+            {{ diagram.svg }}
+        </div>
+    </div>
+{% endfor %} + + diff --git a/pipenv/vendor/pyparsing/exceptions.py b/pipenv/vendor/pyparsing/exceptions.py new file mode 100644 index 0000000000..e06513eb00 --- /dev/null +++ b/pipenv/vendor/pyparsing/exceptions.py @@ -0,0 +1,267 @@ +# exceptions.py + +import re +import sys +from typing import Optional + +from .util import col, line, lineno, _collapse_string_to_ranges +from .unicode import pyparsing_unicode as ppu + + +class ExceptionWordUnicode(ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic): + pass + + +_extract_alphanums = _collapse_string_to_ranges(ExceptionWordUnicode.alphanums) +_exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.") + + +class ParseBaseException(Exception): + """base exception class for all parsing runtime exceptions""" + + # Performance tuning: we construct a *lot* of these, so keep this + # constructor as small and fast as possible + def __init__( + self, + pstr: str, + loc: int = 0, + msg: Optional[str] = None, + elem=None, + ): + self.loc = loc + if msg is None: + self.msg = pstr + self.pstr = "" + else: + self.msg = msg + self.pstr = pstr + self.parser_element = self.parserElement = elem + self.args = (pstr, loc, msg) + + @staticmethod + def explain_exception(exc, depth=16): + """ + Method to take an exception and translate the Python internal traceback into a list + of the pyparsing expressions that caused the exception to be raised. + + Parameters: + + - exc - exception raised during parsing (need not be a ParseException, in support + of Python exceptions that might be raised in a parse action) + - depth (default=16) - number of levels back in the stack trace to list expression + and function names; if None, the full stack trace names will be listed; if 0, only + the failing input line, marker, and exception string will be shown + + Returns a multi-line string listing the ParserElements and/or function names in the + exception's stack trace. + """ + import inspect + from .core import ParserElement + + if depth is None: + depth = sys.getrecursionlimit() + ret = [] + if isinstance(exc, ParseBaseException): + ret.append(exc.line) + ret.append(" " * (exc.column - 1) + "^") + ret.append("{}: {}".format(type(exc).__name__, exc)) + + if depth > 0: + callers = inspect.getinnerframes(exc.__traceback__, context=depth) + seen = set() + for i, ff in enumerate(callers[-depth:]): + frm = ff[0] + + f_self = frm.f_locals.get("self", None) + if isinstance(f_self, ParserElement): + if frm.f_code.co_name not in ("parseImpl", "_parseNoCache"): + continue + if id(f_self) in seen: + continue + seen.add(id(f_self)) + + self_type = type(f_self) + ret.append( + "{}.{} - {}".format( + self_type.__module__, self_type.__name__, f_self + ) + ) + + elif f_self is not None: + self_type = type(f_self) + ret.append("{}.{}".format(self_type.__module__, self_type.__name__)) + + else: + code = frm.f_code + if code.co_name in ("wrapper", ""): + continue + + ret.append("{}".format(code.co_name)) + + depth -= 1 + if not depth: + break + + return "\n".join(ret) + + @classmethod + def _from_exception(cls, pe): + """ + internal factory method to simplify creating one type of ParseException + from another - avoids having __init__ signature conflicts among subclasses + """ + return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement) + + @property + def line(self) -> str: + """ + Return the line of text where the exception occurred. 
+ """ + return line(self.loc, self.pstr) + + @property + def lineno(self) -> int: + """ + Return the 1-based line number of text where the exception occurred. + """ + return lineno(self.loc, self.pstr) + + @property + def col(self) -> int: + """ + Return the 1-based column on the line of text where the exception occurred. + """ + return col(self.loc, self.pstr) + + @property + def column(self) -> int: + """ + Return the 1-based column on the line of text where the exception occurred. + """ + return col(self.loc, self.pstr) + + def __str__(self) -> str: + if self.pstr: + if self.loc >= len(self.pstr): + foundstr = ", found end of text" + else: + # pull out next word at error location + found_match = _exception_word_extractor.match(self.pstr, self.loc) + if found_match is not None: + found = found_match.group(0) + else: + found = self.pstr[self.loc : self.loc + 1] + foundstr = (", found %r" % found).replace(r"\\", "\\") + else: + foundstr = "" + return "{}{} (at char {}), (line:{}, col:{})".format( + self.msg, foundstr, self.loc, self.lineno, self.column + ) + + def __repr__(self): + return str(self) + + def mark_input_line(self, marker_string: str = None, *, markerString=">!<") -> str: + """ + Extracts the exception line from the input string, and marks + the location of the exception with a special symbol. + """ + markerString = marker_string if marker_string is not None else markerString + line_str = self.line + line_column = self.column - 1 + if markerString: + line_str = "".join( + (line_str[:line_column], markerString, line_str[line_column:]) + ) + return line_str.strip() + + def explain(self, depth=16) -> str: + """ + Method to translate the Python internal traceback into a list + of the pyparsing expressions that caused the exception to be raised. + + Parameters: + + - depth (default=16) - number of levels back in the stack trace to list expression + and function names; if None, the full stack trace names will be listed; if 0, only + the failing input line, marker, and exception string will be shown + + Returns a multi-line string listing the ParserElements and/or function names in the + exception's stack trace. + + Example:: + + expr = pp.Word(pp.nums) * 3 + try: + expr.parse_string("123 456 A789") + except pp.ParseException as pe: + print(pe.explain(depth=0)) + + prints:: + + 123 456 A789 + ^ + ParseException: Expected W:(0-9), found 'A' (at char 8), (line:1, col:9) + + Note: the diagnostic output will include string representations of the expressions + that failed to parse. These representations will be more helpful if you use `set_name` to + give identifiable names to your expressions. Otherwise they will use the default string + forms, which may be cryptic to read. + + Note: pyparsing's default truncation of exception tracebacks may also truncate the + stack of expressions that are displayed in the ``explain`` output. 
To get the full listing + of parser expressions, you may have to set ``ParserElement.verbose_stacktrace = True`` + """ + return self.explain_exception(self, depth) + + markInputline = mark_input_line + + +class ParseException(ParseBaseException): + """ + Exception thrown when a parse expression doesn't match the input string + + Example:: + + try: + Word(nums).set_name("integer").parse_string("ABC") + except ParseException as pe: + print(pe) + print("column: {}".format(pe.column)) + + prints:: + + Expected integer (at char 0), (line:1, col:1) + column: 1 + + """ + + +class ParseFatalException(ParseBaseException): + """ + User-throwable exception thrown when inconsistent parse content + is found; stops all parsing immediately + """ + + +class ParseSyntaxException(ParseFatalException): + """ + Just like :class:`ParseFatalException`, but thrown internally + when an :class:`ErrorStop` ('-' operator) indicates + that parsing is to stop immediately because an unbacktrackable + syntax error has been found. + """ + + +class RecursiveGrammarException(Exception): + """ + Exception thrown by :class:`ParserElement.validate` if the + grammar could be left-recursive; parser may need to enable + left recursion using :class:`ParserElement.enable_left_recursion` + """ + + def __init__(self, parseElementList): + self.parseElementTrace = parseElementList + + def __str__(self) -> str: + return "RecursiveGrammarException: {}".format(self.parseElementTrace) diff --git a/pipenv/vendor/pyparsing/helpers.py b/pipenv/vendor/pyparsing/helpers.py new file mode 100644 index 0000000000..5e7b3ad05e --- /dev/null +++ b/pipenv/vendor/pyparsing/helpers.py @@ -0,0 +1,1069 @@ +# helpers.py +import html.entities +import re + +from . import __diag__ +from .core import * +from .util import _bslash, _flatten, _escape_regex_range_chars + + +# +# global helpers +# +def delimited_list( + expr: Union[str, ParserElement], + delim: Union[str, ParserElement] = ",", + combine: bool = False, + min: OptionalType[int] = None, + max: OptionalType[int] = None, + *, + allow_trailing_delim: bool = False, +) -> ParserElement: + """Helper to define a delimited list of expressions - the delimiter + defaults to ','. By default, the list elements and delimiters can + have intervening whitespace, and comments, but this can be + overridden by passing ``combine=True`` in the constructor. If + ``combine`` is set to ``True``, the matching tokens are + returned as a single token string, with the delimiters included; + otherwise, the matching tokens are returned as a list of tokens, + with the delimiters suppressed. + + If ``allow_trailing_delim`` is set to True, then the list may end with + a delimiter. 
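+
+    A short illustrative sketch of the ``min`` and ``allow_trailing_delim``
+    options::
+
+        delimited_list(Word(alphas), min=2).parse_string("aa,bb")                       # -> ['aa', 'bb']
+        delimited_list(Word(alphas), allow_trailing_delim=True).parse_string("aa,bb,")  # -> ['aa', 'bb']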
+ + Example:: + + delimited_list(Word(alphas)).parse_string("aa,bb,cc") # -> ['aa', 'bb', 'cc'] + delimited_list(Word(hexnums), delim=':', combine=True).parse_string("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] + """ + if isinstance(expr, str_type): + expr = ParserElement._literalStringClass(expr) + + dlName = "{expr} [{delim} {expr}]...{end}".format( + expr=str(expr.copy().streamline()), + delim=str(delim), + end=" [{}]".format(str(delim)) if allow_trailing_delim else "", + ) + + if not combine: + delim = Suppress(delim) + + if min is not None: + if min < 1: + raise ValueError("min must be greater than 0") + min -= 1 + if max is not None: + if min is not None and max <= min: + raise ValueError("max must be greater than, or equal to min") + max -= 1 + delimited_list_expr = expr + (delim + expr)[min, max] + + if allow_trailing_delim: + delimited_list_expr += Opt(delim) + + if combine: + return Combine(delimited_list_expr).set_name(dlName) + else: + return delimited_list_expr.set_name(dlName) + + +def counted_array( + expr: ParserElement, + int_expr: OptionalType[ParserElement] = None, + *, + intExpr: OptionalType[ParserElement] = None, +) -> ParserElement: + """Helper to define a counted list of expressions. + + This helper defines a pattern of the form:: + + integer expr expr expr... + + where the leading integer tells how many expr expressions follow. + The matched tokens returns the array of expr tokens as a list - the + leading count token is suppressed. + + If ``int_expr`` is specified, it should be a pyparsing expression + that produces an integer value. + + Example:: + + counted_array(Word(alphas)).parse_string('2 ab cd ef') # -> ['ab', 'cd'] + + # in this parser, the leading integer value is given in binary, + # '10' indicating that 2 values are in the array + binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2)) + counted_array(Word(alphas), int_expr=binary_constant).parse_string('10 ab cd ef') # -> ['ab', 'cd'] + + # if other fields must be parsed after the count but before the + # list items, give the fields results names and they will + # be preserved in the returned ParseResults: + count_with_metadata = integer + Word(alphas)("type") + typed_array = counted_array(Word(alphanums), int_expr=count_with_metadata)("items") + result = typed_array.parse_string("3 bool True True False") + print(result.dump()) + + # prints + # ['True', 'True', 'False'] + # - items: ['True', 'True', 'False'] + # - type: 'bool' + """ + intExpr = intExpr or int_expr + array_expr = Forward() + + def count_field_parse_action(s, l, t): + nonlocal array_expr + n = t[0] + array_expr <<= (expr * n) if n else Empty() + # clear list contents, but keep any named results + del t[:] + + if intExpr is None: + intExpr = Word(nums).set_parse_action(lambda t: int(t[0])) + else: + intExpr = intExpr.copy() + intExpr.set_name("arrayLen") + intExpr.add_parse_action(count_field_parse_action, call_during_try=True) + return (intExpr + array_expr).set_name("(len) " + str(expr) + "...") + + +def match_previous_literal(expr: ParserElement) -> ParserElement: + """Helper to define an expression that is indirectly defined from + the tokens matched in a previous expression, that is, it looks for + a 'repeat' of a previous expression. For example:: + + first = Word(nums) + second = match_previous_literal(first) + match_expr = first + ":" + second + + will match ``"1:1"``, but not ``"1:2"``. Because this + matches a previous literal, will also match the leading + ``"1:1"`` in ``"1:10"``. 
If this is not desired, use + :class:`match_previous_expr`. Do *not* use with packrat parsing + enabled. + """ + rep = Forward() + + def copy_token_to_repeater(s, l, t): + if t: + if len(t) == 1: + rep << t[0] + else: + # flatten t tokens + tflat = _flatten(t.as_list()) + rep << And(Literal(tt) for tt in tflat) + else: + rep << Empty() + + expr.add_parse_action(copy_token_to_repeater, callDuringTry=True) + rep.set_name("(prev) " + str(expr)) + return rep + + +def match_previous_expr(expr: ParserElement) -> ParserElement: + """Helper to define an expression that is indirectly defined from + the tokens matched in a previous expression, that is, it looks for + a 'repeat' of a previous expression. For example:: + + first = Word(nums) + second = match_previous_expr(first) + match_expr = first + ":" + second + + will match ``"1:1"``, but not ``"1:2"``. Because this + matches by expressions, will *not* match the leading ``"1:1"`` + in ``"1:10"``; the expressions are evaluated first, and then + compared, so ``"1"`` is compared with ``"10"``. Do *not* use + with packrat parsing enabled. + """ + rep = Forward() + e2 = expr.copy() + rep <<= e2 + + def copy_token_to_repeater(s, l, t): + matchTokens = _flatten(t.as_list()) + + def must_match_these_tokens(s, l, t): + theseTokens = _flatten(t.as_list()) + if theseTokens != matchTokens: + raise ParseException(s, l, "Expected {}, found{}".format(matchTokens, theseTokens)) + + rep.set_parse_action(must_match_these_tokens, callDuringTry=True) + + expr.add_parse_action(copy_token_to_repeater, callDuringTry=True) + rep.set_name("(prev) " + str(expr)) + return rep + + +def one_of( + strs: Union[IterableType[str], str], + caseless: bool = False, + use_regex: bool = True, + as_keyword: bool = False, + *, + useRegex: bool = True, + asKeyword: bool = False, +) -> ParserElement: + """Helper to quickly define a set of alternative :class:`Literal` s, + and makes sure to do longest-first testing when there is a conflict, + regardless of the input order, but returns + a :class:`MatchFirst` for best performance. 
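+
+    For instance, the longest-first reordering means (illustrative)::
+
+        one_of("= == <= < >= >").parse_string("<=")   # matches '<=', not '<' followed by '='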
+ + Parameters: + + - ``strs`` - a string of space-delimited literals, or a collection of + string literals + - ``caseless`` - treat all literals as caseless - (default= ``False``) + - ``use_regex`` - as an optimization, will + generate a :class:`Regex` object; otherwise, will generate + a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if + creating a :class:`Regex` raises an exception) - (default= ``True``) + - ``as_keyword`` - enforce :class:`Keyword`-style matching on the + generated expressions - (default= ``False``) + - ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 compatibility, + but will be removed in a future release + + Example:: + + comp_oper = one_of("< = > <= >= !=") + var = Word(alphas) + number = Word(nums) + term = var | number + comparison_expr = term + comp_oper + term + print(comparison_expr.search_string("B = 12 AA=23 B<=AA AA>12")) + + prints:: + + [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] + """ + asKeyword = asKeyword or as_keyword + useRegex = useRegex and use_regex + + if ( + isinstance(caseless, str_type) + and __diag__.warn_on_multiple_string_args_to_oneof + ): + warnings.warn( + "More than one string argument passed to one_of, pass" + " choices as a list or space-delimited string", + stacklevel=2, + ) + + if caseless: + isequal = lambda a, b: a.upper() == b.upper() + masks = lambda a, b: b.upper().startswith(a.upper()) + parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral + else: + isequal = lambda a, b: a == b + masks = lambda a, b: b.startswith(a) + parseElementClass = Keyword if asKeyword else Literal + + symbols: List[str] = [] + if isinstance(strs, str_type): + symbols = strs.split() + elif isinstance(strs, Iterable): + symbols = list(strs) + else: + raise TypeError("Invalid argument to one_of, expected string or iterable") + if not symbols: + return NoMatch() + + # reorder given symbols to take care to avoid masking longer choices with shorter ones + # (but only if the given symbols are not just single characters) + if any(len(sym) > 1 for sym in symbols): + i = 0 + while i < len(symbols) - 1: + cur = symbols[i] + for j, other in enumerate(symbols[i + 1 :]): + if isequal(other, cur): + del symbols[i + j + 1] + break + elif masks(cur, other): + del symbols[i + j + 1] + symbols.insert(i, other) + break + else: + i += 1 + + if useRegex: + re_flags: int = re.IGNORECASE if caseless else 0 + + try: + if all(len(sym) == 1 for sym in symbols): + # symbols are just single characters, create range regex pattern + patt = "[{}]".format( + "".join(_escape_regex_range_chars(sym) for sym in symbols) + ) + else: + patt = "|".join(re.escape(sym) for sym in symbols) + + # wrap with \b word break markers if defining as keywords + if asKeyword: + patt = r"\b(?:{})\b".format(patt) + + ret = Regex(patt, flags=re_flags).set_name(" | ".join(symbols)) + + if caseless: + # add parse action to return symbols as specified, not in random + # casing as found in input string + symbol_map = {sym.lower(): sym for sym in symbols} + ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()]) + + return ret + + except sre_constants.error: + warnings.warn( + "Exception creating Regex for one_of, building MatchFirst", stacklevel=2 + ) + + # last resort, just use MatchFirst + return MatchFirst(parseElementClass(sym) for sym in symbols).set_name( + " | ".join(symbols) + ) + + +def dict_of(key: ParserElement, value: ParserElement) -> ParserElement: + """Helper to easily and clearly define a dictionary by 
specifying + the respective patterns for the key and value. Takes care of + defining the :class:`Dict`, :class:`ZeroOrMore`, and + :class:`Group` tokens in the proper order. The key pattern + can include delimiting markers or punctuation, as long as they are + suppressed, thereby leaving the significant key text. The value + pattern can include named results, so that the :class:`Dict` results + can include named token fields. + + Example:: + + text = "shape: SQUARE posn: upper left color: light blue texture: burlap" + attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) + print(OneOrMore(attr_expr).parse_string(text).dump()) + + attr_label = label + attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join) + + # similar to Dict, but simpler call format + result = dict_of(attr_label, attr_value).parse_string(text) + print(result.dump()) + print(result['shape']) + print(result.shape) # object attribute access works too + print(result.as_dict()) + + prints:: + + [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] + - color: light blue + - posn: upper left + - shape: SQUARE + - texture: burlap + SQUARE + SQUARE + {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} + """ + return Dict(OneOrMore(Group(key + value))) + + +def original_text_for( + expr: ParserElement, as_string: bool = True, *, asString: bool = True +) -> ParserElement: + """Helper to return the original, untokenized text for a given + expression. Useful to restore the parsed fields of an HTML start + tag into the raw tag text itself, or to revert separate tokens with + intervening whitespace back to the original matching input text. By + default, returns astring containing the original parsed text. + + If the optional ``as_string`` argument is passed as + ``False``, then the return value is + a :class:`ParseResults` containing any results names that + were originally matched, and a single token containing the original + matched text from the input string. So if the expression passed to + :class:`original_text_for` contains expressions with defined + results names, you must set ``as_string`` to ``False`` if you + want to preserve those results name values. + + The ``asString`` pre-PEP8 argument is retained for compatibility, + but will be removed in a future release. + + Example:: + + src = "this is test bold text normal text " + for tag in ("b", "i"): + opener, closer = make_html_tags(tag) + patt = original_text_for(opener + SkipTo(closer) + closer) + print(patt.search_string(src)[0]) + + prints:: + + [' bold text '] + ['text'] + """ + asString = asString and as_string + + locMarker = Empty().set_parse_action(lambda s, loc, t: loc) + endlocMarker = locMarker.copy() + endlocMarker.callPreparse = False + matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") + if asString: + extractText = lambda s, l, t: s[t._original_start : t._original_end] + else: + + def extractText(s, l, t): + t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]] + + matchExpr.set_parse_action(extractText) + matchExpr.ignoreExprs = expr.ignoreExprs + matchExpr.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection) + return matchExpr + + +def ungroup(expr: ParserElement) -> ParserElement: + """Helper to undo pyparsing's default grouping of And expressions, + even if all but one are non-empty. 
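+
+    A minimal illustrative sketch::
+
+        grouped = Group(Word(alphas) + Word(nums))
+        print(grouped.parse_string("ab 12"))           # -> [['ab', '12']]
+        print(ungroup(grouped).parse_string("ab 12"))  # -> ['ab', '12']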
+ """
+ return TokenConverter(expr).add_parse_action(lambda t: t[0])
+
+
+def locatedExpr(expr: ParserElement) -> ParserElement:
+ """
+ (DEPRECATED - future code should use the Located class)
+ Helper to decorate a returned token with its starting and ending
+ locations in the input string.
+
+ This helper adds the following results names:
+
+ - ``locn_start`` - location where matched expression begins
+ - ``locn_end`` - location where matched expression ends
+ - ``value`` - the actual parsed results
+
+ Be careful if the input text contains ``<TAB>`` characters, you
+ may want to call :class:`ParserElement.parseWithTabs`
+
+ Example::
+
+ wd = Word(alphas)
+ for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
+ print(match)
+
+ prints::
+
+ [[0, 'ljsdf', 5]]
+ [[8, 'lksdjjf', 15]]
+ [[18, 'lkkjj', 23]]
+ """
+ locator = Empty().set_parse_action(lambda ss, ll, tt: ll)
+ return Group(
+ locator("locn_start")
+ + expr("value")
+ + locator.copy().leaveWhitespace()("locn_end")
+ )
+
+
+def nested_expr(
+ opener: Union[str, ParserElement] = "(",
+ closer: Union[str, ParserElement] = ")",
+ content: OptionalType[ParserElement] = None,
+ ignore_expr: ParserElement = quoted_string(),
+ *,
+ ignoreExpr: ParserElement = quoted_string(),
+) -> ParserElement:
+ """Helper method for defining nested lists enclosed in opening and
+ closing delimiters (``"("`` and ``")"`` are the default).
+
+ Parameters:
+ - ``opener`` - opening character for a nested list
+ (default= ``"("``); can also be a pyparsing expression
+ - ``closer`` - closing character for a nested list
+ (default= ``")"``); can also be a pyparsing expression
+ - ``content`` - expression for items within the nested lists
+ (default= ``None``)
+ - ``ignore_expr`` - expression for ignoring opening and closing delimiters
+ (default= :class:`quoted_string`)
+ - ``ignoreExpr`` - this pre-PEP8 argument is retained for compatibility
+ but will be removed in a future release
+
+ If an expression is not provided for the content argument, the
+ nested expression will capture all whitespace-delimited content
+ between delimiters as a list of separate values.
+
+ Use the ``ignore_expr`` argument to define expressions that may
+ contain opening or closing characters that should not be treated as
+ opening or closing characters for nesting, such as quoted_string or
+ a comment expression. Specify multiple expressions using an
+ :class:`Or` or :class:`MatchFirst`. The default is
+ :class:`quoted_string`, but if no expressions are to be ignored, then
+ pass ``None`` for this argument.
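+
+ For instance (an illustrative sketch, not part of the original docstring),
+ the defaults parse whitespace-delimited tokens into nested lists::
+
+ print(nested_expr().parse_string("(a (b c) d)"))
+ # -> [['a', ['b', 'c'], 'd']]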
+ + Example:: + + data_type = one_of("void int short long char float double") + decl_data_type = Combine(data_type + Opt(Word('*'))) + ident = Word(alphas+'_', alphanums+'_') + number = pyparsing_common.number + arg = Group(decl_data_type + ident) + LPAR, RPAR = map(Suppress, "()") + + code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment)) + + c_function = (decl_data_type("type") + + ident("name") + + LPAR + Opt(delimited_list(arg), [])("args") + RPAR + + code_body("body")) + c_function.ignore(c_style_comment) + + source_code = ''' + int is_odd(int x) { + return (x%2); + } + + int dec_to_hex(char hchar) { + if (hchar >= '0' && hchar <= '9') { + return (ord(hchar)-ord('0')); + } else { + return (10+ord(hchar)-ord('A')); + } + } + ''' + for func in c_function.search_string(source_code): + print("%(name)s (%(type)s) args: %(args)s" % func) + + + prints:: + + is_odd (int) args: [['int', 'x']] + dec_to_hex (int) args: [['char', 'hchar']] + """ + if ignoreExpr != ignore_expr: + ignoreExpr = ignore_expr if ignoreExpr == quoted_string() else ignoreExpr + if opener == closer: + raise ValueError("opening and closing strings cannot be the same") + if content is None: + if isinstance(opener, str_type) and isinstance(closer, str_type): + if len(opener) == 1 and len(closer) == 1: + if ignoreExpr is not None: + content = Combine( + OneOrMore( + ~ignoreExpr + + CharsNotIn( + opener + closer + ParserElement.DEFAULT_WHITE_CHARS, + exact=1, + ) + ) + ).set_parse_action(lambda t: t[0].strip()) + else: + content = empty.copy() + CharsNotIn( + opener + closer + ParserElement.DEFAULT_WHITE_CHARS + ).set_parse_action(lambda t: t[0].strip()) + else: + if ignoreExpr is not None: + content = Combine( + OneOrMore( + ~ignoreExpr + + ~Literal(opener) + + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1) + ) + ).set_parse_action(lambda t: t[0].strip()) + else: + content = Combine( + OneOrMore( + ~Literal(opener) + + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1) + ) + ).set_parse_action(lambda t: t[0].strip()) + else: + raise ValueError( + "opening and closing arguments must be strings if no content expression is given" + ) + ret = Forward() + if ignoreExpr is not None: + ret <<= Group( + Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer) + ) + else: + ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer)) + ret.set_name("nested %s%s expression" % (opener, closer)) + return ret + + +def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")): + """Internal helper to construct opening and closing tag expressions, given a tag name""" + if isinstance(tagStr, str_type): + resname = tagStr + tagStr = Keyword(tagStr, caseless=not xml) + else: + resname = tagStr.name + + tagAttrName = Word(alphas, alphanums + "_-:") + if xml: + tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes) + openTag = ( + suppress_LT + + tagStr("tag") + + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue))) + + Opt("/", default=[False])("empty").set_parse_action( + lambda s, l, t: t[0] == "/" + ) + + suppress_GT + ) + else: + tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word( + printables, exclude_chars=">" + ) + openTag = ( + suppress_LT + + tagStr("tag") + + Dict( + ZeroOrMore( + Group( + tagAttrName.set_parse_action(lambda t: t[0].lower()) + + Opt(Suppress("=") + tagAttrValue) + ) + ) + ) + + Opt("/", 
default=[False])("empty").set_parse_action(
+ lambda s, l, t: t[0] == "/"
+ )
+ + suppress_GT
+ )
+
+ closeTag = Combine(Literal("</") + tagStr + ">", adjacent=False)
+
+ openTag.set_name("<%s>" % resname)
+ # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
+ openTag.add_parse_action(
+ lambda t: t.__setitem__(
+ "start" + "".join(resname.replace(":", " ").title().split()), t.copy()
+ )
+ )
+ closeTag = closeTag(
+ "end" + "".join(resname.replace(":", " ").title().split())
+ ).set_name("</%s>" % resname)
+ openTag.tag = resname
+ closeTag.tag = resname
+ openTag.tag_body = SkipTo(closeTag())
+ return openTag, closeTag
+
+
+def make_html_tags(
+ tag_str: Union[str, ParserElement]
+) -> Tuple[ParserElement, ParserElement]:
+ """Helper to construct opening and closing tag expressions for HTML,
+ given a tag name. Matches tags in either upper or lower case,
+ attributes with namespaces and with quoted or unquoted values.
+
+ Example::
+
+ text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
+ # make_html_tags returns pyparsing expressions for the opening and
+ # closing tags as a 2-tuple
+ a, a_end = make_html_tags("A")
+ link_expr = a + SkipTo(a_end)("link_text") + a_end
+
+ for link in link_expr.search_string(text):
+ # attributes in the <A> tag (like "href" shown here) are
+ # also accessible as named results
+ print(link.link_text, '->', link.href)
+
+ prints::
+
+ pyparsing -> https://github.com/pyparsing/pyparsing/wiki
+ """
+ return _makeTags(tag_str, False)
+
+
+def make_xml_tags(
+ tag_str: Union[str, ParserElement]
+) -> Tuple[ParserElement, ParserElement]:
+ """Helper to construct opening and closing tag expressions for XML,
+ given a tag name. Matches tags only in the given upper/lower case.
+
+ Example: similar to :class:`make_html_tags`
+ """
+ return _makeTags(tag_str, True)
+
+
+any_open_tag, any_close_tag = make_html_tags(
+ Word(alphas, alphanums + "_:").set_name("any tag")
+)
+
+_htmlEntityMap = {k.rstrip(";"): v for k, v in html.entities.html5.items()}
+common_html_entity = Regex("&(?P<entity>" + "|".join(_htmlEntityMap) + ");").set_name(
+ "common HTML entity"
+)
+
+
+def replace_html_entity(t):
+ """Helper parser action to replace common HTML entities with their special characters"""
+ return _htmlEntityMap.get(t.entity)
+
+
+class OpAssoc(Enum):
+ LEFT = 1
+ RIGHT = 2
+
+
+InfixNotationOperatorArgType = Union[
+ ParserElement, str, Tuple[Union[ParserElement, str], Union[ParserElement, str]]
+]
+InfixNotationOperatorSpec = Union[
+ Tuple[
+ InfixNotationOperatorArgType,
+ int,
+ OpAssoc,
+ OptionalType[ParseAction],
+ ],
+ Tuple[
+ InfixNotationOperatorArgType,
+ int,
+ OpAssoc,
+ ],
+]
+
+
+def infix_notation(
+ base_expr: ParserElement,
+ op_list: List[InfixNotationOperatorSpec],
+ lpar: Union[str, ParserElement] = Suppress("("),
+ rpar: Union[str, ParserElement] = Suppress(")"),
+) -> ParserElement:
+ """Helper method for constructing grammars of expressions made up of
+ operators working in a precedence hierarchy. Operators may be unary
+ or binary, left- or right-associative. Parse actions can also be
+ attached to operator expressions. The generated parser will also
+ recognize the use of parentheses to override operator precedences
+ (see example below).
+
+ Note: if you define a deep operator list, you may see performance
+ issues when using infix_notation. See
+ :class:`ParserElement.enable_packrat` for a mechanism to potentially
+ improve your parser performance.
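+
+ (A minimal sketch of that suggestion, not part of the original note;
+ enable packrat once, before building the parser)::
+
+ import pyparsing as pp
+ pp.ParserElement.enable_packrat()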
+ + Parameters: + - ``base_expr`` - expression representing the most basic operand to + be used in the expression + - ``op_list`` - list of tuples, one for each operator precedence level + in the expression grammar; each tuple is of the form ``(op_expr, + num_operands, right_left_assoc, (optional)parse_action)``, where: + + - ``op_expr`` is the pyparsing expression for the operator; may also + be a string, which will be converted to a Literal; if ``num_operands`` + is 3, ``op_expr`` is a tuple of two expressions, for the two + operators separating the 3 terms + - ``num_operands`` is the number of terms for this operator (must be 1, + 2, or 3) + - ``right_left_assoc`` is the indicator whether the operator is right + or left associative, using the pyparsing-defined constants + ``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``. + - ``parse_action`` is the parse action to be associated with + expressions matching this operator expression (the parse action + tuple member may be omitted); if the parse action is passed + a tuple or list of functions, this is equivalent to calling + ``set_parse_action(*fn)`` + (:class:`ParserElement.set_parse_action`) + - ``lpar`` - expression for matching left-parentheses + (default= ``Suppress('(')``) + - ``rpar`` - expression for matching right-parentheses + (default= ``Suppress(')')``) + + Example:: + + # simple example of four-function arithmetic with ints and + # variable names + integer = pyparsing_common.signed_integer + varname = pyparsing_common.identifier + + arith_expr = infix_notation(integer | varname, + [ + ('-', 1, OpAssoc.RIGHT), + (one_of('* /'), 2, OpAssoc.LEFT), + (one_of('+ -'), 2, OpAssoc.LEFT), + ]) + + arith_expr.run_tests(''' + 5+3*6 + (5+3)*6 + -2--11 + ''', full_dump=False) + + prints:: + + 5+3*6 + [[5, '+', [3, '*', 6]]] + + (5+3)*6 + [[[5, '+', 3], '*', 6]] + + -2--11 + [[['-', 2], '-', ['-', 11]]] + """ + # captive version of FollowedBy that does not do parse actions or capture results names + class _FB(FollowedBy): + def parseImpl(self, instring, loc, doActions=True): + self.expr.try_parse(instring, loc) + return loc, [] + + _FB.__name__ = "FollowedBy>" + + ret = Forward() + lpar = Suppress(lpar) + rpar = Suppress(rpar) + lastExpr = base_expr | (lpar + ret + rpar) + for i, operDef in enumerate(op_list): + opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4] + if isinstance(opExpr, str_type): + opExpr = ParserElement._literalStringClass(opExpr) + if arity == 3: + if not isinstance(opExpr, (tuple, list)) or len(opExpr) != 2: + raise ValueError( + "if numterms=3, opExpr must be a tuple or list of two expressions" + ) + opExpr1, opExpr2 = opExpr + term_name = "{}{} term".format(opExpr1, opExpr2) + else: + term_name = "{} term".format(opExpr) + + if not 1 <= arity <= 3: + raise ValueError("operator must be unary (1), binary (2), or ternary (3)") + + if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT): + raise ValueError("operator must indicate right or left associativity") + + thisExpr = Forward().set_name(term_name) + if rightLeftAssoc is OpAssoc.LEFT: + if arity == 1: + matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + opExpr[1, ...]) + elif arity == 2: + if opExpr is not None: + matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group( + lastExpr + (opExpr + lastExpr)[1, ...] 
+ ) + else: + matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr[2, ...]) + elif arity == 3: + matchExpr = _FB( + lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr + ) + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr)) + elif rightLeftAssoc is OpAssoc.RIGHT: + if arity == 1: + # try to avoid LR with this extra test + if not isinstance(opExpr, Opt): + opExpr = Opt(opExpr) + matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr) + elif arity == 2: + if opExpr is not None: + matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group( + lastExpr + (opExpr + thisExpr)[1, ...] + ) + else: + matchExpr = _FB(lastExpr + thisExpr) + Group( + lastExpr + thisExpr[1, ...] + ) + elif arity == 3: + matchExpr = _FB( + lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr + ) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + if pa: + if isinstance(pa, (tuple, list)): + matchExpr.set_parse_action(*pa) + else: + matchExpr.set_parse_action(pa) + thisExpr <<= (matchExpr | lastExpr).setName(term_name) + lastExpr = thisExpr + ret <<= lastExpr + return ret + + +def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]): + """ + (DEPRECATED - use IndentedBlock class instead) + Helper method for defining space-delimited indentation blocks, + such as those used to define block statements in Python source code. + + Parameters: + + - ``blockStatementExpr`` - expression defining syntax of statement that + is repeated within the indented block + - ``indentStack`` - list created by caller to manage indentation stack + (multiple ``statementWithIndentedBlock`` expressions within a single + grammar should share a common ``indentStack``) + - ``indent`` - boolean indicating whether block must be indented beyond + the current level; set to ``False`` for block of left-most statements + (default= ``True``) + + A valid block must contain at least one ``blockStatement``. + + (Note that indentedBlock uses internal parse actions which make it + incompatible with packrat parsing.) 
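+
+ A minimal sketch of the suggested :class:`IndentedBlock` replacement
+ (illustrative only; ``stmt`` is any statement expression, as in the
+ example below)::
+
+ block = IndentedBlock(stmt, recursive=True)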
+
+ Example::
+
+ data = '''
+ def A(z):
+ A1
+ B = 100
+ G = A2
+ A2
+ A3
+ B
+ def BB(a,b,c):
+ BB1
+ def BBA():
+ bba1
+ bba2
+ bba3
+ C
+ D
+ def spam(x,y):
+ def eggs(z):
+ pass
+ '''
+
+
+ indentStack = [1]
+ stmt = Forward()
+
+ identifier = Word(alphas, alphanums)
+ funcDecl = ("def" + identifier + Group("(" + Opt(delimitedList(identifier)) + ")") + ":")
+ func_body = indentedBlock(stmt, indentStack)
+ funcDef = Group(funcDecl + func_body)
+
+ rvalue = Forward()
+ funcCall = Group(identifier + "(" + Opt(delimitedList(rvalue)) + ")")
+ rvalue << (funcCall | identifier | Word(nums))
+ assignment = Group(identifier + "=" + rvalue)
+ stmt << (funcDef | assignment | identifier)
+
+ module_body = OneOrMore(stmt)
+
+ parseTree = module_body.parseString(data)
+ parseTree.pprint()
+
+ prints::
+
+ [['def',
+ 'A',
+ ['(', 'z', ')'],
+ ':',
+ [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
+ 'B',
+ ['def',
+ 'BB',
+ ['(', 'a', 'b', 'c', ')'],
+ ':',
+ [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
+ 'C',
+ 'D',
+ ['def',
+ 'spam',
+ ['(', 'x', 'y', ')'],
+ ':',
+ [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
+ """
+ backup_stacks.append(indentStack[:])
+
+ def reset_stack():
+ indentStack[:] = backup_stacks[-1]
+
+ def checkPeerIndent(s, l, t):
+ if l >= len(s):
+ return
+ curCol = col(l, s)
+ if curCol != indentStack[-1]:
+ if curCol > indentStack[-1]:
+ raise ParseException(s, l, "illegal nesting")
+ raise ParseException(s, l, "not a peer entry")
+
+ def checkSubIndent(s, l, t):
+ curCol = col(l, s)
+ if curCol > indentStack[-1]:
+ indentStack.append(curCol)
+ else:
+ raise ParseException(s, l, "not a subentry")
+
+ def checkUnindent(s, l, t):
+ if l >= len(s):
+ return
+ curCol = col(l, s)
+ if not (indentStack and curCol in indentStack):
+ raise ParseException(s, l, "not an unindent")
+ if curCol < indentStack[-1]:
+ indentStack.pop()
+
+ NL = OneOrMore(LineEnd().set_whitespace_chars("\t ").suppress())
+ INDENT = (Empty() + Empty().set_parse_action(checkSubIndent)).set_name("INDENT")
+ PEER = Empty().set_parse_action(checkPeerIndent).set_name("")
+ UNDENT = Empty().set_parse_action(checkUnindent).set_name("UNINDENT")
+ if indent:
+ smExpr = Group(
+ Opt(NL)
+ + INDENT
+ + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
+ + UNDENT
+ )
+ else:
+ smExpr = Group(
+ Opt(NL)
+ + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
+ + Opt(UNDENT)
+ )
+
+ # add a parse action to remove backup_stack from list of backups
+ smExpr.add_parse_action(
+ lambda: backup_stacks.pop(-1) and None if backup_stacks else None
+ )
+ smExpr.set_fail_action(lambda a, b, c, d: reset_stack())
+ blockStatementExpr.ignore(_bslash + LineEnd())
+ return smExpr.set_name("indented block")
+
+
+# it's easy to get these comment structures wrong - they're very common, so may as well make them available
+c_style_comment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").set_name(
+ "C style comment"
+)
+"Comment of the form ``/* ... */``"
+
+html_comment = Regex(r"<!--[\s\S]*?-->").set_name("HTML comment")
+"Comment of the form ``<!-- ... -->``"
+
+rest_of_line = Regex(r".*").leave_whitespace().set_name("rest of line")
+dbl_slash_comment = Regex(r"//(?:\\\n|[^\n])*").set_name("// comment")
+"Comment of the form ``// ...
(to end of line)``"
+
+cpp_style_comment = Combine(
+ Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dbl_slash_comment
+).set_name("C++ style comment")
+"Comment of either form :class:`c_style_comment` or :class:`dbl_slash_comment`"
+
+java_style_comment = cpp_style_comment
+"Same as :class:`cpp_style_comment`"
+
+python_style_comment = Regex(r"#.*").set_name("Python style comment")
+"Comment of the form ``# ... (to end of line)``"
+
+
+# build list of built-in expressions, for future reference if a global default value
+# gets updated
+_builtin_exprs = [v for v in vars().values() if isinstance(v, ParserElement)]
+
+
+# pre-PEP8 compatible names
+delimitedList = delimited_list
+countedArray = counted_array
+matchPreviousLiteral = match_previous_literal
+matchPreviousExpr = match_previous_expr
+oneOf = one_of
+dictOf = dict_of
+originalTextFor = original_text_for
+nestedExpr = nested_expr
+makeHTMLTags = make_html_tags
+makeXMLTags = make_xml_tags
+anyOpenTag, anyCloseTag = any_open_tag, any_close_tag
+commonHTMLEntity = common_html_entity
+replaceHTMLEntity = replace_html_entity
+opAssoc = OpAssoc
+infixNotation = infix_notation
+cStyleComment = c_style_comment
+htmlComment = html_comment
+restOfLine = rest_of_line
+dblSlashComment = dbl_slash_comment
+cppStyleComment = cpp_style_comment
+javaStyleComment = java_style_comment
+pythonStyleComment = python_style_comment
diff --git a/pipenv/vendor/pyparsing/results.py b/pipenv/vendor/pyparsing/results.py
new file mode 100644
index 0000000000..9676f45b88
--- /dev/null
+++ b/pipenv/vendor/pyparsing/results.py
@@ -0,0 +1,760 @@
+# results.py
+from collections.abc import MutableMapping, Mapping, MutableSequence, Iterator
+import pprint
+from weakref import ref as wkref
+from typing import Tuple, Any
+
+str_type: Tuple[type, ...] = (str, bytes)
+_generator_type = type((_ for _ in ()))
+
+
+class _ParseResultsWithOffset:
+ __slots__ = ["tup"]
+
+ def __init__(self, p1, p2):
+ self.tup = (p1, p2)
+
+ def __getitem__(self, i):
+ return self.tup[i]
+
+ def __getstate__(self):
+ return self.tup
+
+ def __setstate__(self, *args):
+ self.tup = args[0]
+
+
+class ParseResults:
+ """Structured parse results, to provide multiple means of access to
+ the parsed data:
+
+ - as a list (``len(results)``)
+ - by list index (``results[0], results[1]``, etc.)
+ - by attribute (``results.<results_name>`` - see :class:`ParserElement.set_results_name`)
+
+ Example::
+
+ integer = Word(nums)
+ date_str = (integer.set_results_name("year") + '/'
+ + integer.set_results_name("month") + '/'
+ + integer.set_results_name("day"))
+ # equivalent form:
+ # date_str = (integer("year") + '/'
+ # + integer("month") + '/'
+ # + integer("day"))
+
+ # parse_string returns a ParseResults object
+ result = date_str.parse_string("1999/12/31")
+
+ def test(s, fn=repr):
+ print("{} -> {}".format(s, fn(eval(s))))
+ test("list(result)")
+ test("result[0]")
+ test("result['month']")
+ test("result.day")
+ test("'month' in result")
+ test("'minutes' in result")
+ test("result.dump()", str)
+
+ prints::
+
+ list(result) -> ['1999', '/', '12', '/', '31']
+ result[0] -> '1999'
+ result['month'] -> '12'
+ result.day -> '31'
+ 'month' in result -> True
+ 'minutes' in result -> False
+ result.dump() -> ['1999', '/', '12', '/', '31']
+ - day: 31
+ - month: 12
+ - year: 1999
+ """
+
+ _null_values: Tuple[Any, ...]
= (None, [], "", ()) + + __slots__ = [ + "_name", + "_parent", + "_all_names", + "_modal", + "_toklist", + "_tokdict", + "__weakref__", + ] + + class List(list): + """ + Simple wrapper class to distinguish parsed list results that should be preserved + as actual Python lists, instead of being converted to :class:`ParseResults`: + + LBRACK, RBRACK = map(pp.Suppress, "[]") + element = pp.Forward() + item = ppc.integer + element_list = LBRACK + pp.delimited_list(element) + RBRACK + + # add parse actions to convert from ParseResults to actual Python collection types + def as_python_list(t): + return pp.ParseResults.List(t.as_list()) + element_list.add_parse_action(as_python_list) + + element <<= item | element_list + + element.run_tests(''' + 100 + [2,3,4] + [[2, 1],3,4] + [(2, 1),3,4] + (2,3,4) + ''', post_parse=lambda s, r: (r[0], type(r[0]))) + + prints: + + 100 + (100, ) + + [2,3,4] + ([2, 3, 4], ) + + [[2, 1],3,4] + ([[2, 1], 3, 4], ) + + (Used internally by :class:`Group` when `aslist=True`.) + """ + + def __new__(cls, contained=None): + if contained is None: + contained = [] + + if not isinstance(contained, list): + raise TypeError( + "{} may only be constructed with a list," + " not {}".format(cls.__name__, type(contained).__name__) + ) + + return list.__new__(cls) + + def __new__(cls, toklist=None, name=None, **kwargs): + if isinstance(toklist, ParseResults): + return toklist + self = object.__new__(cls) + self._name = None + self._parent = None + self._all_names = set() + + if toklist is None: + self._toklist = [] + elif isinstance(toklist, (list, _generator_type)): + self._toklist = ( + [toklist[:]] + if isinstance(toklist, ParseResults.List) + else list(toklist) + ) + else: + self._toklist = [toklist] + self._tokdict = dict() + return self + + # Performance tuning: we construct a *lot* of these, so keep this + # constructor as small and fast as possible + def __init__( + self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance + ): + self._modal = modal + if name is not None and name != "": + if isinstance(name, int): + name = str(name) + if not modal: + self._all_names = {name} + self._name = name + if toklist not in self._null_values: + if isinstance(toklist, (str_type, type)): + toklist = [toklist] + if asList: + if isinstance(toklist, ParseResults): + self[name] = _ParseResultsWithOffset( + ParseResults(toklist._toklist), 0 + ) + else: + self[name] = _ParseResultsWithOffset( + ParseResults(toklist[0]), 0 + ) + self[name]._name = name + else: + try: + self[name] = toklist[0] + except (KeyError, TypeError, IndexError): + if toklist is not self: + self[name] = toklist + else: + self._name = name + + def __getitem__(self, i): + if isinstance(i, (int, slice)): + return self._toklist[i] + else: + if i not in self._all_names: + return self._tokdict[i][-1][0] + else: + return ParseResults([v[0] for v in self._tokdict[i]]) + + def __setitem__(self, k, v, isinstance=isinstance): + if isinstance(v, _ParseResultsWithOffset): + self._tokdict[k] = self._tokdict.get(k, list()) + [v] + sub = v[0] + elif isinstance(k, (int, slice)): + self._toklist[k] = v + sub = v + else: + self._tokdict[k] = self._tokdict.get(k, list()) + [ + _ParseResultsWithOffset(v, 0) + ] + sub = v + if isinstance(sub, ParseResults): + sub._parent = wkref(self) + + def __delitem__(self, i): + if isinstance(i, (int, slice)): + mylen = len(self._toklist) + del self._toklist[i] + + # convert int to slice + if isinstance(i, int): + if i < 0: + i += mylen + i = slice(i, i + 1) + # get removed indices + 
removed = list(range(*i.indices(mylen))) + removed.reverse() + # fixup indices in token dictionary + for name, occurrences in self._tokdict.items(): + for j in removed: + for k, (value, position) in enumerate(occurrences): + occurrences[k] = _ParseResultsWithOffset( + value, position - (position > j) + ) + else: + del self._tokdict[i] + + def __contains__(self, k) -> bool: + return k in self._tokdict + + def __len__(self) -> int: + return len(self._toklist) + + def __bool__(self) -> bool: + return not not (self._toklist or self._tokdict) + + def __iter__(self) -> Iterator: + return iter(self._toklist) + + def __reversed__(self) -> Iterator: + return iter(self._toklist[::-1]) + + def keys(self): + return iter(self._tokdict) + + def values(self): + return (self[k] for k in self.keys()) + + def items(self): + return ((k, self[k]) for k in self.keys()) + + def haskeys(self) -> bool: + """ + Since ``keys()`` returns an iterator, this method is helpful in bypassing + code that looks for the existence of any defined results names.""" + return bool(self._tokdict) + + def pop(self, *args, **kwargs): + """ + Removes and returns item at specified index (default= ``last``). + Supports both ``list`` and ``dict`` semantics for ``pop()``. If + passed no argument or an integer argument, it will use ``list`` + semantics and pop tokens from the list of parsed tokens. If passed + a non-integer argument (most likely a string), it will use ``dict`` + semantics and pop the corresponding value from any defined results + names. A second default return value argument is supported, just as in + ``dict.pop()``. + + Example:: + + numlist = Word(nums)[...] + print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] + + def remove_first(tokens): + tokens.pop(0) + numlist.add_parse_action(remove_first) + print(numlist.parse_string("0 123 321")) # -> ['123', '321'] + + label = Word(alphas) + patt = label("LABEL") + OneOrMore(Word(nums)) + print(patt.parse_string("AAB 123 321").dump()) + + # Use pop() in a parse action to remove named result (note that corresponding value is not + # removed from list form of results) + def remove_LABEL(tokens): + tokens.pop("LABEL") + return tokens + patt.add_parse_action(remove_LABEL) + print(patt.parse_string("AAB 123 321").dump()) + + prints:: + + ['AAB', '123', '321'] + - LABEL: AAB + + ['AAB', '123', '321'] + """ + if not args: + args = [-1] + for k, v in kwargs.items(): + if k == "default": + args = (args[0], v) + else: + raise TypeError( + "pop() got an unexpected keyword argument {!r}".format(k) + ) + if isinstance(args[0], int) or len(args) == 1 or args[0] in self: + index = args[0] + ret = self[index] + del self[index] + return ret + else: + defaultvalue = args[1] + return defaultvalue + + def get(self, key, default_value=None): + """ + Returns named result matching the given key, or if there is no + such name, then returns the given ``default_value`` or ``None`` if no + ``default_value`` is specified. + + Similar to ``dict.get()``. + + Example:: + + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parse_string("1999/12/31") + print(result.get("year")) # -> '1999' + print(result.get("hour", "not specified")) # -> 'not specified' + print(result.get("hour")) # -> None + """ + if key in self: + return self[key] + else: + return default_value + + def insert(self, index, ins_string): + """ + Inserts new element at location index in the list of parsed tokens. + + Similar to ``list.insert()``. 
+ + Example:: + + numlist = Word(nums)[...] + print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] + + # use a parse action to insert the parse location in the front of the parsed results + def insert_locn(locn, tokens): + tokens.insert(0, locn) + numlist.add_parse_action(insert_locn) + print(numlist.parse_string("0 123 321")) # -> [0, '0', '123', '321'] + """ + self._toklist.insert(index, ins_string) + # fixup indices in token dictionary + for name, occurrences in self._tokdict.items(): + for k, (value, position) in enumerate(occurrences): + occurrences[k] = _ParseResultsWithOffset( + value, position + (position > index) + ) + + def append(self, item): + """ + Add single element to end of ``ParseResults`` list of elements. + + Example:: + + numlist = Word(nums)[...] + print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] + + # use a parse action to compute the sum of the parsed integers, and add it to the end + def append_sum(tokens): + tokens.append(sum(map(int, tokens))) + numlist.add_parse_action(append_sum) + print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321', 444] + """ + self._toklist.append(item) + + def extend(self, itemseq): + """ + Add sequence of elements to end of ``ParseResults`` list of elements. + + Example:: + + patt = OneOrMore(Word(alphas)) + + # use a parse action to append the reverse of the matched strings, to make a palindrome + def make_palindrome(tokens): + tokens.extend(reversed([t[::-1] for t in tokens])) + return ''.join(tokens) + patt.add_parse_action(make_palindrome) + print(patt.parse_string("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' + """ + if isinstance(itemseq, ParseResults): + self.__iadd__(itemseq) + else: + self._toklist.extend(itemseq) + + def clear(self): + """ + Clear all elements and results names. + """ + del self._toklist[:] + self._tokdict.clear() + + def __getattr__(self, name): + try: + return self[name] + except KeyError: + if name.startswith("__"): + raise AttributeError(name) + return "" + + def __add__(self, other) -> "ParseResults": + ret = self.copy() + ret += other + return ret + + def __iadd__(self, other) -> "ParseResults": + if other._tokdict: + offset = len(self._toklist) + addoffset = lambda a: offset if a < 0 else a + offset + otheritems = other._tokdict.items() + otherdictitems = [ + (k, _ParseResultsWithOffset(v[0], addoffset(v[1]))) + for k, vlist in otheritems + for v in vlist + ] + for k, v in otherdictitems: + self[k] = v + if isinstance(v[0], ParseResults): + v[0]._parent = wkref(self) + + self._toklist += other._toklist + self._all_names |= other._all_names + return self + + def __radd__(self, other) -> "ParseResults": + if isinstance(other, int) and other == 0: + # useful for merging many ParseResults using sum() builtin + return self.copy() + else: + # this may raise a TypeError - so be it + return other + self + + def __repr__(self) -> str: + return "{}({!r}, {})".format(type(self).__name__, self._toklist, self.as_dict()) + + def __str__(self) -> str: + return ( + "[" + + ", ".join( + [ + str(i) if isinstance(i, ParseResults) else repr(i) + for i in self._toklist + ] + ) + + "]" + ) + + def _asStringList(self, sep=""): + out = [] + for item in self._toklist: + if out and sep: + out.append(sep) + if isinstance(item, ParseResults): + out += item._asStringList() + else: + out.append(str(item)) + return out + + def as_list(self) -> list: + """ + Returns the parse results as a nested list of matching tokens, all converted to strings. 
+ + Example:: + + patt = OneOrMore(Word(alphas)) + result = patt.parse_string("sldkj lsdkj sldkj") + # even though the result prints in string-like form, it is actually a pyparsing ParseResults + print(type(result), result) # -> ['sldkj', 'lsdkj', 'sldkj'] + + # Use as_list() to create an actual list + result_list = result.as_list() + print(type(result_list), result_list) # -> ['sldkj', 'lsdkj', 'sldkj'] + """ + return [ + res.as_list() if isinstance(res, ParseResults) else res + for res in self._toklist + ] + + def as_dict(self) -> dict: + """ + Returns the named parse results as a nested dictionary. + + Example:: + + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parse_string('12/31/1999') + print(type(result), repr(result)) # -> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) + + result_dict = result.as_dict() + print(type(result_dict), repr(result_dict)) # -> {'day': '1999', 'year': '12', 'month': '31'} + + # even though a ParseResults supports dict-like access, sometime you just need to have a dict + import json + print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable + print(json.dumps(result.as_dict())) # -> {"month": "31", "day": "1999", "year": "12"} + """ + + def to_item(obj): + if isinstance(obj, ParseResults): + return obj.as_dict() if obj.haskeys() else [to_item(v) for v in obj] + else: + return obj + + return dict((k, to_item(v)) for k, v in self.items()) + + def copy(self) -> "ParseResults": + """ + Returns a new copy of a :class:`ParseResults` object. + """ + ret = ParseResults(self._toklist) + ret._tokdict = self._tokdict.copy() + ret._parent = self._parent + ret._all_names |= self._all_names + ret._name = self._name + return ret + + def get_name(self): + r""" + Returns the results name for this token expression. Useful when several + different expressions might match at a particular location. + + Example:: + + integer = Word(nums) + ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d") + house_number_expr = Suppress('#') + Word(nums, alphanums) + user_data = (Group(house_number_expr)("house_number") + | Group(ssn_expr)("ssn") + | Group(integer)("age")) + user_info = OneOrMore(user_data) + + result = user_info.parse_string("22 111-22-3333 #221B") + for item in result: + print(item.get_name(), ':', item[0]) + + prints:: + + age : 22 + ssn : 111-22-3333 + house_number : 221B + """ + if self._name: + return self._name + elif self._parent: + par = self._parent() + + def find_in_parent(sub): + return next( + ( + k + for k, vlist in par._tokdict.items() + for v, loc in vlist + if sub is v + ), + None, + ) + + return find_in_parent(self) if par else None + elif ( + len(self) == 1 + and len(self._tokdict) == 1 + and next(iter(self._tokdict.values()))[0][1] in (0, -1) + ): + return next(iter(self._tokdict.keys())) + else: + return None + + def dump(self, indent="", full=True, include_list=True, _depth=0) -> str: + """ + Diagnostic method for listing out the contents of + a :class:`ParseResults`. Accepts an optional ``indent`` argument so + that this string can be embedded in a nested display of other data. 
+ + Example:: + + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parse_string('12/31/1999') + print(result.dump()) + + prints:: + + ['12', '/', '31', '/', '1999'] + - day: 1999 + - month: 31 + - year: 12 + """ + out = [] + NL = "\n" + out.append(indent + str(self.as_list()) if include_list else "") + + if full: + if self.haskeys(): + items = sorted((str(k), v) for k, v in self.items()) + for k, v in items: + if out: + out.append(NL) + out.append("{}{}- {}: ".format(indent, (" " * _depth), k)) + if isinstance(v, ParseResults): + if v: + out.append( + v.dump( + indent=indent, + full=full, + include_list=include_list, + _depth=_depth + 1, + ) + ) + else: + out.append(str(v)) + else: + out.append(repr(v)) + if any(isinstance(vv, ParseResults) for vv in self): + v = self + for i, vv in enumerate(v): + if isinstance(vv, ParseResults): + out.append( + "\n{}{}[{}]:\n{}{}{}".format( + indent, + (" " * (_depth)), + i, + indent, + (" " * (_depth + 1)), + vv.dump( + indent=indent, + full=full, + include_list=include_list, + _depth=_depth + 1, + ), + ) + ) + else: + out.append( + "\n%s%s[%d]:\n%s%s%s" + % ( + indent, + (" " * (_depth)), + i, + indent, + (" " * (_depth + 1)), + str(vv), + ) + ) + + return "".join(out) + + def pprint(self, *args, **kwargs): + """ + Pretty-printer for parsed results as a list, using the + `pprint `_ module. + Accepts additional positional or keyword args as defined for + `pprint.pprint `_ . + + Example:: + + ident = Word(alphas, alphanums) + num = Word(nums) + func = Forward() + term = ident | num | Group('(' + func + ')') + func <<= ident + Group(Optional(delimited_list(term))) + result = func.parse_string("fna a,b,(fnb c,d,200),100") + result.pprint(width=40) + + prints:: + + ['fna', + ['a', + 'b', + ['(', 'fnb', ['c', 'd', '200'], ')'], + '100']] + """ + pprint.pprint(self.as_list(), *args, **kwargs) + + # add support for pickle protocol + def __getstate__(self): + return ( + self._toklist, + ( + self._tokdict.copy(), + self._parent is not None and self._parent() or None, + self._all_names, + self._name, + ), + ) + + def __setstate__(self, state): + self._toklist, (self._tokdict, par, inAccumNames, self._name) = state + self._all_names = set(inAccumNames) + if par is not None: + self._parent = wkref(par) + else: + self._parent = None + + def __getnewargs__(self): + return self._toklist, self._name + + def __dir__(self): + return dir(type(self)) + list(self.keys()) + + @classmethod + def from_dict(cls, other, name=None) -> "ParseResults": + """ + Helper classmethod to construct a ``ParseResults`` from a ``dict``, preserving the + name-value relations as results names. If an optional ``name`` argument is + given, a nested ``ParseResults`` will be returned. 
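+
+ Example (an illustrative sketch, not from the original docstring)::
+
+ result = ParseResults.from_dict({"name": "pyparsing", "version": "3.0.7"})
+ print(result.as_dict()) # -> {'name': 'pyparsing', 'version': '3.0.7'}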
+ """ + + def is_iterable(obj): + try: + iter(obj) + except Exception: + return False + else: + return not isinstance(obj, str_type) + + ret = cls([]) + for k, v in other.items(): + if isinstance(v, Mapping): + ret += cls.from_dict(v, name=k) + else: + ret += cls([v], name=k, asList=is_iterable(v)) + if name is not None: + ret = cls([ret], name=name) + return ret + + asList = as_list + asDict = as_dict + getName = get_name + + +MutableMapping.register(ParseResults) +MutableSequence.register(ParseResults) diff --git a/pipenv/vendor/pyparsing/testing.py b/pipenv/vendor/pyparsing/testing.py new file mode 100644 index 0000000000..991972f3fb --- /dev/null +++ b/pipenv/vendor/pyparsing/testing.py @@ -0,0 +1,331 @@ +# testing.py + +from contextlib import contextmanager +from typing import Optional + +from .core import ( + ParserElement, + ParseException, + Keyword, + __diag__, + __compat__, +) + + +class pyparsing_test: + """ + namespace class for classes useful in writing unit tests + """ + + class reset_pyparsing_context: + """ + Context manager to be used when writing unit tests that modify pyparsing config values: + - packrat parsing + - bounded recursion parsing + - default whitespace characters. + - default keyword characters + - literal string auto-conversion class + - __diag__ settings + + Example:: + + with reset_pyparsing_context(): + # test that literals used to construct a grammar are automatically suppressed + ParserElement.inlineLiteralsUsing(Suppress) + + term = Word(alphas) | Word(nums) + group = Group('(' + term[...] + ')') + + # assert that the '()' characters are not included in the parsed tokens + self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def']) + + # after exiting context manager, literals are converted to Literal expressions again + """ + + def __init__(self): + self._save_context = {} + + def save(self): + self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS + self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS + + self._save_context[ + "literal_string_class" + ] = ParserElement._literalStringClass + + self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace + + self._save_context["packrat_enabled"] = ParserElement._packratEnabled + if ParserElement._packratEnabled: + self._save_context[ + "packrat_cache_size" + ] = ParserElement.packrat_cache.size + else: + self._save_context["packrat_cache_size"] = None + self._save_context["packrat_parse"] = ParserElement._parse + self._save_context[ + "recursion_enabled" + ] = ParserElement._left_recursion_enabled + + self._save_context["__diag__"] = { + name: getattr(__diag__, name) for name in __diag__._all_names + } + + self._save_context["__compat__"] = { + "collect_all_And_tokens": __compat__.collect_all_And_tokens + } + + return self + + def restore(self): + # reset pyparsing global state + if ( + ParserElement.DEFAULT_WHITE_CHARS + != self._save_context["default_whitespace"] + ): + ParserElement.set_default_whitespace_chars( + self._save_context["default_whitespace"] + ) + + ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"] + + Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"] + ParserElement.inlineLiteralsUsing( + self._save_context["literal_string_class"] + ) + + for name, value in self._save_context["__diag__"].items(): + (__diag__.enable if value else __diag__.disable)(name) + + ParserElement._packratEnabled = False + if self._save_context["packrat_enabled"]: + 
ParserElement.enable_packrat(self._save_context["packrat_cache_size"]) + else: + ParserElement._parse = self._save_context["packrat_parse"] + ParserElement._left_recursion_enabled = self._save_context[ + "recursion_enabled" + ] + + __compat__.collect_all_And_tokens = self._save_context["__compat__"] + + return self + + def copy(self): + ret = type(self)() + ret._save_context.update(self._save_context) + return ret + + def __enter__(self): + return self.save() + + def __exit__(self, *args): + self.restore() + + class TestParseResultsAsserts: + """ + A mixin class to add parse results assertion methods to normal unittest.TestCase classes. + """ + + def assertParseResultsEquals( + self, result, expected_list=None, expected_dict=None, msg=None + ): + """ + Unit test assertion to compare a :class:`ParseResults` object with an optional ``expected_list``, + and compare any defined results names with an optional ``expected_dict``. + """ + if expected_list is not None: + self.assertEqual(expected_list, result.as_list(), msg=msg) + if expected_dict is not None: + self.assertEqual(expected_dict, result.as_dict(), msg=msg) + + def assertParseAndCheckList( + self, expr, test_string, expected_list, msg=None, verbose=True + ): + """ + Convenience wrapper assert to test a parser element and input string, and assert that + the resulting ``ParseResults.asList()`` is equal to the ``expected_list``. + """ + result = expr.parse_string(test_string, parse_all=True) + if verbose: + print(result.dump()) + else: + print(result.as_list()) + self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg) + + def assertParseAndCheckDict( + self, expr, test_string, expected_dict, msg=None, verbose=True + ): + """ + Convenience wrapper assert to test a parser element and input string, and assert that + the resulting ``ParseResults.asDict()`` is equal to the ``expected_dict``. + """ + result = expr.parse_string(test_string, parseAll=True) + if verbose: + print(result.dump()) + else: + print(result.as_list()) + self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg) + + def assertRunTestResults( + self, run_tests_report, expected_parse_results=None, msg=None + ): + """ + Unit test assertion to evaluate output of ``ParserElement.runTests()``. If a list of + list-dict tuples is given as the ``expected_parse_results`` argument, then these are zipped + with the report tuples returned by ``runTests`` and evaluated using ``assertParseResultsEquals``. + Finally, asserts that the overall ``runTests()`` success value is ``True``. 
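+
+ A sketch of a typical call (illustrative only; ``expr`` and ``tests`` are
+ assumed to be defined by the test)::
+
+ report = expr.run_tests(tests, print_results=False)
+ self.assertRunTestResults(
+ report,
+ expected_parse_results=[(['1', '2', '3'], {'count': '3'})],
+ )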
+ + :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests + :param expected_parse_results (optional): [tuple(str, list, dict, Exception)] + """ + run_test_success, run_test_results = run_tests_report + + if expected_parse_results is not None: + merged = [ + (*rpt, expected) + for rpt, expected in zip(run_test_results, expected_parse_results) + ] + for test_string, result, expected in merged: + # expected should be a tuple containing a list and/or a dict or an exception, + # and optional failure message string + # an empty tuple will skip any result validation + fail_msg = next( + (exp for exp in expected if isinstance(exp, str)), None + ) + expected_exception = next( + ( + exp + for exp in expected + if isinstance(exp, type) and issubclass(exp, Exception) + ), + None, + ) + if expected_exception is not None: + with self.assertRaises( + expected_exception=expected_exception, msg=fail_msg or msg + ): + if isinstance(result, Exception): + raise result + else: + expected_list = next( + (exp for exp in expected if isinstance(exp, list)), None + ) + expected_dict = next( + (exp for exp in expected if isinstance(exp, dict)), None + ) + if (expected_list, expected_dict) != (None, None): + self.assertParseResultsEquals( + result, + expected_list=expected_list, + expected_dict=expected_dict, + msg=fail_msg or msg, + ) + else: + # warning here maybe? + print("no validation for {!r}".format(test_string)) + + # do this last, in case some specific test results can be reported instead + self.assertTrue( + run_test_success, msg=msg if msg is not None else "failed runTests" + ) + + @contextmanager + def assertRaisesParseException(self, exc_type=ParseException, msg=None): + with self.assertRaises(exc_type, msg=msg): + yield + + @staticmethod + def with_line_numbers( + s: str, + start_line: Optional[int] = None, + end_line: Optional[int] = None, + expand_tabs: bool = True, + eol_mark: str = "|", + mark_spaces: Optional[str] = None, + mark_control: Optional[str] = None, + ) -> str: + """ + Helpful method for debugging a parser - prints a string with line and column numbers. + (Line and column numbers are 1-based.) 
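+
+ For example (an illustrative sketch)::
+
+ print(pyparsing_test.with_line_numbers("abc\ndef", eol_mark="|"))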
+ + :param s: tuple(bool, str - string to be printed with line and column numbers + :param start_line: int - (optional) starting line number in s to print (default=1) + :param end_line: int - (optional) ending line number in s to print (default=len(s)) + :param expand_tabs: bool - (optional) expand tabs to spaces, to match the pyparsing default + :param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|") + :param mark_spaces: str - (optional) special character to display in place of spaces + :param mark_control: str - (optional) convert non-printing control characters to a placeholding + character; valid values: + - "unicode" - replaces control chars with Unicode symbols, such as "␍" and "␊" + - any single character string - replace control characters with given string + - None (default) - string is displayed as-is + + :return: str - input string with leading line numbers and column number headers + """ + if expand_tabs: + s = s.expandtabs() + if mark_control is not None: + if mark_control == "unicode": + tbl = str.maketrans( + {c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433))} + | {127: 0x2421} + ) + eol_mark = "" + else: + tbl = str.maketrans( + {c: mark_control for c in list(range(0, 32)) + [127]} + ) + s = s.translate(tbl) + if mark_spaces is not None and mark_spaces != " ": + if mark_spaces == "unicode": + tbl = str.maketrans({9: 0x2409, 32: 0x2423}) + s = s.translate(tbl) + else: + s = s.replace(" ", mark_spaces) + if start_line is None: + start_line = 1 + if end_line is None: + end_line = len(s) + end_line = min(end_line, len(s)) + start_line = min(max(1, start_line), end_line) + + if mark_control != "unicode": + s_lines = s.splitlines()[start_line - 1 : end_line] + else: + s_lines = [line + "␊" for line in s.split("␊")[start_line - 1 : end_line]] + if not s_lines: + return "" + + lineno_width = len(str(end_line)) + max_line_len = max(len(line) for line in s_lines) + lead = " " * (lineno_width + 1) + if max_line_len >= 99: + header0 = ( + lead + + "".join( + "{}{}".format(" " * 99, (i + 1) % 100) + for i in range(max(max_line_len // 100, 1)) + ) + + "\n" + ) + else: + header0 = "" + header1 = ( + header0 + + lead + + "".join( + " {}".format((i + 1) % 10) + for i in range(-(-max_line_len // 10)) + ) + + "\n" + ) + header2 = lead + "1234567890" * (-(-max_line_len // 10)) + "\n" + return ( + header1 + + header2 + + "\n".join( + "{:{}d}:{}{}".format(i, lineno_width, line, eol_mark) + for i, line in enumerate(s_lines, start=start_line) + ) + + "\n" + ) diff --git a/pipenv/vendor/pyparsing/unicode.py b/pipenv/vendor/pyparsing/unicode.py new file mode 100644 index 0000000000..92261487c7 --- /dev/null +++ b/pipenv/vendor/pyparsing/unicode.py @@ -0,0 +1,332 @@ +# unicode.py + +import sys +from itertools import filterfalse +from typing import List, Tuple, Union + + +class _lazyclassproperty: + def __init__(self, fn): + self.fn = fn + self.__doc__ = fn.__doc__ + self.__name__ = fn.__name__ + + def __get__(self, obj, cls): + if cls is None: + cls = type(obj) + if not hasattr(cls, "_intern") or any( + cls._intern is getattr(superclass, "_intern", []) + for superclass in cls.__mro__[1:] + ): + cls._intern = {} + attrname = self.fn.__name__ + if attrname not in cls._intern: + cls._intern[attrname] = self.fn(cls) + return cls._intern[attrname] + + +UnicodeRangeList = List[Union[Tuple[int, int], Tuple[int]]] + + +class unicode_set: + """ + A set of Unicode characters, for language-specific strings for + ``alphas``, ``nums``, 
``alphanums``, and ``printables``. + A unicode_set is defined by a list of ranges in the Unicode character + set, in a class attribute ``_ranges``. Ranges can be specified using + 2-tuples or a 1-tuple, such as:: + + _ranges = [ + (0x0020, 0x007e), + (0x00a0, 0x00ff), + (0x0100,), + ] + + Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x). + + A unicode set can also be defined using multiple inheritance of other unicode sets:: + + class CJK(Chinese, Japanese, Korean): + pass + """ + + _ranges: UnicodeRangeList = [] + + @_lazyclassproperty + def _chars_for_ranges(cls): + ret = [] + for cc in cls.__mro__: + if cc is unicode_set: + break + for rr in getattr(cc, "_ranges", ()): + ret.extend(range(rr[0], rr[-1] + 1)) + return [chr(c) for c in sorted(set(ret))] + + @_lazyclassproperty + def printables(cls): + "all non-whitespace characters in this range" + return "".join(filterfalse(str.isspace, cls._chars_for_ranges)) + + @_lazyclassproperty + def alphas(cls): + "all alphabetic characters in this range" + return "".join(filter(str.isalpha, cls._chars_for_ranges)) + + @_lazyclassproperty + def nums(cls): + "all numeric digit characters in this range" + return "".join(filter(str.isdigit, cls._chars_for_ranges)) + + @_lazyclassproperty + def alphanums(cls): + "all alphanumeric characters in this range" + return cls.alphas + cls.nums + + @_lazyclassproperty + def identchars(cls): + "all characters in this range that are valid identifier characters, plus underscore '_'" + return "".join( + sorted( + set( + "".join(filter(str.isidentifier, cls._chars_for_ranges)) + + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº" + + "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ" + + "_" + ) + ) + ) + + @_lazyclassproperty + def identbodychars(cls): + """ + all characters in this range that are valid identifier body characters, + plus the digits 0-9 + """ + return "".join( + sorted( + set( + cls.identchars + + "0123456789" + + "".join( + [c for c in cls._chars_for_ranges if ("_" + c).isidentifier()] + ) + ) + ) + ) + + +class pyparsing_unicode(unicode_set): + """ + A namespace class for defining common language unicode_sets. 
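+
+ Example (an illustrative sketch, not part of the original docstring)::
+
+ greek_word = Word(pyparsing_unicode.Greek.alphas)
+ greek_word.parse_string("αβγ") # -> ['αβγ']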
+ """ + + _ranges: UnicodeRangeList = [(32, sys.maxunicode)] + + class Latin1(unicode_set): + "Unicode set for Latin-1 Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x0020, 0x007E), + (0x00A0, 0x00FF), + ] + + class LatinA(unicode_set): + "Unicode set for Latin-A Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x0100, 0x017F), + ] + + class LatinB(unicode_set): + "Unicode set for Latin-B Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x0180, 0x024F), + ] + + class Greek(unicode_set): + "Unicode set for Greek Unicode Character Ranges" + _ranges: UnicodeRangeList = [ + (0x0342, 0x0345), + (0x0370, 0x0377), + (0x037A, 0x037F), + (0x0384, 0x038A), + (0x038C,), + (0x038E, 0x03A1), + (0x03A3, 0x03E1), + (0x03F0, 0x03FF), + (0x1D26, 0x1D2A), + (0x1D5E,), + (0x1D60,), + (0x1D66, 0x1D6A), + (0x1F00, 0x1F15), + (0x1F18, 0x1F1D), + (0x1F20, 0x1F45), + (0x1F48, 0x1F4D), + (0x1F50, 0x1F57), + (0x1F59,), + (0x1F5B,), + (0x1F5D,), + (0x1F5F, 0x1F7D), + (0x1F80, 0x1FB4), + (0x1FB6, 0x1FC4), + (0x1FC6, 0x1FD3), + (0x1FD6, 0x1FDB), + (0x1FDD, 0x1FEF), + (0x1FF2, 0x1FF4), + (0x1FF6, 0x1FFE), + (0x2129,), + (0x2719, 0x271A), + (0xAB65,), + (0x10140, 0x1018D), + (0x101A0,), + (0x1D200, 0x1D245), + (0x1F7A1, 0x1F7A7), + ] + + class Cyrillic(unicode_set): + "Unicode set for Cyrillic Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x0400, 0x052F), + (0x1C80, 0x1C88), + (0x1D2B,), + (0x1D78,), + (0x2DE0, 0x2DFF), + (0xA640, 0xA672), + (0xA674, 0xA69F), + (0xFE2E, 0xFE2F), + ] + + class Chinese(unicode_set): + "Unicode set for Chinese Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x2E80, 0x2E99), + (0x2E9B, 0x2EF3), + (0x31C0, 0x31E3), + (0x3400, 0x4DB5), + (0x4E00, 0x9FEF), + (0xA700, 0xA707), + (0xF900, 0xFA6D), + (0xFA70, 0xFAD9), + (0x16FE2, 0x16FE3), + (0x1F210, 0x1F212), + (0x1F214, 0x1F23B), + (0x1F240, 0x1F248), + (0x20000, 0x2A6D6), + (0x2A700, 0x2B734), + (0x2B740, 0x2B81D), + (0x2B820, 0x2CEA1), + (0x2CEB0, 0x2EBE0), + (0x2F800, 0x2FA1D), + ] + + class Japanese(unicode_set): + "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges" + _ranges: UnicodeRangeList = [] + + class Kanji(unicode_set): + "Unicode set for Kanji Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x4E00, 0x9FBF), + (0x3000, 0x303F), + ] + + class Hiragana(unicode_set): + "Unicode set for Hiragana Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x3041, 0x3096), + (0x3099, 0x30A0), + (0x30FC,), + (0xFF70,), + (0x1B001,), + (0x1B150, 0x1B152), + (0x1F200,), + ] + + class Katakana(unicode_set): + "Unicode set for Katakana Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x3099, 0x309C), + (0x30A0, 0x30FF), + (0x31F0, 0x31FF), + (0x32D0, 0x32FE), + (0xFF65, 0xFF9F), + (0x1B000,), + (0x1B164, 0x1B167), + (0x1F201, 0x1F202), + (0x1F213,), + ] + + class Hangul(unicode_set): + "Unicode set for Hangul (Korean) Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x1100, 0x11FF), + (0x302E, 0x302F), + (0x3131, 0x318E), + (0x3200, 0x321C), + (0x3260, 0x327B), + (0x327E,), + (0xA960, 0xA97C), + (0xAC00, 0xD7A3), + (0xD7B0, 0xD7C6), + (0xD7CB, 0xD7FB), + (0xFFA0, 0xFFBE), + (0xFFC2, 0xFFC7), + (0xFFCA, 0xFFCF), + (0xFFD2, 0xFFD7), + (0xFFDA, 0xFFDC), + ] + + Korean = Hangul + + class CJK(Chinese, Japanese, Hangul): + "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range" + pass + + class Thai(unicode_set): + "Unicode set for Thai Unicode Character Range" + _ranges: 
UnicodeRangeList = [(0x0E01, 0x0E3A), (0x0E3F, 0x0E5B)]
+
+ class Arabic(unicode_set):
+ "Unicode set for Arabic Unicode Character Range"
+ _ranges: UnicodeRangeList = [
+ (0x0600, 0x061B),
+ (0x061E, 0x06FF),
+ (0x0700, 0x077F),
+ ]
+
+ class Hebrew(unicode_set):
+ "Unicode set for Hebrew Unicode Character Range"
+ _ranges: UnicodeRangeList = [
+ (0x0591, 0x05C7),
+ (0x05D0, 0x05EA),
+ (0x05EF, 0x05F4),
+ (0xFB1D, 0xFB36),
+ (0xFB38, 0xFB3C),
+ (0xFB3E,),
+ (0xFB40, 0xFB41),
+ (0xFB43, 0xFB44),
+ (0xFB46, 0xFB4F),
+ ]
+
+ class Devanagari(unicode_set):
+ "Unicode set for Devanagari Unicode Character Range"
+ _ranges: UnicodeRangeList = [(0x0900, 0x097F), (0xA8E0, 0xA8FF)]
+
+
+pyparsing_unicode.Japanese._ranges = (
+ pyparsing_unicode.Japanese.Kanji._ranges
+ + pyparsing_unicode.Japanese.Hiragana._ranges
+ + pyparsing_unicode.Japanese.Katakana._ranges
+)
+
+# define ranges in language character sets
+pyparsing_unicode.العربية = pyparsing_unicode.Arabic
+pyparsing_unicode.中文 = pyparsing_unicode.Chinese
+pyparsing_unicode.кириллица = pyparsing_unicode.Cyrillic
+pyparsing_unicode.Ελληνικά = pyparsing_unicode.Greek
+pyparsing_unicode.עִברִית = pyparsing_unicode.Hebrew
+pyparsing_unicode.日本語 = pyparsing_unicode.Japanese
+pyparsing_unicode.Japanese.漢字 = pyparsing_unicode.Japanese.Kanji
+pyparsing_unicode.Japanese.カタカナ = pyparsing_unicode.Japanese.Katakana
+pyparsing_unicode.Japanese.ひらがな = pyparsing_unicode.Japanese.Hiragana
+pyparsing_unicode.한국어 = pyparsing_unicode.Korean
+pyparsing_unicode.ไทย = pyparsing_unicode.Thai
+pyparsing_unicode.देवनागरी = pyparsing_unicode.Devanagari
diff --git a/pipenv/vendor/pyparsing/util.py b/pipenv/vendor/pyparsing/util.py
new file mode 100644
index 0000000000..34ce092c6d
--- /dev/null
+++ b/pipenv/vendor/pyparsing/util.py
@@ -0,0 +1,235 @@
+# util.py
+import warnings
+import types
+import collections
+import itertools
+from functools import lru_cache
+from typing import List, Union, Iterable
+
+_bslash = chr(92)
+
+
+class __config_flags:
+ """Internal class for defining compatibility and debugging flags"""
+
+ _all_names: List[str] = []
+ _fixed_names: List[str] = []
+ _type_desc = "configuration"
+
+ @classmethod
+ def _set(cls, dname, value):
+ if dname in cls._fixed_names:
+ warnings.warn(
+ "{}.{} {} is {} and cannot be overridden".format(
+ cls.__name__,
+ dname,
+ cls._type_desc,
+ str(getattr(cls, dname)).upper(),
+ )
+ )
+ return
+ if dname in cls._all_names:
+ setattr(cls, dname, value)
+ else:
+ raise ValueError("no such {} {!r}".format(cls._type_desc, dname))
+
+ enable = classmethod(lambda cls, name: cls._set(name, True))
+ disable = classmethod(lambda cls, name: cls._set(name, False))
+
+
+@lru_cache(maxsize=128)
+def col(loc: int, strg: str) -> int:
+ """
+ Returns current column within a string, counting newlines as line separators.
+ The first column is number 1.
+
+ Note: the default parsing behavior is to expand tabs in the input string
+ before starting the parsing process. See
+ :class:`ParserElement.parseString` for more
+ information on parsing strings containing ``<TAB>`` s, and suggested
+ methods to maintain a consistent view of the parsed string, the parse
+ location, and line and column positions within the parsed string.
+ """
+ s = strg
+ return 1 if 0 < loc < len(s) and s[loc - 1] == "\n" else loc - s.rfind("\n", 0, loc)
+
+
+@lru_cache(maxsize=128)
+def lineno(loc: int, strg: str) -> int:
+ """Returns current line number within a string, counting newlines as line separators.
+ The first line is number 1.
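+
+ For example (an illustrative sketch): ``lineno(6, "ab\ncd\nef")`` returns ``3``,
+ while ``col(6, "ab\ncd\nef")`` returns ``1``.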
+
+    Note - the default parsing behavior is to expand tabs in the input string
+    before starting the parsing process.  See :class:`ParserElement.parseString`
+    for more information on parsing strings containing ``<TAB>`` s, and
+    suggested methods to maintain a consistent view of the parsed string, the
+    parse location, and line and column positions within the parsed string.
+    """
+    return strg.count("\n", 0, loc) + 1
+
+
+@lru_cache(maxsize=128)
+def line(loc: int, strg: str) -> str:
+    """
+    Returns the line of text containing loc within a string, counting newlines as line separators.
+    """
+    last_cr = strg.rfind("\n", 0, loc)
+    next_cr = strg.find("\n", loc)
+    return strg[last_cr + 1 : next_cr] if next_cr >= 0 else strg[last_cr + 1 :]
+
+
+class _UnboundedCache:
+    def __init__(self):
+        cache = {}
+        cache_get = cache.get
+        self.not_in_cache = not_in_cache = object()
+
+        def get(_, key):
+            return cache_get(key, not_in_cache)
+
+        def set_(_, key, value):
+            cache[key] = value
+
+        def clear(_):
+            cache.clear()
+
+        self.size = None
+        self.get = types.MethodType(get, self)
+        self.set = types.MethodType(set_, self)
+        self.clear = types.MethodType(clear, self)
+
+
+class _FifoCache:
+    def __init__(self, size):
+        self.not_in_cache = not_in_cache = object()
+        cache = collections.OrderedDict()
+        cache_get = cache.get
+
+        def get(_, key):
+            return cache_get(key, not_in_cache)
+
+        def set_(_, key, value):
+            cache[key] = value
+            while len(cache) > size:
+                cache.popitem(last=False)
+
+        def clear(_):
+            cache.clear()
+
+        self.size = size
+        self.get = types.MethodType(get, self)
+        self.set = types.MethodType(set_, self)
+        self.clear = types.MethodType(clear, self)
+
+
+class LRUMemo:
+    """
+    A memoizing mapping that retains `capacity` deleted items
+
+    The memo tracks retained items by their access order; once `capacity` items
+    are retained, the least recently used item is discarded.
+ """ + + def __init__(self, capacity): + self._capacity = capacity + self._active = {} + self._memory = collections.OrderedDict() + + def __getitem__(self, key): + try: + return self._active[key] + except KeyError: + self._memory.move_to_end(key) + return self._memory[key] + + def __setitem__(self, key, value): + self._memory.pop(key, None) + self._active[key] = value + + def __delitem__(self, key): + try: + value = self._active.pop(key) + except KeyError: + pass + else: + while len(self._memory) >= self._capacity: + self._memory.popitem(last=False) + self._memory[key] = value + + def clear(self): + self._active.clear() + self._memory.clear() + + +class UnboundedMemo(dict): + """ + A memoizing mapping that retains all deleted items + """ + + def __delitem__(self, key): + pass + + +def _escape_regex_range_chars(s: str) -> str: + # escape these chars: ^-[] + for c in r"\^-[]": + s = s.replace(c, _bslash + c) + s = s.replace("\n", r"\n") + s = s.replace("\t", r"\t") + return str(s) + + +def _collapse_string_to_ranges( + s: Union[str, Iterable[str]], re_escape: bool = True +) -> str: + def is_consecutive(c): + c_int = ord(c) + is_consecutive.prev, prev = c_int, is_consecutive.prev + if c_int - prev > 1: + is_consecutive.value = next(is_consecutive.counter) + return is_consecutive.value + + is_consecutive.prev = 0 + is_consecutive.counter = itertools.count() + is_consecutive.value = -1 + + def escape_re_range_char(c): + return "\\" + c if c in r"\^-][" else c + + def no_escape_re_range_char(c): + return c + + if not re_escape: + escape_re_range_char = no_escape_re_range_char + + ret = [] + s = "".join(sorted(set(s))) + if len(s) > 3: + for _, chars in itertools.groupby(s, key=is_consecutive): + first = last = next(chars) + last = collections.deque( + itertools.chain(iter([last]), chars), maxlen=1 + ).pop() + if first == last: + ret.append(escape_re_range_char(first)) + else: + sep = "" if ord(last) == ord(first) + 1 else "-" + ret.append( + "{}{}{}".format( + escape_re_range_char(first), sep, escape_re_range_char(last) + ) + ) + else: + ret = [escape_re_range_char(c) for c in s] + + return "".join(ret) + + +def _flatten(ll: list) -> list: + ret = [] + for i in ll: + if isinstance(i, list): + ret.extend(_flatten(i)) + else: + ret.append(i) + return ret diff --git a/pipenv/vendor/requirementslib/models/vcs.py b/pipenv/vendor/requirementslib/models/vcs.py index 5933623dfc..6a60b3ca91 100644 --- a/pipenv/vendor/requirementslib/models/vcs.py +++ b/pipenv/vendor/requirementslib/models/vcs.py @@ -73,7 +73,7 @@ def obtain(self): if lt_pip_19_2: self.repo_backend.obtain(self.checkout_directory) else: - self.repo_backend.obtain(self.checkout_directory, self.parsed_url) + self.repo_backend.obtain(self.checkout_directory, self.parsed_url, 1) else: if self.ref: self.checkout_ref(self.ref) diff --git a/pipenv/vendor/vendor.txt b/pipenv/vendor/vendor.txt index 95aa65ebd4..2def591286 100644 --- a/pipenv/vendor/vendor.txt +++ b/pipenv/vendor/vendor.txt @@ -18,17 +18,17 @@ iso8601==0.1.16 markupsafe==2.0.1 more-itertools==8.8.0 orderedmultidict==1.0.1 -packaging==21.0 +packaging==21.3 parse==1.19.0 pep517==0.11.0 pexpect==4.8.0 pip-shims==0.6.0 -pipdeptree==2.0.0 +pipdeptree==2.2.1 pipreqs==0.4.10 platformdirs==2.4.0 plette[validation]==0.2.3 ptyprocess==0.7.0 -pyparsing==2.4.7 +pyparsing==3.0.7 python-dateutil==2.8.2 python-dotenv==0.19.0 pythonfinder==1.2.10 diff --git a/tasks/vendoring/__init__.py b/tasks/vendoring/__init__.py index d5d27417d3..2b4caca769 100644 --- a/tasks/vendoring/__init__.py +++ 
diff --git a/pipenv/vendor/requirementslib/models/vcs.py b/pipenv/vendor/requirementslib/models/vcs.py
index 5933623dfc..6a60b3ca91 100644
--- a/pipenv/vendor/requirementslib/models/vcs.py
+++ b/pipenv/vendor/requirementslib/models/vcs.py
@@ -73,7 +73,7 @@ def obtain(self):
             if lt_pip_19_2:
                 self.repo_backend.obtain(self.checkout_directory)
             else:
-                self.repo_backend.obtain(self.checkout_directory, self.parsed_url)
+                self.repo_backend.obtain(self.checkout_directory, self.parsed_url, 1)
         else:
             if self.ref:
                 self.checkout_ref(self.ref)
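[Note, not part of the patch: the bare ``1`` added above corresponds to the ``verbosity`` parameter that pip 22.x introduced as a third positional argument of ``VersionControl.obtain``. A keyword form (sketch, assuming that signature) makes the intent clearer:

    # pip >= 22.0: VersionControl.obtain(dest, url, verbosity)
    self.repo_backend.obtain(self.checkout_directory, self.parsed_url, verbosity=1)
]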
diff --git a/pipenv/vendor/vendor.txt b/pipenv/vendor/vendor.txt
index 95aa65ebd4..2def591286 100644
--- a/pipenv/vendor/vendor.txt
+++ b/pipenv/vendor/vendor.txt
@@ -18,17 +18,17 @@ iso8601==0.1.16
 markupsafe==2.0.1
 more-itertools==8.8.0
 orderedmultidict==1.0.1
-packaging==21.0
+packaging==21.3
 parse==1.19.0
 pep517==0.11.0
 pexpect==4.8.0
 pip-shims==0.6.0
-pipdeptree==2.0.0
+pipdeptree==2.2.1
 pipreqs==0.4.10
 platformdirs==2.4.0
 plette[validation]==0.2.3
 ptyprocess==0.7.0
-pyparsing==2.4.7
+pyparsing==3.0.7
 python-dateutil==2.8.2
 python-dotenv==0.19.0
 pythonfinder==1.2.10
diff --git a/tasks/vendoring/__init__.py b/tasks/vendoring/__init__.py
index d5d27417d3..2b4caca769 100644
--- a/tasks/vendoring/__init__.py
+++ b/tasks/vendoring/__init__.py
@@ -45,6 +45,7 @@ HARDCODED_LICENSE_URLS = {
     "cursor": "https://raw.githubusercontent.com/GijsTimmers/cursor/master/LICENSE",
     "delegator.py": "https://raw.githubusercontent.com/amitt001/delegator.py/master/LICENSE",
+    "CacheControl": "https://raw.githubusercontent.com/ionrock/cachecontrol/master/LICENSE.txt",
     "click-didyoumean": "https://raw.githubusercontent.com/click-contrib/click-didyoumean/master/LICENSE",
     "click-completion": "https://raw.githubusercontent.com/click-contrib/click-completion/master/LICENSE",
     "parse": "https://raw.githubusercontent.com/techalchemy/parse/master/LICENSE",
diff --git a/tasks/vendoring/patches/patched/_post_pip_import.patch b/tasks/vendoring/patches/patched/_post_pip_import.patch
index 26cd89af3c..2f6ae7d037 100644
--- a/tasks/vendoring/patches/patched/_post_pip_import.patch
+++ b/tasks/vendoring/patches/patched/_post_pip_import.patch
@@ -14,8 +14,16 @@ diff --git a/pipenv/patched/notpip/_internal/resolution/resolvelib/candidates.py
 index 0ba06c52..6fdb59b7 100644
 --- a/pipenv/patched/notpip/_internal/resolution/resolvelib/candidates.py
 +++ b/pipenv/patched/notpip/_internal/resolution/resolvelib/candidates.py
+@@ -2,6 +2,7 @@ import logging
+ import sys
+ from typing import TYPE_CHECKING, Any, FrozenSet, Iterable, Optional, Tuple, Union, cast
+
++from pipenv.patched.notpip._vendor.packaging.specifiers import SpecifierSet
+ from pipenv.patched.notpip._vendor.packaging.utils import NormalizedName, canonicalize_name
+ from pipenv.patched.notpip._vendor.packaging.version import Version
+
 @@ -253,7 +253,10 @@ class _InstallRequirementBackedCandidate(Candidate):
-         yield self._get_requires_python_dependency()
+         yield self._factory.make_requires_python_requirement(self.dist.requires_python)
 
      def get_install_requirement(self) -> Optional[InstallRequirement]:
-         return self._ireq
diff --git a/tasks/vendoring/patches/patched/pip21.patch b/tasks/vendoring/patches/patched/pip22.patch
similarity index 51%
rename from tasks/vendoring/patches/patched/pip21.patch
rename to tasks/vendoring/patches/patched/pip22.patch
index f700a99a4c..4823626608 100644
--- a/tasks/vendoring/patches/patched/pip21.patch
+++ b/tasks/vendoring/patches/patched/pip22.patch
@@ -1,111 +1,126 @@
-diff --git a/pipenv/patched/pip/_internal/build_env.py b/pipenv/patched/pip/_internal/build_env.py
-index de98163d..a5a99042 100644
+diff --git a/pipenv/patched/pip/_internal/build_env.py b/src/pip/_internal/build_env.py
+index daeb7fbc8..88482908b 100644
 --- a/pipenv/patched/pip/_internal/build_env.py
 +++ b/pipenv/patched/pip/_internal/build_env.py
-@@ -223,8 +223,9 @@ class BuildEnvironment:
-         prefix: _Prefix,
-         message: str,
+@@ -216,8 +216,9 @@ class BuildEnvironment:
+         *,
+         kind: str,
      ) -> None:
 +        sys_executable = os.environ.get('PIP_PYTHON_PATH', sys.executable)
-         args = [
--            sys.executable, pip_runnable, 'install',
-+            sys_executable, pip_runnable, 'install',
-             '--ignore-installed', '--no-user', '--prefix', prefix.path,
-             '--no-warn-script-location',
-         ]  # type: List[str]
+         args: List[str] = [
+-            sys.executable,
++            sys_executable,
+             pip_runnable,
+             "install",
+             "--ignore-installed",
-diff --git a/pipenv/patched/pip/_internal/commands/__init__.py b/pipenv/patched/pip/_internal/commands/__init__.py
-index 8e94b38f..eb236d70 100644
+diff --git a/pipenv/patched/pip/_internal/commands/__init__.py b/src/pip/_internal/commands/__init__.py
+index c72f24f30..df5defb16 100644
 --- a/pipenv/patched/pip/_internal/commands/__init__.py
 +++ b/pipenv/patched/pip/_internal/commands/__init__.py
-@@ -20,67 +20,67 @@ CommandInfo = namedtuple('CommandInfo', 'module_path, class_name, summary')
- # so that the ordering won't be lost when using Python 2.7.
- commands_dict: Dict[str, CommandInfo] = OrderedDict([
-     ('install', CommandInfo(
--        'pip._internal.commands.install', 'InstallCommand',
-+        'pipenv.patched.notpip._internal.commands.install', 'InstallCommand',
-         'Install packages.',
-     )),
-     ('download', CommandInfo(
--        'pip._internal.commands.download', 'DownloadCommand',
-+        'pipenv.patched.notpip._internal.commands.download', 'DownloadCommand',
-         'Download packages.',
-     )),
-     ('uninstall', CommandInfo(
--        'pip._internal.commands.uninstall', 'UninstallCommand',
-+        'pipenv.patched.notpip._internal.commands.uninstall', 'UninstallCommand',
-         'Uninstall packages.',
-     )),
-     ('freeze', CommandInfo(
--        'pip._internal.commands.freeze', 'FreezeCommand',
-+        'pipenv.patched.notpip._internal.commands.freeze', 'FreezeCommand',
-         'Output installed packages in requirements format.',
-     )),
-     ('list', CommandInfo(
--        'pip._internal.commands.list', 'ListCommand',
-+        'pipenv.patched.notpip._internal.commands.list', 'ListCommand',
-         'List installed packages.',
-     )),
-     ('show', CommandInfo(
--        'pip._internal.commands.show', 'ShowCommand',
-+        'pipenv.patched.notpip._internal.commands.show', 'ShowCommand',
-         'Show information about installed packages.',
-     )),
-     ('check', CommandInfo(
--        'pip._internal.commands.check', 'CheckCommand',
-+        'pipenv.patched.notpip._internal.commands.check', 'CheckCommand',
-         'Verify installed packages have compatible dependencies.',
-     )),
-     ('config', CommandInfo(
--        'pip._internal.commands.configuration', 'ConfigurationCommand',
-+        'pipenv.patched.notpip._internal.commands.configuration', 'ConfigurationCommand',
-         'Manage local and global configuration.',
-     )),
-     ('search', CommandInfo(
--        'pip._internal.commands.search', 'SearchCommand',
-+        'pipenv.patched.notpip._internal.commands.search', 'SearchCommand',
-         'Search PyPI for packages.',
-     )),
-     ('cache', CommandInfo(
--        'pip._internal.commands.cache', 'CacheCommand',
-+        'pipenv.patched.notpip._internal.commands.cache', 'CacheCommand',
+@@ -19,82 +19,82 @@ CommandInfo = namedtuple("CommandInfo", "module_path, class_name, summary")
+ # `commands_dict` in test setup / teardown).
+ commands_dict: Dict[str, CommandInfo] = {
+     "install": CommandInfo(
+-        "pip._internal.commands.install",
++        "pipenv.patched.notpip._internal.commands.install",
+         "InstallCommand",
+         "Install packages.",
+     ),
+     "download": CommandInfo(
+-        "pip._internal.commands.download",
++        "pipenv.patched.notpip._internal.commands.download",
+         "DownloadCommand",
+         "Download packages.",
+     ),
+     "uninstall": CommandInfo(
+-        "pip._internal.commands.uninstall",
++        "pipenv.patched.notpip._internal.commands.uninstall",
+         "UninstallCommand",
+         "Uninstall packages.",
+     ),
+     "freeze": CommandInfo(
+-        "pip._internal.commands.freeze",
++        "pipenv.patched.notpip._internal.commands.freeze",
+         "FreezeCommand",
+         "Output installed packages in requirements format.",
+     ),
+     "list": CommandInfo(
+-        "pip._internal.commands.list",
++        "pipenv.patched.notpip._internal.commands.list",
+         "ListCommand",
+         "List installed packages.",
+     ),
+     "show": CommandInfo(
+-        "pip._internal.commands.show",
++        "pipenv.patched.notpip._internal.commands.show",
+         "ShowCommand",
+         "Show information about installed packages.",
+     ),
+     "check": CommandInfo(
+-        "pip._internal.commands.check",
++        "pipenv.patched.notpip._internal.commands.check",
+         "CheckCommand",
+         "Verify installed packages have compatible dependencies.",
+     ),
+     "config": CommandInfo(
+-        "pip._internal.commands.configuration",
++        "pipenv.patched.notpip._internal.commands.configuration",
+         "ConfigurationCommand",
+         "Manage local and global configuration.",
+     ),
+     "search": CommandInfo(
+-        "pip._internal.commands.search",
++        "pipenv.patched.notpip._internal.commands.search",
+         "SearchCommand",
+         "Search PyPI for packages.",
+     ),
+     "cache": CommandInfo(
+-        "pip._internal.commands.cache",
++        "pipenv.patched.notpip._internal.commands.cache",
+         "CacheCommand",
+         "Inspect and manage pip's wheel cache.",
+     ),
+     "index": CommandInfo(
+-        "pip._internal.commands.index",
++        "pipenv.patched.notpip._internal.commands.index",
+         "IndexCommand",
+         "Inspect information available from package indexes.",
+     ),
+     "wheel": CommandInfo(
+-        "pip._internal.commands.wheel",
++        "pipenv.patched.notpip._internal.commands.wheel",
+         "WheelCommand",
+         "Build wheels from your requirements.",
+     ),
+     "hash": CommandInfo(
+-        "pip._internal.commands.hash",
++ "pipenv.patched.notpip._internal.commands.hash", + "HashCommand", + "Compute hashes of package archives.", + ), + "completion": CommandInfo( +- "pip._internal.commands.completion", ++ "pipenv.patched.notpip._internal.commands.completion", + "CompletionCommand", + "A helper command used for command completion.", + ), + "debug": CommandInfo( +- "pip._internal.commands.debug", ++ "pipenv.patched.notpip._internal.commands.debug", + "DebugCommand", + "Show information useful for debugging.", + ), + "help": CommandInfo( +- "pip._internal.commands.help", ++ "pipenv.patched.notpip._internal.commands.help", + "HelpCommand", + "Show help for commands.", + ), +diff --git a/pipenv/patched/pip/_internal/index/package_finder.py b/src/pip/_internal/index/package_finder.py +index 223d06df6..dd5283c7f 100644 --- a/pipenv/patched/pip/_internal/index/package_finder.py +++ b/pipenv/patched/pip/_internal/index/package_finder.py -@@ -112,6 +112,7 @@ class LinkEvaluator: +@@ -114,6 +114,7 @@ class LinkEvaluator: target_python: TargetPython, allow_yanked: bool, ignore_requires_python: Optional[bool] = None, @@ -113,7 +128,7 @@ index 2dadb5ae..31769869 100644 ) -> None: """ :param project_name: The user supplied package name. -@@ -129,15 +130,20 @@ class LinkEvaluator: +@@ -131,9 +132,13 @@ class LinkEvaluator: :param ignore_requires_python: Whether to ignore incompatible PEP 503 "data-requires-python" values in HTML links. Defaults to False. @@ -124,41 +139,41 @@ index 2dadb5ae..31769869 100644 ignore_requires_python = False + if ignore_compatibility is None: + ignore_compatibility = True - + self._allow_yanked = allow_yanked self._canonical_name = canonical_name - self._ignore_requires_python = ignore_requires_python - self._formats = formats +@@ -142,6 +147,7 @@ class LinkEvaluator: self._target_python = target_python -+ self._ignore_compatibility = ignore_compatibility - + self.project_name = project_name - -@@ -164,11 +170,11 @@ class LinkEvaluator: - return (False, 'not a file') ++ self._ignore_compatibility = ignore_compatibility + + def evaluate_link(self, link: Link) -> Tuple[bool, Optional[str]]: + """ +@@ -166,10 +172,10 @@ class LinkEvaluator: + return (False, "not a file") if ext not in SUPPORTED_EXTENSIONS: - return (False, f'unsupported archive format: {ext}') + return (False, f"unsupported archive format: {ext}") - if "binary" not in self._formats and ext == WHEEL_EXTENSION: + if "binary" not in self._formats and ext == WHEEL_EXTENSION and not self._ignore_compatibility: - reason = 'No binaries permitted for {}'.format( - self.project_name) + reason = "No binaries permitted for {}".format(self.project_name) return (False, reason) -- if "macosx10" in link.path and ext == '.zip': +- if "macosx10" in link.path and ext == ".zip": + if "macosx10" in link.path and ext == '.zip' and not self._ignore_compatibility: - return (False, 'macosx10 one') + return (False, "macosx10 one") if ext == WHEEL_EXTENSION: try: @@ -181,7 +187,7 @@ class LinkEvaluator: return (False, reason) - + supported_tags = self._target_python.get_tags() - if not wheel.supported(supported_tags): + if not wheel.supported(supported_tags) and not self._ignore_compatibility: # Include the wheel's tags in the reason string to # simplify troubleshooting compatibility issues. 
             file_tags = wheel.get_formatted_file_tags()
-@@ -219,7 +225,7 @@ class LinkEvaluator:
-            link, version_info=self._target_python.py_version_info,
+@@ -221,7 +227,7 @@ class LinkEvaluator:
+            version_info=self._target_python.py_version_info,
             ignore_requires_python=self._ignore_requires_python,
         )
-        if not supports_python:
@@ -166,10 +181,10 @@ index 2dadb5ae..31769869 100644
             # Return None for the reason text to suppress calling
             # _log_skipped_link().
             return (False, None)
-@@ -470,7 +476,10 @@ class CandidateEvaluator:
+@@ -469,7 +475,10 @@ class CandidateEvaluator:
 
         return sorted(filtered_applicable_candidates, key=self._sort_key)
 
-    def _sort_key(self, candidate: InstallationCandidate) -> CandidateSortingKey:
+    def _sort_key(
+        self, candidate: InstallationCandidate,
@@ -178,9 +193,9 @@ index 2dadb5ae..31769869 100644
         """
         Function to pass as the `key` argument to a call to sorted() to sort
         InstallationCandidates by preference.
-@@ -513,10 +522,12 @@ class CandidateEvaluator:
+@@ -514,10 +523,13 @@ class CandidateEvaluator:
                 valid_tags, self._wheel_tag_preferences
             )
         )
         except ValueError:
-            raise UnsupportedWheel(
-                "{} is not a supported wheel for this platform. It "
@@ -192,10 +207,11 @@ index 2dadb5ae..31769869 100644
+                "can't be sorted.".format(wheel.filename)
+            )
+            pri = -(support_num)
++
         if self._prefer_binary:
             binary_preference = 1
         if wheel.build_tag is not None:
-@@ -578,6 +589,7 @@ class PackageFinder:
+@@ -584,6 +596,7 @@ class PackageFinder:
         format_control: Optional[FormatControl] = None,
         candidate_prefs: Optional[CandidatePreferences] = None,
         ignore_requires_python: Optional[bool] = None,
@@ -203,41 +219,41 @@ index 2dadb5ae..31769869 100644
     ) -> None:
         """
         This constructor is primarily meant to be used by the create() class
-@@ -599,6 +611,7 @@ class PackageFinder:
+@@ -605,6 +618,7 @@ class PackageFinder:
         self._ignore_requires_python = ignore_requires_python
         self._link_collector = link_collector
         self._target_python = target_python
+        self._ignore_compatibility = ignore_compatibility
 
         self._use_deprecated_html5lib = use_deprecated_html5lib
+        self.format_control = format_control
-@@ -691,6 +704,7 @@ class PackageFinder:
+@@ -701,6 +715,7 @@ class PackageFinder:
             target_python=self._target_python,
             allow_yanked=self._allow_yanked,
             ignore_requires_python=self._ignore_requires_python,
+            ignore_compatibility=self._ignore_compatibility
         )
 
     def _sort_links(self, links: Iterable[Link]) -> List[Link]:
-diff --git a/pipenv/patched/pip/_internal/req/req_install.py b/pipenv/patched/pip/_internal/req/req_install.py
-index 6a42a510..318578e0 100644
+diff --git a/pipenv/patched/pip/_internal/req/req_install.py b/src/pip/_internal/req/req_install.py
+index 02dbda194..72ab5c1dc 100644
 --- a/pipenv/patched/pip/_internal/req/req_install.py
 +++ b/pipenv/patched/pip/_internal/req/req_install.py
-@@ -488,6 +488,7 @@ class InstallRequirement:
-        self.pyproject_requires = requires
-        self.pep517_backend = Pep517HookCaller(
-            self.unpacked_source_directory, backend, backend_path=backend_path,
+@@ -474,6 +474,7 @@ class InstallRequirement:
+            self.unpacked_source_directory,
+            backend,
+            backend_path=backend_path,
+            python_executable=os.getenv('PIP_PYTHON_PATH', sys.executable)
         )
 
-    def _generate_metadata(self) -> str:
+    def isolated_editable_sanity_check(self) -> None:
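[Note, not part of the patch: the ``PIP_PYTHON_PATH`` lookups threaded through this patch all follow one pattern. Wherever pip would launch ``sys.executable``, the patched copy first consults an environment variable so pipenv can point the subprocess at the virtualenv's interpreter instead. Reduced to its core (illustrative sketch, not a line from the patch):

    import os
    import sys

    # Prefer the interpreter pipenv selected; fall back to the running one.
    sys_executable = os.environ.get("PIP_PYTHON_PATH", sys.executable)
]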
-diff --git a/pipenv/patched/pip/_vendor/requests/packages.py b/pipenv/patched/pip/_vendor/requests/packages.py
-index 9582fa73..5fb6f07d 100644
+diff --git a/pipenv/patched/pip/_vendor/requests/packages.py b/src/pip/_vendor/requests/packages.py
+index 9582fa730..5fb6f07df 100644
 --- a/pipenv/patched/pip/_vendor/requests/packages.py
 +++ b/pipenv/patched/pip/_vendor/requests/packages.py
 @@ -4,13 +4,13 @@ import sys
  # I don't like it either. Just look the other way. :)
 
  for package in ('urllib3', 'idna', 'chardet'):
-     vendored_package = "pip._vendor." + package
+     vendored_package = "pipenv.patched.notpip._vendor." + package
@@ -250,5 +266,5 @@ index 9582fa73..5fb6f07d 100644
-         sys.modules['pip._vendor.requests.packages.' + unprefixed_mod] = sys.modules[mod]
+         unprefixed_mod = mod[len("pipenv.patched.notpip._vendor."):]
+         sys.modules['pipenv.patched.notpip._vendor.requests.packages.' + unprefixed_mod] = sys.modules[mod]
 
  # Kinda cool, though, right?
diff --git a/tasks/vendoring/patches/vendor/pipdeptree-update-pip-import.patch b/tasks/vendoring/patches/vendor/pipdeptree-update-pip-import.patch
index 810d674fb9..a136f3961a 100644
--- a/tasks/vendoring/patches/vendor/pipdeptree-update-pip-import.patch
+++ b/tasks/vendoring/patches/vendor/pipdeptree-update-pip-import.patch
@@ -2,21 +2,21 @@ diff --git a/pipenv/vendor/pipdeptree.py b/pipenv/vendor/pipdeptree.py
 index e15cd8f4..9f692879 100644
 --- a/pipenv/vendor/pipdeptree.py
 +++ b/pipenv/vendor/pipdeptree.py
-@@ -21,13 +21,11 @@ try:
+@@ -20,11 +20,12 @@ try:
  except ImportError:
      from collections import Mapping
 
+-from pipenv.patched.notpip._vendor import pkg_resources
 -try:
--    from pipenv.patched.notpip._internal.utils.misc import get_installed_distributions
-     from pipenv.patched.notpip._internal.operations.freeze import FrozenRequirement
 -except ImportError:
--    from pipenv.patched.notpip import get_installed_distributions, FrozenRequirement
+-    from pipenv.patched.notpip import FrozenRequirement
++import pkg_resources
++
 +pardir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 +sys.path.append(pardir)
 +from pipenv.vendor.pip_shims.shims import get_installed_distributions, FrozenRequirement
--
--from pipenv.patched.notpip._vendor import pkg_resources
-+import pkg_resources
++
  # inline:
- # from graphviz import backend, Digraph
-
+ # from graphviz import Digraph
+ # from graphviz import parameters

' : '\U0001d4ab',
+    '\\' : '\U0001d4ac',
+    '\\' : '\U0000211b',
+    '\\' : '\U0001d4ae',
+    '\\' : '\U0001d4af',
+    '\\' : '\U0001d4b0',
+    '\\' : '\U0001d4b1',
+    '\\' : '\U0001d4b2',
+    '\\' : '\U0001d4b3',
+    '\\' : '\U0001d4b4',
+    '\\' : '\U0001d4b5',
+    '\\' : '\U0001d5ba',
+    '\\' : '\U0001d5bb',
+    '\\' : '\U0001d5bc',
+    '\\' : '\U0001d5bd',
+    '\\' : '\U0001d5be',
+    '\\' : '\U0001d5bf',
+    '\\' : '\U0001d5c0',
+    '\\' : '\U0001d5c1',
+    '\\' : '\U0001d5c2',
+    '\\' : '\U0001d5c3',
+    '\\' : '\U0001d5c4',
+    '\\' : '\U0001d5c5',
+    '\\' : '\U0001d5c6',
+    '\\' : '\U0001d5c7',
+    '\\' : '\U0001d5c8',
+    '\\