diff --git a/conda_lock/_vendor/cleo.LICENSE b/conda_lock/_vendor/cleo.LICENSE deleted file mode 100644 index 3f0aed7f..00000000 --- a/conda_lock/_vendor/cleo.LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2013 Sébastien Eustace - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/conda_lock/_vendor/cleo.pyi b/conda_lock/_vendor/cleo.pyi deleted file mode 100644 index da1bd180..00000000 --- a/conda_lock/_vendor/cleo.pyi +++ /dev/null @@ -1 +0,0 @@ -from cleo import * \ No newline at end of file diff --git a/conda_lock/_vendor/cleo/__init__.py b/conda_lock/_vendor/cleo/__init__.py new file mode 100644 index 00000000..86e16da9 --- /dev/null +++ b/conda_lock/_vendor/cleo/__init__.py @@ -0,0 +1,4 @@ +from __future__ import annotations + + +__version__ = "2.1.0" diff --git a/conda_lock/_vendor/cleo/_compat.py b/conda_lock/_vendor/cleo/_compat.py new file mode 100644 index 00000000..82636122 --- /dev/null +++ b/conda_lock/_vendor/cleo/_compat.py @@ -0,0 +1,15 @@ +from __future__ import annotations + +import shlex +import subprocess +import sys + + +WINDOWS = sys.platform == "win32" + + +def shell_quote(token: str) -> str: + if WINDOWS: + return subprocess.list2cmdline([token]) + + return shlex.quote(token) diff --git a/conda_lock/_vendor/cleo/_utils.py b/conda_lock/_vendor/cleo/_utils.py new file mode 100644 index 00000000..ece2e5cb --- /dev/null +++ b/conda_lock/_vendor/cleo/_utils.py @@ -0,0 +1,110 @@ +from __future__ import annotations + +import math + +from dataclasses import dataclass +from html.parser import HTMLParser + +from rapidfuzz.distance import Levenshtein + + +class TagStripper(HTMLParser): + def __init__(self) -> None: + super().__init__(convert_charrefs=False) + + self.reset() + self.fed: list[str] = [] + + def handle_data(self, d: str) -> None: + self.fed.append(d) + + def handle_entityref(self, name: str) -> None: + self.fed.append(f"&{name};") + + def handle_charref(self, name: str) -> None: + self.fed.append(f"&#{name};") + + def get_data(self) -> str: + return "".join(self.fed) + + +def _strip(value: str) -> str: + s = TagStripper() + s.feed(value) + s.close() + + return s.get_data() + + +def strip_tags(value: str) -> str: + while "<" in value and ">" in value: + new_value = _strip(value) + if value.count("<") == new_value.count("<"): + break + + value = new_value + + return value + + +def find_similar_names(name: str, names: list[str]) -> list[str]: + """ + Finds names similar to a given command name. 
+ """ + threshold = 1e3 + distance_by_name = {} + + for actual_name in names: + # Get Levenshtein distance between the input and each command name + distance = Levenshtein.distance(name, actual_name) + + is_similar = distance <= len(name) / 3 + substring_index = actual_name.find(name) + is_substring = substring_index != -1 + + if is_similar or is_substring: + distance_by_name[actual_name] = ( + distance, + substring_index if is_substring else float("inf"), + ) + + # Only keep results with a distance below the threshold + distance_by_name = { + key: value + for key, value in distance_by_name.items() + if value[0] < 2 * threshold + } + # Display results with shortest distance first + return sorted(distance_by_name, key=lambda key: distance_by_name[key]) + + +@dataclass +class TimeFormat: + threshold: int + alias: str + divisor: int | None = None + + def apply(self, secs: float) -> str: + if self.divisor: + return f"{math.ceil(secs / self.divisor)} {self.alias}" + return self.alias + + +_TIME_FORMATS: list[TimeFormat] = [ + TimeFormat(1, "< 1 sec"), + TimeFormat(2, "1 sec"), + TimeFormat(60, "secs", 1), + TimeFormat(61, "1 min"), + TimeFormat(3600, "mins", 60), + TimeFormat(5401, "1 hr"), + TimeFormat(86400, "hrs", 3600), + TimeFormat(129601, "1 day"), + TimeFormat(604801, "days", 86400), +] + + +def format_time(secs: float) -> str: + time_format = next( + (fmt for fmt in _TIME_FORMATS if secs < fmt.threshold), _TIME_FORMATS[-1] + ) + return time_format.apply(secs) diff --git a/conda_lock/_vendor/cleo/application.py b/conda_lock/_vendor/cleo/application.py new file mode 100644 index 00000000..1a90f543 --- /dev/null +++ b/conda_lock/_vendor/cleo/application.py @@ -0,0 +1,641 @@ +from __future__ import annotations + +import os +import re +import sys + +from contextlib import suppress +from typing import TYPE_CHECKING +from typing import cast + +from conda_lock._vendor.cleo.commands.completions_command import CompletionsCommand +from conda_lock._vendor.cleo.commands.help_command import HelpCommand +from conda_lock._vendor.cleo.commands.list_command import ListCommand +from conda_lock._vendor.cleo.events.console_command_event import ConsoleCommandEvent +from conda_lock._vendor.cleo.events.console_error_event import ConsoleErrorEvent +from conda_lock._vendor.cleo.events.console_events import COMMAND +from conda_lock._vendor.cleo.events.console_events import ERROR +from conda_lock._vendor.cleo.events.console_events import TERMINATE +from conda_lock._vendor.cleo.events.console_terminate_event import ConsoleTerminateEvent +from conda_lock._vendor.cleo.exceptions import CleoCommandNotFoundError +from conda_lock._vendor.cleo.exceptions import CleoError +from conda_lock._vendor.cleo.exceptions import CleoLogicError +from conda_lock._vendor.cleo.exceptions import CleoNamespaceNotFoundError +from conda_lock._vendor.cleo.exceptions import CleoUserError +from conda_lock._vendor.cleo.io.inputs.argument import Argument +from conda_lock._vendor.cleo.io.inputs.argv_input import ArgvInput +from conda_lock._vendor.cleo.io.inputs.definition import Definition +from conda_lock._vendor.cleo.io.inputs.option import Option +from conda_lock._vendor.cleo.io.io import IO +from conda_lock._vendor.cleo.io.outputs.output import Verbosity +from conda_lock._vendor.cleo.io.outputs.stream_output import StreamOutput +from conda_lock._vendor.cleo.terminal import Terminal +from conda_lock._vendor.cleo.ui.ui import UI + + +if TYPE_CHECKING: + from crashtest.solution_providers.solution_provider_repository import ( + 
SolutionProviderRepository, + ) + + from conda_lock._vendor.cleo.commands.command import Command + from conda_lock._vendor.cleo.events.event_dispatcher import EventDispatcher + from conda_lock._vendor.cleo.io.inputs.input import Input + from conda_lock._vendor.cleo.io.outputs.output import Output + from conda_lock._vendor.cleo.loaders.command_loader import CommandLoader + + +class Application: + """ + An Application is the container for a collection of commands. + + This class is optimized for a standard CLI environment. + + Usage: + >>> app = Application('myapp', '1.0 (stable)') + >>> app.add(Command()) + >>> app.run() + """ + + def __init__(self, name: str = "console", version: str = "") -> None: + self._name = name + self._version = version + self._display_name: str | None = None + self._terminal = Terminal().size + self._default_command = "list" + self._single_command = False + self._commands: dict[str, Command] = {} + self._running_command: Command | None = None + self._want_helps = False + self._definition: Definition | None = None + self._catch_exceptions = True + self._auto_exit = True + self._initialized = False + self._ui: UI | None = None + + # TODO: signals support + self._event_dispatcher: EventDispatcher | None = None + + self._command_loader: CommandLoader | None = None + + self._solution_provider_repository: SolutionProviderRepository | None = None + + @property + def name(self) -> str: + return self._name + + @property + def display_name(self) -> str: + if self._display_name is None: + return re.sub(r"[\s\-_]+", " ", self._name).title() + + return self._display_name + + @property + def version(self) -> str: + return self._version + + @property + def long_version(self) -> str: + if self._name: + if self._version: + return f"{self.display_name} (version {self._version})" + + return f"{self.display_name}" + + return "Console application" + + @property + def definition(self) -> Definition: + if self._definition is None: + self._definition = self._default_definition + + if self._single_command: + definition = self._definition + definition.set_arguments([]) + + return definition + + return self._definition + + @property + def default_commands(self) -> list[Command]: + return [HelpCommand(), ListCommand(), CompletionsCommand()] + + @property + def help(self) -> str: + return self.long_version + + @property + def ui(self) -> UI: + if self._ui is None: + self._ui = self._get_default_ui() + + return self._ui + + @property + def event_dispatcher(self) -> EventDispatcher | None: + return self._event_dispatcher + + def set_event_dispatcher(self, event_dispatcher: EventDispatcher) -> None: + self._event_dispatcher = event_dispatcher + + def set_name(self, name: str) -> None: + self._name = name + + def set_display_name(self, display_name: str) -> None: + self._display_name = display_name + + def set_version(self, version: str) -> None: + self._version = version + + def set_ui(self, ui: UI) -> None: + self._ui = ui + + def set_command_loader(self, command_loader: CommandLoader) -> None: + self._command_loader = command_loader + + def auto_exits(self, auto_exits: bool = True) -> None: + self._auto_exit = auto_exits + + def is_auto_exit_enabled(self) -> bool: + return self._auto_exit + + def are_exceptions_caught(self) -> bool: + return self._catch_exceptions + + def catch_exceptions(self, catch_exceptions: bool = True) -> None: + self._catch_exceptions = catch_exceptions + + def is_single_command(self) -> bool: + return self._single_command + + def set_solution_provider_repository( + 
self, solution_provider_repository: SolutionProviderRepository + ) -> None: + self._solution_provider_repository = solution_provider_repository + + def add(self, command: Command) -> Command | None: + self._init() + + command.set_application(self) + + if not command.enabled: + command.set_application() + + return None + + if not command.name: + raise CleoLogicError( + f'The command "{command.__class__.__name__}" cannot have an empty name' + ) + + self._commands[command.name] = command + + for alias in command.aliases: + self._commands[alias] = command + + return command + + def get(self, name: str) -> Command: + self._init() + + if not self.has(name): + raise CleoCommandNotFoundError(name) + + if name not in self._commands: + # The command was registered in a different name in the command loader + raise CleoCommandNotFoundError(name) + + command = self._commands[name] + + if self._want_helps: + self._want_helps = False + + help_command: HelpCommand = cast(HelpCommand, self.get("help")) + help_command.set_command(command) + + return help_command + + return command + + def has(self, name: str) -> bool: + self._init() + + if name in self._commands: + return True + + if not self._command_loader: + return False + + return bool( + self._command_loader.has(name) and self.add(self._command_loader.get(name)) + ) + + def get_namespaces(self) -> list[str]: + namespaces = [] + seen = set() + + for command in self.all().values(): + if command.hidden or not command.name: + continue + + for namespace in self._extract_all_namespaces(command.name): + if namespace in seen: + continue + + namespaces.append(namespace) + seen.add(namespace) + + for alias in command.aliases: + for namespace in self._extract_all_namespaces(alias): + if namespace in seen: + continue + + namespaces.append(namespace) + seen.add(namespace) + + return namespaces + + def find_namespace(self, namespace: str) -> str: + all_namespaces = self.get_namespaces() + + if namespace not in all_namespaces: + raise CleoNamespaceNotFoundError(namespace, all_namespaces) + + return namespace + + def find(self, name: str) -> Command: + self._init() + + if self.has(name): + return self.get(name) + + all_commands = [] + if self._command_loader: + all_commands += self._command_loader.names + + all_commands += [ + name for name, command in self._commands.items() if not command.hidden + ] + + raise CleoCommandNotFoundError(name, all_commands) + + def all(self, namespace: str | None = None) -> dict[str, Command]: + self._init() + + if namespace is None: + commands = self._commands.copy() + if not self._command_loader: + return commands + + for name in self._command_loader.names: + if name not in commands and self.has(name): + commands[name] = self.get(name) + + return commands + + commands = {} + + for name, command in self._commands.items(): + if namespace == self.extract_namespace(name, name.count(" ") + 1): + commands[name] = command + + if self._command_loader: + for name in self._command_loader.names: + if ( + name not in commands + and namespace == self.extract_namespace(name, name.count(" ") + 1) + and self.has(name) + ): + commands[name] = self.get(name) + + return commands + + def run( + self, + input: Input | None = None, + output: Output | None = None, + error_output: Output | None = None, + ) -> int: + try: + io = self.create_io(input, output, error_output) + + self._configure_io(io) + + try: + exit_code = self._run(io) + except BrokenPipeError: + # If we are piped to another process, it may close early and send a + # SIGPIPE: 
https://docs.python.org/3/library/signal.html#note-on-sigpipe + devnull = os.open(os.devnull, os.O_WRONLY) + os.dup2(devnull, sys.stdout.fileno()) + exit_code = 0 + except Exception as e: + if not self._catch_exceptions: + raise + + self.render_error(e, io) + + exit_code = 1 + # TODO: Custom error exit codes + except KeyboardInterrupt: + exit_code = 1 + + if self._auto_exit: + sys.exit(exit_code) + + return exit_code + + def _run(self, io: IO) -> int: + if io.input.has_parameter_option(["--version", "-V"], True): + io.write_line(self.long_version) + + return 0 + + definition = self.definition + input_definition = Definition() + for argument in definition.arguments: + if argument.name == "command": + argument = Argument( + "command", + required=True, + is_list=True, + description=definition.argument("command").description, + ) + + input_definition.add_argument(argument) + + input_definition.set_options(definition.options) + + # Errors must be ignored, full binding/validation + # happens later when the command is known. + with suppress(CleoError): + # Makes ArgvInput.first_argument() able to + # distinguish an option from an argument. + io.input.bind(input_definition) + + name = self._get_command_name(io) + if io.input.has_parameter_option(["--help", "-h"], True): + if not name: + name = "help" + io.set_input(ArgvInput(["console", "help", self._default_command])) + else: + self._want_helps = True + + if not name: + name = self._default_command + definition = self.definition + arguments = definition.arguments + if not definition.has_argument("command"): + arguments.append( + Argument( + "command", + required=False, + description=definition.argument("command").description, + default=name, + ) + ) + definition.set_arguments(arguments) + + self._running_command = None + command = self.find(name) + + self._running_command = command + + if " " in name and isinstance(io.input, ArgvInput): + # If the command is namespaced we rearrange + # the input to parse it as a single argument + argv = io.input._tokens[:] + + if io.input.script_name is not None: + argv.insert(0, io.input.script_name) + + namespace = name.split(" ")[0] + index = None + for i, arg in enumerate(argv): + if arg == namespace and i > 0: + argv[i] = name + index = i + break + + if index is not None: + del argv[index + 1 : index + 1 + name.count(" ")] + + stream = io.input.stream + interactive = io.input.is_interactive() + io.set_input(ArgvInput(argv)) + io.input.set_stream(stream) + io.input.interactive(interactive) + + exit_code = self._run_command(command, io) + self._running_command = None + + return exit_code + + def _run_command(self, command: Command, io: IO) -> int: + if self._event_dispatcher is None: + return command.run(io) + + # Bind before the console.command event, + # so the listeners have access to the arguments and options + try: + command.merge_application_definition() + io.input.bind(command.definition) + except CleoError: + # Ignore invalid option/arguments for now, + # to allow the listeners to customize the definition + pass + + command_event = ConsoleCommandEvent(command, io) + error = None + + try: + self._event_dispatcher.dispatch(command_event, COMMAND) + + if command_event.command_should_run(): + exit_code = command.run(io) + else: + exit_code = ConsoleCommandEvent.RETURN_CODE_DISABLED + except Exception as e: + error_event = ConsoleErrorEvent(command, io, e) + self._event_dispatcher.dispatch(error_event, ERROR) + error = error_event.error + exit_code = error_event.exit_code + + if exit_code == 0: + error = 
None + + terminate_event = ConsoleTerminateEvent(command, io, exit_code) + self._event_dispatcher.dispatch(terminate_event, TERMINATE) + + if error is not None: + raise error + + return terminate_event.exit_code + + def create_io( + self, + input: Input | None = None, + output: Output | None = None, + error_output: Output | None = None, + ) -> IO: + if input is None: + input = ArgvInput() + input.set_stream(sys.stdin) + + if output is None: + output = StreamOutput(sys.stdout) + + if error_output is None: + error_output = StreamOutput(sys.stderr) + + return IO(input, output, error_output) + + def render_error(self, error: Exception, io: IO) -> None: + from conda_lock._vendor.cleo.ui.exception_trace import ExceptionTrace + + trace = ExceptionTrace( + error, solution_provider_repository=self._solution_provider_repository + ) + simple = not io.is_verbose() or isinstance(error, CleoUserError) + trace.render(io.error_output, simple) + + def _configure_io(self, io: IO) -> None: + if io.input.has_parameter_option("--ansi", True): + io.decorated(True) + elif io.input.has_parameter_option("--no-ansi", True): + io.decorated(False) + + if io.input.has_parameter_option(["--no-interaction", "-n"], True) or ( + io.input._interactive is None + and io.input.stream + and not io.input.stream.isatty() + ): + io.interactive(False) + + shell_verbosity = int(os.getenv("SHELL_VERBOSITY", 0)) + if shell_verbosity == -1: + io.set_verbosity(Verbosity.QUIET) + elif shell_verbosity == 1: + io.set_verbosity(Verbosity.VERBOSE) + elif shell_verbosity == 2: + io.set_verbosity(Verbosity.VERY_VERBOSE) + elif shell_verbosity == 3: + io.set_verbosity(Verbosity.DEBUG) + else: + shell_verbosity = 0 + + if io.input.has_parameter_option(["--quiet", "-q"], True): + io.set_verbosity(Verbosity.QUIET) + shell_verbosity = -1 + else: + if io.input.has_parameter_option("-vvv", True): + io.set_verbosity(Verbosity.DEBUG) + shell_verbosity = 3 + elif io.input.has_parameter_option("-vv", True): + io.set_verbosity(Verbosity.VERY_VERBOSE) + shell_verbosity = 2 + elif io.input.has_parameter_option( + "-v", True + ) or io.input.has_parameter_option("--verbose", only_params=True): + io.set_verbosity(Verbosity.VERBOSE) + shell_verbosity = 1 + + if shell_verbosity == -1: + io.interactive(False) + + @property + def _default_definition(self) -> Definition: + return Definition( + [ + Argument( + "command", + required=True, + description="The command to execute.", + ), + Option( + "--help", + "-h", + flag=True, + description=( + "Display help for the given command. " + "When no command is given display help for " + f"the {self._default_command} command." + ), + ), + Option( + "--quiet", "-q", flag=True, description="Do not output any message." + ), + Option( + "--verbose", + "-v|vv|vvv", + flag=True, + description=( + "Increase the verbosity of messages: " + "1 for normal output, 2 for more verbose " + "output and 3 for debug." 
+ ), + ), + Option( + "--version", + "-V", + flag=True, + description="Display this application version.", + ), + Option("--ansi", flag=True, description="Force ANSI output."), + Option("--no-ansi", flag=True, description="Disable ANSI output."), + Option( + "--no-interaction", + "-n", + flag=True, + description="Do not ask any interactive question.", + ), + ] + ) + + def _get_command_name(self, io: IO) -> str | None: + if self._single_command: + return self._default_command + + if "command" in io.input.arguments and io.input.argument("command"): + candidates: list[str] = [] + for command_part in io.input.argument("command"): + if candidates: + candidates.append(candidates[-1] + " " + command_part) + else: + candidates.append(command_part) + + for candidate in reversed(candidates): + if self.has(candidate): + return candidate + + return io.input.first_argument + + def extract_namespace(self, name: str, limit: int | None = None) -> str: + parts = name.split(" ")[:-1] + return " ".join(parts[:limit]) + + def _get_default_ui(self) -> UI: + from conda_lock._vendor.cleo.ui.progress_bar import ProgressBar + + io = self.create_io() + return UI([ProgressBar(io)]) + + def _extract_all_namespaces(self, name: str) -> list[str]: + parts = name.split(" ")[:-1] + namespaces: list[str] = [] + + for part in parts: + namespaces.append(namespaces[-1] + " " + part if namespaces else part) + + return namespaces + + def _init(self) -> None: + if self._initialized: + return + + self._initialized = True + + for command in self.default_commands: + self.add(command) diff --git a/conda_lock/_vendor/cleo/color.py b/conda_lock/_vendor/cleo/color.py new file mode 100644 index 00000000..6b88d4fe --- /dev/null +++ b/conda_lock/_vendor/cleo/color.py @@ -0,0 +1,148 @@ +from __future__ import annotations + +import os + +from typing import ClassVar + +from conda_lock._vendor.cleo.exceptions import CleoValueError + + +class Color: + COLORS: ClassVar[dict[str, tuple[int, int]]] = { + "black": (30, 40), + "red": (31, 41), + "green": (32, 42), + "yellow": (33, 43), + "blue": (34, 44), + "magenta": (35, 45), + "cyan": (36, 46), + "light_gray": (37, 47), + "default": (39, 49), + "dark_gray": (90, 100), + "light_red": (91, 101), + "light_green": (92, 102), + "light_yellow": (93, 103), + "light_blue": (94, 104), + "light_magenta": (95, 105), + "light_cyan": (96, 106), + "white": (97, 107), + } + + AVAILABLE_OPTIONS: ClassVar[dict[str, dict[str, int]]] = { + "bold": {"set": 1, "unset": 22}, + "dark": {"set": 2, "unset": 22}, + "italic": {"set": 3, "unset": 23}, + "underline": {"set": 4, "unset": 24}, + "blink": {"set": 5, "unset": 25}, + "reverse": {"set": 7, "unset": 27}, + "conceal": {"set": 8, "unset": 28}, + } + + def __init__( + self, + foreground: str = "", + background: str = "", + options: list[str] | None = None, + ) -> None: + self._foreground = self._parse_color(foreground, False) + self._background = self._parse_color(background, True) + + self._options = {} + for option in options or []: + if option not in self.AVAILABLE_OPTIONS: + raise ValueError( + f'"{option}" is not a valid color option. 
' + f"It must be one of {', '.join(self.AVAILABLE_OPTIONS)}" + ) + + self._options[option] = self.AVAILABLE_OPTIONS[option] + + def apply(self, text: str) -> str: + return self.set() + text + self.unset() + + def set(self) -> str: + codes = [] + + if self._foreground: + codes.append(self._foreground) + + if self._background: + codes.append(self._background) + + for option in self._options.values(): + codes.append(str(option["set"])) + + if not codes: + return "" + + return f"\033[{';'.join(codes)}m" + + def unset(self) -> str: + codes = [] + + if self._foreground: + codes.append("39") + + if self._background: + codes.append("49") + + for option in self._options.values(): + codes.append(str(option["unset"])) + + if not codes: + return "" + + return f"\033[{';'.join(codes)}m" + + def _parse_color(self, color: str, background: bool) -> str: + if not color: + return "" + + if color.startswith("#"): + color = color[1:] + + if len(color) == 3: + color = color[0] * 2 + color[1] * 2 + color[2] * 2 + + if len(color) != 6: + raise CleoValueError(f'"{color}" is an invalid color') + + return ("4" if background else "3") + self._convert_hex_color_to_ansi( + int(color, 16) + ) + + if color not in self.COLORS: + raise CleoValueError( + f'"{color}" is an invalid color.' + f" It must be one of {', '.join(self.COLORS)}" + ) + + return str(self.COLORS[color][int(background)]) + + def _convert_hex_color_to_ansi(self, color: int) -> str: + r = (color >> 16) & 255 + g = (color >> 8) & 255 + b = color & 255 + + if os.getenv("COLORTERM") != "truecolor": + return str(self._degrade_hex_color_to_ansi(r, g, b)) + + return f"8;2;{r};{g};{b}" + + def _degrade_hex_color_to_ansi(self, r: int, g: int, b: int) -> int: + if round(self._get_saturation(r, g, b) / 50) == 0: + return 0 + + return (round(b / 255) << 2) | (round(g / 255) << 1) | round(r / 255) + + def _get_saturation(self, r: int, g: int, b: int) -> int: + r_float = r / 255 + g_float = g / 255 + b_float = b / 255 + v = max(r_float, g_float, b_float) + + diff = v - min(r_float, g_float, b_float) + if diff == 0: + return 0 + + return int(diff * 100 / v) diff --git a/conda_lock/_vendor/poetry/io/__init__.py b/conda_lock/_vendor/cleo/commands/__init__.py similarity index 100% rename from conda_lock/_vendor/poetry/io/__init__.py rename to conda_lock/_vendor/cleo/commands/__init__.py diff --git a/conda_lock/_vendor/cleo/commands/base_command.py b/conda_lock/_vendor/cleo/commands/base_command.py new file mode 100644 index 00000000..f54dd7cf --- /dev/null +++ b/conda_lock/_vendor/cleo/commands/base_command.py @@ -0,0 +1,139 @@ +from __future__ import annotations + +import inspect + +from typing import TYPE_CHECKING +from typing import ClassVar + +from conda_lock._vendor.cleo.exceptions import CleoError +from conda_lock._vendor.cleo.io.inputs.definition import Definition + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.application import Application + from conda_lock._vendor.cleo.io.io import IO + + +class BaseCommand: + name: str | None = None + + description = "" + + help = "" + + enabled = True + hidden = False + + usages: ClassVar[list[str]] = [] + + def __init__(self) -> None: + self._definition = Definition() + self._full_definition: Definition | None = None + self._application: Application | None = None + self._ignore_validation_errors = False + self._synopsis: dict[str, str] = {} + + self.configure() + + for i, usage in enumerate(self.usages): + if self.name and not usage.startswith(self.name): + self.usages[i] = f"{self.name} {usage}" + + @property + def 
application(self) -> Application | None: + return self._application + + @property + def definition(self) -> Definition: + if self._full_definition is not None: + return self._full_definition + + return self._definition + + @property + def processed_help(self) -> str: + help_text = self.help + if not self.help: + help_text = self.description + + is_single_command = self._application and self._application.is_single_command() + + if self._application: + current_script = self._application.name + else: + current_script = inspect.stack()[-1][1] + + return help_text.format( + command_name=self.name, + command_full_name=current_script + if is_single_command + else f"{current_script} {self.name}", + script_name=current_script, + ) + + def ignore_validation_errors(self) -> None: + self._ignore_validation_errors = True + + def set_application(self, application: Application | None = None) -> None: + self._application = application + + self._full_definition = None + + def configure(self) -> None: + """ + Configures the current command. + """ + + def execute(self, io: IO) -> int: + raise NotImplementedError + + def interact(self, io: IO) -> None: + """ + Interacts with the user. + """ + + def initialize(self, io: IO) -> None: + pass + + def run(self, io: IO) -> int: + self.merge_application_definition() + + try: + io.input.bind(self.definition) + except CleoError: + if not self._ignore_validation_errors: + raise + + self.initialize(io) + + if io.is_interactive(): + self.interact(io) + + if io.input.has_argument("command") and io.input.argument("command") is None: + io.input.set_argument("command", self.name) + + io.input.validate() + + return self.execute(io) or 0 + + def merge_application_definition(self, merge_args: bool = True) -> None: + if self._application is None: + return + + self._full_definition = Definition() + self._full_definition.add_options(self._definition.options) + self._full_definition.add_options(self._application.definition.options) + + if merge_args: + self._full_definition.set_arguments(self._application.definition.arguments) + self._full_definition.add_arguments(self._definition.arguments) + else: + self._full_definition.set_arguments(self._definition.arguments) + + def synopsis(self, short: bool = False) -> str: + key = "short" if short else "long" + + if key not in self._synopsis: + self._synopsis[key] = f"{self.name} {self.definition.synopsis(short)}" + + return self._synopsis[key] diff --git a/conda_lock/_vendor/cleo/commands/command.py b/conda_lock/_vendor/cleo/commands/command.py new file mode 100644 index 00000000..196dae1b --- /dev/null +++ b/conda_lock/_vendor/cleo/commands/command.py @@ -0,0 +1,336 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Any +from typing import ClassVar +from typing import ContextManager +from typing import cast + +from conda_lock._vendor.cleo.commands.base_command import BaseCommand +from conda_lock._vendor.cleo.formatters.style import Style +from conda_lock._vendor.cleo.io.inputs.string_input import StringInput +from conda_lock._vendor.cleo.io.null_io import NullIO +from conda_lock._vendor.cleo.io.outputs.output import Verbosity +from conda_lock._vendor.cleo.ui.table_separator import TableSeparator + + +if TYPE_CHECKING: + import sys + + if sys.version_info >= (3, 8): + from typing import Literal + else: + from typing_extensions import Literal + + from conda_lock._vendor.cleo.io.inputs.argument import Argument + from conda_lock._vendor.cleo.io.inputs.option import Option + from 
conda_lock._vendor.cleo.io.io import IO
+    from conda_lock._vendor.cleo.ui.progress_bar import ProgressBar
+    from conda_lock._vendor.cleo.ui.progress_indicator import ProgressIndicator
+    from conda_lock._vendor.cleo.ui.question import Question
+    from conda_lock._vendor.cleo.ui.table import Rows
+    from conda_lock._vendor.cleo.ui.table import Table
+
+
+class Command(BaseCommand):
+    arguments: ClassVar[list[Argument]] = []
+    options: ClassVar[list[Option]] = []
+    aliases: ClassVar[list[str]] = []
+    usages: ClassVar[list[str]] = []
+    commands: ClassVar[list[BaseCommand]] = []
+
+    def __init__(self) -> None:
+        self._io: IO = None  # type: ignore[assignment]
+        super().__init__()
+
+    @property
+    def io(self) -> IO:
+        return self._io
+
+    def configure(self) -> None:
+        for argument in self.arguments:
+            self._definition.add_argument(argument)
+
+        for option in self.options:
+            self._definition.add_option(option)
+
+    def execute(self, io: IO) -> int:
+        self._io = io
+
+        try:
+            return self.handle()
+        except KeyboardInterrupt:
+            return 1
+
+    def handle(self) -> int:
+        """
+        Execute the command.
+        """
+        raise NotImplementedError
+
+    def call(self, name: str, args: str | None = None) -> int:
+        """
+        Call another command.
+        """
+        assert self.application is not None
+        command = self.application.get(name)
+
+        return self.application._run_command(
+            command, self._io.with_input(StringInput(args or ""))
+        )
+
+    def call_silent(self, name: str, args: str | None = None) -> int:
+        """
+        Call another command silently.
+        """
+        assert self.application is not None
+        command = self.application.get(name)
+
+        return self.application._run_command(command, NullIO(StringInput(args or "")))
+
+    def argument(self, name: str) -> Any:
+        """
+        Get the value of a command argument.
+        """
+        return self._io.input.argument(name)
+
+    def option(self, name: str) -> Any:
+        """
+        Get the value of a command option.
+        """
+        return self._io.input.option(name)
+
+    def confirm(
+        self, question: str, default: bool = False, true_answer_regex: str = r"(?i)^y"
+    ) -> bool:
+        """
+        Confirm a question with the user.
+        """
+        from conda_lock._vendor.cleo.ui.confirmation_question import ConfirmationQuestion
+
+        confirmation = ConfirmationQuestion(
+            question, default=default, true_answer_regex=true_answer_regex
+        )
+        return cast(bool, confirmation.ask(self._io))
+
+    def ask(self, question: str | Question, default: Any | None = None) -> Any:
+        """
+        Prompt the user for input.
+        """
+        from conda_lock._vendor.cleo.ui.question import Question
+
+        if not isinstance(question, Question):
+            question = Question(question, default=default)
+
+        return question.ask(self._io)
+
+    def secret(self, question: str | Question, default: Any | None = None) -> Any:
+        """
+        Prompt the user for input but hide the answer from the console.
+        """
+        from conda_lock._vendor.cleo.ui.question import Question
+
+        if not isinstance(question, Question):
+            question = Question(question, default=default)
+
+        question.hide()
+
+        return question.ask(self._io)
+
+    def choice(
+        self,
+        question: str,
+        choices: list[str],
+        default: Any | None = None,
+        attempts: int | None = None,
+        multiple: bool = False,
+    ) -> Any:
+        """
+        Give the user a single choice from a list of answers.
+ """ + from conda_lock._vendor.cleo.ui.choice_question import ChoiceQuestion + + choice = ChoiceQuestion(question, choices, default) + + choice.set_max_attempts(attempts) + choice.set_multi_select(multiple) + + return choice.ask(self._io) + + def create_question( + self, + question: str, + type: Literal["choice", "confirmation"] | None = None, + **kwargs: Any, + ) -> Question: + """ + Returns a Question of specified type. + """ + from conda_lock._vendor.cleo.ui.choice_question import ChoiceQuestion + from conda_lock._vendor.cleo.ui.confirmation_question import ConfirmationQuestion + from conda_lock._vendor.cleo.ui.question import Question + + if type == "confirmation": + return ConfirmationQuestion(question, **kwargs) + + if type == "choice": + return ChoiceQuestion(question, **kwargs) + + return Question(question, **kwargs) + + def table( + self, + header: str | None = None, + rows: Rows | None = None, + style: str | None = None, + ) -> Table: + """ + Return a Table instance. + """ + from conda_lock._vendor.cleo.ui.table import Table + + table = Table(self._io, style=style) + + if header: + table.set_headers([header]) + + if rows: + table.set_rows(rows) + + return table + + def table_separator(self) -> TableSeparator: + """ + Return a TableSeparator instance. + """ + + return TableSeparator() + + def render_table(self, headers: str, rows: Rows, style: str | None = None) -> None: + """ + Format input to textual table. + """ + table = self.table(headers, rows, style) + + table.render() + + def write(self, text: str, style: str | None = None) -> None: + """ + Writes a string without a new line. + Useful if you want to use overwrite(). + """ + styled = f"<{style}>{text}" if style else text + + self._io.write(styled) + + def line( + self, + text: str, + style: str | None = None, + verbosity: Verbosity = Verbosity.NORMAL, + ) -> None: + """ + Write a string as information output. + """ + styled = f"<{style}>{text}" if style else text + + self._io.write_line(styled, verbosity=verbosity) + + def line_error( + self, + text: str, + style: str | None = None, + verbosity: Verbosity = Verbosity.NORMAL, + ) -> None: + """ + Write a string as information output to stderr. + """ + styled = f"<{style}>{text}" if style else text + + self._io.write_error_line(styled, verbosity) + + def info(self, text: str) -> None: + """ + Write a string as information output. + + :param text: The line to write + :type text: str + """ + self.line(text, "info") + + def comment(self, text: str) -> None: + """ + Write a string as comment output. + + :param text: The line to write + :type text: str + """ + self.line(text, "comment") + + def question(self, text: str) -> None: + """ + Write a string as question output. + + :param text: The line to write + :type text: str + """ + self.line(text, "question") + + def progress_bar(self, max: int = 0) -> ProgressBar: + """ + Creates a new progress bar + """ + from conda_lock._vendor.cleo.ui.progress_bar import ProgressBar + + return ProgressBar(self._io, max=max) + + def progress_indicator( + self, + fmt: str | None = None, + interval: int = 100, + values: list[str] | None = None, + ) -> ProgressIndicator: + """ + Creates a new progress indicator. 
+ """ + from conda_lock._vendor.cleo.ui.progress_indicator import ProgressIndicator + + return ProgressIndicator(self.io, fmt, interval, values) + + def spin( + self, + start_message: str, + end_message: str, + fmt: str | None = None, + interval: int = 100, + values: list[str] | None = None, + ) -> ContextManager[ProgressIndicator]: + """ + Automatically spin a progress indicator. + """ + spinner = self.progress_indicator(fmt, interval, values) + + return spinner.auto(start_message, end_message) + + def add_style( + self, + name: str, + fg: str | None = None, + bg: str | None = None, + options: list[str] | None = None, + ) -> None: + """ + Adds a new style + """ + style = Style(fg, bg, options) + self._io.output.formatter.set_style(name, style) + self._io.error_output.formatter.set_style(name, style) + + def overwrite(self, text: str) -> None: + """ + Overwrites the current line. + + It will not add a new line so use line('') + if necessary. + """ + self._io.overwrite(text) diff --git a/conda_lock/_vendor/poetry/core/_vendor/attr/py.typed b/conda_lock/_vendor/cleo/commands/completions/__init__.py similarity index 100% rename from conda_lock/_vendor/poetry/core/_vendor/attr/py.typed rename to conda_lock/_vendor/cleo/commands/completions/__init__.py diff --git a/conda_lock/_vendor/cleo/commands/completions/templates.py b/conda_lock/_vendor/cleo/commands/completions/templates.py new file mode 100644 index 00000000..7cf7c54e --- /dev/null +++ b/conda_lock/_vendor/cleo/commands/completions/templates.py @@ -0,0 +1,125 @@ +from __future__ import annotations + + +BASH_TEMPLATE = """\ +%(function)s() +{ + local cur script coms opts com + COMPREPLY=() + _get_comp_words_by_ref -n : cur words + + # for an alias, get the real script behind it + if [[ $(type -t ${words[0]}) == "alias" ]]; then + script=$(alias ${words[0]} | sed -E "s/alias ${words[0]}='(.*)'/\\1/") + else + script=${words[0]} + fi + + # lookup for command + for word in ${words[@]:1}; do + if [[ $word != -* ]]; then + com=$word + break + fi + done + + # completing for an option + if [[ ${cur} == --* ]] ; then + opts="%(opts)s" + + case "$com" in + +%(cmds_opts)s + + esac + + COMPREPLY=($(compgen -W "${opts}" -- ${cur})) + __ltrim_colon_completions "$cur" + + return 0; + fi + + # completing for a command + if [[ $cur == $com ]]; then + coms="%(cmds)s" + + COMPREPLY=($(compgen -W "${coms}" -- ${cur})) + __ltrim_colon_completions "$cur" + + return 0 + fi +} + +%(compdefs)s""" + +ZSH_TEMPLATE = """\ +#compdef %(script_name)s + +%(function)s() +{ + local state com cur + local -a opts + local -a coms + + cur=${words[${#words[@]}]} + + # lookup for command + for word in ${words[@]:1}; do + if [[ $word != -* ]]; then + com=$word + break + fi + done + + if [[ ${cur} == --* ]]; then + state="option" + opts+=(%(opts)s) + elif [[ $cur == $com ]]; then + state="command" + coms+=(%(cmds)s) + fi + + case $state in + (command) + _describe 'command' coms + ;; + (option) + case "$com" in + +%(cmds_opts)s + + esac + + _describe 'option' opts + ;; + *) + # fallback to file completion + _arguments '*:file:_files' + esac +} + +%(function)s "$@" +%(compdefs)s""" + +FISH_TEMPLATE = """\ +function __fish%(function)s_no_subcommand + for i in (commandline -opc) + if contains -- $i %(cmds_names)s + return 1 + end + end + return 0 +end + +# global options +%(opts)s + +# commands +%(cmds)s + +# command options + +%(cmds_opts)s""" + + +TEMPLATES = {"bash": BASH_TEMPLATE, "zsh": ZSH_TEMPLATE, "fish": FISH_TEMPLATE} diff --git 
a/conda_lock/_vendor/cleo/commands/completions_command.py b/conda_lock/_vendor/cleo/commands/completions_command.py
new file mode 100644
index 00000000..ab2e9c4b
--- /dev/null
+++ b/conda_lock/_vendor/cleo/commands/completions_command.py
@@ -0,0 +1,365 @@
+from __future__ import annotations
+
+import hashlib
+import inspect
+import os
+import posixpath
+import re
+import subprocess
+
+from pathlib import Path
+from typing import TYPE_CHECKING
+from typing import ClassVar
+from typing import cast
+
+from conda_lock._vendor.cleo import helpers
+from conda_lock._vendor.cleo._compat import shell_quote
+from conda_lock._vendor.cleo.commands.command import Command
+from conda_lock._vendor.cleo.commands.completions.templates import TEMPLATES
+from conda_lock._vendor.cleo.exceptions import CleoRuntimeError
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.cleo.io.inputs.argument import Argument
+    from conda_lock._vendor.cleo.io.inputs.option import Option
+
+
+class CompletionsCommand(Command):
+    name = "completions"
+    description = "Generate completion scripts for your shell."
+
+    arguments: ClassVar[list[Argument]] = [
+        helpers.argument(
+            "shell", "The shell to generate the scripts for.", optional=True
+        )
+    ]
+    options: ClassVar[list[Option]] = [
+        helpers.option(
+            "alias", None, "Alias for the current command.", flag=False, multiple=True
+        )
+    ]
+
+    SUPPORTED_SHELLS = ("bash", "zsh", "fish")
+
+    hidden = True
+
+    help = """
+One can generate a completion script for `{script_name}` \
+that is compatible with a given shell. The script is output on \
+`stdout`, allowing one to redirect \
+the output to the file of their choosing. Where you place the file will \
+depend on which shell and which operating system you are using. Your \
+particular configuration may also determine where these scripts need \
+to be placed.
+
+Here are some common setups for the three supported shells under \
+Unix and similar operating systems (such as GNU/Linux).
+
+BASH:
+
+Completion files are commonly stored in `/etc/bash_completion.d/`
+
+Run the command:
+
+`{script_name} {command_name} bash >\
+ /etc/bash_completion.d/{script_name}.bash-completion`
+
+This installs the completion script. You may have to log out and log \
+back in to your shell session for the changes to take effect.
+
+FISH:
+
+Fish completion files are commonly stored in\
+`$HOME/.config/fish/completions`
+
+Run the command:
+
+`{script_name} {command_name} fish > \
+~/.config/fish/completions/{script_name}.fish`
+
+This installs the completion script. You may have to log out and log \
+back in to your shell session for the changes to take effect.
+
+ZSH:
+
+ZSH completions are commonly stored in any directory listed in your \
+`$fpath` variable. To use these completions, you must either add the \
+generated script to one of those directories, or add your own \
+to this list.
+
+Adding a custom directory is often the safest bet if you're unsure \
+of which directory to use. First create the directory; for this \
+example we'll create a hidden directory inside our `$HOME` directory
+
+`mkdir ~/.zfunc`
+
+Then add the following lines to your `.zshrc` \
+just before `compinit`
+
+`fpath+=~/.zfunc`
+
+Now you can install the completions script using the following command
+
+`{script_name} {command_name} zsh > ~/.zfunc/_{script_name}`
+
+You must then either log out and log back in, or simply run
+
+`exec zsh`
+
+For the new completions to take effect.
+
+CUSTOM LOCATIONS:
+
+Alternatively, you could save these files to the place of your choosing, \
+such as a custom directory inside your $HOME. Doing so will require you \
+to add the proper directives, such as `source`ing inside your login \
+script. Consult your shell's documentation for how to add such directives.
+"""
+
+    def handle(self) -> int:
+        shell = self.argument("shell")
+        if not shell:
+            shell = self.get_shell_type()
+
+        if shell not in self.SUPPORTED_SHELLS:
+            raise ValueError(
+                f"[shell] argument must be one of {', '.join(self.SUPPORTED_SHELLS)}"
+            )
+
+        self.line(self.render(shell))
+
+        return 0
+
+    def render(self, shell: str) -> str:
+        if shell == "bash":
+            return self.render_bash()
+        if shell == "zsh":
+            return self.render_zsh()
+        if shell == "fish":
+            return self.render_fish()
+
+        raise RuntimeError(f"Unrecognized shell: {shell}")
+
+    @staticmethod
+    def _get_prog_name_from_stack() -> str:
+        package_name = ""
+        frame = inspect.currentframe()
+        f_back = frame.f_back if frame is not None else None
+        f_globals = f_back.f_globals if f_back is not None else None
+        # break reference cycle
+        # https://docs.python.org/3/library/inspect.html#the-interpreter-stack
+        del frame
+
+        if f_globals is not None:
+            package_name = cast(str, f_globals.get("__name__"))
+
+            if package_name == "__main__":
+                package_name = cast(str, f_globals.get("__package__"))
+
+            if package_name:
+                package_name = package_name.partition(".")[0]
+
+        if not package_name:
+            raise CleoRuntimeError("Can not determine package name")
+
+        return package_name
+
+    def _get_script_name_and_path(self) -> tuple[str, str]:
+        script_name = self._io.input.script_name or self._get_prog_name_from_stack()
+        script_path = posixpath.realpath(script_name)
+        script_name = Path(script_path).name
+
+        return script_name, script_path
+
+    def render_bash(self) -> str:
+        script_name, script_path = self._get_script_name_and_path()
+        aliases = [script_name, script_path, *self.option("alias")]
+        function = self._generate_function_name(script_name, script_path)
+
+        # Global options
+        assert self.application
+        opts = [
+            f"--{opt.name}"
+            for opt in sorted(self.application.definition.options, key=lambda o: o.name)
+        ]
+
+        # Commands + options
+        cmds = []
+        cmds_opts = []
+        for cmd in sorted(self.application.all().values(), key=lambda c: c.name or ""):
+            if cmd.hidden or not (cmd.enabled and cmd.name):
+                continue
+            command_name = shell_quote(cmd.name) if " " in cmd.name else cmd.name
+            cmds.append(command_name)
+            options = " ".join(
+                f"--{opt.name}".replace(":", "\\:")
+                for opt in sorted(cmd.definition.options, key=lambda o: o.name)
+            )
+            cmds_opts += [
+                f"            ({command_name})",
+                f'            opts="${{opts}} {options}"',
+                "            ;;",
+                "",  # newline
+            ]
+
+        return TEMPLATES["bash"] % {
+            "script_name": script_name,
+            "function": function,
+            "opts": " ".join(opts),
+            "cmds": " ".join(cmds),
+            "cmds_opts": "\n".join(cmds_opts[:-1]),  # trim trailing newline
+            "compdefs": "\n".join(
+                f"complete -o default -F {function} {alias}" for alias in aliases
+            ),
+        }
+
+    def render_zsh(self) -> str:
+        script_name, script_path = self._get_script_name_and_path()
+        aliases = [script_path, *self.option("alias")]
+        function = self._generate_function_name(script_name, script_path)
+
+        def sanitize(s: str) -> str:
+            return self._io.output.formatter.remove_format(s)
+
+        # Global options
+        assert self.application
+        opts = [
+            self._zsh_describe(f"--{opt.name}", sanitize(opt.description))
+            for opt in sorted(self.application.definition.options, key=lambda o: o.name)
+        ]
+
+        # 
Commands + options + cmds = [] + cmds_opts = [] + for cmd in sorted(self.application.all().values(), key=lambda c: c.name or ""): + if cmd.hidden or not (cmd.enabled and cmd.name): + continue + command_name = shell_quote(cmd.name) if " " in cmd.name else cmd.name + cmds.append(self._zsh_describe(command_name, sanitize(cmd.description))) + options = " ".join( + self._zsh_describe(f"--{opt.name}", sanitize(opt.description)) + for opt in sorted(cmd.definition.options, key=lambda o: o.name) + ) + cmds_opts += [ + f" ({command_name})", + f" opts+=({options})", + " ;;", + "", # newline + ] + + return TEMPLATES["zsh"] % { + "script_name": script_name, + "function": function, + "opts": " ".join(opts), + "cmds": " ".join(cmds), + "cmds_opts": "\n".join(cmds_opts[:-1]), # trim trailing newline + "compdefs": "\n".join(f"compdef {function} {alias}" for alias in aliases), + } + + def render_fish(self) -> str: + script_name, script_path = self._get_script_name_and_path() + function = self._generate_function_name(script_name, script_path) + + def sanitize(s: str) -> str: + return self._io.output.formatter.remove_format(s).replace("'", "\\'") + + # Global options + assert self.application + opts = [ + f"complete -c {script_name} -n '__fish{function}_no_subcommand' " + f"-l {opt.name} -d '{sanitize(opt.description)}'" + for opt in sorted(self.application.definition.options, key=lambda o: o.name) + ] + + # Commands + options + cmds = [] + cmds_opts = [] + namespaces = set() + for cmd in sorted(self.application.all().values(), key=lambda c: c.name or ""): + if cmd.hidden or not cmd.enabled or not cmd.name: + continue + cmd_path = cmd.name.split(" ") + namespace = cmd_path[0] + cmd_name = cmd_path[-1] if " " in cmd.name else cmd.name + + # We either have a command like `poetry add` or a nested (namespaced) + # command like `poetry cache clear`. + if len(cmd_path) == 1: + cmds.append( + f"complete -c {script_name} -f -n '__fish{function}_no_subcommand' " + f"-a {cmd_name} -d '{sanitize(cmd.description)}'" + ) + condition = f"__fish_seen_subcommand_from {cmd_name}" + else: + # Complete the namespace first + if namespace not in namespaces: + cmds.append( + f"complete -c {script_name} -f -n " + f"'__fish{function}_no_subcommand' -a {namespace}" + ) + # Now complete the command + subcmds = [ + name.split(" ")[-1] for name in self.application.all(namespace) + ] + cmds.append( + f"complete -c {script_name} -f -n '__fish_seen_subcommand_from " + f"{namespace}; and not __fish_seen_subcommand_from {' '.join(subcmds)}' " + f"-a {cmd_name} -d '{sanitize(cmd.description)}'" + ) + condition = ( + f"__fish_seen_subcommand_from {namespace}; " + f"and __fish_seen_subcommand_from {cmd_name}" + ) + + cmds_opts += [ + f"# {cmd.name}", + *[ + f"complete -c {script_name} " + f"-n '{condition}' " + f"-l {opt.name} -d '{sanitize(opt.description)}'" + for opt in sorted(cmd.definition.options, key=lambda o: o.name) + ], + "", # newline + ] + namespaces.add(namespace) + + return TEMPLATES["fish"] % { + "script_name": script_name, + "function": function, + "opts": "\n".join(opts), + "cmds": "\n".join(cmds), + "cmds_opts": "\n".join(cmds_opts[:-1]), # trim trailing newline + "cmds_names": " ".join(sorted(namespaces)), + } + + def get_shell_type(self) -> str: + shell = os.getenv("SHELL") + if not shell: + raise RuntimeError( + "Could not read SHELL environment variable. " + "Please specify your shell type by passing it as the first argument." 
+ ) + + return Path(shell).name + + def _generate_function_name(self, script_name: str, script_path: str) -> str: + sanitized_name = self._sanitize_for_function_name(script_name) + md5_hash = hashlib.md5(script_path.encode()).hexdigest()[:16] + return f"_{sanitized_name}_{md5_hash}_complete" + + def _sanitize_for_function_name(self, name: str) -> str: + name = name.replace("-", "_") + + return re.sub(r"[^A-Za-z0-9_]+", "", name) + + def _zsh_describe(self, value: str, description: str | None = None) -> str: + value = '"' + value.replace(":", "\\:") + if description: + description = re.sub( + r"([\"'#&;`|*?~<>^()\[\]{}$\\\x0A\xFF])", r"\\\1", description + ) + value += ":" + subprocess.list2cmdline([description]).strip('"') + + value += '"' + + return value diff --git a/conda_lock/_vendor/cleo/commands/help_command.py b/conda_lock/_vendor/cleo/commands/help_command.py new file mode 100644 index 00000000..cf6307b7 --- /dev/null +++ b/conda_lock/_vendor/cleo/commands/help_command.py @@ -0,0 +1,53 @@ +from __future__ import annotations + +from typing import ClassVar + +from conda_lock._vendor.cleo.commands.command import Command +from conda_lock._vendor.cleo.io.inputs.argument import Argument + + +class HelpCommand(Command): + name = "help" + + description = "Displays help for a command." + + arguments: ClassVar[list[Argument]] = [ + Argument( + "command_name", + required=False, + description="The command name", + default="help", + ) + ] + + help = """\ +The {command_name} command displays help for a given command: + + {command_full_name} list + +To display the list of available commands, please use the list command. +""" + + _command = None + + def set_command(self, command: Command) -> None: + self._command = command + + def configure(self) -> None: + self.ignore_validation_errors() + + super().configure() + + def handle(self) -> int: + from conda_lock._vendor.cleo.descriptors.text_descriptor import TextDescriptor + + if self._command is None: + assert self._application is not None + self._command = self._application.find(self.argument("command_name")) + + self.line("") + TextDescriptor().describe(self._io, self._command) + + self._command = None + + return 0 diff --git a/conda_lock/_vendor/cleo/commands/list_command.py b/conda_lock/_vendor/cleo/commands/list_command.py new file mode 100644 index 00000000..655535ac --- /dev/null +++ b/conda_lock/_vendor/cleo/commands/list_command.py @@ -0,0 +1,35 @@ +from __future__ import annotations + +from typing import ClassVar + +from conda_lock._vendor.cleo.commands.command import Command +from conda_lock._vendor.cleo.io.inputs.argument import Argument + + +class ListCommand(Command): + name = "list" + + description = "Lists commands." 
+
+    help = """\
+The {command_name} command lists all commands:
+
+    {command_full_name}
+
+You can also display the commands for a specific namespace:
+
+    {command_full_name} test
+"""
+
+    arguments: ClassVar[list[Argument]] = [
+        Argument("namespace", required=False, description="The namespace name")
+    ]
+
+    def handle(self) -> int:
+        from conda_lock._vendor.cleo.descriptors.text_descriptor import TextDescriptor
+
+        TextDescriptor().describe(
+            self._io, self.application, namespace=self.argument("namespace")
+        )
+
+        return 0
diff --git a/conda_lock/_vendor/cleo/cursor.py b/conda_lock/_vendor/cleo/cursor.py
new file mode 100644
index 00000000..a9cc5810
--- /dev/null
+++ b/conda_lock/_vendor/cleo/cursor.py
@@ -0,0 +1,108 @@
+from __future__ import annotations
+
+import sys
+
+from typing import TYPE_CHECKING
+from typing import TextIO
+
+from conda_lock._vendor.cleo.io.io import IO
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.cleo.io.outputs.output import Output
+
+
+class Cursor:
+    def __init__(self, io: IO | Output, input: TextIO | None = None) -> None:
+        if isinstance(io, IO):
+            io = io.output
+
+        self._output = io
+
+        if input is None:
+            input = sys.stdin
+
+        self._input = input
+
+    def move_up(self, lines: int = 1) -> Cursor:
+        self._output.write(f"\x1b[{lines}A")
+
+        return self
+
+    def move_down(self, lines: int = 1) -> Cursor:
+        self._output.write(f"\x1b[{lines}B")
+
+        return self
+
+    def move_right(self, columns: int = 1) -> Cursor:
+        self._output.write(f"\x1b[{columns}C")
+
+        return self
+
+    def move_left(self, columns: int = 1) -> Cursor:
+        self._output.write(f"\x1b[{columns}D")
+
+        return self
+
+    def move_to_column(self, column: int) -> Cursor:
+        self._output.write(f"\x1b[{column}G")
+
+        return self
+
+    def move_to_position(self, column: int, row: int) -> Cursor:
+        self._output.write(f"\x1b[{row + 1};{column}H")
+
+        return self
+
+    def save_position(self) -> Cursor:
+        self._output.write("\x1b7")
+
+        return self
+
+    def restore_position(self) -> Cursor:
+        self._output.write("\x1b8")
+
+        return self
+
+    def hide(self) -> Cursor:
+        self._output.write("\x1b[?25l")
+
+        return self
+
+    def show(self) -> Cursor:
+        self._output.write("\x1b[?25h\x1b[?0c")
+
+        return self
+
+    def clear_line(self) -> Cursor:
+        """
+        Clears all the output from the current line.
+        """
+        self._output.write("\x1b[2K")
+
+        return self
+
+    def clear_line_after(self) -> Cursor:
+        """
+        Clears all the output from the current line after the current position.
+        """
+        self._output.write("\x1b[K")
+
+        return self
+
+    def clear_output(self) -> Cursor:
+        """
+        Clears all the output from the cursor's current position
+        to the end of the screen.
+        """
+        self._output.write("\x1b[0J")
+
+        return self
+
+    def clear_screen(self) -> Cursor:
+        """
+        Clears the entire screen.
+        """
+        self._output.write("\x1b[2J")
+
+        return self
diff --git a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/py.typed b/conda_lock/_vendor/cleo/descriptors/__init__.py
similarity index 100%
rename from conda_lock/_vendor/poetry/core/_vendor/pyrsistent/py.typed
rename to conda_lock/_vendor/cleo/descriptors/__init__.py
diff --git a/conda_lock/_vendor/cleo/descriptors/application_description.py b/conda_lock/_vendor/cleo/descriptors/application_description.py
new file mode 100644
index 00000000..ca59cca7
--- /dev/null
+++ b/conda_lock/_vendor/cleo/descriptors/application_description.py
@@ -0,0 +1,86 @@
+from __future__ import annotations
+
+from collections import defaultdict
+from typing import TYPE_CHECKING
+
+from conda_lock._vendor.cleo.exceptions import CleoCommandNotFoundError
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.cleo.application import Application
+    from conda_lock._vendor.cleo.commands.command import Command
+
+
+class ApplicationDescription:
+    GLOBAL_NAMESPACE = "_global"
+
+    def __init__(
+        self,
+        application: Application,
+        namespace: str | None = None,
+        show_hidden: bool = False,
+    ) -> None:
+        self._application: Application = application
+        self._namespace = namespace
+        self._show_hidden = show_hidden
+        self._namespaces: dict[str, dict[str, str | list[str]]] = {}
+        self._commands: dict[str, Command] = {}
+        self._aliases: dict[str, Command] = {}
+
+        self._inspect_application()
+
+    @property
+    def namespaces(self) -> dict[str, dict[str, str | list[str]]]:
+        return self._namespaces
+
+    @property
+    def commands(self) -> dict[str, Command]:
+        return self._commands
+
+    def command(self, name: str) -> Command:
+        if name in self._commands:
+            return self._commands[name]
+        if name in self._aliases:
+            return self._aliases[name]
+        raise CleoCommandNotFoundError(name)
+
+    def _inspect_application(self) -> None:
+        namespace = None
+        if self._namespace:
+            namespace = self._application.find_namespace(self._namespace)
+
+        all_commands = self._application.all(namespace)
+
+        for namespace, commands in self._sort_commands(all_commands):
+            names = []
+
+            for name, command in commands:
+                if not command.name or command.hidden:
+                    continue
+
+                if command.name == name:
+                    self._commands[name] = command
+                else:
+                    self._aliases[name] = command
+
+                names.append(name)
+
+            self._namespaces[namespace] = {"id": namespace, "commands": names}
+
+    def _sort_commands(
+        self, commands: dict[str, Command]
+    ) -> list[tuple[str, list[tuple[str, Command]]]]:
+        """
+        Sorts commands in alphabetical order.
+        """
+        namespaced_commands: dict[str, dict[str, Command]] = defaultdict(dict)
+        for name, command in commands.items():
+            key = self._application.extract_namespace(name, 1) or "_global"
+            namespaced_commands[key][name] = command
+
+        namespaced_commands_list: dict[str, list[tuple[str, Command]]] = {
+            namespace: sorted(commands.items())
+            for namespace, commands in namespaced_commands.items()
+        }
+
+        return sorted(namespaced_commands_list.items())
diff --git a/conda_lock/_vendor/cleo/descriptors/descriptor.py b/conda_lock/_vendor/cleo/descriptors/descriptor.py
new file mode 100644
index 00000000..03b7d53c
--- /dev/null
+++ b/conda_lock/_vendor/cleo/descriptors/descriptor.py
@@ -0,0 +1,51 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+from typing import Any
+
+from conda_lock._vendor.cleo.application import Application
+from conda_lock._vendor.cleo.commands.command import Command
+from conda_lock._vendor.cleo.io.inputs.argument import Argument
+from 
conda_lock._vendor.cleo.io.inputs.definition import Definition +from conda_lock._vendor.cleo.io.inputs.option import Option +from conda_lock._vendor.cleo.io.outputs.output import Type + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.io.io import IO + + +class Descriptor: + def describe(self, io: IO, obj: Any, **options: Any) -> None: + self._io = io + + if isinstance(obj, Argument): + self._describe_argument(obj, **options) + elif isinstance(obj, Option): + self._describe_option(obj, **options) + elif isinstance(obj, Definition): + self._describe_definition(obj, **options) + elif isinstance(obj, Command): + self._describe_command(obj, **options) + elif isinstance(obj, Application): + self._describe_application(obj, **options) + + def _write(self, content: str, decorated: bool = True) -> None: + self._io.write( + content, new_line=False, type=Type.NORMAL if decorated else Type.RAW + ) + + def _describe_argument(self, argument: Argument, **options: Any) -> None: + raise NotImplementedError + + def _describe_option(self, option: Option, **options: Any) -> None: + raise NotImplementedError + + def _describe_definition(self, definition: Definition, **options: Any) -> None: + raise NotImplementedError + + def _describe_command(self, command: Command, **options: Any) -> None: + raise NotImplementedError + + def _describe_application(self, application: Application, **options: Any) -> None: + raise NotImplementedError diff --git a/conda_lock/_vendor/cleo/descriptors/text_descriptor.py b/conda_lock/_vendor/cleo/descriptors/text_descriptor.py new file mode 100644 index 00000000..824b25de --- /dev/null +++ b/conda_lock/_vendor/cleo/descriptors/text_descriptor.py @@ -0,0 +1,280 @@ +from __future__ import annotations + +import json +import re + +from typing import TYPE_CHECKING +from typing import Any +from typing import Sequence + +from conda_lock._vendor.cleo.commands.command import Command +from conda_lock._vendor.cleo.descriptors.descriptor import Descriptor +from conda_lock._vendor.cleo.formatters.formatter import Formatter +from conda_lock._vendor.cleo.io.inputs.definition import Definition + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.application import Application + from conda_lock._vendor.cleo.io.inputs.argument import Argument + from conda_lock._vendor.cleo.io.inputs.option import Option + + +class TextDescriptor(Descriptor): + def _describe_argument(self, argument: Argument, **options: Any) -> None: + if argument.default is not None and ( + not isinstance(argument.default, list) or argument.default + ): + default = ( + f" [default: {self._format_default_value(argument.default)}]" + "" + ) + else: + default = "" + + total_width = options.get("total_width", len(argument.name)) + + spacing_width = total_width - len(argument.name) + sub_argument_description = re.sub( + r"\s*[\r\n]\s*", + "\n" + " " * (total_width + 4), + argument.description, + ) + self._write( + f" {argument.name} {' ' * spacing_width}" + f"{sub_argument_description}{default}" + ) + + def _describe_option(self, option: Option, **options: Any) -> None: + if ( + option.accepts_value() + and option.default is not None + and (not isinstance(option.default, list) or option.default) + ): + default = ( + " [default: " + f"{self._format_default_value(option.default)}]" + ) + else: + default = "" + + value = "" + if option.accepts_value(): + value = "=" + option.name.upper() + + if not option.requires_value(): + value = "[" + value + "]" + + total_width = options.get( + "total_width", 
self._calculate_total_width_for_options([option]) + ) + + option_shortcut = f"-{option.shortcut}, " if option.shortcut else " " + synopsis = f"{option_shortcut}--{option.name}{value}" + + spacing_width = total_width - len(synopsis) + sub_option_description = re.sub( + r"\s*[\r\n]\s*", + "\n" + " " * (total_width + 4), + option.description, + ) + are_multiple_values_allowed = ( + " (multiple values allowed)" if option.is_list() else "" + ) + self._write( + f" {synopsis} " + f"{' ' * spacing_width}{sub_option_description}" + f"{default}" + f"{are_multiple_values_allowed}" + ) + + def _describe_definition(self, definition: Definition, **options: Any) -> None: + arguments = definition.arguments + definition_options = definition.options + total_width = self._calculate_total_width_for_options(definition_options) + + for argument in arguments: + total_width = max(total_width, len(argument.name)) + + if arguments: + self._write("Arguments:") + self._write("\n") + + for argument in arguments: + self._describe_argument(argument, total_width=total_width) + self._write("\n") + + if arguments and definition_options: + self._write("\n") + + if definition_options: + later_options = [] + + self._write("Options:") + + for option in definition_options: + if option.shortcut and len(option.shortcut) > 1: + later_options.append(option) + continue + + self._write("\n") + self._describe_option(option, total_width=total_width) + + for option in later_options: + self._write("\n") + self._describe_option(option, total_width=total_width) + + def _describe_command(self, command: Command, **options: Any) -> None: + command.merge_application_definition(False) + + description = command.description + if description: + self._write("Description:") + self._write("\n") + self._write(" " + description) + self._write("\n\n") + + self._write("Usage:") + for usage in [command.synopsis(True), *command.aliases, *command.usages]: + self._write("\n") + self._write(" " + Formatter.escape(usage)) + + self._write("\n") + + definition = command.definition + if definition.options or definition.arguments: + self._write("\n") + self._describe_definition(definition, **options) + self._write("\n") + + help_text = command.processed_help + if help_text and help_text != description: + self._write("\n") + self._write("Help:") + self._write("\n") + self._write(" " + help_text.replace("\n", "\n ")) + self._write("\n") + + def _describe_application(self, application: Application, **options: Any) -> None: + from conda_lock._vendor.cleo.descriptors.application_description import ApplicationDescription + + described_namespace = options.get("namespace") + description = ApplicationDescription(application, namespace=described_namespace) + + help_text = application.help + if help_text: + self._write(f"{help_text}\n\n") + + self._write("Usage:\n") + self._write(" command [options] [arguments]\n\n") + + self._describe_definition(Definition(application.definition.options), **options) + + self._write("\n\n") + + commands = description.commands + namespaces = description.namespaces + + if described_namespace and namespaces: + described_namespace_info = next(iter(namespaces.values())) + for name in described_namespace_info["commands"]: + commands[name] = description.command(name) + + # calculate max width based on available commands per namespace + all_commands = list(commands) + for namespace in namespaces.values(): + all_commands += namespace["commands"] + + width = self._get_column_width(all_commands) + if described_namespace: + self._write( + f'Available 
commands for the "{described_namespace}" namespace:' + ) + else: + self._write("Available commands:") + + for namespace in namespaces.values(): + namespace["commands"] = [c for c in namespace["commands"] if c in commands] + + if not namespace["commands"]: + continue + + if not ( + described_namespace + or namespace["id"] == ApplicationDescription.GLOBAL_NAMESPACE + ): + self._write("\n") + self._write(f" {namespace['id']}") + + for name in namespace["commands"]: + self._write("\n") + spacing_width = width - len(name) + command = commands[name] + command_aliases = ( + self._get_command_aliases_text(command) + if command.name == name + else "" + ) + self._write( + f" {name}{' ' * spacing_width}" + f"{command_aliases + command.description}" + ) + + self._write("\n") + + def _format_default_value(self, default: Any) -> str: + if isinstance(default, str): + default = Formatter.escape(default) + elif isinstance(default, list): + default = [ + Formatter.escape(value) for value in default if isinstance(value, str) + ] + elif isinstance(default, dict): + default = { + key: Formatter.escape(value) + for key, value in default.items() + if isinstance(value, str) + } + + return json.dumps(default).replace("\\\\", "\\") + + def _calculate_total_width_for_options(self, options: list[Option]) -> int: + total_width = 0 + + for option in options: + name_length = 1 + max(len(option.shortcut or ""), 1) + 4 + len(option.name) + + if option.accepts_value(): + value_length = 1 + len(option.name) + if not option.requires_value(): + value_length += 2 + + name_length += value_length + + total_width = max(total_width, name_length) + + return total_width + + def _get_column_width(self, commands: Sequence[Command | str]) -> int: + widths: list[int] = [] + + for command in commands: + if isinstance(command, Command): + assert command.name is not None + widths.append(len(command.name)) + for alias in command.aliases: + widths.append(len(alias)) + else: + widths.append(len(command)) + + if not widths: + return 0 + + return max(widths) + 2 + + def _get_command_aliases_text(self, command: Command) -> str: + aliases = command.aliases + + if aliases: + return f"[{ '|'.join(aliases) }] " + + return "" diff --git a/conda_lock/_vendor/cleo/events/__init__.py b/conda_lock/_vendor/cleo/events/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/conda_lock/_vendor/cleo/events/console_command_event.py b/conda_lock/_vendor/cleo/events/console_command_event.py new file mode 100644 index 00000000..684fc45e --- /dev/null +++ b/conda_lock/_vendor/cleo/events/console_command_event.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from conda_lock._vendor.cleo.events.console_event import ConsoleEvent + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.commands.command import Command + from conda_lock._vendor.cleo.io.io import IO + + +class ConsoleCommandEvent(ConsoleEvent): + """ + An event triggered before the command is executed. + + It allows to do things like skipping the command or changing the input. 
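Reviewer note: this descriptor is what the `list` and `help` commands shown earlier delegate to. A sketch of rendering a bare `Definition` through it, captured with the `BufferedIO` class added later in this diff (the argument and option names are invented):

```python
from conda_lock._vendor.cleo.descriptors.text_descriptor import TextDescriptor
from conda_lock._vendor.cleo.io.buffered_io import BufferedIO
from conda_lock._vendor.cleo.io.inputs.argument import Argument
from conda_lock._vendor.cleo.io.inputs.definition import Definition
from conda_lock._vendor.cleo.io.inputs.option import Option

definition = Definition([
    Argument("package", required=True, description="The package to describe"),
    Option("dry-run", None, flag=True, description="Do not persist changes"),
])

io = BufferedIO(decorated=False)
# Descriptor.describe() routes Definition instances to _describe_definition().
TextDescriptor().describe(io, definition)
print(io.fetch_output())  # aligned "Arguments:" and "Options:" sections
```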
+ """ + + RETURN_CODE_DISABLED: int = 113 + + def __init__(self, command: Command, io: IO) -> None: + super().__init__(command, io) + + self._command_should_run = True + + def disable_command(self) -> None: + self._command_should_run = False + + def enable_command(self) -> None: + self._command_should_run = True + + def command_should_run(self) -> bool: + return self._command_should_run diff --git a/conda_lock/_vendor/cleo/events/console_error_event.py b/conda_lock/_vendor/cleo/events/console_error_event.py new file mode 100644 index 00000000..a18915dd --- /dev/null +++ b/conda_lock/_vendor/cleo/events/console_error_event.py @@ -0,0 +1,43 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from conda_lock._vendor.cleo.events.console_event import ConsoleEvent +from conda_lock._vendor.cleo.exceptions import CleoError + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.commands.command import Command + from conda_lock._vendor.cleo.io.io import IO + + +class ConsoleErrorEvent(ConsoleEvent): + """ + An event triggered when an exception is raised during the execution of a command. + """ + + def __init__(self, command: Command, io: IO, error: Exception) -> None: + super().__init__(command, io) + + self._error = error + self._exit_code: int | None = None + + @property + def error(self) -> Exception: + return self._error + + @property + def exit_code(self) -> int: + if self._exit_code is not None: + return self._exit_code + + if isinstance(self._error, CleoError) and self._error.exit_code is not None: + return self._error.exit_code + + return 1 + + def set_error(self, error: Exception) -> None: + self._error = error + + def set_exit_code(self, exit_code: int) -> None: + self._exit_code = exit_code diff --git a/conda_lock/_vendor/cleo/events/console_event.py b/conda_lock/_vendor/cleo/events/console_event.py new file mode 100644 index 00000000..4dbcc001 --- /dev/null +++ b/conda_lock/_vendor/cleo/events/console_event.py @@ -0,0 +1,30 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from conda_lock._vendor.cleo.events.event import Event + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.commands.command import Command + from conda_lock._vendor.cleo.io.io import IO + + +class ConsoleEvent(Event): + """ + An event that gives access to the IO of a command. + """ + + def __init__(self, command: Command, io: IO) -> None: + super().__init__() + + self._command = command + self._io = io + + @property + def command(self) -> Command: + return self._command + + @property + def io(self) -> IO: + return self._io diff --git a/conda_lock/_vendor/cleo/events/console_events.py b/conda_lock/_vendor/cleo/events/console_events.py new file mode 100644 index 00000000..e9f1f7c9 --- /dev/null +++ b/conda_lock/_vendor/cleo/events/console_events.py @@ -0,0 +1,21 @@ +# The COMMAND event allows to attach listeners before any command +# is executed. It also allows the modification of the command and IO +# before it's handed to the command. +from __future__ import annotations + + +COMMAND = "console.command" + +# The SIGNAL event allows some actions to be performed after +# the command execution is interrupted. +SIGNAL = "console.signal" + +# The TERMINATE event allows listeners to be attached after the command +# is executed by the console. +TERMINATE = "console.terminate" + +# The ERROR event occurs when an uncaught exception is raised. +# +# This event gives the ability to deal with the exception or to modify +# the raised exception. 
+ERROR = "console.error" diff --git a/conda_lock/_vendor/cleo/events/console_signal_event.py b/conda_lock/_vendor/cleo/events/console_signal_event.py new file mode 100644 index 00000000..8abf8b88 --- /dev/null +++ b/conda_lock/_vendor/cleo/events/console_signal_event.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from conda_lock._vendor.cleo.events.console_event import ConsoleEvent + + +if TYPE_CHECKING: + import signal + + from conda_lock._vendor.cleo.commands.command import Command + from conda_lock._vendor.cleo.io.io import IO + + +class ConsoleSignalEvent(ConsoleEvent): + """ + An event triggered by a system signal. + """ + + def __init__( + self, command: Command, io: IO, handling_signal: signal.Signals + ) -> None: + super().__init__(command, io) + self._handling_signal = handling_signal + + @property + def handling_signal(self) -> signal.Signals: + return self._handling_signal diff --git a/conda_lock/_vendor/cleo/events/console_terminate_event.py b/conda_lock/_vendor/cleo/events/console_terminate_event.py new file mode 100644 index 00000000..e28ea52e --- /dev/null +++ b/conda_lock/_vendor/cleo/events/console_terminate_event.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from conda_lock._vendor.cleo.events.console_event import ConsoleEvent + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.commands.command import Command + from conda_lock._vendor.cleo.io.io import IO + + +class ConsoleTerminateEvent(ConsoleEvent): + """ + An event triggered by after the execution of a command. + """ + + def __init__(self, command: Command, io: IO, exit_code: int) -> None: + super().__init__(command, io) + + self._exit_code = exit_code + + @property + def exit_code(self) -> int: + return self._exit_code + + def set_exit_code(self, exit_code: int) -> None: + self._exit_code = exit_code diff --git a/conda_lock/_vendor/cleo/events/event.py b/conda_lock/_vendor/cleo/events/event.py new file mode 100644 index 00000000..282d6ce4 --- /dev/null +++ b/conda_lock/_vendor/cleo/events/event.py @@ -0,0 +1,16 @@ +from __future__ import annotations + + +class Event: + """ + Event + """ + + def __init__(self) -> None: + self._propagation_stopped = False + + def is_propagation_stopped(self) -> bool: + return self._propagation_stopped + + def stop_propagation(self) -> None: + self._propagation_stopped = True diff --git a/conda_lock/_vendor/cleo/events/event_dispatcher.py b/conda_lock/_vendor/cleo/events/event_dispatcher.py new file mode 100644 index 00000000..0c4b7eb8 --- /dev/null +++ b/conda_lock/_vendor/cleo/events/event_dispatcher.py @@ -0,0 +1,95 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Callable +from typing import cast + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.events.event import Event + + Listener = Callable[[Event, str, "EventDispatcher"], None] + + +class EventDispatcher: + def __init__(self) -> None: + self._listeners: dict[str, dict[int, list[Listener]]] = {} + self._sorted: dict[str, list[Listener]] = {} + + def dispatch(self, event: Event, event_name: str | None = None) -> Event: + if event_name is None: + event_name = type(event).__name__ + + listeners = cast("list[Listener]", self.get_listeners(event_name)) + + if listeners: + self._do_dispatch(listeners, event_name, event) + + return event + + def get_listeners( + self, event_name: str | None = None + ) -> list[Listener] | dict[str, list[Listener]]: + if event_name is not None: + if 
event_name not in self._listeners: + return [] + + if event_name not in self._sorted: + self._sort_listeners(event_name) + + return self._sorted[event_name] + + for event_name in self._listeners: + if event_name not in self._sorted: + self._sort_listeners(event_name) + + return self._sorted + + def get_listener_priority(self, event_name: str, listener: Listener) -> int | None: + if event_name not in self._listeners: + return None + + for priority, listeners in self._listeners[event_name].items(): + for v in listeners: + if v == listener: + return priority + + return None + + def has_listeners(self, event_name: str | None = None) -> bool: + if event_name is not None: + return bool(self._listeners.get(event_name)) + return any(self._listeners.values()) + + def add_listener( + self, event_name: str, listener: Listener, priority: int = 0 + ) -> None: + if event_name not in self._listeners: + self._listeners[event_name] = {} + + if priority not in self._listeners[event_name]: + self._listeners[event_name][priority] = [] + + self._listeners[event_name][priority].append(listener) + + if event_name in self._sorted: + del self._sorted[event_name] + + def _do_dispatch( + self, listeners: list[Listener], event_name: str, event: Event + ) -> None: + for listener in listeners: + if event.is_propagation_stopped(): + break + + listener(event, event_name, self) + + def _sort_listeners(self, event_name: str) -> None: + """ + Sorts the internal list of listeners for the given event by priority. + """ + prioritized_listeners = self._listeners[event_name] + sorted_listeners = self._sorted[event_name] = [] + + for priority in sorted(prioritized_listeners, reverse=True): + sorted_listeners.extend(prioritized_listeners[priority]) diff --git a/conda_lock/_vendor/cleo/exceptions/__init__.py b/conda_lock/_vendor/cleo/exceptions/__init__.py new file mode 100644 index 00000000..af91c929 --- /dev/null +++ b/conda_lock/_vendor/cleo/exceptions/__init__.py @@ -0,0 +1,94 @@ +from __future__ import annotations + +from conda_lock._vendor.cleo._utils import find_similar_names + + +class CleoError(Exception): + """ + Base Cleo exception. + """ + + exit_code: int | None = None + + +class CleoLogicError(CleoError): + """ + Raised when there is error in command arguments + and/or options configuration logic. + """ + + +class CleoRuntimeError(CleoError): + """ + Raised when command is called with invalid options or arguments. + """ + + +class CleoValueError(CleoError): + """ + Raised when wrong value was given to Cleo components. + """ + + +class CleoNoSuchOptionError(CleoError): + """ + Raised when command does not have given option. + """ + + +class CleoUserError(CleoError): + """ + Base exception for user errors. + """ + + +class CleoMissingArgumentsError(CleoUserError): + """ + Raised when called command was not given required arguments. + """ + + +def _suggest_similar_names(name: str, names: list[str]) -> str | None: + if not names: + return None + + suggested_names = find_similar_names(name, names) + + if not suggested_names: + return None + + newline_separator = "\n " + return "Did you mean " + newline_separator.join( + ( + ("this?" if len(suggested_names) == 1 else "one of these?"), + newline_separator.join(suggested_names), + ) + ) + + +class CleoCommandNotFoundError(CleoUserError): + """ + Raised when called command does not exist. + """ + + def __init__(self, name: str, commands: list[str] | None = None) -> None: + message = f'The command "{name}" does not exist.' 
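Reviewer note: listener order is resolved lazily; `_sort_listeners` rebuilds the per-event cache with higher priorities first, and `stop_propagation()` short-circuits `_do_dispatch`. A self-contained sketch:

```python
from conda_lock._vendor.cleo.events.console_events import COMMAND
from conda_lock._vendor.cleo.events.event import Event
from conda_lock._vendor.cleo.events.event_dispatcher import EventDispatcher


def first(event: Event, event_name: str, dispatcher: EventDispatcher) -> None:
    print("first")
    event.stop_propagation()  # _do_dispatch() skips the remaining listeners


def second(event: Event, event_name: str, dispatcher: EventDispatcher) -> None:
    print("never reached")


dispatcher = EventDispatcher()
dispatcher.add_listener(COMMAND, second, priority=0)
dispatcher.add_listener(COMMAND, first, priority=10)  # higher priority runs first

dispatcher.dispatch(Event(), COMMAND)  # prints only "first"
```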
+ if commands: + suggestions = _suggest_similar_names(name, commands) + if suggestions: + message += "\n\n" + suggestions + super().__init__(message) + + +class CleoNamespaceNotFoundError(CleoUserError): + """ + Raised when called namespace has no commands. + """ + + def __init__(self, name: str, namespaces: list[str] | None = None) -> None: + message = f'There are no commands in the "{name}" namespace.' + if namespaces: + suggestions = _suggest_similar_names(name, namespaces) + if suggestions: + message += "\n\n" + suggestions + super().__init__(message) diff --git a/conda_lock/_vendor/cleo/formatters/__init__.py b/conda_lock/_vendor/cleo/formatters/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/conda_lock/_vendor/cleo/formatters/formatter.py b/conda_lock/_vendor/cleo/formatters/formatter.py new file mode 100644 index 00000000..dbe1d5bf --- /dev/null +++ b/conda_lock/_vendor/cleo/formatters/formatter.py @@ -0,0 +1,200 @@ +from __future__ import annotations + +import re + +from typing import ClassVar + +from conda_lock._vendor.cleo.exceptions import CleoValueError +from conda_lock._vendor.cleo.formatters.style import Style +from conda_lock._vendor.cleo.formatters.style_stack import StyleStack + + +class Formatter: + TAG_REGEX = re.compile(r"(?ix)<(([a-z](?:[^<>]*)) | /([a-z](?:[^<>]*))?)>") + + _inline_styles_cache: ClassVar[dict[str, Style]] = {} + + def __init__( + self, decorated: bool = False, styles: dict[str, Style] | None = None + ) -> None: + self._decorated = decorated + self._styles: dict[str, Style] = {} + + self.set_style("error", Style("red", options=["bold"])) + self.set_style("info", Style("blue")) + self.set_style("comment", Style("green")) + self.set_style("question", Style("cyan")) + self.set_style("c1", Style("cyan")) + self.set_style("c2", Style("default", options=["bold"])) + self.set_style("b", Style("default", options=["bold"])) + + for name, style in (styles or {}).items(): + self.set_style(name, style) + + self._style_stack = StyleStack() + + @classmethod + def escape(cls, text: str) -> str: + """ + Escapes "<" special char in given text. + """ + text = re.sub(r"([^\\]?)<", "\\1\\<", text) + + return cls.escape_trailing_backslash(text) + + @staticmethod + def escape_trailing_backslash(text: str) -> str: + """ + Escapes trailing "\\" in given text. 
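Reviewer note: both user-error classes route through `find_similar_names` (vendored `_utils`), so a near-miss on a command name produces a "did you mean" hint:

```python
from conda_lock._vendor.cleo.exceptions import CleoCommandNotFoundError

try:
    raise CleoCommandNotFoundError("instal", commands=["install", "init", "info"])
except CleoCommandNotFoundError as exc:
    print(exc)
    # The command "instal" does not exist.
    #
    # Did you mean this?
    #     install
```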
+ """ + if text.endswith("\\"): + length = len(text) + text = text.rstrip("\\").replace("\0", "").ljust(length, "\0") + + return text + + def decorated(self, decorated: bool = True) -> None: + self._decorated = decorated + + def is_decorated(self) -> bool: + return self._decorated + + def set_style(self, name: str, style: Style) -> None: + self._styles[name] = style + + def has_style(self, name: str) -> bool: + return name in self._styles + + def style(self, name: str) -> Style: + if not self.has_style(name): + raise CleoValueError(f'Undefined style: "{name}"') + + return self._styles[name] + + def format(self, message: str) -> str: + return self.format_and_wrap(message, 0) + + def format_and_wrap(self, message: str, width: int) -> str: + offset = 0 + output = "" + current_line_length = 0 + for match in self.TAG_REGEX.finditer(message): + pos = match.start() + text = match.group(0) + + if pos != 0 and message[pos - 1] == "\\": + continue + + # add the text up to the next tag + formatted, current_line_length = self._apply_current_style( + message[offset:pos], output, width, current_line_length + ) + output += formatted + offset = pos + len(text) + + # Opening tag + seen_open = text[1] != "/" + tag = match.group(1) if seen_open else match.group(2) + + style = None + if tag: + style = self._create_style_from_string(tag) + + if not (seen_open or tag): + # + self._style_stack.pop() + elif style is None: + formatted, current_line_length = self._apply_current_style( + text, output, width, current_line_length + ) + output += formatted + elif seen_open: + self._style_stack.push(style) + else: + self._style_stack.pop(style) + + formatted, current_line_length = self._apply_current_style( + message[offset:], output, width, current_line_length + ) + output += formatted + return output.replace("\0", "\\").replace("\\<", "<") + + def remove_format(self, text: str) -> str: + decorated = self._decorated + + self._decorated = False + text = re.sub(r"\033\[[^m]*m", "", self.format(text)) + + self._decorated = decorated + + return text + + def _create_style_from_string(self, string: str) -> Style | None: + if string in self._styles: + return self._styles[string] + + if string in self._inline_styles_cache: + return self._inline_styles_cache[string] + + matches = re.findall(r"([^=]+)=([^;]+)(;|$)", string.lower()) + if not matches: + return None + + style = Style() + + for where, style_options, _ in matches: + if where == "fg": + style.foreground(style_options) + elif where == "bg": + style.background(style_options) + else: + try: + for option in map(str.strip, style_options.split(",")): + style.set_option(option) + except ValueError: + return None + + self._inline_styles_cache[string] = style + + return style + + def _apply_current_style( + self, text: str, current: str, width: int, current_line_length: int + ) -> tuple[str, int]: + if not text: + return "", current_line_length + + if not width: + if self.is_decorated(): + return self._style_stack.current.apply(text), current_line_length + + return text, current_line_length + + if not current_line_length and current: + text = text.lstrip() + + if current_line_length: + i = width - current_line_length + prefix = text[:i] + "\n" + text = text[i:] + else: + prefix = "" + + m = re.match(r"(\n)$", text) + text = prefix + re.sub(rf"([^\n]{{{width}}})\ *", "\\1\n", text) + text = text.rstrip("\n") + (m.group(1) if m else "") + + if not current_line_length and current and not current.endswith("\n"): + text = "\n" + text + + lines = text.split("\n") + for line in 
lines: + current_line_length += len(line) + if current_line_length >= width: + current_line_length = 0 + + if self.is_decorated(): + apply = self._style_stack.current.apply + text = "\n".join(map(apply, lines)) + + return text, current_line_length diff --git a/conda_lock/_vendor/cleo/formatters/style.py b/conda_lock/_vendor/cleo/formatters/style.py new file mode 100644 index 00000000..3a176c30 --- /dev/null +++ b/conda_lock/_vendor/cleo/formatters/style.py @@ -0,0 +1,68 @@ +from __future__ import annotations + +from conda_lock._vendor.cleo.color import Color + + +class Style: + def __init__( + self, + foreground: str | None = None, + background: str | None = None, + options: list[str] | None = None, + ) -> None: + self._foreground = foreground or "" + self._background = background or "" + self._options = options or [] + + self._color = Color(self._foreground, self._background, self._options) + + def foreground(self, foreground: str) -> Style: + self._color = Color(foreground, self._background, self._options) + self._foreground = foreground + + return self + + def background(self, background: str) -> Style: + self._color = Color(self._foreground, background, self._options) + self._background = background + + return self + + def bold(self, bold: bool = True) -> Style: + return self._toggle_option(bold, "bold") + + def dark(self, dark: bool = True) -> Style: + return self._toggle_option(dark, "dark") + + def underlines(self, underlined: bool = True) -> Style: + return self._toggle_option(underlined, "underline") + + def italic(self, italic: bool = True) -> Style: + return self._toggle_option(italic, "italic") + + def blinking(self, blinking: bool = True) -> Style: + return self._toggle_option(blinking, "blink") + + def inverse(self, inverse: bool = True) -> Style: + return self._toggle_option(inverse, "reverse") + + def hidden(self, hidden: bool = True) -> Style: + return self._toggle_option(hidden, "conceal") + + def set_option(self, option: str) -> Style: + self._options.append(option) + self._color = Color(self._foreground, self._background, self._options) + return self + + def unset_option(self, option: str) -> Style: + if option in self._options: + index = self._options.index(option) + del self._options[index] + self._color = Color(self._foreground, self._background, self._options) + return self + + def _toggle_option(self, toggle_flag: bool, option: str) -> Style: + return (self.set_option if toggle_flag else self.unset_option)(option) + + def apply(self, text: str) -> str: + return self._color.apply(text) diff --git a/conda_lock/_vendor/cleo/formatters/style_stack.py b/conda_lock/_vendor/cleo/formatters/style_stack.py new file mode 100644 index 00000000..7edc382f --- /dev/null +++ b/conda_lock/_vendor/cleo/formatters/style_stack.py @@ -0,0 +1,42 @@ +from __future__ import annotations + +from conda_lock._vendor.cleo.exceptions import CleoValueError +from conda_lock._vendor.cleo.formatters.style import Style + + +class StyleStack: + def __init__(self, empty_style: Style | None = None) -> None: + if empty_style is None: + empty_style = Style() + + self._empty_style = empty_style + self._styles: list[Style] = [] + + @property + def current(self) -> Style: + if not self._styles: + return self._empty_style + + return self._styles[-1] + + def reset(self) -> None: + self._styles = [] + + def push(self, style: Style) -> None: + self._styles.append(style) + + def pop(self, style: Style | None = None) -> Style: + if not self._styles: + return self._empty_style + + if style is None: + return 
self._styles.pop() + + sample = style.apply("") + + for i, stacked_style in reversed(list(enumerate(self._styles))): + if sample == stacked_style.apply(""): + self._styles = self._styles[:i] + return stacked_style + + raise CleoValueError("Invalid nested tag found") diff --git a/conda_lock/_vendor/cleo/helpers.py b/conda_lock/_vendor/cleo/helpers.py new file mode 100644 index 00000000..585042eb --- /dev/null +++ b/conda_lock/_vendor/cleo/helpers.py @@ -0,0 +1,42 @@ +from __future__ import annotations + +from typing import Any + +from conda_lock._vendor.cleo.io.inputs.argument import Argument +from conda_lock._vendor.cleo.io.inputs.option import Option + + +def argument( + name: str, + description: str | None = None, + optional: bool = False, + multiple: bool = False, + default: Any | None = None, +) -> Argument: + return Argument( + name, + required=not optional, + is_list=multiple, + description=description, + default=default, + ) + + +def option( + long_name: str, + short_name: str | None = None, + description: str | None = None, + flag: bool = True, + value_required: bool = True, + multiple: bool = False, + default: Any | None = None, +) -> Option: + return Option( + long_name, + short_name, + flag=flag, + requires_value=value_required, + is_list=multiple, + description=description, + default=default, + ) diff --git a/conda_lock/_vendor/cleo/io/__init__.py b/conda_lock/_vendor/cleo/io/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/conda_lock/_vendor/cleo/io/buffered_io.py b/conda_lock/_vendor/cleo/io/buffered_io.py new file mode 100644 index 00000000..dde77c33 --- /dev/null +++ b/conda_lock/_vendor/cleo/io/buffered_io.py @@ -0,0 +1,55 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import cast + +from conda_lock._vendor.cleo.io.inputs.string_input import StringInput +from conda_lock._vendor.cleo.io.io import IO +from conda_lock._vendor.cleo.io.outputs.buffered_output import BufferedOutput + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.io.inputs.input import Input + + +class BufferedIO(IO): + def __init__( + self, + input: Input | None = None, + decorated: bool = False, + supports_utf8: bool = True, + ) -> None: + super().__init__( + input or StringInput(""), + BufferedOutput(decorated=decorated, supports_utf8=supports_utf8), + BufferedOutput(decorated=decorated, supports_utf8=supports_utf8), + ) + + def fetch_output(self) -> str: + return cast(BufferedOutput, self._output).fetch() + + def fetch_error(self) -> str: + return cast(BufferedOutput, self._error_output).fetch() + + def clear(self) -> None: + cast(BufferedOutput, self._output).clear() + cast(BufferedOutput, self._error_output).clear() + + def clear_output(self) -> None: + cast(BufferedOutput, self._output).clear() + + def clear_error(self) -> None: + cast(BufferedOutput, self._error_output).clear() + + def supports_utf8(self) -> bool: + return cast(BufferedOutput, self._output).supports_utf8() + + def clear_user_input(self) -> None: + self._input.stream.truncate(0) + self._input.stream.seek(0) + + def set_user_input(self, user_input: str) -> None: + self.clear_user_input() + + self._input.stream.write(user_input) + self._input.stream.seek(0) diff --git a/conda_lock/_vendor/cleo/io/inputs/__init__.py b/conda_lock/_vendor/cleo/io/inputs/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/conda_lock/_vendor/cleo/io/inputs/argument.py b/conda_lock/_vendor/cleo/io/inputs/argument.py new file mode 100644 index 00000000..0e77a19b --- /dev/null 
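Reviewer note on the formatter stack above: built-in tags such as `<info>` resolve to the `Style` objects registered in `Formatter.__init__`, ad-hoc `fg=...;options=...` tags are parsed on demand by `_create_style_from_string`, and `</>` pops whatever style is current. A sketch:

```python
from conda_lock._vendor.cleo.formatters.formatter import Formatter

formatter = Formatter(decorated=True)

print(formatter.format("<info>built-in style</info>"))
print(formatter.format("<fg=green;options=bold>ad-hoc style</>"))
print(formatter.remove_format("<info>plain</info>"))  # "plain", no ANSI codes
```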
+++ b/conda_lock/_vendor/cleo/io/inputs/argument.py @@ -0,0 +1,68 @@ +from __future__ import annotations + +from typing import Any + +from conda_lock._vendor.cleo.exceptions import CleoLogicError + + +class Argument: + """ + A command line argument. + """ + + def __init__( + self, + name: str, + required: bool = True, + is_list: bool = False, + description: str | None = None, + default: Any | None = None, + ) -> None: + self._name = name + self._required = required + self._is_list = is_list + self._description = description or "" + self._default: str | list[str] | None = None + + self.set_default(default) + + @property + def name(self) -> str: + return self._name + + @property + def default(self) -> str | list[str] | None: + return self._default + + @property + def description(self) -> str: + return self._description + + def is_required(self) -> bool: + return self._required + + def is_list(self) -> bool: + return self._is_list + + def set_default(self, default: Any | None = None) -> None: + if self._required and default is not None: + raise CleoLogicError("Cannot set a default value for required arguments") + + if self._is_list: + if default is None: + default = [] + elif not isinstance(default, list): + raise CleoLogicError( + "A default value for a list argument must be a list" + ) + + self._default = default + + def __repr__(self) -> str: + return ( + f"Argument({self._name!r}, " + f"required={self._required}, " + f"is_list={self._is_list}, " + f"description={self._description!r}, " + f"default={self._default!r})" + ) diff --git a/conda_lock/_vendor/cleo/io/inputs/argv_input.py b/conda_lock/_vendor/cleo/io/inputs/argv_input.py new file mode 100644 index 00000000..94beda9c --- /dev/null +++ b/conda_lock/_vendor/cleo/io/inputs/argv_input.py @@ -0,0 +1,294 @@ +from __future__ import annotations + +import sys + +from typing import TYPE_CHECKING +from typing import Any + +from conda_lock._vendor.cleo.exceptions import CleoNoSuchOptionError +from conda_lock._vendor.cleo.exceptions import CleoRuntimeError +from conda_lock._vendor.cleo.io.inputs.input import Input + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.io.inputs.definition import Definition + + +class ArgvInput(Input): + """ + Represents an input coming from the command line. + """ + + def __init__( + self, argv: list[str] | None = None, definition: Definition | None = None + ) -> None: + if argv is None: + argv = sys.argv + + argv = argv[:] + + # Strip the application name + try: + self._script_name: str | None = argv.pop(0) + except IndexError: + self._script_name = None + + self._tokens = argv + self._parsed: list[str] = [] + + super().__init__(definition=definition) + + @property + def first_argument(self) -> str | None: + is_option = False + + for i, token in enumerate(self._tokens): + if token.startswith("-"): + if "=" in token or len(self._tokens) == (i + 1): + continue + + # If it's a long option, consider that + # everything after "--" is the option name. + # Otherwise, use the last character + # (if it's a short option set, only the last one + # can take a value with space separator). 
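Reviewer note: two constraints enforced by `set_default` above are easy to trip over: list arguments silently default to `[]`, and required arguments reject defaults outright. For example:

```python
from conda_lock._vendor.cleo.exceptions import CleoLogicError
from conda_lock._vendor.cleo.io.inputs.argument import Argument

files = Argument("files", required=False, is_list=True)
print(files.default)  # [] -- list arguments get an empty list, never None

try:
    Argument("name", required=True, default="demo")
except CleoLogicError as exc:
    print(exc)  # Cannot set a default value for required arguments
```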
+ name = token[2:] if token.startswith("--") else token[-1] + + if not (name in self._options or self._definition.has_shortcut(name)): + # noop + continue + + if name not in self._options: + name = self._definition.shortcut_to_name(name) + + if name in self._options and self._tokens[i + 1] == self._options[name]: + is_option = True + + continue + + if is_option: + is_option = False + continue + + return token + + return None + + @property + def script_name(self) -> str | None: + return self._script_name + + def has_parameter_option( + self, values: str | list[str], only_params: bool = False + ) -> bool: + """ + Returns true if the raw parameters (not parsed) contain a value. + """ + if not isinstance(values, list): + values = [values] + + for token in self._tokens: + if only_params and token == "--": + return False + + for value in values: + # Options with values: + # For long options, test for '--option=' at beginning + # For short options, test for '-o' at beginning + leading = value + "=" if value.startswith("--") else value + + if token == value or leading != "" and token.startswith(leading): + return True + + return False + + def parameter_option( + self, + values: str | list[str], + default: Any = False, + only_params: bool = False, + ) -> Any: + if not isinstance(values, list): + values = [values] + + tokens = self._tokens[:] + while tokens: + token = tokens.pop(0) + if only_params and token == "--": + return default + + for value in values: + if token == value: + try: + return tokens.pop(0) + except IndexError: + return None + + # Options with values: + # For long options, test for '--option=' at beginning + # For short options, test for '-o' at beginning + leading = value + "=" if value.startswith("--") else value + + if token == value or leading != "" and token.startswith(leading): + return token[len(leading)] + + return False + + def _set_tokens(self, tokens: list[str]) -> None: + self._tokens = tokens + + def _parse(self) -> None: + parse_options = True + self._parsed = self._tokens[:] + + try: + token = self._parsed.pop(0) + except IndexError: + return + + while token is not None: + if parse_options and token == "": + self._parse_argument(token) + elif parse_options and token == "--": + parse_options = False + elif parse_options and token.startswith("--"): + self._parse_long_option(token) + elif parse_options and token.startswith("-") and token != "-": + self._parse_short_option(token) + else: + self._parse_argument(token) + + try: + token = self._parsed.pop(0) + except IndexError: + return + + def _parse_short_option(self, token: str) -> None: + name = token[1:] + + if len(name) > 1: + shortcut = name[0] + if ( + self._definition.has_shortcut(shortcut) + and self._definition.option_for_shortcut(shortcut).accepts_value() + ): + # An option with a value and no space + self._add_short_option(shortcut, name[1:]) + else: + self._parse_short_option_set(name) + else: + self._add_short_option(name, None) + + def _parse_short_option_set(self, name: str) -> None: + length = len(name) + for i in range(length): + shortcut = name[i] + if not self._definition.has_shortcut(shortcut): + raise CleoRuntimeError(f'The option "{name[i]}" does not exist') + + option = self._definition.option_for_shortcut(shortcut) + if option.accepts_value(): + self._add_long_option( + option.name, name[i + 1 :] if i < length - 1 else None + ) + + break + + self._add_long_option(option.name, None) + + def _parse_long_option(self, token: str) -> None: + name = token[2:] + + pos = name.find("=") + if pos != -1: + 
value = name[pos + 1 :] + if not value: + self._parsed.insert(0, value) + + self._add_long_option(name[:pos], value) + else: + self._add_long_option(name, None) + + def _parse_argument(self, token: str) -> None: + next_argument = len(self._arguments) + last_argument = next_argument - 1 + + # If the input is expecting another argument, add it + if self._definition.has_argument(next_argument): + argument = self._definition.argument(next_argument) + self._arguments[argument.name] = [token] if argument.is_list() else token + # If the last argument is a list, append the token to it + elif ( + self._definition.has_argument(last_argument) + and self._definition.argument(last_argument).is_list() + ): + argument = self._definition.argument(last_argument) + self._arguments[argument.name].append(token) + # Unexpected argument + else: + all_arguments = self._definition.arguments.copy() + command_name = None + argument = all_arguments[0] + if argument and argument.name == "command": + command_name = self._arguments.get("command") + del all_arguments[0] + + if all_arguments: + all_names = " ".join(a.name.join('""') for a in all_arguments) + if command_name: + message = ( + f'Too many arguments to "{command_name}" command, ' + f"expected arguments {all_names}" + ) + else: + message = f"Too many arguments, expected arguments {all_names}" + elif command_name: + message = ( + f'No arguments expected for "{command_name}" command, ' + f'got "{token}"' + ) + else: + message = f'No arguments expected, got "{token}"' + + raise CleoRuntimeError(message) + + def _add_short_option(self, shortcut: str, value: Any) -> None: + if not self._definition.has_shortcut(shortcut): + raise CleoNoSuchOptionError(f'The option "-{shortcut}" does not exist') + + self._add_long_option( + self._definition.option_for_shortcut(shortcut).name, value + ) + + def _add_long_option(self, name: str, value: Any) -> None: + if not self._definition.has_option(name): + raise CleoNoSuchOptionError(f'The option "--{name}" does not exist') + + option = self._definition.option(name) + + if not (value is None or option.accepts_value()): + raise CleoRuntimeError(f'The "--{name}" option does not accept a value') + + if value in ("", None) and option.accepts_value() and self._parsed: + # If the option accepts a value, either required or optional, + # we check if there is one + next_token = self._parsed.pop(0) + if not next_token.startswith("-") or next_token in ("", None): + value = next_token + else: + self._parsed.insert(0, next_token) + + if value is None: + if option.requires_value(): + raise CleoRuntimeError(f'The "--{name}" option requires a value') + + if not option.is_list() and option.is_flag(): + value = True + + if option.is_list(): + if name not in self._options: + self._options[name] = [] + + self._options[name].append(value) + else: + self._options[name] = value diff --git a/conda_lock/_vendor/cleo/io/inputs/definition.py b/conda_lock/_vendor/cleo/io/inputs/definition.py new file mode 100644 index 00000000..f7bdea9b --- /dev/null +++ b/conda_lock/_vendor/cleo/io/inputs/definition.py @@ -0,0 +1,214 @@ +from __future__ import annotations + +import sys + +from typing import TYPE_CHECKING +from typing import Any +from typing import Sequence + +from conda_lock._vendor.cleo.exceptions import CleoLogicError +from conda_lock._vendor.cleo.io.inputs.option import Option + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.io.inputs.argument import Argument + + +class Definition: + """ + A Definition represents a set of command line arguments and 
options. + """ + + def __init__(self, definition: Sequence[Argument | Option] | None = None) -> None: + self._arguments: dict[str, Argument] = {} + self._required_count = 0 + self._has_list_argument = False + self._has_optional = False + self._options: dict[str, Option] = {} + self._shortcuts: dict[str, str] = {} + + self.set_definition(definition or []) + + @property + def arguments(self) -> list[Argument]: + return list(self._arguments.values()) + + @property + def argument_count(self) -> int: + if self._has_list_argument: + return sys.maxsize + + return len(self._arguments) + + @property + def required_argument_count(self) -> int: + return self._required_count + + @property + def argument_defaults(self) -> dict[str, Any]: + values = {} + + for argument in self._arguments.values(): + values[argument.name] = argument.default + + return values + + @property + def options(self) -> list[Option]: + return list(self._options.values()) + + @property + def option_defaults(self) -> dict[str, Any]: + return {o.name: o.default for o in self._options.values()} + + def set_definition(self, definition: Sequence[Argument | Option]) -> None: + arguments = [] + options = [] + + for item in definition: + if isinstance(item, Option): + options.append(item) + else: + arguments.append(item) + + self.set_arguments(arguments) + self.set_options(options) + + def set_arguments(self, arguments: list[Argument]) -> None: + self._arguments = {} + self._required_count = 0 + self._has_list_argument = False + self._has_optional = False + self.add_arguments(arguments) + + def add_arguments(self, arguments: list[Argument]) -> None: + for argument in arguments: + self.add_argument(argument) + + def add_argument(self, argument: Argument) -> None: + if argument.name in self._arguments: + raise CleoLogicError( + f'An argument with name "{argument.name}" already exists' + ) + + if self._has_list_argument: + raise CleoLogicError("Cannot add an argument after a list argument") + + if argument.is_required() and self._has_optional: + raise CleoLogicError("Cannot add a required argument after an optional one") + + if argument.is_list(): + self._has_list_argument = True + + if argument.is_required(): + self._required_count += 1 + else: + self._has_optional = True + + self._arguments[argument.name] = argument + + def argument(self, name: str | int) -> Argument: + if not self.has_argument(name): + raise ValueError(f'The "{name}" argument does not exist') + + if isinstance(name, int): + arguments = list(self._arguments.values()) + return arguments[name] + + return self._arguments[name] + + def has_argument(self, name: str | int) -> bool: + if isinstance(name, int): + # Check if this is a valid argument index + # abs(x + (x < 0)) to normalize negative indices + return abs(name + (name < 0)) < len(self._arguments) + return name in self._arguments + + def set_options(self, options: list[Option]) -> None: + self._options = {} + self._shortcuts = {} + self.add_options(options) + + def add_options(self, options: list[Option]) -> None: + for option in options: + self.add_option(option) + + def add_option(self, option: Option) -> None: + if option.name in self._options and option != self._options[option.name]: + raise CleoLogicError(f'An option named "{option.name}" already exists') + + if option.shortcut: + for shortcut in option.shortcut.split("|"): + if ( + shortcut in self._shortcuts + and option.name != self._shortcuts[shortcut] + ): + raise CleoLogicError( + f'An option with shortcut "{shortcut}" already exists' + ) + + 
self._options[option.name] = option + + if option.shortcut: + for shortcut in option.shortcut.split("|"): + self._shortcuts[shortcut] = option.name + + def option(self, name: str) -> Option: + if not self.has_option(name): + raise ValueError(f'The option "--{name}" option does not exist') + + return self._options[name] + + def has_option(self, name: str) -> bool: + return name in self._options + + def has_shortcut(self, shortcut: str) -> bool: + return shortcut in self._shortcuts + + def option_for_shortcut(self, shortcut: str) -> Option: + return self._options[self.shortcut_to_name(shortcut)] + + def shortcut_to_name(self, shortcut: str) -> str: + if shortcut not in self._shortcuts: + raise ValueError(f'The "-{shortcut}" option does not exist') + + return self._shortcuts[shortcut] + + def synopsis(self, short: bool = False) -> str: + elements = [] + + if short and self._options: + elements.append("[options]") + elif not short: + for option in self._options.values(): + value = "" + if option.accepts_value(): + formatted = ( + option.name.upper() + if option.requires_value() + else f"[{option.name.upper()}]" + ) + value = f" {formatted}" + + shortcut = "" + if option.shortcut: + shortcut = f"-{option.shortcut}|" + + elements.append(f"[{shortcut}--{option.name}{value}]") + + if elements and self._arguments: + elements.append("[--]") + + tail = "" + for argument in self._arguments.values(): + element = f"<{argument.name}>" + if argument.is_list(): + element += "..." + + if not argument.is_required(): + element = "[" + element + tail += "]" + + elements.append(element) + + return " ".join(elements) + tail diff --git a/conda_lock/_vendor/cleo/io/inputs/input.py b/conda_lock/_vendor/cleo/io/inputs/input.py new file mode 100644 index 00000000..7d9f5329 --- /dev/null +++ b/conda_lock/_vendor/cleo/io/inputs/input.py @@ -0,0 +1,179 @@ +from __future__ import annotations + +import re + +from typing import Any +from typing import TextIO + +from conda_lock._vendor.cleo._compat import shell_quote +from conda_lock._vendor.cleo.exceptions import CleoMissingArgumentsError +from conda_lock._vendor.cleo.exceptions import CleoValueError +from conda_lock._vendor.cleo.io.inputs.definition import Definition + + +class Input: + """ + This class is the base class for concrete Input implementations. + """ + + def __init__(self, definition: Definition | None = None) -> None: + self._definition: Definition + self._stream: TextIO = None # type: ignore[assignment] + self._options: dict[str, Any] = {} + self._arguments: dict[str, Any] = {} + self._interactive: bool | None = None + + if definition is None: + self._definition = Definition() + else: + self.bind(definition) + self.validate() + + @property + def arguments(self) -> dict[str, Any]: + return {**self._definition.argument_defaults, **self._arguments} + + @property + def options(self) -> dict[str, Any]: + return {**self._definition.option_defaults, **self._options} + + @property + def stream(self) -> TextIO: + return self._stream + + @property + def first_argument(self) -> str | None: + """ + Returns the first argument from the raw parameters (not parsed). + """ + raise NotImplementedError + + @property + def script_name(self) -> str | None: + raise NotImplementedError + + def read(self, length: int, default: str = "") -> str: + """ + Reads the given amount of characters from the input stream. 
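Reviewer note: `synopsis()` is what produces the usage strings `TextDescriptor` prints. A sketch built with the `helpers.argument`/`helpers.option` factories added earlier in this diff (the names are invented):

```python
from conda_lock._vendor.cleo.helpers import argument, option
from conda_lock._vendor.cleo.io.inputs.definition import Definition

definition = Definition([
    option("env", "e", flag=False, value_required=True),
    argument("package"),
    argument("extras", optional=True, multiple=True),
])

print(definition.synopsis(short=True))
# [options] [--] <package> [<extras>...]
print(definition.synopsis())
# [-e|--env ENV] [--] <package> [<extras>...]
```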
+ """ + if not self.is_interactive(): + return default + + return self._stream.read(length) + + def read_line(self, length: int = -1, default: str = "") -> str: + """ + Reads a line from the input stream. + """ + if not self.is_interactive(): + return default + + return self._stream.readline(length) + + def close(self) -> None: + """ + Closes the input. + """ + self._stream.close() + + def is_closed(self) -> bool: + """ + Returns whether the input is closed. + """ + return self._stream.closed + + def is_interactive(self) -> bool: + return True if self._interactive is None else self._interactive + + def interactive(self, interactive: bool = True) -> None: + self._interactive = interactive + + def bind(self, definition: Definition) -> None: + """ + Binds the current Input instance with + the given definition's arguments and options. + """ + self._arguments = {} + self._options = {} + self._definition = definition + + self._parse() + + def validate(self) -> None: + missing_arguments = [] + + for argument in self._definition.arguments: + if argument.name not in self._arguments and argument.is_required(): + missing_arguments.append(argument.name) + + if missing_arguments: + raise CleoMissingArgumentsError( + f'Not enough arguments (missing: "{", ".join(missing_arguments)}")' + ) + + def argument(self, name: str) -> Any: + if not self._definition.has_argument(name): + raise CleoValueError(f'The argument "{name}" does not exist') + + if name in self._arguments: + return self._arguments[name] + + return self._definition.argument(name).default + + def set_argument(self, name: str, value: Any) -> None: + if not self._definition.has_argument(name): + raise CleoValueError(f'The argument "{name}" does not exist') + + self._arguments[name] = value + + def has_argument(self, name: str) -> bool: + return self._definition.has_argument(name) + + def option(self, name: str) -> Any: + if not self._definition.has_option(name): + raise CleoValueError(f'The option "--{name}" does not exist') + + if name in self._options: + return self._options[name] + + return self._definition.option(name).default + + def set_option(self, name: str, value: Any) -> None: + if not self._definition.has_option(name): + raise CleoValueError(f'The option "--{name}" does not exist') + + self._options[name] = value + + def has_option(self, name: str) -> bool: + return self._definition.has_option(name) + + def escape_token(self, token: str) -> str: + if re.match(r"^[\w-]+$", token): + return token + + return shell_quote(token) + + def set_stream(self, stream: TextIO) -> None: + self._stream = stream + + def has_parameter_option( + self, values: str | list[str], only_params: bool = False + ) -> bool: + """ + Returns true if the raw parameters (not parsed) contain a value. + """ + raise NotImplementedError + + def parameter_option( + self, + values: str | list[str], + default: Any = False, + only_params: bool = False, + ) -> Any: + """ + Returns the value of a raw option (not parsed). 
+ """ + raise NotImplementedError + + def _parse(self) -> None: + raise NotImplementedError diff --git a/conda_lock/_vendor/cleo/io/inputs/option.py b/conda_lock/_vendor/cleo/io/inputs/option.py new file mode 100644 index 00000000..7cc62b27 --- /dev/null +++ b/conda_lock/_vendor/cleo/io/inputs/option.py @@ -0,0 +1,96 @@ +from __future__ import annotations + +import re + +from typing import Any + +from conda_lock._vendor.cleo.exceptions import CleoLogicError +from conda_lock._vendor.cleo.exceptions import CleoValueError + + +class Option: + """ + A command line option. + """ + + def __init__( + self, + name: str, + shortcut: str | None = None, + flag: bool = True, + requires_value: bool = True, + is_list: bool = False, + description: str | None = None, + default: Any | None = None, + ) -> None: + if name.startswith("--"): + name = name[2:] + + if not name: + raise CleoValueError("An option name cannot be empty") + + if shortcut is not None: + shortcuts = re.split(r"\|-?", shortcut.lstrip("-")) + shortcut = "|".join(filter(None, shortcuts)) + + if not shortcut: + raise CleoValueError("An option shortcut cannot be empty") + + self._name = name + self._shortcut = shortcut + self._flag = flag + self._requires_value = requires_value + self._is_list = is_list + self._description = description or "" + self._default = None + + if self._is_list and self._flag: + raise CleoLogicError("A flag option cannot be a list as well") + + self.set_default(default) + + @property + def name(self) -> str: + return self._name + + @property + def shortcut(self) -> str | None: + return self._shortcut + + @property + def description(self) -> str: + return self._description + + @property + def default(self) -> Any | None: + return self._default + + def is_flag(self) -> bool: + return self._flag + + def accepts_value(self) -> bool: + return not self._flag + + def requires_value(self) -> bool: + return not self._flag and self._requires_value + + def is_list(self) -> bool: + return self._is_list + + def set_default(self, default: Any | None = None) -> None: + if self._flag and default is not None: + raise CleoLogicError("A flag option cannot have a default value") + + if self._is_list: + if default is None: + default = [] + elif not isinstance(default, list): + raise CleoLogicError("A default value for a list option must be a list") + + if self._flag: + default = False + + self._default = default + + def __repr__(self) -> str: + return f"Option({self._name})" diff --git a/conda_lock/_vendor/cleo/io/inputs/string_input.py b/conda_lock/_vendor/cleo/io/inputs/string_input.py new file mode 100644 index 00000000..a15fced6 --- /dev/null +++ b/conda_lock/_vendor/cleo/io/inputs/string_input.py @@ -0,0 +1,18 @@ +from __future__ import annotations + +from conda_lock._vendor.cleo.io.inputs.argv_input import ArgvInput +from conda_lock._vendor.cleo.io.inputs.token_parser import TokenParser + + +class StringInput(ArgvInput): + """ + Represents an input provided as a string + """ + + def __init__(self, input: str) -> None: + super().__init__([]) + + self._set_tokens(self._tokenize(input)) + + def _tokenize(self, input: str) -> list[str]: + return TokenParser().parse(input) diff --git a/conda_lock/_vendor/cleo/io/inputs/token_parser.py b/conda_lock/_vendor/cleo/io/inputs/token_parser.py new file mode 100644 index 00000000..c087d366 --- /dev/null +++ b/conda_lock/_vendor/cleo/io/inputs/token_parser.py @@ -0,0 +1,112 @@ +from __future__ import annotations + + +QUOTES = {"'", '"'} + + +class TokenParser: + """ + Parses tokens from a string 
passed to StringArgs. + """ + + def __init__(self) -> None: + self._string: str = "" + self._cursor: int = 0 + self._current: str | None = None + self._next_: str | None = None + + def parse(self, string: str) -> list[str]: + self._string = string + self._cursor = 0 + self._current = None + if string: + self._current = string[0] + + self._next_ = string[1] if len(string) > 1 else None + + return self._parse() + + def _parse(self) -> list[str]: + tokens = [] + + while self._current is not None: + if self._current.isspace(): + # Skip spaces + self._next() + + continue + + tokens.append(self._parse_token()) + + return tokens + + def _next(self) -> None: + """ + Advances the cursor to the next position. + """ + if self._current is None: + return + + self._cursor += 1 + self._current = self._next_ + + if self._cursor + 1 < len(self._string): + self._next_ = self._string[self._cursor + 1] + else: + self._next_ = None + + def _parse_token(self) -> str: + token = "" + + while self._current is not None: + if self._current.isspace(): + self._next() + + break + + if self._current == "\\": + token += self._parse_escape_sequence() + elif self._current in QUOTES: + token += self._parse_quoted_string() + else: + token += self._current + self._next() + + return token + + def _parse_quoted_string(self) -> str: + string = "" + delimiter = self._current + + # Skip first delimiter + self._next() + while self._current is not None: + if self._current == delimiter: + # Skip last delimiter + self._next() + + break + + if self._current == "\\": + string += self._parse_escape_sequence() + elif self._current == '"': + string += f'"{self._parse_quoted_string()}"' + elif self._current == "'": + string += f"'{self._parse_quoted_string()}'" + else: + string += self._current + self._next() + + return string + + def _parse_escape_sequence(self) -> str: + if self._next_ in QUOTES: + sequence = self._next_ + else: + assert self._next_ is not None + sequence = "\\" + self._next_ + + self._next() + self._next() + + return sequence diff --git a/conda_lock/_vendor/cleo/io/io.py b/conda_lock/_vendor/cleo/io/io.py new file mode 100644 index 00000000..b98a08ab --- /dev/null +++ b/conda_lock/_vendor/cleo/io/io.py @@ -0,0 +1,140 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Iterable + +from conda_lock._vendor.cleo.io.outputs.output import Type as OutputType +from conda_lock._vendor.cleo.io.outputs.output import Verbosity + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.io.inputs.input import Input + from conda_lock._vendor.cleo.io.outputs.output import Output + from conda_lock._vendor.cleo.io.outputs.section_output import SectionOutput + + +class IO: + def __init__(self, input: Input, output: Output, error_output: Output) -> None: + self._input = input + self._output = output + self._error_output = error_output + + @property + def input(self) -> Input: + return self._input + + @property + def output(self) -> Output: + return self._output + + @property + def error_output(self) -> Output: + return self._error_output + + def read(self, length: int, default: str = "") -> str: + """ + Reads the given amount of characters from the input stream. + """ + return self._input.read(length, default=default) + + def read_line(self, length: int = -1, default: str = "") -> str: + """ + Reads a line from the input stream. 
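+
+        The given default is returned when the underlying input is not
+        interactive.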
+ """ + return self._input.read_line(length=length, default=default) + + def write_line( + self, + messages: str | Iterable[str], + verbosity: Verbosity = Verbosity.NORMAL, + type: OutputType = OutputType.NORMAL, + ) -> None: + self._output.write_line(messages, verbosity=verbosity, type=type) + + def write( + self, + messages: str | Iterable[str], + new_line: bool = False, + verbosity: Verbosity = Verbosity.NORMAL, + type: OutputType = OutputType.NORMAL, + ) -> None: + self._output.write(messages, new_line=new_line, verbosity=verbosity, type=type) + + def write_error_line( + self, + messages: str | Iterable[str], + verbosity: Verbosity = Verbosity.NORMAL, + type: OutputType = OutputType.NORMAL, + ) -> None: + self._error_output.write_line(messages, verbosity=verbosity, type=type) + + def write_error( + self, + messages: str | Iterable[str], + new_line: bool = False, + verbosity: Verbosity = Verbosity.NORMAL, + type: OutputType = OutputType.NORMAL, + ) -> None: + self._error_output.write( + messages, new_line=new_line, verbosity=verbosity, type=type + ) + + def overwrite(self, messages: str | Iterable[str]) -> None: + from conda_lock._vendor.cleo.cursor import Cursor + + cursor = Cursor(self._output) + cursor.move_to_column(1) + cursor.clear_line() + self.write(messages) + + def overwrite_error(self, messages: str | Iterable[str]) -> None: + from conda_lock._vendor.cleo.cursor import Cursor + + cursor = Cursor(self._error_output) + cursor.move_to_column(1) + cursor.clear_line() + self.write_error(messages) + + def flush(self) -> None: + self._output.flush() + + def is_interactive(self) -> bool: + return self._input.is_interactive() + + def interactive(self, interactive: bool = True) -> None: + self._input.interactive(interactive) + + def decorated(self, decorated: bool = True) -> None: + self._output.decorated(decorated) + self._error_output.decorated(decorated) + + def is_decorated(self) -> bool: + return self._output.is_decorated() + + def supports_utf8(self) -> bool: + return self._output.supports_utf8() + + def set_verbosity(self, verbosity: Verbosity) -> None: + self._output.set_verbosity(verbosity) + self._error_output.set_verbosity(verbosity) + + def is_verbose(self) -> bool: + return self.output.is_verbose() + + def is_very_verbose(self) -> bool: + return self.output.is_very_verbose() + + def is_debug(self) -> bool: + return self.output.is_debug() + + def set_input(self, input: Input) -> None: + self._input = input + + def with_input(self, input: Input) -> IO: + return self.__class__(input, self._output, self._error_output) + + def remove_format(self, text: str) -> str: + return self._output.remove_format(text) + + def section(self) -> SectionOutput: + return self._output.section() diff --git a/conda_lock/_vendor/cleo/io/io_mixin.py b/conda_lock/_vendor/cleo/io/io_mixin.py deleted file mode 100644 index 6bca5406..00000000 --- a/conda_lock/_vendor/cleo/io/io_mixin.py +++ /dev/null @@ -1,112 +0,0 @@ -from clikit.ui.components import ChoiceQuestion -from clikit.ui.components import ConfirmationQuestion -from clikit.ui.components import ProgressBar -from clikit.ui.components import Question - - -class IOMixin(object): - """ - Helpers for IO classes - """ - - def __init__(self, *args, **kwargs): - super(IOMixin, self).__init__(*args, **kwargs) - - self._last_message = "" - self._last_message_err = "" - - def progress_bar(self, max=0): # type: (int) -> ProgressBar - """ - Create a new progress bar - """ - return ProgressBar(self, max) - - def ask(self, question, default=None): - question 
= Question(question, default) - - return self.ask_question(question) - - def ask_hidden(self, question): - question = Question(question) - question.hide() - - return self.ask_question(question) - - def confirm(self, question, default=True, true_answer_regex="(?i)^y"): - return self.ask_question( - ConfirmationQuestion(question, default, true_answer_regex) - ) - - def choice(self, question, choices, default=None): - if default is not None: - default = choices[default] - - return self.ask_question(ChoiceQuestion(question, choices, default)) - - def ask_question(self, question): - """ - Asks a question. - """ - answer = question.ask(self) - - return answer - - def write(self, string, flags=0): - super(IOMixin, self).write(string, flags) - - self._last_message = string - - def error(self, string, flags=0): - super(IOMixin, self).error(string, flags) - - self._last_message = string - - def write_line(self, string, flags=0): - super(IOMixin, self).write_line(string, flags) - - self._last_message = string - - def error_line(self, string, flags=0): - super(IOMixin, self).error_line(string, flags) - - self._last_message = string - - def overwrite(self, message, size=None): - self._do_overwrite(message, size) - - def overwrite_error(self, message, size=None): - self._do_overwrite(message, size, True) - - def _do_overwrite(self, message, size=None, stderr=False): - output = self.output - if stderr: - output = self.error_output - - # since overwrite is supposed to overwrite last message... - if size is None: - # removing possible formatting of lastMessage with strip_tags - if stderr: - last_message = self._last_message_err - else: - last_message = self._last_message - - size = len(output.remove_format(last_message)) - - # ...let's fill its length with backspaces - output.write("\x08" * size) - - # write the new message - output.write(message) - - fill = size - len(output.remove_format(message)) - - if fill > 0: - # whitespace whatever has left - output.write(" " * fill) - # move the cursor back - output.write("\x08" * fill) - - if stderr: - self._last_message_err = message - else: - self._last_message = message diff --git a/conda_lock/_vendor/cleo/io/null_io.py b/conda_lock/_vendor/cleo/io/null_io.py new file mode 100644 index 00000000..7519459f --- /dev/null +++ b/conda_lock/_vendor/cleo/io/null_io.py @@ -0,0 +1,16 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from conda_lock._vendor.cleo.io.inputs.string_input import StringInput +from conda_lock._vendor.cleo.io.io import IO +from conda_lock._vendor.cleo.io.outputs.null_output import NullOutput + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.io.inputs.input import Input + + +class NullIO(IO): + def __init__(self, input: Input | None = None) -> None: + super().__init__(input or StringInput(""), NullOutput(), NullOutput()) diff --git a/conda_lock/_vendor/cleo/io/outputs/__init__.py b/conda_lock/_vendor/cleo/io/outputs/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/conda_lock/_vendor/cleo/io/outputs/buffered_output.py b/conda_lock/_vendor/cleo/io/outputs/buffered_output.py new file mode 100644 index 00000000..8e2df0a2 --- /dev/null +++ b/conda_lock/_vendor/cleo/io/outputs/buffered_output.py @@ -0,0 +1,62 @@ +from __future__ import annotations + +from io import StringIO +from typing import TYPE_CHECKING + +from conda_lock._vendor.cleo.io.outputs.output import Output +from conda_lock._vendor.cleo.io.outputs.output import Verbosity +from conda_lock._vendor.cleo.io.outputs.section_output import 
SectionOutput + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.formatters.formatter import Formatter + + +class BufferedOutput(Output): + def __init__( + self, + verbosity: Verbosity = Verbosity.NORMAL, + decorated: bool = False, + formatter: Formatter | None = None, + supports_utf8: bool = True, + ) -> None: + super().__init__(decorated=decorated, verbosity=verbosity, formatter=formatter) + + self._buffer = StringIO() + self._supports_utf8 = supports_utf8 + + def fetch(self) -> str: + """ + Empties the buffer and returns its content. + """ + content = self._buffer.getvalue() + self._buffer = StringIO() + + return content + + def clear(self) -> None: + """ + Empties the buffer. + """ + self._buffer = StringIO() + + def supports_utf8(self) -> bool: + return self._supports_utf8 + + def set_supports_utf8(self, supports_utf8: bool) -> None: + self._supports_utf8 = supports_utf8 + + def section(self) -> SectionOutput: + return SectionOutput( + self._buffer, + self._section_outputs, + verbosity=self.verbosity, + decorated=self.is_decorated(), + formatter=self.formatter, + ) + + def _write(self, message: str, new_line: bool = False) -> None: + self._buffer.write(message) + + if new_line: + self._buffer.write("\n") diff --git a/conda_lock/_vendor/cleo/io/outputs/null_output.py b/conda_lock/_vendor/cleo/io/outputs/null_output.py new file mode 100644 index 00000000..3f22c8eb --- /dev/null +++ b/conda_lock/_vendor/cleo/io/outputs/null_output.py @@ -0,0 +1,60 @@ +from __future__ import annotations + +from typing import Iterable + +from conda_lock._vendor.cleo.io.outputs.output import Output +from conda_lock._vendor.cleo.io.outputs.output import Type +from conda_lock._vendor.cleo.io.outputs.output import Verbosity + + +class NullOutput(Output): + @property + def verbosity(self) -> Verbosity: + return Verbosity.QUIET + + def is_decorated(self) -> bool: + return False + + def decorated(self, decorated: bool = True) -> None: + pass + + def supports_utf8(self) -> bool: + return True + + def set_verbosity(self, verbosity: Verbosity) -> None: + pass + + def is_quiet(self) -> bool: + return True + + def is_verbose(self) -> bool: + return False + + def is_very_verbose(self) -> bool: + return False + + def is_debug(self) -> bool: + return False + + def write_line( + self, + messages: str | Iterable[str], + verbosity: Verbosity = Verbosity.NORMAL, + type: Type = Type.NORMAL, + ) -> None: + pass + + def write( + self, + messages: str | Iterable[str], + new_line: bool = False, + verbosity: Verbosity = Verbosity.NORMAL, + type: Type = Type.NORMAL, + ) -> None: + pass + + def flush(self) -> None: + pass + + def _write(self, message: str, new_line: bool = False) -> None: + pass diff --git a/conda_lock/_vendor/cleo/io/outputs/output.py b/conda_lock/_vendor/cleo/io/outputs/output.py new file mode 100644 index 00000000..84248b28 --- /dev/null +++ b/conda_lock/_vendor/cleo/io/outputs/output.py @@ -0,0 +1,119 @@ +from __future__ import annotations + +from enum import Enum +from typing import TYPE_CHECKING +from typing import Iterable + +from conda_lock._vendor.cleo._utils import strip_tags +from conda_lock._vendor.cleo.formatters.formatter import Formatter + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.io.outputs.section_output import SectionOutput + + +class Verbosity(Enum): + QUIET: int = 16 + NORMAL: int = 32 + VERBOSE: int = 64 + VERY_VERBOSE: int = 128 + DEBUG: int = 256 + + +class Type(Enum): + NORMAL: int = 1 + RAW: int = 2 + PLAIN: int = 4 + + +class Output: + def __init__( + self, + verbosity: 
Verbosity = Verbosity.NORMAL, + decorated: bool = False, + formatter: Formatter | None = None, + ) -> None: + self._verbosity: Verbosity = verbosity + self._formatter = formatter or Formatter() + self._formatter.decorated(decorated) + + self._section_outputs: list[SectionOutput] = [] + + @property + def formatter(self) -> Formatter: + return self._formatter + + @property + def verbosity(self) -> Verbosity: + return self._verbosity + + def set_formatter(self, formatter: Formatter) -> None: + self._formatter = formatter + + def is_decorated(self) -> bool: + return self._formatter.is_decorated() + + def decorated(self, decorated: bool = True) -> None: + self._formatter.decorated(decorated) + + def supports_utf8(self) -> bool: + """ + Returns whether the stream supports the UTF-8 encoding. + """ + return True + + def set_verbosity(self, verbosity: Verbosity) -> None: + self._verbosity = verbosity + + def is_quiet(self) -> bool: + return self._verbosity is Verbosity.QUIET + + def is_verbose(self) -> bool: + return self._verbosity.value >= Verbosity.VERBOSE.value + + def is_very_verbose(self) -> bool: + return self._verbosity.value >= Verbosity.VERY_VERBOSE.value + + def is_debug(self) -> bool: + return self._verbosity is Verbosity.DEBUG + + def write_line( + self, + messages: str | Iterable[str], + verbosity: Verbosity = Verbosity.NORMAL, + type: Type = Type.NORMAL, + ) -> None: + self.write(messages, new_line=True, verbosity=verbosity, type=type) + + def write( + self, + messages: str | Iterable[str], + new_line: bool = False, + verbosity: Verbosity = Verbosity.NORMAL, + type: Type = Type.NORMAL, + ) -> None: + if isinstance(messages, str): + messages = [messages] + + if verbosity.value > self.verbosity.value: + return + + for message in messages: + if type is Type.NORMAL: + message = self._formatter.format(message) + elif type is Type.PLAIN: + message = strip_tags(self._formatter.format(message)) + + self._write(message, new_line=new_line) + + def flush(self) -> None: + pass + + def remove_format(self, text: str) -> str: + return self.formatter.remove_format(text) + + def section(self) -> SectionOutput: + raise NotImplementedError + + def _write(self, message: str, new_line: bool = False) -> None: + raise NotImplementedError diff --git a/conda_lock/_vendor/cleo/io/outputs/section_output.py b/conda_lock/_vendor/cleo/io/outputs/section_output.py new file mode 100644 index 00000000..d7b2adda --- /dev/null +++ b/conda_lock/_vendor/cleo/io/outputs/section_output.py @@ -0,0 +1,106 @@ +from __future__ import annotations + +import math + +from typing import TYPE_CHECKING +from typing import TextIO + +from conda_lock._vendor.cleo.io.outputs.output import Verbosity +from conda_lock._vendor.cleo.io.outputs.stream_output import StreamOutput +from conda_lock._vendor.cleo.terminal import Terminal + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.formatters.formatter import Formatter + + +class SectionOutput(StreamOutput): + def __init__( + self, + stream: TextIO, + sections: list[SectionOutput], + verbosity: Verbosity = Verbosity.NORMAL, + decorated: bool | None = None, + formatter: Formatter | None = None, + ) -> None: + super().__init__( + stream, verbosity=verbosity, decorated=decorated, formatter=formatter + ) + + self._content: list[str] = [] + self._lines = 0 + sections.insert(0, self) + self._sections = sections + self._terminal = Terminal().size + + @property + def content(self) -> str: + return "".join(self._content) + + @property + def lines(self) -> int: + return self._lines + + def 
clear(self, lines: int | None = None) -> None: + if not (self._content and self.is_decorated()): + return + + if lines: + # Multiply lines by 2 to cater for each new line added between content + del self._content[-lines * 2 :] + else: + lines = self._lines + self._content = [] + + self._lines -= lines + + super()._write( + self._pop_stream_content_until_current_section(lines), new_line=False + ) + + def overwrite(self, message: str) -> None: + self.clear() + self.write_line(message) + + def add_content(self, content: str) -> None: + for line_content in content.split("\n"): + self._lines += ( + math.ceil( + len(self.remove_format(line_content).replace("\t", " " * 8)) + / self._terminal.width + ) + or 1 + ) + self._content.append(line_content) + self._content.append("\n") + + def _write(self, message: str, new_line: bool = False) -> None: + if not self.is_decorated(): + return super()._write(message, new_line=new_line) + + erased_content = self._pop_stream_content_until_current_section() + + self.add_content(message) + + super()._write(message, new_line=True) + super()._write(erased_content, new_line=False) + + def _pop_stream_content_until_current_section( + self, lines_to_clear_count: int = 0 + ) -> str: + erased_content = [] + + for section in self._sections: + if section is self: + break + + lines_to_clear_count += section.lines + erased_content.append(section.content) + + if lines_to_clear_count > 0: + # Move cursor up n lines + super()._write(f"\x1b[{lines_to_clear_count}A", new_line=False) + # Erase to end of screen + super()._write("\x1b[0J", new_line=False) + + return "".join(reversed(erased_content)) diff --git a/conda_lock/_vendor/cleo/io/outputs/stream_output.py b/conda_lock/_vendor/cleo/io/outputs/stream_output.py new file mode 100644 index 00000000..17dff6d1 --- /dev/null +++ b/conda_lock/_vendor/cleo/io/outputs/stream_output.py @@ -0,0 +1,154 @@ +from __future__ import annotations + +import codecs +import io +import locale +import os +import sys + +from typing import TYPE_CHECKING +from typing import TextIO +from typing import cast + +from conda_lock._vendor.cleo.io.outputs.output import Output +from conda_lock._vendor.cleo.io.outputs.output import Verbosity + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.formatters.formatter import Formatter + from conda_lock._vendor.cleo.io.outputs.section_output import SectionOutput + + +class StreamOutput(Output): + FILE_TYPE_CHAR = 0x0002 + FILE_TYPE_REMOTE = 0x8000 + ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 + + def __init__( + self, + stream: TextIO, + verbosity: Verbosity = Verbosity.NORMAL, + decorated: bool | None = None, + formatter: Formatter | None = None, + ) -> None: + self._stream = stream + self._supports_utf8 = self._get_utf8_support_info() + super().__init__( + verbosity=verbosity, + decorated=decorated or self._has_color_support(), + formatter=formatter, + ) + + @property + def stream(self) -> TextIO: + return self._stream + + def supports_utf8(self) -> bool: + return self._supports_utf8 + + def _get_utf8_support_info(self) -> bool: + """ + Returns whether the stream supports the UTF-8 encoding. 
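+
+        Prefers the stream's own encoding when it reports one, falling back
+        to the locale's preferred encoding.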
+ """ + encoding = self._stream.encoding or locale.getpreferredencoding(False) + + try: + return codecs.lookup(encoding).name == "utf-8" + except Exception: + return True + + def flush(self) -> None: + self._stream.flush() + + def section(self) -> SectionOutput: + from conda_lock._vendor.cleo.io.outputs.section_output import SectionOutput + + return SectionOutput( + self._stream, + self._section_outputs, + verbosity=self.verbosity, + decorated=self.is_decorated(), + formatter=self.formatter, + ) + + def _write(self, message: str, new_line: bool = False) -> None: + if new_line: + message += "\n" + + self._stream.write(message) + self._stream.flush() + + def _has_color_support(self) -> bool: + # Follow https://no-color.org/ + if "NO_COLOR" in os.environ: + return False + + if os.getenv("TERM_PROGRAM") == "Hyper": + return True + + if sys.platform == "win32": + shell_supported = ( + os.getenv("ANSICON") is not None + or os.getenv("ConEmuANSI") == "ON" # noqa: SIM112 + or os.getenv("TERM") == "xterm" + ) + + if shell_supported: + return True + + if not hasattr(self._stream, "fileno"): + return False + + # Checking for Windows version + # If we have a compatible version + # activate color support + windows_version = sys.getwindowsversion() + major, build = windows_version[0], windows_version[2] + if (major, build) < (10, 14393): + return False + + # Activate colors if possible + import ctypes + import ctypes.wintypes + + kernel32 = ctypes.windll.kernel32 + + fileno = self._stream.fileno() + + if fileno == 1: + h = kernel32.GetStdHandle(-11) + elif fileno == 2: + h = kernel32.GetStdHandle(-12) + else: + return False + + if h is None or h == ctypes.wintypes.HANDLE(-1): + return False + + if ( + kernel32.GetFileType(h) & ~self.FILE_TYPE_REMOTE + ) != self.FILE_TYPE_CHAR: + return False + + mode = ctypes.wintypes.DWORD() + if not kernel32.GetConsoleMode(h, ctypes.byref(mode)): + return False + + if (mode.value & self.ENABLE_VIRTUAL_TERMINAL_PROCESSING) != 0: + return True + + return cast( + bool, + kernel32.SetConsoleMode( + h, mode.value | self.ENABLE_VIRTUAL_TERMINAL_PROCESSING + ) + != 0, + ) + + if not hasattr(self._stream, "fileno"): + return False + + try: + return os.isatty(self._stream.fileno()) + except io.UnsupportedOperation: + return False diff --git a/conda_lock/_vendor/cleo/loaders/__init__.py b/conda_lock/_vendor/cleo/loaders/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/conda_lock/_vendor/cleo/loaders/command_loader.py b/conda_lock/_vendor/cleo/loaders/command_loader.py new file mode 100644 index 00000000..f1480606 --- /dev/null +++ b/conda_lock/_vendor/cleo/loaders/command_loader.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.commands.command import Command + + +class CommandLoader: + @property + def names(self) -> list[str]: + """ + All registered command names. + """ + raise NotImplementedError + + def get(self, name: str) -> Command: + """ + Loads a command. + """ + raise NotImplementedError + + def has(self, name: str) -> bool: + """ + Checks whether a command exists or not. 
+ """ + raise NotImplementedError diff --git a/conda_lock/_vendor/cleo/loaders/factory_command_loader.py b/conda_lock/_vendor/cleo/loaders/factory_command_loader.py new file mode 100644 index 00000000..5fb32581 --- /dev/null +++ b/conda_lock/_vendor/cleo/loaders/factory_command_loader.py @@ -0,0 +1,32 @@ +from __future__ import annotations + +from typing import Callable + +from conda_lock._vendor.cleo.commands.command import Command +from conda_lock._vendor.cleo.exceptions import CleoCommandNotFoundError +from conda_lock._vendor.cleo.loaders.command_loader import CommandLoader + + +Factory = Callable[[], Command] + + +class FactoryCommandLoader(CommandLoader): + """ + A simple command loader using factories to instantiate commands lazily. + """ + + def __init__(self, factories: dict[str, Factory]) -> None: + self._factories = factories + + @property + def names(self) -> list[str]: + return list(self._factories) + + def has(self, name: str) -> bool: + return name in self._factories + + def get(self, name: str) -> Command: + if name not in self._factories: + raise CleoCommandNotFoundError(name) + + return self._factories[name]() diff --git a/conda_lock/_vendor/cleo/py.typed b/conda_lock/_vendor/cleo/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/conda_lock/_vendor/cleo/terminal.py b/conda_lock/_vendor/cleo/terminal.py new file mode 100644 index 00000000..a75f341f --- /dev/null +++ b/conda_lock/_vendor/cleo/terminal.py @@ -0,0 +1,67 @@ +from __future__ import annotations + +import os +import sys + +from typing import NamedTuple + + +class TerminalSize(NamedTuple): + width: int + height: int + + +class Terminal: + def __init__( + self, + width: int | None = None, + height: int | None = None, + fallback: tuple[int, int] | None = None, + ) -> None: + self._width = width + self._height = height + self._fallback = TerminalSize(*(fallback or (80, 25))) + + @property + def width(self) -> int: + return self.size.width + + @property + def height(self) -> int: + return self.size.height + + @property + def size(self) -> TerminalSize: + return self._get_terminal_size() + + def _get_terminal_size(self) -> TerminalSize: + if not (self._width is None or self._height is None): + return TerminalSize(self._width, self._height) + + width = 0 + height = 0 + + columns = os.environ.get("COLUMNS") + if columns is not None and columns.isdigit(): + width = int(columns) + lines = os.environ.get("LINES") + if lines is not None and lines.isdigit(): + height = int(lines) + + if width <= 0 or height <= 0: + try: + os_size = os.get_terminal_size(sys.__stdout__.fileno()) + size = TerminalSize(*os_size) + except (AttributeError, ValueError, OSError): + # stdout is None, closed, detached, or not a terminal, or + # os.get_terminal_size() is unsupported # noqa: ERA001 + size = self._fallback + if width <= 0: + width = size.width or self._fallback.width + if height <= 0: + height = size.height or self._fallback.height + + return TerminalSize( + width if self._width is None else self._width, + height if self._height is None else self._height, + ) diff --git a/conda_lock/_vendor/cleo/testers/__init__.py b/conda_lock/_vendor/cleo/testers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/conda_lock/_vendor/cleo/testers/application_tester.py b/conda_lock/_vendor/cleo/testers/application_tester.py new file mode 100644 index 00000000..fa977443 --- /dev/null +++ b/conda_lock/_vendor/cleo/testers/application_tester.py @@ -0,0 +1,75 @@ +from __future__ import annotations + +from io import StringIO 
+from typing import TYPE_CHECKING + +from conda_lock._vendor.cleo.io.buffered_io import BufferedIO +from conda_lock._vendor.cleo.io.inputs.string_input import StringInput +from conda_lock._vendor.cleo.io.outputs.buffered_output import BufferedOutput + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.application import Application + from conda_lock._vendor.cleo.io.outputs.output import Verbosity + + +class ApplicationTester: + """ + Eases the testing of console applications. + """ + + def __init__(self, application: Application) -> None: + self._application = application + self._application.auto_exits(False) + self._io = BufferedIO() + self._status_code = 0 + + @property + def application(self) -> Application: + return self._application + + @property + def io(self) -> BufferedIO: + return self._io + + @property + def status_code(self) -> int: + return self._status_code + + def execute( + self, + args: str = "", + inputs: str | None = None, + interactive: bool = True, + verbosity: Verbosity | None = None, + decorated: bool = False, + supports_utf8: bool = True, + ) -> int: + """ + Executes the command + """ + self._io.clear() + + self._io.set_input(StringInput(args)) + self._io.decorated(decorated) + assert isinstance(self._io.output, BufferedOutput) + assert isinstance(self._io.error_output, BufferedOutput) + self._io.output.set_supports_utf8(supports_utf8) + self._io.error_output.set_supports_utf8(supports_utf8) + + if inputs is not None: + self._io.input.set_stream(StringIO(inputs)) + + if interactive is not None: + self._io.interactive(interactive) + + if verbosity is not None: + self._io.set_verbosity(verbosity) + + self._status_code = self._application.run( + self._io.input, + self._io.output, + self._io.error_output, + ) + + return self._status_code diff --git a/conda_lock/_vendor/cleo/testers/command_tester.py b/conda_lock/_vendor/cleo/testers/command_tester.py new file mode 100644 index 00000000..e2e08a37 --- /dev/null +++ b/conda_lock/_vendor/cleo/testers/command_tester.py @@ -0,0 +1,90 @@ +from __future__ import annotations + +from io import StringIO +from typing import TYPE_CHECKING + +from conda_lock._vendor.cleo.io.buffered_io import BufferedIO +from conda_lock._vendor.cleo.io.inputs.argv_input import ArgvInput +from conda_lock._vendor.cleo.io.inputs.string_input import StringInput +from conda_lock._vendor.cleo.io.outputs.buffered_output import BufferedOutput + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.commands.command import Command + from conda_lock._vendor.cleo.io.outputs.output import Verbosity + + +class CommandTester: + """ + Eases the testing of console commands. 
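+
+    Usage sketch ("GreetCommand" stands in for any concrete Command subclass):
+
+        tester = CommandTester(GreetCommand())
+        status = tester.execute("--name World")
+        output = tester.io.fetch_output()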
+ """ + + def __init__(self, command: Command) -> None: + self._command = command + self._io = BufferedIO() + self._inputs: list[str] = [] + self._status_code: int | None = None + + @property + def command(self) -> Command: + return self._command + + @property + def io(self) -> BufferedIO: + return self._io + + @property + def status_code(self) -> int | None: + return self._status_code + + def execute( + self, + args: str = "", + inputs: str | None = None, + interactive: bool | None = None, + verbosity: Verbosity | None = None, + decorated: bool | None = None, + supports_utf8: bool = True, + ) -> int: + """ + Executes the command + """ + application = self._command.application + + input_: StringInput | ArgvInput = StringInput(args) + if ( + application is not None + and application.definition.has_argument("command") + and self._command.name is not None + ): + name = self._command.name + if " " in name: + # If the command is namespaced we rearrange + # the input to parse it as a single argument + argv = [application.name, self._command.name, *input_._tokens] + + input_ = ArgvInput(argv) + else: + input_ = StringInput(name + " " + args) + + self._io.set_input(input_) + assert isinstance(self._io.output, BufferedOutput) + assert isinstance(self._io.error_output, BufferedOutput) + self._io.output.set_supports_utf8(supports_utf8) + self._io.error_output.set_supports_utf8(supports_utf8) + + if inputs is not None: + self._io.input.set_stream(StringIO(inputs)) + + if interactive is not None: + self._io.interactive(interactive) + + if verbosity is not None: + self._io.set_verbosity(verbosity) + + if decorated is not None: + self._io.decorated(decorated) + + self._status_code = self._command.run(self._io) + + return self._status_code diff --git a/conda_lock/_vendor/cleo/ui/__init__.py b/conda_lock/_vendor/cleo/ui/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/conda_lock/_vendor/cleo/ui/choice_question.py b/conda_lock/_vendor/cleo/ui/choice_question.py new file mode 100644 index 00000000..ad98511b --- /dev/null +++ b/conda_lock/_vendor/cleo/ui/choice_question.py @@ -0,0 +1,146 @@ +from __future__ import annotations + +import re + +from typing import TYPE_CHECKING +from typing import Any +from typing import cast + +from conda_lock._vendor.cleo.exceptions import CleoValueError +from conda_lock._vendor.cleo.ui.question import Question + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.io.io import IO + + +class SelectChoiceValidator: + def __init__(self, question: ChoiceQuestion) -> None: + """ + Constructor. + """ + self._question = question + self._values = question.choices + + def validate(self, selected: Any) -> str | list[str] | None: + """ + Validate a choice. + """ + # Collapse all spaces. + if isinstance(selected, int): + selected = str(selected) + + if selected is None: + return None + + if self._question.supports_multiple_choices(): + # Check for a separated comma values + _selected = selected.replace(" ", "") + if not re.match(r"^[a-zA-Z0-9_-]+(?:,[a-zA-Z0-9_-]+)*$", _selected): + raise CleoValueError(self._question.error_message.format(selected)) + + selected_choices = _selected.split(",") + else: + selected_choices = [selected] + + multiselect_choices = [] + for value in selected_choices: + results = [] + + for key, choice in enumerate(self._values): + if choice == value: + results.append(key) + + if len(results) > 1: + raise CleoValueError( + "The provided answer is ambiguous. " + f"Value should be one of {' or '.join(str(r) for r in results)}." 
+ ) + + if value in self._values: + result = value + elif value.isdigit() and 0 <= int(value) < len(self._values): + result = self._values[int(value)] + else: + raise CleoValueError(self._question.error_message.format(value)) + + multiselect_choices.append(result) + + if self._question.supports_multiple_choices(): + return multiselect_choices + + return cast("str | list[str] | None", multiselect_choices[0]) + + +class ChoiceQuestion(Question): + """ + Multiple choice question. + """ + + def __init__( + self, question: str, choices: list[str], default: Any | None = None + ) -> None: + super().__init__(question, default) + + self._multi_select = False + self._choices = choices + self._validator = SelectChoiceValidator(self).validate + self._autocomplete_values = choices + self._prompt = " > " + self._error_message = 'Value "{}" is invalid' + + @property + def error_message(self) -> str: + return self._error_message + + @property + def choices(self) -> list[str]: + return self._choices + + def supports_multiple_choices(self) -> bool: + return self._multi_select + + def set_multi_select(self, multi_select: bool) -> None: + self._multi_select = multi_select + + def set_error_message(self, message: str) -> None: + self._error_message = message + + def _write_prompt(self, io: IO) -> None: + """ + Outputs the question prompt. + """ + message = self._question + default = self._default + + if default is None: + message = f"{message}: " + elif self._multi_select: + choices = self._choices + default = default.split(",") + + for i, value in enumerate(default): + default[i] = choices[int(value.strip())] + + message = ( + f"{message} " + f"[{', '.join(default)}]:" + ) + else: + choices = self._choices + message = ( + f"{message} " + f"[{choices[int(default)]}]:" + ) + + width = len(str(len(self._choices) - 1)) if len(self._choices) > 1 else 1 + + messages = [message] + for key, value in enumerate(self._choices): + messages.append(f" [{key: {width}}] {value}") + + io.write_error_line("\n".join(messages)) + + message = self._prompt + + io.write_error(message) diff --git a/conda_lock/_vendor/cleo/ui/component.py b/conda_lock/_vendor/cleo/ui/component.py new file mode 100644 index 00000000..221dccc4 --- /dev/null +++ b/conda_lock/_vendor/cleo/ui/component.py @@ -0,0 +1,5 @@ +from __future__ import annotations + + +class Component: + name: str = "" diff --git a/conda_lock/_vendor/cleo/ui/confirmation_question.py b/conda_lock/_vendor/cleo/ui/confirmation_question.py new file mode 100644 index 00000000..d2367ec3 --- /dev/null +++ b/conda_lock/_vendor/cleo/ui/confirmation_question.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +import re + +from typing import TYPE_CHECKING + +from conda_lock._vendor.cleo.ui.question import Question + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.io.io import IO + + +class ConfirmationQuestion(Question): + """ + Represents a yes/no question. + """ + + def __init__( + self, question: str, default: bool = True, true_answer_regex: str = r"(?i)^y" + ) -> None: + super().__init__(question, default) + + self._true_answer_regex = true_answer_regex + self._normalizer = self._default_normalizer + + def _write_prompt(self, io: IO) -> None: + message = ( + f"{self._question} (yes/no) " + f'[{"yes" if self._default else "no"}] ' + ) + + io.write_error(message) + + def _default_normalizer(self, answer: str) -> bool: + """ + Default answer normalizer. 
+ """ + if isinstance(answer, bool): + return answer + + answer_is_true = re.match(self._true_answer_regex, answer) is not None + if self.default is False: + return bool(answer and answer_is_true) + + return not answer or answer_is_true diff --git a/conda_lock/_vendor/cleo/ui/exception_trace.py b/conda_lock/_vendor/cleo/ui/exception_trace.py new file mode 100644 index 00000000..cfc2e232 --- /dev/null +++ b/conda_lock/_vendor/cleo/ui/exception_trace.py @@ -0,0 +1,432 @@ +from __future__ import annotations + +import ast +import builtins +import inspect +import io +import keyword +import os +import re +import sys +import tokenize + +from typing import TYPE_CHECKING +from typing import ClassVar + +from crashtest.frame_collection import FrameCollection + +from conda_lock._vendor.cleo.formatters.formatter import Formatter + + +if TYPE_CHECKING: + from crashtest.frame import Frame + from crashtest.solution_providers.solution_provider_repository import ( + SolutionProviderRepository, + ) + + from conda_lock._vendor.cleo.io.io import IO + from conda_lock._vendor.cleo.io.outputs.output import Output + + +class Highlighter: + TOKEN_DEFAULT = "token_default" + TOKEN_COMMENT = "token_comment" + TOKEN_STRING = "token_string" + TOKEN_NUMBER = "token_number" + TOKEN_KEYWORD = "token_keyword" + TOKEN_BUILTIN = "token_builtin" + TOKEN_OP = "token_op" + LINE_MARKER = "line_marker" + LINE_NUMBER = "line_number" + + DEFAULT_THEME: ClassVar[dict[str, str]] = { + TOKEN_STRING: "fg=yellow;options=bold", + TOKEN_NUMBER: "fg=blue;options=bold", + TOKEN_COMMENT: "fg=default;options=dark,italic", + TOKEN_KEYWORD: "fg=magenta;options=bold", + TOKEN_BUILTIN: "fg=default;options=bold", + TOKEN_DEFAULT: "fg=default", + TOKEN_OP: "fg=default;options=dark", + LINE_MARKER: "fg=red;options=bold", + LINE_NUMBER: "fg=default;options=dark", + } + + KEYWORDS: ClassVar[set[str]] = set(keyword.kwlist) + BUILTINS: ClassVar[set[str]] = set(dir(builtins)) + + UI: ClassVar[dict[bool, dict[str, str]]] = { + False: {"arrow": ">", "delimiter": "|"}, + True: {"arrow": "→", "delimiter": "│"}, + } + + def __init__(self, supports_utf8: bool = True) -> None: + self._theme = self.DEFAULT_THEME.copy() + self._ui = self.UI[supports_utf8] + + def code_snippet( + self, source: str, line: int, lines_before: int = 2, lines_after: int = 2 + ) -> list[str]: + token_lines = self.highlighted_lines(source) + token_lines = self.line_numbers(token_lines, line) + + offset = line - lines_before - 1 + offset = max(offset, 0) + length = lines_after + lines_before + 1 + return token_lines[offset : offset + length] + + def highlighted_lines(self, source: str) -> list[str]: + source = source.replace("\r\n", "\n").replace("\r", "\n") + + return self.split_to_lines(source) + + def split_to_lines(self, source: str) -> list[str]: + lines = [] + current_line = 1 + current_col = 0 + buffer = "" + current_type = None + source_io = io.BytesIO(source.encode()) + formatter = Formatter() + + def readline() -> bytes: + return formatter.format( + formatter.escape(source_io.readline().decode()) + ).encode() + + tokens = tokenize.tokenize(readline) + line = "" + for token_info in tokens: + token_type, token_string, start, end, _ = token_info + lineno = start[0] + if lineno == 0: + # Encoding line + continue + + if token_type == tokenize.ENDMARKER: + # End of source + if current_type is None: + current_type = self.TOKEN_DEFAULT + + line += f"<{self._theme[current_type]}>{buffer}" + lines.append(line) + break + + if lineno > current_line: + if current_type is None: + current_type 
= self.TOKEN_DEFAULT + + diff = lineno - current_line + if diff > 1: + lines += [""] * (diff - 1) + + stripped_buffer = buffer.rstrip("\n") + line += f"<{self._theme[current_type]}>{stripped_buffer}" + + # New line + lines.append(line) + line = "" + current_line = lineno + current_col = 0 + buffer = "" + + if token_string in self.KEYWORDS: + new_type = self.TOKEN_KEYWORD + elif token_string in self.BUILTINS or token_string == "self": + new_type = self.TOKEN_BUILTIN + elif token_type == tokenize.STRING: + new_type = self.TOKEN_STRING + elif token_type == tokenize.NUMBER: + new_type = self.TOKEN_NUMBER + elif token_type == tokenize.COMMENT: + new_type = self.TOKEN_COMMENT + elif token_type == tokenize.OP: + new_type = self.TOKEN_OP + elif token_type == tokenize.NEWLINE: + continue + else: + new_type = self.TOKEN_DEFAULT + + if current_type is None: + current_type = new_type + + if start[1] > current_col: + buffer += token_info.line[current_col : start[1]] + + if current_type != new_type: + line += f"<{self._theme[current_type]}>{buffer}" + buffer = "" + current_type = new_type + + if lineno < end[0]: + # The token spans multiple lines + token_lines = token_string.split("\n") + line += f"<{self._theme[current_type]}>{token_lines[0]}" + lines.append(line) + for token_line in token_lines[1:-1]: + lines.append(f"<{self._theme[current_type]}>{token_line}") + + current_line = end[0] + buffer = token_lines[-1][: end[1]] + line = "" + continue + + buffer += token_string + current_col = end[1] + current_line = lineno + + return lines + + def line_numbers(self, lines: list[str], mark_line: int | None = None) -> list[str]: + max_line_length = max(3, len(str(len(lines)))) + + snippet_lines = [] + marker = f"<{self._theme[self.LINE_MARKER]}>{self._ui['arrow']} " + no_marker = " " + for i, line in enumerate(lines): + snippet = "" + if mark_line is not None: + snippet = marker if mark_line == i + 1 else no_marker + + line_number = f"{i + 1:>{max_line_length}}" + styling = ( + "fg=default;options=bold" + if mark_line == i + 1 + else self._theme[self.LINE_NUMBER] + ) + snippet += ( + f"<{styling}>" + f"{line_number}<{self._theme[self.LINE_NUMBER]}>" + f"{self._ui['delimiter']} {line}" + ) + snippet_lines.append(snippet) + + return snippet_lines + + +class ExceptionTrace: + """ + Renders the trace of an exception. + """ + + THEME: ClassVar[dict[str, str]] = { + "comment": "", + "keyword": "", + "builtin": "", + "literal": "", + } + + AST_ELEMENTS: ClassVar[dict[str, list[str]]] = { + "builtins": dir(builtins), + "keywords": [ + getattr(ast, cls) + for cls in dir(ast) + if keyword.iskeyword(cls.lower()) + and inspect.isclass(getattr(ast, cls)) + and issubclass(getattr(ast, cls), ast.AST) + ], + } + + _FRAME_SNIPPET_CACHE: ClassVar[dict[tuple[Frame, int, int], list[str]]] = {} + + def __init__( + self, + exception: Exception, + solution_provider_repository: SolutionProviderRepository | None = None, + ) -> None: + self._exception = exception + self._solution_provider_repository = solution_provider_repository + self._exc_info = sys.exc_info() + self._ignore: str | None = None + + def ignore_files_in(self, ignore: str) -> ExceptionTrace: + self._ignore = ignore + + return self + + def render(self, io: IO | Output, simple: bool = False) -> None: + # If simple rendering wouldn't show anything useful, abandon it. 
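+        # str(self._exception) can be empty (e.g. a bare AssertionError), in
+        # which case we fall back to full trace rendering below.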
+ simple_string = str(self._exception) if simple else "" + if simple_string: + io.write_line("") + io.write_line(f"{simple_string}") + else: + self._render_exception(io, self._exception) + + self._render_solution(io, self._exception) + + def _render_exception(self, io: IO | Output, exception: BaseException) -> None: + from crashtest.inspector import Inspector + + inspector = Inspector(exception) + if not inspector.frames: + return + + if inspector.has_previous_exception(): + assert inspector.previous_exception is not None # make mypy happy + self._render_exception(io, inspector.previous_exception) + io.write_line("") + io.write_line( + "The following error occurred when trying to handle this error:" + ) + io.write_line("") + + self._render_trace(io, inspector.frames) + + self._render_line(io, f"{inspector.exception_name}", True) + io.write_line("") + exception_message = ( + Formatter().format(inspector.exception_message).replace("\n", "\n ") + ) + self._render_line(io, f"{exception_message}") + + current_frame = inspector.frames[-1] + self._render_snippet(io, current_frame) + + def _render_snippet(self, io: IO | Output, frame: Frame) -> None: + self._render_line( + io, + f"at {self._get_relative_file_path(frame.filename)}" + f":{frame.lineno} in {frame.function}", + True, + ) + + code_lines = Highlighter(supports_utf8=io.supports_utf8()).code_snippet( + frame.file_content, frame.lineno, 4, 4 + ) + + for code_line in code_lines: + self._render_line(io, code_line, indent=4) + + def _render_solution(self, io: IO | Output, exception: Exception) -> None: + if self._solution_provider_repository is None: + return + + solutions = self._solution_provider_repository.get_solutions_for_exception( + exception + ) + symbol = "•" if io.supports_utf8() else "*" + + for solution in solutions: + title = solution.solution_title + description = solution.solution_description + links = solution.documentation_links + + description = description.replace("\n", "\n ").strip(" ") + + joined_links = ",".join(f"\n {link}" for link in links) + self._render_line( + io, + f"{symbol} " + f"{title.rstrip('.')}:" + f" {description}{joined_links}", + True, + ) + + def _render_trace(self, io: IO | Output, frames: FrameCollection) -> None: + stack_frames = FrameCollection() + for frame in frames: + if ( + self._ignore + and re.match(self._ignore, frame.filename) + and not io.is_debug() + ): + continue + + stack_frames.append(frame) + + remaining_frames_length = len(stack_frames) - 1 + if io.is_very_verbose() and remaining_frames_length: + self._render_line(io, "Stack trace:", True) + max_frame_length = len(str(remaining_frames_length)) + frame_collections = stack_frames.compact() + i = remaining_frames_length + for collection in frame_collections: + if collection.is_repeated(): + if len(collection) > 1: + frames_message = f"{len(collection)} frames" + else: + frames_message = "frame" + + self._render_line( + io, + f"{'...':>{max_frame_length}} " + f"Previous {frames_message} repeated " + f"{collection.repetitions + 1} times", + True, + ) + + i -= len(collection) * (collection.repetitions + 1) + + for frame in collection: + relative_file_path = self._get_relative_file_path(frame.filename) + relative_file_path_parts = relative_file_path.split(os.path.sep) + relative_file_path = ( + f"{Formatter.escape(os.sep)}".join( + relative_file_path_parts[:-1] + + [ + "" + f"{relative_file_path_parts[-1]}" + ] + ) + ) + self._render_line( + io, + f"{i:>{max_frame_length}} " + f"{relative_file_path}:" + f"{frame.lineno} in {frame.function}", 
+ True, + ) + + if io.is_debug(): + if (frame, 2, 2) not in self._FRAME_SNIPPET_CACHE: + code_lines = Highlighter( + supports_utf8=io.supports_utf8() + ).code_snippet( + frame.file_content, + frame.lineno, + ) + + self._FRAME_SNIPPET_CACHE[(frame, 2, 2)] = code_lines + + code_lines = self._FRAME_SNIPPET_CACHE[(frame, 2, 2)] + + for code_line in code_lines: + self._render_line( + io, + f"{' ' * max_frame_length}{code_line}", + indent=3, + ) + else: + highlighter = Highlighter(supports_utf8=io.supports_utf8()) + try: + code_line = highlighter.highlighted_lines( + frame.line.strip() + )[0] + except tokenize.TokenError: + code_line = frame.line.strip() + + self._render_line( + io, f"{' ' * (max_frame_length + 4)}{code_line}" + ) + + i -= 1 + + def _render_line( + self, io: IO | Output, line: str, new_line: bool = False, indent: int = 2 + ) -> None: + if new_line: + io.write_line("") + + io.write_line(f"{indent * ' '}{line}") + + def _get_relative_file_path(self, filepath: str) -> str: + cwd = os.getcwd() + + if cwd: + filepath = filepath.replace(cwd + os.path.sep, "") + + home = os.path.expanduser("~") + if home: + filepath = filepath.replace(home + os.path.sep, "~" + os.path.sep) + + return filepath diff --git a/conda_lock/_vendor/cleo/ui/progress_bar.py b/conda_lock/_vendor/cleo/ui/progress_bar.py new file mode 100644 index 00000000..212af041 --- /dev/null +++ b/conda_lock/_vendor/cleo/ui/progress_bar.py @@ -0,0 +1,426 @@ +from __future__ import annotations + +import math +import re +import time + +from typing import TYPE_CHECKING +from typing import ClassVar +from typing import Match + +from conda_lock._vendor.cleo._utils import format_time +from conda_lock._vendor.cleo.cursor import Cursor +from conda_lock._vendor.cleo.io.io import IO +from conda_lock._vendor.cleo.io.outputs.section_output import SectionOutput +from conda_lock._vendor.cleo.terminal import Terminal +from conda_lock._vendor.cleo.ui.component import Component + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.io.outputs.output import Output + + +class ProgressBar(Component): + """ + The ProgressBar provides helpers to display progress output. 
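+
+    Usage sketch ("io" is any cleo IO or Output instance):
+
+        bar = ProgressBar(io, max=100)
+        bar.start()
+        for _ in range(100):
+            bar.advance()
+        bar.finish()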
+ """ + + name = "progress_bar" + + # Options + bar_width = 28 + bar_char = None + empty_bar_char = "-" + progress_char = ">" + redraw_freq: int | None = 1 + + formats: ClassVar[dict[str, str]] = { + "normal": " %current%/%max% [%bar%] %percent:3s%%", + "normal_nomax": " %current% [%bar%]", + "verbose": " %current%/%max% [%bar%] %percent:3s%% %elapsed:-6s%", + "verbose_nomax": " %current% [%bar%] %elapsed:6s%", + "very_verbose": ( + " %current%/%max% [%bar%] %percent:3s%%" " %elapsed:6s%/%estimated:-6s%" + ), + "very_verbose_nomax": " %current% [%bar%] %elapsed:6s%", + "debug": " %current%/%max% [%bar%] %percent:3s%% %elapsed:6s%/%estimated:-6s%", + "debug_nomax": " %current% [%bar%] %elapsed:6s%", + } + + def __init__( + self, + io: IO | Output, + max: int = 0, + min_seconds_between_redraws: float = 0.1, + ) -> None: + # If we have an IO, ensure we write to the error output + if isinstance(io, IO): + io = io.error_output + + self._io = io + self._terminal = Terminal().size + self._max = 0 + self._step_width: int = 1 + self._set_max_steps(max) + self._step = 0 + self._percent = 0.0 + self._format: str | None = None + self._internal_format: str | None = None + self._format_line_count = 0 + self._previous_message: str | None = None + self._should_overwrite = True + self._min_seconds_between_redraws = 0.0 + self._max_seconds_between_redraws = 1.0 + self._write_count = 0 + + if min_seconds_between_redraws > 0: + self.redraw_freq = None + self._min_seconds_between_redraws = min_seconds_between_redraws + + if not self._io.formatter.is_decorated(): + # Disable overwrite when output does not support ANSI codes. + self._should_overwrite = False + + # Set a reasonable redraw frequency so output isn't flooded + self.redraw_freq = None + + self._messages: dict[str, str] = {} + + self._start_time = time.time() + self._last_write_time = 0.0 + self._cursor = Cursor(self._io) + + def set_message(self, message: str, name: str = "message") -> None: + self._messages[name] = message + + def get_message(self, name: str = "message") -> str: + return self._messages[name] + + def get_start_time(self) -> float: + return self._start_time + + def get_max_steps(self) -> int: + return self._max + + def get_progress(self) -> int: + return self._step + + def get_progress_percent(self) -> float: + return self._percent + + def set_bar_character(self, character: str) -> ProgressBar: + self.bar_char = character + + return self + + def get_bar_character(self) -> str: + if self.bar_char is None: + if self._max: + return "=" + + return self.empty_bar_char + + return self.bar_char + + def set_bar_width(self, width: int) -> ProgressBar: + self.bar_width = width + + return self + + def get_empty_bar_character(self) -> str: + return self.empty_bar_char + + def set_empty_bar_character(self, character: str) -> ProgressBar: + self.empty_bar_char = character + + return self + + def get_progress_character(self) -> str: + return self.progress_char + + def set_progress_character(self, character: str) -> ProgressBar: + self.progress_char = character + + return self + + def set_format(self, fmt: str) -> None: + self._format = None + self._internal_format = fmt + + def set_redraw_frequency(self, freq: int) -> None: + if self.redraw_freq is not None: + self.redraw_freq = max(freq, 1) + + def min_seconds_between_redraws(self, freq: float) -> None: + if freq > 0: + self.redraw_freq = None + self._min_seconds_between_redraws = freq + + def max_seconds_between_redraws(self, freq: float) -> None: + self._max_seconds_between_redraws = freq + + def 
start(self, max: int | None = None) -> None: + """ + Start the progress output. + """ + self._start_time = time.time() + self._step = 0 + self._percent = 0.0 + + if max is not None: + self._set_max_steps(max) + + self.display() + + def advance(self, step: int = 1) -> None: + """ + Advances the progress output X steps. + """ + self.set_progress(self._step + step) + + def set_progress(self, step: int) -> None: + """ + Sets the current progress. + """ + if self._max and step > self._max: + self._max = step + elif step < 0: + step = 0 + + redraw_freq = ( + (self._max or 10) / 10 if self.redraw_freq is None else self.redraw_freq + ) + prev_period = int(self._step / redraw_freq) + curr_period = int(step / redraw_freq) + + self._step = step + self._percent = step / (self._max or math.inf) + + time_interval = time.time() - self._last_write_time + + # Draw regardless of other limits + if step == self._max: + self.display() + + return + + # Throttling + if time_interval < self._min_seconds_between_redraws: + return + + # Draw each step period, but not too late + if ( + prev_period != curr_period + or time_interval >= self._max_seconds_between_redraws + ): + self.display() + + def finish(self) -> None: + """ + Finish the progress output. + """ + if not self._max: + self._max = self._step + + if self._step == self._max and not self._should_overwrite: + return + + self.set_progress(self._max) + + def display(self) -> None: + """ + Output the current progress string. + """ + if self._io.is_quiet(): + return + + if self._format is None: + self._set_real_format( + self._internal_format or self._determine_best_format() + ) + + self._overwrite(self._build_line()) + + def _overwrite_callback(self, matches: Match[str]) -> str: + if hasattr(self, f"_formatter_{matches.group(1)}"): + text = str(getattr(self, f"_formatter_{matches.group(1)}")()) + elif matches.group(1) in self._messages: + text = self._messages[matches.group(1)] + else: + return matches.group(0) + + if matches.group(2): + n = int(matches.group(2).lstrip("-").rstrip("s")) + if matches.group(2).startswith("-"): + return text.ljust(n) + return text.rjust(n) + + return text + + def clear(self) -> None: + """ + Removes the progress bar from the current line. + + This is useful if you wish to write some output + while a progress bar is running. + Call display() to show the progress bar again. + """ + if not self._should_overwrite: + return + + if self._format is None: + self._set_real_format( + self._internal_format or self._determine_best_format() + ) + + self._overwrite("\n" * self._format_line_count) + + def _set_real_format(self, fmt: str) -> None: + """ + Sets the progress bar format. + """ + # try to use the _nomax variant if available + if not self._max and fmt + "_nomax" in self.formats: + self._format = self.formats[fmt + "_nomax"] + else: + self._format = self.formats.get(fmt, fmt) + assert self._format is not None + self._format_line_count = self._format.count("\n") + + def _set_max_steps(self, mx: int) -> None: + """ + Sets the progress bar maximal steps. + """ + self._max = max(0, mx) + self._step_width = len(str(self._max)) if self._max else 4 + + def _overwrite(self, message: str) -> None: + """ + Overwrites a previous message to the output. 
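+
+        When overwriting is enabled, previously drawn lines are cleared in
+        place (via cursor movement or SectionOutput.clear); otherwise each
+        redraw is written on a new line.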
+ """ + if self._previous_message == message: + return + + original_message = message + + if self._should_overwrite: + if self._previous_message is not None: + if isinstance(self._io, SectionOutput): + lines_to_clear = ( + len(self._io.remove_format(message)) // self._terminal.width + + self._format_line_count + + 1 + ) + self._io.clear(lines_to_clear) + else: + if self._format_line_count: + self._cursor.move_up(self._format_line_count) + + self._cursor.move_to_column(1) + self._cursor.clear_line() + elif self._step > 0: + message = "\n" + message + + self._previous_message = original_message + self._last_write_time = time.time() + + self._io.write(message) + self._write_count += 1 + + def _determine_best_format(self) -> str: + fmt = "normal" + if self._io.is_debug(): + fmt = "debug" + elif self._io.is_very_verbose(): + fmt = "very_verbose" + elif self._io.is_verbose(): + fmt = "verbose" + + return fmt if self._max else f"{fmt}_nomax" + + @property + def bar_offset(self) -> int: + if self._max: + return math.floor(self._percent * self.bar_width) + if self.redraw_freq is None: + return math.floor( + (min(5, self.bar_width // 15) * self._write_count) % self.bar_width + ) + return math.floor(self._step % self.bar_width) + + def _formatter_bar(self) -> str: + complete_bars = self.bar_offset + + display = self.get_bar_character() * int(complete_bars) + + if complete_bars < self.bar_width: + empty_bars = ( + self.bar_width + - complete_bars + - len(self._io.remove_format(self.progress_char)) + ) + display += self.progress_char + self.empty_bar_char * int(empty_bars) + + return display + + def _formatter_elapsed(self) -> str: + return format_time(time.time() - self._start_time) + + def _formatter_remaining(self) -> str: + if not self._max: + raise RuntimeError( + "Unable to display the remaining time " + "if the maximum number of steps is not set." + ) + + if not self._step: + remaining = 0 + else: + remaining = round( + (time.time() - self._start_time) / self._step * (self._max - self._max) + ) + + return format_time(remaining) + + def _formatter_estimated(self) -> int: + if not self._max: + raise RuntimeError( + "Unable to display the estimated time " + "if the maximum number of steps is not set." 
+ ) + + if not self._step: + return 0 + + return round((time.time() - self._start_time) / self._step * self._max) + + def _formatter_current(self) -> str: + return str(self._step).rjust(self._step_width) + + def _formatter_max(self) -> int: + return self._max + + def _formatter_percent(self) -> int: + return int(math.floor(self._percent * 100)) + + def _build_line(self) -> str: + regex = re.compile(r"(?i)%([a-z\-_]+)(?::([^%]+))?%") + assert self._format is not None + line = regex.sub(self._overwrite_callback, self._format) + + # gets string length for each sub line with multiline format + lines_length = [ + len(self._io.remove_format(sub_line.rstrip("\r"))) + for sub_line in line.split("\n") + ] + + lines_width = max(lines_length) + + terminal_width = self._terminal.width + + if lines_width <= terminal_width: + return line + + self.set_bar_width(self.bar_width - lines_width + terminal_width) + + return regex.sub(self._overwrite_callback, self._format) diff --git a/conda_lock/_vendor/cleo/ui/progress_indicator.py b/conda_lock/_vendor/cleo/ui/progress_indicator.py new file mode 100644 index 00000000..42e0fc1a --- /dev/null +++ b/conda_lock/_vendor/cleo/ui/progress_indicator.py @@ -0,0 +1,215 @@ +from __future__ import annotations + +import re +import threading +import time + +from contextlib import contextmanager +from typing import TYPE_CHECKING + +from conda_lock._vendor.cleo._utils import format_time +from conda_lock._vendor.cleo.io.io import IO + + +if TYPE_CHECKING: + from typing import Iterator + from typing import Match + + from conda_lock._vendor.cleo.io.outputs.output import Output + + +class ProgressIndicator: + """ + A process indicator. + """ + + NORMAL = " {indicator} {message}" + NORMAL_NO_ANSI = " {message}" + VERBOSE = " {indicator} {message} ({elapsed:6s})" + VERBOSE_NO_ANSI = " {message} ({elapsed:6s})" + VERY_VERBOSE = " {indicator} {message} ({elapsed:6s})" + VERY_VERBOSE_NO_ANSI = " {message} ({elapsed:6s})" + + def __init__( + self, + io: IO | Output, + fmt: str | None = None, + interval: int = 100, + values: list[str] | None = None, + ) -> None: + if isinstance(io, IO): + io = io.error_output + + self._io = io + + if fmt is None: + fmt = self._determine_best_format() + + self._fmt = fmt + + if values is None: + values = ["-", "\\", "|", "/"] + + if len(values) < 2: + raise ValueError( + "The progress indicator must have at " + "least 2 indicator value characters." 
+ ) + + self._interval = interval + self._values = values + + self._message: str | None = None + self._update_time: int | None = None + self._started = False + self._current = 0 + + self._auto_running: threading.Event | None = None + self._auto_thread: threading.Thread | None = None + + self._start_time: float | None = None + self._last_message_length = 0 + + @property + def message(self) -> str | None: + return self._message + + def set_message(self, message: str | None) -> None: + self._message = message + + self._display() + + @property + def current_value(self) -> str: + return self._values[self._current % len(self._values)] + + def start(self, message: str) -> None: + if self._started: + raise RuntimeError("Progress indicator already started.") + + self._message = message + self._started = True + self._start_time = time.time() + self._update_time = self._get_current_time_in_milliseconds() + self._interval + self._current = 0 + + self._display() + + def advance(self) -> None: + if not self._started: + raise RuntimeError("Progress indicator has not yet been started.") + + if not self._io.is_decorated(): + return + + current_time = self._get_current_time_in_milliseconds() + if self._update_time is not None and current_time < self._update_time: + return + + self._update_time = current_time + self._interval + self._current += 1 + + self._display() + + def finish(self, message: str, reset_indicator: bool = False) -> None: + if not self._started: + raise RuntimeError("Progress indicator has not yet been started.") + + if not (self._auto_thread is None or self._auto_running is None): + self._auto_running.set() + self._auto_thread.join() + + self._message = message + + if reset_indicator: + self._current = 0 + + self._display() + self._io.write_line("") + self._started = False + + @contextmanager + def auto(self, start_message: str, end_message: str) -> Iterator[ProgressIndicator]: + """ + Auto progress. + """ + self._auto_running = threading.Event() + self._auto_thread = threading.Thread(target=self._spin) + + self.start(start_message) + self._auto_thread.start() + + try: + yield self + except (Exception, KeyboardInterrupt): + self._io.write_line("") + + self._auto_running.set() + self._auto_thread.join() + + raise + + self.finish(end_message, reset_indicator=True) + + def _spin(self) -> None: + while not (self._auto_running is None or self._auto_running.is_set()): + self.advance() + + time.sleep(0.1) + + def _display(self) -> None: + if self._io.is_quiet(): + return + + self._overwrite( + re.sub( + r"(?i){([a-z\-_]+)(?::([^}]+))?}", self._overwrite_callback, self._fmt + ) + ) + + def _overwrite_callback(self, matches: Match[str]) -> str: + if hasattr(self, f"_formatter_{matches.group(1)}"): + return str(getattr(self, f"_formatter_{matches.group(1)}")()) + return matches.group(0) + + def _overwrite(self, message: str) -> None: + """ + Overwrites a previous message to the output. 
+ """ + if self._io.is_decorated(): + self._io.write("\x0D\x1B[2K") + self._io.write(message) + else: + self._io.write_line(message) + + def _determine_best_format(self) -> str: + decorated = self._io.is_decorated() + + if self._io.is_very_verbose(): + if decorated: + return self.VERY_VERBOSE + + return self.VERY_VERBOSE_NO_ANSI + elif self._io.is_verbose(): + if decorated: + return self.VERY_VERBOSE + + return self.VERBOSE_NO_ANSI + + if decorated: + return self.NORMAL + + return self.NORMAL_NO_ANSI + + def _get_current_time_in_milliseconds(self) -> int: + return round(time.time() * 1000) + + def _formatter_indicator(self) -> str: + return self.current_value + + def _formatter_message(self) -> str | None: + return self.message + + def _formatter_elapsed(self) -> str: + assert self._start_time is not None + return format_time(time.time() - self._start_time) diff --git a/conda_lock/_vendor/cleo/ui/question.py b/conda_lock/_vendor/cleo/ui/question.py new file mode 100644 index 00000000..4c5f3ff6 --- /dev/null +++ b/conda_lock/_vendor/cleo/ui/question.py @@ -0,0 +1,274 @@ +from __future__ import annotations + +import getpass +import os +import subprocess + +from pathlib import Path +from typing import TYPE_CHECKING +from typing import Any +from typing import Callable + +from conda_lock._vendor.cleo.formatters.style import Style +from conda_lock._vendor.cleo.io.outputs.stream_output import StreamOutput + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.io.io import IO + +Validator = Callable[[str], Any] +Normalizer = Callable[[str], Any] + + +class Question: + """ + A question that will be asked in a Console. + """ + + def __init__(self, question: str, default: Any = None) -> None: + self._question = question + self._default = default + + self._attempts: int | None = None + self._hidden = False + self._hidden_fallback = True + self._autocomplete_values: list[str] = [] + self._validator: Validator = lambda s: s + self._normalizer: Normalizer = lambda s: s + self._error_message = 'Value "{}" is invalid' + + @property + def question(self) -> str: + return self._question + + @property + def default(self) -> Any: + return self._default + + @property + def autocomplete_values(self) -> list[str]: + return self._autocomplete_values + + @property + def max_attempts(self) -> int | None: + return self._attempts + + def is_hidden(self) -> bool: + return self._hidden + + def hide(self, hidden: bool = True) -> None: + if hidden is True and self._autocomplete_values: + raise RuntimeError("A hidden question cannot use the autocompleter.") + + self._hidden = hidden + + def set_autocomplete_values(self, autocomplete_values: list[str]) -> None: + if self.is_hidden(): + raise RuntimeError("A hidden question cannot use the autocompleter.") + + self._autocomplete_values = autocomplete_values + + def set_max_attempts(self, attempts: int | None) -> None: + self._attempts = attempts + + def set_validator(self, validator: Validator) -> None: + self._validator = validator + + def ask(self, io: IO) -> Any: + """ + Asks the question to the user. + """ + if not io.is_interactive(): + return self.default + return self._validate_attempts(lambda: self._do_ask(io), io) + + def _do_ask(self, io: IO) -> Any: + """ + Asks the question to the user. 
+ """ + self._write_prompt(io) + + if not (self._autocomplete_values and self._has_stty_available()): + ret: str | None = None + + if self.is_hidden(): + try: + ret = self._get_hidden_response(io) + except RuntimeError: + if not self._hidden_fallback: + raise + + if not ret: + ret = self._read_from_input(io) + else: + ret = self._autocomplete(io) + + if len(ret) <= 0: + ret = self._default + + return self._normalizer(ret) # type: ignore[arg-type] + + def _write_prompt(self, io: IO) -> None: + """ + Outputs the question prompt. + """ + io.write_error(f"{self._question} ") + + def _write_error(self, io: IO, error: Exception) -> None: + """ + Outputs an error message. + """ + io.write_error_line(f"{error!s}") + + def _autocomplete(self, io: IO) -> str: + """ + Autocomplete a question. + """ + autocomplete = self._autocomplete_values + + ret = "" + + i = 0 + ofs = -1 + matches = list(autocomplete) + num_matches = len(matches) + + # Add highlighted text style + style = Style(options=["reverse"]) + io.error_output.formatter.set_style("hl", style) + + stty_mode = subprocess.check_output(["stty", "-g"]).decode().rstrip("\n") + + # Disable icanon (so we can read each keypress) and + # echo (we'll do echoing here instead) + subprocess.check_output(["stty", "-icanon", "-echo"]) + try: + # Read a keypress + while True: + c = io.read(1) + + # Backspace character + if c == "\177": + if num_matches == 0 and i != 0: + i -= 1 + # Move cursor backwards + io.write_error("\033[1D") + + if i == 0: + ofs = -1 + matches = list(autocomplete) + num_matches = len(matches) + else: + num_matches = 0 + + # Pop the last character off the end of our string + ret = ret[:i] + # Did we read an escape sequence + elif c == "\033": + c += io.read(2) + + # A = Up Arrow. B = Down Arrow + if c[2] == "A" or c[2] == "B": + if c[2] == "A" and ofs == -1: + ofs = 0 + + if num_matches == 0: + continue + + ofs += -1 if c[2] == "A" else 1 + ofs = (num_matches + ofs) % num_matches + elif ord(c) < 32: + if c in ["\t", "\n"]: + if num_matches > 0 and ofs != -1: + ret = matches[ofs] + # Echo out remaining chars for current match + io.write_error(ret[i:]) + i = len(ret) + + if c == "\n": + io.write_error(c) + break + + num_matches = 0 + + continue + else: + io.write_error(c) + ret += c + i += 1 + + num_matches = 0 + ofs = 0 + + for value in autocomplete: + # If typed characters match the beginning + # chunk of value (e.g. [AcmeDe]moBundle) + if value.startswith(ret) and i != len(value): + num_matches += 1 + matches[num_matches - 1] = value + + # Erase characters from cursor to end of line + io.write_error("\033[K") + + if num_matches > 0 and ofs != -1: + # Save cursor position + io.write_error("\0337") + # Write highlighted text + io.write_error("" + matches[ofs][i:] + "") + # Restore cursor position + io.write_error("\0338") + finally: + subprocess.call(["stty", f"{stty_mode}"]) + + return ret + + def _get_hidden_response(self, io: IO) -> str: + """ + Gets a hidden response from user. + """ + stream = None + if isinstance(io.error_output, StreamOutput): + stream = io.error_output.stream + return getpass.getpass("", stream=stream) + + def _validate_attempts(self, interviewer: Callable[[], Any], io: IO) -> Any: + """ + Validates an attempt. 
+ """ + error = None + attempts = self._attempts + + while attempts is None or attempts: + if error is not None: + self._write_error(io, error) + + try: + return self._validator(interviewer()) + except Exception as e: + error = e + + if attempts is not None: + attempts -= 1 + + assert error + raise error + + def _read_from_input(self, io: IO) -> str: + """ + Read user input. + """ + ret = io.read_line(4096) + + if not ret: + raise RuntimeError("Aborted") + + return ret.strip() + + def _has_stty_available(self) -> bool: + with Path(os.devnull).open("w") as devnull: + try: + exit_code = subprocess.call(["stty"], stdout=devnull, stderr=devnull) + except Exception: + exit_code = 2 + + return exit_code == 0 diff --git a/conda_lock/_vendor/cleo/ui/table.py b/conda_lock/_vendor/cleo/ui/table.py new file mode 100644 index 00000000..22b34445 --- /dev/null +++ b/conda_lock/_vendor/cleo/ui/table.py @@ -0,0 +1,742 @@ +from __future__ import annotations + +import math +import re + +from contextlib import suppress +from copy import deepcopy +from itertools import repeat +from typing import TYPE_CHECKING +from typing import Iterator +from typing import List +from typing import Union +from typing import cast + +from conda_lock._vendor.cleo.formatters.formatter import Formatter +from conda_lock._vendor.cleo.io.outputs.output import Output +from conda_lock._vendor.cleo.ui.table_cell import TableCell +from conda_lock._vendor.cleo.ui.table_cell_style import TableCellStyle +from conda_lock._vendor.cleo.ui.table_separator import TableSeparator +from conda_lock._vendor.cleo.ui.table_style import TableStyle + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.io.io import IO + +Row = List[Union[str, TableCell]] +Rows = List[Union[Row, TableSeparator]] +Header = Row + + +class Table: + SEPARATOR_TOP: int = 0 + SEPARATOR_TOP_BOTTOM: int = 1 + SEPARATOR_MID: int = 2 + SEPARATOR_BOTTOM: int = 3 + + BORDER_OUTSIDE: int = 0 + BORDER_INSIDE: int = 1 + + _styles: dict[str, TableStyle] | None = None + + def __init__(self, io: IO | Output, style: str | None = None) -> None: + self._io = io + + if style is None: + style = "default" + + self._header_title: str | None = None + self._footer_title: str | None = None + + self._headers: list[Header] = [] + + self._rows: Rows = [] + self._horizontal = False + + self._effective_column_widths: dict[int, int] = {} + + self._number_of_columns: int | None = None + + self._column_styles: dict[int, TableStyle] = {} + self._column_widths: dict[int, int] = {} + self._column_max_widths: dict[int, int] = {} + + self._rendered = False + + self._style: TableStyle | None = None + self._init_styles() + self.set_style(style) + + @property + def style(self) -> TableStyle: + assert self._style is not None + return self._style + + def set_style(self, name: str) -> Table: + self._init_styles() + + self._style = self._resolve_style(name) + + return self + + def column_style(self, column_index: int) -> TableStyle: + if column_index in self._column_styles: + return self._column_styles[column_index] + + return self.style + + def set_column_style(self, column_index: int, style: str | TableStyle) -> Table: + self._column_styles[column_index] = self._resolve_style(style) + + return self + + def set_column_width(self, column_index: int, width: int) -> Table: + self._column_widths[column_index] = width + + return self + + def set_column_widths(self, widths: list[int]) -> Table: + self._column_widths = {} + + for i, width in enumerate(widths): + self._column_widths[i] = width + + return self + + def 
set_column_max_width(self, column_index: int, width: int) -> Table:
+        self._column_max_widths[column_index] = width
+
+        return self
+
+    def set_headers(self, headers: Header | list[Header]) -> Table:
+        if headers and not isinstance(headers[0], list):
+            headers = cast("Header", headers)
+            headers = [headers]
+
+        headers = cast("List[Header]", headers)
+
+        self._headers = headers
+
+        return self
+
+    def set_rows(self, rows: Rows) -> Table:
+        self._rows = []
+
+        return self.add_rows(rows)
+
+    def add_rows(self, rows: Rows) -> Table:
+        for row in rows:
+            self.add_row(row)
+
+        return self
+
+    def add_row(self, row: Row | TableSeparator) -> Table:
+        if isinstance(row, TableSeparator):
+            self._rows.append(row)
+
+            return self
+
+        self._rows.append(row)
+
+        return self
+
+    def set_header_title(self, header_title: str) -> Table:
+        self._header_title = header_title
+
+        return self
+
+    def set_footer_title(self, footer_title: str) -> Table:
+        self._footer_title = footer_title
+
+        return self
+
+    def horizontal(self, horizontal: bool = True) -> Table:
+        self._horizontal = horizontal
+
+        return self
+
+    def render(self) -> None:
+        divider = TableSeparator()
+
+        if self._horizontal:
+            rows: Rows = []
+            headers = self._headers[0] if self._headers else []
+            for i, header in enumerate(headers):
+                rows.append([header])
+                for row in self._rows:
+                    if isinstance(row, TableSeparator):
+                        continue
+
+                    rows_i = rows[i]
+                    assert not isinstance(rows_i, TableSeparator)
+
+                    if len(row) > i:
+                        rows_i.append(row[i])
+                    elif isinstance(rows_i[0], TableCell) and rows_i[0].colspan >= 2:
+                        # There is a title
+                        pass
+                    else:
+                        rows_i.append("")
+        else:
+            rows = [*cast("Rows", self._headers), divider, *self._rows]
+
+        self._calculate_number_of_columns(rows)
+        rows = list(self._build_table_rows(rows))
+        self._calculate_column_widths(rows)
+
+        is_header = not self._horizontal
+        is_first_row = self._horizontal
+
+        for row in rows:
+            if row is divider:
+                is_header = False
+                is_first_row = True
+
+                continue
+
+            if isinstance(row, TableSeparator):
+                self._render_row_separator()
+
+                continue
+
+            if not row:
+                continue
+
+            if is_header or is_first_row:
+                if is_first_row:
+                    self._render_row_separator(self.SEPARATOR_TOP_BOTTOM)
+                    is_first_row = False
+                else:
+                    self._render_row_separator(
+                        self.SEPARATOR_TOP,
+                        self._header_title,
+                        self.style.header_title_format,
+                    )
+
+            if self._horizontal:
+                self._render_row(
+                    row, self.style.cell_row_format, self.style.cell_header_format
+                )
+            else:
+                self._render_row(
+                    row,
+                    self.style.cell_header_format
+                    if is_header
+                    else self.style.cell_row_format,
+                )
+
+        self._render_row_separator(
+            self.SEPARATOR_BOTTOM,
+            self._footer_title,
+            self.style.footer_title_format,
+        )
+
+        self._cleanup()
+        self._rendered = True
+
+    def _render_row_separator(
+        self,
+        type: int = SEPARATOR_MID,
+        title: str | None = None,
+        title_format: str | None = None,
+    ) -> None:
+        """
+        Renders horizontal header separator.
+ + Example: + + +-----+-----------+-------+ + """ + count = self._number_of_columns + if not count: + return + + borders = self.style.border_chars + if not borders[0] and not borders[2] and not self.style.crossing_char: + return + + crossings = self.style.crossing_chars + if type == self.SEPARATOR_MID: + horizontal, left_char, mid_char, right_char = ( + borders[2], + crossings[8], + crossings[0], + crossings[4], + ) + elif type == self.SEPARATOR_TOP: + horizontal, left_char, mid_char, right_char = ( + borders[0], + crossings[1], + crossings[2], + crossings[3], + ) + elif type == self.SEPARATOR_TOP_BOTTOM: + horizontal, left_char, mid_char, right_char = ( + borders[0], + crossings[9], + crossings[10], + crossings[11], + ) + else: + horizontal, left_char, mid_char, right_char = ( + borders[0], + crossings[7], + crossings[6], + crossings[5], + ) + + markup = left_char + for column in range(count): + markup += horizontal * self._effective_column_widths[column] + markup += right_char if column == count - 1 else mid_char + + if title is not None: + assert title_format is not None + formatted_title = title_format.format(title) + title_length = len(self._io.remove_format(formatted_title)) + markup_length = len(markup) + limit = markup_length - 4 + + if title_length > limit: + title_length = limit + format_length = len(self._io.remove_format(title_format.format(""))) + formatted_title = title_format.format( + title[: limit - format_length - 3] + "..." + ) + + title_start = (markup_length - title_length) // 2 + markup = ( + markup[:title_start] + + formatted_title + + markup[title_start + title_length :] + ) + + self._io.write_line(self.style.border_format.format(markup)) + + def _render_column_separator(self, type: int = BORDER_OUTSIDE) -> str: + """ + Renders vertical column separator. + """ + borders = self.style.border_chars + + return self.style.border_format.format( + borders[1] if type == self.BORDER_OUTSIDE else borders[3] + ) + + def _render_row( + self, row: list[str], cell_format: str, first_cell_format: str | None = None + ) -> None: + """ + Renders table row. + + Example: + + | 9971-5-0210-0 | A Tale of Two Cities | Charles Dickens | + """ + row_content = self._render_column_separator(self.BORDER_OUTSIDE) + columns = self._get_row_columns(row) + last = len(columns) - 1 + for i, column in enumerate(columns): + row_content += self._render_cell( + row, + column, + first_cell_format if first_cell_format and i == 0 else cell_format, + ) + + row_content += self._render_column_separator( + self.BORDER_OUTSIDE if i == last else self.BORDER_INSIDE + ) + + self._io.write_line(row_content) + + def _render_cell(self, row: Row, column: int, cell_format: str) -> str: + """ + Renders a table cell with padding. + """ + try: + cell = row[column] + except IndexError: + cell = "" + + width = self._effective_column_widths[column] + if isinstance(cell, TableCell) and cell.colspan > 1: + # add the width of the following columns(numbers of colspan). 
+            for next_column in range(column + 1, column + cell.colspan):
+                width += (
+                    self._get_column_separator_width()
+                    + self._effective_column_widths[next_column]
+                )
+
+        style = self.column_style(column)
+
+        if isinstance(cell, TableSeparator):
+            return style.border_format.format(style.border_chars[2] * width)
+
+        width += len(cell) - len(self._io.remove_format(cell))
+        content = style.cell_row_content_format.format(cell)
+
+        pad = style.pad
+        if isinstance(cell, TableCell) and isinstance(cell.style, TableCellStyle):
+            is_not_styled_by_tag = not re.match(
+                (
+                    r"^<(\w+|((?:fg|bg|options)=[\w,]+;?)+)>"
+                    r".+<\/(\w+|((?:fg|bg|options)=[\w,]+;?)+)?>$"
+                ),
+                str(cell),
+            )
+            if is_not_styled_by_tag:
+                cell_format = (
+                    cell.style.cell_format
+                    if cell.style.cell_format is not None
+                    else f"<{cell.style.tag}>{{}}</>"
+                )
+
+                if "</>" in content:
+                    content = content.replace("</>", "")
+                    width -= 3
+
+                if "<fg=default;bg=default>" in content:
+                    content = content.replace("<fg=default;bg=default>", "")
+                    width -= len("<fg=default;bg=default>")
+
+            pad = cell.style.pad
+
+        return cell_format.format(pad(content, width, style.padding_char))
+
+    def _calculate_number_of_columns(self, rows: Rows) -> None:
+        columns = [0]
+        for row in rows:
+            if isinstance(row, TableSeparator):
+                continue
+
+            columns.append(self._get_number_of_columns(row))
+
+        self._number_of_columns = max(columns)
+
+    def _build_table_rows(self, rows: Rows) -> Iterator[Row | TableSeparator]:
+        unmerged_rows: dict[int, dict[int, Row]] = {}
+        row_key = 0
+        while row_key < len(rows):
+            rows = self._fill_next_rows(rows, row_key)
+
+            # Remove any new line breaks and replace it with a new line
+            for column, cell in enumerate(rows[row_key]):
+                colspan = cell.colspan if isinstance(cell, TableCell) else 1
+
+                if column in self._column_max_widths and self._column_max_widths[
+                    column
+                ] < len(self._io.remove_format(cell)):
+                    assert isinstance(self._io, Output)
+                    cell = self._io.formatter.format_and_wrap(
+                        cell, self._column_max_widths[column] * colspan
+                    )
+
+                if "\n" not in cell:
+                    continue
+
+                escaped = "\n".join(
+                    Formatter.escape_trailing_backslash(c) for c in cell.split("\n")
+                )
+                cell = (
+                    TableCell(escaped, colspan=cell.colspan)
+                    if isinstance(cell, TableCell)
+                    else escaped
+                )
+                lines = cell.replace("\n", "<fg=default;bg=default>\n</>").split("\n")
+
+                for line_key, line in enumerate(lines):
+                    if colspan > 1:
+                        line = TableCell(line, colspan=colspan)
+
+                    if line_key == 0:
+                        row = rows[row_key]
+                        assert not isinstance(row, TableSeparator)
+                        row[column] = line
+                    else:
+                        if row_key not in unmerged_rows:
+                            unmerged_rows[row_key] = {}
+
+                        if line_key not in unmerged_rows[row_key]:
+                            unmerged_rows[row_key][line_key] = self._copy_row(
+                                rows, row_key
+                            )
+
+                        unmerged_rows[row_key][line_key][column] = line
+
+            row_key += 1
+
+        for row_key, row in enumerate(rows):
+            yield self._fill_cells(row)
+
+            if row_key in unmerged_rows:
+                for unmerged_row in unmerged_rows[row_key].values():
+                    yield self._fill_cells(unmerged_row)
+
+    def _calculate_row_count(self) -> int:
+        number_of_rows = len(
+            list(
+                self._build_table_rows(
+                    [*cast("Rows", self._headers), TableSeparator(), *self._rows]
+                )
+            )
+        )
+
+        if self._headers:
+            number_of_rows += 1
+
+        if self._rows:
+            number_of_rows += 1
+
+        return number_of_rows
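
`_build_table_rows` is where one logical row fans out into several physical rows: cells wider than their `_column_max_widths` entry are wrapped, embedded newlines are split, and the overflow lines are parked in `unmerged_rows` until they are yielded right after their parent row. From the caller's side the effect looks like this (a sketch; `io` is assumed to be an already-constructed cleo `IO`, and the data is made up):

```python
from conda_lock._vendor.cleo.ui.table import Table
from conda_lock._vendor.cleo.ui.table_cell import TableCell


def render_report(io) -> None:
    table = Table(io)
    table.set_headers(["Package", "Summary"])
    table.set_rows(
        [
            # The embedded newline becomes a second physical row of
            # the same logical row when the table is rendered.
            ["conda-lock", "Lockfile generator\nfor conda environments"],
            # A colspan cell is padded out with empty cells by _fill_cells.
            [TableCell("-- end of listing --", colspan=2)],
        ]
    )
    table.render()
```
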
+ """ + unmerged_rows: dict[int, dict[int, str | TableCell]] = {} + + for column, cell in enumerate(rows[line]): + if isinstance(cell, TableCell) and cell.rowspan > 1: + nb_lines = cell.rowspan - 1 + lines: Row = [cell] + if "\n" in cell: + lines = cell.replace("\n", "\n").split( + "\n" + ) + if len(lines) > nb_lines: + nb_lines = cell.count("\n") + + row = rows[line] + assert not isinstance(row, TableSeparator) + + row[column] = TableCell( + lines[0], colspan=cell.colspan, style=cell.style + ) + + # Create a two dimensional dict (rowspan x colspan) + placeholder: dict[int, dict[int, str | TableCell]] = { + k: {} for k in range(line + 1, line + 1 + nb_lines) + } + for k, v in unmerged_rows.items(): + if k in placeholder: + for l, m in unmerged_rows[k].items(): # noqa: E741 + placeholder[k][l] = m + else: + placeholder[k] = v + + unmerged_rows = placeholder + + for unmerged_row_key, _ in unmerged_rows.items(): + value = "" + if unmerged_row_key - line < len(lines): + value = lines[unmerged_row_key - line] + + unmerged_rows[unmerged_row_key][column] = TableCell( + value, colspan=cell.colspan, style=cell.style + ) + if nb_lines == unmerged_row_key - line: + break + + for unmerged_row_key, unmerged_row in unmerged_rows.items(): + # we need to know if unmerged_row will be merged or inserted into rows + assert self._number_of_columns is not None + this_row = None if unmerged_row_key >= len(rows) else rows[unmerged_row_key] + if ( + this_row is not None + and not isinstance(this_row, TableSeparator) + and ( + ( + self._get_number_of_columns(this_row) + + self._get_number_of_columns( + list(unmerged_rows[unmerged_row_key].values()) + ) + ) + <= self._number_of_columns + ) + ): + # insert cell into row at cell_key position + for cell_key, cell in unmerged_row.items(): + this_row.insert(cell_key, cell) + else: + row = self._copy_row(rows, unmerged_row_key - 1) + for column, cell in unmerged_row.items(): + if len(cell): + row[column] = unmerged_row[column] + + rows.insert(unmerged_row_key, row) + + return rows + + def _fill_cells(self, row: Row | TableSeparator) -> Row | TableSeparator: + """ + Fills cells for a row that contains colspan > 1. + """ + new_row = [] + + for cell in row: + new_row.append(cell) + + if isinstance(cell, TableCell) and cell.colspan > 1: + # insert empty value at column position + new_row.extend(repeat("", cell.colspan - 1)) + + return new_row or row + + def _copy_row(self, rows: Rows, line: int) -> Row: + """ + Copies a row. + """ + row = list(rows[line]) + + for cell_key, cell_value in enumerate(row): + row[cell_key] = "" + if isinstance(cell_value, TableCell): + row[cell_key] = TableCell("", colspan=cell_value.colspan) + + return row + + def _get_number_of_columns(self, row: Row) -> int: + """ + Gets number of columns by row. + """ + columns = len(row) + for column in row: + if isinstance(column, TableCell): + columns += column.colspan - 1 + + return columns + + def _get_row_columns(self, row: Row) -> list[int]: + """ + Gets list of columns for the given row. + """ + assert self._number_of_columns is not None + columns = list(range(self._number_of_columns)) + + for cell_key, cell in enumerate(row): + if isinstance(cell, TableCell) and cell.colspan > 1: + # exclude grouped columns. + columns = [ + column + for column in columns + if column not in range(cell_key + 1, cell_key + cell.colspan) + ] + + return columns + + def _calculate_column_widths(self, rows: Rows) -> None: + """ + Calculates column widths. 
+ """ + assert self._number_of_columns is not None + for column in range(self._number_of_columns): + lengths = [0] + for row in rows: + if isinstance(row, TableSeparator): + continue + + row_ = row.copy() + for i, cell in enumerate(row_): + if isinstance(cell, TableCell): + text_content = self._io.remove_format(cell) + text_length = len(text_content) + if text_length: + length = math.ceil(text_length / cell.colspan) + content_columns = [ + text_content[i : i + length] + for i in range(0, text_length, length) + ] + + for position, content in enumerate(content_columns): + try: + row_[i + position] = content + except IndexError: + row_.append(content) + + lengths.append(self._get_cell_width(row_, column)) + + self._effective_column_widths[column] = ( + max(lengths) + len(self.style.cell_row_content_format) - 2 + ) + + def _get_column_separator_width(self) -> int: + return len(self.style.border_format.format(self.style.border_chars[3])) + + def _get_cell_width(self, row: Row, column: int) -> int: + """ + Gets cell width. + """ + cell_width = 0 + + with suppress(IndexError): + cell = row[column] + cell_width = len(self._io.remove_format(cell)) + + column_width = ( + self._column_widths[column] if column in self._column_widths else 0 + ) + cell_width = max(cell_width, column_width) + + if column in self._column_max_widths: + return min(self._column_max_widths[column], cell_width) + + return cell_width + + def _cleanup(self) -> None: + self._column_widths = {} + self._number_of_columns = None + + @classmethod + def _init_styles(cls) -> None: + if cls._styles is not None: + return + + borderless = ( + TableStyle() + .set_horizontal_border_chars("=") + .set_vertical_border_chars(" ") + .set_default_crossing_char(" ") + ) + + compact = ( + TableStyle() + .set_horizontal_border_chars("") + .set_vertical_border_chars(" ") + .set_default_crossing_char("") + .set_cell_row_content_format("{}") + ) + + box = ( + TableStyle() + .set_horizontal_border_chars("─") + .set_vertical_border_chars("│") + .set_crossing_chars("┼", "┌", "┬", "┐", "┤", "┘", "┴", "└", "├") + ) + + box_double = ( + TableStyle() + .set_horizontal_border_chars("═", "─") + .set_vertical_border_chars("║", "│") + .set_crossing_chars( + "┼", "╔", "╤", "╗", "╢", "╝", "╧", "╚", "╟", "╠", "╪", "╣" + ) + ) + + cls._styles = { + "default": TableStyle(), + "borderless": borderless, + "compact": compact, + "box": box, + "box-double": box_double, + } + + @classmethod + def _resolve_style(cls, name: str | TableStyle) -> TableStyle: + if isinstance(name, TableStyle): + return name + + assert cls._styles is not None + if name in cls._styles: + return deepcopy(cls._styles[name]) + + raise ValueError(f'Table style "{name}" is not defined.') diff --git a/conda_lock/_vendor/cleo/ui/table_cell.py b/conda_lock/_vendor/cleo/ui/table_cell.py new file mode 100644 index 00000000..5f69d451 --- /dev/null +++ b/conda_lock/_vendor/cleo/ui/table_cell.py @@ -0,0 +1,41 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.ui.table_cell_style import TableCellStyle + + +class TableCell(str): + def __new__( + cls, + value: str = "", + rowspan: int = 1, + colspan: int = 1, + style: TableCellStyle | None = None, + ) -> TableCell: + return super().__new__(cls, value) + + def __init__( + self, + value: str = "", + rowspan: int = 1, + colspan: int = 1, + style: TableCellStyle | None = None, + ) -> None: + self._rowspan = rowspan + self._colspan = colspan + self._style = style + + @property + def 
rowspan(self) -> int:
+        return self._rowspan
+
+    @property
+    def colspan(self) -> int:
+        return self._colspan
+
+    @property
+    def style(self) -> TableCellStyle | None:
+        return self._style
diff --git a/conda_lock/_vendor/cleo/ui/table_cell_style.py b/conda_lock/_vendor/cleo/ui/table_cell_style.py
new file mode 100644
index 00000000..e244fa78
--- /dev/null
+++ b/conda_lock/_vendor/cleo/ui/table_cell_style.py
@@ -0,0 +1,54 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+
+if TYPE_CHECKING:
+    import sys
+
+    if sys.version_info >= (3, 8):
+        from typing import Literal
+    else:
+        from typing_extensions import Literal
+
+    _Align = Literal["left", "right"]
+
+
+class TableCellStyle:
+    def __init__(
+        self,
+        fg: str = "default",
+        bg: str = "default",
+        options: list[str] | None = None,
+        align: _Align = "left",
+        cell_format: str | None = None,
+    ) -> None:
+        self._fg = fg
+        self._bg = bg
+        self._options = options
+        self._align = align
+        self._cell_format = cell_format
+
+    @property
+    def cell_format(self) -> str | None:
+        return self._cell_format
+
+    @property
+    def tag(self) -> str:
+        tag = f"fg={self._fg};bg={self._bg}"
+
+        if self._options:
+            tag += f";options={','.join(self._options)}"
+
+        return tag
+
+    def pad(self, string: str, length: int, char: str = " ") -> str:
+        if self._align == "left":
+            return string.rjust(length, char)
+
+        if self._align == "right":
+            return string.ljust(length, char)
+
+        return string.center(length, char)
diff --git a/conda_lock/_vendor/cleo/ui/table_separator.py b/conda_lock/_vendor/cleo/ui/table_separator.py
new file mode 100644
index 00000000..ac4c4e5d
--- /dev/null
+++ b/conda_lock/_vendor/cleo/ui/table_separator.py
@@ -0,0 +1,8 @@
+from __future__ import annotations
+
+from conda_lock._vendor.cleo.ui.table_cell import TableCell
+
+
+class TableSeparator(TableCell):
+    def __init__(self) -> None:
+        super().__init__("")
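
Because `TableCell` subclasses `str`, it can appear anywhere a plain cell string can while carrying colspan/rowspan and an optional `TableCellStyle`; the style's `pad` is what `Table._render_cell` falls back to when the cell is not already styled by markup tags. A small sketch of styled cells (assumes `io` is an already-constructed cleo `IO`; the data is made up):

```python
from conda_lock._vendor.cleo.ui.table import Table
from conda_lock._vendor.cleo.ui.table_cell import TableCell
from conda_lock._vendor.cleo.ui.table_cell_style import TableCellStyle


def render_totals(io) -> None:
    # Green, right-aligned numeric cells; alignment is applied by pad().
    right = TableCellStyle(fg="green", align="right")

    table = Table(io, style="box")
    table.set_headers(["Item", "Size"])
    table.add_row(["wheel cache", TableCell("12 MB", style=right)])
    table.add_row(["index cache", TableCell("3 MB", style=right)])
    table.render()
```
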
+ """ + + def __init__(self) -> None: + self._padding_char = " " + self._horizontal_outside_border_char = "-" + self._horizontal_inside_border_char = "-" + self._vertical_outside_border_char = "|" + self._vertical_inside_border_char = "|" + self._crossing_char = "+" + self._crossing_top_right_char = "+" + self._crossing_top_mid_char = "+" + self._crossing_top_left_char = "+" + self._crossing_mid_right_char = "+" + self._crossing_bottom_right_char = "+" + self._crossing_bottom_mid_char = "+" + self._crossing_bottom_left_char = "+" + self._crossing_mid_left_char = "+" + self._crossing_top_left_bottom_char = "+" + self._crossing_top_mid_bottom_char = "+" + self._crossing_top_right_bottom_char = "+" + self._header_title_format = " {} " + self._footer_title_format = " {} " + self._cell_header_format = "{}" + self._cell_row_format = "{}" + self._cell_row_content_format = " {} " + self._border_format = "{}" + self._pad_type = "right" + + @property + def padding_char(self) -> str: + return self._padding_char + + @property + def border_chars(self) -> list[str]: + return [ + self._horizontal_outside_border_char, + self._vertical_outside_border_char, + self._horizontal_inside_border_char, + self._vertical_inside_border_char, + ] + + @property + def crossing_char(self) -> str: + return self._crossing_char + + @property + def crossing_chars(self) -> list[str]: + return [ + self._crossing_char, + self._crossing_top_left_char, + self._crossing_top_mid_char, + self._crossing_top_right_char, + self._crossing_mid_right_char, + self._crossing_bottom_right_char, + self._crossing_bottom_mid_char, + self._crossing_bottom_left_char, + self._crossing_mid_left_char, + self._crossing_top_left_bottom_char, + self._crossing_top_mid_bottom_char, + self._crossing_top_right_bottom_char, + ] + + @property + def cell_header_format(self) -> str: + return self._cell_header_format + + @property + def cell_row_format(self) -> str: + return self._cell_row_format + + @property + def cell_row_content_format(self) -> str: + return self._cell_row_content_format + + @property + def border_format(self) -> str: + return self._border_format + + @property + def header_title_format(self) -> str: + return self._header_title_format + + @property + def footer_title_format(self) -> str: + return self._footer_title_format + + @property + def pad_type(self) -> str: + return self._pad_type + + def set_padding_char(self, padding_char: str) -> TableStyle: + """ + Sets padding character, used for cell padding. + """ + if not padding_char: + raise ValueError("The padding char must not be empty.") + + self._padding_char = padding_char + + return self + + def set_horizontal_border_chars( + self, outside: str, inside: str | None = None + ) -> TableStyle: + """ + Sets horizontal border characters. + + ╔═══════════════╤══════════════════════════╤══════════════════╗ + 1 ISBN 2 Title │ Author ║ + ╠═══════════════╪══════════════════════════╪══════════════════╣ + ║ 99921-58-10-7 │ Divine Comedy │ Dante Alighieri ║ + ║ 9971-5-0210-0 │ A Tale of Two Cities │ Charles Dickens ║ + ║ 960-425-059-0 │ The Lord of the Rings │ J. R. R. Tolkien ║ + ║ 80-902734-1-6 │ And Then There Were None │ Agatha Christie ║ + ╚═══════════════╧══════════════════════════╧══════════════════╝ + """ + self._horizontal_outside_border_char = outside + self._horizontal_inside_border_char = outside if inside is None else inside + + return self + + def set_vertical_border_chars( + self, outside: str, inside: str | None = None + ) -> TableStyle: + """ + Sets vertical border characters. 
+ + ╔═══════════════╤══════════════════════════╤══════════════════╗ + ║ ISBN │ Title │ Author ║ + ╠═══════1═══════╪══════════════════════════╪══════════════════╣ + ║ 99921-58-10-7 │ Divine Comedy │ Dante Alighieri ║ + ║ 9971-5-0210-0 │ A Tale of Two Cities │ Charles Dickens ║ + ╟───────2───────┼──────────────────────────┼──────────────────╢ + ║ 960-425-059-0 │ The Lord of the Rings │ J. R. R. Tolkien ║ + ║ 80-902734-1-6 │ And Then There Were None │ Agatha Christie ║ + ╚═══════════════╧══════════════════════════╧══════════════════╝ + """ + self._vertical_outside_border_char = outside + self._vertical_inside_border_char = outside if inside is None else inside + + return self + + def set_crossing_chars( + self, + cross: str, + top_left: str, + top_mid: str, + top_right: str, + mid_right: str, + bottom_right: str, + bottom_mid: str, + bottom_left: str, + mid_left: str, + top_left_bottom: str | None = None, + top_mid_bottom: str | None = None, + top_right_bottom: str | None = None, + ) -> TableStyle: + """ + Sets crossing characters. + + Example: + + 1═══════════════2══════════════════════════2══════════════════3 + ║ ISBN │ Title │ Author ║ + 8'══════════════0'═════════════════════════0'═════════════════4' + ║ 99921-58-10-7 │ Divine Comedy │ Dante Alighieri ║ + ║ 9971-5-0210-0 │ A Tale of Two Cities │ Charles Dickens ║ + 8───────────────0──────────────────────────0──────────────────4 + ║ 960-425-059-0 │ The Lord of the Rings │ J. R. R. Tolkien ║ + ║ 80-902734-1-6 │ And Then There Were None │ Agatha Christie ║ + 7═══════════════6══════════════════════════6══════════════════5 + """ + self._crossing_char = cross + self._crossing_top_left_char = top_left + self._crossing_top_mid_char = top_mid + self._crossing_top_right_char = top_right + self._crossing_mid_right_char = mid_right + self._crossing_bottom_right_char = bottom_right + self._crossing_bottom_mid_char = bottom_mid + self._crossing_bottom_left_char = bottom_left + self._crossing_mid_left_char = mid_left + self._crossing_top_left_bottom_char = ( + mid_left if top_left_bottom is None else top_left_bottom + ) + self._crossing_top_mid_bottom_char = ( + cross if top_mid_bottom is None else top_mid_bottom + ) + self._crossing_top_right_bottom_char = ( + mid_right if top_right_bottom is None else top_right_bottom + ) + + return self + + def set_default_crossing_char(self, char: str) -> TableStyle: + """ + Sets default crossing character used for each cross. + """ + return self.set_crossing_chars( + char, char, char, char, char, char, char, char, char + ) + + def set_cell_header_format(self, cell_header_format: str) -> TableStyle: + """ + Sets the header cell format. + """ + self._cell_header_format = cell_header_format + + return self + + def set_cell_row_format(self, cell_row_format: str) -> TableStyle: + """ + Sets the row cell format. + """ + self._cell_row_format = cell_row_format + + return self + + def set_cell_row_content_format(self, cell_row_content_format: str) -> TableStyle: + """ + Sets the row cell content format. + """ + self._cell_row_content_format = cell_row_content_format + + return self + + def set_border_format(self, border_format: str) -> TableStyle: + """ + Sets the border format. + """ + self._border_format = border_format + + return self + + def set_header_title_format(self, header_title_format: str) -> TableStyle: + """ + Sets the header title format. 
+ """ + self._header_title_format = header_title_format + + return self + + def set_footer_title_format(self, footer_title_format: str) -> TableStyle: + """ + Sets the footer title format. + """ + self._footer_title_format = footer_title_format + + return self + + def set_pad_type(self, pad_type: str) -> TableStyle: + """ + Sets the padding type. + """ + if pad_type not in {"left", "right", "center"}: + raise ValueError( + 'Invalid padding type. Expected one of "left", "right", "center").' + ) + + self._pad_type = pad_type + + return self + + def pad(self, string: str, length: int, char: str = " ") -> str: + if self._pad_type == "left": + return string.rjust(length, char) + + if self._pad_type == "right": + return string.ljust(length, char) + + return string.center(length, char) diff --git a/conda_lock/_vendor/cleo/ui/ui.py b/conda_lock/_vendor/cleo/ui/ui.py new file mode 100644 index 00000000..710ee735 --- /dev/null +++ b/conda_lock/_vendor/cleo/ui/ui.py @@ -0,0 +1,29 @@ +from __future__ import annotations + +from conda_lock._vendor.cleo.exceptions import CleoValueError +from conda_lock._vendor.cleo.ui.component import Component + + +class UI: + def __init__(self, components: list[Component] | None = None) -> None: + self._components: dict[str, Component] = {} + + for component in components or []: + self.register(component) + + def register(self, component: Component) -> None: + if not isinstance(component, Component): + raise CleoValueError( + "A UI component must inherit from the Component class." + ) + + if not component.name: + raise CleoValueError("A UI component cannot be anonymous.") + + self._components[component.name] = component + + def component(self, name: str) -> Component: + if name not in self._components: + raise CleoValueError(f'UI component "{name}" does not exist.') + + return self._components[name] diff --git a/conda_lock/_vendor/conda/LICENSE.txt b/conda_lock/_vendor/conda/LICENSE.txt index a51531d9..eeb91b20 100644 --- a/conda_lock/_vendor/conda/LICENSE.txt +++ b/conda_lock/_vendor/conda/LICENSE.txt @@ -1,7 +1,28 @@ -Copyright (c) 2012 Santiago Lezica +Copyright (c) 2013 Matthew Rocklin -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +All rights reserved. -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + a. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + b. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + c. Neither the name of toolz nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. diff --git a/conda_lock/_vendor/poetry.LICENSE b/conda_lock/_vendor/poetry.LICENSE deleted file mode 100644 index 44cf2b30..00000000 --- a/conda_lock/_vendor/poetry.LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2018 Sébastien Eustace - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/conda_lock/_vendor/poetry.pyi b/conda_lock/_vendor/poetry.pyi deleted file mode 100644 index b994d366..00000000 --- a/conda_lock/_vendor/poetry.pyi +++ /dev/null @@ -1 +0,0 @@ -from poetry import * \ No newline at end of file diff --git a/conda_lock/_vendor/poetry/LICENSE b/conda_lock/_vendor/poetry/LICENSE index 44cf2b30..81a8e1e4 100644 --- a/conda_lock/_vendor/poetry/LICENSE +++ b/conda_lock/_vendor/poetry/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2018 Sébastien Eustace +Copyright (c) 2018-present Sébastien Eustace Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the diff --git a/conda_lock/_vendor/poetry/__init__.py b/conda_lock/_vendor/poetry/__init__.py deleted file mode 100644 index 26cfe405..00000000 --- a/conda_lock/_vendor/poetry/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from pkgutil import extend_path - - -__path__ = extend_path(__path__, __name__) diff --git a/conda_lock/_vendor/poetry/__main__.py b/conda_lock/_vendor/poetry/__main__.py index b280ed84..dbbc659d 100644 --- a/conda_lock/_vendor/poetry/__main__.py +++ b/conda_lock/_vendor/poetry/__main__.py @@ -1,7 +1,9 @@ +from __future__ import annotations + import sys if __name__ == "__main__": - from .console import main + from conda_lock._vendor.poetry.console.application import main sys.exit(main()) diff --git a/conda_lock/_vendor/poetry/__version__.py b/conda_lock/_vendor/poetry/__version__.py index 316ae3d0..74e3e1a0 100644 --- a/conda_lock/_vendor/poetry/__version__.py +++ b/conda_lock/_vendor/poetry/__version__.py @@ -1 +1,16 @@ -__version__ = "1.1.15" +from __future__ import annotations + +from typing import TYPE_CHECKING + +from conda_lock._vendor.poetry.utils._compat import metadata + + +if TYPE_CHECKING: + from collections.abc import Callable + + +# The metadata.version that we import for Python 3.7 is untyped, work around +# that. 
+version: Callable[[str], str] = metadata.version + +__version__ = "1.8.3" diff --git a/conda_lock/_vendor/poetry/_vendor/.gitignore b/conda_lock/_vendor/poetry/_vendor/.gitignore deleted file mode 100644 index d6b7ef32..00000000 --- a/conda_lock/_vendor/poetry/_vendor/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -* -!.gitignore diff --git a/conda_lock/_vendor/poetry/config/config.py b/conda_lock/_vendor/poetry/config/config.py index 52f5b2d3..0b55fad2 100644 --- a/conda_lock/_vendor/poetry/config/config.py +++ b/conda_lock/_vendor/poetry/config/config.py @@ -1,94 +1,197 @@ -from __future__ import absolute_import +from __future__ import annotations +import dataclasses +import logging import os import re from copy import deepcopy +from pathlib import Path +from typing import TYPE_CHECKING from typing import Any -from typing import Callable -from typing import Dict -from typing import Optional +from typing import ClassVar -from conda_lock._vendor.poetry.locations import CACHE_DIR -from conda_lock._vendor.poetry.utils._compat import Path -from conda_lock._vendor.poetry.utils._compat import basestring +from packaging.utils import canonicalize_name -from .config_source import ConfigSource -from .dict_config_source import DictConfigSource +from conda_lock._vendor.poetry.config.dict_config_source import DictConfigSource +from conda_lock._vendor.poetry.config.file_config_source import FileConfigSource +from conda_lock._vendor.poetry.locations import CONFIG_DIR +from conda_lock._vendor.poetry.locations import DEFAULT_CACHE_DIR +from conda_lock._vendor.poetry.toml import TOMLFile -_NOT_SET = object() +if TYPE_CHECKING: + from collections.abc import Callable + from conda_lock._vendor.poetry.config.config_source import ConfigSource -def boolean_validator(val): + +def boolean_validator(val: str) -> bool: return val in {"true", "false", "1", "0"} -def boolean_normalizer(val): +def boolean_normalizer(val: str) -> bool: return val in ["true", "1"] -class Config(object): +def int_normalizer(val: str) -> int: + return int(val) + + +@dataclasses.dataclass +class PackageFilterPolicy: + policy: dataclasses.InitVar[str | list[str] | None] + packages: list[str] = dataclasses.field(init=False) + + def __post_init__(self, policy: str | list[str] | None) -> None: + if not policy: + policy = [] + elif isinstance(policy, str): + policy = self.normalize(policy) + self.packages = policy + + def allows(self, package_name: str) -> bool: + if ":all:" in self.packages: + return False + + return ( + not self.packages + or ":none:" in self.packages + or canonicalize_name(package_name) not in self.packages + ) + + @classmethod + def is_reserved(cls, name: str) -> bool: + return bool(re.match(r":(all|none):", name)) + + @classmethod + def normalize(cls, policy: str) -> list[str]: + if boolean_validator(policy): + if boolean_normalizer(policy): + return [":all:"] + else: + return [":none:"] + + return list( + { + name.strip() if cls.is_reserved(name) else canonicalize_name(name) + for name in policy.strip().split(",") + if name + } + ) + + @classmethod + def validator(cls, policy: str) -> bool: + if boolean_validator(policy): + return True + + names = policy.strip().split(",") + + for name in names: + if ( + not name + or (cls.is_reserved(name) and len(names) == 1) + or re.match(r"^[a-zA-Z\d_-]+$", name) + ): + continue + return False + + return True - default_config = { - "cache-dir": str(CACHE_DIR), + +logger = logging.getLogger(__name__) + +_default_config: Config | None = None + + +class Config: + default_config: 
ClassVar[dict[str, Any]] = { + "cache-dir": str(DEFAULT_CACHE_DIR), "virtualenvs": { "create": True, "in-project": None, "path": os.path.join("{cache-dir}", "virtualenvs"), + "options": { + "always-copy": False, + "system-site-packages": False, + # we default to False here in order to prevent development environment + # breakages for IDEs etc. as when working in these environments + # assumptions are often made about virtual environments having pip and + # setuptools. + "no-pip": False, + "no-setuptools": False, + }, + "prefer-active-python": False, + "prompt": "{project_name}-py{python_version}", + }, + "experimental": { + "system-git-client": False, + }, + "installer": { + "modern-installation": True, + "parallel": True, + "max-workers": None, + "no-binary": None, + }, + "solver": { + "lazy-wheel": True, + }, + "warnings": { + "export": True, + }, + "keyring": { + "enabled": True, }, - "experimental": {"new-installer": True}, - "installer": {"parallel": True}, } def __init__( - self, use_environment=True, base_dir=None - ): # type: (bool, Optional[Path]) -> None + self, use_environment: bool = True, base_dir: Path | None = None + ) -> None: self._config = deepcopy(self.default_config) self._use_environment = use_environment self._base_dir = base_dir - self._config_source = DictConfigSource() - self._auth_config_source = DictConfigSource() - - @property - def name(self): - return str(self._file.path) + self._config_source: ConfigSource = DictConfigSource() + self._auth_config_source: ConfigSource = DictConfigSource() @property - def config(self): + def config(self) -> dict[str, Any]: return self._config @property - def config_source(self): # type: () -> ConfigSource + def config_source(self) -> ConfigSource: return self._config_source @property - def auth_config_source(self): # type: () -> ConfigSource + def auth_config_source(self) -> ConfigSource: return self._auth_config_source - def set_config_source(self, config_source): # type: (ConfigSource) -> Config + def set_config_source(self, config_source: ConfigSource) -> Config: self._config_source = config_source return self - def set_auth_config_source(self, config_source): # type: (ConfigSource) -> Config + def set_auth_config_source(self, config_source: ConfigSource) -> Config: self._auth_config_source = config_source return self - def merge(self, config): # type: (Dict[str, Any]) -> None + def merge(self, config: dict[str, Any]) -> None: from conda_lock._vendor.poetry.utils.helpers import merge_dicts merge_dicts(self._config, config) - def all(self): # type: () -> Dict[str, Any] - def _all(config, parent_key=""): + def all(self) -> dict[str, Any]: + def _all(config: dict[str, Any], parent_key: str = "") -> dict[str, Any]: all_ = {} for key in config: value = self.get(parent_key + key) if isinstance(value, dict): - all_[key] = _all(config[key], parent_key=key + ".") + if parent_key != "": + current_parent = parent_key + key + "." + else: + current_parent = key + "." 
+ all_[key] = _all(config[key], parent_key=current_parent) continue all_[key] = value @@ -97,10 +200,55 @@ def _all(config, parent_key=""): return _all(self.config) - def raw(self): # type: () -> Dict[str, Any] + def raw(self) -> dict[str, Any]: return self._config - def get(self, setting_name, default=None): # type: (str, Any) -> Any + @staticmethod + def _get_environment_repositories() -> dict[str, dict[str, str]]: + repositories = {} + pattern = re.compile(r"POETRY_REPOSITORIES_(?P[A-Z_]+)_URL") + + for env_key in os.environ: + match = pattern.match(env_key) + if match: + repositories[match.group("name").lower().replace("_", "-")] = { + "url": os.environ[env_key] + } + + return repositories + + @property + def repository_cache_directory(self) -> Path: + return Path(self.get("cache-dir")).expanduser() / "cache" / "repositories" + + @property + def artifacts_cache_directory(self) -> Path: + return Path(self.get("cache-dir")).expanduser() / "artifacts" + + @property + def virtualenvs_path(self) -> Path: + path = self.get("virtualenvs.path") + if path is None: + path = Path(self.get("cache-dir")) / "virtualenvs" + return Path(path).expanduser() + + @property + def installer_max_workers(self) -> int: + # This should be directly handled by ThreadPoolExecutor + # however, on some systems the number of CPUs cannot be determined + # (it raises a NotImplementedError), so, in this case, we assume + # that the system only has one CPU. + try: + default_max_workers = (os.cpu_count() or 1) + 4 + except NotImplementedError: + default_max_workers = 5 + + desired_max_workers = self.get("installer.max-workers") + if desired_max_workers is None: + return default_max_workers + return min(default_max_workers, int(desired_max_workers)) + + def get(self, setting_name: str, default: Any = None) -> Any: """ Retrieve a setting value. 
""" @@ -109,12 +257,16 @@ def get(self, setting_name, default=None): # type: (str, Any) -> Any # Looking in the environment if the setting # is set via a POETRY_* environment variable if self._use_environment: - env = "POETRY_{}".format( - "_".join(k.upper().replace("-", "_") for k in keys) - ) - value = os.getenv(env, _NOT_SET) - if value is not _NOT_SET: - return self.process(self._get_normalizer(setting_name)(value)) + if setting_name == "repositories": + # repositories setting is special for now + repositories = self._get_environment_repositories() + if repositories: + return repositories + + env = "POETRY_" + "_".join(k.upper().replace("-", "_") for k in keys) + env_value = os.getenv(env) + if env_value is not None: + return self.process(self._get_normalizer(setting_name)(env_value)) value = self._config for key in keys: @@ -123,34 +275,80 @@ def get(self, setting_name, default=None): # type: (str, Any) -> Any value = value[key] + if self._use_environment and isinstance(value, dict): + # this is a configuration table, it is likely that we missed env vars + # in order to capture them recurse, eg: virtualenvs.options + return {k: self.get(f"{setting_name}.{k}") for k in value} + return self.process(value) - def process(self, value): # type: (Any) -> Any - if not isinstance(value, basestring): + def process(self, value: Any) -> Any: + if not isinstance(value, str): return value - return re.sub(r"{(.+?)}", lambda m: self.get(m.group(1)), value) + def resolve_from_config(match: re.Match[str]) -> Any: + key = match.group(1) + config_value = self.get(key) + if config_value: + return config_value - def _get_validator(self, name): # type: (str) -> Callable - if name in { - "virtualenvs.create", - "virtualenvs.in-project", - "installer.parallel", - }: - return boolean_validator + # The key doesn't exist in the config but might be resolved later, + # so we keep it as a format variable. 
+ return f"{{{key}}}" - if name == "virtualenvs.path": - return str + return re.sub(r"{(.+?)}", resolve_from_config, value) - def _get_normalizer(self, name): # type: (str) -> Callable + @staticmethod + def _get_normalizer(name: str) -> Callable[[str], Any]: if name in { "virtualenvs.create", "virtualenvs.in-project", + "virtualenvs.options.always-copy", + "virtualenvs.options.no-pip", + "virtualenvs.options.no-setuptools", + "virtualenvs.options.system-site-packages", + "virtualenvs.options.prefer-active-python", + "experimental.system-git-client", + "installer.modern-installation", "installer.parallel", + "solver.lazy-wheel", + "warnings.export", + "keyring.enabled", }: return boolean_normalizer if name == "virtualenvs.path": return lambda val: str(Path(val)) + if name == "installer.max-workers": + return int_normalizer + + if name == "installer.no-binary": + return PackageFilterPolicy.normalize + return lambda val: val + + @classmethod + def create(cls, reload: bool = False) -> Config: + global _default_config + + if _default_config is None or reload: + _default_config = cls() + + # Load global config + config_file = TOMLFile(CONFIG_DIR / "config.toml") + if config_file.exists(): + logger.debug("Loading configuration file %s", config_file.path) + _default_config.merge(config_file.read()) + + _default_config.set_config_source(FileConfigSource(config_file)) + + # Load global auth config + auth_config_file = TOMLFile(CONFIG_DIR / "auth.toml") + if auth_config_file.exists(): + logger.debug("Loading configuration file %s", auth_config_file.path) + _default_config.merge(auth_config_file.read()) + + _default_config.set_auth_config_source(FileConfigSource(auth_config_file)) + + return _default_config diff --git a/conda_lock/_vendor/poetry/config/config_source.py b/conda_lock/_vendor/poetry/config/config_source.py index 63a4ad6b..ed97fa91 100644 --- a/conda_lock/_vendor/poetry/config/config_source.py +++ b/conda_lock/_vendor/poetry/config/config_source.py @@ -1,9 +1,11 @@ +from __future__ import annotations + from typing import Any -class ConfigSource(object): - def add_property(self, key, value): # type: (str, Any) -> None +class ConfigSource: + def add_property(self, key: str, value: Any) -> None: raise NotImplementedError() - def remove_property(self, key): # type: (str) -> None + def remove_property(self, key: str) -> None: raise NotImplementedError() diff --git a/conda_lock/_vendor/poetry/config/dict_config_source.py b/conda_lock/_vendor/poetry/config/dict_config_source.py index aaa6ee3b..4ebc24d5 100644 --- a/conda_lock/_vendor/poetry/config/dict_config_source.py +++ b/conda_lock/_vendor/poetry/config/dict_config_source.py @@ -1,18 +1,19 @@ +from __future__ import annotations + from typing import Any -from typing import Dict -from .config_source import ConfigSource +from conda_lock._vendor.poetry.config.config_source import ConfigSource class DictConfigSource(ConfigSource): - def __init__(self): # type: () -> None - self._config = {} + def __init__(self) -> None: + self._config: dict[str, Any] = {} @property - def config(self): # type: () -> Dict[str, Any] + def config(self) -> dict[str, Any]: return self._config - def add_property(self, key, value): # type: (str, Any) -> None + def add_property(self, key: str, value: Any) -> None: keys = key.split(".") config = self._config @@ -26,7 +27,7 @@ def add_property(self, key, value): # type: (str, Any) -> None config = config[key] - def remove_property(self, key): # type: (str) -> None + def remove_property(self, key: str) -> None: keys = 
key.split(".") config = self._config diff --git a/conda_lock/_vendor/poetry/config/file_config_source.py b/conda_lock/_vendor/poetry/config/file_config_source.py index cec20aea..b7eef28d 100644 --- a/conda_lock/_vendor/poetry/config/file_config_source.py +++ b/conda_lock/_vendor/poetry/config/file_config_source.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from contextlib import contextmanager from typing import TYPE_CHECKING from typing import Any @@ -5,28 +7,33 @@ from tomlkit import document from tomlkit import table -from .config_source import ConfigSource +from conda_lock._vendor.poetry.config.config_source import ConfigSource if TYPE_CHECKING: - from conda_lock._vendor.poetry.core.toml.file import TOMLFile # noqa + from collections.abc import Iterator + + from tomlkit.toml_document import TOMLDocument + + from conda_lock._vendor.poetry.toml.file import TOMLFile class FileConfigSource(ConfigSource): - def __init__(self, file, auth_config=False): # type: ("TOMLFile", bool) -> None + def __init__(self, file: TOMLFile, auth_config: bool = False) -> None: self._file = file self._auth_config = auth_config @property - def name(self): # type: () -> str + def name(self) -> str: return str(self._file.path) @property - def file(self): # type: () -> "TOMLFile" + def file(self) -> TOMLFile: return self._file - def add_property(self, key, value): # type: (str, Any) -> None - with self.secure() as config: + def add_property(self, key: str, value: Any) -> None: + with self.secure() as toml: + config: dict[str, Any] = toml keys = key.split(".") for i, key in enumerate(keys): @@ -39,8 +46,9 @@ def add_property(self, key, value): # type: (str, Any) -> None config = config[key] - def remove_property(self, key): # type: (str) -> None - with self.secure() as config: + def remove_property(self, key: str) -> None: + with self.secure() as toml: + config: dict[str, Any] = toml keys = key.split(".") current_config = config @@ -56,7 +64,7 @@ def remove_property(self, key): # type: (str) -> None current_config = current_config[key] @contextmanager - def secure(self): + def secure(self) -> Iterator[TOMLDocument]: if self.file.exists(): initial_config = self.file.read() config = self.file.read() @@ -74,7 +82,7 @@ def secure(self): mode = 0o600 if new_file: - self.file.touch(mode=mode) + self.file.path.touch(mode=mode) self.file.write(config) except Exception: diff --git a/conda_lock/_vendor/poetry/config/source.py b/conda_lock/_vendor/poetry/config/source.py new file mode 100644 index 00000000..733d0836 --- /dev/null +++ b/conda_lock/_vendor/poetry/config/source.py @@ -0,0 +1,43 @@ +from __future__ import annotations + +import dataclasses +import warnings + +from conda_lock._vendor.poetry.repositories.repository_pool import Priority + + +@dataclasses.dataclass(order=True, eq=True) +class Source: + name: str + url: str = "" + default: dataclasses.InitVar[bool] = False + secondary: dataclasses.InitVar[bool] = False + priority: Priority = ( + Priority.PRIMARY + ) # cheating in annotation: str will be converted to Priority in __post_init__ + + def __post_init__(self, default: bool, secondary: bool) -> None: + if isinstance(self.priority, str): + self.priority = Priority[self.priority.upper()] + if default or secondary: + warnings.warn( + "Parameters 'default' and 'secondary' to" + " 'Source' are deprecated. 
Please provide" + " 'priority' instead.", + DeprecationWarning, + stacklevel=2, + ) + if default: + self.priority = Priority.DEFAULT + elif secondary: + self.priority = Priority.SECONDARY + + def to_dict(self) -> dict[str, str | bool]: + return dataclasses.asdict( + self, + dict_factory=lambda x: { + k: v if not isinstance(v, Priority) else v.name.lower() + for (k, v) in x + if v + }, + ) diff --git a/conda_lock/_vendor/poetry/console/__init__.py b/conda_lock/_vendor/poetry/console/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/conda_lock/_vendor/poetry/console/application.py b/conda_lock/_vendor/poetry/console/application.py new file mode 100644 index 00000000..1552b468 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/application.py @@ -0,0 +1,416 @@ +from __future__ import annotations + +import logging +import re + +from contextlib import suppress +from importlib import import_module +from typing import TYPE_CHECKING +from typing import cast + +from conda_lock._vendor.cleo.application import Application as BaseApplication +from conda_lock._vendor.cleo.events.console_command_event import ConsoleCommandEvent +from conda_lock._vendor.cleo.events.console_events import COMMAND +from conda_lock._vendor.cleo.events.event_dispatcher import EventDispatcher +from conda_lock._vendor.cleo.exceptions import CleoError +from conda_lock._vendor.cleo.formatters.style import Style +from conda_lock._vendor.cleo.io.null_io import NullIO + +from conda_lock._vendor.poetry.__version__ import __version__ +from conda_lock._vendor.poetry.console.command_loader import CommandLoader +from conda_lock._vendor.poetry.console.commands.command import Command + + +if TYPE_CHECKING: + from collections.abc import Callable + + from conda_lock._vendor.cleo.events.event import Event + from conda_lock._vendor.cleo.io.inputs.argv_input import ArgvInput + from conda_lock._vendor.cleo.io.inputs.definition import Definition + from conda_lock._vendor.cleo.io.inputs.input import Input + from conda_lock._vendor.cleo.io.io import IO + from conda_lock._vendor.cleo.io.outputs.output import Output + from crashtest.solution_providers.solution_provider_repository import ( + SolutionProviderRepository, + ) + + from conda_lock._vendor.poetry.console.commands.installer_command import InstallerCommand + from conda_lock._vendor.poetry.poetry import Poetry + + +def load_command(name: str) -> Callable[[], Command]: + def _load() -> Command: + words = name.split(" ") + module = import_module("poetry.console.commands." 
+ ".".join(words)) + command_class = getattr(module, "".join(c.title() for c in words) + "Command") + command: Command = command_class() + return command + + return _load + + +COMMANDS = [ + "about", + "add", + "build", + "check", + "config", + "init", + "install", + "lock", + "new", + "publish", + "remove", + "run", + "search", + "shell", + "show", + "update", + "version", + # Cache commands + "cache clear", + "cache list", + # Debug commands + "debug info", + "debug resolve", + # Env commands + "env info", + "env list", + "env remove", + "env use", + # Self commands + "self add", + "self install", + "self lock", + "self remove", + "self update", + "self show", + "self show plugins", + # Source commands + "source add", + "source remove", + "source show", +] + + +class Application(BaseApplication): + def __init__(self) -> None: + super().__init__("poetry", __version__) + + self._poetry: Poetry | None = None + self._io: IO | None = None + self._disable_plugins = False + self._disable_cache = False + self._plugins_loaded = False + + dispatcher = EventDispatcher() + dispatcher.add_listener(COMMAND, self.register_command_loggers) + dispatcher.add_listener(COMMAND, self.configure_env) + dispatcher.add_listener(COMMAND, self.configure_installer_for_event) + self.set_event_dispatcher(dispatcher) + + command_loader = CommandLoader({name: load_command(name) for name in COMMANDS}) + self.set_command_loader(command_loader) + + @property + def poetry(self) -> Poetry: + from pathlib import Path + + from conda_lock._vendor.poetry.factory import Factory + + if self._poetry is not None: + return self._poetry + + project_path = Path.cwd() + + if self._io and self._io.input.option("directory"): + project_path = self._io.input.option("directory") + + self._poetry = Factory().create_poetry( + cwd=project_path, + io=self._io, + disable_plugins=self._disable_plugins, + disable_cache=self._disable_cache, + ) + + return self._poetry + + @property + def command_loader(self) -> CommandLoader: + command_loader = self._command_loader + assert isinstance(command_loader, CommandLoader) + return command_loader + + def reset_poetry(self) -> None: + self._poetry = None + + def create_io( + self, + input: Input | None = None, + output: Output | None = None, + error_output: Output | None = None, + ) -> IO: + io = super().create_io(input, output, error_output) + + # Set our own CLI styles + formatter = io.output.formatter + formatter.set_style("c1", Style("cyan")) + formatter.set_style("c2", Style("default", options=["bold"])) + formatter.set_style("info", Style("blue")) + formatter.set_style("comment", Style("green")) + formatter.set_style("warning", Style("yellow")) + formatter.set_style("debug", Style("default", options=["dark"])) + formatter.set_style("success", Style("green")) + + # Dark variants + formatter.set_style("c1_dark", Style("cyan", options=["dark"])) + formatter.set_style("c2_dark", Style("default", options=["bold", "dark"])) + formatter.set_style("success_dark", Style("green", options=["dark"])) + + io.output.set_formatter(formatter) + io.error_output.set_formatter(formatter) + + self._io = io + + return io + + def render_error(self, error: Exception, io: IO) -> None: + # We set the solution provider repository here to load providers + # only when an error occurs + self.set_solution_provider_repository(self._get_solution_provider_repository()) + + super().render_error(error, io) + + def _run(self, io: IO) -> int: + self._disable_plugins = io.input.parameter_option("--no-plugins") + self._disable_cache = 
io.input.has_parameter_option("--no-cache") + + self._load_plugins(io) + + exit_code: int = super()._run(io) + return exit_code + + def _configure_io(self, io: IO) -> None: + # We need to check if the command being run + # is the "run" command. + definition = self.definition + with suppress(CleoError): + io.input.bind(definition) + + name = io.input.first_argument + if name == "run": + from conda_lock._vendor.poetry.console.io.inputs.run_argv_input import RunArgvInput + + input = cast("ArgvInput", io.input) + run_input = RunArgvInput([self._name or "", *input._tokens]) + # For the run command reset the definition + # with only the set options (i.e. the options given before the command) + for option_name, value in input.options.items(): + if value: + option = definition.option(option_name) + run_input.add_parameter_option("--" + option.name) + if option.shortcut: + shortcuts = re.split(r"\|-?", option.shortcut.lstrip("-")) + shortcuts = [s for s in shortcuts if s] + for shortcut in shortcuts: + run_input.add_parameter_option("-" + shortcut.lstrip("-")) + + with suppress(CleoError): + run_input.bind(definition) + + for option_name, value in input.options.items(): + if value: + run_input.set_option(option_name, value) + + io.set_input(run_input) + + super()._configure_io(io) + + def register_command_loggers( + self, event: Event, event_name: str, _: EventDispatcher + ) -> None: + from conda_lock._vendor.poetry.console.logging.filters import POETRY_FILTER + from conda_lock._vendor.poetry.console.logging.io_formatter import IOFormatter + from conda_lock._vendor.poetry.console.logging.io_handler import IOHandler + + assert isinstance(event, ConsoleCommandEvent) + command = event.command + if not isinstance(command, Command): + return + + io = event.io + + loggers = [ + "poetry.packages.locker", + "poetry.packages.package", + "poetry.utils.password_manager", + ] + + loggers += command.loggers + + handler = IOHandler(io) + handler.setFormatter(IOFormatter()) + + level = logging.WARNING + + if io.is_debug(): + level = logging.DEBUG + elif io.is_very_verbose() or io.is_verbose(): + level = logging.INFO + + logging.basicConfig(level=level, handlers=[handler]) + + # only log third-party packages when very verbose + if not io.is_very_verbose(): + handler.addFilter(POETRY_FILTER) + + for name in loggers: + logger = logging.getLogger(name) + + _level = level + # The builders loggers are special and we can actually + # start at the INFO level. 
+ if ( + logger.name.startswith("poetry.core.masonry.builders") + and _level > logging.INFO + ): + _level = logging.INFO + + logger.setLevel(_level) + + def configure_env(self, event: Event, event_name: str, _: EventDispatcher) -> None: + from conda_lock._vendor.poetry.console.commands.env_command import EnvCommand + from conda_lock._vendor.poetry.console.commands.self.self_command import SelfCommand + + assert isinstance(event, ConsoleCommandEvent) + command = event.command + if not isinstance(command, EnvCommand) or isinstance(command, SelfCommand): + return + + if command._env is not None: + return + + from conda_lock._vendor.poetry.utils.env import EnvManager + + io = event.io + poetry = command.poetry + + env_manager = EnvManager(poetry, io=io) + env = env_manager.create_venv() + + if env.is_venv() and io.is_verbose(): + io.write_line(f"Using virtualenv: {env.path}") + + command.set_env(env) + + @classmethod + def configure_installer_for_event( + cls, event: Event, event_name: str, _: EventDispatcher + ) -> None: + from conda_lock._vendor.poetry.console.commands.installer_command import InstallerCommand + + assert isinstance(event, ConsoleCommandEvent) + command = event.command + if not isinstance(command, InstallerCommand): + return + + # If the command already has an installer + # we skip this step + if command._installer is not None: + return + + cls.configure_installer_for_command(command, event.io) + + @staticmethod + def configure_installer_for_command(command: InstallerCommand, io: IO) -> None: + from conda_lock._vendor.poetry.installation.installer import Installer + + poetry = command.poetry + installer = Installer( + io, + command.env, + poetry.package, + poetry.locker, + poetry.pool, + poetry.config, + disable_cache=poetry.disable_cache, + ) + command.set_installer(installer) + + def _load_plugins(self, io: IO | None = None) -> None: + if self._plugins_loaded: + return + + if io is None: + io = NullIO() + + self._disable_plugins = io.input.has_parameter_option("--no-plugins") + + if not self._disable_plugins: + from conda_lock._vendor.poetry.plugins.application_plugin import ApplicationPlugin + from conda_lock._vendor.poetry.plugins.plugin_manager import PluginManager + + manager = PluginManager(ApplicationPlugin.group) + manager.load_plugins() + manager.activate(self) + + # We have to override the command from poetry-plugin-export + # with the wrapper. + if self.command_loader.has("export"): + del self.command_loader._factories["export"] + self.command_loader._factories["export"] = load_command("export") + + self._plugins_loaded = True + + @property + def _default_definition(self) -> Definition: + from conda_lock._vendor.cleo.io.inputs.option import Option + + definition = super()._default_definition + + definition.add_option( + Option("--no-plugins", flag=True, description="Disables plugins.") + ) + + definition.add_option( + Option( + "--no-cache", flag=True, description="Disables Poetry source caches." + ) + ) + + definition.add_option( + Option( + "--directory", + "-C", + flag=False, + description=( + "The working directory for the Poetry command (defaults to the" + " current working directory)." 
+ ), + ) + ) + + return definition + + def _get_solution_provider_repository(self) -> SolutionProviderRepository: + from crashtest.solution_providers.solution_provider_repository import ( + SolutionProviderRepository, + ) + + from conda_lock._vendor.poetry.mixology.solutions.providers.python_requirement_solution_provider import ( + PythonRequirementSolutionProvider, + ) + + repository = SolutionProviderRepository() + repository.register_solution_providers([PythonRequirementSolutionProvider]) + + return repository + + +def main() -> int: + exit_code: int = Application().run() + return exit_code + + +if __name__ == "__main__": + main() diff --git a/conda_lock/_vendor/poetry/console/command_loader.py b/conda_lock/_vendor/poetry/console/command_loader.py new file mode 100644 index 00000000..cada7246 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/command_loader.py @@ -0,0 +1,22 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from conda_lock._vendor.cleo.exceptions import CleoLogicError +from conda_lock._vendor.cleo.loaders.factory_command_loader import FactoryCommandLoader + + +if TYPE_CHECKING: + from collections.abc import Callable + + from conda_lock._vendor.cleo.commands.command import Command + + +class CommandLoader(FactoryCommandLoader): + def register_factory( + self, command_name: str, factory: Callable[[], Command] + ) -> None: + if command_name in self._factories: + raise CleoLogicError(f'The command "{command_name}" already exists.') + + self._factories[command_name] = factory diff --git a/conda_lock/_vendor/poetry/console/commands/__init__.py b/conda_lock/_vendor/poetry/console/commands/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/conda_lock/_vendor/poetry/console/commands/about.py b/conda_lock/_vendor/poetry/console/commands/about.py new file mode 100644 index 00000000..0464d2d0 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/about.py @@ -0,0 +1,37 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from conda_lock._vendor.poetry.console.commands.command import Command + + +if TYPE_CHECKING: + from collections.abc import Callable + + +class AboutCommand(Command): + name = "about" + + description = "Shows information about Poetry." + + def handle(self) -> int: + from conda_lock._vendor.poetry.utils._compat import metadata + + # The metadata.version that we import for Python 3.7 is untyped, work around + # that. + version: Callable[[str], str] = metadata.version + + self.line( + f"""\ +Poetry - Package Management for Python + +Version: {version('poetry')} +Poetry-Core Version: {version('poetry-core')} + +Poetry is a dependency manager tracking local dependencies of your projects\ + and libraries. 
+See https://github.com/python-poetry/poetry for more information.\ +""" + ) + + return 0 diff --git a/conda_lock/_vendor/poetry/console/commands/add.py b/conda_lock/_vendor/poetry/console/commands/add.py new file mode 100644 index 00000000..a4e66dee --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/add.py @@ -0,0 +1,306 @@ +from __future__ import annotations + +import contextlib + +from typing import Any + +from conda_lock._vendor.cleo.helpers import argument +from conda_lock._vendor.cleo.helpers import option +from packaging.utils import canonicalize_name +from conda_lock._vendor.poetry.core.packages.dependency_group import MAIN_GROUP +from tomlkit.toml_document import TOMLDocument + +from conda_lock._vendor.poetry.console.commands.init import InitCommand +from conda_lock._vendor.poetry.console.commands.installer_command import InstallerCommand + + +class AddCommand(InstallerCommand, InitCommand): + name = "add" + description = "Adds a new dependency to pyproject.toml and installs it." + + arguments = [argument("name", "The packages to add.", multiple=True)] + options = [ + option( + "group", + "-G", + "The group to add the dependency to.", + flag=False, + default=MAIN_GROUP, + ), + option( + "dev", + "D", + "Add as a development dependency. (Deprecated) Use" + " --group=dev instead.", + ), + option("editable", "e", "Add vcs/path dependencies as editable."), + option( + "extras", + "E", + "Extras to activate for the dependency.", + flag=False, + multiple=True, + ), + option("optional", None, "Add as an optional dependency."), + option( + "python", + None, + "Python version for which the dependency must be installed.", + flag=False, + ), + option( + "platform", + None, + "Platforms for which the dependency must be installed.", + flag=False, + ), + option( + "source", + None, + "Name of the source to use to install the package.", + flag=False, + ), + option("allow-prereleases", None, "Accept prereleases."), + option( + "dry-run", + None, + "Output the operations but do not execute anything (implicitly enables" + " --verbose).", + ), + option("lock", None, "Do not perform operations (only update the lockfile)."), + ] + examples = """\ +If you do not specify a version constraint, poetry will choose a suitable one based on\ + the available package versions. + +You can specify a package in the following forms: + - A single name (requests) + - A name and a constraint (requests@^2.23.0) + - A git url (git+https://github.com/python-poetry/poetry.git) + - A git url with a revision\ + (git+https://github.com/python-poetry/poetry.git#develop) + - A subdirectory of a git repository\ + (git+https://github.com/python-poetry/poetry.git#subdirectory=tests/fixtures/sample_project) + - A git SSH url (git+ssh://github.com/python-poetry/poetry.git) + - A git SSH url with a revision\ + (git+ssh://github.com/python-poetry/poetry.git#develop) + - A file path (../my-package/my-package.whl) + - A directory (../my-package/) + - A url (https://example.com/packages/my-package-0.1.0.tar.gz) +""" + help = f"""\ +The add command adds required packages to your pyproject.toml and installs\ + them. 
+ +{examples} +""" + + loggers = ["poetry.repositories.pypi_repository", "poetry.inspection.info"] + + def handle(self) -> int: + from conda_lock._vendor.poetry.core.constraints.version import parse_constraint + from tomlkit import inline_table + from tomlkit import parse as parse_toml + from tomlkit import table + + from conda_lock._vendor.poetry.factory import Factory + + packages = self.argument("name") + if self.option("dev"): + self.line_error( + "The --dev option is deprecated, " + "use the `--group dev` notation instead." + ) + group = "dev" + else: + group = self.option("group", self.default_group or MAIN_GROUP) + + if self.option("extras") and len(packages) > 1: + raise ValueError( + "You can only specify one package when using the --extras option" + ) + + # tomlkit types are awkward to work with, treat content as a mostly untyped + # dictionary. + content: dict[str, Any] = self.poetry.file.read() + poetry_content = content["tool"]["poetry"] + project_name = ( + canonicalize_name(name) if (name := poetry_content.get("name")) else None + ) + + if group == MAIN_GROUP: + if "dependencies" not in poetry_content: + poetry_content["dependencies"] = table() + + section = poetry_content["dependencies"] + else: + if "group" not in poetry_content: + poetry_content["group"] = table(is_super_table=True) + + groups = poetry_content["group"] + if group not in groups: + dependencies_toml: dict[str, Any] = parse_toml( + f"[tool.poetry.group.{group}.dependencies]\n\n" + ) + group_table = dependencies_toml["tool"]["poetry"]["group"][group] + poetry_content["group"][group] = group_table + + if "dependencies" not in poetry_content["group"][group]: + poetry_content["group"][group]["dependencies"] = table() + + section = poetry_content["group"][group]["dependencies"] + + existing_packages = self.get_existing_packages_from_input(packages, section) + + if existing_packages: + self.notify_about_existing_packages(existing_packages) + + packages = [name for name in packages if name not in existing_packages] + + if not packages: + self.line("Nothing to add.") + return 0 + + requirements = self._determine_requirements( + packages, + allow_prereleases=self.option("allow-prereleases"), + source=self.option("source"), + ) + + for _constraint in requirements: + version = _constraint.get("version") + if version is not None: + # Validate version constraint + assert isinstance(version, str) + parse_constraint(version) + + constraint: dict[str, Any] = inline_table() + for name, value in _constraint.items(): + if name == "name": + continue + + constraint[name] = value + + if self.option("optional"): + constraint["optional"] = True + + if self.option("allow-prereleases"): + constraint["allow-prereleases"] = True + + if self.option("extras"): + extras = [] + for extra in self.option("extras"): + extras += extra.split() + + constraint["extras"] = extras + + if self.option("editable"): + if "git" in _constraint or "path" in _constraint: + constraint["develop"] = True + else: + self.line_error( + "\n" + "Failed to add packages. " + "Only vcs/path dependencies support editable installs. " + f"{_constraint['name']} is neither." 
+ ) + self.line_error("\nNo changes were applied.") + return 1 + + if self.option("python"): + constraint["python"] = self.option("python") + + if self.option("platform"): + constraint["platform"] = self.option("platform") + + if self.option("source"): + constraint["source"] = self.option("source") + + if len(constraint) == 1 and "version" in constraint: + constraint = constraint["version"] + + constraint_name = _constraint["name"] + assert isinstance(constraint_name, str) + + canonical_constraint_name = canonicalize_name(constraint_name) + + if canonical_constraint_name == project_name: + self.line_error( + f"Cannot add dependency on {constraint_name} to" + " project with the same name." + ) + self.line_error("\nNo changes were applied.") + return 1 + + for key in section: + if canonicalize_name(key) == canonical_constraint_name: + section[key] = constraint + break + else: + section[constraint_name] = constraint + + with contextlib.suppress(ValueError): + self.poetry.package.dependency_group(group).remove_dependency( + constraint_name + ) + + self.poetry.package.add_dependency( + Factory.create_dependency( + constraint_name, + constraint, + groups=[group], + root_dir=self.poetry.file.path.parent, + ) + ) + + # Refresh the locker + self.poetry.locker.set_local_config(poetry_content) + self.installer.set_locker(self.poetry.locker) + + # Cosmetic new line + self.line("") + + self.installer.set_package(self.poetry.package) + self.installer.dry_run(self.option("dry-run")) + self.installer.verbose(self.io.is_verbose()) + self.installer.update(True) + self.installer.execute_operations(not self.option("lock")) + + self.installer.whitelist([r["name"] for r in requirements]) + + status = self.installer.run() + + if status == 0 and not self.option("dry-run"): + assert isinstance(content, TOMLDocument) + self.poetry.file.write(content) + + return status + + def get_existing_packages_from_input( + self, packages: list[str], section: dict[str, Any] + ) -> list[str]: + existing_packages = [] + + for name in packages: + for key in section: + if canonicalize_name(key) == canonicalize_name(name): + existing_packages.append(name) + + return existing_packages + + @property + def _hint_update_packages(self) -> str: + return ( + "\nIf you want to update it to the latest compatible version, you can use" + " `poetry update package`.\nIf you prefer to upgrade it to the latest" + " available version, you can use `poetry add package@latest`.\n" + ) + + def notify_about_existing_packages(self, existing_packages: list[str]) -> None: + self.line( + "The following packages are already present in the pyproject.toml and will" + " be skipped:\n" + ) + for name in existing_packages: + self.line(f" - {name}") + self.line(self._hint_update_packages) diff --git a/conda_lock/_vendor/poetry/console/commands/build.py b/conda_lock/_vendor/poetry/console/commands/build.py new file mode 100644 index 00000000..548c6ac5 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/build.py @@ -0,0 +1,68 @@ +from __future__ import annotations + +from pathlib import Path + +from conda_lock._vendor.cleo.helpers import option + +from conda_lock._vendor.poetry.console.commands.env_command import EnvCommand +from conda_lock._vendor.poetry.utils.env import build_environment + + +class BuildCommand(EnvCommand): + name = "build" + description = "Builds a package, as a tarball and a wheel by default." 
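For orientation, the _build() helper just below dispatches on the requested format and treats "all" as every registered builder. A minimal, self-contained sketch of that dispatch idea, with plain strings as hypothetical stand-ins for the builder classes held in poetry's real BUILD_FORMATS mapping:

# Hypothetical stand-ins: the real BUILD_FORMATS maps format names to
# builder classes, not strings.
BUILD_FORMATS = {"sdist": "SdistBuilder", "wheel": "WheelBuilder"}

def select_builders(fmt: str) -> list[str]:
    # A known format selects one builder; "all" fans out to every builder.
    if fmt in BUILD_FORMATS:
        return [BUILD_FORMATS[fmt]]
    if fmt == "all":
        return list(BUILD_FORMATS.values())
    raise ValueError(f"Invalid format: {fmt}")

assert select_builders("wheel") == ["WheelBuilder"]
assert select_builders("all") == ["SdistBuilder", "WheelBuilder"]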
+ + options = [ + option("format", "f", "Limit the format to either sdist or wheel.", flag=False), + option( + "output", + "o", + "Set output directory for build artifacts. Default is `dist`.", + default="dist", + flag=False, + ), + ] + + loggers = [ + "poetry.core.masonry.builders.builder", + "poetry.core.masonry.builders.sdist", + "poetry.core.masonry.builders.wheel", + ] + + def _build( + self, + fmt: str, + executable: str | Path | None = None, + *, + target_dir: Path | None = None, + ) -> None: + from conda_lock._vendor.poetry.masonry.builders import BUILD_FORMATS + + if fmt in BUILD_FORMATS: + builders = [BUILD_FORMATS[fmt]] + elif fmt == "all": + builders = list(BUILD_FORMATS.values()) + else: + raise ValueError(f"Invalid format: {fmt}") + + for builder in builders: + builder(self.poetry, executable=executable).build(target_dir) + + def handle(self) -> int: + if not self.poetry.is_package_mode: + self.line_error("Building a package is not possible in non-package mode.") + return 1 + + with build_environment(poetry=self.poetry, env=self.env, io=self.io) as env: + fmt = self.option("format") or "all" + dist_dir = Path(self.option("output")) + package = self.poetry.package + self.line( + f"Building {package.pretty_name} ({package.version})" + ) + + if not dist_dir.is_absolute(): + dist_dir = self.poetry.pyproject_path.parent / dist_dir + self._build(fmt, executable=env.python, target_dir=dist_dir) + + return 0 diff --git a/conda_lock/_vendor/poetry/console/commands/cache/__init__.py b/conda_lock/_vendor/poetry/console/commands/cache/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/conda_lock/_vendor/poetry/console/commands/cache/clear.py b/conda_lock/_vendor/poetry/console/commands/cache/clear.py new file mode 100644 index 00000000..4e6b321c --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/cache/clear.py @@ -0,0 +1,78 @@ +from __future__ import annotations + +import os + +from conda_lock._vendor.cleo.helpers import argument +from conda_lock._vendor.cleo.helpers import option +from packaging.utils import canonicalize_name + +from conda_lock._vendor.poetry.config.config import Config +from conda_lock._vendor.poetry.console.commands.command import Command +from conda_lock._vendor.poetry.utils.cache import FileCache + + +class CacheClearCommand(Command): + name = "cache clear" + description = "Clears a Poetry cache by name." + + arguments = [argument("cache", description="The name of the cache to clear.")] + options = [option("all", description="Clear all entries in the cache.")] + + def handle(self) -> int: + cache = self.argument("cache") + + parts = cache.split(":") + root = parts[0] + + config = Config.create() + cache_dir = config.repository_cache_directory / root + + try: + cache_dir.relative_to(config.repository_cache_directory) + except ValueError: + raise ValueError(f"{root} is not a valid repository cache") + + cache = FileCache(cache_dir) + + if len(parts) == 1: + if not self.option("all"): + raise RuntimeError( + f"Add the --all option if you want to clear all {parts[0]} caches" + ) + + if not cache_dir.exists(): + self.line(f"No cache entries for {parts[0]}") + return 0 + + # Calculate number of entries + entries_count = sum( + len(files) for _path, _dirs, files in os.walk(str(cache_dir)) + ) + + delete = self.confirm(f"Delete {entries_count} entries?", True) + if not delete: + return 0 + + cache.flush() + elif len(parts) == 2: + raise RuntimeError( + "Only specifying the package name is not yet supported. 
" + "Add a specific version to clear" + ) + elif len(parts) == 3: + package = canonicalize_name(parts[1]) + version = parts[2] + + if not cache.has(f"{package}:{version}"): + self.line(f"No cache entries for {package}:{version}") + return 0 + + delete = self.confirm(f"Delete cache entry {package}:{version}", True) + if not delete: + return 0 + + cache.forget(f"{package}:{version}") + else: + raise ValueError("Invalid cache key") + + return 0 diff --git a/conda_lock/_vendor/poetry/console/commands/cache/list.py b/conda_lock/_vendor/poetry/console/commands/cache/list.py new file mode 100644 index 00000000..8705131a --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/cache/list.py @@ -0,0 +1,21 @@ +from __future__ import annotations + +from conda_lock._vendor.poetry.config.config import Config +from conda_lock._vendor.poetry.console.commands.command import Command + + +class CacheListCommand(Command): + name = "cache list" + description = "List Poetry's caches." + + def handle(self) -> int: + config = Config.create() + if config.repository_cache_directory.exists(): + caches = sorted(config.repository_cache_directory.iterdir()) + if caches: + for cache in caches: + self.line(f"{cache.name}") + return 0 + + self.line_error("No caches found") + return 0 diff --git a/conda_lock/_vendor/poetry/console/commands/check.py b/conda_lock/_vendor/poetry/console/commands/check.py new file mode 100644 index 00000000..3cdf28ec --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/check.py @@ -0,0 +1,166 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Any + +from conda_lock._vendor.cleo.helpers import option + +from conda_lock._vendor.poetry.console.commands.command import Command + + +if TYPE_CHECKING: + from pathlib import Path + + +class CheckCommand(Command): + name = "check" + description = ( + "Validates the content of the pyproject.toml file and its" + " consistency with the poetry.lock file." + ) + + options = [ + option( + "lock", + None, + "Checks that poetry.lock exists for the current" + " version of pyproject.toml.", + ), + ] + + def _validate_classifiers( + self, project_classifiers: set[str] + ) -> tuple[list[str], list[str]]: + """Identify unrecognized and deprecated trove classifiers. + + A fully-qualified classifier is a string delimited by `` :: `` separators. To + make the error message more readable we need to have visual clues to + materialize the start and end of a classifier string. That way the user can + easily copy and paste it from the messages while reducing mistakes because of + extra spaces. + + We use ``!r`` (``repr()``) for classifiers and list of classifiers for + consistency. That way all strings will be rendered with the same kind of quotes + (i.e. simple tick: ``'``). + """ + from trove_classifiers import classifiers + from trove_classifiers import deprecated_classifiers + + errors = [] + warnings = [] + + unrecognized = sorted( + project_classifiers - set(classifiers) - set(deprecated_classifiers) + ) + # Allow "Private ::" classifiers as recommended on PyPI and the packaging guide + # to allow users to avoid accidentally publishing private packages to PyPI. 
+ # https://pypi.org/classifiers/ + unrecognized = [u for u in unrecognized if not u.startswith("Private ::")] + if unrecognized: + errors.append(f"Unrecognized classifiers: {unrecognized!r}.") + + deprecated = sorted( + project_classifiers.intersection(set(deprecated_classifiers)) + ) + if deprecated: + for old_classifier in deprecated: + new_classifiers = deprecated_classifiers[old_classifier] + if new_classifiers: + message = ( + f"Deprecated classifier {old_classifier!r}. " + f"Must be replaced by {new_classifiers!r}." + ) + else: + message = ( + f"Deprecated classifier {old_classifier!r}. Must be removed." + ) + warnings.append(message) + + return errors, warnings + + def _validate_readme(self, readme: str | list[str], poetry_file: Path) -> list[str]: + """Check existence of referenced readme files""" + + readmes = [readme] if isinstance(readme, str) else readme + + errors = [] + for name in readmes: + if not (poetry_file.parent / name).exists(): + errors.append(f"Declared README file does not exist: {name}") + return errors + + def _validate_dependencies_source(self, config: dict[str, Any]) -> list[str]: + """Check dependencies's source are valid""" + sources = {k["name"] for k in config.get("source", [])} + + dependency_declarations: list[ + dict[str, str | dict[str, str] | list[dict[str, str]]] + ] = [] + # scan dependencies and group dependencies settings in pyproject.toml + if "dependencies" in config: + dependency_declarations.append(config["dependencies"]) + + for group in config.get("group", {}).values(): + if "dependencies" in group: + dependency_declarations.append(group["dependencies"]) + + all_referenced_sources: set[str] = set() + + for dependency_declaration in dependency_declarations: + for declaration in dependency_declaration.values(): + if isinstance(declaration, list): + for item in declaration: + if "source" in item: + all_referenced_sources.add(item["source"]) + elif isinstance(declaration, dict) and "source" in declaration: + all_referenced_sources.add(declaration["source"]) + + return [ + f'Invalid source "{source}" referenced in dependencies.' + for source in sorted(all_referenced_sources - sources) + ] + + def handle(self) -> int: + from conda_lock._vendor.poetry.core.pyproject.toml import PyProjectTOML + + from conda_lock._vendor.poetry.factory import Factory + + # Load poetry config and display errors, if any + poetry_file = self.poetry.file.path + config = PyProjectTOML(poetry_file).poetry_config + check_result = Factory.validate(config, strict=True) + + # Validate trove classifiers + project_classifiers = set(config.get("classifiers", [])) + errors, warnings = self._validate_classifiers(project_classifiers) + check_result["errors"].extend(errors) + check_result["warnings"].extend(warnings) + + # Validate readme (files must exist) + if "readme" in config: + errors = self._validate_readme(config["readme"], poetry_file) + check_result["errors"].extend(errors) + + check_result["errors"] += self._validate_dependencies_source(config) + + # Verify that lock file is consistent + if self.option("lock") and not self.poetry.locker.is_locked(): + check_result["errors"] += ["poetry.lock was not found."] + if self.poetry.locker.is_locked() and not self.poetry.locker.is_fresh(): + check_result["errors"] += [ + "pyproject.toml changed significantly since poetry.lock was last generated. " + "Run `poetry lock [--no-update]` to fix the lock file." 
+ ] + + if not check_result["errors"] and not check_result["warnings"]: + self.info("All set!") + + return 0 + + for error in check_result["errors"]: + self.line_error(f"Error: {error}") + + for error in check_result["warnings"]: + self.line_error(f"Warning: {error}") + + return 1 diff --git a/conda_lock/_vendor/poetry/console/commands/command.py b/conda_lock/_vendor/poetry/console/commands/command.py new file mode 100644 index 00000000..0f40e7be --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/command.py @@ -0,0 +1,44 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Any + +from conda_lock._vendor.cleo.commands.command import Command as BaseCommand +from conda_lock._vendor.cleo.exceptions import CleoValueError + + +if TYPE_CHECKING: + from conda_lock._vendor.poetry.console.application import Application + from conda_lock._vendor.poetry.poetry import Poetry + + +class Command(BaseCommand): + loggers: list[str] = [] + + _poetry: Poetry | None = None + + @property + def poetry(self) -> Poetry: + if self._poetry is None: + return self.get_application().poetry + + return self._poetry + + def set_poetry(self, poetry: Poetry) -> None: + self._poetry = poetry + + def get_application(self) -> Application: + from conda_lock._vendor.poetry.console.application import Application + + application = self.application + assert isinstance(application, Application) + return application + + def reset_poetry(self) -> None: + self.get_application().reset_poetry() + + def option(self, name: str, default: Any = None) -> Any: + try: + return super().option(name) + except CleoValueError: + return default diff --git a/conda_lock/_vendor/poetry/console/commands/config.py b/conda_lock/_vendor/poetry/console/commands/config.py new file mode 100644 index 00000000..4ffe871f --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/config.py @@ -0,0 +1,323 @@ +from __future__ import annotations + +import json +import re + +from pathlib import Path +from typing import TYPE_CHECKING +from typing import Any +from typing import cast + +from conda_lock._vendor.cleo.helpers import argument +from conda_lock._vendor.cleo.helpers import option + +from conda_lock._vendor.poetry.config.config import PackageFilterPolicy +from conda_lock._vendor.poetry.config.config import boolean_normalizer +from conda_lock._vendor.poetry.config.config import boolean_validator +from conda_lock._vendor.poetry.config.config import int_normalizer +from conda_lock._vendor.poetry.console.commands.command import Command + + +if TYPE_CHECKING: + from conda_lock._vendor.poetry.config.config_source import ConfigSource + + +class ConfigCommand(Command): + name = "config" + description = "Manages configuration settings." + + arguments = [ + argument("key", "Setting key.", optional=True), + argument("value", "Setting value.", optional=True, multiple=True), + ] + + options = [ + option("list", None, "List configuration settings."), + option("unset", None, "Unset configuration setting."), + option("local", None, "Set/Get from the project's local configuration."), + ] + + help = """\ +This command allows you to edit the poetry config settings and repositories. 
+ +To add a repository: + + poetry config repositories.foo https://bar.com/simple/ + +To remove a repository (repo is a short alias for repositories): + + poetry config --unset repo.foo""" + + LIST_PROHIBITED_SETTINGS = {"http-basic", "pypi-token"} + + @property + def unique_config_values(self) -> dict[str, tuple[Any, Any]]: + unique_config_values = { + "cache-dir": (str, lambda val: str(Path(val))), + "virtualenvs.create": (boolean_validator, boolean_normalizer), + "virtualenvs.in-project": (boolean_validator, boolean_normalizer), + "virtualenvs.options.always-copy": (boolean_validator, boolean_normalizer), + "virtualenvs.options.system-site-packages": ( + boolean_validator, + boolean_normalizer, + ), + "virtualenvs.options.no-pip": (boolean_validator, boolean_normalizer), + "virtualenvs.options.no-setuptools": ( + boolean_validator, + boolean_normalizer, + ), + "virtualenvs.path": (str, lambda val: str(Path(val))), + "virtualenvs.prefer-active-python": (boolean_validator, boolean_normalizer), + "virtualenvs.prompt": (str, str), + "experimental.system-git-client": (boolean_validator, boolean_normalizer), + "installer.modern-installation": (boolean_validator, boolean_normalizer), + "installer.parallel": (boolean_validator, boolean_normalizer), + "installer.max-workers": (lambda val: int(val) > 0, int_normalizer), + "installer.no-binary": ( + PackageFilterPolicy.validator, + PackageFilterPolicy.normalize, + ), + "solver.lazy-wheel": (boolean_validator, boolean_normalizer), + "warnings.export": (boolean_validator, boolean_normalizer), + "keyring.enabled": (boolean_validator, boolean_normalizer), + } + + return unique_config_values + + def handle(self) -> int: + from pathlib import Path + + from conda_lock._vendor.poetry.core.pyproject.exceptions import PyProjectException + + from conda_lock._vendor.poetry.config.config import Config + from conda_lock._vendor.poetry.config.file_config_source import FileConfigSource + from conda_lock._vendor.poetry.locations import CONFIG_DIR + from conda_lock._vendor.poetry.toml.file import TOMLFile + + config = Config.create() + config_file = TOMLFile(CONFIG_DIR / "config.toml") + + try: + local_config_file = TOMLFile(self.poetry.file.path.parent / "poetry.toml") + if local_config_file.exists(): + config.merge(local_config_file.read()) + except (RuntimeError, PyProjectException): + local_config_file = TOMLFile(Path.cwd() / "poetry.toml") + + if self.option("local"): + config.set_config_source(FileConfigSource(local_config_file)) + + if not config_file.exists(): + config_file.path.parent.mkdir(parents=True, exist_ok=True) + config_file.path.touch(mode=0o0600) + + if self.option("list"): + self._list_configuration(config.all(), config.raw()) + + return 0 + + setting_key = self.argument("key") + if not setting_key: + return 0 + + if self.argument("value") and self.option("unset"): + raise RuntimeError("You can not combine a setting value with --unset") + + # show the value if no value is provided + if not self.argument("value") and not self.option("unset"): + if setting_key.split(".")[0] in self.LIST_PROHIBITED_SETTINGS: + raise ValueError(f"Expected a value for {setting_key} setting.") + + m = re.match(r"^repos?(?:itories)?(?:\.(.+))?", self.argument("key")) + value: str | dict[str, Any] + if m: + if not m.group(1): + value = {} + if config.get("repositories") is not None: + value = config.get("repositories") + else: + repo = config.get(f"repositories.{m.group(1)}") + if repo is None: + raise ValueError(f"There is no {m.group(1)} repository defined") + + value 
= repo + + self.line(str(value)) + else: + if setting_key not in self.unique_config_values: + raise ValueError(f"There is no {setting_key} setting.") + + value = config.get(setting_key) + + if not isinstance(value, str): + value = json.dumps(value) + + self.line(value) + + return 0 + + values: list[str] = self.argument("value") + + if setting_key in self.unique_config_values: + if self.option("unset"): + config.config_source.remove_property(setting_key) + return 0 + + return self._handle_single_value( + config.config_source, + setting_key, + self.unique_config_values[setting_key], + values, + ) + + # handle repositories + m = re.match(r"^repos?(?:itories)?(?:\.(.+))?", self.argument("key")) + if m: + if not m.group(1): + raise ValueError("You cannot remove the [repositories] section") + + if self.option("unset"): + repo = config.get(f"repositories.{m.group(1)}") + if repo is None: + raise ValueError(f"There is no {m.group(1)} repository defined") + + config.config_source.remove_property(f"repositories.{m.group(1)}") + + return 0 + + if len(values) == 1: + url = values[0] + + config.config_source.add_property(f"repositories.{m.group(1)}.url", url) + + return 0 + + raise ValueError( + "You must pass the url. " + "Example: poetry config repositories.foo https://bar.com" + ) + + # handle auth + m = re.match(r"^(http-basic|pypi-token)\.(.+)", self.argument("key")) + if m: + from conda_lock._vendor.poetry.utils.password_manager import PasswordManager + + password_manager = PasswordManager(config) + if self.option("unset"): + if m.group(1) == "http-basic": + password_manager.delete_http_password(m.group(2)) + elif m.group(1) == "pypi-token": + password_manager.delete_pypi_token(m.group(2)) + + return 0 + + if m.group(1) == "http-basic": + if len(values) == 1: + username = values[0] + # Only username, so we prompt for password + password = self.secret("Password:") + assert isinstance(password, str) + elif len(values) != 2: + raise ValueError( + "Expected one or two arguments " + f"(username, password), got {len(values)}" + ) + else: + username = values[0] + password = values[1] + + password_manager.set_http_password(m.group(2), username, password) + elif m.group(1) == "pypi-token": + if len(values) != 1: + raise ValueError( + f"Expected only one argument (token), got {len(values)}" + ) + + token = values[0] + + password_manager.set_pypi_token(m.group(2), token) + + return 0 + + # handle certs + m = re.match(r"certificates\.([^.]+)\.(cert|client-cert)", self.argument("key")) + if m: + repository = m.group(1) + key = m.group(2) + + if self.option("unset"): + config.auth_config_source.remove_property( + f"certificates.{repository}.{key}" + ) + + return 0 + + if len(values) == 1: + new_value: str | bool = values[0] + + if key == "cert" and boolean_validator(values[0]): + new_value = boolean_normalizer(values[0]) + + config.auth_config_source.add_property( + f"certificates.{repository}.{key}", new_value + ) + else: + raise ValueError("You must pass exactly 1 value") + + return 0 + + raise ValueError(f"Setting {self.argument('key')} does not exist") + + def _handle_single_value( + self, + source: ConfigSource, + key: str, + callbacks: tuple[Any, Any], + values: list[Any], + ) -> int: + validator, normalizer = callbacks + + if len(values) > 1: + raise RuntimeError("You can only pass one value.") + + value = values[0] + if not validator(value): + raise RuntimeError(f'"{value}" is an invalid value for {key}') + + source.add_property(key, normalizer(value)) + + return 0 + + def _list_configuration( + self, 
config: dict[str, Any], raw: dict[str, Any], k: str = "" + ) -> None: + orig_k = k + for key, value in sorted(config.items()): + if k + key in self.LIST_PROHIBITED_SETTINGS: + continue + + raw_val = raw.get(key) + + if isinstance(value, dict): + k += f"{key}." + raw_val = cast("dict[str, Any]", raw_val) + self._list_configuration(value, raw_val, k=k) + k = orig_k + + continue + elif isinstance(value, list): + value = ", ".join( + json.dumps(val) if isinstance(val, list) else val for val in value + ) + value = f"[{value}]" + + if k.startswith("repositories."): + message = f"{k + key} = {json.dumps(raw_val)}" + elif isinstance(raw_val, str) and raw_val != value: + message = ( + f"{k + key} = {json.dumps(raw_val)} # {value}" + ) + else: + message = f"{k + key} = {json.dumps(value)}" + + self.line(message) diff --git a/conda_lock/_vendor/poetry/console/commands/debug/__init__.py b/conda_lock/_vendor/poetry/console/commands/debug/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/conda_lock/_vendor/poetry/console/commands/debug/info.py b/conda_lock/_vendor/poetry/console/commands/debug/info.py new file mode 100644 index 00000000..a40d67d1 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/debug/info.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +import sys + +from conda_lock._vendor.poetry.console.commands.command import Command + + +class DebugInfoCommand(Command): + name = "debug info" + description = "Shows debug information." + + def handle(self) -> int: + poetry_python_version = ".".join(str(s) for s in sys.version_info[:3]) + + self.line("") + self.line("Poetry") + self.line( + "\n".join( + [ + f"Version: {self.poetry.VERSION}", + f"Python: {poetry_python_version}", + ] + ) + ) + command = self.get_application().get("env info") + + exit_code: int = command.run(self.io) + return exit_code diff --git a/conda_lock/_vendor/poetry/console/commands/debug/resolve.py b/conda_lock/_vendor/poetry/console/commands/debug/resolve.py new file mode 100644 index 00000000..d62557ac --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/debug/resolve.py @@ -0,0 +1,145 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from conda_lock._vendor.cleo.helpers import argument +from conda_lock._vendor.cleo.helpers import option +from conda_lock._vendor.cleo.io.outputs.output import Verbosity + +from conda_lock._vendor.poetry.console.commands.init import InitCommand +from conda_lock._vendor.poetry.console.commands.show import ShowCommand + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.ui.table import Rows + + +class DebugResolveCommand(InitCommand): + name = "debug resolve" + description = "Debugs dependency resolution." 
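A minimal, self-contained sketch of the recursive dotted-key flattening that ConfigCommand._list_configuration() above performs (output style tags, list handling, and raw-value comparison omitted; the sample dict is illustrative):

import json

def flatten(config: dict, prefix: str = "") -> list[str]:
    # Nested tables become dotted keys; scalar values are JSON-encoded,
    # mirroring the `poetry config --list` output format.
    lines: list[str] = []
    for key, value in sorted(config.items()):
        if isinstance(value, dict):
            lines += flatten(value, prefix + key + ".")
        else:
            lines.append(f"{prefix + key} = {json.dumps(value)}")
    return lines

print("\n".join(flatten({"virtualenvs": {"create": True, "path": None}})))
# virtualenvs.create = true
# virtualenvs.path = null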
+ + arguments = [ + argument("package", "The packages to resolve.", optional=True, multiple=True) + ] + options = [ + option( + "extras", + "E", + "Extras to activate for the dependency.", + flag=False, + multiple=True, + ), + option("python", None, "Python version(s) to use for resolution.", flag=False), + option("tree", None, "Display the dependency tree."), + option("install", None, "Show what would be installed for the current system."), + ] + + loggers = ["poetry.repositories.pypi_repository", "poetry.inspection.info"] + + def handle(self) -> int: + from conda_lock._vendor.cleo.io.null_io import NullIO + from conda_lock._vendor.poetry.core.packages.project_package import ProjectPackage + + from conda_lock._vendor.poetry.factory import Factory + from conda_lock._vendor.poetry.puzzle.solver import Solver + from conda_lock._vendor.poetry.repositories.repository import Repository + from conda_lock._vendor.poetry.repositories.repository_pool import RepositoryPool + from conda_lock._vendor.poetry.utils.env import EnvManager + + packages = self.argument("package") + + if not packages: + package = self.poetry.package + else: + # Using current pool for determine_requirements() + self._pool = self.poetry.pool + + package = ProjectPackage( + self.poetry.package.name, self.poetry.package.version + ) + + # Silencing output + verbosity = self.io.output.verbosity + self.io.output.set_verbosity(Verbosity.QUIET) + + requirements = self._determine_requirements(packages) + + self.io.output.set_verbosity(verbosity) + + for constraint in requirements: + name = constraint.pop("name") + assert isinstance(name, str) + extras = [] + for extra in self.option("extras"): + extras += extra.split() + + constraint["extras"] = extras + + package.add_dependency(Factory.create_dependency(name, constraint)) + + package.python_versions = self.option("python") or ( + self.poetry.package.python_versions + ) + + pool = self.poetry.pool + + solver = Solver(package, pool, [], [], self.io) + + ops = solver.solve().calculate_operations() + + self.line("") + self.line("Resolution results:") + self.line("") + + if self.option("tree"): + show_command = self.get_application().find("show") + assert isinstance(show_command, ShowCommand) + show_command.init_styles(self.io) + + packages = [op.package for op in ops] + + requires = package.all_requires + for pkg in packages: + for require in requires: + if pkg.name == require.name: + show_command.display_package_tree(self.io, pkg, packages) + break + + return 0 + + table = self.table(style="compact") + table.style.set_vertical_border_chars("", " ") + rows: Rows = [] + + if self.option("install"): + env = EnvManager(self.poetry).get() + pool = RepositoryPool(config=self.poetry.config) + locked_repository = Repository("poetry-locked") + for op in ops: + locked_repository.add_package(op.package) + + pool.add_repository(locked_repository) + + solver = Solver(package, pool, [], [], NullIO()) + with solver.use_environment(env): + ops = solver.solve().calculate_operations() + + for op in ops: + if self.option("install") and op.skipped: + continue + + pkg = op.package + row = [ + f"{pkg.complete_name}", + f"{pkg.version}", + "", + ] + + if not pkg.marker.is_any(): + row[2] = str(pkg.marker) + + rows.append(row) + + table.set_rows(rows) + table.render() + + return 0 diff --git 
a/conda_lock/_vendor/poetry/console/commands/env/info.py b/conda_lock/_vendor/poetry/console/commands/env/info.py new file mode 100644 index 00000000..20295f2f --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/env/info.py @@ -0,0 +1,87 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from conda_lock._vendor.cleo.helpers import option + +from conda_lock._vendor.poetry.console.commands.command import Command + + +if TYPE_CHECKING: + from conda_lock._vendor.poetry.utils.env import Env + + +class EnvInfoCommand(Command): + name = "env info" + description = "Displays information about the current environment." + + options = [ + option("path", "p", "Only display the environment's path."), + option( + "executable", "e", "Only display the environment's python executable path." + ), + ] + + def handle(self) -> int: + from conda_lock._vendor.poetry.utils.env import EnvManager + + env = EnvManager(self.poetry).get() + + if self.option("path"): + if not env.is_venv(): + return 1 + + self.line(str(env.path)) + + return 0 + + if self.option("executable"): + if not env.is_venv(): + return 1 + + self.line(str(env.python)) + + return 0 + + self._display_complete_info(env) + return 0 + + def _display_complete_info(self, env: Env) -> None: + env_python_version = ".".join(str(s) for s in env.version_info[:3]) + self.line("") + self.line("Virtualenv") + listing = [ + f"Python: {env_python_version}", + f"Implementation: {env.python_implementation}", + ( + "Path: " + f" {env.path if env.is_venv() else 'NA'}" + ), + ( + "Executable: " + f" {env.python if env.is_venv() else 'NA'}" + ), + ] + if env.is_venv(): + listing.append( + "Valid: " + f" <{'comment' if env.is_sane() else 'error'}>{env.is_sane()}" + ) + self.line("\n".join(listing)) + + self.line("") + + base_env = env.parent_env + python = ".".join(str(v) for v in base_env.version_info[:3]) + self.line("Base") + self.line( + "\n".join( + [ + f"Platform: {env.platform}", + f"OS: {env.os}", + f"Python: {python}", + f"Path: {base_env.path}", + f"Executable: {base_env.python}", + ] + ) + ) diff --git a/conda_lock/_vendor/poetry/console/commands/env/list.py b/conda_lock/_vendor/poetry/console/commands/env/list.py new file mode 100644 index 00000000..26479dd2 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/env/list.py @@ -0,0 +1,32 @@ +from __future__ import annotations + +from conda_lock._vendor.cleo.helpers import option + +from conda_lock._vendor.poetry.console.commands.command import Command + + +class EnvListCommand(Command): + name = "env list" + description = "Lists all virtualenvs associated with the current project." 
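+ # Illustrative output sketch (venv names are hypothetical; the "(Activated)"
+ # suffix matches what handle() below prints for the current environment):
+ #
+ #   $ poetry env list
+ #   my-project-a1b2c3-py3.10
+ #   my-project-a1b2c3-py3.11 (Activated)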
+ + options = [option("full-path", None, "Output the full paths of the virtualenvs.")] + + def handle(self) -> int: + from conda_lock._vendor.poetry.utils.env import EnvManager + + manager = EnvManager(self.poetry) + current_env = manager.get() + + for venv in manager.list(): + name = venv.path.name + if self.option("full-path"): + name = str(venv.path) + + if venv == current_env: + self.line(f"{name} (Activated)") + + continue + + self.line(name) + + return 0 diff --git a/conda_lock/_vendor/poetry/console/commands/env/remove.py b/conda_lock/_vendor/poetry/console/commands/env/remove.py new file mode 100644 index 00000000..93fc396c --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/env/remove.py @@ -0,0 +1,54 @@ +from __future__ import annotations + +from conda_lock._vendor.cleo.helpers import argument +from conda_lock._vendor.cleo.helpers import option + +from conda_lock._vendor.poetry.console.commands.command import Command + + +class EnvRemoveCommand(Command): + name = "env remove" + description = "Remove virtual environments associated with the project." + + arguments = [ + argument( + "python", + "The python executables associated with, or names of the virtual" + " environments which are to be removed.", + optional=True, + multiple=True, + ) + ] + options = [ + option( + "all", + description=( + "Remove all managed virtual environments associated with the project." + ), + ), + ] + + def handle(self) -> int: + from conda_lock._vendor.poetry.utils.env import EnvManager + + pythons = self.argument("python") + all = self.option("all") + if not (pythons or all): + self.line("No virtualenv provided.") + + manager = EnvManager(self.poetry) + # TODO: refactor env.py to allow removal with one loop + for python in pythons: + venv = manager.remove(python) + self.line(f"Deleted virtualenv: {venv.path}") + if all: + for venv in manager.list(): + manager.remove_venv(venv.path) + self.line(f"Deleted virtualenv: {venv.path}") + # Since we remove all the virtualenvs, we can also remove the entry + # in the envs file. (Strictly speaking, we should do this explicitly, + # in case it points to a virtualenv that had been removed manually before.) + if manager.envs_file.exists(): + manager.envs_file.remove_section(manager.base_env_name) + + return 0 diff --git a/conda_lock/_vendor/poetry/console/commands/env/use.py b/conda_lock/_vendor/poetry/console/commands/env/use.py new file mode 100644 index 00000000..3817b093 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/env/use.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +from conda_lock._vendor.cleo.helpers import argument + +from conda_lock._vendor.poetry.console.commands.command import Command + + +class EnvUseCommand(Command): + name = "env use" + description = "Activates or creates a new virtualenv for the current project." 
+ + arguments = [argument("python", "The python executable to use.")] + + def handle(self) -> int: + from conda_lock._vendor.poetry.utils.env import EnvManager + + manager = EnvManager(self.poetry, io=self.io) + + if self.argument("python") == "system": + manager.deactivate() + + return 0 + + env = manager.activate(self.argument("python")) + + self.line(f"Using virtualenv: {env.path}") + + return 0 diff --git a/conda_lock/_vendor/poetry/console/commands/env_command.py b/conda_lock/_vendor/poetry/console/commands/env_command.py new file mode 100644 index 00000000..ec020b9c --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/env_command.py @@ -0,0 +1,25 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from conda_lock._vendor.poetry.console.commands.command import Command + + +if TYPE_CHECKING: + from conda_lock._vendor.poetry.utils.env import Env + + +class EnvCommand(Command): + def __init__(self) -> None: + # Set in poetry.console.application.Application.configure_env + self._env: Env | None = None + + super().__init__() + + @property + def env(self) -> Env: + assert self._env is not None + return self._env + + def set_env(self, env: Env) -> None: + self._env = env diff --git a/conda_lock/_vendor/poetry/console/commands/export.py b/conda_lock/_vendor/poetry/console/commands/export.py new file mode 100644 index 00000000..cf18e279 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/export.py @@ -0,0 +1,21 @@ +from __future__ import annotations + +from poetry_plugin_export.command import ( # type: ignore[import-untyped] + ExportCommand as BaseExportCommand, +) + + +class ExportCommand(BaseExportCommand): # type: ignore[misc] + def handle(self) -> int: + if self.poetry.config.get("warnings.export"): + self.line_error( + "Warning: poetry-plugin-export will not be installed by default in a" + " future version of Poetry.\n" + "In order to avoid a breaking change and make your automation" + " forward-compatible, please install poetry-plugin-export explicitly." 
+ " See https://python-poetry.org/docs/plugins/#using-plugins for details" + " on how to install a plugin.\n" + "To disable this warning run 'poetry config warnings.export false'.", + style="warning", + ) + return super().handle() # type: ignore[no-any-return] diff --git a/conda_lock/_vendor/poetry/console/commands/group_command.py b/conda_lock/_vendor/poetry/console/commands/group_command.py new file mode 100644 index 00000000..dab020e7 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/group_command.py @@ -0,0 +1,131 @@ +from __future__ import annotations + +from collections import defaultdict +from typing import TYPE_CHECKING + +from conda_lock._vendor.cleo.helpers import option +from conda_lock._vendor.poetry.core.packages.dependency_group import MAIN_GROUP + +from conda_lock._vendor.poetry.console.commands.command import Command +from conda_lock._vendor.poetry.console.exceptions import GroupNotFound + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.io.inputs.option import Option + from conda_lock._vendor.poetry.core.packages.project_package import ProjectPackage + + +class GroupCommand(Command): + @staticmethod + def _group_dependency_options() -> list[Option]: + return [ + option( + "without", + None, + "The dependency groups to ignore.", + flag=False, + multiple=True, + ), + option( + "with", + None, + "The optional dependency groups to include.", + flag=False, + multiple=True, + ), + option( + "only", + None, + "The only dependency groups to include.", + flag=False, + multiple=True, + ), + ] + + @property + def non_optional_groups(self) -> set[str]: + # TODO: this should move into poetry-core + return { + group.name + for group in self.poetry.package._dependency_groups.values() + if not group.is_optional() + } + + @property + def default_group(self) -> str | None: + """ + The default group to use when no group is specified. This is useful + for command that have the `--group` option, eg: add, remove. + + Can be overridden to adapt behavior. + """ + return None + + @property + def default_groups(self) -> set[str]: + """ + The groups that are considered by the command by default. + + Can be overridden to adapt behavior. + """ + return self.non_optional_groups + + @property + def activated_groups(self) -> set[str]: + groups = {} + + for key in {"with", "without", "only"}: + groups[key] = { + group.strip() + for groups in self.option(key, "") + for group in groups.split(",") + } + self._validate_group_options(groups) + + for opt, new, group in [ + ("no-dev", "only", MAIN_GROUP), + ("dev", "with", "dev"), + ]: + if self.io.input.has_option(opt) and self.option(opt): + self.line_error( + f"The `--{opt}` option is" + f" deprecated, use the `--{new} {group}`" + " notation instead." + ) + groups[new].add(group) + + if groups["only"] and (groups["with"] or groups["without"]): + self.line_error( + "The `--with` and " + "`--without` options are ignored when used" + " along with the `--only` option." 
+ "" + ) + + return groups["only"] or self.default_groups.union(groups["with"]).difference( + groups["without"] + ) + + def project_with_activated_groups_only(self) -> ProjectPackage: + return self.poetry.package.with_dependency_groups( + list(self.activated_groups), only=True + ) + + def _validate_group_options(self, group_options: dict[str, set[str]]) -> None: + """ + Raises an error if it detects that a group is not part of pyproject.toml + """ + invalid_options = defaultdict(set) + for opt, groups in group_options.items(): + for group in groups: + if not self.poetry.package.has_dependency_group(group): + invalid_options[group].add(opt) + if invalid_options: + message_parts = [] + for group in sorted(invalid_options): + opts = ", ".join( + f"--{opt}" + for opt in sorted(invalid_options[group]) + ) + message_parts.append(f"{group} (via {opts})") + raise GroupNotFound(f"Group(s) not found: {', '.join(message_parts)}") diff --git a/conda_lock/_vendor/poetry/console/commands/init.py b/conda_lock/_vendor/poetry/console/commands/init.py new file mode 100644 index 00000000..8ef17b23 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/init.py @@ -0,0 +1,498 @@ +from __future__ import annotations + +from pathlib import Path +from typing import TYPE_CHECKING +from typing import Any +from typing import Dict +from typing import Mapping +from typing import Union + +from conda_lock._vendor.cleo.helpers import option +from packaging.utils import canonicalize_name +from tomlkit import inline_table + +from conda_lock._vendor.poetry.console.commands.command import Command +from conda_lock._vendor.poetry.console.commands.env_command import EnvCommand +from conda_lock._vendor.poetry.utils.dependency_specification import RequirementsParser + + +if TYPE_CHECKING: + from packaging.utils import NormalizedName + from conda_lock._vendor.poetry.core.packages.package import Package + from tomlkit.items import InlineTable + + from conda_lock._vendor.poetry.repositories import RepositoryPool + +Requirements = Dict[str, Union[str, Mapping[str, Any]]] + + +class InitCommand(Command): + name = "init" + description = ( + "Creates a basic pyproject.toml file in the current directory." + ) + + options = [ + option("name", None, "Name of the package.", flag=False), + option("description", None, "Description of the package.", flag=False), + option("author", None, "Author name of the package.", flag=False), + option("python", None, "Compatible Python versions.", flag=False), + option( + "dependency", + None, + "Package to require, with an optional version constraint, " + "e.g. requests:^2.10.0 or requests=2.11.1.", + flag=False, + multiple=True, + ), + option( + "dev-dependency", + None, + "Package to require for development, with an optional version" + " constraint, e.g. requests:^2.10.0 or requests=2.11.1.", + flag=False, + multiple=True, + ), + option("license", "l", "License of the package.", flag=False), + ] + + help = """\ +The init command creates a basic pyproject.toml file in the\ + current directory. 
+""" + + def __init__(self) -> None: + super().__init__() + + self._pool: RepositoryPool | None = None + + def handle(self) -> int: + from pathlib import Path + + from conda_lock._vendor.poetry.core.vcs.git import GitConfig + + from conda_lock._vendor.poetry.config.config import Config + from conda_lock._vendor.poetry.layouts import layout + from conda_lock._vendor.poetry.pyproject.toml import PyProjectTOML + from conda_lock._vendor.poetry.utils.env import EnvManager + + project_path = Path.cwd() + + if self.io.input.option("directory"): + project_path = Path(self.io.input.option("directory")) + if not project_path.exists() or not project_path.is_dir(): + self.line_error( + "The --directory path is not a directory." + ) + return 1 + + pyproject = PyProjectTOML(project_path / "pyproject.toml") + + if pyproject.file.exists(): + if pyproject.is_poetry_project(): + self.line_error( + "A pyproject.toml file with a poetry section already" + " exists." + ) + return 1 + + if pyproject.data.get("build-system"): + self.line_error( + "A pyproject.toml file with a defined build-system already" + " exists." + ) + return 1 + + vcs_config = GitConfig() + + if self.io.is_interactive(): + self.line("") + self.line( + "This command will guide you through creating your" + " pyproject.toml config." + ) + self.line("") + + name = self.option("name") + if not name: + name = Path.cwd().name.lower() + + question = self.create_question( + f"Package name [{name}]: ", default=name + ) + name = self.ask(question) + + version = "0.1.0" + question = self.create_question( + f"Version [{version}]: ", default=version + ) + version = self.ask(question) + + description = self.option("description") + if not description: + description = self.ask(self.create_question("Description []: ", default="")) + + author = self.option("author") + if not author and vcs_config.get("user.name"): + author = vcs_config["user.name"] + author_email = vcs_config.get("user.email") + if author_email: + author += f" <{author_email}>" + + question = self.create_question( + f"Author [{author}, n to skip]: ", default=author + ) + question.set_validator(lambda v: self._validate_author(v, author)) + author = self.ask(question) + + authors = [author] if author else [] + + license = self.option("license") + if not license: + license = self.ask(self.create_question("License []: ", default="")) + + python = self.option("python") + if not python: + config = Config.create() + default_python = ( + "^" + + EnvManager.get_python_version( + precision=2, + prefer_active_python=config.get("virtualenvs.prefer-active-python"), + io=self.io, + ).to_string() + ) + + question = self.create_question( + f"Compatible Python versions [{default_python}]: ", + default=default_python, + ) + python = self.ask(question) + + if self.io.is_interactive(): + self.line("") + + requirements: Requirements = {} + if self.option("dependency"): + requirements = self._format_requirements( + self._determine_requirements(self.option("dependency")) + ) + + question_text = "Would you like to define your main dependencies interactively?" 
+ help_message = """\ +You can specify a package in the following forms: + - A single name (requests): this will search for matches on PyPI + - A name and a constraint (requests@^2.23.0) + - A git url (git+https://github.com/python-poetry/poetry.git) + - A git url with a revision\ + (git+https://github.com/python-poetry/poetry.git#develop) + - A file path (../my-package/my-package.whl) + - A directory (../my-package/) + - A url (https://example.com/packages/my-package-0.1.0.tar.gz) +""" + + help_displayed = False + if self.confirm(question_text, True): + if self.io.is_interactive(): + self.line(help_message) + help_displayed = True + requirements.update( + self._format_requirements(self._determine_requirements([])) + ) + if self.io.is_interactive(): + self.line("") + + dev_requirements: Requirements = {} + if self.option("dev-dependency"): + dev_requirements = self._format_requirements( + self._determine_requirements(self.option("dev-dependency")) + ) + + question_text = ( + "Would you like to define your development dependencies interactively?" + ) + if self.confirm(question_text, True): + if self.io.is_interactive() and not help_displayed: + self.line(help_message) + + dev_requirements.update( + self._format_requirements(self._determine_requirements([])) + ) + if self.io.is_interactive(): + self.line("") + + layout_ = layout("standard")( + name, + version, + description=description, + author=authors[0] if authors else None, + license=license, + python=python, + dependencies=requirements, + dev_dependencies=dev_requirements, + ) + + content = layout_.generate_poetry_content() + for section, item in content.items(): + pyproject.data.append(section, item) + + if self.io.is_interactive(): + self.line("Generated file") + self.line("") + self.line(pyproject.data.as_string().replace("\r\n", "\n")) + self.line("") + + if not self.confirm("Do you confirm generation?", True): + self.line_error("Command aborted") + + return 1 + + pyproject.save() + + return 0 + + def _generate_choice_list( + self, matches: list[Package], canonicalized_name: NormalizedName + ) -> list[str]: + choices = [] + matches_names = [p.name for p in matches] + exact_match = canonicalized_name in matches_names + if exact_match: + choices.append(matches[matches_names.index(canonicalized_name)].pretty_name) + + for found_package in matches: + if len(choices) >= 10: + break + + if found_package.name == canonicalized_name: + continue + + choices.append(found_package.pretty_name) + + return choices + + def _determine_requirements( + self, + requires: list[str], + allow_prereleases: bool = False, + source: str | None = None, + ) -> list[dict[str, Any]]: + if not requires: + result = [] + + question = self.create_question( + "Package to add or search for (leave blank to skip):" + ) + question.set_validator(self._validate_package) + + follow_up_question = self.create_question( + "\nAdd a package (leave blank to skip):" + ) + follow_up_question.set_validator(self._validate_package) + + package = self.ask(question) + while package: + constraint = self._parse_requirements([package])[0] + if ( + "git" in constraint + or "url" in constraint + or "path" in constraint + or "version" in constraint + ): + self.line(f"Adding {package}") + result.append(constraint) + package = self.ask(follow_up_question) + continue + + canonicalized_name = canonicalize_name(constraint["name"]) + matches = self._get_pool().search(canonicalized_name) + if not matches: + self.line_error("Unable to find package") + package = False + else: + choices = 
self._generate_choice_list(matches, canonicalized_name) + + info_string = ( + f"Found {len(matches)} packages matching" + f" {package}" + ) + + if len(matches) > 10: + info_string += "\nShowing the first 10 matches" + + self.line(info_string) + + # Default to an empty value to signal no package was selected + choices.append("") + + package = self.choice( + "\nEnter package # to add, or the complete package name if" + " it is not listed", + choices, + attempts=3, + default=len(choices) - 1, + ) + + if not package: + self.line("No package selected") + + # package selected by user, set constraint name to package name + if package: + constraint["name"] = package + + # no constraint yet, determine the best version automatically + if package and "version" not in constraint: + question = self.create_question( + "Enter the version constraint to require " + "(or leave blank to use the latest version):" + ) + question.set_max_attempts(3) + question.set_validator(lambda x: (x or "").strip() or None) + + package_constraint = self.ask(question) + + if package_constraint is None: + _, package_constraint = self._find_best_version_for_package( + package + ) + + self.line( + f"Using version {package_constraint} for" + f" {package}" + ) + + constraint["version"] = package_constraint + + if package: + result.append(constraint) + + if self.io.is_interactive(): + package = self.ask(follow_up_question) + + return result + + result = [] + for requirement in self._parse_requirements(requires): + if "git" in requirement or "url" in requirement or "path" in requirement: + result.append(requirement) + continue + elif "version" not in requirement: + # determine the best version automatically + name, version = self._find_best_version_for_package( + requirement["name"], + allow_prereleases=allow_prereleases, + source=source, + ) + requirement["version"] = version + requirement["name"] = name + + self.line(f"Using version {version} for {name}") + else: + # check that the specified version/constraint exists + # before we proceed + name, _ = self._find_best_version_for_package( + requirement["name"], + requirement["version"], + allow_prereleases=allow_prereleases, + source=source, + ) + + requirement["name"] = name + + result.append(requirement) + + return result + + def _find_best_version_for_package( + self, + name: str, + required_version: str | None = None, + allow_prereleases: bool = False, + source: str | None = None, + ) -> tuple[str, str]: + from conda_lock._vendor.poetry.version.version_selector import VersionSelector + + selector = VersionSelector(self._get_pool()) + package = selector.find_best_candidate( + name, required_version, allow_prereleases=allow_prereleases, source=source + ) + + if not package: + # TODO: find similar + raise ValueError(f"Could not find a matching version of package {name}") + + return package.pretty_name, f"^{package.version.to_string()}" + + def _parse_requirements(self, requirements: list[str]) -> list[dict[str, Any]]: + from conda_lock._vendor.poetry.core.pyproject.exceptions import PyProjectException + + try: + cwd = self.poetry.file.path.parent + artifact_cache = self.poetry.pool.artifact_cache + except (PyProjectException, RuntimeError): + cwd = Path.cwd() + artifact_cache = self._get_pool().artifact_cache + + parser = RequirementsParser( + artifact_cache=artifact_cache, + env=self.env if isinstance(self, EnvCommand) else None, + cwd=cwd, + ) + return [parser.parse(requirement) for requirement in requirements] + + def _format_requirements(self, requirements: list[dict[str, 
str]]) -> Requirements: + requires: Requirements = {} + for requirement in requirements: + name = requirement.pop("name") + constraint: str | InlineTable + if "version" in requirement and len(requirement) == 1: + constraint = requirement["version"] + else: + constraint = inline_table() + constraint.trivia.trail = "\n" + constraint.update(requirement) + + requires[name] = constraint + + return requires + + @staticmethod + def _validate_author(author: str, default: str) -> str | None: + from conda_lock._vendor.poetry.core.packages.package import AUTHOR_REGEX + from conda_lock._vendor.poetry.core.utils.helpers import combine_unicode + + author = combine_unicode(author or default) + + if author in ["n", "no"]: + return None + + m = AUTHOR_REGEX.match(author) + if not m: + raise ValueError( + "Invalid author string. Must be in the format: " + "John Smith <john@example.com>" + ) + + return author + + @staticmethod + def _validate_package(package: str | None) -> str | None: + if package and len(package.split()) > 2: + raise ValueError("Invalid package definition.") + + return package + + def _get_pool(self) -> RepositoryPool: + from conda_lock._vendor.poetry.config.config import Config + from conda_lock._vendor.poetry.repositories import RepositoryPool + from conda_lock._vendor.poetry.repositories.pypi_repository import PyPiRepository + + if isinstance(self, EnvCommand): + return self.poetry.pool + + if self._pool is None: + self._pool = RepositoryPool() + pool_size = Config.create().installer_max_workers + self._pool.add_repository(PyPiRepository(pool_size=pool_size)) + + return self._pool diff --git a/conda_lock/_vendor/poetry/console/commands/install.py b/conda_lock/_vendor/poetry/console/commands/install.py new file mode 100644 index 00000000..7c077dd2 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/install.py @@ -0,0 +1,205 @@ +from __future__ import annotations + +from conda_lock._vendor.cleo.helpers import option + +from conda_lock._vendor.poetry.console.commands.installer_command import InstallerCommand + + +class InstallCommand(InstallerCommand): + name = "install" + description = "Installs the project dependencies." + + options = [ + *InstallerCommand._group_dependency_options(), + option( + "no-dev", + None, + "Do not install the development dependencies." + " (Deprecated)", + ), + option( + "sync", + None, + "Synchronize the environment with the locked packages and the specified" + " groups.", + ), + option( + "no-root", None, "Do not install the root package (the current project)." + ), + option( + "no-directory", + None, + "Do not install any directory path dependencies; useful to install" + " dependencies without source code, e.g. for caching of Docker layers", + flag=True, + multiple=False, + ), + option( + "dry-run", + None, + "Output the operations but do not execute anything " + "(implicitly enables --verbose).", + ), + option( + "remove-untracked", + None, + "Removes packages not present in the lock file." + " (Deprecated)", + ), + option( + "extras", + "E", + "Extra sets of dependencies to install.", + flag=False, + multiple=True, + ), + option("all-extras", None, "Install all extra dependencies."), + option("only-root", None, "Exclude all dependencies."), + option( + "compile", + None, + "Compile Python source files to bytecode."
+ " (This option has no effect if modern-installation is disabled" + " because the old installer always compiles.)", + ), + ] + + help = """\ +The install command reads the poetry.lock file from +the current directory, processes it, and downloads and installs all the +libraries and dependencies outlined in that file. If the file does not +exist it will look for pyproject.toml and do the same. + +poetry install + +By default, the above command will also install the current project. To install only the +dependencies and not including the current project, run the command with the +--no-root option like below: + + poetry install --no-root + +If you want to use Poetry only for dependency management but not for packaging, +you can set the "package-mode" to false in your pyproject.toml file. +""" + + _loggers = ["poetry.repositories.pypi_repository", "poetry.inspection.info"] + + @property + def activated_groups(self) -> set[str]: + if self.option("only-root"): + return set() + else: + return super().activated_groups + + def handle(self) -> int: + from conda_lock._vendor.poetry.core.masonry.utils.module import ModuleOrPackageNotFound + + from conda_lock._vendor.poetry.masonry.builders.editable import EditableBuilder + + if self.option("extras") and self.option("all-extras"): + self.line_error( + "You cannot specify explicit" + " `--extras` while installing" + " using `--all-extras`." + ) + return 1 + + if self.option("only-root") and any( + self.option(key) for key in {"with", "without", "only"} + ): + self.line_error( + "The `--with`," + " `--without` and" + " `--only` options cannot be used with" + " the `--only-root`" + " option." + ) + return 1 + + if self.option("only-root") and self.option("no-root"): + self.line_error( + "You cannot specify `--no-root`" + " when using `--only-root`." + ) + return 1 + + extras: list[str] + if self.option("all-extras"): + extras = list(self.poetry.package.extras.keys()) + else: + extras = [] + for extra in self.option("extras", []): + extras += extra.split() + + self.installer.extras(extras) + + with_synchronization = self.option("sync") + if self.option("remove-untracked"): + self.line_error( + "The `--remove-untracked` option is" + " deprecated, use the `--sync` option" + " instead." + ) + + with_synchronization = True + + self.installer.only_groups(self.activated_groups) + self.installer.skip_directory(self.option("no-directory")) + self.installer.dry_run(self.option("dry-run")) + self.installer.requires_synchronization(with_synchronization) + self.installer.executor.enable_bytecode_compilation(self.option("compile")) + self.installer.verbose(self.io.is_verbose()) + + return_code = self.installer.run() + + if return_code != 0: + return return_code + + if self.option("no-root") or not self.poetry.is_package_mode: + return 0 + + log_install = ( + "Installing the current project:" + f" {self.poetry.package.pretty_name}" + f" (<{{tag}}>{self.poetry.package.pretty_version})" + ) + overwrite = self.io.output.is_decorated() and not self.io.is_debug() + self.line("") + self.write(log_install.format(tag="c2")) + if not overwrite: + self.line("") + + if self.option("dry-run"): + self.line("") + return 0 + + # Prior to https://github.com/python-poetry/poetry-core/pull/629 + # the existence of a module/package was checked when creating the + # EditableBuilder. Afterwards, the existence is checked after + # executing the build script (if there is one), + # i.e. during EditableBuilder.build(). 
+ try: + builder = EditableBuilder(self.poetry, self.env, self.io) + builder.build() + except (ModuleOrPackageNotFound, FileNotFoundError) as e: + # This is likely due to the fact that the project is an application + # not following the structure expected by Poetry. + # No need for an editable install in this case. + self.line("") + self.line_error( + f"Warning: The current project could not be installed: {e}\n" + "If you do not want to install the current project" + " use --no-root.\n" + "If you want to use Poetry only for dependency management" + " but not for packaging, you can disable package mode by setting" + " package-mode = false in your pyproject.toml file.\n" + "In a future version of Poetry this warning will become an error!", + style="warning", + ) + return 0 + + if overwrite: + self.overwrite(log_install.format(tag="success")) + self.line("") + + return 0 diff --git a/conda_lock/_vendor/poetry/console/commands/installer_command.py b/conda_lock/_vendor/poetry/console/commands/installer_command.py new file mode 100644 index 00000000..0083fb10 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/installer_command.py @@ -0,0 +1,32 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from conda_lock._vendor.poetry.console.commands.env_command import EnvCommand +from conda_lock._vendor.poetry.console.commands.group_command import GroupCommand + + +if TYPE_CHECKING: + from conda_lock._vendor.poetry.installation.installer import Installer + + +class InstallerCommand(GroupCommand, EnvCommand): + def __init__(self) -> None: + # Set in poetry.console.application.Application.configure_installer + self._installer: Installer | None = None + + super().__init__() + + def reset_poetry(self) -> None: + super().reset_poetry() + + self.installer.set_package(self.poetry.package) + self.installer.set_locker(self.poetry.locker) + + @property + def installer(self) -> Installer: + assert self._installer is not None + return self._installer + + def set_installer(self, installer: Installer) -> None: + self._installer = installer diff --git a/conda_lock/_vendor/poetry/console/commands/lock.py b/conda_lock/_vendor/poetry/console/commands/lock.py new file mode 100644 index 00000000..c072501a --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/lock.py @@ -0,0 +1,55 @@ +from __future__ import annotations + +from conda_lock._vendor.cleo.helpers import option + +from conda_lock._vendor.poetry.console.commands.installer_command import InstallerCommand + + +class LockCommand(InstallerCommand): + name = "lock" + description = "Locks the project dependencies." + + options = [ + option( + "no-update", None, "Do not update locked versions, only refresh lock file." + ), + option( + "check", + None, + "Check that the poetry.lock file corresponds to the current" + " version of pyproject.toml. (Deprecated) Use" + " poetry check --lock instead.", + ), + ] + + help = """ +The lock command reads the pyproject.toml file from the +current directory, processes it, and locks the dependencies in the\ + poetry.lock +file. + +poetry lock +""" + + loggers = ["poetry.repositories.pypi_repository"] + + def handle(self) -> int: + if self.option("check"): + self.line_error( + "poetry lock --check is deprecated, use `poetry" + " check --lock` instead." 
+ ) + if self.poetry.locker.is_locked() and self.poetry.locker.is_fresh(): + self.line("poetry.lock is consistent with pyproject.toml.") + return 0 + self.line_error( + "" + "Error: pyproject.toml changed significantly since poetry.lock was last generated. " + "Run `poetry lock [--no-update]` to fix the lock file." + "" + ) + return 1 + + self.installer.lock(update=not self.option("no-update")) + + return self.installer.run() diff --git a/conda_lock/_vendor/poetry/console/commands/new.py b/conda_lock/_vendor/poetry/console/commands/new.py new file mode 100644 index 00000000..5d88ee34 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/new.py @@ -0,0 +1,101 @@ +from __future__ import annotations + +from contextlib import suppress + +from conda_lock._vendor.cleo.helpers import argument +from conda_lock._vendor.cleo.helpers import option + +from conda_lock._vendor.poetry.console.commands.command import Command + + +class NewCommand(Command): + name = "new" + description = "Creates a new Python project at ." + + arguments = [argument("path", "The path to create the project at.")] + options = [ + option("name", None, "Set the resulting package name.", flag=False), + option("src", None, "Use the src layout for the project."), + option( + "readme", + None, + "Specify the readme file format. Default is md.", + flag=False, + ), + ] + + def handle(self) -> int: + from pathlib import Path + + from conda_lock._vendor.poetry.core.vcs.git import GitConfig + + from conda_lock._vendor.poetry.config.config import Config + from conda_lock._vendor.poetry.layouts import layout + from conda_lock._vendor.poetry.utils.env import EnvManager + + if self.io.input.option("directory"): + self.line_error( + "--directory only makes sense with existing projects, and will" + " be ignored. You should consider the option --path instead." + ) + + layout_cls = layout("src") if self.option("src") else layout("standard") + + path = Path(self.argument("path")) + if not path.is_absolute(): + # we do not use resolve here due to compatibility issues + # for path.resolve(strict=False) + path = Path.cwd().joinpath(path) + + name = self.option("name") + if not name: + name = path.name + + if path.exists() and list(path.glob("*")): + # Directory is not empty. Aborting. 
+ raise RuntimeError( + f"Destination {path} exists and is not empty" + ) + + readme_format = self.option("readme") or "md" + + config = GitConfig() + author = None + if config.get("user.name"): + author = config["user.name"] + author_email = config.get("user.email") + if author_email: + author += f" <{author_email}>" + + poetry_config = Config.create() + default_python = ( + "^" + + EnvManager.get_python_version( + precision=2, + prefer_active_python=poetry_config.get( + "virtualenvs.prefer-active-python" + ), + io=self.io, + ).to_string() + ) + + layout_ = layout_cls( + name, + "0.1.0", + author=author, + readme_format=readme_format, + python=default_python, + ) + layout_.create(path) + + path = path.resolve() + + with suppress(ValueError): + path = path.relative_to(Path.cwd()) + + self.line( + f"Created package {layout_._package_name} in" + f" {path.as_posix()}" + ) + + return 0 diff --git a/conda_lock/_vendor/poetry/console/commands/publish.py b/conda_lock/_vendor/poetry/console/commands/publish.py new file mode 100644 index 00000000..85399ff4 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/publish.py @@ -0,0 +1,105 @@ +from __future__ import annotations + +from pathlib import Path + +from conda_lock._vendor.cleo.helpers import option + +from conda_lock._vendor.poetry.console.commands.command import Command + + +class PublishCommand(Command): + name = "publish" + description = "Publishes a package to a remote repository." + + options = [ + option( + "repository", "r", "The repository to publish the package to.", flag=False + ), + option("username", "u", "The username to access the repository.", flag=False), + option("password", "p", "The password to access the repository.", flag=False), + option( + "cert", None, "Certificate authority to access the repository.", flag=False + ), + option( + "client-cert", + None, + "Client certificate to access the repository.", + flag=False, + ), + option( + "dist-dir", + None, + "Dist directory where built artifacts are stored. Default is `dist`.", + default="dist", + flag=False, + ), + option("build", None, "Build the package before publishing."), + option("dry-run", None, "Perform all actions except upload the package."), + option( + "skip-existing", + None, + "Ignore errors from files already existing in the repository.", + ), + ] + + help = """The publish command builds and uploads the package to a remote repository. + +By default, it will upload to PyPI but if you pass the --repository option it will +upload to that repository instead. + +The --repository option should match the name of a configured repository using +the config command. +""" + + loggers = ["poetry.publishing.publisher"] + + def handle(self) -> int: + from conda_lock._vendor.poetry.publishing.publisher import Publisher + + if not self.poetry.is_package_mode: + self.line_error("Publishing a package is not possible in non-package mode.") + return 1 + + dist_dir = self.option("dist-dir") + + publisher = Publisher(self.poetry, self.io, Path(dist_dir)) + + # Building package first, if told + if self.option("build"): + if publisher.files and not self.confirm( + f"There are {len(publisher.files)} files ready for" + " publishing. Build anyway?" + ): + self.line_error("Aborted!") + + return 1 + + self.call("build", args=f"--output {dist_dir}") + + files = publisher.files + if not files: + self.line_error( + "No files to publish. " + "Run poetry build first or use the --build option."
+ ) + + return 1 + + self.line("") + + cert = Path(self.option("cert")) if self.option("cert") else None + client_cert = ( + Path(self.option("client-cert")) if self.option("client-cert") else None + ) + + publisher.publish( + self.option("repository"), + self.option("username"), + self.option("password"), + cert, + client_cert, + self.option("dry-run"), + self.option("skip-existing"), + ) + + return 0 diff --git a/conda_lock/_vendor/poetry/console/commands/remove.py b/conda_lock/_vendor/poetry/console/commands/remove.py new file mode 100644 index 00000000..743f0724 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/remove.py @@ -0,0 +1,139 @@ +from __future__ import annotations + +from typing import Any + +from conda_lock._vendor.cleo.helpers import argument +from conda_lock._vendor.cleo.helpers import option +from packaging.utils import canonicalize_name +from conda_lock._vendor.poetry.core.packages.dependency_group import MAIN_GROUP +from tomlkit.toml_document import TOMLDocument + +from conda_lock._vendor.poetry.console.commands.installer_command import InstallerCommand + + +class RemoveCommand(InstallerCommand): + name = "remove" + description = "Removes a package from the project dependencies." + + arguments = [argument("packages", "The packages to remove.", multiple=True)] + options = [ + option("group", "G", "The group to remove the dependency from.", flag=False), + option( + "dev", + "D", + "Remove a package from the development dependencies." + " (Deprecated)" + " Use --group=dev instead.", + ), + option( + "dry-run", + None, + "Output the operations but do not execute anything " + "(implicitly enables --verbose).", + ), + option("lock", None, "Do not perform operations (only update the lockfile)."), + ] + + help = """The remove command removes a package from the current +list of installed packages + +poetry remove""" + + loggers = ["poetry.repositories.pypi_repository", "poetry.inspection.info"] + + def handle(self) -> int: + packages = self.argument("packages") + + if self.option("dev"): + self.line_error( + "The --dev option is deprecated, " + "use the `--group dev` notation instead." 
+ ) + group = "dev" + else: + group = self.option("group", self.default_group) + + content: dict[str, Any] = self.poetry.file.read() + poetry_content = content["tool"]["poetry"] + + if group is None: + removed = [] + group_sections = [ + (group_name, group_section.get("dependencies", {})) + for group_name, group_section in poetry_content.get("group", {}).items() + ] + + for group_name, section in [ + (MAIN_GROUP, poetry_content["dependencies"]), + *group_sections, + ]: + removed += self._remove_packages(packages, section, group_name) + if group_name != MAIN_GROUP: + if not section: + del poetry_content["group"][group_name] + else: + poetry_content["group"][group_name]["dependencies"] = section + elif group == "dev" and "dev-dependencies" in poetry_content: + # We need to account for the old `dev-dependencies` section + removed = self._remove_packages( + packages, poetry_content["dev-dependencies"], "dev" + ) + + if not poetry_content["dev-dependencies"]: + del poetry_content["dev-dependencies"] + else: + removed = [] + if "group" in poetry_content: + if group in poetry_content["group"]: + removed = self._remove_packages( + packages, + poetry_content["group"][group].get("dependencies", {}), + group, + ) + + if not poetry_content["group"][group]: + del poetry_content["group"][group] + + if "group" in poetry_content and not poetry_content["group"]: + del poetry_content["group"] + + removed_set = set(removed) + not_found = set(packages).difference(removed_set) + if not_found: + raise ValueError( + "The following packages were not found: " + ", ".join(sorted(not_found)) + ) + + # Refresh the locker + self.poetry.locker.set_local_config(poetry_content) + self.installer.set_locker(self.poetry.locker) + self.installer.set_package(self.poetry.package) + self.installer.dry_run(self.option("dry-run", False)) + self.installer.verbose(self.io.is_verbose()) + self.installer.update(True) + self.installer.execute_operations(not self.option("lock")) + self.installer.whitelist(removed_set) + + status = self.installer.run() + + if not self.option("dry-run") and status == 0: + assert isinstance(content, TOMLDocument) + self.poetry.file.write(content) + + return status + + def _remove_packages( + self, packages: list[str], section: dict[str, Any], group_name: str + ) -> list[str]: + removed = [] + group = self.poetry.package.dependency_group(group_name) + section_keys = list(section.keys()) + + for package in packages: + for existing_package in section_keys: + if canonicalize_name(existing_package) == canonicalize_name(package): + del section[existing_package] + removed.append(package) + group.remove_dependency(package) + + return removed diff --git a/conda_lock/_vendor/poetry/console/commands/run.py b/conda_lock/_vendor/poetry/console/commands/run.py new file mode 100644 index 00000000..98f06588 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/run.py @@ -0,0 +1,97 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from conda_lock._vendor.cleo.helpers import argument + +from conda_lock._vendor.poetry.console.commands.env_command import EnvCommand +from conda_lock._vendor.poetry.utils._compat import WINDOWS + + +if TYPE_CHECKING: + from conda_lock._vendor.poetry.core.masonry.utils.module import Module + + +class RunCommand(EnvCommand): + name = "run" + description = "Runs a command in the appropriate environment." 
+ + arguments = [ + argument("args", "The command and arguments/options to run.", multiple=True) + ] + + def handle(self) -> int: + args = self.argument("args") + script = args[0] + scripts = self.poetry.local_config.get("scripts") + + if scripts and script in scripts: + return self.run_script(scripts[script], args) + + try: + return self.env.execute(*args) + except FileNotFoundError: + self.line_error(f"Command not found: {script}") + return 1 + + @property + def _module(self) -> Module: + from conda_lock._vendor.poetry.core.masonry.utils.module import Module + + poetry = self.poetry + package = poetry.package + path = poetry.file.path.parent + module = Module(package.name, path.as_posix(), package.packages) + + return module + + def run_script(self, script: str | dict[str, str], args: list[str]) -> int: + """Runs an entry point script defined in the section ``[tool.poetry.scripts]``. + + When a script exists in the venv bin folder, i.e. after ``poetry install``, + then ``sys.argv[0]`` must be set to the full path of the executable, so that + ``poetry run foo`` and ``foo`` (run within ``poetry shell``) have the same + ``sys.argv[0]``, pointing to the full path. + + Otherwise (when an entry point script does not exist), ``sys.argv[0]`` is the + script name only, i.e. ``poetry run foo`` has ``sys.argv == ['foo']``. + """ + for script_dir in self.env.script_dirs: + script_path = script_dir / args[0] + if WINDOWS: + script_path = script_path.with_suffix(".cmd") + if script_path.exists(): + args = [str(script_path), *args[1:]] + break + else: + # If we reach this point, the script is not installed + self._warning_not_installed_script(args[0]) + + if isinstance(script, dict): + script = script["callable"] + + module, callable_ = script.split(":") + + src_in_sys_path = "sys.path.append('src'); " if self._module.is_in_src() else "" + + cmd = ["python", "-c"] + + cmd += [ + "import sys; " + "from importlib import import_module; " + f"sys.argv = {args!r}; {src_in_sys_path}" + f"sys.exit(import_module('{module}').{callable_}())" + ] + + return self.env.execute(*cmd) + + def _warning_not_installed_script(self, script: str) -> None: + message = f"""\ +Warning: '{script}' is an entry point defined in pyproject.toml, but it's not \ +installed as a script. You may get improper `sys.argv[0]`. + +The support to run uninstalled scripts will be removed in a future release. + +Run `poetry install` to resolve and get rid of this message. +""" + self.line_error(message, style="warning") diff --git a/conda_lock/_vendor/poetry/console/commands/search.py b/conda_lock/_vendor/poetry/console/commands/search.py new file mode 100644 index 00000000..32f83d4c --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/search.py @@ -0,0 +1,30 @@ +from __future__ import annotations + +from conda_lock._vendor.cleo.helpers import argument + +from conda_lock._vendor.poetry.console.commands.command import Command + + +class SearchCommand(Command): + name = "search" + description = "Searches for packages on remote repositories."
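+ # Output sketch (the format mirrors handle() below; actual names, versions and
+ # descriptions depend on the remote repository):
+ #
+ #   $ poetry search requests
+ #
+ #   requests (2.31.0)
+ #    Python HTTP for Humans.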
+ + arguments = [argument("tokens", "The tokens to search for.", multiple=True)] + + def handle(self) -> int: + from conda_lock._vendor.poetry.repositories.pypi_repository import PyPiRepository + + results = PyPiRepository().search(self.argument("tokens")) + + for result in results: + self.line("") + name = f"{result.name}" + + name += f" ({result.version})" + + self.line(name) + + if result.description: + self.line(f" {result.description}") + + return 0 diff --git a/conda_lock/_vendor/poetry/console/commands/self/__init__.py b/conda_lock/_vendor/poetry/console/commands/self/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/conda_lock/_vendor/poetry/console/commands/self/add.py b/conda_lock/_vendor/poetry/console/commands/self/add.py new file mode 100644 index 00000000..ad713878 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/self/add.py @@ -0,0 +1,40 @@ +from __future__ import annotations + +from conda_lock._vendor.poetry.core.constraints.version import Version + +from conda_lock._vendor.poetry.__version__ import __version__ +from conda_lock._vendor.poetry.console.commands.add import AddCommand +from conda_lock._vendor.poetry.console.commands.self.self_command import SelfCommand + + +class SelfAddCommand(SelfCommand, AddCommand): + name = "self add" + description = "Add additional packages to Poetry's runtime environment." + options = [ + o + for o in AddCommand.options + if o.name in {"editable", "extras", "source", "dry-run", "allow-prereleases"} + ] + help = f"""\ +The self add command installs additional packages to Poetry's runtime \ +environment. + +This is managed in the {SelfCommand.get_default_system_pyproject_file()} \ +file. + +{AddCommand.examples} +""" + + @property + def _hint_update_packages(self) -> str: + version = Version.parse(__version__) + flags = "" + + if not version.is_stable(): + flags = " --preview" + + return ( + "\nIf you want to update it to the latest compatible version, you can use" + f" `poetry self update{flags}`.\nIf you prefer to upgrade it to the latest" + " available version, you can use `poetry self add package@latest`.\n" + ) diff --git a/conda_lock/_vendor/poetry/console/commands/self/install.py b/conda_lock/_vendor/poetry/console/commands/self/install.py new file mode 100644 index 00000000..d2a3567b --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/self/install.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +from conda_lock._vendor.poetry.core.packages.dependency_group import MAIN_GROUP + +from conda_lock._vendor.poetry.console.commands.install import InstallCommand +from conda_lock._vendor.poetry.console.commands.self.self_command import SelfCommand + + +class SelfInstallCommand(SelfCommand, InstallCommand): + name = "self install" + description = ( + "Install locked packages (incl. addons) required by this Poetry installation." + ) + options = [o for o in InstallCommand.options if o.name in {"sync", "dry-run"}] + help = f"""\ +The self install command ensures all additional packages specified are \ +installed in the current runtime environment. + +This is managed in the {SelfCommand.get_default_system_pyproject_file()} \ +file. + +You can add more packages using the self add command and remove them using \ +the self remove command. 
+""" + + @property + def activated_groups(self) -> set[str]: + return {MAIN_GROUP, self.default_group} diff --git a/conda_lock/_vendor/poetry/console/commands/self/lock.py b/conda_lock/_vendor/poetry/console/commands/self/lock.py new file mode 100644 index 00000000..5e4b38e7 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/self/lock.py @@ -0,0 +1,17 @@ +from __future__ import annotations + +from conda_lock._vendor.poetry.console.commands.lock import LockCommand +from conda_lock._vendor.poetry.console.commands.self.self_command import SelfCommand + + +class SelfLockCommand(SelfCommand, LockCommand): + name = "self lock" + description = "Lock the Poetry installation's system requirements." + help = f"""\ +The self lock command reads this Poetry installation's system requirements as \ +specified in the {SelfCommand.get_default_system_pyproject_file()} file. + +The system dependencies are locked in the \ +{SelfCommand.get_default_system_pyproject_file().parent.joinpath("poetry.lock")} \ +file. +""" diff --git a/conda_lock/_vendor/poetry/console/commands/self/remove.py b/conda_lock/_vendor/poetry/console/commands/self/remove.py new file mode 100644 index 00000000..d2bee98b --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/self/remove.py @@ -0,0 +1,17 @@ +from __future__ import annotations + +from conda_lock._vendor.poetry.console.commands.remove import RemoveCommand +from conda_lock._vendor.poetry.console.commands.self.self_command import SelfCommand + + +class SelfRemoveCommand(SelfCommand, RemoveCommand): + name = "self remove" + description = "Remove additional packages from Poetry's runtime environment." + options = [o for o in RemoveCommand.options if o.name in {"dry-run"}] + help = f"""\ +The self remove command removes additional package's to Poetry's runtime \ +environment. + +This is managed in the {SelfCommand.get_default_system_pyproject_file()} \ +file. +""" diff --git a/conda_lock/_vendor/poetry/console/commands/self/self_command.py b/conda_lock/_vendor/poetry/console/commands/self/self_command.py new file mode 100644 index 00000000..dbce2947 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/self/self_command.py @@ -0,0 +1,132 @@ +from __future__ import annotations + +from pathlib import Path +from typing import TYPE_CHECKING + +from conda_lock._vendor.poetry.core.packages.dependency import Dependency +from conda_lock._vendor.poetry.core.packages.project_package import ProjectPackage + +from conda_lock._vendor.poetry.__version__ import __version__ +from conda_lock._vendor.poetry.console.commands.installer_command import InstallerCommand +from conda_lock._vendor.poetry.factory import Factory +from conda_lock._vendor.poetry.pyproject.toml import PyProjectTOML +from conda_lock._vendor.poetry.utils.env import EnvManager +from conda_lock._vendor.poetry.utils.env import SystemEnv +from conda_lock._vendor.poetry.utils.helpers import directory + + +if TYPE_CHECKING: + from conda_lock._vendor.poetry.poetry import Poetry + from conda_lock._vendor.poetry.utils.env import Env + + +class SelfCommand(InstallerCommand): + ADDITIONAL_PACKAGE_GROUP = "additional" + + @staticmethod + def get_default_system_pyproject_file() -> Path: + # We separate this out to avoid unwanted side effect during testing while + # maintaining dynamic use in help text. + # + # This is not ideal, but is the simplest solution for now. 
+ from conda_lock._vendor.poetry.locations import CONFIG_DIR + + return Path(CONFIG_DIR).joinpath("pyproject.toml") + + @property + def system_pyproject(self) -> Path: + file = self.get_default_system_pyproject_file() + file.parent.mkdir(parents=True, exist_ok=True) + return file + + def reset_env(self) -> None: + self._env = EnvManager.get_system_env(naive=True) + + @property + def env(self) -> Env: + if not isinstance(self._env, SystemEnv): + self.reset_env() + assert self._env is not None + return self._env + + @property + def default_group(self) -> str: + return self.ADDITIONAL_PACKAGE_GROUP + + @property + def activated_groups(self) -> set[str]: + return {self.default_group} + + def generate_system_pyproject(self) -> None: + preserved = {} + + if self.system_pyproject.exists(): + content = PyProjectTOML(self.system_pyproject).poetry_config + + for key in {"group", "source"}: + if key in content: + preserved[key] = content[key] + + package = ProjectPackage(name="poetry-instance", version=__version__) + package.add_dependency(Dependency(name="poetry", constraint=f"{__version__}")) + + package.python_versions = ".".join(str(v) for v in self.env.version_info[:3]) + + content = Factory.create_pyproject_from_package(package=package) + content["tool"]["poetry"]["package-mode"] = False # type: ignore[index] + + for key in preserved: + content["tool"]["poetry"][key] = preserved[key] # type: ignore[index] + + pyproject = PyProjectTOML(self.system_pyproject) + pyproject.file.write(content) + + def reset_poetry(self) -> None: + with directory(self.system_pyproject.parent): + self.generate_system_pyproject() + self._poetry = Factory().create_poetry( + self.system_pyproject.parent, io=self.io, disable_plugins=True + ) + + @property + def poetry(self) -> Poetry: + if self._poetry is None: + self.reset_poetry() + + assert self._poetry is not None + return self._poetry + + def _system_project_handle(self) -> int: + """ + This is a helper method that by default calls the handle method implemented in + the child class's next MRO sibling. Override this if you want special handling + either before calling the handle() from the super class or have custom logic + to handle the command. + + The default implementations handles cases where a `self` command delegates + handling to an existing command. Eg: `SelfAddCommand(SelfCommand, AddCommand)`. + """ + return_code: int = super().handle() + return return_code + + def reset(self) -> None: + """ + Reset current command instance's environment and poetry instances to ensure + use of the system specific ones. + """ + self.reset_env() + self.reset_poetry() + + def handle(self) -> int: + # We override the base class's handle() method to ensure that poetry and env + # are reset to work within the system project instead of current context. + # Further, during execution, the working directory is temporarily changed + # to parent directory of Poetry system pyproject.toml file. + # + # This method **should not** be overridden in child classes as it may have + # unexpected consequences. 
+ +        self.reset() + +        with directory(self.system_pyproject.parent): +            return self._system_project_handle() diff --git a/conda_lock/_vendor/poetry/console/commands/self/show/__init__.py b/conda_lock/_vendor/poetry/console/commands/self/show/__init__.py new file mode 100644 index 00000000..3ce5d915 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/self/show/__init__.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from conda_lock._vendor.cleo.helpers import option + +from conda_lock._vendor.poetry.console.commands.self.self_command import SelfCommand +from conda_lock._vendor.poetry.console.commands.show import ShowCommand + + +class SelfShowCommand(SelfCommand, ShowCommand): +    name = "self show" +    options = [ +        option("addons", None, "List only add-on packages installed."), +        *[o for o in ShowCommand.options if o.name in {"tree", "latest", "outdated"}], +    ] +    description = "Show packages from Poetry's runtime environment." +    help = f"""\ +The self show command behaves similarly to the show command, but +works within Poetry's runtime environment. This lists all packages installed within +the Poetry install environment. + +To show only additional packages that have been added via self add and their +dependencies, use self show --addons. + +This is managed in the {SelfCommand.get_default_system_pyproject_file()} \ +file. +""" + +    @property +    def activated_groups(self) -> set[str]: +        if self.option("addons", False): +            return {SelfCommand.ADDITIONAL_PACKAGE_GROUP} + +        groups: set[str] = super(ShowCommand, self).activated_groups +        return groups diff --git a/conda_lock/_vendor/poetry/console/commands/self/show/plugins.py b/conda_lock/_vendor/poetry/console/commands/self/show/plugins.py new file mode 100644 index 00000000..71654e81 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/self/show/plugins.py @@ -0,0 +1,118 @@ +from __future__ import annotations + +import dataclasses + +from typing import TYPE_CHECKING + +from conda_lock._vendor.poetry.console.commands.self.self_command import SelfCommand + + +if TYPE_CHECKING: +    from conda_lock._vendor.poetry.core.packages.package import Package + +    from conda_lock._vendor.poetry.utils._compat import metadata + + +@dataclasses.dataclass +class PluginPackage: +    package: Package +    plugins: list[metadata.EntryPoint] = dataclasses.field(default_factory=list) +    application_plugins: list[metadata.EntryPoint] = dataclasses.field( +        default_factory=list +    ) + +    def append(self, entry_point: metadata.EntryPoint) -> None: +        from conda_lock._vendor.poetry.plugins.application_plugin import ApplicationPlugin +        from conda_lock._vendor.poetry.plugins.plugin import Plugin + +        group = entry_point.group + +        if group == ApplicationPlugin.group: +            self.application_plugins.append(entry_point) +        elif group == Plugin.group: +            self.plugins.append(entry_point) +        else: +            name = entry_point.name +            raise ValueError(f"Unknown plugin group ({group}) for {name}") + + +class SelfShowPluginsCommand(SelfCommand): +    name = "self show plugins" +    description = "Shows information about the currently installed plugins." +    help = """\ +The self show plugins command lists all installed Poetry plugins. + +Plugins can be added and removed using the self add and self remove \ +commands, respectively. + +This command does not list packages that do not provide a Poetry plugin.
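+ +For example, running poetry self show plugins prints each plugin package +with the number of plugins and application plugins it provides, followed by +its dependencies.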
+""" + + def _system_project_handle(self) -> int: + from packaging.utils import canonicalize_name + + from conda_lock._vendor.poetry.plugins.application_plugin import ApplicationPlugin + from conda_lock._vendor.poetry.plugins.plugin import Plugin + from conda_lock._vendor.poetry.plugins.plugin_manager import PluginManager + from conda_lock._vendor.poetry.repositories.installed_repository import InstalledRepository + from conda_lock._vendor.poetry.utils.env import EnvManager + from conda_lock._vendor.poetry.utils.helpers import pluralize + + plugins: dict[str, PluginPackage] = {} + + system_env = EnvManager.get_system_env(naive=True) + installed_repository = InstalledRepository.load( + system_env, with_dependencies=True + ) + + packages_by_name: dict[str, Package] = { + pkg.name: pkg for pkg in installed_repository.packages + } + + for group in [ApplicationPlugin.group, Plugin.group]: + for entry_point in PluginManager(group).get_plugin_entry_points( + env=system_env + ): + assert entry_point.dist is not None + + package = packages_by_name[canonicalize_name(entry_point.dist.name)] + + name = package.pretty_name + + info = plugins.get(name) or PluginPackage(package=package) + info.append(entry_point) + + plugins[name] = info + + for name, info in plugins.items(): + package = info.package + description = " " + package.description if package.description else "" + self.line("") + self.line(f" - {name} ({package.version}){description}") + provide_line = " " + + if info.plugins: + count = len(info.plugins) + provide_line += f" {count} plugin{pluralize(count)}" + + if info.application_plugins: + if info.plugins: + provide_line += " and" + + count = len(info.application_plugins) + provide_line += ( + f" {count} application plugin{pluralize(count)}" + ) + + self.line(provide_line) + + if package.requires: + self.line("") + self.line(" Dependencies") + for dependency in package.requires: + self.line( + f" - {dependency.pretty_name}" + f" ({dependency.pretty_constraint})" + ) + + return 0 diff --git a/conda_lock/_vendor/poetry/console/commands/self/update.py b/conda_lock/_vendor/poetry/console/commands/self/update.py new file mode 100644 index 00000000..1b9d311d --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/self/update.py @@ -0,0 +1,58 @@ +from __future__ import annotations + +from conda_lock._vendor.cleo.helpers import argument +from conda_lock._vendor.cleo.helpers import option +from conda_lock._vendor.cleo.io.inputs.string_input import StringInput +from conda_lock._vendor.cleo.io.io import IO + +from conda_lock._vendor.poetry.console.commands.add import AddCommand +from conda_lock._vendor.poetry.console.commands.self.self_command import SelfCommand + + +class SelfUpdateCommand(SelfCommand): + name = "self update" + description = "Updates Poetry to the latest version." + + arguments = [ + argument( + "version", "The version to update to.", optional=True, default="latest" + ) + ] + options = [ + option("preview", None, "Allow the installation of pre-release versions."), + option( + "dry-run", + None, + "Output the operations but do not execute anything " + "(implicitly enables --verbose).", + ), + ] + help = """\ +The self update command updates Poetry version in its current runtime \ +environment. 
+""" + + def _system_project_handle(self) -> int: + self.write("Updating Poetry version ...\n\n") + application = self.get_application() + add_command = application.find("add") + assert isinstance(add_command, AddCommand) + add_command.set_env(self.env) + application.configure_installer_for_command(add_command, self.io) + + argv = ["add", f"poetry@{self.argument('version')}"] + + if self.option("dry-run"): + argv.append("--dry-run") + + if self.option("preview"): + argv.append("--allow-prereleases") + + exit_code: int = add_command.run( + IO( + StringInput(" ".join(argv)), + self.io.output, + self.io.error_output, + ) + ) + return exit_code diff --git a/conda_lock/_vendor/poetry/console/commands/shell.py b/conda_lock/_vendor/poetry/console/commands/shell.py new file mode 100644 index 00000000..2eaacbcf --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/shell.py @@ -0,0 +1,57 @@ +from __future__ import annotations + +import os +import sys + +from typing import TYPE_CHECKING +from typing import cast + +from conda_lock._vendor.poetry.console.commands.env_command import EnvCommand + + +if TYPE_CHECKING: + from conda_lock._vendor.poetry.utils.env import VirtualEnv + + +class ShellCommand(EnvCommand): + name = "shell" + description = "Spawns a shell within the virtual environment." + + help = f"""The shell command spawns a shell within the project's virtual environment. + +By default, the current active shell is detected and used. Failing that, +the shell defined via the environment variable {'COMSPEC' if os.name == 'nt' else 'SHELL'} is used. + +If a virtual environment does not exist, it will be created. +""" + + def handle(self) -> int: + from conda_lock._vendor.poetry.utils.shell import Shell + + # Check if it's already activated or doesn't exist and won't be created + if self._is_venv_activated(): + self.line( + f"Virtual environment already activated: {self.env.path}" + ) + + return 0 + + self.line(f"Spawning shell within {self.env.path}") + + # Be sure that we have the right type of environment. 
+        env = self.env +        assert env.is_venv() +        env = cast("VirtualEnv", env) + +        # Setting this to avoid spawning unnecessary nested shells +        os.environ["POETRY_ACTIVE"] = "1" +        shell = Shell.get() +        shell.activate(env) +        os.environ.pop("POETRY_ACTIVE") + +        return 0 + +    def _is_venv_activated(self) -> bool: +        return bool(os.environ.get("POETRY_ACTIVE")) or getattr( +            sys, "real_prefix", sys.prefix +        ) == str(self.env.path) diff --git a/conda_lock/_vendor/poetry/console/commands/show.py b/conda_lock/_vendor/poetry/console/commands/show.py new file mode 100644 index 00000000..708163db --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/show.py @@ -0,0 +1,584 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from conda_lock._vendor.cleo.helpers import argument +from conda_lock._vendor.cleo.helpers import option +from packaging.utils import canonicalize_name + +from conda_lock._vendor.poetry.console.commands.env_command import EnvCommand +from conda_lock._vendor.poetry.console.commands.group_command import GroupCommand + + +if TYPE_CHECKING: +    from conda_lock._vendor.cleo.io.io import IO +    from conda_lock._vendor.cleo.ui.table import Rows +    from packaging.utils import NormalizedName +    from conda_lock._vendor.poetry.core.packages.dependency import Dependency +    from conda_lock._vendor.poetry.core.packages.package import Package +    from conda_lock._vendor.poetry.core.packages.project_package import ProjectPackage + +    from conda_lock._vendor.poetry.repositories.repository import Repository + + +def reverse_deps(pkg: Package, repo: Repository) -> dict[str, str]: +    required_by = {} +    for locked in repo.packages: +        dependencies = {d.name: d.pretty_constraint for d in locked.requires} + +        if pkg.name in dependencies: +            required_by[locked.pretty_name] = dependencies[pkg.name] + +    return required_by + + +class ShowCommand(GroupCommand, EnvCommand): +    name = "show" +    description = "Shows information about packages." + +    arguments = [argument("package", "The package to inspect", optional=True)] +    options = [ +        *GroupCommand._group_dependency_options(), +        option( +            "no-dev", +            None, +            "Do not list the development dependencies. (Deprecated)", +        ), +        option("tree", "t", "List the dependencies as a tree."), +        option( +            "why", +            None, +            "When showing the full list, or a --tree for a single package," +            " display whether each package is a direct dependency or required by" +            " other packages.", +        ), +        option("latest", "l", "Show the latest version."), +        option( +            "outdated", +            "o", +            "Show the latest version but only for packages that are outdated.", +        ), +        option( +            "all", +            "a", +            "Show all packages (even those not compatible with the current system).", +        ), +        option("top-level", "T", "Show only top-level dependencies."), +    ] + +    help = """The show command displays detailed information about a package, or +lists all packages available.""" + +    colors = ["cyan", "yellow", "green", "magenta", "blue"] + +    def handle(self) -> int: +        package = self.argument("package") + +        if self.option("tree"): +            self.init_styles(self.io) + +        if self.option("top-level"): +            if self.option("tree"): +                self.line_error( +                    "Error: Cannot use --tree and --top-level at the same" +                    " time." +                ) +                return 1 +            if package is not None: +                self.line_error( +                    "Error: Cannot use --top-level when displaying a single" +                    " package." +                ) +                return 1 + +        if self.option("why"): +            if self.option("tree") and package is None: +                self.line_error( +                    "Error: --why requires a package when combined with" +                    " --tree."
+                ) + +                return 1 + +            if not self.option("tree") and package: +                self.line_error( +                    "Error: --why cannot be used without --tree when displaying" +                    " a single package." +                ) + +                return 1 + +        if self.option("outdated"): +            self.io.input.set_option("latest", True) + +        if not self.poetry.locker.is_locked(): +            self.line_error( +                "Error: poetry.lock not found. Run `poetry lock` to create" +                " it." +            ) +            return 1 + +        locked_repo = self.poetry.locker.locked_repository() + +        if package: +            return self._display_single_package_information(package, locked_repo) + +        root = self.project_with_activated_groups_only() + +        # Show tree view if requested +        if self.option("tree"): +            return self._display_packages_tree_information(locked_repo, root) + +        return self._display_packages_information(locked_repo, root) + +    def _display_single_package_information( +        self, package: str, locked_repository: Repository +    ) -> int: +        locked_packages = locked_repository.packages +        canonicalized_package = canonicalize_name(package) +        pkg = None + +        for locked in locked_packages: +            if locked.name == canonicalized_package: +                pkg = locked +                break + +        if not pkg: +            raise ValueError(f"Package {package} not found") + +        required_by = reverse_deps(pkg, locked_repository) + +        if self.option("tree"): +            if self.option("why"): +                # The default case, if there are no reverse dependencies, is to query +                # the subtree for pkg; but if any rev-deps exist we'll query for each +                # of them in turn +                packages = [pkg] +                if required_by: +                    packages = [ +                        p for p in locked_packages for r in required_by if p.name == r +                    ] +                else: +                    # if no rev-deps exist, we make this clear, as it can otherwise +                    # look very odd for packages that also have no or few direct +                    # dependencies +                    self.io.write_line(f"Package {package} is a direct dependency.") + +                for p in packages: +                    self.display_package_tree( +                        self.io, p, locked_packages, why_package=pkg +                    ) + +            else: +                self.display_package_tree(self.io, pkg, locked_packages) + +            return 0 + +        rows: Rows = [ +            ["name", f" : {pkg.pretty_name}"], +            ["version", f" : {pkg.pretty_version}"], +            ["description", f" : {pkg.description}"], +        ] + +        self.table(rows=rows, style="compact").render() + +        if pkg.requires: +            self.line("") +            self.line("dependencies") +            for dependency in pkg.requires: +                self.line( +                    f" - {dependency.pretty_name}" +                    f" {dependency.pretty_constraint}" +                ) + +        if required_by: +            self.line("") +            self.line("required by") +            for parent, requires_version in required_by.items(): +                self.line(f" - {parent} {requires_version}") + +        return 0 + +    def _display_packages_information( +        self, locked_repository: Repository, root: ProjectPackage +    ) -> int: +        import shutil + +        from conda_lock._vendor.cleo.io.null_io import NullIO + +        from conda_lock._vendor.poetry.puzzle.solver import Solver +        from conda_lock._vendor.poetry.repositories.installed_repository import InstalledRepository +        from conda_lock._vendor.poetry.repositories.repository_pool import RepositoryPool +        from conda_lock._vendor.poetry.utils.helpers import get_package_version_display_string + +        locked_packages = locked_repository.packages +        pool = RepositoryPool.from_packages(locked_packages, self.poetry.config) +        solver = Solver( +            root, +            pool=pool, +            installed=[], +            locked=locked_packages, +            io=NullIO(), +        ) +        solver.provider.load_deferred(False) +        with solver.use_environment(self.env): +            ops = solver.solve().calculate_operations() + +        required_locked_packages = {op.package for op in ops if not op.skipped} + +        show_latest = self.option("latest") +        show_all =
self.option("all") + show_top_level = self.option("top-level") + width = shutil.get_terminal_size().columns + name_length = version_length = latest_length = required_by_length = 0 + latest_packages = {} + latest_statuses = {} + installed_repo = InstalledRepository.load(self.env) + + # Computing widths + for locked in locked_packages: + if locked not in required_locked_packages and not show_all: + continue + + current_length = len(locked.pretty_name) + if not self.io.output.is_decorated(): + installed_status = self.get_installed_status( + locked, installed_repo.packages + ) + + if installed_status == "not-installed": + current_length += 4 + + if show_latest: + latest = self.find_latest_package(locked, root) + if not latest: + latest = locked + + latest_packages[locked.pretty_name] = latest + update_status = latest_statuses[locked.pretty_name] = ( + self.get_update_status(latest, locked) + ) + + if not self.option("outdated") or update_status != "up-to-date": + name_length = max(name_length, current_length) + version_length = max( + version_length, + len( + get_package_version_display_string( + locked, root=self.poetry.file.path.parent + ) + ), + ) + latest_length = max( + latest_length, + len( + get_package_version_display_string( + latest, root=self.poetry.file.path.parent + ) + ), + ) + + if self.option("why"): + required_by = reverse_deps(locked, locked_repository) + required_by_length = max( + required_by_length, + len(" from " + ",".join(required_by.keys())), + ) + else: + name_length = max(name_length, current_length) + version_length = max( + version_length, + len( + get_package_version_display_string( + locked, root=self.poetry.file.path.parent + ) + ), + ) + + if self.option("why"): + required_by = reverse_deps(locked, locked_repository) + required_by_length = max( + required_by_length, len(" from " + ",".join(required_by.keys())) + ) + + write_version = name_length + version_length + 3 <= width + write_latest = name_length + version_length + latest_length + 3 <= width + + why_end_column = ( + name_length + version_length + latest_length + required_by_length + ) + write_why = self.option("why") and (why_end_column + 3) <= width + write_description = (why_end_column + 24) <= width + + requires = root.all_requires + + for locked in locked_packages: + color = "cyan" + name = locked.pretty_name + install_marker = "" + + if show_top_level and not any(locked.satisfies(r) for r in requires): + continue + + if locked not in required_locked_packages: + if not show_all: + continue + + color = "black;options=bold" + else: + installed_status = self.get_installed_status( + locked, installed_repo.packages + ) + if installed_status == "not-installed": + color = "red" + + if not self.io.output.is_decorated(): + # Non installed in non decorated mode + install_marker = " (!)" + + if ( + show_latest + and self.option("outdated") + and latest_statuses[locked.pretty_name] == "up-to-date" + ): + continue + + line = ( + f"" + f"{name:{name_length - len(install_marker)}}{install_marker}" + ) + if write_version: + version = get_package_version_display_string( + locked, root=self.poetry.file.path.parent + ) + line += f" {version:{version_length}}" + if show_latest: + latest = latest_packages[locked.pretty_name] + update_status = latest_statuses[locked.pretty_name] + + if write_latest: + color = "green" + if update_status == "semver-safe-update": + color = "red" + elif update_status == "update-possible": + color = "yellow" + + version = get_package_version_display_string( + latest, 
root=self.poetry.file.path.parent + ) + line += f" {version:{latest_length}}" + + if write_why: + required_by = reverse_deps(locked, locked_repository) + if required_by: + content = ",".join(required_by.keys()) + # subtract 6 for ' from ' + line += f" from {content:{required_by_length - 6}}" + else: + line += " " * required_by_length + + if write_description: + description = locked.description + remaining = ( + width - name_length - version_length - required_by_length - 4 + ) + + if show_latest: + remaining -= latest_length + + if len(locked.description) > remaining: + description = description[: remaining - 3] + "..." + + line += " " + description + + self.line(line) + + return 0 + + def _display_packages_tree_information( + self, locked_repository: Repository, root: ProjectPackage + ) -> int: + packages = locked_repository.packages + + for p in packages: + for require in root.all_requires: + if p.name == require.name: + self.display_package_tree(self.io, p, packages) + break + + return 0 + + def display_package_tree( + self, + io: IO, + package: Package, + installed_packages: list[Package], + why_package: Package | None = None, + ) -> None: + io.write(f"{package.pretty_name}") + description = "" + if package.description: + description = " " + package.description + + io.write_line(f" {package.pretty_version}{description}") + + if why_package is not None: + dependencies = [p for p in package.requires if p.name == why_package.name] + else: + dependencies = package.requires + dependencies = sorted( + dependencies, + key=lambda x: x.name, + ) + + tree_bar = "├" + total = len(dependencies) + for i, dependency in enumerate(dependencies, 1): + if i == total: + tree_bar = "└" + + level = 1 + color = self.colors[level] + info = ( + f"{tree_bar}── <{color}>{dependency.name}" + f" {dependency.pretty_constraint}" + ) + self._write_tree_line(io, info) + + tree_bar = tree_bar.replace("└", " ") + packages_in_tree = [package.name, dependency.name] + + self._display_tree( + io, + dependency, + installed_packages, + packages_in_tree, + tree_bar, + level + 1, + ) + + def _display_tree( + self, + io: IO, + dependency: Dependency, + installed_packages: list[Package], + packages_in_tree: list[NormalizedName], + previous_tree_bar: str = "├", + level: int = 1, + ) -> None: + previous_tree_bar = previous_tree_bar.replace("├", "│") + + dependencies = [] + for package in installed_packages: + if package.name == dependency.name: + dependencies = package.requires + + break + + dependencies = sorted( + dependencies, + key=lambda x: x.name, + ) + tree_bar = previous_tree_bar + " ├" + total = len(dependencies) + for i, dependency in enumerate(dependencies, 1): + current_tree = packages_in_tree + if i == total: + tree_bar = previous_tree_bar + " └" + + color_ident = level % len(self.colors) + color = self.colors[color_ident] + + circular_warn = "" + if dependency.name in current_tree: + circular_warn = "(circular dependency aborted here)" + + info = ( + f"{tree_bar}── <{color}>{dependency.name}" + f" {dependency.pretty_constraint} {circular_warn}" + ) + self._write_tree_line(io, info) + + tree_bar = tree_bar.replace("└", " ") + + if dependency.name not in current_tree: + current_tree.append(dependency.name) + + self._display_tree( + io, + dependency, + installed_packages, + current_tree, + tree_bar, + level + 1, + ) + + def _write_tree_line(self, io: IO, line: str) -> None: + if not io.output.supports_utf8(): + line = line.replace("└", "`-") + line = line.replace("├", "|-") + line = line.replace("──", "-") + line = 
line.replace("│", "|") + + io.write_line(line) + + def init_styles(self, io: IO) -> None: + from conda_lock._vendor.cleo.formatters.style import Style + + for color in self.colors: + style = Style(color) + io.output.formatter.set_style(color, style) + io.error_output.formatter.set_style(color, style) + + def find_latest_package( + self, package: Package, root: ProjectPackage + ) -> Package | None: + from conda_lock._vendor.cleo.io.null_io import NullIO + + from conda_lock._vendor.poetry.puzzle.provider import Provider + from conda_lock._vendor.poetry.version.version_selector import VersionSelector + + # find the latest version allowed in this pool + requires = root.all_requires + if package.is_direct_origin(): + for dep in requires: + if dep.name == package.name and dep.source_type == package.source_type: + provider = Provider(root, self.poetry.pool, NullIO()) + return provider.search_for_direct_origin_dependency(dep) + + allow_prereleases = False + for dep in requires: + if dep.name == package.name: + allow_prereleases = dep.allows_prereleases() + break + + name = package.name + selector = VersionSelector(self.poetry.pool) + + return selector.find_best_candidate( + name, f">={package.pretty_version}", allow_prereleases + ) + + def get_update_status(self, latest: Package, package: Package) -> str: + from conda_lock._vendor.poetry.core.constraints.version import parse_constraint + + if latest.full_pretty_version == package.full_pretty_version: + return "up-to-date" + + constraint = parse_constraint("^" + package.pretty_version) + + if constraint.allows(latest.version): + # It needs an immediate semver-compliant upgrade + return "semver-safe-update" + + # it needs an upgrade but has potential BC breaks so is not urgent + return "update-possible" + + def get_installed_status( + self, locked: Package, installed_packages: list[Package] + ) -> str: + for package in installed_packages: + if locked.name == package.name: + return "installed" + + return "not-installed" diff --git a/conda_lock/_vendor/poetry/console/commands/source/__init__.py b/conda_lock/_vendor/poetry/console/commands/source/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/conda_lock/_vendor/poetry/console/commands/source/add.py b/conda_lock/_vendor/poetry/console/commands/source/add.py new file mode 100644 index 00000000..5aceac9e --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/source/add.py @@ -0,0 +1,163 @@ +from __future__ import annotations + +from conda_lock._vendor.cleo.helpers import argument +from conda_lock._vendor.cleo.helpers import option +from conda_lock._vendor.cleo.io.null_io import NullIO +from tomlkit.items import AoT + +from conda_lock._vendor.poetry.config.source import Source +from conda_lock._vendor.poetry.console.commands.command import Command +from conda_lock._vendor.poetry.repositories.repository_pool import Priority + + +class SourceAddCommand(Command): + name = "source add" + description = "Add source configuration for project." + + arguments = [ + argument( + "name", + "Source repository name.", + ), + argument( + "url", + "Source repository URL." + " Required, except for PyPI, for which it is not allowed.", + optional=True, + ), + ] + + options = [ + option( + "default", + "d", + "Set this source as the default (disable PyPI). A " + "default source will also be the fallback source if " + "you add other sources. (Deprecated, use --priority)", + ), + option( + "secondary", + "s", + "Set this source as secondary. 
(Deprecated, use" + " --priority)", + ), + option( + "priority", + "p", + "Set the priority of this source. One of:" + f" {', '.join(p.name.lower() for p in Priority)}. Defaults to" + f" {Priority.PRIMARY.name.lower()}.", + flag=False, + ), + ] + + def handle(self) -> int: + from conda_lock._vendor.poetry.factory import Factory + from conda_lock._vendor.poetry.utils.source import source_to_table + + name: str = self.argument("name") + lower_name = name.lower() + url: str = self.argument("url") + is_default: bool = self.option("default", False) + is_secondary: bool = self.option("secondary", False) + priority_str: str | None = self.option("priority", None) + + if lower_name == "pypi": + name = "PyPI" + if url: + self.line_error( + "The URL of PyPI is fixed and cannot be set." + ) + return 1 + elif not url: + self.line_error( + "A custom source cannot be added without a URL." + ) + return 1 + + if is_default and is_secondary: + self.line_error( + "Cannot configure a source as both default and" + " secondary." + ) + return 1 + + if is_default or is_secondary: + if priority_str is not None: + self.line_error( + "Priority was passed through both --priority and a" + " deprecated flag (--default or --secondary). Please only provide" + " one of these." + ) + return 1 + else: + self.line_error( + "Warning: Priority was set through a deprecated flag" + " (--default or --secondary). Consider using --priority next" + " time." + ) + + if is_default: + priority = Priority.DEFAULT + elif is_secondary: + priority = Priority.SECONDARY + elif priority_str is None: + priority = Priority.PRIMARY + else: + priority = Priority[priority_str.upper()] + + if priority is Priority.SECONDARY: + allowed_prios = ( + p for p in Priority if p not in {Priority.DEFAULT, Priority.SECONDARY} + ) + self.line_error( + "Warning: Priority 'secondary' is deprecated. Consider" + " changing the priority to one of the non-deprecated values:" + f" {', '.join(repr(p.name.lower()) for p in allowed_prios)}." + ) + if priority is Priority.DEFAULT: + self.line_error( + "Warning: Priority 'default' is deprecated. You can achieve" + " the same effect by changing the priority to 'primary' and putting" + " the source first." + ) + + sources = AoT([]) + new_source = Source(name=name, url=url, priority=priority) + is_new_source = True + + for source in self.poetry.get_sources(): + if source.priority is Priority.DEFAULT and priority is Priority.DEFAULT: + self.line_error( + f"Source with name {source.name} is already set to" + " default. Only one default source can be configured at a" + " time." + ) + return 1 + + if source.name.lower() == lower_name: + source = new_source + is_new_source = False + + sources.append(source_to_table(source)) + + if is_new_source: + self.line(f"Adding source with name {name}.") + sources.append(source_to_table(new_source)) + else: + self.line(f"Source with name {name} already exists. Updating.") + + # ensure new source is valid. eg: invalid name etc. 
+ try: + pool = Factory.create_pool(self.poetry.config, sources, NullIO()) + pool.repository(name) + except ValueError as e: + self.line_error( + f"Failed to validate addition of {name}: {e}" + ) + return 1 + + self.poetry.pyproject.poetry_config["source"] = sources + self.poetry.pyproject.save() + + return 0 diff --git a/conda_lock/_vendor/poetry/console/commands/source/remove.py b/conda_lock/_vendor/poetry/console/commands/source/remove.py new file mode 100644 index 00000000..fcf246ab --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/source/remove.py @@ -0,0 +1,45 @@ +from __future__ import annotations + +from conda_lock._vendor.cleo.helpers import argument +from tomlkit.items import AoT + +from conda_lock._vendor.poetry.console.commands.command import Command + + +class SourceRemoveCommand(Command): + name = "source remove" + description = "Remove source configured for the project." + + arguments = [ + argument( + "name", + "Source repository name.", + ), + ] + + def handle(self) -> int: + from conda_lock._vendor.poetry.utils.source import source_to_table + + name = self.argument("name") + lower_name = name.lower() + + sources = AoT([]) + removed = False + + for source in self.poetry.get_sources(): + if source.name.lower() == lower_name: + self.line(f"Removing source with name {source.name}.") + removed = True + continue + sources.append(source_to_table(source)) + + if not removed: + self.line_error( + f"Source with name {name} was not found." + ) + return 1 + + self.poetry.pyproject.poetry_config["source"] = sources + self.poetry.pyproject.save() + + return 0 diff --git a/conda_lock/_vendor/poetry/console/commands/source/show.py b/conda_lock/_vendor/poetry/console/commands/source/show.py new file mode 100644 index 00000000..baa97857 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/source/show.py @@ -0,0 +1,56 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from conda_lock._vendor.cleo.helpers import argument + +from conda_lock._vendor.poetry.console.commands.command import Command + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.ui.table import Rows + + +class SourceShowCommand(Command): + name = "source show" + description = "Show information about sources configured for the project." + + arguments = [ + argument( + "source", + "Source(s) to show information for. 
Defaults to showing all sources.", +            optional=True, +            multiple=True, +        ), +    ] + +    def handle(self) -> int: +        sources = self.poetry.get_sources() +        names = self.argument("source") +        lower_names = [name.lower() for name in names] + +        if not sources: +            self.line("No sources configured for this project.") +            return 0 + +        if names and not any(s.name.lower() in lower_names for s in sources): +            self.line_error( +                f"No source found with name(s): {', '.join(names)}", +                style="error", +            ) +            return 1 + +        for source in sources: +            if names and source.name.lower() not in lower_names: +                continue + +            table = self.table(style="compact") +            rows: Rows = [["name", f" : {source.name}"]] +            if source.url: +                rows.append(["url", f" : {source.url}"]) +            rows.append(["priority", f" : {source.priority.name.lower()}"]) +            table.add_rows(rows) +            table.render() +            self.line("") + +        return 0 diff --git a/conda_lock/_vendor/poetry/console/commands/update.py b/conda_lock/_vendor/poetry/console/commands/update.py new file mode 100644 index 00000000..c0dadc32 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/update.py @@ -0,0 +1,56 @@ +from __future__ import annotations + +from conda_lock._vendor.cleo.helpers import argument +from conda_lock._vendor.cleo.helpers import option + +from conda_lock._vendor.poetry.console.commands.installer_command import InstallerCommand + + +class UpdateCommand(InstallerCommand): +    name = "update" +    description = ( +        "Update the dependencies according to the pyproject.toml file." +    ) + +    arguments = [ +        argument("packages", "The packages to update", optional=True, multiple=True) +    ] +    options = [ +        *InstallerCommand._group_dependency_options(), +        option( +            "no-dev", +            None, +            "Do not update the development dependencies." +            " (Deprecated)", +        ), +        option( +            "sync", +            None, +            "Synchronize the environment with the locked packages and the specified" +            " groups.", +        ), +        option( +            "dry-run", +            None, +            "Output the operations but do not execute anything " +            "(implicitly enables --verbose).", +        ), +        option("lock", None, "Do not perform operations (only update the lockfile)."), +    ] + +    loggers = ["poetry.repositories.pypi_repository"] + +    def handle(self) -> int: +        packages = self.argument("packages") +        if packages: +            self.installer.whitelist({name: "*" for name in packages}) + +        self.installer.only_groups(self.activated_groups) +        self.installer.dry_run(self.option("dry-run")) +        self.installer.requires_synchronization(self.option("sync")) +        self.installer.execute_operations(not self.option("lock")) + +        # Force update +        self.installer.update(True) + +        return self.installer.run() diff --git a/conda_lock/_vendor/poetry/console/commands/version.py b/conda_lock/_vendor/poetry/console/commands/version.py new file mode 100644 index 00000000..c2f8714f --- /dev/null +++ b/conda_lock/_vendor/poetry/console/commands/version.py @@ -0,0 +1,128 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Any + +from conda_lock._vendor.cleo.helpers import argument +from conda_lock._vendor.cleo.helpers import option +from conda_lock._vendor.poetry.core.version.exceptions import InvalidVersion +from tomlkit.toml_document import TOMLDocument + +from conda_lock._vendor.poetry.console.commands.command import Command + + +if TYPE_CHECKING: +    from conda_lock._vendor.poetry.core.constraints.version import Version + + +class VersionCommand(Command): +    name = "version" +    description = ( +        "Shows the version of the project or bumps it when a valid " +        "bump rule is provided."
+ ) + + arguments = [ + argument( + "version", + "The version number or the rule to update the version.", + optional=True, + ), + ] + options = [ + option("short", "s", "Output the version number only"), + option( + "dry-run", + None, + "Do not update pyproject.toml file", + ), + option("next-phase", None, "Increment the phase of the current version"), + ] + + help = """\ +The version command shows the current version of the project or bumps the version of +the project and writes the new version back to pyproject.toml if a valid +bump rule is provided. + +The new version should ideally be a valid semver string or a valid bump rule: +patch, minor, major, prepatch, preminor, premajor, prerelease. +""" + + RESERVED = { + "major", + "minor", + "patch", + "premajor", + "preminor", + "prepatch", + "prerelease", + } + + def handle(self) -> int: + version = self.argument("version") + + if version: + version = self.increment_version( + self.poetry.package.pretty_version, version, self.option("next-phase") + ) + + if self.option("short"): + self.line(version.to_string()) + else: + self.line( + f"Bumping version from {self.poetry.package.pretty_version}" + f" to {version}" + ) + + if not self.option("dry-run"): + content: dict[str, Any] = self.poetry.file.read() + poetry_content = content["tool"]["poetry"] + poetry_content["version"] = version.text + + assert isinstance(content, TOMLDocument) + self.poetry.file.write(content) + else: + if self.option("short"): + self.line(self.poetry.package.pretty_version) + else: + self.line( + f"{self.poetry.package.pretty_name}" + f" {self.poetry.package.pretty_version}" + ) + + return 0 + + def increment_version( + self, version: str, rule: str, next_phase: bool = False + ) -> Version: + from conda_lock._vendor.poetry.core.constraints.version import Version + + try: + parsed = Version.parse(version) + except InvalidVersion: + raise ValueError("The project's version doesn't seem to follow semver") + + if rule in {"major", "premajor"}: + new = parsed.next_major() + if rule == "premajor": + new = new.first_prerelease() + elif rule in {"minor", "preminor"}: + new = parsed.next_minor() + if rule == "preminor": + new = new.first_prerelease() + elif rule in {"patch", "prepatch"}: + new = parsed.next_patch() + if rule == "prepatch": + new = new.first_prerelease() + elif rule == "prerelease": + if parsed.is_unstable(): + pre = parsed.pre + assert pre is not None + pre = pre.next_phase() if next_phase else pre.next() + new = Version(parsed.epoch, parsed.release, pre) + else: + new = parsed.next_patch().first_prerelease() + else: + new = Version.parse(rule) + + return new diff --git a/conda_lock/_vendor/poetry/console/events/__init__.py b/conda_lock/_vendor/poetry/console/events/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/conda_lock/_vendor/poetry/console/events/console_events.py b/conda_lock/_vendor/poetry/console/events/console_events.py new file mode 100644 index 00000000..e69de29b diff --git a/conda_lock/_vendor/poetry/console/exceptions.py b/conda_lock/_vendor/poetry/console/exceptions.py new file mode 100644 index 00000000..d33020e4 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/exceptions.py @@ -0,0 +1,11 @@ +from __future__ import annotations + +from conda_lock._vendor.cleo.exceptions import CleoError + + +class PoetryConsoleError(CleoError): + pass + + +class GroupNotFound(PoetryConsoleError): + pass diff --git a/conda_lock/_vendor/poetry/console/io/__init__.py b/conda_lock/_vendor/poetry/console/io/__init__.py new file mode 
100644 index 00000000..e69de29b diff --git a/conda_lock/_vendor/poetry/console/io/inputs/__init__.py b/conda_lock/_vendor/poetry/console/io/inputs/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/conda_lock/_vendor/poetry/console/io/inputs/run_argv_input.py b/conda_lock/_vendor/poetry/console/io/inputs/run_argv_input.py new file mode 100644 index 00000000..25a3fccf --- /dev/null +++ b/conda_lock/_vendor/poetry/console/io/inputs/run_argv_input.py @@ -0,0 +1,83 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from conda_lock._vendor.cleo.io.inputs.argv_input import ArgvInput + + +if TYPE_CHECKING: + from conda_lock._vendor.cleo.io.inputs.definition import Definition + + +class RunArgvInput(ArgvInput): + def __init__( + self, + argv: list[str] | None = None, + definition: Definition | None = None, + ) -> None: + super().__init__(argv, definition=definition) + + self._parameter_options: list[str] = [] + + @property + def first_argument(self) -> str | None: + return "run" + + def add_parameter_option(self, name: str) -> None: + self._parameter_options.append(name) + + def has_parameter_option( + self, values: str | list[str], only_params: bool = False + ) -> bool: + if not isinstance(values, list): + values = [values] + + for token in self._tokens: + if only_params and token == "--": + return False + + for value in values: + if value not in self._parameter_options: + continue + + # Options with values: + # For long options, test for '--option=' at beginning + # For short options, test for '-o' at beginning + leading = value + "=" if value.startswith("--") else value + + if token == value or leading != "" and token.startswith(leading): + return True + + return False + + def _parse(self) -> None: + parse_options = True + self._parsed = self._tokens[:] + + try: + token = self._parsed.pop(0) + except IndexError: + token = None + + while token is not None: + if parse_options and token == "": + self._parse_argument(token) + elif parse_options and token == "--": + parse_options = False + elif parse_options and token.find("--") == 0: + if token in self._parameter_options: + self._parse_long_option(token) + else: + self._parse_argument(token) + elif parse_options and token[0] == "-" and token != "-": + if token in self._parameter_options: + self._parse_short_option(token) + else: + self._parse_argument(token) + else: + self._parse_argument(token) + + try: + token = self._parsed.pop(0) + except IndexError: + token = None diff --git a/conda_lock/_vendor/poetry/console/logging/__init__.py b/conda_lock/_vendor/poetry/console/logging/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/conda_lock/_vendor/poetry/console/logging/filters.py b/conda_lock/_vendor/poetry/console/logging/filters.py new file mode 100644 index 00000000..3ae42b5e --- /dev/null +++ b/conda_lock/_vendor/poetry/console/logging/filters.py @@ -0,0 +1,6 @@ +from __future__ import annotations + +import logging + + +POETRY_FILTER = logging.Filter(name="poetry") diff --git a/conda_lock/_vendor/poetry/console/logging/formatters/__init__.py b/conda_lock/_vendor/poetry/console/logging/formatters/__init__.py new file mode 100644 index 00000000..7b9bb1ce --- /dev/null +++ b/conda_lock/_vendor/poetry/console/logging/formatters/__init__.py @@ -0,0 +1,10 @@ +from __future__ import annotations + +from conda_lock._vendor.poetry.console.logging.formatters.builder_formatter import BuilderLogFormatter + + +FORMATTERS = { + "poetry.core.masonry.builders.builder": BuilderLogFormatter(), + 
"poetry.core.masonry.builders.sdist": BuilderLogFormatter(), + "poetry.core.masonry.builders.wheel": BuilderLogFormatter(), +} diff --git a/conda_lock/_vendor/poetry/console/logging/formatters/builder_formatter.py b/conda_lock/_vendor/poetry/console/logging/formatters/builder_formatter.py new file mode 100644 index 00000000..52dc64e0 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/logging/formatters/builder_formatter.py @@ -0,0 +1,23 @@ +from __future__ import annotations + +import re + +from conda_lock._vendor.poetry.console.logging.formatters.formatter import Formatter + + +class BuilderLogFormatter(Formatter): + def format(self, msg: str) -> str: + if msg.startswith("Building "): + msg = re.sub("Building (.+)", " - Building \\1", msg) + elif msg.startswith("Built "): + msg = re.sub("Built (.+)", " - Built \\1", msg) + elif msg.startswith("Adding: "): + msg = re.sub("Adding: (.+)", " - Adding: \\1", msg) + elif msg.startswith("Executing build script: "): + msg = re.sub( + "Executing build script: (.+)", + " - Executing build script: \\1", + msg, + ) + + return msg diff --git a/conda_lock/_vendor/poetry/console/logging/formatters/formatter.py b/conda_lock/_vendor/poetry/console/logging/formatters/formatter.py new file mode 100644 index 00000000..8b595137 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/logging/formatters/formatter.py @@ -0,0 +1,6 @@ +from __future__ import annotations + + +class Formatter: + def format(self, record: str) -> str: + raise NotImplementedError() diff --git a/conda_lock/_vendor/poetry/console/logging/io_formatter.py b/conda_lock/_vendor/poetry/console/logging/io_formatter.py new file mode 100644 index 00000000..a121f863 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/logging/io_formatter.py @@ -0,0 +1,71 @@ +from __future__ import annotations + +import logging +import sys +import textwrap + +from pathlib import Path +from typing import TYPE_CHECKING + +from conda_lock._vendor.poetry.console.logging.filters import POETRY_FILTER +from conda_lock._vendor.poetry.console.logging.formatters import FORMATTERS + + +if TYPE_CHECKING: + from logging import LogRecord + + +class IOFormatter(logging.Formatter): + _colors = { + "error": "fg=red", + "warning": "fg=yellow", + "debug": "debug", + "info": "fg=blue", + } + + def format(self, record: LogRecord) -> str: + if not record.exc_info: + level = record.levelname.lower() + msg = record.msg + + if record.name in FORMATTERS: + msg = FORMATTERS[record.name].format(msg) + elif level in self._colors: + msg = f"<{self._colors[level]}>{msg}" + + record.msg = msg + + formatted = super().format(record) + + if not POETRY_FILTER.filter(record): + # prefix all lines from third-party packages for easier debugging + formatted = textwrap.indent( + formatted, f"[{_log_prefix(record)}] ", lambda line: True + ) + + return formatted + + +def _log_prefix(record: LogRecord) -> str: + prefix = _path_to_package(Path(record.pathname)) or record.module + if record.name != "root": + prefix = ":".join([prefix, record.name]) + return prefix + + +def _path_to_package(path: Path) -> str | None: + """Return main package name from the LogRecord.pathname.""" + prefix: Path | None = None + # Find the most specific prefix in sys.path. + # We have to search the entire sys.path because a subsequent path might be + # a sub path of the first match and thereby a better match. 
+ for syspath in sys.path: + if ( + prefix and prefix in (p := Path(syspath)).parents and p in path.parents + ) or (not prefix and (p := Path(syspath)) in path.parents): + prefix = p + if not prefix: + # this is unexpected, but let's play it safe + return None + path = path.relative_to(prefix) + return path.parts[0] # main package name diff --git a/conda_lock/_vendor/poetry/console/logging/io_handler.py b/conda_lock/_vendor/poetry/console/logging/io_handler.py new file mode 100644 index 00000000..a89176b1 --- /dev/null +++ b/conda_lock/_vendor/poetry/console/logging/io_handler.py @@ -0,0 +1,30 @@ +from __future__ import annotations + +import logging + +from typing import TYPE_CHECKING + + +if TYPE_CHECKING: + from logging import LogRecord + + from conda_lock._vendor.cleo.io.io import IO + + +class IOHandler(logging.Handler): + def __init__(self, io: IO) -> None: + self._io = io + + super().__init__() + + def emit(self, record: LogRecord) -> None: + try: + msg = self.format(record) + level = record.levelname.lower() + err = level in ("warning", "error", "exception", "critical") + if err: + self._io.write_error_line(msg) + else: + self._io.write_line(msg) + except Exception: + self.handleError(record) diff --git a/conda_lock/_vendor/poetry/core/__init__.py b/conda_lock/_vendor/poetry/core/__init__.py index f7d95ece..8a2867f0 100644 --- a/conda_lock/_vendor/poetry/core/__init__.py +++ b/conda_lock/_vendor/poetry/core/__init__.py @@ -1,13 +1,13 @@ +from __future__ import annotations + import sys +from pathlib import Path -try: - from pathlib import Path -except ImportError: - # noinspection PyUnresolvedReferences - from pathlib2 import Path -__version__ = "1.0.8" +# this cannot presently be replaced with importlib.metadata.version as when building +# itself, poetry-core is not available as an installed distribution. +__version__ = "1.9.0" __vendor_site__ = (Path(__file__).parent / "_vendor").as_posix() diff --git a/conda_lock/_vendor/poetry/core/_vendor/_pyrsistent_version.py b/conda_lock/_vendor/poetry/core/_vendor/_pyrsistent_version.py deleted file mode 100644 index 9513287c..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/_pyrsistent_version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = '0.16.1' diff --git a/conda_lock/_vendor/poetry/core/_vendor/attr/__init__.py b/conda_lock/_vendor/poetry/core/_vendor/attr/__init__.py deleted file mode 100644 index bf329cad..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/attr/__init__.py +++ /dev/null @@ -1,76 +0,0 @@ -from __future__ import absolute_import, division, print_function - -import sys - -from functools import partial - -from . 
import converters, exceptions, filters, setters, validators -from ._config import get_run_validators, set_run_validators -from ._funcs import asdict, assoc, astuple, evolve, has, resolve_types -from ._make import ( - NOTHING, - Attribute, - Factory, - attrib, - attrs, - fields, - fields_dict, - make_class, - validate, -) -from ._version_info import VersionInfo - - -__version__ = "20.3.0" -__version_info__ = VersionInfo._from_version_string(__version__) - -__title__ = "attrs" -__description__ = "Classes Without Boilerplate" -__url__ = "https://www.attrs.org/" -__uri__ = __url__ -__doc__ = __description__ + " <" + __uri__ + ">" - -__author__ = "Hynek Schlawack" -__email__ = "hs@ox.cx" - -__license__ = "MIT" -__copyright__ = "Copyright (c) 2015 Hynek Schlawack" - - -s = attributes = attrs -ib = attr = attrib -dataclass = partial(attrs, auto_attribs=True) # happy Easter ;) - -__all__ = [ - "Attribute", - "Factory", - "NOTHING", - "asdict", - "assoc", - "astuple", - "attr", - "attrib", - "attributes", - "attrs", - "converters", - "evolve", - "exceptions", - "fields", - "fields_dict", - "filters", - "get_run_validators", - "has", - "ib", - "make_class", - "resolve_types", - "s", - "set_run_validators", - "setters", - "validate", - "validators", -] - -if sys.version_info[:2] >= (3, 6): - from ._next_gen import define, field, frozen, mutable - - __all__.extend((define, field, frozen, mutable)) diff --git a/conda_lock/_vendor/poetry/core/_vendor/attr/_compat.py b/conda_lock/_vendor/poetry/core/_vendor/attr/_compat.py deleted file mode 100644 index b0ead6e1..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/attr/_compat.py +++ /dev/null @@ -1,231 +0,0 @@ -from __future__ import absolute_import, division, print_function - -import platform -import sys -import types -import warnings - - -PY2 = sys.version_info[0] == 2 -PYPY = platform.python_implementation() == "PyPy" - - -if PYPY or sys.version_info[:2] >= (3, 6): - ordered_dict = dict -else: - from collections import OrderedDict - - ordered_dict = OrderedDict - - -if PY2: - from collections import Mapping, Sequence - - from UserDict import IterableUserDict - - # We 'bundle' isclass instead of using inspect as importing inspect is - # fairly expensive (order of 10-15 ms for a modern machine in 2016) - def isclass(klass): - return isinstance(klass, (type, types.ClassType)) - - # TYPE is used in exceptions, repr(int) is different on Python 2 and 3. - TYPE = "type" - - def iteritems(d): - return d.iteritems() - - # Python 2 is bereft of a read-only dict proxy, so we make one! - class ReadOnlyDict(IterableUserDict): - """ - Best-effort read-only dict wrapper. - """ - - def __setitem__(self, key, val): - # We gently pretend we're a Python 3 mappingproxy. - raise TypeError( - "'mappingproxy' object does not support item assignment" - ) - - def update(self, _): - # We gently pretend we're a Python 3 mappingproxy. - raise AttributeError( - "'mappingproxy' object has no attribute 'update'" - ) - - def __delitem__(self, _): - # We gently pretend we're a Python 3 mappingproxy. - raise TypeError( - "'mappingproxy' object does not support item deletion" - ) - - def clear(self): - # We gently pretend we're a Python 3 mappingproxy. - raise AttributeError( - "'mappingproxy' object has no attribute 'clear'" - ) - - def pop(self, key, default=None): - # We gently pretend we're a Python 3 mappingproxy. - raise AttributeError( - "'mappingproxy' object has no attribute 'pop'" - ) - - def popitem(self): - # We gently pretend we're a Python 3 mappingproxy. 
- raise AttributeError( - "'mappingproxy' object has no attribute 'popitem'" - ) - - def setdefault(self, key, default=None): - # We gently pretend we're a Python 3 mappingproxy. - raise AttributeError( - "'mappingproxy' object has no attribute 'setdefault'" - ) - - def __repr__(self): - # Override to be identical to the Python 3 version. - return "mappingproxy(" + repr(self.data) + ")" - - def metadata_proxy(d): - res = ReadOnlyDict() - res.data.update(d) # We blocked update, so we have to do it like this. - return res - - def just_warn(*args, **kw): # pragma: no cover - """ - We only warn on Python 3 because we are not aware of any concrete - consequences of not setting the cell on Python 2. - """ - - -else: # Python 3 and later. - from collections.abc import Mapping, Sequence # noqa - - def just_warn(*args, **kw): - """ - We only warn on Python 3 because we are not aware of any concrete - consequences of not setting the cell on Python 2. - """ - warnings.warn( - "Running interpreter doesn't sufficiently support code object " - "introspection. Some features like bare super() or accessing " - "__class__ will not work with slotted classes.", - RuntimeWarning, - stacklevel=2, - ) - - def isclass(klass): - return isinstance(klass, type) - - TYPE = "class" - - def iteritems(d): - return d.items() - - def metadata_proxy(d): - return types.MappingProxyType(dict(d)) - - -def make_set_closure_cell(): - """Return a function of two arguments (cell, value) which sets - the value stored in the closure cell `cell` to `value`. - """ - # pypy makes this easy. (It also supports the logic below, but - # why not do the easy/fast thing?) - if PYPY: - - def set_closure_cell(cell, value): - cell.__setstate__((value,)) - - return set_closure_cell - - # Otherwise gotta do it the hard way. - - # Create a function that will set its first cellvar to `value`. - def set_first_cellvar_to(value): - x = value - return - - # This function will be eliminated as dead code, but - # not before its reference to `x` forces `x` to be - # represented as a closure cell rather than a local. - def force_x_to_be_a_cell(): # pragma: no cover - return x - - try: - # Extract the code object and make sure our assumptions about - # the closure behavior are correct. - if PY2: - co = set_first_cellvar_to.func_code - else: - co = set_first_cellvar_to.__code__ - if co.co_cellvars != ("x",) or co.co_freevars != (): - raise AssertionError # pragma: no cover - - # Convert this code object to a code object that sets the - # function's first _freevar_ (not cellvar) to the argument. - if sys.version_info >= (3, 8): - # CPython 3.8+ has an incompatible CodeType signature - # (added a posonlyargcount argument) but also added - # CodeType.replace() to do this without counting parameters. - set_first_freevar_code = co.replace( - co_cellvars=co.co_freevars, co_freevars=co.co_cellvars - ) - else: - args = [co.co_argcount] - if not PY2: - args.append(co.co_kwonlyargcount) - args.extend( - [ - co.co_nlocals, - co.co_stacksize, - co.co_flags, - co.co_code, - co.co_consts, - co.co_names, - co.co_varnames, - co.co_filename, - co.co_name, - co.co_firstlineno, - co.co_lnotab, - # These two arguments are reversed: - co.co_cellvars, - co.co_freevars, - ] - ) - set_first_freevar_code = types.CodeType(*args) - - def set_closure_cell(cell, value): - # Create a function using the set_first_freevar_code, - # whose first closure cell is `cell`. Calling it will - # change the value of that cell. 
- setter = types.FunctionType( - set_first_freevar_code, {}, "setter", (), (cell,) - ) - # And call it to set the cell. - setter(value) - - # Make sure it works on this interpreter: - def make_func_with_cell(): - x = None - - def func(): - return x # pragma: no cover - - return func - - if PY2: - cell = make_func_with_cell().func_closure[0] - else: - cell = make_func_with_cell().__closure__[0] - set_closure_cell(cell, 100) - if cell.cell_contents != 100: - raise AssertionError # pragma: no cover - - except Exception: - return just_warn - else: - return set_closure_cell - - -set_closure_cell = make_set_closure_cell() diff --git a/conda_lock/_vendor/poetry/core/_vendor/attr/_config.py b/conda_lock/_vendor/poetry/core/_vendor/attr/_config.py deleted file mode 100644 index 8ec92096..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/attr/_config.py +++ /dev/null @@ -1,23 +0,0 @@ -from __future__ import absolute_import, division, print_function - - -__all__ = ["set_run_validators", "get_run_validators"] - -_run_validators = True - - -def set_run_validators(run): - """ - Set whether or not validators are run. By default, they are run. - """ - if not isinstance(run, bool): - raise TypeError("'run' must be bool.") - global _run_validators - _run_validators = run - - -def get_run_validators(): - """ - Return whether or not validators are run. - """ - return _run_validators diff --git a/conda_lock/_vendor/poetry/core/_vendor/attr/_funcs.py b/conda_lock/_vendor/poetry/core/_vendor/attr/_funcs.py deleted file mode 100644 index e6c930cb..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/attr/_funcs.py +++ /dev/null @@ -1,390 +0,0 @@ -from __future__ import absolute_import, division, print_function - -import copy - -from ._compat import iteritems -from ._make import NOTHING, _obj_setattr, fields -from .exceptions import AttrsAttributeNotFoundError - - -def asdict( - inst, - recurse=True, - filter=None, - dict_factory=dict, - retain_collection_types=False, - value_serializer=None, -): - """ - Return the ``attrs`` attribute values of *inst* as a dict. - - Optionally recurse into other ``attrs``-decorated classes. - - :param inst: Instance of an ``attrs``-decorated class. - :param bool recurse: Recurse into classes that are also - ``attrs``-decorated. - :param callable filter: A callable whose return code determines whether an - attribute or element is included (``True``) or dropped (``False``). Is - called with the `attr.Attribute` as the first argument and the - value as the second argument. - :param callable dict_factory: A callable to produce dictionaries from. For - example, to produce ordered dictionaries instead of normal Python - dictionaries, pass in ``collections.OrderedDict``. - :param bool retain_collection_types: Do not convert to ``list`` when - encountering an attribute whose type is ``tuple`` or ``set``. Only - meaningful if ``recurse`` is ``True``. - :param Optional[callable] value_serializer: A hook that is called for every - attribute or dict key/value. It receives the current instance, field - and value and must return the (updated) value. The hook is run *after* - the optional *filter* has been applied. - - :rtype: return type of *dict_factory* - - :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` - class. - - .. versionadded:: 16.0.0 *dict_factory* - .. versionadded:: 16.1.0 *retain_collection_types* - .. 
versionadded:: 20.3.0 *value_serializer* - """ - attrs = fields(inst.__class__) - rv = dict_factory() - for a in attrs: - v = getattr(inst, a.name) - if filter is not None and not filter(a, v): - continue - - if value_serializer is not None: - v = value_serializer(inst, a, v) - - if recurse is True: - if has(v.__class__): - rv[a.name] = asdict( - v, - True, - filter, - dict_factory, - retain_collection_types, - value_serializer, - ) - elif isinstance(v, (tuple, list, set, frozenset)): - cf = v.__class__ if retain_collection_types is True else list - rv[a.name] = cf( - [ - _asdict_anything( - i, - filter, - dict_factory, - retain_collection_types, - value_serializer, - ) - for i in v - ] - ) - elif isinstance(v, dict): - df = dict_factory - rv[a.name] = df( - ( - _asdict_anything( - kk, - filter, - df, - retain_collection_types, - value_serializer, - ), - _asdict_anything( - vv, - filter, - df, - retain_collection_types, - value_serializer, - ), - ) - for kk, vv in iteritems(v) - ) - else: - rv[a.name] = v - else: - rv[a.name] = v - return rv - - -def _asdict_anything( - val, - filter, - dict_factory, - retain_collection_types, - value_serializer, -): - """ - ``asdict`` only works on attrs instances, this works on anything. - """ - if getattr(val.__class__, "__attrs_attrs__", None) is not None: - # Attrs class. - rv = asdict( - val, - True, - filter, - dict_factory, - retain_collection_types, - value_serializer, - ) - elif isinstance(val, (tuple, list, set, frozenset)): - cf = val.__class__ if retain_collection_types is True else list - rv = cf( - [ - _asdict_anything( - i, - filter, - dict_factory, - retain_collection_types, - value_serializer, - ) - for i in val - ] - ) - elif isinstance(val, dict): - df = dict_factory - rv = df( - ( - _asdict_anything( - kk, filter, df, retain_collection_types, value_serializer - ), - _asdict_anything( - vv, filter, df, retain_collection_types, value_serializer - ), - ) - for kk, vv in iteritems(val) - ) - else: - rv = val - if value_serializer is not None: - rv = value_serializer(None, None, rv) - - return rv - - -def astuple( - inst, - recurse=True, - filter=None, - tuple_factory=tuple, - retain_collection_types=False, -): - """ - Return the ``attrs`` attribute values of *inst* as a tuple. - - Optionally recurse into other ``attrs``-decorated classes. - - :param inst: Instance of an ``attrs``-decorated class. - :param bool recurse: Recurse into classes that are also - ``attrs``-decorated. - :param callable filter: A callable whose return code determines whether an - attribute or element is included (``True``) or dropped (``False``). Is - called with the `attr.Attribute` as the first argument and the - value as the second argument. - :param callable tuple_factory: A callable to produce tuples from. For - example, to produce lists instead of tuples. - :param bool retain_collection_types: Do not convert to ``list`` - or ``dict`` when encountering an attribute which type is - ``tuple``, ``dict`` or ``set``. Only meaningful if ``recurse`` is - ``True``. - - :rtype: return type of *tuple_factory* - - :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` - class. - - .. versionadded:: 16.2.0 - """ - attrs = fields(inst.__class__) - rv = [] - retain = retain_collection_types # Very long. 
:/ - for a in attrs: - v = getattr(inst, a.name) - if filter is not None and not filter(a, v): - continue - if recurse is True: - if has(v.__class__): - rv.append( - astuple( - v, - recurse=True, - filter=filter, - tuple_factory=tuple_factory, - retain_collection_types=retain, - ) - ) - elif isinstance(v, (tuple, list, set, frozenset)): - cf = v.__class__ if retain is True else list - rv.append( - cf( - [ - astuple( - j, - recurse=True, - filter=filter, - tuple_factory=tuple_factory, - retain_collection_types=retain, - ) - if has(j.__class__) - else j - for j in v - ] - ) - ) - elif isinstance(v, dict): - df = v.__class__ if retain is True else dict - rv.append( - df( - ( - astuple( - kk, - tuple_factory=tuple_factory, - retain_collection_types=retain, - ) - if has(kk.__class__) - else kk, - astuple( - vv, - tuple_factory=tuple_factory, - retain_collection_types=retain, - ) - if has(vv.__class__) - else vv, - ) - for kk, vv in iteritems(v) - ) - ) - else: - rv.append(v) - else: - rv.append(v) - - return rv if tuple_factory is list else tuple_factory(rv) - - -def has(cls): - """ - Check whether *cls* is a class with ``attrs`` attributes. - - :param type cls: Class to introspect. - :raise TypeError: If *cls* is not a class. - - :rtype: bool - """ - return getattr(cls, "__attrs_attrs__", None) is not None - - -def assoc(inst, **changes): - """ - Copy *inst* and apply *changes*. - - :param inst: Instance of a class with ``attrs`` attributes. - :param changes: Keyword changes in the new copy. - - :return: A copy of inst with *changes* incorporated. - - :raise attr.exceptions.AttrsAttributeNotFoundError: If *attr_name* couldn't - be found on *cls*. - :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` - class. - - .. deprecated:: 17.1.0 - Use `evolve` instead. - """ - import warnings - - warnings.warn( - "assoc is deprecated and will be removed after 2018/01.", - DeprecationWarning, - stacklevel=2, - ) - new = copy.copy(inst) - attrs = fields(inst.__class__) - for k, v in iteritems(changes): - a = getattr(attrs, k, NOTHING) - if a is NOTHING: - raise AttrsAttributeNotFoundError( - "{k} is not an attrs attribute on {cl}.".format( - k=k, cl=new.__class__ - ) - ) - _obj_setattr(new, k, v) - return new - - -def evolve(inst, **changes): - """ - Create a new instance, based on *inst* with *changes* applied. - - :param inst: Instance of a class with ``attrs`` attributes. - :param changes: Keyword changes in the new copy. - - :return: A copy of inst with *changes* incorporated. - - :raise TypeError: If *attr_name* couldn't be found in the class - ``__init__``. - :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` - class. - - .. versionadded:: 17.1.0 - """ - cls = inst.__class__ - attrs = fields(cls) - for a in attrs: - if not a.init: - continue - attr_name = a.name # To deal with private attributes. - init_name = attr_name if attr_name[0] != "_" else attr_name[1:] - if init_name not in changes: - changes[init_name] = getattr(inst, attr_name) - - return cls(**changes) - - -def resolve_types(cls, globalns=None, localns=None): - """ - Resolve any strings and forward annotations in type annotations. - - This is only required if you need concrete types in `Attribute`'s *type* - field. In other words, you don't need to resolve your types if you only - use them for static type checking. - - With no arguments, names will be looked up in the module in which the class - was created. If this is not what you want, e.g. 
if the name only exists - inside a method, you may pass *globalns* or *localns* to specify other - dictionaries in which to look up these names. See the docs of - `typing.get_type_hints` for more details. - - :param type cls: Class to resolve. - :param Optional[dict] globalns: Dictionary containing global variables. - :param Optional[dict] localns: Dictionary containing local variables. - - :raise TypeError: If *cls* is not a class. - :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` - class. - :raise NameError: If types cannot be resolved because of missing variables. - - :returns: *cls* so you can use this function also as a class decorator. - Please note that you have to apply it **after** `attr.s`. That means - the decorator has to come in the line **before** `attr.s`. - - .. versionadded:: 20.1.0 - """ - try: - # Since calling get_type_hints is expensive we cache whether we've - # done it already. - cls.__attrs_types_resolved__ - except AttributeError: - import typing - - hints = typing.get_type_hints(cls, globalns=globalns, localns=localns) - for field in fields(cls): - if field.name in hints: - # Since fields have been frozen we must work around it. - _obj_setattr(field, "type", hints[field.name]) - cls.__attrs_types_resolved__ = True - - # Return the class so you can use it as a decorator too. - return cls diff --git a/conda_lock/_vendor/poetry/core/_vendor/attr/_make.py b/conda_lock/_vendor/poetry/core/_vendor/attr/_make.py deleted file mode 100644 index 49484f93..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/attr/_make.py +++ /dev/null @@ -1,2765 +0,0 @@ -from __future__ import absolute_import, division, print_function - -import copy -import linecache -import sys -import threading -import uuid -import warnings - -from operator import itemgetter - -from . import _config, setters -from ._compat import ( - PY2, - PYPY, - isclass, - iteritems, - metadata_proxy, - ordered_dict, - set_closure_cell, -) -from .exceptions import ( - DefaultAlreadySetError, - FrozenInstanceError, - NotAnAttrsClassError, - PythonTooOldError, - UnannotatedAttributeError, -) - - -# This is used at least twice, so cache it here. -_obj_setattr = object.__setattr__ -_init_converter_pat = "__attr_converter_%s" -_init_factory_pat = "__attr_factory_{}" -_tuple_property_pat = ( - " {attr_name} = _attrs_property(_attrs_itemgetter({index}))" -) -_classvar_prefixes = ("typing.ClassVar", "t.ClassVar", "ClassVar") -# we don't use a double-underscore prefix because that triggers -# name mangling when trying to create a slot for the field -# (when slots=True) -_hash_cache_field = "_attrs_cached_hash" - -_empty_metadata_singleton = metadata_proxy({}) - -# Unique object for unequivocal getattr() defaults. -_sentinel = object() - - -class _Nothing(object): - """ - Sentinel class to indicate the lack of a value when ``None`` is ambiguous. - - ``_Nothing`` is a singleton. There is only ever one of it. - """ - - _singleton = None - - def __new__(cls): - if _Nothing._singleton is None: - _Nothing._singleton = super(_Nothing, cls).__new__(cls) - return _Nothing._singleton - - def __repr__(self): - return "NOTHING" - - -NOTHING = _Nothing() -""" -Sentinel to indicate the lack of a value when ``None`` is ambiguous. -""" - - -class _CacheHashWrapper(int): - """ - An integer subclass that pickles / copies as None - - This is used for non-slots classes with ``cache_hash=True``, to avoid - serializing a potentially (even likely) invalid hash value. 
Since ``None`` - is the default value for uncalculated hashes, whenever this is copied, - the copy's value for the hash should automatically reset. - - See GH #613 for more details. - """ - - if PY2: - # For some reason `type(None)` isn't callable in Python 2, but we don't - # actually need a constructor for None objects, we just need any - # available function that returns None. - def __reduce__(self, _none_constructor=getattr, _args=(0, "", None)): - return _none_constructor, _args - - else: - - def __reduce__(self, _none_constructor=type(None), _args=()): - return _none_constructor, _args - - -def attrib( - default=NOTHING, - validator=None, - repr=True, - cmp=None, - hash=None, - init=True, - metadata=None, - type=None, - converter=None, - factory=None, - kw_only=False, - eq=None, - order=None, - on_setattr=None, -): - """ - Create a new attribute on a class. - - .. warning:: - - Does *not* do anything unless the class is also decorated with - `attr.s`! - - :param default: A value that is used if an ``attrs``-generated ``__init__`` - is used and no value is passed while instantiating or the attribute is - excluded using ``init=False``. - - If the value is an instance of `Factory`, its callable will be - used to construct a new value (useful for mutable data types like lists - or dicts). - - If a default is not set (or set manually to `attr.NOTHING`), a value - *must* be supplied when instantiating; otherwise a `TypeError` - will be raised. - - The default can also be set using decorator notation as shown below. - - :type default: Any value - - :param callable factory: Syntactic sugar for - ``default=attr.Factory(factory)``. - - :param validator: `callable` that is called by ``attrs``-generated - ``__init__`` methods after the instance has been initialized. They - receive the initialized instance, the `Attribute`, and the - passed value. - - The return value is *not* inspected so the validator has to throw an - exception itself. - - If a `list` is passed, its items are treated as validators and must - all pass. - - Validators can be globally disabled and re-enabled using - `get_run_validators`. - - The validator can also be set using decorator notation as shown below. - - :type validator: `callable` or a `list` of `callable`\\ s. - - :param repr: Include this attribute in the generated ``__repr__`` - method. If ``True``, include the attribute; if ``False``, omit it. By - default, the built-in ``repr()`` function is used. To override how the - attribute value is formatted, pass a ``callable`` that takes a single - value and returns a string. Note that the resulting string is used - as-is, i.e. it will be used directly *instead* of calling ``repr()`` - (the default). - :type repr: a `bool` or a `callable` to use a custom function. - :param bool eq: If ``True`` (default), include this attribute in the - generated ``__eq__`` and ``__ne__`` methods that check two instances - for equality. - :param bool order: If ``True`` (default), include this attributes in the - generated ``__lt__``, ``__le__``, ``__gt__`` and ``__ge__`` methods. - :param bool cmp: Setting to ``True`` is equivalent to setting ``eq=True, - order=True``. Deprecated in favor of *eq* and *order*. - :param Optional[bool] hash: Include this attribute in the generated - ``__hash__`` method. If ``None`` (default), mirror *eq*'s value. This - is the correct behavior according the Python spec. Setting this value - to anything else than ``None`` is *discouraged*. 
- :param bool init: Include this attribute in the generated ``__init__`` - method. It is possible to set this to ``False`` and set a default - value. In that case this attributed is unconditionally initialized - with the specified default value or factory. - :param callable converter: `callable` that is called by - ``attrs``-generated ``__init__`` methods to convert attribute's value - to the desired format. It is given the passed-in value, and the - returned value will be used as the new value of the attribute. The - value is converted before being passed to the validator, if any. - :param metadata: An arbitrary mapping, to be used by third-party - components. See `extending_metadata`. - :param type: The type of the attribute. In Python 3.6 or greater, the - preferred method to specify the type is using a variable annotation - (see `PEP 526 `_). - This argument is provided for backward compatibility. - Regardless of the approach used, the type will be stored on - ``Attribute.type``. - - Please note that ``attrs`` doesn't do anything with this metadata by - itself. You can use it as part of your own code or for - `static type checking `. - :param kw_only: Make this attribute keyword-only (Python 3+) - in the generated ``__init__`` (if ``init`` is ``False``, this - parameter is ignored). - :param on_setattr: Allows to overwrite the *on_setattr* setting from - `attr.s`. If left `None`, the *on_setattr* value from `attr.s` is used. - Set to `attr.setters.NO_OP` to run **no** `setattr` hooks for this - attribute -- regardless of the setting in `attr.s`. - :type on_setattr: `callable`, or a list of callables, or `None`, or - `attr.setters.NO_OP` - - .. versionadded:: 15.2.0 *convert* - .. versionadded:: 16.3.0 *metadata* - .. versionchanged:: 17.1.0 *validator* can be a ``list`` now. - .. versionchanged:: 17.1.0 - *hash* is ``None`` and therefore mirrors *eq* by default. - .. versionadded:: 17.3.0 *type* - .. deprecated:: 17.4.0 *convert* - .. versionadded:: 17.4.0 *converter* as a replacement for the deprecated - *convert* to achieve consistency with other noun-based arguments. - .. versionadded:: 18.1.0 - ``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``. - .. versionadded:: 18.2.0 *kw_only* - .. versionchanged:: 19.2.0 *convert* keyword argument removed - .. versionchanged:: 19.2.0 *repr* also accepts a custom callable. - .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01. - .. versionadded:: 19.2.0 *eq* and *order* - .. versionadded:: 20.1.0 *on_setattr* - .. versionchanged:: 20.3.0 *kw_only* backported to Python 2 - """ - eq, order = _determine_eq_order(cmp, eq, order, True) - - if hash is not None and hash is not True and hash is not False: - raise TypeError( - "Invalid value for hash. Must be True, False, or None." - ) - - if factory is not None: - if default is not NOTHING: - raise ValueError( - "The `default` and `factory` arguments are mutually " - "exclusive." - ) - if not callable(factory): - raise ValueError("The `factory` argument must be a callable.") - default = Factory(factory) - - if metadata is None: - metadata = {} - - # Apply syntactic sugar by auto-wrapping. 
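The `attrib()` docstring above allows list-valued *validator* and *converter* arguments; the auto-wrapping code that follows turns them into `and_()` and `pipe()` calls. A small usage sketch, assuming a standalone `attrs` install rather than the vendored copy:

```python
import attr
from attr import validators


@attr.s
class Point:
    x = attr.ib(
        converter=[int, abs],  # list is wrapped as pipe(int, abs)
        validator=[validators.instance_of(int)],  # wrapped via and_()
    )


assert Point("-3").x == 3
```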
- if isinstance(on_setattr, (list, tuple)): - on_setattr = setters.pipe(*on_setattr) - - if validator and isinstance(validator, (list, tuple)): - validator = and_(*validator) - - if converter and isinstance(converter, (list, tuple)): - converter = pipe(*converter) - - return _CountingAttr( - default=default, - validator=validator, - repr=repr, - cmp=None, - hash=hash, - init=init, - converter=converter, - metadata=metadata, - type=type, - kw_only=kw_only, - eq=eq, - order=order, - on_setattr=on_setattr, - ) - - -def _make_attr_tuple_class(cls_name, attr_names): - """ - Create a tuple subclass to hold `Attribute`s for an `attrs` class. - - The subclass is a bare tuple with properties for names. - - class MyClassAttributes(tuple): - __slots__ = () - x = property(itemgetter(0)) - """ - attr_class_name = "{}Attributes".format(cls_name) - attr_class_template = [ - "class {}(tuple):".format(attr_class_name), - " __slots__ = ()", - ] - if attr_names: - for i, attr_name in enumerate(attr_names): - attr_class_template.append( - _tuple_property_pat.format(index=i, attr_name=attr_name) - ) - else: - attr_class_template.append(" pass") - globs = {"_attrs_itemgetter": itemgetter, "_attrs_property": property} - eval(compile("\n".join(attr_class_template), "", "exec"), globs) - - return globs[attr_class_name] - - -# Tuple class for extracted attributes from a class definition. -# `base_attrs` is a subset of `attrs`. -_Attributes = _make_attr_tuple_class( - "_Attributes", - [ - # all attributes to build dunder methods for - "attrs", - # attributes that have been inherited - "base_attrs", - # map inherited attributes to their originating classes - "base_attrs_map", - ], -) - - -def _is_class_var(annot): - """ - Check whether *annot* is a typing.ClassVar. - - The string comparison hack is used to avoid evaluating all string - annotations which would put attrs-based classes at a performance - disadvantage compared to plain old classes. - """ - return str(annot).startswith(_classvar_prefixes) - - -def _has_own_attribute(cls, attrib_name): - """ - Check whether *cls* defines *attrib_name* (and doesn't just inherit it). - - Requires Python 3. - """ - attr = getattr(cls, attrib_name, _sentinel) - if attr is _sentinel: - return False - - for base_cls in cls.__mro__[1:]: - a = getattr(base_cls, attrib_name, None) - if attr is a: - return False - - return True - - -def _get_annotations(cls): - """ - Get annotations for *cls*. - """ - if _has_own_attribute(cls, "__annotations__"): - return cls.__annotations__ - - return {} - - -def _counter_getter(e): - """ - Key function for sorting to avoid re-creating a lambda for every class. - """ - return e[1].counter - - -def _collect_base_attrs(cls, taken_attr_names): - """ - Collect attr.ibs from base classes of *cls*, except *taken_attr_names*. - """ - base_attrs = [] - base_attr_map = {} # A dictionary of base attrs to their classes. - - # Traverse the MRO and collect attributes. - for base_cls in reversed(cls.__mro__[1:-1]): - for a in getattr(base_cls, "__attrs_attrs__", []): - if a.inherited or a.name in taken_attr_names: - continue - - a = a.evolve(inherited=True) - base_attrs.append(a) - base_attr_map[a.name] = base_cls - - # For each name, only keep the freshest definition i.e. the furthest at the - # back. base_attr_map is fine because it gets overwritten with every new - # instance. 
- filtered = [] - seen = set() - for a in reversed(base_attrs): - if a.name in seen: - continue - filtered.insert(0, a) - seen.add(a.name) - - return filtered, base_attr_map - - -def _collect_base_attrs_broken(cls, taken_attr_names): - """ - Collect attr.ibs from base classes of *cls*, except *taken_attr_names*. - - N.B. *taken_attr_names* will be mutated. - - Adhere to the old incorrect behavior. - - Notably it collects from the front and considers inherited attributes which - leads to the buggy behavior reported in #428. - """ - base_attrs = [] - base_attr_map = {} # A dictionary of base attrs to their classes. - - # Traverse the MRO and collect attributes. - for base_cls in cls.__mro__[1:-1]: - for a in getattr(base_cls, "__attrs_attrs__", []): - if a.name in taken_attr_names: - continue - - a = a.evolve(inherited=True) - taken_attr_names.add(a.name) - base_attrs.append(a) - base_attr_map[a.name] = base_cls - - return base_attrs, base_attr_map - - -def _transform_attrs( - cls, these, auto_attribs, kw_only, collect_by_mro, field_transformer -): - """ - Transform all `_CountingAttr`s on a class into `Attribute`s. - - If *these* is passed, use that and don't look for them on the class. - - *collect_by_mro* is True, collect them in the correct MRO order, otherwise - use the old -- incorrect -- order. See #428. - - Return an `_Attributes`. - """ - cd = cls.__dict__ - anns = _get_annotations(cls) - - if these is not None: - ca_list = [(name, ca) for name, ca in iteritems(these)] - - if not isinstance(these, ordered_dict): - ca_list.sort(key=_counter_getter) - elif auto_attribs is True: - ca_names = { - name - for name, attr in cd.items() - if isinstance(attr, _CountingAttr) - } - ca_list = [] - annot_names = set() - for attr_name, type in anns.items(): - if _is_class_var(type): - continue - annot_names.add(attr_name) - a = cd.get(attr_name, NOTHING) - - if not isinstance(a, _CountingAttr): - if a is NOTHING: - a = attrib() - else: - a = attrib(default=a) - ca_list.append((attr_name, a)) - - unannotated = ca_names - annot_names - if len(unannotated) > 0: - raise UnannotatedAttributeError( - "The following `attr.ib`s lack a type annotation: " - + ", ".join( - sorted(unannotated, key=lambda n: cd.get(n).counter) - ) - + "." - ) - else: - ca_list = sorted( - ( - (name, attr) - for name, attr in cd.items() - if isinstance(attr, _CountingAttr) - ), - key=lambda e: e[1].counter, - ) - - own_attrs = [ - Attribute.from_counting_attr( - name=attr_name, ca=ca, type=anns.get(attr_name) - ) - for attr_name, ca in ca_list - ] - - if collect_by_mro: - base_attrs, base_attr_map = _collect_base_attrs( - cls, {a.name for a in own_attrs} - ) - else: - base_attrs, base_attr_map = _collect_base_attrs_broken( - cls, {a.name for a in own_attrs} - ) - - attr_names = [a.name for a in base_attrs + own_attrs] - - AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names) - - if kw_only: - own_attrs = [a.evolve(kw_only=True) for a in own_attrs] - base_attrs = [a.evolve(kw_only=True) for a in base_attrs] - - attrs = AttrsClass(base_attrs + own_attrs) - - # Mandatory vs non-mandatory attr order only matters when they are part of - # the __init__ signature and when they aren't kw_only (which are moved to - # the end and can be mandatory or non-mandatory in any order, as they will - # be specified as keyword args anyway). 
Check the order of those attrs: - had_default = False - for a in (a for a in attrs if a.init is not False and a.kw_only is False): - if had_default is True and a.default is NOTHING: - raise ValueError( - "No mandatory attributes allowed after an attribute with a " - "default value or factory. Attribute in question: %r" % (a,) - ) - - if had_default is False and a.default is not NOTHING: - had_default = True - - if field_transformer is not None: - attrs = field_transformer(cls, attrs) - return _Attributes((attrs, base_attrs, base_attr_map)) - - -if PYPY: - - def _frozen_setattrs(self, name, value): - """ - Attached to frozen classes as __setattr__. - """ - if isinstance(self, BaseException) and name in ( - "__cause__", - "__context__", - ): - BaseException.__setattr__(self, name, value) - return - - raise FrozenInstanceError() - - -else: - - def _frozen_setattrs(self, name, value): - """ - Attached to frozen classes as __setattr__. - """ - raise FrozenInstanceError() - - -def _frozen_delattrs(self, name): - """ - Attached to frozen classes as __delattr__. - """ - raise FrozenInstanceError() - - -class _ClassBuilder(object): - """ - Iteratively build *one* class. - """ - - __slots__ = ( - "_attr_names", - "_attrs", - "_base_attr_map", - "_base_names", - "_cache_hash", - "_cls", - "_cls_dict", - "_delete_attribs", - "_frozen", - "_has_post_init", - "_is_exc", - "_on_setattr", - "_slots", - "_weakref_slot", - "_has_own_setattr", - "_has_custom_setattr", - ) - - def __init__( - self, - cls, - these, - slots, - frozen, - weakref_slot, - getstate_setstate, - auto_attribs, - kw_only, - cache_hash, - is_exc, - collect_by_mro, - on_setattr, - has_custom_setattr, - field_transformer, - ): - attrs, base_attrs, base_map = _transform_attrs( - cls, - these, - auto_attribs, - kw_only, - collect_by_mro, - field_transformer, - ) - - self._cls = cls - self._cls_dict = dict(cls.__dict__) if slots else {} - self._attrs = attrs - self._base_names = set(a.name for a in base_attrs) - self._base_attr_map = base_map - self._attr_names = tuple(a.name for a in attrs) - self._slots = slots - self._frozen = frozen - self._weakref_slot = weakref_slot - self._cache_hash = cache_hash - self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False)) - self._delete_attribs = not bool(these) - self._is_exc = is_exc - self._on_setattr = on_setattr - - self._has_custom_setattr = has_custom_setattr - self._has_own_setattr = False - - self._cls_dict["__attrs_attrs__"] = self._attrs - - if frozen: - self._cls_dict["__setattr__"] = _frozen_setattrs - self._cls_dict["__delattr__"] = _frozen_delattrs - - self._has_own_setattr = True - - if getstate_setstate: - ( - self._cls_dict["__getstate__"], - self._cls_dict["__setstate__"], - ) = self._make_getstate_setstate() - - def __repr__(self): - return "<_ClassBuilder(cls={cls})>".format(cls=self._cls.__name__) - - def build_class(self): - """ - Finalize class based on the accumulated configuration. - - Builder cannot be used after calling this method. - """ - if self._slots is True: - return self._create_slots_class() - else: - return self._patch_original_class() - - def _patch_original_class(self): - """ - Apply accumulated methods and return the class. - """ - cls = self._cls - base_names = self._base_names - - # Clean class of attribute definitions (`attr.ib()`s). 
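The ordering check in `_transform_attrs` above rejects a mandatory attribute that follows one with a default, unless it is keyword-only. A sketch of both cases, assuming a standalone `attrs` install:

```python
import attr

try:

    @attr.s
    class Broken:
        a = attr.ib(default=1)
        b = attr.ib()  # mandatory after a default: rejected

except ValueError:
    pass


@attr.s
class Fine:
    a = attr.ib(default=1)
    b = attr.ib(kw_only=True)  # keyword-only attributes are exempt


Fine(b=2)
```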
- if self._delete_attribs: - for name in self._attr_names: - if ( - name not in base_names - and getattr(cls, name, _sentinel) is not _sentinel - ): - try: - delattr(cls, name) - except AttributeError: - # This can happen if a base class defines a class - # variable and we want to set an attribute with the - # same name by using only a type annotation. - pass - - # Attach our dunder methods. - for name, value in self._cls_dict.items(): - setattr(cls, name, value) - - # If we've inherited an attrs __setattr__ and don't write our own, - # reset it to object's. - if not self._has_own_setattr and getattr( - cls, "__attrs_own_setattr__", False - ): - cls.__attrs_own_setattr__ = False - - if not self._has_custom_setattr: - cls.__setattr__ = object.__setattr__ - - return cls - - def _create_slots_class(self): - """ - Build and return a new class with a `__slots__` attribute. - """ - base_names = self._base_names - cd = { - k: v - for k, v in iteritems(self._cls_dict) - if k not in tuple(self._attr_names) + ("__dict__", "__weakref__") - } - - # If our class doesn't have its own implementation of __setattr__ - # (either from the user or by us), check the bases, if one of them has - # an attrs-made __setattr__, that needs to be reset. We don't walk the - # MRO because we only care about our immediate base classes. - # XXX: This can be confused by subclassing a slotted attrs class with - # XXX: a non-attrs class and subclass the resulting class with an attrs - # XXX: class. See `test_slotted_confused` for details. For now that's - # XXX: OK with us. - if not self._has_own_setattr: - cd["__attrs_own_setattr__"] = False - - if not self._has_custom_setattr: - for base_cls in self._cls.__bases__: - if base_cls.__dict__.get("__attrs_own_setattr__", False): - cd["__setattr__"] = object.__setattr__ - break - - # Traverse the MRO to check for an existing __weakref__. - weakref_inherited = False - for base_cls in self._cls.__mro__[1:-1]: - if base_cls.__dict__.get("__weakref__", None) is not None: - weakref_inherited = True - break - - names = self._attr_names - if ( - self._weakref_slot - and "__weakref__" not in getattr(self._cls, "__slots__", ()) - and "__weakref__" not in names - and not weakref_inherited - ): - names += ("__weakref__",) - - # We only add the names of attributes that aren't inherited. - # Setting __slots__ to inherited attributes wastes memory. - slot_names = [name for name in names if name not in base_names] - if self._cache_hash: - slot_names.append(_hash_cache_field) - cd["__slots__"] = tuple(slot_names) - - qualname = getattr(self._cls, "__qualname__", None) - if qualname is not None: - cd["__qualname__"] = qualname - - # Create new class based on old class and our methods. - cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd) - - # The following is a fix for - # https://github.com/python-attrs/attrs/issues/102. On Python 3, - # if a method mentions `__class__` or uses the no-arg super(), the - # compiler will bake a reference to the class in the method itself - # as `method.__closure__`. Since we replace the class with a - # clone, we rewrite these references so it keeps working. - for item in cls.__dict__.values(): - if isinstance(item, (classmethod, staticmethod)): - # Class- and staticmethods hide their functions inside. - # These might need to be rewritten as well. - closure_cells = getattr(item.__func__, "__closure__", None) - else: - closure_cells = getattr(item, "__closure__", None) - - if not closure_cells: # Catch None or the empty list. 
- continue - for cell in closure_cells: - try: - match = cell.cell_contents is self._cls - except ValueError: # ValueError: Cell is empty - pass - else: - if match: - set_closure_cell(cell, cls) - - return cls - - def add_repr(self, ns): - self._cls_dict["__repr__"] = self._add_method_dunders( - _make_repr(self._attrs, ns=ns) - ) - return self - - def add_str(self): - repr = self._cls_dict.get("__repr__") - if repr is None: - raise ValueError( - "__str__ can only be generated if a __repr__ exists." - ) - - def __str__(self): - return self.__repr__() - - self._cls_dict["__str__"] = self._add_method_dunders(__str__) - return self - - def _make_getstate_setstate(self): - """ - Create custom __setstate__ and __getstate__ methods. - """ - # __weakref__ is not writable. - state_attr_names = tuple( - an for an in self._attr_names if an != "__weakref__" - ) - - def slots_getstate(self): - """ - Automatically created by attrs. - """ - return tuple(getattr(self, name) for name in state_attr_names) - - hash_caching_enabled = self._cache_hash - - def slots_setstate(self, state): - """ - Automatically created by attrs. - """ - __bound_setattr = _obj_setattr.__get__(self, Attribute) - for name, value in zip(state_attr_names, state): - __bound_setattr(name, value) - - # The hash code cache is not included when the object is - # serialized, but it still needs to be initialized to None to - # indicate that the first call to __hash__ should be a cache - # miss. - if hash_caching_enabled: - __bound_setattr(_hash_cache_field, None) - - return slots_getstate, slots_setstate - - def make_unhashable(self): - self._cls_dict["__hash__"] = None - return self - - def add_hash(self): - self._cls_dict["__hash__"] = self._add_method_dunders( - _make_hash( - self._cls, - self._attrs, - frozen=self._frozen, - cache_hash=self._cache_hash, - ) - ) - - return self - - def add_init(self): - self._cls_dict["__init__"] = self._add_method_dunders( - _make_init( - self._cls, - self._attrs, - self._has_post_init, - self._frozen, - self._slots, - self._cache_hash, - self._base_attr_map, - self._is_exc, - self._on_setattr is not None - and self._on_setattr is not setters.NO_OP, - ) - ) - - return self - - def add_eq(self): - cd = self._cls_dict - - cd["__eq__"] = self._add_method_dunders( - _make_eq(self._cls, self._attrs) - ) - cd["__ne__"] = self._add_method_dunders(_make_ne()) - - return self - - def add_order(self): - cd = self._cls_dict - - cd["__lt__"], cd["__le__"], cd["__gt__"], cd["__ge__"] = ( - self._add_method_dunders(meth) - for meth in _make_order(self._cls, self._attrs) - ) - - return self - - def add_setattr(self): - if self._frozen: - return self - - sa_attrs = {} - for a in self._attrs: - on_setattr = a.on_setattr or self._on_setattr - if on_setattr and on_setattr is not setters.NO_OP: - sa_attrs[a.name] = a, on_setattr - - if not sa_attrs: - return self - - if self._has_custom_setattr: - # We need to write a __setattr__ but there already is one! - raise ValueError( - "Can't combine custom __setattr__ with on_setattr hooks." 
- ) - - # docstring comes from _add_method_dunders - def __setattr__(self, name, val): - try: - a, hook = sa_attrs[name] - except KeyError: - nval = val - else: - nval = hook(self, a, val) - - _obj_setattr(self, name, nval) - - self._cls_dict["__attrs_own_setattr__"] = True - self._cls_dict["__setattr__"] = self._add_method_dunders(__setattr__) - self._has_own_setattr = True - - return self - - def _add_method_dunders(self, method): - """ - Add __module__ and __qualname__ to a *method* if possible. - """ - try: - method.__module__ = self._cls.__module__ - except AttributeError: - pass - - try: - method.__qualname__ = ".".join( - (self._cls.__qualname__, method.__name__) - ) - except AttributeError: - pass - - try: - method.__doc__ = "Method generated by attrs for class %s." % ( - self._cls.__qualname__, - ) - except AttributeError: - pass - - return method - - -_CMP_DEPRECATION = ( - "The usage of `cmp` is deprecated and will be removed on or after " - "2021-06-01. Please use `eq` and `order` instead." -) - - -def _determine_eq_order(cmp, eq, order, default_eq): - """ - Validate the combination of *cmp*, *eq*, and *order*. Derive the effective - values of eq and order. If *eq* is None, set it to *default_eq*. - """ - if cmp is not None and any((eq is not None, order is not None)): - raise ValueError("Don't mix `cmp` with `eq' and `order`.") - - # cmp takes precedence due to bw-compatibility. - if cmp is not None: - warnings.warn(_CMP_DEPRECATION, DeprecationWarning, stacklevel=3) - - return cmp, cmp - - # If left None, equality is set to the specified default and ordering - # mirrors equality. - if eq is None: - eq = default_eq - - if order is None: - order = eq - - if eq is False and order is True: - raise ValueError("`order` can only be True if `eq` is True too.") - - return eq, order - - -def _determine_whether_to_implement( - cls, flag, auto_detect, dunders, default=True -): - """ - Check whether we should implement a set of methods for *cls*. - - *flag* is the argument passed into @attr.s like 'init', *auto_detect* the - same as passed into @attr.s and *dunders* is a tuple of attribute names - whose presence signal that the user has implemented it themselves. - - Return *default* if no reason for either for or against is found. - - auto_detect must be False on Python 2. - """ - if flag is True or flag is False: - return flag - - if flag is None and auto_detect is False: - return default - - # Logically, flag is None and auto_detect is True here. - for dunder in dunders: - if _has_own_attribute(cls, dunder): - return False - - return default - - -def attrs( - maybe_cls=None, - these=None, - repr_ns=None, - repr=None, - cmp=None, - hash=None, - init=None, - slots=False, - frozen=False, - weakref_slot=True, - str=False, - auto_attribs=False, - kw_only=False, - cache_hash=False, - auto_exc=False, - eq=None, - order=None, - auto_detect=False, - collect_by_mro=False, - getstate_setstate=None, - on_setattr=None, - field_transformer=None, -): - r""" - A class decorator that adds `dunder - `_\ -methods according to the - specified attributes using `attr.ib` or the *these* argument. - - :param these: A dictionary of name to `attr.ib` mappings. This is - useful to avoid the definition of your attributes within the class body - because you can't (e.g. if you want to add ``__repr__`` methods to - Django models) or don't want to. - - If *these* is not ``None``, ``attrs`` will *not* search the class body - for attributes and will *not* remove any attributes from it. 
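The *these* argument described above attaches attributes to a class without touching its body. A minimal sketch, assuming a standalone `attrs` install:

```python
import attr


class Vanilla:
    """A class body we cannot, or do not want to, modify."""


Decorated = attr.s(these={"x": attr.ib(), "y": attr.ib(default=0)})(Vanilla)

obj = Decorated(x=1)
assert (obj.x, obj.y) == (1, 0)
```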
- - If *these* is an ordered dict (`dict` on Python 3.6+, - `collections.OrderedDict` otherwise), the order is deduced from - the order of the attributes inside *these*. Otherwise the order - of the definition of the attributes is used. - - :type these: `dict` of `str` to `attr.ib` - - :param str repr_ns: When using nested classes, there's no way in Python 2 - to automatically detect that. Therefore it's possible to set the - namespace explicitly for a more meaningful ``repr`` output. - :param bool auto_detect: Instead of setting the *init*, *repr*, *eq*, - *order*, and *hash* arguments explicitly, assume they are set to - ``True`` **unless any** of the involved methods for one of the - arguments is implemented in the *current* class (i.e. it is *not* - inherited from some base class). - - So for example by implementing ``__eq__`` on a class yourself, - ``attrs`` will deduce ``eq=False`` and won't create *neither* - ``__eq__`` *nor* ``__ne__`` (but Python classes come with a sensible - ``__ne__`` by default, so it *should* be enough to only implement - ``__eq__`` in most cases). - - .. warning:: - - If you prevent ``attrs`` from creating the ordering methods for you - (``order=False``, e.g. by implementing ``__le__``), it becomes - *your* responsibility to make sure its ordering is sound. The best - way is to use the `functools.total_ordering` decorator. - - - Passing ``True`` or ``False`` to *init*, *repr*, *eq*, *order*, - *cmp*, or *hash* overrides whatever *auto_detect* would determine. - - *auto_detect* requires Python 3. Setting it ``True`` on Python 2 raises - a `PythonTooOldError`. - - :param bool repr: Create a ``__repr__`` method with a human readable - representation of ``attrs`` attributes.. - :param bool str: Create a ``__str__`` method that is identical to - ``__repr__``. This is usually not necessary except for - `Exception`\ s. - :param Optional[bool] eq: If ``True`` or ``None`` (default), add ``__eq__`` - and ``__ne__`` methods that check two instances for equality. - - They compare the instances as if they were tuples of their ``attrs`` - attributes if and only if the types of both classes are *identical*! - :param Optional[bool] order: If ``True``, add ``__lt__``, ``__le__``, - ``__gt__``, and ``__ge__`` methods that behave like *eq* above and - allow instances to be ordered. If ``None`` (default) mirror value of - *eq*. - :param Optional[bool] cmp: Setting to ``True`` is equivalent to setting - ``eq=True, order=True``. Deprecated in favor of *eq* and *order*, has - precedence over them for backward-compatibility though. Must not be - mixed with *eq* or *order*. - :param Optional[bool] hash: If ``None`` (default), the ``__hash__`` method - is generated according how *eq* and *frozen* are set. - - 1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you. - 2. If *eq* is True and *frozen* is False, ``__hash__`` will be set to - None, marking it unhashable (which it is). - 3. If *eq* is False, ``__hash__`` will be left untouched meaning the - ``__hash__`` method of the base class will be used (if base class is - ``object``, this means it will fall back to id-based hashing.). - - Although not recommended, you can decide for yourself and force - ``attrs`` to create one (e.g. if the class is immutable even though you - didn't freeze it programmatically) by passing ``True`` or not. Both of - these cases are rather special and should be used carefully. 
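The hash rules just enumerated mean that `eq=True` plus `frozen=True` yields value-based hashing, while `eq=True` alone marks the class unhashable. A sketch:

```python
import attr


@attr.s(frozen=True)
class Key:
    name = attr.ib()


assert hash(Key("a")) == hash(Key("a"))  # case 1: eq and frozen


@attr.s
class Mutable:
    name = attr.ib()


assert Mutable.__hash__ is None  # case 2: eq without frozen
```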
- - See our documentation on `hashing`, Python's documentation on - `object.__hash__`, and the `GitHub issue that led to the default \ - behavior `_ for more - details. - :param bool init: Create a ``__init__`` method that initializes the - ``attrs`` attributes. Leading underscores are stripped for the - argument name. If a ``__attrs_post_init__`` method exists on the - class, it will be called after the class is fully initialized. - :param bool slots: Create a `slotted class ` that's more - memory-efficient. Slotted classes are generally superior to the default - dict classes, but have some gotchas you should know about, so we - encourage you to read the `glossary entry `. - :param bool frozen: Make instances immutable after initialization. If - someone attempts to modify a frozen instance, - `attr.exceptions.FrozenInstanceError` is raised. - - .. note:: - - 1. This is achieved by installing a custom ``__setattr__`` method - on your class, so you can't implement your own. - - 2. True immutability is impossible in Python. - - 3. This *does* have a minor a runtime performance `impact - ` when initializing new instances. In other words: - ``__init__`` is slightly slower with ``frozen=True``. - - 4. If a class is frozen, you cannot modify ``self`` in - ``__attrs_post_init__`` or a self-written ``__init__``. You can - circumvent that limitation by using - ``object.__setattr__(self, "attribute_name", value)``. - - 5. Subclasses of a frozen class are frozen too. - - :param bool weakref_slot: Make instances weak-referenceable. This has no - effect unless ``slots`` is also enabled. - :param bool auto_attribs: If ``True``, collect `PEP 526`_-annotated - attributes (Python 3.6 and later only) from the class body. - - In this case, you **must** annotate every field. If ``attrs`` - encounters a field that is set to an `attr.ib` but lacks a type - annotation, an `attr.exceptions.UnannotatedAttributeError` is - raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't - want to set a type. - - If you assign a value to those attributes (e.g. ``x: int = 42``), that - value becomes the default value like if it were passed using - ``attr.ib(default=42)``. Passing an instance of `Factory` also - works as expected. - - Attributes annotated as `typing.ClassVar`, and attributes that are - neither annotated nor set to an `attr.ib` are **ignored**. - - .. _`PEP 526`: https://www.python.org/dev/peps/pep-0526/ - :param bool kw_only: Make all attributes keyword-only (Python 3+) - in the generated ``__init__`` (if ``init`` is ``False``, this - parameter is ignored). - :param bool cache_hash: Ensure that the object's hash code is computed - only once and stored on the object. If this is set to ``True``, - hashing must be either explicitly or implicitly enabled for this - class. If the hash code is cached, avoid any reassignments of - fields involved in hash code computation or mutations of the objects - those fields point to after object creation. If such changes occur, - the behavior of the object's hash code is undefined. - :param bool auto_exc: If the class subclasses `BaseException` - (which implicitly includes any subclass of any exception), the - following happens to behave like a well-behaved Python exceptions - class: - - - the values for *eq*, *order*, and *hash* are ignored and the - instances compare and hash by the instance's ids (N.B. ``attrs`` will - *not* remove existing implementations of ``__hash__`` or the equality - methods. 
It just won't add own ones.), - - all attributes that are either passed into ``__init__`` or have a - default value are additionally available as a tuple in the ``args`` - attribute, - - the value of *str* is ignored leaving ``__str__`` to base classes. - :param bool collect_by_mro: Setting this to `True` fixes the way ``attrs`` - collects attributes from base classes. The default behavior is - incorrect in certain cases of multiple inheritance. It should be on by - default but is kept off for backward-compatability. - - See issue `#428 `_ for - more details. - - :param Optional[bool] getstate_setstate: - .. note:: - This is usually only interesting for slotted classes and you should - probably just set *auto_detect* to `True`. - - If `True`, ``__getstate__`` and - ``__setstate__`` are generated and attached to the class. This is - necessary for slotted classes to be pickleable. If left `None`, it's - `True` by default for slotted classes and ``False`` for dict classes. - - If *auto_detect* is `True`, and *getstate_setstate* is left `None`, - and **either** ``__getstate__`` or ``__setstate__`` is detected directly - on the class (i.e. not inherited), it is set to `False` (this is usually - what you want). - - :param on_setattr: A callable that is run whenever the user attempts to set - an attribute (either by assignment like ``i.x = 42`` or by using - `setattr` like ``setattr(i, "x", 42)``). It receives the same arguments - as validators: the instance, the attribute that is being modified, and - the new value. - - If no exception is raised, the attribute is set to the return value of - the callable. - - If a list of callables is passed, they're automatically wrapped in an - `attr.setters.pipe`. - - :param Optional[callable] field_transformer: - A function that is called with the original class object and all - fields right before ``attrs`` finalizes the class. You can use - this, e.g., to automatically add converters or validators to - fields based on their types. See `transform-fields` for more details. - - .. versionadded:: 16.0.0 *slots* - .. versionadded:: 16.1.0 *frozen* - .. versionadded:: 16.3.0 *str* - .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``. - .. versionchanged:: 17.1.0 - *hash* supports ``None`` as value which is also the default now. - .. versionadded:: 17.3.0 *auto_attribs* - .. versionchanged:: 18.1.0 - If *these* is passed, no attributes are deleted from the class body. - .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained. - .. versionadded:: 18.2.0 *weakref_slot* - .. deprecated:: 18.2.0 - ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a - `DeprecationWarning` if the classes compared are subclasses of - each other. ``__eq`` and ``__ne__`` never tried to compared subclasses - to each other. - .. versionchanged:: 19.2.0 - ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider - subclasses comparable anymore. - .. versionadded:: 18.2.0 *kw_only* - .. versionadded:: 18.2.0 *cache_hash* - .. versionadded:: 19.1.0 *auto_exc* - .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01. - .. versionadded:: 19.2.0 *eq* and *order* - .. versionadded:: 20.1.0 *auto_detect* - .. versionadded:: 20.1.0 *collect_by_mro* - .. versionadded:: 20.1.0 *getstate_setstate* - .. versionadded:: 20.1.0 *on_setattr* - .. versionadded:: 20.3.0 *field_transformer* - """ - if auto_detect and PY2: - raise PythonTooOldError( - "auto_detect only works on Python 3 and later." 
- ) - - eq_, order_ = _determine_eq_order(cmp, eq, order, None) - hash_ = hash # work around the lack of nonlocal - - if isinstance(on_setattr, (list, tuple)): - on_setattr = setters.pipe(*on_setattr) - - def wrap(cls): - - if getattr(cls, "__class__", None) is None: - raise TypeError("attrs only works with new-style classes.") - - is_frozen = frozen or _has_frozen_base_class(cls) - is_exc = auto_exc is True and issubclass(cls, BaseException) - has_own_setattr = auto_detect and _has_own_attribute( - cls, "__setattr__" - ) - - if has_own_setattr and is_frozen: - raise ValueError("Can't freeze a class with a custom __setattr__.") - - builder = _ClassBuilder( - cls, - these, - slots, - is_frozen, - weakref_slot, - _determine_whether_to_implement( - cls, - getstate_setstate, - auto_detect, - ("__getstate__", "__setstate__"), - default=slots, - ), - auto_attribs, - kw_only, - cache_hash, - is_exc, - collect_by_mro, - on_setattr, - has_own_setattr, - field_transformer, - ) - if _determine_whether_to_implement( - cls, repr, auto_detect, ("__repr__",) - ): - builder.add_repr(repr_ns) - if str is True: - builder.add_str() - - eq = _determine_whether_to_implement( - cls, eq_, auto_detect, ("__eq__", "__ne__") - ) - if not is_exc and eq is True: - builder.add_eq() - if not is_exc and _determine_whether_to_implement( - cls, order_, auto_detect, ("__lt__", "__le__", "__gt__", "__ge__") - ): - builder.add_order() - - builder.add_setattr() - - if ( - hash_ is None - and auto_detect is True - and _has_own_attribute(cls, "__hash__") - ): - hash = False - else: - hash = hash_ - if hash is not True and hash is not False and hash is not None: - # Can't use `hash in` because 1 == True for example. - raise TypeError( - "Invalid value for hash. Must be True, False, or None." - ) - elif hash is False or (hash is None and eq is False) or is_exc: - # Don't do anything. Should fall back to __object__'s __hash__ - # which is by id. - if cache_hash: - raise TypeError( - "Invalid value for cache_hash. To use hash caching," - " hashing must be either explicitly or implicitly " - "enabled." - ) - elif hash is True or ( - hash is None and eq is True and is_frozen is True - ): - # Build a __hash__ if told so, or if it's safe. - builder.add_hash() - else: - # Raise TypeError on attempts to hash. - if cache_hash: - raise TypeError( - "Invalid value for cache_hash. To use hash caching," - " hashing must be either explicitly or implicitly " - "enabled." - ) - builder.make_unhashable() - - if _determine_whether_to_implement( - cls, init, auto_detect, ("__init__",) - ): - builder.add_init() - else: - if cache_hash: - raise TypeError( - "Invalid value for cache_hash. To use hash caching," - " init must be True." - ) - - return builder.build_class() - - # maybe_cls's type depends on the usage of the decorator. It's a class - # if it's used as `@attrs` but ``None`` if used as `@attrs()`. - if maybe_cls is None: - return wrap - else: - return wrap(maybe_cls) - - -_attrs = attrs -""" -Internal alias so we can use it in functions that take an argument called -*attrs*. -""" - - -if PY2: - - def _has_frozen_base_class(cls): - """ - Check whether *cls* has a frozen ancestor by looking at its - __setattr__. - """ - return ( - getattr(cls.__setattr__, "__module__", None) - == _frozen_setattrs.__module__ - and cls.__setattr__.__name__ == _frozen_setattrs.__name__ - ) - - -else: - - def _has_frozen_base_class(cls): - """ - Check whether *cls* has a frozen ancestor by looking at its - __setattr__. 
- """ - return cls.__setattr__ == _frozen_setattrs - - -def _attrs_to_tuple(obj, attrs): - """ - Create a tuple of all values of *obj*'s *attrs*. - """ - return tuple(getattr(obj, a.name) for a in attrs) - - -def _generate_unique_filename(cls, func_name): - """ - Create a "filename" suitable for a function being generated. - """ - unique_id = uuid.uuid4() - extra = "" - count = 1 - - while True: - unique_filename = "".format( - func_name, - cls.__module__, - getattr(cls, "__qualname__", cls.__name__), - extra, - ) - # To handle concurrency we essentially "reserve" our spot in - # the linecache with a dummy line. The caller can then - # set this value correctly. - cache_line = (1, None, (str(unique_id),), unique_filename) - if ( - linecache.cache.setdefault(unique_filename, cache_line) - == cache_line - ): - return unique_filename - - # Looks like this spot is taken. Try again. - count += 1 - extra = "-{0}".format(count) - - -def _make_hash(cls, attrs, frozen, cache_hash): - attrs = tuple( - a for a in attrs if a.hash is True or (a.hash is None and a.eq is True) - ) - - tab = " " - - unique_filename = _generate_unique_filename(cls, "hash") - type_hash = hash(unique_filename) - - hash_def = "def __hash__(self" - hash_func = "hash((" - closing_braces = "))" - if not cache_hash: - hash_def += "):" - else: - if not PY2: - hash_def += ", *" - - hash_def += ( - ", _cache_wrapper=" - + "__import__('attr._make')._make._CacheHashWrapper):" - ) - hash_func = "_cache_wrapper(" + hash_func - closing_braces += ")" - - method_lines = [hash_def] - - def append_hash_computation_lines(prefix, indent): - """ - Generate the code for actually computing the hash code. - Below this will either be returned directly or used to compute - a value which is then cached, depending on the value of cache_hash - """ - - method_lines.extend( - [ - indent + prefix + hash_func, - indent + " %d," % (type_hash,), - ] - ) - - for a in attrs: - method_lines.append(indent + " self.%s," % a.name) - - method_lines.append(indent + " " + closing_braces) - - if cache_hash: - method_lines.append(tab + "if self.%s is None:" % _hash_cache_field) - if frozen: - append_hash_computation_lines( - "object.__setattr__(self, '%s', " % _hash_cache_field, tab * 2 - ) - method_lines.append(tab * 2 + ")") # close __setattr__ - else: - append_hash_computation_lines( - "self.%s = " % _hash_cache_field, tab * 2 - ) - method_lines.append(tab + "return self.%s" % _hash_cache_field) - else: - append_hash_computation_lines("return ", tab) - - script = "\n".join(method_lines) - globs = {} - locs = {} - bytecode = compile(script, unique_filename, "exec") - eval(bytecode, globs, locs) - - # In order of debuggers like PDB being able to step through the code, - # we add a fake linecache entry. - linecache.cache[unique_filename] = ( - len(script), - None, - script.splitlines(True), - unique_filename, - ) - - return locs["__hash__"] - - -def _add_hash(cls, attrs): - """ - Add a hash method to *cls*. - """ - cls.__hash__ = _make_hash(cls, attrs, frozen=False, cache_hash=False) - return cls - - -def _make_ne(): - """ - Create __ne__ method. - """ - - def __ne__(self, other): - """ - Check equality and either forward a NotImplemented or - return the result negated. - """ - result = self.__eq__(other) - if result is NotImplemented: - return NotImplemented - - return not result - - return __ne__ - - -def _make_eq(cls, attrs): - """ - Create __eq__ method for *cls* with *attrs*. 
- """ - attrs = [a for a in attrs if a.eq] - - unique_filename = _generate_unique_filename(cls, "eq") - lines = [ - "def __eq__(self, other):", - " if other.__class__ is not self.__class__:", - " return NotImplemented", - ] - # We can't just do a big self.x = other.x and... clause due to - # irregularities like nan == nan is false but (nan,) == (nan,) is true. - if attrs: - lines.append(" return (") - others = [" ) == ("] - for a in attrs: - lines.append(" self.%s," % (a.name,)) - others.append(" other.%s," % (a.name,)) - - lines += others + [" )"] - else: - lines.append(" return True") - - script = "\n".join(lines) - globs = {} - locs = {} - bytecode = compile(script, unique_filename, "exec") - eval(bytecode, globs, locs) - - # In order of debuggers like PDB being able to step through the code, - # we add a fake linecache entry. - linecache.cache[unique_filename] = ( - len(script), - None, - script.splitlines(True), - unique_filename, - ) - return locs["__eq__"] - - -def _make_order(cls, attrs): - """ - Create ordering methods for *cls* with *attrs*. - """ - attrs = [a for a in attrs if a.order] - - def attrs_to_tuple(obj): - """ - Save us some typing. - """ - return _attrs_to_tuple(obj, attrs) - - def __lt__(self, other): - """ - Automatically created by attrs. - """ - if other.__class__ is self.__class__: - return attrs_to_tuple(self) < attrs_to_tuple(other) - - return NotImplemented - - def __le__(self, other): - """ - Automatically created by attrs. - """ - if other.__class__ is self.__class__: - return attrs_to_tuple(self) <= attrs_to_tuple(other) - - return NotImplemented - - def __gt__(self, other): - """ - Automatically created by attrs. - """ - if other.__class__ is self.__class__: - return attrs_to_tuple(self) > attrs_to_tuple(other) - - return NotImplemented - - def __ge__(self, other): - """ - Automatically created by attrs. - """ - if other.__class__ is self.__class__: - return attrs_to_tuple(self) >= attrs_to_tuple(other) - - return NotImplemented - - return __lt__, __le__, __gt__, __ge__ - - -def _add_eq(cls, attrs=None): - """ - Add equality methods to *cls* with *attrs*. - """ - if attrs is None: - attrs = cls.__attrs_attrs__ - - cls.__eq__ = _make_eq(cls, attrs) - cls.__ne__ = _make_ne() - - return cls - - -_already_repring = threading.local() - - -def _make_repr(attrs, ns): - """ - Make a repr method that includes relevant *attrs*, adding *ns* to the full - name. - """ - - # Figure out which attributes to include, and which function to use to - # format them. The a.repr value can be either bool or a custom callable. - attr_names_with_reprs = tuple( - (a.name, repr if a.repr is True else a.repr) - for a in attrs - if a.repr is not False - ) - - def __repr__(self): - """ - Automatically created by attrs. - """ - try: - working_set = _already_repring.working_set - except AttributeError: - working_set = set() - _already_repring.working_set = working_set - - if id(self) in working_set: - return "..." - real_cls = self.__class__ - if ns is None: - qualname = getattr(real_cls, "__qualname__", None) - if qualname is not None: - class_name = qualname.rsplit(">.", 1)[-1] - else: - class_name = real_cls.__name__ - else: - class_name = ns + "." + real_cls.__name__ - - # Since 'self' remains on the stack (i.e.: strongly referenced) for the - # duration of this call, it's safe to depend on id(...) stability, and - # not need to track the instance and therefore worry about properties - # like weakref- or hash-ability. 
- working_set.add(id(self)) - try: - result = [class_name, "("] - first = True - for name, attr_repr in attr_names_with_reprs: - if first: - first = False - else: - result.append(", ") - result.extend( - (name, "=", attr_repr(getattr(self, name, NOTHING))) - ) - return "".join(result) + ")" - finally: - working_set.remove(id(self)) - - return __repr__ - - -def _add_repr(cls, ns=None, attrs=None): - """ - Add a repr method to *cls*. - """ - if attrs is None: - attrs = cls.__attrs_attrs__ - - cls.__repr__ = _make_repr(attrs, ns) - return cls - - -def fields(cls): - """ - Return the tuple of ``attrs`` attributes for a class. - - The tuple also allows accessing the fields by their names (see below for - examples). - - :param type cls: Class to introspect. - - :raise TypeError: If *cls* is not a class. - :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` - class. - - :rtype: tuple (with name accessors) of `attr.Attribute` - - .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields - by name. - """ - if not isclass(cls): - raise TypeError("Passed object must be a class.") - attrs = getattr(cls, "__attrs_attrs__", None) - if attrs is None: - raise NotAnAttrsClassError( - "{cls!r} is not an attrs-decorated class.".format(cls=cls) - ) - return attrs - - -def fields_dict(cls): - """ - Return an ordered dictionary of ``attrs`` attributes for a class, whose - keys are the attribute names. - - :param type cls: Class to introspect. - - :raise TypeError: If *cls* is not a class. - :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` - class. - - :rtype: an ordered dict where keys are attribute names and values are - `attr.Attribute`\\ s. This will be a `dict` if it's - naturally ordered like on Python 3.6+ or an - :class:`~collections.OrderedDict` otherwise. - - .. versionadded:: 18.1.0 - """ - if not isclass(cls): - raise TypeError("Passed object must be a class.") - attrs = getattr(cls, "__attrs_attrs__", None) - if attrs is None: - raise NotAnAttrsClassError( - "{cls!r} is not an attrs-decorated class.".format(cls=cls) - ) - return ordered_dict(((a.name, a) for a in attrs)) - - -def validate(inst): - """ - Validate all attributes on *inst* that have a validator. - - Lets all exceptions through. - - :param inst: Instance of a class with ``attrs`` attributes. - """ - if _config._run_validators is False: - return - - for a in fields(inst.__class__): - v = a.validator - if v is not None: - v(inst, a, getattr(inst, a.name)) - - -def _is_slot_cls(cls): - return "__slots__" in cls.__dict__ - - -def _is_slot_attr(a_name, base_attr_map): - """ - Check if the attribute name comes from a slot class.
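For example, given a hypothetical ``base_attr_map = {"x": Base}`` where ``Base`` defines ``__slots__``, ``_is_slot_attr("x", base_attr_map)`` returns ``True``.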
- """ - return a_name in base_attr_map and _is_slot_cls(base_attr_map[a_name]) - - -def _make_init( - cls, - attrs, - post_init, - frozen, - slots, - cache_hash, - base_attr_map, - is_exc, - has_global_on_setattr, -): - if frozen and has_global_on_setattr: - raise ValueError("Frozen classes can't use on_setattr.") - - needs_cached_setattr = cache_hash or frozen - filtered_attrs = [] - attr_dict = {} - for a in attrs: - if not a.init and a.default is NOTHING: - continue - - filtered_attrs.append(a) - attr_dict[a.name] = a - - if a.on_setattr is not None: - if frozen is True: - raise ValueError("Frozen classes can't use on_setattr.") - - needs_cached_setattr = True - elif ( - has_global_on_setattr and a.on_setattr is not setters.NO_OP - ) or _is_slot_attr(a.name, base_attr_map): - needs_cached_setattr = True - - unique_filename = _generate_unique_filename(cls, "init") - - script, globs, annotations = _attrs_to_init_script( - filtered_attrs, - frozen, - slots, - post_init, - cache_hash, - base_attr_map, - is_exc, - needs_cached_setattr, - has_global_on_setattr, - ) - locs = {} - bytecode = compile(script, unique_filename, "exec") - globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict}) - - if needs_cached_setattr: - # Save the lookup overhead in __init__ if we need to circumvent - # setattr hooks. - globs["_cached_setattr"] = _obj_setattr - - eval(bytecode, globs, locs) - - # In order of debuggers like PDB being able to step through the code, - # we add a fake linecache entry. - linecache.cache[unique_filename] = ( - len(script), - None, - script.splitlines(True), - unique_filename, - ) - - __init__ = locs["__init__"] - __init__.__annotations__ = annotations - - return __init__ - - -def _setattr(attr_name, value_var, has_on_setattr): - """ - Use the cached object.setattr to set *attr_name* to *value_var*. - """ - return "_setattr('%s', %s)" % (attr_name, value_var) - - -def _setattr_with_converter(attr_name, value_var, has_on_setattr): - """ - Use the cached object.setattr to set *attr_name* to *value_var*, but run - its converter first. - """ - return "_setattr('%s', %s(%s))" % ( - attr_name, - _init_converter_pat % (attr_name,), - value_var, - ) - - -def _assign(attr_name, value, has_on_setattr): - """ - Unless *attr_name* has an on_setattr hook, use normal assignment. Otherwise - relegate to _setattr. - """ - if has_on_setattr: - return _setattr(attr_name, value, True) - - return "self.%s = %s" % (attr_name, value) - - -def _assign_with_converter(attr_name, value_var, has_on_setattr): - """ - Unless *attr_name* has an on_setattr hook, use normal assignment after - conversion. Otherwise relegate to _setattr_with_converter. - """ - if has_on_setattr: - return _setattr_with_converter(attr_name, value_var, True) - - return "self.%s = %s(%s)" % ( - attr_name, - _init_converter_pat % (attr_name,), - value_var, - ) - - -if PY2: - - def _unpack_kw_only_py2(attr_name, default=None): - """ - Unpack *attr_name* from _kw_only dict. - """ - if default is not None: - arg_default = ", %s" % default - else: - arg_default = "" - return "%s = _kw_only.pop('%s'%s)" % ( - attr_name, - attr_name, - arg_default, - ) - - def _unpack_kw_only_lines_py2(kw_only_args): - """ - Unpack all *kw_only_args* from _kw_only dict and handle errors. - - Given a list of strings "{attr_name}" and "{attr_name}={default}" - generates list of lines of code that pop attrs from _kw_only dict and - raise TypeError similar to builtin if required attr is missing or - extra key is passed. 
- - >>> print("\n".join(_unpack_kw_only_lines_py2(["a", "b=42"]))) - try: - a = _kw_only.pop('a') - b = _kw_only.pop('b', 42) - except KeyError as _key_error: - raise TypeError( - ... - if _kw_only: - raise TypeError( - ... - """ - lines = ["try:"] - lines.extend( - " " + _unpack_kw_only_py2(*arg.split("=")) - for arg in kw_only_args - ) - lines += """\ -except KeyError as _key_error: - raise TypeError( - '__init__() missing required keyword-only argument: %s' % _key_error - ) -if _kw_only: - raise TypeError( - '__init__() got an unexpected keyword argument %r' - % next(iter(_kw_only)) - ) -""".split( - "\n" - ) - return lines - - -def _attrs_to_init_script( - attrs, - frozen, - slots, - post_init, - cache_hash, - base_attr_map, - is_exc, - needs_cached_setattr, - has_global_on_setattr, -): - """ - Return a script of an initializer for *attrs* and a dict of globals. - - The globals are expected by the generated script. - - If *frozen* is True, we cannot set the attributes directly so we use - a cached ``object.__setattr__``. - """ - lines = [] - if needs_cached_setattr: - lines.append( - # Circumvent the __setattr__ descriptor to save one lookup per - # assignment. - # Note _setattr will be used again below if cache_hash is True - "_setattr = _cached_setattr.__get__(self, self.__class__)" - ) - - if frozen is True: - if slots is True: - fmt_setter = _setattr - fmt_setter_with_converter = _setattr_with_converter - else: - # Dict frozen classes assign directly to __dict__. - # But only if the attribute doesn't come from an ancestor slot - # class. - # Note _inst_dict will be used again below if cache_hash is True - lines.append("_inst_dict = self.__dict__") - - def fmt_setter(attr_name, value_var, has_on_setattr): - if _is_slot_attr(attr_name, base_attr_map): - return _setattr(attr_name, value_var, has_on_setattr) - - return "_inst_dict['%s'] = %s" % (attr_name, value_var) - - def fmt_setter_with_converter( - attr_name, value_var, has_on_setattr - ): - if has_on_setattr or _is_slot_attr(attr_name, base_attr_map): - return _setattr_with_converter( - attr_name, value_var, has_on_setattr - ) - - return "_inst_dict['%s'] = %s(%s)" % ( - attr_name, - _init_converter_pat % (attr_name,), - value_var, - ) - - else: - # Not frozen. - fmt_setter = _assign - fmt_setter_with_converter = _assign_with_converter - - args = [] - kw_only_args = [] - attrs_to_validate = [] - - # This is a dictionary of names to validator and converter callables. - # Injecting this into __init__ globals lets us avoid lookups. 
- names_for_globals = {} - annotations = {"return": None} - - for a in attrs: - if a.validator: - attrs_to_validate.append(a) - - attr_name = a.name - has_on_setattr = a.on_setattr is not None or ( - a.on_setattr is not setters.NO_OP and has_global_on_setattr - ) - arg_name = a.name.lstrip("_") - - has_factory = isinstance(a.default, Factory) - if has_factory and a.default.takes_self: - maybe_self = "self" - else: - maybe_self = "" - - if a.init is False: - if has_factory: - init_factory_name = _init_factory_pat.format(a.name) - if a.converter is not None: - lines.append( - fmt_setter_with_converter( - attr_name, - init_factory_name + "(%s)" % (maybe_self,), - has_on_setattr, - ) - ) - conv_name = _init_converter_pat % (a.name,) - names_for_globals[conv_name] = a.converter - else: - lines.append( - fmt_setter( - attr_name, - init_factory_name + "(%s)" % (maybe_self,), - has_on_setattr, - ) - ) - names_for_globals[init_factory_name] = a.default.factory - else: - if a.converter is not None: - lines.append( - fmt_setter_with_converter( - attr_name, - "attr_dict['%s'].default" % (attr_name,), - has_on_setattr, - ) - ) - conv_name = _init_converter_pat % (a.name,) - names_for_globals[conv_name] = a.converter - else: - lines.append( - fmt_setter( - attr_name, - "attr_dict['%s'].default" % (attr_name,), - has_on_setattr, - ) - ) - elif a.default is not NOTHING and not has_factory: - arg = "%s=attr_dict['%s'].default" % (arg_name, attr_name) - if a.kw_only: - kw_only_args.append(arg) - else: - args.append(arg) - - if a.converter is not None: - lines.append( - fmt_setter_with_converter( - attr_name, arg_name, has_on_setattr - ) - ) - names_for_globals[ - _init_converter_pat % (a.name,) - ] = a.converter - else: - lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) - - elif has_factory: - arg = "%s=NOTHING" % (arg_name,) - if a.kw_only: - kw_only_args.append(arg) - else: - args.append(arg) - lines.append("if %s is not NOTHING:" % (arg_name,)) - - init_factory_name = _init_factory_pat.format(a.name) - if a.converter is not None: - lines.append( - " " - + fmt_setter_with_converter( - attr_name, arg_name, has_on_setattr - ) - ) - lines.append("else:") - lines.append( - " " - + fmt_setter_with_converter( - attr_name, - init_factory_name + "(" + maybe_self + ")", - has_on_setattr, - ) - ) - names_for_globals[ - _init_converter_pat % (a.name,) - ] = a.converter - else: - lines.append( - " " + fmt_setter(attr_name, arg_name, has_on_setattr) - ) - lines.append("else:") - lines.append( - " " - + fmt_setter( - attr_name, - init_factory_name + "(" + maybe_self + ")", - has_on_setattr, - ) - ) - names_for_globals[init_factory_name] = a.default.factory - else: - if a.kw_only: - kw_only_args.append(arg_name) - else: - args.append(arg_name) - - if a.converter is not None: - lines.append( - fmt_setter_with_converter( - attr_name, arg_name, has_on_setattr - ) - ) - names_for_globals[ - _init_converter_pat % (a.name,) - ] = a.converter - else: - lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) - - if a.init is True and a.converter is None and a.type is not None: - annotations[arg_name] = a.type - - if attrs_to_validate: # we can skip this if there are no validators. 
- names_for_globals["_config"] = _config - lines.append("if _config._run_validators is True:") - for a in attrs_to_validate: - val_name = "__attr_validator_" + a.name - attr_name = "__attr_" + a.name - lines.append( - " %s(self, %s, self.%s)" % (val_name, attr_name, a.name) - ) - names_for_globals[val_name] = a.validator - names_for_globals[attr_name] = a - - if post_init: - lines.append("self.__attrs_post_init__()") - - # because this is set only after __attrs_post_init is called, a crash - # will result if post-init tries to access the hash code. This seemed - # preferable to setting this beforehand, in which case alteration to - # field values during post-init combined with post-init accessing the - # hash code would result in silent bugs. - if cache_hash: - if frozen: - if slots: - # if frozen and slots, then _setattr defined above - init_hash_cache = "_setattr('%s', %s)" - else: - # if frozen and not slots, then _inst_dict defined above - init_hash_cache = "_inst_dict['%s'] = %s" - else: - init_hash_cache = "self.%s = %s" - lines.append(init_hash_cache % (_hash_cache_field, "None")) - - # For exceptions we rely on BaseException.__init__ for proper - # initialization. - if is_exc: - vals = ",".join("self." + a.name for a in attrs if a.init) - - lines.append("BaseException.__init__(self, %s)" % (vals,)) - - args = ", ".join(args) - if kw_only_args: - if PY2: - lines = _unpack_kw_only_lines_py2(kw_only_args) + lines - - args += "%s**_kw_only" % (", " if args else "",) # leading comma - else: - args += "%s*, %s" % ( - ", " if args else "", # leading comma - ", ".join(kw_only_args), # kw_only args - ) - return ( - """\ -def __init__(self, {args}): - {lines} -""".format( - args=args, lines="\n ".join(lines) if lines else "pass" - ), - names_for_globals, - annotations, - ) - - -class Attribute(object): - """ - *Read-only* representation of an attribute. - - Instances of this class are frequently used for introspection purposes - like: - - - `fields` returns a tuple of them. - - Validators get them passed as the first argument. - - The *field transformer* hook receives a list of them. - - :attribute name: The name of the attribute. - :attribute inherited: Whether or not that attribute has been inherited from - a base class. - - Plus *all* arguments of `attr.ib` (except for ``factory`` - which is only syntactic sugar for ``default=Factory(...)``. - - .. versionadded:: 20.1.0 *inherited* - .. versionadded:: 20.1.0 *on_setattr* - .. versionchanged:: 20.2.0 *inherited* is not taken into account for - equality checks and hashing anymore. - - For the full version history of the fields, see `attr.ib`. - """ - - __slots__ = ( - "name", - "default", - "validator", - "repr", - "eq", - "order", - "hash", - "init", - "metadata", - "type", - "converter", - "kw_only", - "inherited", - "on_setattr", - ) - - def __init__( - self, - name, - default, - validator, - repr, - cmp, # XXX: unused, remove along with other cmp code. - hash, - init, - inherited, - metadata=None, - type=None, - converter=None, - kw_only=False, - eq=None, - order=None, - on_setattr=None, - ): - eq, order = _determine_eq_order(cmp, eq, order, True) - - # Cache this descriptor here to speed things up later. - bound_setattr = _obj_setattr.__get__(self, Attribute) - - # Despite the big red warning, people *do* instantiate `Attribute` - # themselves. 
- bound_setattr("name", name) - bound_setattr("default", default) - bound_setattr("validator", validator) - bound_setattr("repr", repr) - bound_setattr("eq", eq) - bound_setattr("order", order) - bound_setattr("hash", hash) - bound_setattr("init", init) - bound_setattr("converter", converter) - bound_setattr( - "metadata", - ( - metadata_proxy(metadata) - if metadata - else _empty_metadata_singleton - ), - ) - bound_setattr("type", type) - bound_setattr("kw_only", kw_only) - bound_setattr("inherited", inherited) - bound_setattr("on_setattr", on_setattr) - - def __setattr__(self, name, value): - raise FrozenInstanceError() - - @classmethod - def from_counting_attr(cls, name, ca, type=None): - # type holds the annotated value. deal with conflicts: - if type is None: - type = ca.type - elif ca.type is not None: - raise ValueError( - "Type annotation and type argument cannot both be present" - ) - inst_dict = { - k: getattr(ca, k) - for k in Attribute.__slots__ - if k - not in ( - "name", - "validator", - "default", - "type", - "inherited", - ) # exclude methods and deprecated alias - } - return cls( - name=name, - validator=ca._validator, - default=ca._default, - type=type, - cmp=None, - inherited=False, - **inst_dict - ) - - @property - def cmp(self): - """ - Simulate the presence of a cmp attribute and warn. - """ - warnings.warn(_CMP_DEPRECATION, DeprecationWarning, stacklevel=2) - - return self.eq and self.order - - # Don't use attr.evolve since fields(Attribute) doesn't work - def evolve(self, **changes): - """ - Copy *self* and apply *changes*. - - This works similarly to `attr.evolve` but that function does not work - with ``Attribute``. - - It is mainly meant to be used for `transform-fields`. - - .. versionadded:: 20.3.0 - """ - new = copy.copy(self) - - new._setattrs(changes.items()) - - return new - - # Don't use _add_pickle since fields(Attribute) doesn't work - def __getstate__(self): - """ - Play nice with pickle. - """ - return tuple( - getattr(self, name) if name != "metadata" else dict(self.metadata) - for name in self.__slots__ - ) - - def __setstate__(self, state): - """ - Play nice with pickle. - """ - self._setattrs(zip(self.__slots__, state)) - - def _setattrs(self, name_values_pairs): - bound_setattr = _obj_setattr.__get__(self, Attribute) - for name, value in name_values_pairs: - if name != "metadata": - bound_setattr(name, value) - else: - bound_setattr( - name, - metadata_proxy(value) - if value - else _empty_metadata_singleton, - ) - - -_a = [ - Attribute( - name=name, - default=NOTHING, - validator=None, - repr=True, - cmp=None, - eq=True, - order=False, - hash=(name != "metadata"), - init=True, - inherited=False, - ) - for name in Attribute.__slots__ -] - -Attribute = _add_hash( - _add_eq( - _add_repr(Attribute, attrs=_a), - attrs=[a for a in _a if a.name != "inherited"], - ), - attrs=[a for a in _a if a.hash and a.name != "inherited"], -) - - -class _CountingAttr(object): - """ - Intermediate representation of attributes that uses a counter to preserve - the order in which the attributes have been defined. - - *Internal* data structure of the attrs library. Running into is most - likely the result of a bug like a forgotten `@attr.s` decorator. 
- """ - - __slots__ = ( - "counter", - "_default", - "repr", - "eq", - "order", - "hash", - "init", - "metadata", - "_validator", - "converter", - "type", - "kw_only", - "on_setattr", - ) - __attrs_attrs__ = tuple( - Attribute( - name=name, - default=NOTHING, - validator=None, - repr=True, - cmp=None, - hash=True, - init=True, - kw_only=False, - eq=True, - order=False, - inherited=False, - on_setattr=None, - ) - for name in ( - "counter", - "_default", - "repr", - "eq", - "order", - "hash", - "init", - "on_setattr", - ) - ) + ( - Attribute( - name="metadata", - default=None, - validator=None, - repr=True, - cmp=None, - hash=False, - init=True, - kw_only=False, - eq=True, - order=False, - inherited=False, - on_setattr=None, - ), - ) - cls_counter = 0 - - def __init__( - self, - default, - validator, - repr, - cmp, # XXX: unused, remove along with cmp - hash, - init, - converter, - metadata, - type, - kw_only, - eq, - order, - on_setattr, - ): - _CountingAttr.cls_counter += 1 - self.counter = _CountingAttr.cls_counter - self._default = default - self._validator = validator - self.converter = converter - self.repr = repr - self.eq = eq - self.order = order - self.hash = hash - self.init = init - self.metadata = metadata - self.type = type - self.kw_only = kw_only - self.on_setattr = on_setattr - - def validator(self, meth): - """ - Decorator that adds *meth* to the list of validators. - - Returns *meth* unchanged. - - .. versionadded:: 17.1.0 - """ - if self._validator is None: - self._validator = meth - else: - self._validator = and_(self._validator, meth) - return meth - - def default(self, meth): - """ - Decorator that allows to set the default for an attribute. - - Returns *meth* unchanged. - - :raises DefaultAlreadySetError: If default has been set before. - - .. versionadded:: 17.1.0 - """ - if self._default is not NOTHING: - raise DefaultAlreadySetError() - - self._default = Factory(meth, takes_self=True) - - return meth - - -_CountingAttr = _add_eq(_add_repr(_CountingAttr)) - - -@attrs(slots=True, init=False, hash=True) -class Factory(object): - """ - Stores a factory callable. - - If passed as the default value to `attr.ib`, the factory is used to - generate a new value. - - :param callable factory: A callable that takes either none or exactly one - mandatory positional argument depending on *takes_self*. - :param bool takes_self: Pass the partially initialized instance that is - being initialized as a positional argument. - - .. versionadded:: 17.1.0 *takes_self* - """ - - factory = attrib() - takes_self = attrib() - - def __init__(self, factory, takes_self=False): - """ - `Factory` is part of the default machinery so if we want a default - value here, we have to implement it ourselves. - """ - self.factory = factory - self.takes_self = takes_self - - -def make_class(name, attrs, bases=(object,), **attributes_arguments): - """ - A quick way to create a new class called *name* with *attrs*. - - :param str name: The name for the new class. - - :param attrs: A list of names or a dictionary of mappings of names to - attributes. - - If *attrs* is a list or an ordered dict (`dict` on Python 3.6+, - `collections.OrderedDict` otherwise), the order is deduced from - the order of the names or attributes inside *attrs*. Otherwise the - order of the definition of the attributes is used. - :type attrs: `list` or `dict` - - :param tuple bases: Classes that the new class will subclass. - - :param attributes_arguments: Passed unmodified to `attr.s`. - - :return: A new class with *attrs*. 
- :rtype: type - - .. versionadded:: 17.1.0 *bases* - .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained. - """ - if isinstance(attrs, dict): - cls_dict = attrs - elif isinstance(attrs, (list, tuple)): - cls_dict = dict((a, attrib()) for a in attrs) - else: - raise TypeError("attrs argument must be a dict or a list.") - - post_init = cls_dict.pop("__attrs_post_init__", None) - type_ = type( - name, - bases, - {} if post_init is None else {"__attrs_post_init__": post_init}, - ) - # For pickling to work, the __module__ variable needs to be set to the - # frame where the class is created. Bypass this step in environments where - # sys._getframe is not defined (Jython for example) or sys._getframe is not - # defined for arguments greater than 0 (IronPython). - try: - type_.__module__ = sys._getframe(1).f_globals.get( - "__name__", "__main__" - ) - except (AttributeError, ValueError): - pass - - # We do it here for proper warnings with meaningful stacklevel. - cmp = attributes_arguments.pop("cmp", None) - ( - attributes_arguments["eq"], - attributes_arguments["order"], - ) = _determine_eq_order( - cmp, - attributes_arguments.get("eq"), - attributes_arguments.get("order"), - True, - ) - - return _attrs(these=cls_dict, **attributes_arguments)(type_) - - -# These are required by within this module so we define them here and merely -# import into .validators / .converters. - - -@attrs(slots=True, hash=True) -class _AndValidator(object): - """ - Compose many validators to a single one. - """ - - _validators = attrib() - - def __call__(self, inst, attr, value): - for v in self._validators: - v(inst, attr, value) - - -def and_(*validators): - """ - A validator that composes multiple validators into one. - - When called on a value, it runs all wrapped validators. - - :param callables validators: Arbitrary number of validators. - - .. versionadded:: 17.1.0 - """ - vals = [] - for validator in validators: - vals.extend( - validator._validators - if isinstance(validator, _AndValidator) - else [validator] - ) - - return _AndValidator(tuple(vals)) - - -def pipe(*converters): - """ - A converter that composes multiple converters into one. - - When called on a value, it runs all wrapped converters, returning the - *last* value. - - :param callables converters: Arbitrary number of converters. - - .. versionadded:: 20.1.0 - """ - - def pipe_converter(val): - for converter in converters: - val = converter(val) - - return val - - return pipe_converter diff --git a/conda_lock/_vendor/poetry/core/_vendor/attr/_next_gen.py b/conda_lock/_vendor/poetry/core/_vendor/attr/_next_gen.py deleted file mode 100644 index 2b5565c5..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/attr/_next_gen.py +++ /dev/null @@ -1,160 +0,0 @@ -""" -This is a Python 3.6 and later-only, keyword-only, and **provisional** API that -calls `attr.s` with different default values. - -Provisional APIs that shall become "import attrs" one glorious day. -""" - -from functools import partial - -from attr.exceptions import UnannotatedAttributeError - -from . 
import setters -from ._make import NOTHING, _frozen_setattrs, attrib, attrs - - -def define( - maybe_cls=None, - *, - these=None, - repr=None, - hash=None, - init=None, - slots=True, - frozen=False, - weakref_slot=True, - str=False, - auto_attribs=None, - kw_only=False, - cache_hash=False, - auto_exc=True, - eq=None, - order=False, - auto_detect=True, - getstate_setstate=None, - on_setattr=None, - field_transformer=None, -): - r""" - The only behavioral differences are the handling of the *auto_attribs* - option: - - :param Optional[bool] auto_attribs: If set to `True` or `False`, it behaves - exactly like `attr.s`. If left `None`, `attr.s` will try to guess: - - 1. If all attributes are annotated and no `attr.ib` is found, it assumes - *auto_attribs=True*. - 2. Otherwise it assumes *auto_attribs=False* and tries to collect - `attr.ib`\ s. - - and that mutable classes (``frozen=False``) validate on ``__setattr__``. - - .. versionadded:: 20.1.0 - """ - - def do_it(cls, auto_attribs): - return attrs( - maybe_cls=cls, - these=these, - repr=repr, - hash=hash, - init=init, - slots=slots, - frozen=frozen, - weakref_slot=weakref_slot, - str=str, - auto_attribs=auto_attribs, - kw_only=kw_only, - cache_hash=cache_hash, - auto_exc=auto_exc, - eq=eq, - order=order, - auto_detect=auto_detect, - collect_by_mro=True, - getstate_setstate=getstate_setstate, - on_setattr=on_setattr, - field_transformer=field_transformer, - ) - - def wrap(cls): - """ - Making this a wrapper ensures this code runs during class creation. - - We also ensure that frozen-ness of classes is inherited. - """ - nonlocal frozen, on_setattr - - had_on_setattr = on_setattr not in (None, setters.NO_OP) - - # By default, mutable classes validate on setattr. - if frozen is False and on_setattr is None: - on_setattr = setters.validate - - # However, if we subclass a frozen class, we inherit the immutability - # and disable on_setattr. - for base_cls in cls.__bases__: - if base_cls.__setattr__ is _frozen_setattrs: - if had_on_setattr: - raise ValueError( - "Frozen classes can't use on_setattr " - "(frozen-ness was inherited)." - ) - - on_setattr = setters.NO_OP - break - - if auto_attribs is not None: - return do_it(cls, auto_attribs) - - try: - return do_it(cls, True) - except UnannotatedAttributeError: - return do_it(cls, False) - - # maybe_cls's type depends on the usage of the decorator. It's a class - # if it's used as `@attrs` but ``None`` if used as `@attrs()`. - if maybe_cls is None: - return wrap - else: - return wrap(maybe_cls) - - -mutable = define -frozen = partial(define, frozen=True, on_setattr=None) - - -def field( - *, - default=NOTHING, - validator=None, - repr=True, - hash=None, - init=True, - metadata=None, - converter=None, - factory=None, - kw_only=False, - eq=None, - order=None, - on_setattr=None, -): - """ - Identical to `attr.ib`, except keyword-only and with some arguments - removed. - - .. 
versionadded:: 20.1.0 - """ - return attrib( - default=default, - validator=validator, - repr=repr, - hash=hash, - init=init, - metadata=metadata, - converter=converter, - factory=factory, - kw_only=kw_only, - eq=eq, - order=order, - on_setattr=on_setattr, - ) diff --git a/conda_lock/_vendor/poetry/core/_vendor/attr/_version_info.py b/conda_lock/_vendor/poetry/core/_vendor/attr/_version_info.py deleted file mode 100644 index 014e78a1..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/attr/_version_info.py +++ /dev/null @@ -1,85 +0,0 @@ -from __future__ import absolute_import, division, print_function - -from functools import total_ordering - -from ._funcs import astuple -from ._make import attrib, attrs - - -@total_ordering -@attrs(eq=False, order=False, slots=True, frozen=True) -class VersionInfo(object): - """ - A version object that can be compared to tuple of length 1--4: - - >>> attr.VersionInfo(19, 1, 0, "final") <= (19, 2) - True - >>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1) - True - >>> vi = attr.VersionInfo(19, 2, 0, "final") - >>> vi < (19, 1, 1) - False - >>> vi < (19,) - False - >>> vi == (19, 2,) - True - >>> vi == (19, 2, 1) - False - - .. versionadded:: 19.2 - """ - - year = attrib(type=int) - minor = attrib(type=int) - micro = attrib(type=int) - releaselevel = attrib(type=str) - - @classmethod - def _from_version_string(cls, s): - """ - Parse *s* and return a _VersionInfo. - """ - v = s.split(".") - if len(v) == 3: - v.append("final") - - return cls( - year=int(v[0]), minor=int(v[1]), micro=int(v[2]), releaselevel=v[3] - ) - - def _ensure_tuple(self, other): - """ - Ensure *other* is a tuple of a valid length. - - Returns a possibly transformed *other* and ourselves as a tuple of - the same length as *other*. - """ - - if self.__class__ is other.__class__: - other = astuple(other) - - if not isinstance(other, tuple): - raise NotImplementedError - - if not (1 <= len(other) <= 4): - raise NotImplementedError - - return astuple(self)[: len(other)], other - - def __eq__(self, other): - try: - us, them = self._ensure_tuple(other) - except NotImplementedError: - return NotImplemented - - return us == them - - def __lt__(self, other): - try: - us, them = self._ensure_tuple(other) - except NotImplementedError: - return NotImplemented - - # Since alphabetically "dev0" < "final" < "post1" < "post2", we don't - # have to do anything special with releaselevel for now. - return us < them diff --git a/conda_lock/_vendor/poetry/core/_vendor/attr/converters.py b/conda_lock/_vendor/poetry/core/_vendor/attr/converters.py deleted file mode 100644 index 715ce178..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/attr/converters.py +++ /dev/null @@ -1,85 +0,0 @@ -""" -Commonly useful converters. -""" - -from __future__ import absolute_import, division, print_function - -from ._make import NOTHING, Factory, pipe - - -__all__ = [ - "pipe", - "optional", - "default_if_none", -] - - -def optional(converter): - """ - A converter that allows an attribute to be optional. An optional attribute - is one which can be set to ``None``. - - :param callable converter: the converter that is used for non-``None`` - values. - - .. versionadded:: 17.1.0 - """ - - def optional_converter(val): - if val is None: - return None - return converter(val) - - return optional_converter - - -def default_if_none(default=NOTHING, factory=None): - """ - A converter that allows to replace ``None`` values by *default* or the - result of *factory*. - - :param default: Value to be used if ``None`` is passed. 
Passing an instance - of `attr.Factory` is supported; however, the ``takes_self`` option - is *not*. - :param callable factory: A callable that takes no parameters whose result - is used if ``None`` is passed. - - :raises TypeError: If **neither** *default* **nor** *factory* is passed. - :raises TypeError: If **both** *default* and *factory* are passed. - :raises ValueError: If an instance of `attr.Factory` is passed with - ``takes_self=True``. - - .. versionadded:: 18.2.0 - """ - if default is NOTHING and factory is None: - raise TypeError("Must pass either `default` or `factory`.") - - if default is not NOTHING and factory is not None: - raise TypeError( - "Must pass either `default` or `factory` but not both." - ) - - if factory is not None: - default = Factory(factory) - - if isinstance(default, Factory): - if default.takes_self: - raise ValueError( - "`takes_self` is not supported by default_if_none." - ) - - def default_if_none_converter(val): - if val is not None: - return val - - return default.factory() - - else: - - def default_if_none_converter(val): - if val is not None: - return val - - return default - - return default_if_none_converter diff --git a/conda_lock/_vendor/poetry/core/_vendor/attr/exceptions.py b/conda_lock/_vendor/poetry/core/_vendor/attr/exceptions.py deleted file mode 100644 index fcd89106..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/attr/exceptions.py +++ /dev/null @@ -1,92 +0,0 @@ -from __future__ import absolute_import, division, print_function - - -class FrozenError(AttributeError): - """ - An attempt has been made to modify a frozen/immutable instance or - attribute. - - It mirrors the behavior of ``namedtuples`` by using the same error message - and subclassing `AttributeError`. - - .. versionadded:: 20.1.0 - """ - - msg = "can't set attribute" - args = [msg] - - -class FrozenInstanceError(FrozenError): - """ - An attempt has been made to modify a frozen instance. - - .. versionadded:: 16.1.0 - """ - - -class FrozenAttributeError(FrozenError): - """ - An attempt has been made to modify a frozen attribute. - - .. versionadded:: 20.1.0 - """ - - -class AttrsAttributeNotFoundError(ValueError): - """ - An ``attrs`` function couldn't find an attribute that the user asked for. - - .. versionadded:: 16.2.0 - """ - - -class NotAnAttrsClassError(ValueError): - """ - A non-``attrs`` class has been passed into an ``attrs`` function. - - .. versionadded:: 16.2.0 - """ - - -class DefaultAlreadySetError(RuntimeError): - """ - A default has been set using ``attr.ib()`` and an attempt has been made - to reset it using the decorator. - - .. versionadded:: 17.1.0 - """ - - -class UnannotatedAttributeError(RuntimeError): - """ - A class with ``auto_attribs=True`` has an ``attr.ib()`` without a type - annotation. - - .. versionadded:: 17.3.0 - """ - - -class PythonTooOldError(RuntimeError): - """ - An attempt was made to use an ``attrs`` feature that requires a newer - Python version. - - .. versionadded:: 18.2.0 - """ - - -class NotCallableError(TypeError): - """ - An ``attr.ib()`` requiring a callable has been set with a value - that is not callable. - - .. versionadded:: 19.2.0 - """ - - def __init__(self, msg, value): - super(TypeError, self).__init__(msg, value) - self.msg = msg - self.value = value - - def __str__(self): - return str(self.msg) diff --git a/conda_lock/_vendor/poetry/core/_vendor/attr/filters.py b/conda_lock/_vendor/poetry/core/_vendor/attr/filters.py deleted file mode 100644 index dc47e8fa..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/attr/filters.py +++ /dev/null @@ -1,52 +0,0 @@ -""" -Commonly useful filters for `attr.asdict`. -""" - -from __future__ import absolute_import, division, print_function - -from ._compat import isclass -from ._make import Attribute - - -def _split_what(what): - """ - Returns a tuple of `frozenset`s of classes and attributes. - """ - return ( - frozenset(cls for cls in what if isclass(cls)), - frozenset(cls for cls in what if isinstance(cls, Attribute)), - ) - - -def include(*what): - """ - Whitelist *what*. - - :param what: What to whitelist. - :type what: `list` of `type` or `attr.Attribute`\\ s - - :rtype: `callable` - """ - cls, attrs = _split_what(what) - - def include_(attribute, value): - return value.__class__ in cls or attribute in attrs - - return include_ - - -def exclude(*what): - """ - Blacklist *what*. - - :param what: What to blacklist. - :type what: `list` of classes or `attr.Attribute`\\ s. - - :rtype: `callable` - """ - cls, attrs = _split_what(what) - - def exclude_(attribute, value): - return value.__class__ not in cls and attribute not in attrs - - return exclude_ diff --git a/conda_lock/_vendor/poetry/core/_vendor/attr/setters.py b/conda_lock/_vendor/poetry/core/_vendor/attr/setters.py deleted file mode 100644 index 240014b3..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/attr/setters.py +++ /dev/null @@ -1,77 +0,0 @@ -""" -Commonly used hooks for on_setattr. -""" - -from __future__ import absolute_import, division, print_function - -from . import _config -from .exceptions import FrozenAttributeError - - -def pipe(*setters): - """ - Run all *setters* and return the return value of the last one. - - .. versionadded:: 20.1.0 - """ - - def wrapped_pipe(instance, attrib, new_value): - rv = new_value - - for setter in setters: - rv = setter(instance, attrib, rv) - - return rv - - return wrapped_pipe - - -def frozen(_, __, ___): - """ - Prevent an attribute from being modified. - - .. versionadded:: 20.1.0 - """ - raise FrozenAttributeError() - - -def validate(instance, attrib, new_value): - """ - Run *attrib*'s validator on *new_value* if it has one. - - .. versionadded:: 20.1.0 - """ - if _config._run_validators is False: - return new_value - - v = attrib.validator - if not v: - return new_value - - v(instance, attrib, new_value) - - return new_value - - -def convert(instance, attrib, new_value): - """ - Run *attrib*'s converter -- if it has one -- on *new_value* and return the - result. - - .. versionadded:: 20.1.0 - """ - c = attrib.converter - if c: - return c(new_value) - - return new_value - - -NO_OP = object() -""" -Sentinel for disabling class-wide *on_setattr* hooks for certain attributes. - -Does not work in `pipe` or within lists. - -.. versionadded:: 20.1.0 -""" diff --git a/conda_lock/_vendor/poetry/core/_vendor/attr/validators.py b/conda_lock/_vendor/poetry/core/_vendor/attr/validators.py deleted file mode 100644 index b9a73054..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/attr/validators.py +++ /dev/null @@ -1,379 +0,0 @@ -""" -Commonly useful validators.
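A minimal usage sketch:

.. code-block:: python

    import attr
    from attr import validators

    @attr.s
    class Point(object):
        x = attr.ib(validator=validators.instance_of(int))

    Point(42)    # passes
    Point("42")  # raises TypeError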
-""" - -from __future__ import absolute_import, division, print_function - -import re - -from ._make import _AndValidator, and_, attrib, attrs -from .exceptions import NotCallableError - - -__all__ = [ - "and_", - "deep_iterable", - "deep_mapping", - "in_", - "instance_of", - "is_callable", - "matches_re", - "optional", - "provides", -] - - -@attrs(repr=False, slots=True, hash=True) -class _InstanceOfValidator(object): - type = attrib() - - def __call__(self, inst, attr, value): - """ - We use a callable class to be able to change the ``__repr__``. - """ - if not isinstance(value, self.type): - raise TypeError( - "'{name}' must be {type!r} (got {value!r} that is a " - "{actual!r}).".format( - name=attr.name, - type=self.type, - actual=value.__class__, - value=value, - ), - attr, - self.type, - value, - ) - - def __repr__(self): - return "".format( - type=self.type - ) - - -def instance_of(type): - """ - A validator that raises a `TypeError` if the initializer is called - with a wrong type for this particular attribute (checks are performed using - `isinstance` therefore it's also valid to pass a tuple of types). - - :param type: The type to check for. - :type type: type or tuple of types - - :raises TypeError: With a human readable error message, the attribute - (of type `attr.Attribute`), the expected type, and the value it - got. - """ - return _InstanceOfValidator(type) - - -@attrs(repr=False, frozen=True, slots=True) -class _MatchesReValidator(object): - regex = attrib() - flags = attrib() - match_func = attrib() - - def __call__(self, inst, attr, value): - """ - We use a callable class to be able to change the ``__repr__``. - """ - if not self.match_func(value): - raise ValueError( - "'{name}' must match regex {regex!r}" - " ({value!r} doesn't)".format( - name=attr.name, regex=self.regex.pattern, value=value - ), - attr, - self.regex, - value, - ) - - def __repr__(self): - return "".format( - regex=self.regex - ) - - -def matches_re(regex, flags=0, func=None): - r""" - A validator that raises `ValueError` if the initializer is called - with a string that doesn't match *regex*. - - :param str regex: a regex string to match against - :param int flags: flags that will be passed to the underlying re function - (default 0) - :param callable func: which underlying `re` function to call (options - are `re.fullmatch`, `re.search`, `re.match`, default - is ``None`` which means either `re.fullmatch` or an emulation of - it on Python 2). For performance reasons, they won't be used directly - but on a pre-`re.compile`\ ed pattern. - - .. versionadded:: 19.2.0 - """ - fullmatch = getattr(re, "fullmatch", None) - valid_funcs = (fullmatch, None, re.search, re.match) - if func not in valid_funcs: - raise ValueError( - "'func' must be one of %s." - % ( - ", ".join( - sorted( - e and e.__name__ or "None" for e in set(valid_funcs) - ) - ), - ) - ) - - pattern = re.compile(regex, flags) - if func is re.match: - match_func = pattern.match - elif func is re.search: - match_func = pattern.search - else: - if fullmatch: - match_func = pattern.fullmatch - else: - pattern = re.compile(r"(?:{})\Z".format(regex), flags) - match_func = pattern.match - - return _MatchesReValidator(pattern, flags, match_func) - - -@attrs(repr=False, slots=True, hash=True) -class _ProvidesValidator(object): - interface = attrib() - - def __call__(self, inst, attr, value): - """ - We use a callable class to be able to change the ``__repr__``. 
- """ - if not self.interface.providedBy(value): - raise TypeError( - "'{name}' must provide {interface!r} which {value!r} " - "doesn't.".format( - name=attr.name, interface=self.interface, value=value - ), - attr, - self.interface, - value, - ) - - def __repr__(self): - return "".format( - interface=self.interface - ) - - -def provides(interface): - """ - A validator that raises a `TypeError` if the initializer is called - with an object that does not provide the requested *interface* (checks are - performed using ``interface.providedBy(value)`` (see `zope.interface - `_). - - :param interface: The interface to check for. - :type interface: ``zope.interface.Interface`` - - :raises TypeError: With a human readable error message, the attribute - (of type `attr.Attribute`), the expected interface, and the - value it got. - """ - return _ProvidesValidator(interface) - - -@attrs(repr=False, slots=True, hash=True) -class _OptionalValidator(object): - validator = attrib() - - def __call__(self, inst, attr, value): - if value is None: - return - - self.validator(inst, attr, value) - - def __repr__(self): - return "".format( - what=repr(self.validator) - ) - - -def optional(validator): - """ - A validator that makes an attribute optional. An optional attribute is one - which can be set to ``None`` in addition to satisfying the requirements of - the sub-validator. - - :param validator: A validator (or a list of validators) that is used for - non-``None`` values. - :type validator: callable or `list` of callables. - - .. versionadded:: 15.1.0 - .. versionchanged:: 17.1.0 *validator* can be a list of validators. - """ - if isinstance(validator, list): - return _OptionalValidator(_AndValidator(validator)) - return _OptionalValidator(validator) - - -@attrs(repr=False, slots=True, hash=True) -class _InValidator(object): - options = attrib() - - def __call__(self, inst, attr, value): - try: - in_options = value in self.options - except TypeError: # e.g. `1 in "abc"` - in_options = False - - if not in_options: - raise ValueError( - "'{name}' must be in {options!r} (got {value!r})".format( - name=attr.name, options=self.options, value=value - ) - ) - - def __repr__(self): - return "".format( - options=self.options - ) - - -def in_(options): - """ - A validator that raises a `ValueError` if the initializer is called - with a value that does not belong in the options provided. The check is - performed using ``value in options``. - - :param options: Allowed options. - :type options: list, tuple, `enum.Enum`, ... - - :raises ValueError: With a human readable error message, the attribute (of - type `attr.Attribute`), the expected options, and the value it - got. - - .. versionadded:: 17.1.0 - """ - return _InValidator(options) - - -@attrs(repr=False, slots=False, hash=True) -class _IsCallableValidator(object): - def __call__(self, inst, attr, value): - """ - We use a callable class to be able to change the ``__repr__``. - """ - if not callable(value): - message = ( - "'{name}' must be callable " - "(got {value!r} that is a {actual!r})." - ) - raise NotCallableError( - msg=message.format( - name=attr.name, value=value, actual=value.__class__ - ), - value=value, - ) - - def __repr__(self): - return "" - - -def is_callable(): - """ - A validator that raises a `attr.exceptions.NotCallableError` if the - initializer is called with a value for this particular attribute - that is not callable. - - .. 
versionadded:: 19.1.0 - - :raises `attr.exceptions.NotCallableError`: With a human readable error - message containing the attribute (`attr.Attribute`) name, - and the value it got. - """ - return _IsCallableValidator() - - -@attrs(repr=False, slots=True, hash=True) -class _DeepIterable(object): - member_validator = attrib(validator=is_callable()) - iterable_validator = attrib( - default=None, validator=optional(is_callable()) - ) - - def __call__(self, inst, attr, value): - """ - We use a callable class to be able to change the ``__repr__``. - """ - if self.iterable_validator is not None: - self.iterable_validator(inst, attr, value) - - for member in value: - self.member_validator(inst, attr, member) - - def __repr__(self): - iterable_identifier = ( - "" - if self.iterable_validator is None - else " {iterable!r}".format(iterable=self.iterable_validator) - ) - return ( - "" - ).format( - iterable_identifier=iterable_identifier, - member=self.member_validator, - ) - - -def deep_iterable(member_validator, iterable_validator=None): - """ - A validator that performs deep validation of an iterable. - - :param member_validator: Validator to apply to iterable members - :param iterable_validator: Validator to apply to iterable itself - (optional) - - .. versionadded:: 19.1.0 - - :raises TypeError: if any sub-validators fail - """ - return _DeepIterable(member_validator, iterable_validator) - - -@attrs(repr=False, slots=True, hash=True) -class _DeepMapping(object): - key_validator = attrib(validator=is_callable()) - value_validator = attrib(validator=is_callable()) - mapping_validator = attrib(default=None, validator=optional(is_callable())) - - def __call__(self, inst, attr, value): - """ - We use a callable class to be able to change the ``__repr__``. - """ - if self.mapping_validator is not None: - self.mapping_validator(inst, attr, value) - - for key in value: - self.key_validator(inst, attr, key) - self.value_validator(inst, attr, value[key]) - - def __repr__(self): - return ( - "" - ).format(key=self.key_validator, value=self.value_validator) - - -def deep_mapping(key_validator, value_validator, mapping_validator=None): - """ - A validator that performs deep validation of a dictionary. - - :param key_validator: Validator to apply to dictionary keys - :param value_validator: Validator to apply to dictionary values - :param mapping_validator: Validator to apply to top-level mapping - attribute (optional) - - .. versionadded:: 19.1.0 - - :raises TypeError: if any sub-validators fail - """ - return _DeepMapping(key_validator, value_validator, mapping_validator) diff --git a/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/LICENSE b/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/LICENSE new file mode 100644 index 00000000..dcbe158d --- /dev/null +++ b/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2018, Michal Horejsek +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. 
+ + Neither the name of the {organization} nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/__init__.py b/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/__init__.py new file mode 100644 index 00000000..b0b8849e --- /dev/null +++ b/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/__init__.py @@ -0,0 +1,271 @@ +# ___ +# \./ DANGER: This project implements some code generation +# .--.O.--. techniques involving string concatenation. +# \/ \/ If you look at it, you might die. +# + +r""" +Installation +************ + +.. code-block:: bash + + pip install fastjsonschema + +Supports only Python 3.3 and higher. + +About +***** + +``fastjsonschema`` implements validation of JSON documents by JSON schema. +The library implements JSON schema drafts 04, 06, and 07. The main purpose is +to have a really fast implementation. See some numbers: + + * Probably the most popular, ``jsonschema``, can take up to 5 seconds for valid + inputs and 1.2 seconds for invalid inputs. + * The second most popular, ``json-spec``, is even worse, with up to 7.2 and 1.7 seconds. + * The last, ``validictory``, now deprecated, is much better at 370 or 23 milliseconds, + but it does not follow all standards, and it can still be slow for some purposes. + +With this library you can gain big improvements as ``fastjsonschema`` takes +only about 25 milliseconds for valid inputs and 2 milliseconds for invalid ones. +Pretty amazing, right? :-) + +Technically it works by generating the most stupid code on the fly, which is fast but +is hard to write by hand. The best efficiency is achieved when a validator is compiled +once and used many times, of course. It works similarly to regular expressions. But +you can also generate the code to a file, which is even slightly faster. + +You can run the performance benchmarks on your computer or server with the included +script: + +.. code-block:: bash + + $ make performance + fast_compiled valid ==> 0.0464646 + fast_compiled invalid ==> 0.0030227 + fast_file valid ==> 0.0461219 + fast_file invalid ==> 0.0030608 + fast_not_compiled valid ==> 11.4627202 + fast_not_compiled invalid ==> 2.5726230 + jsonschema valid ==> 7.5844927 + jsonschema invalid ==> 1.9204665 + jsonschema_compiled valid ==> 0.6938364 + jsonschema_compiled invalid ==> 0.0359244 + jsonspec valid ==> 9.0715843 + jsonspec invalid ==> 2.1650488 + validictory valid ==> 0.4874793 + validictory invalid ==> 0.0232244 + +This library follows and implements `JSON schema draft-04, draft-06, and draft-07 +<http://json-schema.org>`_. Sometimes it's not perfectly clear, so I recommend also +checking out this `understanding JSON schema <https://json-schema.org/understanding-json-schema/>`_.
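As noted above, the best efficiency comes from compiling a schema once and reusing the resulting validator (a minimal sketch):

.. code-block:: python

    import fastjsonschema

    validate = fastjsonschema.compile({'type': 'number', 'minimum': 0})
    for value in (0, 1.5, 42):
        validate(value)  # reuses the compiled validator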
+ +Note that there are some differences compared to the JSON schema standard: + + * Regular expressions are full Python ones, not only what JSON schema allows. It's easier + to allow everything, and also it's faster to compile without limits. So keep in mind that when + you use a more advanced regular expression, it may not work with other libraries or in + other languages. + * Because Python matches a newline for a dollar in regular expressions (``a$`` matches ``a`` and ``a\\n``), + ``\Z`` is used instead of ``$``, and all dollars in your regular expression are changed to ``\\Z`` + as well. When you want to use a dollar as a regular character, you have to escape it (``\$``). + * JSON schema says you can use the keyword ``default`` for providing default values. This implementation + uses that and always returns transformed input data. + +Usage +***** + +.. code-block:: python + + import fastjsonschema + + point_schema = { + "type": "object", + "properties": { + "x": { + "type": "number", + }, + "y": { + "type": "number", + }, + }, + "required": ["x", "y"], + "additionalProperties": False, + } + + point_validator = fastjsonschema.compile(point_schema) + try: + point_validator({"x": 1.0, "y": 2.0}) + except fastjsonschema.JsonSchemaException as e: + print(f"Data failed validation: {e}") + +API +*** +""" +from functools import partial, update_wrapper + +from .draft04 import CodeGeneratorDraft04 +from .draft06 import CodeGeneratorDraft06 +from .draft07 import CodeGeneratorDraft07 +from .exceptions import JsonSchemaException, JsonSchemaValueException, JsonSchemaDefinitionException +from .ref_resolver import RefResolver +from .version import VERSION + +__all__ = ( + 'VERSION', + 'JsonSchemaException', + 'JsonSchemaValueException', + 'JsonSchemaDefinitionException', + 'validate', + 'compile', + 'compile_to_code', +) + + +def validate(definition, data, handlers={}, formats={}, use_default=True, use_formats=True): + """ + Validation function for lazy programmers or for use cases when you need + to call validation only once, so you do not have to compile it first. + Use it only when you do not care about performance (even though it will + still be faster than alternative implementations). + + .. code-block:: python + + import fastjsonschema + + fastjsonschema.validate({'type': 'string'}, 'hello') + # same as: compile({'type': 'string'})('hello') + + Preferred is to use the :any:`compile` function. + """ + return compile(definition, handlers, formats, use_default, use_formats)(data) + + +#TODO: Change use_default to False when upgrading to version 3. +# pylint: disable=redefined-builtin,dangerous-default-value,exec-used +def compile(definition, handlers={}, formats={}, use_default=True, use_formats=True): + """ + Generates a validation function for validating the JSON schema passed in ``definition``. + Example: + + .. code-block:: python + + import fastjsonschema + + validate = fastjsonschema.compile({'type': 'string'}) + validate('hello') + + This implementation supports the keyword ``default`` (it can be turned off + by passing ``use_default=False``): + + .. code-block:: python + + validate = fastjsonschema.compile({ + 'type': 'object', + 'properties': { + 'a': {'type': 'number', 'default': 42}, + }, + }) + + data = validate({}) + assert data == {'a': 42} + + Supported implementations are draft-04, draft-06 and draft-07. Which version + should be used is determined by ``$schema`` in your ``definition``. When not + specified, the latest implementation is used (draft-07). + + .. code-block:: python + + validate = fastjsonschema.compile({ + '$schema': 'http://json-schema.org/draft-04/schema', + 'type': 'number', + }) + + You can pass a mapping from URI to a function that should be used to retrieve + remote schemas used in your ``definition`` in the parameter ``handlers``. + + Also, you can pass a mapping for custom formats. The key is the name of your + formatter and the value can be a regular expression, which will be compiled, or + a callback returning `bool` (or you can raise your own exception). + + .. code-block:: python + + validate = fastjsonschema.compile(definition, formats={ + 'foo': r'foo|bar', + 'bar': lambda value: value in ('foo', 'bar'), + }) + + Note that formats are automatically used as assertions. This can be turned + off by passing `use_formats=False`. When disabled, custom formats are + disabled as well. (Added in 2.19.0.) + + Exception :any:`JsonSchemaDefinitionException` is raised when generating the + code fails (bad definition). + + Exception :any:`JsonSchemaValueException` is raised from the generated function when + validation fails (data do not follow the definition). + """ + resolver, code_generator = _factory(definition, handlers, formats, use_default, use_formats) + global_state = code_generator.global_state + # Do not pass local state so it can recursively call itself. + exec(code_generator.func_code, global_state) + func = global_state[resolver.get_scope_name()] + if formats: + return update_wrapper(partial(func, custom_formats=formats), func) + return func + + +# pylint: disable=dangerous-default-value
def compile_to_code(definition, handlers={}, formats={}, use_default=True, use_formats=True): + """ + Generates validation code for validating the JSON schema passed in ``definition``. + Example: + + .. code-block:: python + + import fastjsonschema + + code = fastjsonschema.compile_to_code({'type': 'string'}) + with open('your_file.py', 'w') as f: + f.write(code) + + You can also use it as a script: + + .. code-block:: bash + + echo "{'type': 'string'}" | python3 -m fastjsonschema > your_file.py + python3 -m fastjsonschema "{'type': 'string'}" > your_file.py + + Exception :any:`JsonSchemaDefinitionException` is raised when generating the + code fails (bad definition). + """ + _, code_generator = _factory(definition, handlers, formats, use_default, use_formats) + return ( + 'VERSION = "' + VERSION + '"\n' + + code_generator.global_state_code + '\n' + + code_generator.func_code + ) + + +def _factory(definition, handlers, formats={}, use_default=True, use_formats=True): + resolver = RefResolver.from_schema(definition, handlers=handlers, store={}) + code_generator = _get_code_generator_class(definition)( + definition, + resolver=resolver, + formats=formats, + use_default=use_default, + use_formats=use_formats, + ) + return resolver, code_generator + + +def _get_code_generator_class(schema): + # From draft-06 on, the schema can be just a boolean value. + if isinstance(schema, dict): + schema_version = schema.get('$schema', '') + if 'draft-04' in schema_version: + return CodeGeneratorDraft04 + if 'draft-06' in schema_version: + return CodeGeneratorDraft06 + return CodeGeneratorDraft07 diff --git a/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/__main__.py b/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/__main__.py new file mode 100644 index 00000000..e5f3aa74 --- /dev/null +++ b/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/__main__.py @@ -0,0 +1,19 @@ +import json +import sys + +from .
+    Exception :any:`JsonSchemaDefinitionException` is raised when generating the
+    code fails (bad definition).
+    """
+    _, code_generator = _factory(definition, handlers, formats, use_default, use_formats)
+    return (
+        'VERSION = "' + VERSION + '"\n' +
+        code_generator.global_state_code + '\n' +
+        code_generator.func_code
+    )
+
+
+def _factory(definition, handlers, formats={}, use_default=True, use_formats=True):
+    resolver = RefResolver.from_schema(definition, handlers=handlers, store={})
+    code_generator = _get_code_generator_class(definition)(
+        definition,
+        resolver=resolver,
+        formats=formats,
+        use_default=use_default,
+        use_formats=use_formats,
+    )
+    return resolver, code_generator
+
+
+def _get_code_generator_class(schema):
+    # Since draft-06 a schema can be just a boolean value.
+    if isinstance(schema, dict):
+        schema_version = schema.get('$schema', '')
+        if 'draft-04' in schema_version:
+            return CodeGeneratorDraft04
+        if 'draft-06' in schema_version:
+            return CodeGeneratorDraft06
+    return CodeGeneratorDraft07
diff --git a/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/__main__.py b/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/__main__.py
new file mode 100644
index 00000000..e5f3aa74
--- /dev/null
+++ b/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/__main__.py
@@ -0,0 +1,19 @@
+import json
+import sys
+
+from . import compile_to_code
+
+
+def main():
+    if len(sys.argv) == 2:
+        definition = sys.argv[1]
+    else:
+        definition = sys.stdin.read()
+
+    definition = json.loads(definition)
+    code = compile_to_code(definition)
+    print(code)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/draft04.py b/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/draft04.py
new file mode 100644
index 00000000..e2d9c8ab
--- /dev/null
+++ b/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/draft04.py
@@ -0,0 +1,603 @@
+import decimal
+import re
+
+from .exceptions import JsonSchemaDefinitionException
+from .generator import CodeGenerator, enforce_list
+
+
+JSON_TYPE_TO_PYTHON_TYPE = {
+    'null': 'NoneType',
+    'boolean': 'bool',
+    'number': 'int, float, Decimal',
+    'integer': 'int',
+    'string': 'str',
+    'array': 'list, tuple',
+    'object': 'dict',
+}
+
+DOLLAR_FINDER = re.compile(r"(?<!\\)\$")  # Finds any un-escaped $ (including inside []-sets)
+
+
+class CodeGeneratorDraft04(CodeGenerator):
+    def generate_max_length(self):
+        with self.l('if isinstance({variable}, str):'):
+            if not isinstance(self._definition['maxLength'], int):
+                raise JsonSchemaDefinitionException('maxLength must be a number')
+            self.create_variable_with_length()
+            with self.l('if {variable}_len > {maxLength}:'):
+                self.exc('{name} must be shorter than or equal to {maxLength} characters', rule='maxLength')
+
+    def generate_pattern(self):
+        with self.l('if isinstance({variable}, str):'):
+            pattern = self._definition['pattern']
+            safe_pattern = pattern.replace('\\', '\\\\').replace('"', '\\"')
+            end_of_string_fixed_pattern = DOLLAR_FINDER.sub(r'\\Z', pattern)
+            self._compile_regexps[pattern] = re.compile(end_of_string_fixed_pattern)
+            with self.l('if not REGEX_PATTERNS[{}].search({variable}):', repr(pattern)):
+                self.exc('{name} must match pattern {}', safe_pattern, rule='pattern')
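+    # For example, with the rewrite above the schema pattern 'a$' is compiled
+    # as 'a\Z', so the generated check rejects 'a\n', which a bare Python '$'
+    # would still accept.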
+    def generate_format(self):
+        """
+        Means that the value has to be in the specified format: for example date, email or others.
+
+        .. code-block:: python
+
+            {'format': 'email'}
+
+        A valid value for this definition is user@example.com but not @username.
+        """
+        if not self._use_formats:
+            return
+        with self.l('if isinstance({variable}, str):'):
+            format_ = self._definition['format']
+            # Checking custom formats - user is allowed to override default formats.
+            if format_ in self._custom_formats:
+                custom_format = self._custom_formats[format_]
+                if isinstance(custom_format, str):
+                    self._generate_format(format_, format_ + '_re_pattern', custom_format)
+                else:
+                    with self.l('if not custom_formats["{}"]({variable}):', format_):
+                        self.exc('{name} must be {}', format_, rule='format')
+            elif format_ in self.FORMAT_REGEXS:
+                format_regex = self.FORMAT_REGEXS[format_]
+                self._generate_format(format_, format_ + '_re_pattern', format_regex)
+            # Format regex is used only in meta schemas.
+            elif format_ == 'regex':
+                with self.l('try:', optimize=False):
+                    self.l('re.compile({variable})')
+                with self.l('except Exception:'):
+                    self.exc('{name} must be a valid regex', rule='format')
+            else:
+                raise JsonSchemaDefinitionException('Unknown format: {}'.format(format_))
+
+
+    def _generate_format(self, format_name, regexp_name, regexp):
+        if self._definition['format'] == format_name:
+            if regexp_name not in self._compile_regexps:
+                self._compile_regexps[regexp_name] = re.compile(regexp)
+            with self.l('if not REGEX_PATTERNS["{}"].match({variable}):', regexp_name):
+                self.exc('{name} must be {}', format_name, rule='format')
+
+    def generate_minimum(self):
+        with self.l('if isinstance({variable}, (int, float, Decimal)):'):
+            if not isinstance(self._definition['minimum'], (int, float, decimal.Decimal)):
+                raise JsonSchemaDefinitionException('minimum must be a number')
+            if self._definition.get('exclusiveMinimum', False):
+                with self.l('if {variable} <= {minimum}:'):
+                    self.exc('{name} must be bigger than {minimum}', rule='minimum')
+            else:
+                with self.l('if {variable} < {minimum}:'):
+                    self.exc('{name} must be bigger than or equal to {minimum}', rule='minimum')
+
+    def generate_maximum(self):
+        with self.l('if isinstance({variable}, (int, float, Decimal)):'):
+            if not isinstance(self._definition['maximum'], (int, float, decimal.Decimal)):
+                raise JsonSchemaDefinitionException('maximum must be a number')
+            if self._definition.get('exclusiveMaximum', False):
+                with self.l('if {variable} >= {maximum}:'):
+                    self.exc('{name} must be smaller than {maximum}', rule='maximum')
+            else:
+                with self.l('if {variable} > {maximum}:'):
+                    self.exc('{name} must be smaller than or equal to {maximum}', rule='maximum')
+
+    def generate_multiple_of(self):
+        with self.l('if isinstance({variable}, (int, float, Decimal)):'):
+            if not isinstance(self._definition['multipleOf'], (int, float, decimal.Decimal)):
+                raise JsonSchemaDefinitionException('multipleOf must be a number')
+            # For a proper multiplication check of floats we need to use decimals,
+            # because, for example, 19.01 / 0.01 = 1901.0000000000002 in plain floats.
+            if isinstance(self._definition['multipleOf'], float):
+                self.l('quotient = Decimal(repr({variable})) / Decimal(repr({multipleOf}))')
+            else:
+                self.l('quotient = {variable} / {multipleOf}')
+            with self.l('if int(quotient) != quotient:'):
+                self.exc('{name} must be multiple of {multipleOf}', rule='multipleOf')
+
+    def generate_min_items(self):
+        self.create_variable_is_list()
+        with self.l('if {variable}_is_list:'):
+            if not isinstance(self._definition['minItems'], int):
+                raise JsonSchemaDefinitionException('minItems must be a number')
+            self.create_variable_with_length()
+            with self.l('if {variable}_len < {minItems}:'):
+                self.exc('{name} must contain at least {minItems} items', rule='minItems')
+
+    def generate_max_items(self):
+        self.create_variable_is_list()
+        with self.l('if {variable}_is_list:'):
+            if not isinstance(self._definition['maxItems'], int):
+                raise JsonSchemaDefinitionException('maxItems must be a number')
+            self.create_variable_with_length()
+            with self.l('if {variable}_len > {maxItems}:'):
+                self.exc('{name} must contain less than or equal to {maxItems} items', rule='maxItems')
+
+    def generate_unique_items(self):
+        """
+        With Python 3.4, the module ``timeit`` recommended this solution:
+
+        ..
code-block:: python + + >>> timeit.timeit("len(x) > len(set(x))", "x=range(100)+range(100)", number=100000) + 0.5839540958404541 + >>> timeit.timeit("len({}.fromkeys(x)) == len(x)", "x=range(100)+range(100)", number=100000) + 0.7094449996948242 + >>> timeit.timeit("seen = set(); any(i in seen or seen.add(i) for i in x)", "x=range(100)+range(100)", number=100000) + 2.0819358825683594 + >>> timeit.timeit("np.unique(x).size == len(x)", "x=range(100)+range(100); import numpy as np", number=100000) + 2.1439831256866455 + """ + unique_definition = self._definition['uniqueItems'] + if not unique_definition: + return + + self.create_variable_is_list() + with self.l('if {variable}_is_list:'): + self.l( + 'def fn(var): ' + 'return frozenset(dict((k, fn(v)) ' + 'for k, v in var.items()).items()) ' + 'if hasattr(var, "items") else tuple(fn(v) ' + 'for v in var) ' + 'if isinstance(var, (dict, list)) else str(var) ' + 'if isinstance(var, bool) else var') + self.create_variable_with_length() + with self.l('if {variable}_len > len(set(fn({variable}_x) for {variable}_x in {variable})):'): + self.exc('{name} must contain unique items', rule='uniqueItems') + + def generate_items(self): + """ + Means array is valid only when all items are valid by this definition. + + .. code-block:: python + + { + 'items': [ + {'type': 'integer'}, + {'type': 'string'}, + ], + } + + Valid arrays are those with integers or strings, nothing else. + + Since draft 06 definition can be also boolean. True means nothing, False + means everything is invalid. + """ + items_definition = self._definition['items'] + if items_definition is True: + return + + self.create_variable_is_list() + with self.l('if {variable}_is_list:'): + self.create_variable_with_length() + if items_definition is False: + with self.l('if {variable}:'): + self.exc('{name} must not be there', rule='items') + elif isinstance(items_definition, list): + for idx, item_definition in enumerate(items_definition): + with self.l('if {variable}_len > {}:', idx): + self.l('{variable}__{0} = {variable}[{0}]', idx) + self.generate_func_code_block( + item_definition, + '{}__{}'.format(self._variable, idx), + '{}[{}]'.format(self._variable_name, idx), + ) + if self._use_default and isinstance(item_definition, dict) and 'default' in item_definition: + self.l('else: {variable}.append({})', repr(item_definition['default'])) + + if 'additionalItems' in self._definition: + if self._definition['additionalItems'] is False: + with self.l('if {variable}_len > {}:', len(items_definition)): + self.exc('{name} must contain only specified items', rule='items') + else: + with self.l('for {variable}_x, {variable}_item in enumerate({variable}[{0}:], {0}):', len(items_definition)): + count = self.generate_func_code_block( + self._definition['additionalItems'], + '{}_item'.format(self._variable), + '{}[{{{}_x}}]'.format(self._variable_name, self._variable), + ) + if count == 0: + self.l('pass') + else: + if items_definition: + with self.l('for {variable}_x, {variable}_item in enumerate({variable}):'): + count = self.generate_func_code_block( + items_definition, + '{}_item'.format(self._variable), + '{}[{{{}_x}}]'.format(self._variable_name, self._variable), + ) + if count == 0: + self.l('pass') + + def generate_min_properties(self): + self.create_variable_is_dict() + with self.l('if {variable}_is_dict:'): + if not isinstance(self._definition['minProperties'], int): + raise JsonSchemaDefinitionException('minProperties must be a number') + self.create_variable_with_length() + with self.l('if 
{variable}_len < {minProperties}:'): + self.exc('{name} must contain at least {minProperties} properties', rule='minProperties') + + def generate_max_properties(self): + self.create_variable_is_dict() + with self.l('if {variable}_is_dict:'): + if not isinstance(self._definition['maxProperties'], int): + raise JsonSchemaDefinitionException('maxProperties must be a number') + self.create_variable_with_length() + with self.l('if {variable}_len > {maxProperties}:'): + self.exc('{name} must contain less than or equal to {maxProperties} properties', rule='maxProperties') + + def generate_required(self): + self.create_variable_is_dict() + with self.l('if {variable}_is_dict:'): + if not isinstance(self._definition['required'], (list, tuple)): + raise JsonSchemaDefinitionException('required must be an array') + self.l('{variable}__missing_keys = set({required}) - {variable}.keys()') + with self.l('if {variable}__missing_keys:'): + dynamic = 'str(sorted({variable}__missing_keys)) + " properties"' + self.exc('{name} must contain ', self.e(self._definition['required']), rule='required', append_to_msg=dynamic) + + def generate_properties(self): + """ + Means object with defined keys. + + .. code-block:: python + + { + 'properties': { + 'key': {'type': 'number'}, + }, + } + + Valid object is containing key called 'key' and value any number. + """ + self.create_variable_is_dict() + with self.l('if {variable}_is_dict:'): + self.create_variable_keys() + for key, prop_definition in self._definition['properties'].items(): + key_name = re.sub(r'($[^a-zA-Z]|[^a-zA-Z0-9])', '', key) + if not isinstance(prop_definition, (dict, bool)): + raise JsonSchemaDefinitionException('{}[{}] must be object'.format(self._variable, key_name)) + with self.l('if "{}" in {variable}_keys:', self.e(key)): + self.l('{variable}_keys.remove("{}")', self.e(key)) + self.l('{variable}__{0} = {variable}["{1}"]', key_name, self.e(key)) + self.generate_func_code_block( + prop_definition, + '{}__{}'.format(self._variable, key_name), + '{}.{}'.format(self._variable_name, self.e(key)), + clear_variables=True, + ) + if self._use_default and isinstance(prop_definition, dict) and 'default' in prop_definition: + self.l('else: {variable}["{}"] = {}', self.e(key), repr(prop_definition['default'])) + + def generate_pattern_properties(self): + """ + Means object with defined keys as patterns. + + .. code-block:: python + + { + 'patternProperties': { + '^x': {'type': 'number'}, + }, + } + + Valid object is containing key starting with a 'x' and value any number. + """ + self.create_variable_is_dict() + with self.l('if {variable}_is_dict:'): + self.create_variable_keys() + for pattern, definition in self._definition['patternProperties'].items(): + self._compile_regexps[pattern] = re.compile(pattern) + with self.l('for {variable}_key, {variable}_val in {variable}.items():'): + for pattern, definition in self._definition['patternProperties'].items(): + with self.l('if REGEX_PATTERNS[{}].search({variable}_key):', repr(pattern)): + with self.l('if {variable}_key in {variable}_keys:'): + self.l('{variable}_keys.remove({variable}_key)') + self.generate_func_code_block( + definition, + '{}_val'.format(self._variable), + '{}.{{{}_key}}'.format(self._variable_name, self._variable), + clear_variables=True, + ) + + def generate_additional_properties(self): + """ + Means object with keys with values defined by definition. + + .. 
code-block:: python
+
+            {
+                'properties': {
+                    'key': {'type': 'number'},
+                },
+                'additionalProperties': {'type': 'string'},
+            }
+
+        A valid object contains the key called 'key' with any number as its value,
+        and any other keys with any string values.
+        """
+        self.create_variable_is_dict()
+        with self.l('if {variable}_is_dict:'):
+            self.create_variable_keys()
+            add_prop_definition = self._definition["additionalProperties"]
+            if add_prop_definition is True or add_prop_definition == {}:
+                return
+            if add_prop_definition:
+                properties_keys = list(self._definition.get("properties", {}).keys())
+                with self.l('for {variable}_key in {variable}_keys:'):
+                    with self.l('if {variable}_key not in {}:', properties_keys):
+                        self.l('{variable}_value = {variable}.get({variable}_key)')
+                        self.generate_func_code_block(
+                            add_prop_definition,
+                            '{}_value'.format(self._variable),
+                            '{}.{{{}_key}}'.format(self._variable_name, self._variable),
+                        )
+            else:
+                with self.l('if {variable}_keys:'):
+                    self.exc('{name} must not contain "+str({variable}_keys)+" properties', rule='additionalProperties')
+
+    def generate_dependencies(self):
+        """
+        Means that when an object has one property, it also needs to have some
+        other property.
+
+        .. code-block:: python
+
+            {
+                'dependencies': {
+                    'bar': ['foo'],
+                },
+            }
+
+        A valid object contains only foo, both foo and bar, or none of them, but
+        never bar alone.
+
+        Since draft 06 the definition can also be a boolean or an empty array. True and
+        an empty array mean nothing, False means that the key cannot be there at all.
+        """
+        self.create_variable_is_dict()
+        with self.l('if {variable}_is_dict:'):
+            is_empty = True
+            for key, values in self._definition["dependencies"].items():
+                if values == [] or values is True:
+                    continue
+                is_empty = False
+                with self.l('if "{}" in {variable}:', self.e(key)):
+                    if values is False:
+                        self.exc('{} in {name} must not be there', key, rule='dependencies')
+                    elif isinstance(values, list):
+                        for value in values:
+                            with self.l('if "{}" not in {variable}:', self.e(value)):
+                                self.exc('{name} missing dependency {} for {}', self.e(value), self.e(key), rule='dependencies')
+                    else:
+                        self.generate_func_code_block(values, self._variable, self._variable_name, clear_variables=True)
+            if is_empty:
+                self.l('pass')
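A compact illustration of the two keywords above (a sketch, assuming the vendored
package is importable as ``fastjsonschema``):

.. code-block:: python

    import fastjsonschema

    validate = fastjsonschema.compile({
        'type': 'object',
        'properties': {'bar': {'type': 'number'}},
        'additionalProperties': {'type': 'string'},
        'dependencies': {'bar': ['foo']},
    })
    validate({'foo': 'x', 'bar': 1})  # ok: 'foo' is a string and 'bar' has its dependency
    # validate({'bar': 1}) would raise, roughly: data missing dependency foo for bar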
diff --git a/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/draft06.py b/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/draft06.py
new file mode 100644
index 00000000..2e679d04
--- /dev/null
+++ b/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/draft06.py
@@ -0,0 +1,188 @@
+import decimal
+from .draft04 import CodeGeneratorDraft04, JSON_TYPE_TO_PYTHON_TYPE
+from .exceptions import JsonSchemaDefinitionException
+from .generator import enforce_list
+
+
+class CodeGeneratorDraft06(CodeGeneratorDraft04):
+    FORMAT_REGEXS = dict(CodeGeneratorDraft04.FORMAT_REGEXS, **{
+        'json-pointer': r'^(/(([^/~])|(~[01]))*)*\Z',
+        'uri-reference': r'^(\w+:(\/?\/?))?[^#\\\s]*(#[^\\\s]*)?\Z',
+        'uri-template': (
+            r'^(?:(?:[^\x00-\x20\"\'<>%\\^`{|}]|%[0-9a-f]{2})|'
+            r'\{[+#./;?&=,!@|]?(?:[a-z0-9_]|%[0-9a-f]{2})+'
+            r'(?::[1-9][0-9]{0,3}|\*)?(?:,(?:[a-z0-9_]|%[0-9a-f]{2})+'
+            r'(?::[1-9][0-9]{0,3}|\*)?)*\})*\Z'
+        ),
+    })
+
+    def __init__(self, definition, resolver=None, formats={}, use_default=True, use_formats=True):
+        super().__init__(definition, resolver, formats, use_default, use_formats)
+        self._json_keywords_to_function.update((
+            ('exclusiveMinimum', self.generate_exclusive_minimum),
+            ('exclusiveMaximum', self.generate_exclusive_maximum),
+            ('propertyNames', self.generate_property_names),
+            ('contains', self.generate_contains),
+            ('const', self.generate_const),
+        ))
+
+    def _generate_func_code_block(self, definition):
+        if isinstance(definition, bool):
+            self.generate_boolean_schema()
+        elif '$ref' in definition:
+            # needed because ref overrides any sibling keywords
+            self.generate_ref()
+        else:
+            self.run_generate_functions(definition)
+
+    def generate_boolean_schema(self):
+        """
+        Means that the schema can be specified by a boolean:
+        True means everything is valid, False means everything is invalid.
+        """
+        if self._definition is True:
+            self.l('pass')
+        if self._definition is False:
+            self.exc('{name} must not be there')
+
+    def generate_type(self):
+        """
+        Validation of type. Can be one type or a list of types.
+
+        Since draft 06, a float without a fractional part is an integer.
+
+        .. code-block:: python
+
+            {'type': 'string'}
+            {'type': ['string', 'number']}
+        """
+        types = enforce_list(self._definition['type'])
+        try:
+            python_types = ', '.join(JSON_TYPE_TO_PYTHON_TYPE[t] for t in types)
+        except KeyError as exc:
+            raise JsonSchemaDefinitionException('Unknown type: {}'.format(exc))
+
+        extra = ''
+
+        if 'integer' in types:
+            extra += ' and not (isinstance({variable}, float) and {variable}.is_integer())'.format(
+                variable=self._variable,
+            )
+
+        if ('number' in types or 'integer' in types) and 'boolean' not in types:
+            extra += ' or isinstance({variable}, bool)'.format(variable=self._variable)
+
+        with self.l('if not isinstance({variable}, ({})){}:', python_types, extra):
+            self.exc('{name} must be {}', ' or '.join(types), rule='type')
+
+    def generate_exclusive_minimum(self):
+        with self.l('if isinstance({variable}, (int, float, Decimal)):'):
+            if not isinstance(self._definition['exclusiveMinimum'], (int, float, decimal.Decimal)):
+                raise JsonSchemaDefinitionException('exclusiveMinimum must be an integer, a float or a decimal')
+            with self.l('if {variable} <= {exclusiveMinimum}:'):
+                self.exc('{name} must be bigger than {exclusiveMinimum}', rule='exclusiveMinimum')
+
+    def generate_exclusive_maximum(self):
+        with self.l('if isinstance({variable}, (int, float, Decimal)):'):
+            if not isinstance(self._definition['exclusiveMaximum'], (int, float, decimal.Decimal)):
+                raise JsonSchemaDefinitionException('exclusiveMaximum must be an integer, a float or a decimal')
+            with self.l('if {variable} >= {exclusiveMaximum}:'):
+                self.exc('{name} must be smaller than {exclusiveMaximum}', rule='exclusiveMaximum')
+
+    def generate_property_names(self):
+        """
+        Means that the keys of the object must follow this definition.
+
+        .. code-block:: python
+
+            {
+                'propertyNames': {
+                    'maxLength': 3,
+                },
+            }
+
+        Valid keys for this definition are foo, bar, ..., but not foobar, for example.
+        """
+        property_names_definition = self._definition.get('propertyNames', {})
+        if property_names_definition is True:
+            pass
+        elif property_names_definition is False:
+            self.create_variable_keys()
+            with self.l('if {variable}_keys:'):
+                self.exc('{name} must not be there', rule='propertyNames')
+        else:
+            self.create_variable_is_dict()
+            with self.l('if {variable}_is_dict:'):
+                self.create_variable_with_length()
+                with self.l('if {variable}_len != 0:'):
+                    self.l('{variable}_property_names = True')
+                    with self.l('for {variable}_key in {variable}:'):
+                        with self.l('try:'):
+                            self.generate_func_code_block(
+                                property_names_definition,
+                                '{}_key'.format(self._variable),
+                                self._variable_name,
+                                clear_variables=True,
+                            )
+                        with self.l('except JsonSchemaValueException:'):
+                            self.l('{variable}_property_names = False')
+                    with self.l('if not {variable}_property_names:'):
+                        self.exc('{name} must be named by propertyName definition', rule='propertyNames')
+
+    def generate_contains(self):
+        """
+        Means that the array must contain at least one item matching the definition.
+
+        .. code-block:: python
+
+            {
+                'contains': {
+                    'type': 'number',
+                },
+            }
+
+        Any array with at least one number is valid.
+        """
+        self.create_variable_is_list()
+        with self.l('if {variable}_is_list:'):
+            contains_definition = self._definition['contains']
+
+            if contains_definition is False:
+                self.exc('{name} is always invalid', rule='contains')
+            elif contains_definition is True:
+                with self.l('if not {variable}:'):
+                    self.exc('{name} must not be empty', rule='contains')
+            else:
+                self.l('{variable}_contains = False')
+                with self.l('for {variable}_key in {variable}:'):
+                    with self.l('try:'):
+                        self.generate_func_code_block(
+                            contains_definition,
+                            '{}_key'.format(self._variable),
+                            self._variable_name,
+                            clear_variables=True,
+                        )
+                        self.l('{variable}_contains = True')
+                        self.l('break')
+                    self.l('except JsonSchemaValueException: pass')
+
+                with self.l('if not {variable}_contains:'):
+                    self.exc('{name} must contain one of contains definition', rule='contains')
+
+    def generate_const(self):
+        """
+        Means that the value is valid when it is equal to the const definition.
+
+        .. code-block:: python
+
+            {
+                'const': 42,
+            }
+
+        The only valid value in this example is 42.
+        """
+        const = self._definition['const']
+        if isinstance(const, str):
+            const = '"{}"'.format(self.e(const))
+        with self.l('if {variable} != {}:', const):
+            self.exc('{name} must be same as const definition: {definition_rule}', rule='const')
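A small sketch of the draft-06 additions above (assuming the package is
importable as ``fastjsonschema``):

.. code-block:: python

    import fastjsonschema

    validate = fastjsonschema.compile({
        'type': 'object',
        'propertyNames': {'maxLength': 4},
        'properties': {'kind': {'const': 'point'}},
        'required': ['kind'],
    })
    validate({'kind': 'point'})  # ok: every key is short enough and 'kind' matches the const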
diff --git a/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/draft07.py b/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/draft07.py
new file mode 100644
index 00000000..18546d9c
--- /dev/null
+++ b/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/draft07.py
@@ -0,0 +1,116 @@
+from .draft06 import CodeGeneratorDraft06
+
+
+class CodeGeneratorDraft07(CodeGeneratorDraft06):
+    FORMAT_REGEXS = dict(CodeGeneratorDraft06.FORMAT_REGEXS, **{
+        'date': r'^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})\Z',
+        'iri': r'^\w+:(\/?\/?)[^\s]+\Z',
+        'iri-reference': r'^(\w+:(\/?\/?))?[^#\\\s]*(#[^\\\s]*)?\Z',
+        'idn-email': r'^[^@]+@[^@]+\.[^@]+\Z',
+        #'idn-hostname': r'',
+        'relative-json-pointer': r'^(?:0|[1-9][0-9]*)(?:#|(?:\/(?:[^~/]|~0|~1)*)*)\Z',
+        #'regex': r'',
+        'time': (
+            r'^(?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
+            r'(?::(?P<second>\d{1,2})(?:\.(?P<microseconds>\d{1,6}))?'
+            r'([zZ]|[+-]\d\d:\d\d)?)?\Z'
+        ),
+    })
+
+    def __init__(self, definition, resolver=None, formats={}, use_default=True, use_formats=True):
+        super().__init__(definition, resolver, formats, use_default, use_formats)
+        # pylint: disable=duplicate-code
+        self._json_keywords_to_function.update((
+            ('if', self.generate_if_then_else),
+            ('contentEncoding', self.generate_content_encoding),
+            ('contentMediaType', self.generate_content_media_type),
+        ))
+
+    def generate_if_then_else(self):
+        """
+        Implementation of if-then-else.
+
+        .. code-block:: python
+
+            {
+                'if': {
+                    'exclusiveMaximum': 0,
+                },
+                'then': {
+                    'minimum': -10,
+                },
+                'else': {
+                    'multipleOf': 2,
+                },
+            }
+
+        Valid values are anything between -10 and 0, or any multiple of two.
+        """
+        with self.l('try:', optimize=False):
+            self.generate_func_code_block(
+                self._definition['if'],
+                self._variable,
+                self._variable_name,
+                clear_variables=True
+            )
+        with self.l('except JsonSchemaValueException:'):
+            if 'else' in self._definition:
+                self.generate_func_code_block(
+                    self._definition['else'],
+                    self._variable,
+                    self._variable_name,
+                    clear_variables=True
+                )
+            else:
+                self.l('pass')
+        if 'then' in self._definition:
+            with self.l('else:'):
+                self.generate_func_code_block(
+                    self._definition['then'],
+                    self._variable,
+                    self._variable_name,
+                    clear_variables=True
+                )
+
+    def generate_content_encoding(self):
+        """
+        Means decoding the value when it's encoded by base64.
+
+        .. code-block:: python
+
+            {
+                'contentEncoding': 'base64',
+            }
+        """
+        if self._definition['contentEncoding'] == 'base64':
+            with self.l('if isinstance({variable}, str):'):
+                with self.l('try:'):
+                    self.l('import base64')
+                    self.l('{variable} = base64.b64decode({variable})')
+                with self.l('except Exception:'):
+                    self.exc('{name} must be encoded by base64')
+                with self.l('if {variable} == "":'):
+                    self.exc('contentEncoding must be base64')
+
+    def generate_content_media_type(self):
+        """
+        Means loading the value when it's specified as JSON.
+
+        .. code-block:: python
+
+            {
+                'contentMediaType': 'application/json',
+            }
+        """
+        if self._definition['contentMediaType'] == 'application/json':
+            with self.l('if isinstance({variable}, bytes):'):
+                with self.l('try:'):
+                    self.l('{variable} = {variable}.decode("utf-8")')
+                with self.l('except Exception:'):
+                    self.exc('{name} must be encoded by utf8')
+            with self.l('if isinstance({variable}, str):'):
+                with self.l('try:'):
+                    self.l('import json')
+                    self.l('{variable} = json.loads({variable})')
+                with self.l('except Exception:'):
+                    self.exc('{name} must be valid JSON')
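A sketch of the two content keywords working together (assuming the package is
importable as ``fastjsonschema``; the argument is base64 for ``{"key": "value"}``):

.. code-block:: python

    import fastjsonschema

    validate = fastjsonschema.compile({
        'contentEncoding': 'base64',
        'contentMediaType': 'application/json',
    })
    validate('eyJrZXkiOiAidmFsdWUifQ==')  # returns {'key': 'value'}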
diff --git a/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/exceptions.py b/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/exceptions.py
new file mode 100644
index 00000000..d2dddd6a
--- /dev/null
+++ b/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/exceptions.py
@@ -0,0 +1,51 @@
+import re
+
+
+SPLIT_RE = re.compile(r'[\.\[\]]+')
+
+
+class JsonSchemaException(ValueError):
+    """
+    Base exception of the ``fastjsonschema`` library.
+    """
+
+
+class JsonSchemaValueException(JsonSchemaException):
+    """
+    Exception raised by the validation function. Available properties:
+
+     * ``message`` containing human-readable information about what is wrong (e.g. ``data.property[index] must be smaller than or equal to 42``),
+     * the invalid ``value`` (e.g. ``60``),
+     * the ``name`` of the path in the data structure (e.g. ``data.property[index]``),
+     * the ``path`` as an array in the data structure (e.g. ``['data', 'property', 'index']``),
+     * the whole ``definition`` which the ``value`` has to fulfil (e.g. ``{'type': 'number', 'maximum': 42}``),
+     * the ``rule`` which the ``value`` is breaking (e.g. ``maximum``)
+     * and the ``rule_definition`` (e.g. ``42``).
+
+    .. versionchanged:: 2.14.0
+        Added all extra properties.
+    """
+
+    def __init__(self, message, value=None, name=None, definition=None, rule=None):
+        super().__init__(message)
+        self.message = message
+        self.value = value
+        self.name = name
+        self.definition = definition
+        self.rule = rule
+
+    @property
+    def path(self):
+        return [item for item in SPLIT_RE.split(self.name) if item != '']
+
+    @property
+    def rule_definition(self):
+        if not self.rule or not self.definition:
+            return None
+        return self.definition.get(self.rule)
+
+
+class JsonSchemaDefinitionException(JsonSchemaException):
+    """
+    Exception raised by the generator of the validation function.
+    """
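A short sketch of catching the exception and inspecting these properties
(assuming the package is importable as ``fastjsonschema``):

.. code-block:: python

    import fastjsonschema

    validate = fastjsonschema.compile({
        'type': 'object',
        'properties': {'a': {'maximum': 42}},
    })
    try:
        validate({'a': 60})
    except fastjsonschema.JsonSchemaValueException as exc:
        print(exc.message)          # data.a must be smaller than or equal to 42
        print(exc.rule)             # maximum
        print(exc.rule_definition)  # 42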
diff --git a/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/generator.py b/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/generator.py
new file mode 100644
index 00000000..a7f96c5c
--- /dev/null
+++ b/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/generator.py
@@ -0,0 +1,348 @@
+from collections import OrderedDict
+from decimal import Decimal
+import re
+
+from .exceptions import JsonSchemaValueException, JsonSchemaDefinitionException
+from .indent import indent
+from .ref_resolver import RefResolver
+
+
+def enforce_list(variable):
+    if isinstance(variable, list):
+        return variable
+    return [variable]
+
+
+# pylint: disable=too-many-instance-attributes,too-many-public-methods
+class CodeGenerator:
+    """
+    This class is not supposed to be used directly. Anything
+    inside of this class can be changed without notice.
+
+    This class generates the code of the validation function from a JSON
+    schema object, as a string. Example:
+
+    .. code-block:: python
+
+        CodeGenerator(json_schema_definition).func_code
+    """
+
+    INDENT = 4  # spaces
+
+    def __init__(self, definition, resolver=None):
+        self._code = []
+        self._compile_regexps = {}
+        self._custom_formats = {}
+
+        # Any extra library should be here to be imported only once.
+        # Lines are imports to be printed in the file and objects
+        # key-value pair to pass to compile function directly.
+        self._extra_imports_lines = [
+            "from decimal import Decimal",
+        ]
+        self._extra_imports_objects = {
+            "Decimal": Decimal,
+        }
+
+        self._variables = set()
+        self._indent = 0
+        self._indent_last_line = None
+        self._variable = None
+        self._variable_name = None
+        self._root_definition = definition
+        self._definition = None
+
+        # map schema URIs to validation function names for functions
+        # that are not yet generated, but need to be generated
+        self._needed_validation_functions = {}
+        # validation function names that are already done
+        self._validation_functions_done = set()
+
+        if resolver is None:
+            resolver = RefResolver.from_schema(definition, store={})
+        self._resolver = resolver
+
+        # add main function to `self._needed_validation_functions`
+        self._needed_validation_functions[self._resolver.get_uri()] = self._resolver.get_scope_name()
+
+        self._json_keywords_to_function = OrderedDict()
+
+    @property
+    def func_code(self):
+        """
+        Returns the generated code of the whole validation function as a string.
+        """
+        self._generate_func_code()
+
+        return '\n'.join(self._code)
+    @property
+    def global_state(self):
+        """
+        Returns global variables for generating the function from ``func_code``. Includes
+        compiled regular expressions and imports, so that this work does not have to be
+        repeated every time the validation function is called.
+        """
+        self._generate_func_code()
+
+        return dict(
+            **self._extra_imports_objects,
+            REGEX_PATTERNS=self._compile_regexps,
+            re=re,
+            JsonSchemaValueException=JsonSchemaValueException,
+        )
+
+    @property
+    def global_state_code(self):
+        """
+        Returns global variables for generating the function from ``func_code``, as code.
+        Includes compiled regular expressions and imports.
+        """
+        self._generate_func_code()
+
+        if not self._compile_regexps:
+            return '\n'.join(self._extra_imports_lines + [
+                'from fastjsonschema import JsonSchemaValueException',
+                '',
+                '',
+            ])
+        return '\n'.join(self._extra_imports_lines + [
+            'import re',
+            'from fastjsonschema import JsonSchemaValueException',
+            '',
+            '',
+            'REGEX_PATTERNS = ' + serialize_regexes(self._compile_regexps),
+            '',
+        ])
+
+
+    def _generate_func_code(self):
+        if not self._code:
+            self.generate_func_code()
+
+    def generate_func_code(self):
+        """
+        Creates the base code of the validation function and calls the helper
+        for creating code by definition.
+        """
+        self.l('NoneType = type(None)')
+        # Generate parts that are referenced and not yet generated.
+        while self._needed_validation_functions:
+            # While generating a validation function, it can be necessary to generate
+            # a new one that is added again to `_needed_validation_functions`.
+            # Therefore a while loop is used instead of a for loop.
+            uri, name = self._needed_validation_functions.popitem()
+            self.generate_validation_function(uri, name)
+
+    def generate_validation_function(self, uri, name):
+        """
+        Generate a validation function for the given uri with the given name.
+        """
+        self._validation_functions_done.add(uri)
+        self.l('')
+        with self._resolver.resolving(uri) as definition:
+            with self.l('def {}(data, custom_formats={{}}, name_prefix=None):', name):
+                self.generate_func_code_block(definition, 'data', 'data', clear_variables=True)
+                self.l('return data')
+
+    def generate_func_code_block(self, definition, variable, variable_name, clear_variables=False):
+        """
+        Creates validation rules for the current definition.
+
+        Returns the number of validation rules generated as code.
+        """
+        backup = self._definition, self._variable, self._variable_name
+        self._definition, self._variable, self._variable_name = definition, variable, variable_name
+        if clear_variables:
+            backup_variables = self._variables
+            self._variables = set()
+
+        count = self._generate_func_code_block(definition)
+
+        self._definition, self._variable, self._variable_name = backup
+        if clear_variables:
+            self._variables = backup_variables
+
+        return count
+
+    def _generate_func_code_block(self, definition):
+        if not isinstance(definition, dict):
+            raise JsonSchemaDefinitionException("definition must be an object")
+        if '$ref' in definition:
+            # needed because ref overrides any sibling keywords
+            return self.generate_ref()
+        else:
+            return self.run_generate_functions(definition)
+
+    def run_generate_functions(self, definition):
+        """Returns the number of generate functions that were executed."""
+        count = 0
+        for key, func in self._json_keywords_to_function.items():
+            if key in definition:
+                func()
+                count += 1
+        return count
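+    # Roughly, compiling {'type': 'string'} produces code like the following
+    # (an abridged sketch, not a verbatim dump):
+    #
+    #     NoneType = type(None)
+    #
+    #     def validate(data, custom_formats={}, name_prefix=None):
+    #         if not isinstance(data, (str)):
+    #             raise JsonSchemaValueException("" + (name_prefix or "data") + " must be string",
+    #                                            value=data, name="" + (name_prefix or "data") + "",
+    #                                            definition={'type': 'string'}, rule='type')
+    #         return data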
+    def generate_ref(self):
+        """
+        A ref can be a link to a remote or local definition.
+
+        .. code-block:: python
+
+            {'$ref': 'http://json-schema.org/draft-04/schema#'}
+            {
+                'properties': {
+                    'foo': {'type': 'integer'},
+                    'bar': {'$ref': '#/properties/foo'}
+                }
+            }
+        """
+        with self._resolver.in_scope(self._definition['$ref']):
+            name = self._resolver.get_scope_name()
+            uri = self._resolver.get_uri()
+            if uri not in self._validation_functions_done:
+                self._needed_validation_functions[uri] = name
+            # call validation function
+            assert self._variable_name.startswith("data")
+            path = self._variable_name[4:]
+            name_arg = '(name_prefix or "data") + "{}"'.format(path)
+            if '{' in name_arg:
+                name_arg = name_arg + '.format(**locals())'
+            self.l('{}({variable}, custom_formats, {name_arg})', name, name_arg=name_arg)
+
+
+    # pylint: disable=invalid-name
+    @indent
+    def l(self, line, *args, **kwds):
+        """
+        Shortcut for appending a line. It's formatted with the parameters
+        ``variable``, ``variable_name`` (as ``name`` for short), all keys from the
+        current JSON schema ``definition``, and also the passed arguments in ``args``
+        and named ``kwds``.
+
+        .. code-block:: python
+
+            self.l('if {variable} not in {enum}: raise JsonSchemaValueException("Wrong!")')
+
+        When you want to indent a block, use it as a context manager. For example:
+
+        .. code-block:: python
+
+            with self.l('if {variable} not in {enum}:'):
+                self.l('raise JsonSchemaValueException("Wrong!")')
+        """
+        spaces = ' ' * self.INDENT * self._indent
+
+        name = self._variable_name
+        if name:
+            # Add name_prefix to the name when it is being outputted.
+            assert name.startswith('data')
+            name = '" + (name_prefix or "data") + "' + name[4:]
+            if '{' in name:
+                name = name + '".format(**locals()) + "'
+
+        context = dict(
+            self._definition if self._definition and self._definition is not True else {},
+            variable=self._variable,
+            name=name,
+            **kwds
+        )
+        line = line.format(*args, **context)
+        line = line.replace('\n', '\\n').replace('\r', '\\r')
+        self._code.append(spaces + line)
+        return line
+
+    def e(self, string):
+        """
+        Shortcut for escaping. Used for inserting user values into string messages.
+
+        .. code-block:: python
+
+            self.l('raise JsonSchemaValueException("Variable: {}")', self.e(variable))
+        """
+        return str(string).replace('"', '\\"')
+
+    def exc(self, msg, *args, append_to_msg=None, rule=None):
+        """
+        Shortcut for creating a raised exception in the generated code.
+        """
+        arg = '"'+msg+'"'
+        if append_to_msg:
+            arg += ' + (' + append_to_msg + ')'
+        msg = 'raise JsonSchemaValueException('+arg+', value={variable}, name="{name}", definition={definition}, rule={rule})'
+        definition = self._expand_refs(self._definition)
+        definition_rule = self.e(definition.get(rule) if isinstance(definition, dict) else None)
+        self.l(msg, *args, definition=repr(definition), rule=repr(rule), definition_rule=definition_rule)
+
+    def _expand_refs(self, definition):
+        if isinstance(definition, list):
+            return [self._expand_refs(v) for v in definition]
+        if not isinstance(definition, dict):
+            return definition
+        if "$ref" in definition and isinstance(definition["$ref"], str):
+            with self._resolver.resolving(definition["$ref"]) as schema:
+                return schema
+        return {k: self._expand_refs(v) for k, v in definition.items()}
+
+    def create_variable_with_length(self):
+        """
+        Appends code for creating a variable with the length of the checked variable
+        (for example the length of a list or dictionary), named ``{variable}_len``.
+        It can be called several times, and the variable is created only when it
+        does not exist yet.
+        """
+        variable_name = '{}_len'.format(self._variable)
+        if variable_name in self._variables:
+            return
+        self._variables.add(variable_name)
+        self.l('{variable}_len = len({variable})')
+    def create_variable_keys(self):
+        """
+        Appends code for creating a variable with the keys of the checked variable
+        (a dictionary), named ``{variable}_keys``. Similar to `create_variable_with_length`.
+        """
+        variable_name = '{}_keys'.format(self._variable)
+        if variable_name in self._variables:
+            return
+        self._variables.add(variable_name)
+        self.l('{variable}_keys = set({variable}.keys())')
+
+    def create_variable_is_list(self):
+        """
+        Appends code for creating a boolean variable telling whether the checked
+        variable is an instance of list, named ``{variable}_is_list``.
+        Similar to `create_variable_with_length`.
+        """
+        variable_name = '{}_is_list'.format(self._variable)
+        if variable_name in self._variables:
+            return
+        self._variables.add(variable_name)
+        self.l('{variable}_is_list = isinstance({variable}, (list, tuple))')
+
+    def create_variable_is_dict(self):
+        """
+        Appends code for creating a boolean variable telling whether the checked
+        variable is an instance of dict, named ``{variable}_is_dict``.
+        Similar to `create_variable_with_length`.
+        """
+        variable_name = '{}_is_dict'.format(self._variable)
+        if variable_name in self._variables:
+            return
+        self._variables.add(variable_name)
+        self.l('{variable}_is_dict = isinstance({variable}, dict)')
+
+
+def serialize_regexes(patterns_dict):
+    # Unfortunately using `pprint.pformat` causes errors,
+    # especially with big regexes.
+    regex_patterns = (
+        repr(k) + ": " + repr_regex(v)
+        for k, v in patterns_dict.items()
+    )
+    return '{\n    ' + ",\n    ".join(regex_patterns) + "\n}"
+
+
+def repr_regex(regex):
+    all_flags = ("A", "I", "DEBUG", "L", "M", "S", "X")
+    flags = " | ".join(f"re.{f}" for f in all_flags if regex.flags & getattr(re, f))
+    flags = ", " + flags if flags else ""
+    return "re.compile({!r}{})".format(regex.pattern, flags)
diff --git a/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/indent.py b/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/indent.py
new file mode 100644
index 00000000..411c69f6
--- /dev/null
+++ b/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/indent.py
@@ -0,0 +1,28 @@
+def indent(func):
+    """
+    Decorator that allows a method to be used as a normal method or as a
+    context manager for auto-indenting code blocks.
+    """
+    def wrapper(self, line, *args, optimize=True, **kwds):
+        last_line = self._indent_last_line
+        line = func(self, line, *args, **kwds)
+        # When two blocks have the same condition (such as the value having to be
+        # a dict), do the check only once and keep both under one block.
+        if optimize and last_line == line:
+            self._code.pop()
+        self._indent_last_line = line
+        return Indent(self, line)
+    return wrapper
+
+
+class Indent:
+    def __init__(self, instance, line):
+        self.instance = instance
+        self.line = line
+
+    def __enter__(self):
+        self.instance._indent += 1
+
+    def __exit__(self, type_, value, traceback):
+        self.instance._indent -= 1
+        self.instance._indent_last_line = self.line
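A minimal sketch of the pattern this decorator enables (hypothetical ``Gen``
class; the real consumer is ``CodeGenerator.l`` above):

.. code-block:: python

    class Gen:
        INDENT = 4

        def __init__(self):
            self._code = []
            self._indent = 0
            self._indent_last_line = None

        @indent
        def l(self, line):
            self._code.append(' ' * self.INDENT * self._indent + line)
            return line

    g = Gen()
    with g.l('if x:'):
        g.l('pass')
    assert g._code == ['if x:', '    pass']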
diff --git a/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/ref_resolver.py b/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/ref_resolver.py
new file mode 100644
index 00000000..b94813a6
--- /dev/null
+++ b/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/ref_resolver.py
@@ -0,0 +1,176 @@
+"""
+JSON Schema URI resolution scopes and dereferencing
+
+https://tools.ietf.org/id/draft-zyp-json-schema-04.html#rfc.section.7
+
+Code adapted from https://github.com/Julian/jsonschema
+"""
+
+import contextlib
+import json
+import re
+from urllib import parse as urlparse
+from urllib.parse import unquote
+
+from .exceptions import JsonSchemaDefinitionException
+
+
+def get_id(schema):
+    """
+    Originally the ID was ``id``; since draft-06 it's ``$id``.
+    """
+    return schema.get('$id', schema.get('id', ''))
+
+
+def resolve_path(schema, fragment):
+    """
+    Returns the definition for the given path.
+
+    The path is unescaped according to https://tools.ietf.org/html/rfc6901
+    """
+    fragment = fragment.lstrip('/')
+    parts = unquote(fragment).split('/') if fragment else []
+    for part in parts:
+        part = part.replace('~1', '/').replace('~0', '~')
+        if isinstance(schema, list):
+            schema = schema[int(part)]
+        elif part in schema:
+            schema = schema[part]
+        else:
+            raise JsonSchemaDefinitionException('Unresolvable ref: {}'.format(part))
+    return schema
+
+
+def normalize(uri):
+    return urlparse.urlsplit(uri).geturl()
+
+
+def resolve_remote(uri, handlers):
+    """
+    Resolve a remote ``uri``.
+
+    .. note::
+
+        The urllib library is used to fetch the remote ``uri``
+        if ``handlers`` do not define otherwise.
+    """
+    scheme = urlparse.urlsplit(uri).scheme
+    if scheme in handlers:
+        result = handlers[scheme](uri)
+    else:
+        from urllib.request import urlopen
+
+        req = urlopen(uri)
+        encoding = req.info().get_content_charset() or 'utf-8'
+        try:
+            result = json.loads(req.read().decode(encoding),)
+        except ValueError as exc:
+            raise JsonSchemaDefinitionException('{} failed to decode: {}'.format(uri, exc))
+    return result
+
+
+class RefResolver:
+    """
+    Resolve JSON References.
+    """
+
+    # pylint: disable=dangerous-default-value,too-many-arguments
+    def __init__(self, base_uri, schema, store={}, cache=True, handlers={}):
+        """
+        `base_uri` is the URI of the referring document from the `schema`.
+        `store` is a dictionary that will be used to cache the fetched schemas
+        (if `cache=True`).
+
+        Please note that you can have caching problems when compiling schemas
+        with colliding `$ref`. To force overwriting, use `cache=False` or
+        explicitly pass the `store` argument (with a brand new dictionary).
+        """
+        self.base_uri = base_uri
+        self.resolution_scope = base_uri
+        self.schema = schema
+        self.store = store
+        self.cache = cache
+        self.handlers = handlers
+        self.walk(schema)
+
+    @classmethod
+    def from_schema(cls, schema, handlers={}, **kwargs):
+        """
+        Construct a resolver from a JSON schema object.
+        """
+        return cls(
+            get_id(schema) if isinstance(schema, dict) else '',
+            schema,
+            handlers=handlers,
+            **kwargs
+        )
+
+    @contextlib.contextmanager
+    def in_scope(self, scope: str):
+        """
+        Context manager to handle the current resolution scope.
+        """
+        old_scope = self.resolution_scope
+        self.resolution_scope = urlparse.urljoin(old_scope, scope)
+        try:
+            yield
+        finally:
+            self.resolution_scope = old_scope
+    @contextlib.contextmanager
+    def resolving(self, ref: str):
+        """
+        Context manager which resolves a JSON ``ref`` and enters the
+        resolution scope of this ref.
+        """
+        new_uri = urlparse.urljoin(self.resolution_scope, ref)
+        uri, fragment = urlparse.urldefrag(new_uri)
+
+        if uri and normalize(uri) in self.store:
+            schema = self.store[normalize(uri)]
+        elif not uri or uri == self.base_uri:
+            schema = self.schema
+        else:
+            schema = resolve_remote(uri, self.handlers)
+            if self.cache:
+                self.store[normalize(uri)] = schema
+
+        old_base_uri, old_schema = self.base_uri, self.schema
+        self.base_uri, self.schema = uri, schema
+        try:
+            with self.in_scope(uri):
+                yield resolve_path(schema, fragment)
+        finally:
+            self.base_uri, self.schema = old_base_uri, old_schema
+
+    def get_uri(self):
+        return normalize(self.resolution_scope)
+
+    def get_scope_name(self):
+        """
+        Get the current scope and return it as a valid function name.
+        """
+        name = 'validate_' + unquote(self.resolution_scope).replace('~1', '_').replace('~0', '_').replace('"', '')
+        name = re.sub(r'($[^a-zA-Z]|[^a-zA-Z0-9])', '_', name)
+        name = name.lower().rstrip('_')
+        return name
+
+    def walk(self, node: dict):
+        """
+        Walk through the schema, dereferencing ``id`` and ``$ref`` instances.
+        """
+        if isinstance(node, bool):
+            pass
+        elif '$ref' in node and isinstance(node['$ref'], str):
+            ref = node['$ref']
+            node['$ref'] = urlparse.urljoin(self.resolution_scope, ref)
+        elif ('$id' in node or 'id' in node) and isinstance(get_id(node), str):
+            with self.in_scope(get_id(node)):
+                self.store[normalize(self.resolution_scope)] = node
+                for _, item in node.items():
+                    if isinstance(item, dict):
+                        self.walk(item)
+        else:
+            for _, item in node.items():
+                if isinstance(item, dict):
+                    self.walk(item)
diff --git a/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/version.py b/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/version.py
new file mode 100644
index 00000000..aa8b4391
--- /dev/null
+++ b/conda_lock/_vendor/poetry/core/_vendor/fastjsonschema/version.py
@@ -0,0 +1 @@
+VERSION = '2.19.1'
diff --git a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/COPYING b/conda_lock/_vendor/poetry/core/_vendor/jsonschema/COPYING
deleted file mode 100644
index af9cfbdb..00000000
--- a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/COPYING
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2013 Julian Berman
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/LICENSE b/conda_lock/_vendor/poetry/core/_vendor/jsonschema/LICENSE deleted file mode 100644 index c28adbad..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2012 Julian Berman - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/__init__.py b/conda_lock/_vendor/poetry/core/_vendor/jsonschema/__init__.py deleted file mode 100644 index 1791fe7f..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -""" -An implementation of JSON Schema for Python - -The main functionality is provided by the validator classes for each of the -supported JSON Schema versions. - -Most commonly, `validate` is the quickest way to simply validate a given -instance under a schema, and will create a validator for you. -""" - -from jsonschema.exceptions import ( - ErrorTree, FormatError, RefResolutionError, SchemaError, ValidationError -) -from jsonschema._format import ( - FormatChecker, - draft3_format_checker, - draft4_format_checker, - draft6_format_checker, - draft7_format_checker, -) -from jsonschema._types import TypeChecker -from jsonschema.validators import ( - Draft3Validator, - Draft4Validator, - Draft6Validator, - Draft7Validator, - RefResolver, - validate, -) - -__version__ = "3.2.0" diff --git a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/__main__.py b/conda_lock/_vendor/poetry/core/_vendor/jsonschema/__main__.py deleted file mode 100644 index 82c29fd3..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/__main__.py +++ /dev/null @@ -1,2 +0,0 @@ -from jsonschema.cli import main -main() diff --git a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/_format.py b/conda_lock/_vendor/poetry/core/_vendor/jsonschema/_format.py deleted file mode 100644 index 281a7cfc..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/_format.py +++ /dev/null @@ -1,425 +0,0 @@ -import datetime -import re -import socket -import struct - -from jsonschema.compat import str_types -from jsonschema.exceptions import FormatError - - -class FormatChecker(object): - """ - A ``format`` property checker. 
- - JSON Schema does not mandate that the ``format`` property actually do any - validation. If validation is desired however, instances of this class can - be hooked into validators to enable format validation. - - `FormatChecker` objects always return ``True`` when asked about - formats that they do not know how to validate. - - To check a custom format using a function that takes an instance and - returns a ``bool``, use the `FormatChecker.checks` or - `FormatChecker.cls_checks` decorators. - - Arguments: - - formats (~collections.Iterable): - - The known formats to validate. This argument can be used to - limit which formats will be used during validation. - """ - - checkers = {} - - def __init__(self, formats=None): - if formats is None: - self.checkers = self.checkers.copy() - else: - self.checkers = dict((k, self.checkers[k]) for k in formats) - - def __repr__(self): - return "".format(sorted(self.checkers)) - - def checks(self, format, raises=()): - """ - Register a decorated function as validating a new format. - - Arguments: - - format (str): - - The format that the decorated function will check. - - raises (Exception): - - The exception(s) raised by the decorated function when an - invalid instance is found. - - The exception object will be accessible as the - `jsonschema.exceptions.ValidationError.cause` attribute of the - resulting validation error. - """ - - def _checks(func): - self.checkers[format] = (func, raises) - return func - return _checks - - cls_checks = classmethod(checks) - - def check(self, instance, format): - """ - Check whether the instance conforms to the given format. - - Arguments: - - instance (*any primitive type*, i.e. str, number, bool): - - The instance to check - - format (str): - - The format that instance should conform to - - - Raises: - - FormatError: if the instance does not conform to ``format`` - """ - - if format not in self.checkers: - return - - func, raises = self.checkers[format] - result, cause = None, None - try: - result = func(instance) - except raises as e: - cause = e - if not result: - raise FormatError( - "%r is not a %r" % (instance, format), cause=cause, - ) - - def conforms(self, instance, format): - """ - Check whether the instance conforms to the given format. - - Arguments: - - instance (*any primitive type*, i.e. str, number, bool): - - The instance to check - - format (str): - - The format that instance should conform to - - Returns: - - bool: whether it conformed - """ - - try: - self.check(instance, format) - except FormatError: - return False - else: - return True - - -draft3_format_checker = FormatChecker() -draft4_format_checker = FormatChecker() -draft6_format_checker = FormatChecker() -draft7_format_checker = FormatChecker() - - -_draft_checkers = dict( - draft3=draft3_format_checker, - draft4=draft4_format_checker, - draft6=draft6_format_checker, - draft7=draft7_format_checker, -) - - -def _checks_drafts( - name=None, - draft3=None, - draft4=None, - draft6=None, - draft7=None, - raises=(), -): - draft3 = draft3 or name - draft4 = draft4 or name - draft6 = draft6 or name - draft7 = draft7 or name - - def wrap(func): - if draft3: - func = _draft_checkers["draft3"].checks(draft3, raises)(func) - if draft4: - func = _draft_checkers["draft4"].checks(draft4, raises)(func) - if draft6: - func = _draft_checkers["draft6"].checks(draft6, raises)(func) - if draft7: - func = _draft_checkers["draft7"].checks(draft7, raises)(func) - - # Oy. This is bad global state, but relied upon for now, until - # deprecation. 
See https://github.com/Julian/jsonschema/issues/519 - # and test_format_checkers_come_with_defaults - FormatChecker.cls_checks(draft7 or draft6 or draft4 or draft3, raises)( - func, - ) - return func - return wrap - - -@_checks_drafts(name="idn-email") -@_checks_drafts(name="email") -def is_email(instance): - if not isinstance(instance, str_types): - return True - return "@" in instance - - -_ipv4_re = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$") - - -@_checks_drafts( - draft3="ip-address", draft4="ipv4", draft6="ipv4", draft7="ipv4", -) -def is_ipv4(instance): - if not isinstance(instance, str_types): - return True - if not _ipv4_re.match(instance): - return False - return all(0 <= int(component) <= 255 for component in instance.split(".")) - - -if hasattr(socket, "inet_pton"): - # FIXME: Really this only should raise struct.error, but see the sadness - # that is https://twistedmatrix.com/trac/ticket/9409 - @_checks_drafts( - name="ipv6", raises=(socket.error, struct.error, ValueError), - ) - def is_ipv6(instance): - if not isinstance(instance, str_types): - return True - return socket.inet_pton(socket.AF_INET6, instance) - - -_host_name_re = re.compile(r"^[A-Za-z0-9][A-Za-z0-9\.\-]{1,255}$") - - -@_checks_drafts( - draft3="host-name", - draft4="hostname", - draft6="hostname", - draft7="hostname", -) -def is_host_name(instance): - if not isinstance(instance, str_types): - return True - if not _host_name_re.match(instance): - return False - components = instance.split(".") - for component in components: - if len(component) > 63: - return False - return True - - -try: - # The built-in `idna` codec only implements RFC 3890, so we go elsewhere. - import idna -except ImportError: - pass -else: - @_checks_drafts(draft7="idn-hostname", raises=idna.IDNAError) - def is_idn_host_name(instance): - if not isinstance(instance, str_types): - return True - idna.encode(instance) - return True - - -try: - import rfc3987 -except ImportError: - try: - from rfc3986_validator import validate_rfc3986 - except ImportError: - pass - else: - @_checks_drafts(name="uri") - def is_uri(instance): - if not isinstance(instance, str_types): - return True - return validate_rfc3986(instance, rule="URI") - - @_checks_drafts( - draft6="uri-reference", - draft7="uri-reference", - raises=ValueError, - ) - def is_uri_reference(instance): - if not isinstance(instance, str_types): - return True - return validate_rfc3986(instance, rule="URI_reference") - -else: - @_checks_drafts(draft7="iri", raises=ValueError) - def is_iri(instance): - if not isinstance(instance, str_types): - return True - return rfc3987.parse(instance, rule="IRI") - - @_checks_drafts(draft7="iri-reference", raises=ValueError) - def is_iri_reference(instance): - if not isinstance(instance, str_types): - return True - return rfc3987.parse(instance, rule="IRI_reference") - - @_checks_drafts(name="uri", raises=ValueError) - def is_uri(instance): - if not isinstance(instance, str_types): - return True - return rfc3987.parse(instance, rule="URI") - - @_checks_drafts( - draft6="uri-reference", - draft7="uri-reference", - raises=ValueError, - ) - def is_uri_reference(instance): - if not isinstance(instance, str_types): - return True - return rfc3987.parse(instance, rule="URI_reference") - - -try: - from strict_rfc3339 import validate_rfc3339 -except ImportError: - try: - from rfc3339_validator import validate_rfc3339 - except ImportError: - validate_rfc3339 = None - -if validate_rfc3339: - @_checks_drafts(name="date-time") - def is_datetime(instance): - if not 
isinstance(instance, str_types): - return True - return validate_rfc3339(instance) - - @_checks_drafts(draft7="time") - def is_time(instance): - if not isinstance(instance, str_types): - return True - return is_datetime("1970-01-01T" + instance) - - -@_checks_drafts(name="regex", raises=re.error) -def is_regex(instance): - if not isinstance(instance, str_types): - return True - return re.compile(instance) - - -@_checks_drafts(draft3="date", draft7="date", raises=ValueError) -def is_date(instance): - if not isinstance(instance, str_types): - return True - return datetime.datetime.strptime(instance, "%Y-%m-%d") - - -@_checks_drafts(draft3="time", raises=ValueError) -def is_draft3_time(instance): - if not isinstance(instance, str_types): - return True - return datetime.datetime.strptime(instance, "%H:%M:%S") - - -try: - import webcolors -except ImportError: - pass -else: - def is_css_color_code(instance): - return webcolors.normalize_hex(instance) - - @_checks_drafts(draft3="color", raises=(ValueError, TypeError)) - def is_css21_color(instance): - if ( - not isinstance(instance, str_types) or - instance.lower() in webcolors.css21_names_to_hex - ): - return True - return is_css_color_code(instance) - - def is_css3_color(instance): - if instance.lower() in webcolors.css3_names_to_hex: - return True - return is_css_color_code(instance) - - -try: - import jsonpointer -except ImportError: - pass -else: - @_checks_drafts( - draft6="json-pointer", - draft7="json-pointer", - raises=jsonpointer.JsonPointerException, - ) - def is_json_pointer(instance): - if not isinstance(instance, str_types): - return True - return jsonpointer.JsonPointer(instance) - - # TODO: I don't want to maintain this, so it - # needs to go either into jsonpointer (pending - # https://github.com/stefankoegl/python-json-pointer/issues/34) or - # into a new external library. 
- @_checks_drafts( - draft7="relative-json-pointer", - raises=jsonpointer.JsonPointerException, - ) - def is_relative_json_pointer(instance): - # Definition taken from: - # https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3 - if not isinstance(instance, str_types): - return True - non_negative_integer, rest = [], "" - for i, character in enumerate(instance): - if character.isdigit(): - non_negative_integer.append(character) - continue - - if not non_negative_integer: - return False - - rest = instance[i:] - break - return (rest == "#") or jsonpointer.JsonPointer(rest) - - -try: - import uritemplate.exceptions -except ImportError: - pass -else: - @_checks_drafts( - draft6="uri-template", - draft7="uri-template", - raises=uritemplate.exceptions.InvalidTemplate, - ) - def is_uri_template( - instance, - template_validator=uritemplate.Validator().force_balanced_braces(), - ): - template = uritemplate.URITemplate(instance) - return template_validator.validate(template) diff --git a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/_legacy_validators.py b/conda_lock/_vendor/poetry/core/_vendor/jsonschema/_legacy_validators.py deleted file mode 100644 index 264ff7d7..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/_legacy_validators.py +++ /dev/null @@ -1,141 +0,0 @@ -from jsonschema import _utils -from jsonschema.compat import iteritems -from jsonschema.exceptions import ValidationError - - -def dependencies_draft3(validator, dependencies, instance, schema): - if not validator.is_type(instance, "object"): - return - - for property, dependency in iteritems(dependencies): - if property not in instance: - continue - - if validator.is_type(dependency, "object"): - for error in validator.descend( - instance, dependency, schema_path=property, - ): - yield error - elif validator.is_type(dependency, "string"): - if dependency not in instance: - yield ValidationError( - "%r is a dependency of %r" % (dependency, property) - ) - else: - for each in dependency: - if each not in instance: - message = "%r is a dependency of %r" - yield ValidationError(message % (each, property)) - - -def disallow_draft3(validator, disallow, instance, schema): - for disallowed in _utils.ensure_list(disallow): - if validator.is_valid(instance, {"type": [disallowed]}): - yield ValidationError( - "%r is disallowed for %r" % (disallowed, instance) - ) - - -def extends_draft3(validator, extends, instance, schema): - if validator.is_type(extends, "object"): - for error in validator.descend(instance, extends): - yield error - return - for index, subschema in enumerate(extends): - for error in validator.descend(instance, subschema, schema_path=index): - yield error - - -def items_draft3_draft4(validator, items, instance, schema): - if not validator.is_type(instance, "array"): - return - - if validator.is_type(items, "object"): - for index, item in enumerate(instance): - for error in validator.descend(item, items, path=index): - yield error - else: - for (index, item), subschema in zip(enumerate(instance), items): - for error in validator.descend( - item, subschema, path=index, schema_path=index, - ): - yield error - - -def minimum_draft3_draft4(validator, minimum, instance, schema): - if not validator.is_type(instance, "number"): - return - - if schema.get("exclusiveMinimum", False): - failed = instance <= minimum - cmp = "less than or equal to" - else: - failed = instance < minimum - cmp = "less than" - - if failed: - yield ValidationError( - "%r is %s the minimum of %r" % (instance, cmp, 
minimum) - ) - - -def maximum_draft3_draft4(validator, maximum, instance, schema): - if not validator.is_type(instance, "number"): - return - - if schema.get("exclusiveMaximum", False): - failed = instance >= maximum - cmp = "greater than or equal to" - else: - failed = instance > maximum - cmp = "greater than" - - if failed: - yield ValidationError( - "%r is %s the maximum of %r" % (instance, cmp, maximum) - ) - - -def properties_draft3(validator, properties, instance, schema): - if not validator.is_type(instance, "object"): - return - - for property, subschema in iteritems(properties): - if property in instance: - for error in validator.descend( - instance[property], - subschema, - path=property, - schema_path=property, - ): - yield error - elif subschema.get("required", False): - error = ValidationError("%r is a required property" % property) - error._set( - validator="required", - validator_value=subschema["required"], - instance=instance, - schema=schema, - ) - error.path.appendleft(property) - error.schema_path.extend([property, "required"]) - yield error - - -def type_draft3(validator, types, instance, schema): - types = _utils.ensure_list(types) - - all_errors = [] - for index, type in enumerate(types): - if validator.is_type(type, "object"): - errors = list(validator.descend(instance, type, schema_path=index)) - if not errors: - return - all_errors.extend(errors) - else: - if validator.is_type(instance, type): - return - else: - yield ValidationError( - _utils.types_msg(instance, types), context=all_errors, - ) diff --git a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/_reflect.py b/conda_lock/_vendor/poetry/core/_vendor/jsonschema/_reflect.py deleted file mode 100644 index d09e38fb..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/_reflect.py +++ /dev/null @@ -1,155 +0,0 @@ -# -*- test-case-name: twisted.test.test_reflect -*- -# Copyright (c) Twisted Matrix Laboratories. -# See LICENSE for details. - -""" -Standardized versions of various cool and/or strange things that you can do -with Python's reflection capabilities. -""" - -import sys - -from jsonschema.compat import PY3 - - -class _NoModuleFound(Exception): - """ - No module was found because none exists. - """ - - - -class InvalidName(ValueError): - """ - The given name is not a dot-separated list of Python objects. - """ - - - -class ModuleNotFound(InvalidName): - """ - The module associated with the given name doesn't exist and it can't be - imported. - """ - - - -class ObjectNotFound(InvalidName): - """ - The object associated with the given name doesn't exist and it can't be - imported. - """ - - - -if PY3: - def reraise(exception, traceback): - raise exception.with_traceback(traceback) -else: - exec("""def reraise(exception, traceback): - raise exception.__class__, exception, traceback""") - -reraise.__doc__ = """ -Re-raise an exception, with an optional traceback, in a way that is compatible -with both Python 2 and Python 3. - -Note that on Python 3, re-raised exceptions will be mutated, with their -C{__traceback__} attribute being set. - -@param exception: The exception instance. -@param traceback: The traceback to use, or C{None} indicating a new traceback. -""" - - -def _importAndCheckStack(importName): - """ - Import the given name as a module, then walk the stack to determine whether - the failure was the module not existing, or some code in the module (for - example a dependent import) failing. This can be helpful to determine - whether any actual application code was run. 
For example, to distinguish - administrative error (entering the wrong module name), from programmer - error (writing buggy code in a module that fails to import). - - @param importName: The name of the module to import. - @type importName: C{str} - @raise Exception: if something bad happens. This can be any type of - exception, since nobody knows what loading some arbitrary code might - do. - @raise _NoModuleFound: if no module was found. - """ - try: - return __import__(importName) - except ImportError: - excType, excValue, excTraceback = sys.exc_info() - while excTraceback: - execName = excTraceback.tb_frame.f_globals["__name__"] - # in Python 2 execName is None when an ImportError is encountered, - # whereas in Python 3 execName is equal to the importName. - if execName is None or execName == importName: - reraise(excValue, excTraceback) - excTraceback = excTraceback.tb_next - raise _NoModuleFound() - - - -def namedAny(name): - """ - Retrieve a Python object by its fully qualified name from the global Python - module namespace. The first part of the name, that describes a module, - will be discovered and imported. Each subsequent part of the name is - treated as the name of an attribute of the object specified by all of the - name which came before it. For example, the fully-qualified name of this - object is 'twisted.python.reflect.namedAny'. - - @type name: L{str} - @param name: The name of the object to return. - - @raise InvalidName: If the name is an empty string, starts or ends with - a '.', or is otherwise syntactically incorrect. - - @raise ModuleNotFound: If the name is syntactically correct but the - module it specifies cannot be imported because it does not appear to - exist. - - @raise ObjectNotFound: If the name is syntactically correct, includes at - least one '.', but the module it specifies cannot be imported because - it does not appear to exist. - - @raise AttributeError: If an attribute of an object along the way cannot be - accessed, or a module along the way is not found. - - @return: the Python object identified by 'name'. - """ - if not name: - raise InvalidName('Empty module name') - - names = name.split('.') - - # if the name starts or ends with a '.' or contains '..', the __import__ - # will raise an 'Empty module name' error. This will provide a better error - # message.
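# Editor's note (illustrative): namedAny("jsonschema.Draft4Validator")
# imports the "jsonschema" module and then resolves the "Draft4Validator"
# attribute on it; the vendored CLI uses this to look up --validator names.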
- if '' in names: - raise InvalidName( - "name must be a string giving a '.'-separated list of Python " - "identifiers, not %r" % (name,)) - - topLevelPackage = None - moduleNames = names[:] - while not topLevelPackage: - if moduleNames: - trialname = '.'.join(moduleNames) - try: - topLevelPackage = _importAndCheckStack(trialname) - except _NoModuleFound: - moduleNames.pop() - else: - if len(names) == 1: - raise ModuleNotFound("No module named %r" % (name,)) - else: - raise ObjectNotFound('%r does not name an object' % (name,)) - - obj = topLevelPackage - for n in names[1:]: - obj = getattr(obj, n) - - return obj diff --git a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/_types.py b/conda_lock/_vendor/poetry/core/_vendor/jsonschema/_types.py deleted file mode 100644 index a71a4e34..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/_types.py +++ /dev/null @@ -1,188 +0,0 @@ -import numbers - -from pyrsistent import pmap -import attr - -from jsonschema.compat import int_types, str_types -from jsonschema.exceptions import UndefinedTypeCheck - - -def is_array(checker, instance): - return isinstance(instance, list) - - -def is_bool(checker, instance): - return isinstance(instance, bool) - - -def is_integer(checker, instance): - # bool inherits from int, so ensure bools aren't reported as ints - if isinstance(instance, bool): - return False - return isinstance(instance, int_types) - - -def is_null(checker, instance): - return instance is None - - -def is_number(checker, instance): - # bool inherits from int, so ensure bools aren't reported as ints - if isinstance(instance, bool): - return False - return isinstance(instance, numbers.Number) - - -def is_object(checker, instance): - return isinstance(instance, dict) - - -def is_string(checker, instance): - return isinstance(instance, str_types) - - -def is_any(checker, instance): - return True - - -@attr.s(frozen=True) -class TypeChecker(object): - """ - A ``type`` property checker. - - A `TypeChecker` performs type checking for an `IValidator`. Type - checks to perform are updated using `TypeChecker.redefine` or - `TypeChecker.redefine_many` and removed via `TypeChecker.remove`. - Each of these return a new `TypeChecker` object. - - Arguments: - - type_checkers (dict): - - The initial mapping of types to their checking functions. - """ - _type_checkers = attr.ib(default=pmap(), converter=pmap) - - def is_type(self, instance, type): - """ - Check if the instance is of the appropriate type. - - Arguments: - - instance (object): - - The instance to check - - type (str): - - The name of the type that is expected. - - Returns: - - bool: Whether it conformed. - - - Raises: - - `jsonschema.exceptions.UndefinedTypeCheck`: - if type is unknown to this object. - """ - try: - fn = self._type_checkers[type] - except KeyError: - raise UndefinedTypeCheck(type) - - return fn(self, instance) - - def redefine(self, type, fn): - """ - Produce a new checker with the given type redefined. - - Arguments: - - type (str): - - The name of the type to check. - - fn (collections.Callable): - - A function taking exactly two parameters - the type - checker calling the function and the instance to check. - The function should return true if instance is of this - type and false otherwise. - - Returns: - - A new `TypeChecker` instance. - """ - return self.redefine_many({type: fn}) - - def redefine_many(self, definitions=()): - """ - Produce a new checker with the given types redefined. 
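(Editor's illustration: ``checker.redefine_many({"string": fn})`` behaves
like chained ``redefine`` calls and, like them, returns a new
``TypeChecker`` rather than mutating ``self``.)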
- - Arguments: - - definitions (dict): - - A dictionary mapping types to their checking functions. - - Returns: - - A new `TypeChecker` instance. - """ - return attr.evolve( - self, type_checkers=self._type_checkers.update(definitions), - ) - - def remove(self, *types): - """ - Produce a new checker with the given types forgotten. - - Arguments: - - types (~collections.Iterable): - - the names of the types to remove. - - Returns: - - A new `TypeChecker` instance. - - Raises: - - `jsonschema.exceptions.UndefinedTypeCheck`: - - if any given type is unknown to this object. - """ - - checkers = self._type_checkers - for each in types: - try: - checkers = checkers.remove(each) - except KeyError: - raise UndefinedTypeCheck(each) - return attr.evolve(self, type_checkers=checkers) - - -draft3_type_checker = TypeChecker( - { - u"any": is_any, - u"array": is_array, - u"boolean": is_bool, - u"integer": is_integer, - u"object": is_object, - u"null": is_null, - u"number": is_number, - u"string": is_string, - }, -) -draft4_type_checker = draft3_type_checker.remove(u"any") -draft6_type_checker = draft4_type_checker.redefine( - u"integer", - lambda checker, instance: ( - is_integer(checker, instance) or - isinstance(instance, float) and instance.is_integer() - ), -) -draft7_type_checker = draft6_type_checker diff --git a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/_utils.py b/conda_lock/_vendor/poetry/core/_vendor/jsonschema/_utils.py deleted file mode 100644 index 117eec24..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/_utils.py +++ /dev/null @@ -1,215 +0,0 @@ -import itertools -import json -import os -import re - -from jsonschema.compat import MutableMapping, str_types, urlsplit - - -class URIDict(MutableMapping): - """ - Dictionary which uses normalized URIs as keys. - """ - - def normalize(self, uri): - return urlsplit(uri).geturl() - - def __init__(self, *args, **kwargs): - self.store = dict() - self.store.update(*args, **kwargs) - - def __getitem__(self, uri): - return self.store[self.normalize(uri)] - - def __setitem__(self, uri, value): - self.store[self.normalize(uri)] = value - - def __delitem__(self, uri): - del self.store[self.normalize(uri)] - - def __iter__(self): - return iter(self.store) - - def __len__(self): - return len(self.store) - - def __repr__(self): - return repr(self.store) - - -class Unset(object): - """ - An as-of-yet unset attribute or unprovided default parameter. - """ - - def __repr__(self): - return "<unset>" - - -def load_schema(name): - """ - Load a schema from ./schemas/``name``.json and return it. - """ - with open( - os.path.join(os.path.dirname(__file__), "schemas", "{0}.json".format(name)) - ) as f: - data = f.read() - - return json.loads(data) - - -def indent(string, times=1): - """ - A dumb version of `textwrap.indent` from Python 3.3. - """ - - return "\n".join(" " * (4 * times) + line for line in string.splitlines()) - - -def format_as_index(indices): - """ - Construct a single string containing indexing operations for the indices. - - For example, [1, 2, "foo"] -> [1][2]["foo"] - - Arguments: - - indices (sequence): - - The indices to format. - """ - - if not indices: - return "" - return "[%s]" % "][".join(repr(index) for index in indices) - - -def find_additional_properties(instance, schema): - """ - Return the set of additional properties for the given ``instance``. - - Weeds out properties that should have been validated by ``properties`` and - / or ``patternProperties``. - - Assumes ``instance`` is dict-like already.
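    Illustrative example (editor's addition):

        schema = {"properties": {"a": {}}, "patternProperties": {"^b": {}}}
        list(find_additional_properties({"a": 1, "b2": 2, "c": 3}, schema))
        # -> ["c"]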
- """ - - properties = schema.get("properties", {}) - patterns = "|".join(schema.get("patternProperties", {})) - for property in instance: - if property not in properties: - if patterns and re.search(patterns, property): - continue - yield property - - -def extras_msg(extras): - """ - Create an error message for extra items or properties. - """ - - if len(extras) == 1: - verb = "was" - else: - verb = "were" - return ", ".join(repr(extra) for extra in extras), verb - - -def types_msg(instance, types): - """ - Create an error message for a failure to match the given types. - - If the ``instance`` is an object and contains a ``name`` property, it will - be considered to be a description of that object and used as its type. - - Otherwise the message is simply the reprs of the given ``types``. - """ - - reprs = [] - for type in types: - try: - reprs.append(repr(type["name"])) - except Exception: - reprs.append(repr(type)) - return "%r is not of type %s" % (instance, ", ".join(reprs)) - - -def flatten(suitable_for_isinstance): - """ - isinstance() can accept a bunch of really annoying different types: - * a single type - * a tuple of types - * an arbitrary nested tree of tuples - - Return a flattened tuple of the given argument. - """ - - types = set() - - if not isinstance(suitable_for_isinstance, tuple): - suitable_for_isinstance = (suitable_for_isinstance,) - for thing in suitable_for_isinstance: - if isinstance(thing, tuple): - types.update(flatten(thing)) - else: - types.add(thing) - return tuple(types) - - -def ensure_list(thing): - """ - Wrap ``thing`` in a list if it's a single str. - - Otherwise, return it unchanged. - """ - - if isinstance(thing, str_types): - return [thing] - return thing - - -def equal(one, two): - """ - Check if two things are equal, but evade booleans and ints being equal. - """ - return unbool(one) == unbool(two) - - -def unbool(element, true=object(), false=object()): - """ - A hack to make True and 1 and False and 0 unique for ``uniq``. - """ - - if element is True: - return true - elif element is False: - return false - return element - - -def uniq(container): - """ - Check if all of a container's elements are unique. - - Successively tries first to rely that the elements are hashable, then - falls back on them being sortable, and finally falls back on brute - force. 
- """ - - try: - return len(set(unbool(i) for i in container)) == len(container) - except TypeError: - try: - sort = sorted(unbool(i) for i in container) - sliced = itertools.islice(sort, 1, None) - for i, j in zip(sort, sliced): - if i == j: - return False - except (NotImplementedError, TypeError): - seen = [] - for e in container: - e = unbool(e) - if e in seen: - return False - seen.append(e) - return True diff --git a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/_validators.py b/conda_lock/_vendor/poetry/core/_vendor/jsonschema/_validators.py deleted file mode 100644 index 179fec09..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/_validators.py +++ /dev/null @@ -1,373 +0,0 @@ -import re - -from jsonschema._utils import ( - ensure_list, - equal, - extras_msg, - find_additional_properties, - types_msg, - unbool, - uniq, -) -from jsonschema.exceptions import FormatError, ValidationError -from jsonschema.compat import iteritems - - -def patternProperties(validator, patternProperties, instance, schema): - if not validator.is_type(instance, "object"): - return - - for pattern, subschema in iteritems(patternProperties): - for k, v in iteritems(instance): - if re.search(pattern, k): - for error in validator.descend( - v, subschema, path=k, schema_path=pattern, - ): - yield error - - -def propertyNames(validator, propertyNames, instance, schema): - if not validator.is_type(instance, "object"): - return - - for property in instance: - for error in validator.descend( - instance=property, - schema=propertyNames, - ): - yield error - - -def additionalProperties(validator, aP, instance, schema): - if not validator.is_type(instance, "object"): - return - - extras = set(find_additional_properties(instance, schema)) - - if validator.is_type(aP, "object"): - for extra in extras: - for error in validator.descend(instance[extra], aP, path=extra): - yield error - elif not aP and extras: - if "patternProperties" in schema: - patterns = sorted(schema["patternProperties"]) - if len(extras) == 1: - verb = "does" - else: - verb = "do" - error = "%s %s not match any of the regexes: %s" % ( - ", ".join(map(repr, sorted(extras))), - verb, - ", ".join(map(repr, patterns)), - ) - yield ValidationError(error) - else: - error = "Additional properties are not allowed (%s %s unexpected)" - yield ValidationError(error % extras_msg(extras)) - - -def items(validator, items, instance, schema): - if not validator.is_type(instance, "array"): - return - - if validator.is_type(items, "array"): - for (index, item), subschema in zip(enumerate(instance), items): - for error in validator.descend( - item, subschema, path=index, schema_path=index, - ): - yield error - else: - for index, item in enumerate(instance): - for error in validator.descend(item, items, path=index): - yield error - - -def additionalItems(validator, aI, instance, schema): - if ( - not validator.is_type(instance, "array") or - validator.is_type(schema.get("items", {}), "object") - ): - return - - len_items = len(schema.get("items", [])) - if validator.is_type(aI, "object"): - for index, item in enumerate(instance[len_items:], start=len_items): - for error in validator.descend(item, aI, path=index): - yield error - elif not aI and len(instance) > len(schema.get("items", [])): - error = "Additional items are not allowed (%s %s unexpected)" - yield ValidationError( - error % - extras_msg(instance[len(schema.get("items", [])):]) - ) - - -def const(validator, const, instance, schema): - if not equal(instance, const): - yield ValidationError("%r 
was expected" % (const,)) - - -def contains(validator, contains, instance, schema): - if not validator.is_type(instance, "array"): - return - - if not any(validator.is_valid(element, contains) for element in instance): - yield ValidationError( - "None of %r are valid under the given schema" % (instance,) - ) - - -def exclusiveMinimum(validator, minimum, instance, schema): - if not validator.is_type(instance, "number"): - return - - if instance <= minimum: - yield ValidationError( - "%r is less than or equal to the minimum of %r" % ( - instance, minimum, - ), - ) - - -def exclusiveMaximum(validator, maximum, instance, schema): - if not validator.is_type(instance, "number"): - return - - if instance >= maximum: - yield ValidationError( - "%r is greater than or equal to the maximum of %r" % ( - instance, maximum, - ), - ) - - -def minimum(validator, minimum, instance, schema): - if not validator.is_type(instance, "number"): - return - - if instance < minimum: - yield ValidationError( - "%r is less than the minimum of %r" % (instance, minimum) - ) - - -def maximum(validator, maximum, instance, schema): - if not validator.is_type(instance, "number"): - return - - if instance > maximum: - yield ValidationError( - "%r is greater than the maximum of %r" % (instance, maximum) - ) - - -def multipleOf(validator, dB, instance, schema): - if not validator.is_type(instance, "number"): - return - - if isinstance(dB, float): - quotient = instance / dB - failed = int(quotient) != quotient - else: - failed = instance % dB - - if failed: - yield ValidationError("%r is not a multiple of %r" % (instance, dB)) - - -def minItems(validator, mI, instance, schema): - if validator.is_type(instance, "array") and len(instance) < mI: - yield ValidationError("%r is too short" % (instance,)) - - -def maxItems(validator, mI, instance, schema): - if validator.is_type(instance, "array") and len(instance) > mI: - yield ValidationError("%r is too long" % (instance,)) - - -def uniqueItems(validator, uI, instance, schema): - if ( - uI and - validator.is_type(instance, "array") and - not uniq(instance) - ): - yield ValidationError("%r has non-unique elements" % (instance,)) - - -def pattern(validator, patrn, instance, schema): - if ( - validator.is_type(instance, "string") and - not re.search(patrn, instance) - ): - yield ValidationError("%r does not match %r" % (instance, patrn)) - - -def format(validator, format, instance, schema): - if validator.format_checker is not None: - try: - validator.format_checker.check(instance, format) - except FormatError as error: - yield ValidationError(error.message, cause=error.cause) - - -def minLength(validator, mL, instance, schema): - if validator.is_type(instance, "string") and len(instance) < mL: - yield ValidationError("%r is too short" % (instance,)) - - -def maxLength(validator, mL, instance, schema): - if validator.is_type(instance, "string") and len(instance) > mL: - yield ValidationError("%r is too long" % (instance,)) - - -def dependencies(validator, dependencies, instance, schema): - if not validator.is_type(instance, "object"): - return - - for property, dependency in iteritems(dependencies): - if property not in instance: - continue - - if validator.is_type(dependency, "array"): - for each in dependency: - if each not in instance: - message = "%r is a dependency of %r" - yield ValidationError(message % (each, property)) - else: - for error in validator.descend( - instance, dependency, schema_path=property, - ): - yield error - - -def enum(validator, enums, instance, schema): - 
if instance == 0 or instance == 1: - unbooled = unbool(instance) - if all(unbooled != unbool(each) for each in enums): - yield ValidationError("%r is not one of %r" % (instance, enums)) - elif instance not in enums: - yield ValidationError("%r is not one of %r" % (instance, enums)) - - -def ref(validator, ref, instance, schema): - resolve = getattr(validator.resolver, "resolve", None) - if resolve is None: - with validator.resolver.resolving(ref) as resolved: - for error in validator.descend(instance, resolved): - yield error - else: - scope, resolved = validator.resolver.resolve(ref) - validator.resolver.push_scope(scope) - - try: - for error in validator.descend(instance, resolved): - yield error - finally: - validator.resolver.pop_scope() - - -def type(validator, types, instance, schema): - types = ensure_list(types) - - if not any(validator.is_type(instance, type) for type in types): - yield ValidationError(types_msg(instance, types)) - - -def properties(validator, properties, instance, schema): - if not validator.is_type(instance, "object"): - return - - for property, subschema in iteritems(properties): - if property in instance: - for error in validator.descend( - instance[property], - subschema, - path=property, - schema_path=property, - ): - yield error - - -def required(validator, required, instance, schema): - if not validator.is_type(instance, "object"): - return - for property in required: - if property not in instance: - yield ValidationError("%r is a required property" % property) - - -def minProperties(validator, mP, instance, schema): - if validator.is_type(instance, "object") and len(instance) < mP: - yield ValidationError( - "%r does not have enough properties" % (instance,) - ) - - -def maxProperties(validator, mP, instance, schema): - if not validator.is_type(instance, "object"): - return - if validator.is_type(instance, "object") and len(instance) > mP: - yield ValidationError("%r has too many properties" % (instance,)) - - -def allOf(validator, allOf, instance, schema): - for index, subschema in enumerate(allOf): - for error in validator.descend(instance, subschema, schema_path=index): - yield error - - -def anyOf(validator, anyOf, instance, schema): - all_errors = [] - for index, subschema in enumerate(anyOf): - errs = list(validator.descend(instance, subschema, schema_path=index)) - if not errs: - break - all_errors.extend(errs) - else: - yield ValidationError( - "%r is not valid under any of the given schemas" % (instance,), - context=all_errors, - ) - - -def oneOf(validator, oneOf, instance, schema): - subschemas = enumerate(oneOf) - all_errors = [] - for index, subschema in subschemas: - errs = list(validator.descend(instance, subschema, schema_path=index)) - if not errs: - first_valid = subschema - break - all_errors.extend(errs) - else: - yield ValidationError( - "%r is not valid under any of the given schemas" % (instance,), - context=all_errors, - ) - - more_valid = [s for i, s in subschemas if validator.is_valid(instance, s)] - if more_valid: - more_valid.append(first_valid) - reprs = ", ".join(repr(schema) for schema in more_valid) - yield ValidationError( - "%r is valid under each of %s" % (instance, reprs) - ) - - -def not_(validator, not_schema, instance, schema): - if validator.is_valid(instance, not_schema): - yield ValidationError( - "%r is not allowed for %r" % (not_schema, instance) - ) - - -def if_(validator, if_schema, instance, schema): - if validator.is_valid(instance, if_schema): - if u"then" in schema: - then = schema[u"then"] - for error in 
validator.descend(instance, then, schema_path="then"): - yield error - elif u"else" in schema: - else_ = schema[u"else"] - for error in validator.descend(instance, else_, schema_path="else"): - yield error diff --git a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/benchmarks/__init__.py b/conda_lock/_vendor/poetry/core/_vendor/jsonschema/benchmarks/__init__.py deleted file mode 100644 index e3dcc689..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/benchmarks/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -""" -Benchmarks for validation. - -This package is *not* public API. -""" diff --git a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/benchmarks/issue232.py b/conda_lock/_vendor/poetry/core/_vendor/jsonschema/benchmarks/issue232.py deleted file mode 100644 index 65e3aedf..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/benchmarks/issue232.py +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env python -""" -A performance benchmark using the example from issue #232. - -See https://github.com/Julian/jsonschema/pull/232. -""" -from twisted.python.filepath import FilePath -from pyperf import Runner -from pyrsistent import m - -from jsonschema.tests._suite import Version -import jsonschema - - -issue232 = Version( - path=FilePath(__file__).sibling("issue232"), - remotes=m(), - name="issue232", -) - - -if __name__ == "__main__": - issue232.benchmark( - runner=Runner(), - Validator=jsonschema.Draft4Validator, - ) diff --git a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/benchmarks/json_schema_test_suite.py b/conda_lock/_vendor/poetry/core/_vendor/jsonschema/benchmarks/json_schema_test_suite.py deleted file mode 100644 index 5add5051..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/benchmarks/json_schema_test_suite.py +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env python -""" -A performance benchmark using the official test suite. - -This benchmarks jsonschema using every valid example in the -JSON-Schema-Test-Suite. It will take some time to complete. -""" -from pyperf import Runner - -from jsonschema.tests._suite import Suite - - -if __name__ == "__main__": - Suite().benchmark(runner=Runner()) diff --git a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/cli.py b/conda_lock/_vendor/poetry/core/_vendor/jsonschema/cli.py deleted file mode 100644 index ab3335b2..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/cli.py +++ /dev/null @@ -1,90 +0,0 @@ -""" -The ``jsonschema`` command line. -""" -from __future__ import absolute_import -import argparse -import json -import sys - -from jsonschema import __version__ -from jsonschema._reflect import namedAny -from jsonschema.validators import validator_for - - -def _namedAnyWithDefault(name): - if "." not in name: - name = "jsonschema." + name - return namedAny(name) - - -def _json_file(path): - with open(path) as file: - return json.load(file) - - -parser = argparse.ArgumentParser( - description="JSON Schema Validation CLI", -) -parser.add_argument( - "-i", "--instance", - action="append", - dest="instances", - type=_json_file, - help=( - "a path to a JSON instance (i.e. 
filename.json) " - "to validate (may be specified multiple times)" - ), -) -parser.add_argument( - "-F", "--error-format", - default="{error.instance}: {error.message}\n", - help=( - "the format to use for each error output message, specified in " - "a form suitable for passing to str.format, which will be called " - "with 'error' for each error" - ), -) -parser.add_argument( - "-V", "--validator", - type=_namedAnyWithDefault, - help=( - "the fully qualified object name of a validator to use, or, for " - "validators that are registered with jsonschema, simply the name " - "of the class." - ), -) -parser.add_argument( - "--version", - action="version", - version=__version__, -) -parser.add_argument( - "schema", - help="the JSON Schema to validate with (i.e. schema.json)", - type=_json_file, -) - - -def parse_args(args): - arguments = vars(parser.parse_args(args=args or ["--help"])) - if arguments["validator"] is None: - arguments["validator"] = validator_for(arguments["schema"]) - return arguments - - -def main(args=sys.argv[1:]): - sys.exit(run(arguments=parse_args(args=args))) - - -def run(arguments, stdout=sys.stdout, stderr=sys.stderr): - error_format = arguments["error_format"] - validator = arguments["validator"](schema=arguments["schema"]) - - validator.check_schema(arguments["schema"]) - - errored = False - for instance in arguments["instances"] or (): - for error in validator.iter_errors(instance): - stderr.write(error_format.format(error=error)) - errored = True - return errored diff --git a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/compat.py b/conda_lock/_vendor/poetry/core/_vendor/jsonschema/compat.py deleted file mode 100644 index 47e09804..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/compat.py +++ /dev/null @@ -1,55 +0,0 @@ -""" -Python 2/3 compatibility helpers. - -Note: This module is *not* public API. -""" -import contextlib -import operator -import sys - - -try: - from collections.abc import MutableMapping, Sequence # noqa -except ImportError: - from collections import MutableMapping, Sequence # noqa - -PY3 = sys.version_info[0] >= 3 - -if PY3: - zip = zip - from functools import lru_cache - from io import StringIO as NativeIO - from urllib.parse import ( - unquote, urljoin, urlunsplit, SplitResult, urlsplit - ) - from urllib.request import pathname2url, urlopen - str_types = str, - int_types = int, - iteritems = operator.methodcaller("items") -else: - from itertools import izip as zip # noqa - from io import BytesIO as NativeIO - from urlparse import urljoin, urlunsplit, SplitResult, urlsplit - from urllib import pathname2url, unquote # noqa - import urllib2 # noqa - def urlopen(*args, **kwargs): - return contextlib.closing(urllib2.urlopen(*args, **kwargs)) - - str_types = basestring - int_types = int, long - iteritems = operator.methodcaller("iteritems") - - from functools32 import lru_cache - - -def urldefrag(url): - if "#" in url: - s, n, p, q, frag = urlsplit(url) - defrag = urlunsplit((s, n, p, q, "")) - else: - defrag = url - frag = "" - return defrag, frag - - -# flake8: noqa diff --git a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/exceptions.py b/conda_lock/_vendor/poetry/core/_vendor/jsonschema/exceptions.py deleted file mode 100644 index 691dcffe..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/exceptions.py +++ /dev/null @@ -1,374 +0,0 @@ -""" -Validation errors, and some surrounding helpers. 
-""" -from collections import defaultdict, deque -import itertools -import pprint -import textwrap - -import attr - -from jsonschema import _utils -from jsonschema.compat import PY3, iteritems - - -WEAK_MATCHES = frozenset(["anyOf", "oneOf"]) -STRONG_MATCHES = frozenset() - -_unset = _utils.Unset() - - -class _Error(Exception): - def __init__( - self, - message, - validator=_unset, - path=(), - cause=None, - context=(), - validator_value=_unset, - instance=_unset, - schema=_unset, - schema_path=(), - parent=None, - ): - super(_Error, self).__init__( - message, - validator, - path, - cause, - context, - validator_value, - instance, - schema, - schema_path, - parent, - ) - self.message = message - self.path = self.relative_path = deque(path) - self.schema_path = self.relative_schema_path = deque(schema_path) - self.context = list(context) - self.cause = self.__cause__ = cause - self.validator = validator - self.validator_value = validator_value - self.instance = instance - self.schema = schema - self.parent = parent - - for error in context: - error.parent = self - - def __repr__(self): - return "<%s: %r>" % (self.__class__.__name__, self.message) - - def __unicode__(self): - essential_for_verbose = ( - self.validator, self.validator_value, self.instance, self.schema, - ) - if any(m is _unset for m in essential_for_verbose): - return self.message - - pschema = pprint.pformat(self.schema, width=72) - pinstance = pprint.pformat(self.instance, width=72) - return self.message + textwrap.dedent(""" - - Failed validating %r in %s%s: - %s - - On %s%s: - %s - """.rstrip() - ) % ( - self.validator, - self._word_for_schema_in_error_message, - _utils.format_as_index(list(self.relative_schema_path)[:-1]), - _utils.indent(pschema), - self._word_for_instance_in_error_message, - _utils.format_as_index(self.relative_path), - _utils.indent(pinstance), - ) - - if PY3: - __str__ = __unicode__ - else: - def __str__(self): - return unicode(self).encode("utf-8") - - @classmethod - def create_from(cls, other): - return cls(**other._contents()) - - @property - def absolute_path(self): - parent = self.parent - if parent is None: - return self.relative_path - - path = deque(self.relative_path) - path.extendleft(reversed(parent.absolute_path)) - return path - - @property - def absolute_schema_path(self): - parent = self.parent - if parent is None: - return self.relative_schema_path - - path = deque(self.relative_schema_path) - path.extendleft(reversed(parent.absolute_schema_path)) - return path - - def _set(self, **kwargs): - for k, v in iteritems(kwargs): - if getattr(self, k) is _unset: - setattr(self, k, v) - - def _contents(self): - attrs = ( - "message", "cause", "context", "validator", "validator_value", - "path", "schema_path", "instance", "schema", "parent", - ) - return dict((attr, getattr(self, attr)) for attr in attrs) - - -class ValidationError(_Error): - """ - An instance was invalid under a provided schema. - """ - - _word_for_schema_in_error_message = "schema" - _word_for_instance_in_error_message = "instance" - - -class SchemaError(_Error): - """ - A schema was invalid under its corresponding metaschema. - """ - - _word_for_schema_in_error_message = "metaschema" - _word_for_instance_in_error_message = "schema" - - -@attr.s(hash=True) -class RefResolutionError(Exception): - """ - A ref could not be resolved. - """ - - _cause = attr.ib() - - def __str__(self): - return str(self._cause) - - -class UndefinedTypeCheck(Exception): - """ - A type checker was asked to check a type it did not have registered. 
- """ - - def __init__(self, type): - self.type = type - - def __unicode__(self): - return "Type %r is unknown to this type checker" % self.type - - if PY3: - __str__ = __unicode__ - else: - def __str__(self): - return unicode(self).encode("utf-8") - - -class UnknownType(Exception): - """ - A validator was asked to validate an instance against an unknown type. - """ - - def __init__(self, type, instance, schema): - self.type = type - self.instance = instance - self.schema = schema - - def __unicode__(self): - pschema = pprint.pformat(self.schema, width=72) - pinstance = pprint.pformat(self.instance, width=72) - return textwrap.dedent(""" - Unknown type %r for validator with schema: - %s - - While checking instance: - %s - """.rstrip() - ) % (self.type, _utils.indent(pschema), _utils.indent(pinstance)) - - if PY3: - __str__ = __unicode__ - else: - def __str__(self): - return unicode(self).encode("utf-8") - - -class FormatError(Exception): - """ - Validating a format failed. - """ - - def __init__(self, message, cause=None): - super(FormatError, self).__init__(message, cause) - self.message = message - self.cause = self.__cause__ = cause - - def __unicode__(self): - return self.message - - if PY3: - __str__ = __unicode__ - else: - def __str__(self): - return self.message.encode("utf-8") - - -class ErrorTree(object): - """ - ErrorTrees make it easier to check which validations failed. - """ - - _instance = _unset - - def __init__(self, errors=()): - self.errors = {} - self._contents = defaultdict(self.__class__) - - for error in errors: - container = self - for element in error.path: - container = container[element] - container.errors[error.validator] = error - - container._instance = error.instance - - def __contains__(self, index): - """ - Check whether ``instance[index]`` has any errors. - """ - - return index in self._contents - - def __getitem__(self, index): - """ - Retrieve the child tree one level down at the given ``index``. - - If the index is not in the instance that this tree corresponds to and - is not known by this tree, whatever error would be raised by - ``instance.__getitem__`` will be propagated (usually this is some - subclass of `exceptions.LookupError`. - """ - - if self._instance is not _unset and index not in self: - self._instance[index] - return self._contents[index] - - def __setitem__(self, index, value): - """ - Add an error to the tree at the given ``index``. - """ - self._contents[index] = value - - def __iter__(self): - """ - Iterate (non-recursively) over the indices in the instance with errors. - """ - - return iter(self._contents) - - def __len__(self): - """ - Return the `total_errors`. - """ - return self.total_errors - - def __repr__(self): - return "<%s (%s total errors)>" % (self.__class__.__name__, len(self)) - - @property - def total_errors(self): - """ - The total number of errors in the entire tree, including children. - """ - - child_errors = sum(len(tree) for _, tree in iteritems(self._contents)) - return len(self.errors) + child_errors - - -def by_relevance(weak=WEAK_MATCHES, strong=STRONG_MATCHES): - """ - Create a key function that can be used to sort errors by relevance. - - Arguments: - weak (set): - a collection of validator names to consider to be "weak". - If there are two errors at the same level of the instance - and one is in the set of weak validator names, the other - error will take priority. 
By default, :validator:`anyOf` and - :validator:`oneOf` are considered weak validators and will - be superseded by other same-level validation errors. - - strong (set): - a collection of validator names to consider to be "strong" - """ - def relevance(error): - validator = error.validator - return -len(error.path), validator not in weak, validator in strong - return relevance - - -relevance = by_relevance() - - -def best_match(errors, key=relevance): - """ - Try to find an error that appears to be the best match among given errors. - - In general, errors that are higher up in the instance (i.e. for which - `ValidationError.path` is shorter) are considered better matches, - since they indicate "more" is wrong with the instance. - - If the resulting match is either :validator:`oneOf` or :validator:`anyOf`, - the *opposite* assumption is made -- i.e. the deepest error is picked, - since these validators only need to match once, and any other errors may - not be relevant. - - Arguments: - errors (collections.Iterable): - - the errors to select from. Do not provide a mixture of - errors from different validation attempts (i.e. from - different instances or schemas), since it won't produce - sensible output. - - key (collections.Callable): - - the key to use when sorting errors. See `relevance` and - transitively `by_relevance` for more details (the default is - to sort with the defaults of that function). Changing the - default is only useful if you want to change the function - that rates errors but still want the error context descent - done by this function. - - Returns: - the best matching error, or ``None`` if the iterable was empty - - .. note:: - - This function is a heuristic. Its return value may change for a given - set of inputs from version to version if better heuristics are added.
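    Example (editor's sketch, using the Draft7Validator class this package
    defines):

        errors = Draft7Validator(schema).iter_errors(instance)
        error = best_match(errors)
        if error is not None:
            raise error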
- """ - errors = iter(errors) - best = next(errors, None) - if best is None: - return - best = max(itertools.chain([best], errors), key=key) - - while best.context: - best = min(best.context, key=key) - return best diff --git a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/schemas/draft3.json b/conda_lock/_vendor/poetry/core/_vendor/jsonschema/schemas/draft3.json deleted file mode 100644 index f8a09c56..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/schemas/draft3.json +++ /dev/null @@ -1,199 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-03/schema#", - "dependencies": { - "exclusiveMaximum": "maximum", - "exclusiveMinimum": "minimum" - }, - "id": "http://json-schema.org/draft-03/schema#", - "properties": { - "$ref": { - "format": "uri", - "type": "string" - }, - "$schema": { - "format": "uri", - "type": "string" - }, - "additionalItems": { - "default": {}, - "type": [ - { - "$ref": "#" - }, - "boolean" - ] - }, - "additionalProperties": { - "default": {}, - "type": [ - { - "$ref": "#" - }, - "boolean" - ] - }, - "default": { - "type": "any" - }, - "dependencies": { - "additionalProperties": { - "items": { - "type": "string" - }, - "type": [ - "string", - "array", - { - "$ref": "#" - } - ] - }, - "default": {}, - "type": [ - "string", - "array", - "object" - ] - }, - "description": { - "type": "string" - }, - "disallow": { - "items": { - "type": [ - "string", - { - "$ref": "#" - } - ] - }, - "type": [ - "string", - "array" - ], - "uniqueItems": true - }, - "divisibleBy": { - "default": 1, - "exclusiveMinimum": true, - "minimum": 0, - "type": "number" - }, - "enum": { - "type": "array" - }, - "exclusiveMaximum": { - "default": false, - "type": "boolean" - }, - "exclusiveMinimum": { - "default": false, - "type": "boolean" - }, - "extends": { - "default": {}, - "items": { - "$ref": "#" - }, - "type": [ - { - "$ref": "#" - }, - "array" - ] - }, - "format": { - "type": "string" - }, - "id": { - "format": "uri", - "type": "string" - }, - "items": { - "default": {}, - "items": { - "$ref": "#" - }, - "type": [ - { - "$ref": "#" - }, - "array" - ] - }, - "maxDecimal": { - "minimum": 0, - "type": "number" - }, - "maxItems": { - "minimum": 0, - "type": "integer" - }, - "maxLength": { - "type": "integer" - }, - "maximum": { - "type": "number" - }, - "minItems": { - "default": 0, - "minimum": 0, - "type": "integer" - }, - "minLength": { - "default": 0, - "minimum": 0, - "type": "integer" - }, - "minimum": { - "type": "number" - }, - "pattern": { - "format": "regex", - "type": "string" - }, - "patternProperties": { - "additionalProperties": { - "$ref": "#" - }, - "default": {}, - "type": "object" - }, - "properties": { - "additionalProperties": { - "$ref": "#", - "type": "object" - }, - "default": {}, - "type": "object" - }, - "required": { - "default": false, - "type": "boolean" - }, - "title": { - "type": "string" - }, - "type": { - "default": "any", - "items": { - "type": [ - "string", - { - "$ref": "#" - } - ] - }, - "type": [ - "string", - "array" - ], - "uniqueItems": true - }, - "uniqueItems": { - "default": false, - "type": "boolean" - } - }, - "type": "object" -} diff --git a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/schemas/draft4.json b/conda_lock/_vendor/poetry/core/_vendor/jsonschema/schemas/draft4.json deleted file mode 100644 index 9b666cff..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/schemas/draft4.json +++ /dev/null @@ -1,222 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "default": {}, - "definitions": { - 
"positiveInteger": { - "minimum": 0, - "type": "integer" - }, - "positiveIntegerDefault0": { - "allOf": [ - { - "$ref": "#/definitions/positiveInteger" - }, - { - "default": 0 - } - ] - }, - "schemaArray": { - "items": { - "$ref": "#" - }, - "minItems": 1, - "type": "array" - }, - "simpleTypes": { - "enum": [ - "array", - "boolean", - "integer", - "null", - "number", - "object", - "string" - ] - }, - "stringArray": { - "items": { - "type": "string" - }, - "minItems": 1, - "type": "array", - "uniqueItems": true - } - }, - "dependencies": { - "exclusiveMaximum": [ - "maximum" - ], - "exclusiveMinimum": [ - "minimum" - ] - }, - "description": "Core schema meta-schema", - "id": "http://json-schema.org/draft-04/schema#", - "properties": { - "$schema": { - "format": "uri", - "type": "string" - }, - "additionalItems": { - "anyOf": [ - { - "type": "boolean" - }, - { - "$ref": "#" - } - ], - "default": {} - }, - "additionalProperties": { - "anyOf": [ - { - "type": "boolean" - }, - { - "$ref": "#" - } - ], - "default": {} - }, - "allOf": { - "$ref": "#/definitions/schemaArray" - }, - "anyOf": { - "$ref": "#/definitions/schemaArray" - }, - "default": {}, - "definitions": { - "additionalProperties": { - "$ref": "#" - }, - "default": {}, - "type": "object" - }, - "dependencies": { - "additionalProperties": { - "anyOf": [ - { - "$ref": "#" - }, - { - "$ref": "#/definitions/stringArray" - } - ] - }, - "type": "object" - }, - "description": { - "type": "string" - }, - "enum": { - "type": "array" - }, - "exclusiveMaximum": { - "default": false, - "type": "boolean" - }, - "exclusiveMinimum": { - "default": false, - "type": "boolean" - }, - "format": { - "type": "string" - }, - "id": { - "format": "uri", - "type": "string" - }, - "items": { - "anyOf": [ - { - "$ref": "#" - }, - { - "$ref": "#/definitions/schemaArray" - } - ], - "default": {} - }, - "maxItems": { - "$ref": "#/definitions/positiveInteger" - }, - "maxLength": { - "$ref": "#/definitions/positiveInteger" - }, - "maxProperties": { - "$ref": "#/definitions/positiveInteger" - }, - "maximum": { - "type": "number" - }, - "minItems": { - "$ref": "#/definitions/positiveIntegerDefault0" - }, - "minLength": { - "$ref": "#/definitions/positiveIntegerDefault0" - }, - "minProperties": { - "$ref": "#/definitions/positiveIntegerDefault0" - }, - "minimum": { - "type": "number" - }, - "multipleOf": { - "exclusiveMinimum": true, - "minimum": 0, - "type": "number" - }, - "not": { - "$ref": "#" - }, - "oneOf": { - "$ref": "#/definitions/schemaArray" - }, - "pattern": { - "format": "regex", - "type": "string" - }, - "patternProperties": { - "additionalProperties": { - "$ref": "#" - }, - "default": {}, - "type": "object" - }, - "properties": { - "additionalProperties": { - "$ref": "#" - }, - "default": {}, - "type": "object" - }, - "required": { - "$ref": "#/definitions/stringArray" - }, - "title": { - "type": "string" - }, - "type": { - "anyOf": [ - { - "$ref": "#/definitions/simpleTypes" - }, - { - "items": { - "$ref": "#/definitions/simpleTypes" - }, - "minItems": 1, - "type": "array", - "uniqueItems": true - } - ] - }, - "uniqueItems": { - "default": false, - "type": "boolean" - } - }, - "type": "object" -} diff --git a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/schemas/draft6.json b/conda_lock/_vendor/poetry/core/_vendor/jsonschema/schemas/draft6.json deleted file mode 100644 index a0d2bf78..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/schemas/draft6.json +++ /dev/null @@ -1,153 +0,0 @@ -{ - "$schema": 
"http://json-schema.org/draft-06/schema#", - "$id": "http://json-schema.org/draft-06/schema#", - "title": "Core schema meta-schema", - "definitions": { - "schemaArray": { - "type": "array", - "minItems": 1, - "items": { "$ref": "#" } - }, - "nonNegativeInteger": { - "type": "integer", - "minimum": 0 - }, - "nonNegativeIntegerDefault0": { - "allOf": [ - { "$ref": "#/definitions/nonNegativeInteger" }, - { "default": 0 } - ] - }, - "simpleTypes": { - "enum": [ - "array", - "boolean", - "integer", - "null", - "number", - "object", - "string" - ] - }, - "stringArray": { - "type": "array", - "items": { "type": "string" }, - "uniqueItems": true, - "default": [] - } - }, - "type": ["object", "boolean"], - "properties": { - "$id": { - "type": "string", - "format": "uri-reference" - }, - "$schema": { - "type": "string", - "format": "uri" - }, - "$ref": { - "type": "string", - "format": "uri-reference" - }, - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "default": {}, - "examples": { - "type": "array", - "items": {} - }, - "multipleOf": { - "type": "number", - "exclusiveMinimum": 0 - }, - "maximum": { - "type": "number" - }, - "exclusiveMaximum": { - "type": "number" - }, - "minimum": { - "type": "number" - }, - "exclusiveMinimum": { - "type": "number" - }, - "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, - "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, - "pattern": { - "type": "string", - "format": "regex" - }, - "additionalItems": { "$ref": "#" }, - "items": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/schemaArray" } - ], - "default": {} - }, - "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, - "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, - "uniqueItems": { - "type": "boolean", - "default": false - }, - "contains": { "$ref": "#" }, - "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, - "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, - "required": { "$ref": "#/definitions/stringArray" }, - "additionalProperties": { "$ref": "#" }, - "definitions": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "properties": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "patternProperties": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "propertyNames": { "format": "regex" }, - "default": {} - }, - "dependencies": { - "type": "object", - "additionalProperties": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/stringArray" } - ] - } - }, - "propertyNames": { "$ref": "#" }, - "const": {}, - "enum": { - "type": "array" - }, - "type": { - "anyOf": [ - { "$ref": "#/definitions/simpleTypes" }, - { - "type": "array", - "items": { "$ref": "#/definitions/simpleTypes" }, - "minItems": 1, - "uniqueItems": true - } - ] - }, - "format": { "type": "string" }, - "allOf": { "$ref": "#/definitions/schemaArray" }, - "anyOf": { "$ref": "#/definitions/schemaArray" }, - "oneOf": { "$ref": "#/definitions/schemaArray" }, - "not": { "$ref": "#" } - }, - "default": {} -} diff --git a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/schemas/draft7.json b/conda_lock/_vendor/poetry/core/_vendor/jsonschema/schemas/draft7.json deleted file mode 100644 index 746cde96..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/schemas/draft7.json +++ /dev/null @@ -1,166 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-07/schema#", - "$id": 
"http://json-schema.org/draft-07/schema#", - "title": "Core schema meta-schema", - "definitions": { - "schemaArray": { - "type": "array", - "minItems": 1, - "items": { "$ref": "#" } - }, - "nonNegativeInteger": { - "type": "integer", - "minimum": 0 - }, - "nonNegativeIntegerDefault0": { - "allOf": [ - { "$ref": "#/definitions/nonNegativeInteger" }, - { "default": 0 } - ] - }, - "simpleTypes": { - "enum": [ - "array", - "boolean", - "integer", - "null", - "number", - "object", - "string" - ] - }, - "stringArray": { - "type": "array", - "items": { "type": "string" }, - "uniqueItems": true, - "default": [] - } - }, - "type": ["object", "boolean"], - "properties": { - "$id": { - "type": "string", - "format": "uri-reference" - }, - "$schema": { - "type": "string", - "format": "uri" - }, - "$ref": { - "type": "string", - "format": "uri-reference" - }, - "$comment": { - "type": "string" - }, - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "default": true, - "readOnly": { - "type": "boolean", - "default": false - }, - "examples": { - "type": "array", - "items": true - }, - "multipleOf": { - "type": "number", - "exclusiveMinimum": 0 - }, - "maximum": { - "type": "number" - }, - "exclusiveMaximum": { - "type": "number" - }, - "minimum": { - "type": "number" - }, - "exclusiveMinimum": { - "type": "number" - }, - "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, - "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, - "pattern": { - "type": "string", - "format": "regex" - }, - "additionalItems": { "$ref": "#" }, - "items": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/schemaArray" } - ], - "default": true - }, - "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, - "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, - "uniqueItems": { - "type": "boolean", - "default": false - }, - "contains": { "$ref": "#" }, - "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, - "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, - "required": { "$ref": "#/definitions/stringArray" }, - "additionalProperties": { "$ref": "#" }, - "definitions": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "properties": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "patternProperties": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "propertyNames": { "format": "regex" }, - "default": {} - }, - "dependencies": { - "type": "object", - "additionalProperties": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/stringArray" } - ] - } - }, - "propertyNames": { "$ref": "#" }, - "const": true, - "enum": { - "type": "array", - "items": true - }, - "type": { - "anyOf": [ - { "$ref": "#/definitions/simpleTypes" }, - { - "type": "array", - "items": { "$ref": "#/definitions/simpleTypes" }, - "minItems": 1, - "uniqueItems": true - } - ] - }, - "format": { "type": "string" }, - "contentMediaType": { "type": "string" }, - "contentEncoding": { "type": "string" }, - "if": {"$ref": "#"}, - "then": {"$ref": "#"}, - "else": {"$ref": "#"}, - "allOf": { "$ref": "#/definitions/schemaArray" }, - "anyOf": { "$ref": "#/definitions/schemaArray" }, - "oneOf": { "$ref": "#/definitions/schemaArray" }, - "not": { "$ref": "#" } - }, - "default": true -} diff --git a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/validators.py b/conda_lock/_vendor/poetry/core/_vendor/jsonschema/validators.py deleted file mode 100644 index 
1dc420c7..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/jsonschema/validators.py +++ /dev/null @@ -1,970 +0,0 @@ -""" -Creation and extension of validators, with implementations for existing drafts. -""" -from __future__ import division - -from warnings import warn -import contextlib -import json -import numbers - -from six import add_metaclass - -from jsonschema import ( - _legacy_validators, - _types, - _utils, - _validators, - exceptions, -) -from jsonschema.compat import ( - Sequence, - int_types, - iteritems, - lru_cache, - str_types, - unquote, - urldefrag, - urljoin, - urlopen, - urlsplit, -) - -# Sigh. https://gitlab.com/pycqa/flake8/issues/280 -# https://github.com/pyga/ebb-lint/issues/7 -# Imported for backwards compatibility. -from jsonschema.exceptions import ErrorTree -ErrorTree - - -class _DontDoThat(Exception): - """ - Raised when a Validators with non-default type checker is misused. - - Asking one for DEFAULT_TYPES doesn't make sense, since type checkers - exist for the unrepresentable cases where DEFAULT_TYPES can't - represent the type relationship. - """ - - def __str__(self): - return "DEFAULT_TYPES cannot be used on Validators using TypeCheckers" - - -validators = {} -meta_schemas = _utils.URIDict() - - -def _generate_legacy_type_checks(types=()): - """ - Generate newer-style type checks out of JSON-type-name-to-type mappings. - - Arguments: - - types (dict): - - A mapping of type names to their Python types - - Returns: - - A dictionary of definitions to pass to `TypeChecker` - """ - types = dict(types) - - def gen_type_check(pytypes): - pytypes = _utils.flatten(pytypes) - - def type_check(checker, instance): - if isinstance(instance, bool): - if bool not in pytypes: - return False - return isinstance(instance, pytypes) - - return type_check - - definitions = {} - for typename, pytypes in iteritems(types): - definitions[typename] = gen_type_check(pytypes) - - return definitions - - -_DEPRECATED_DEFAULT_TYPES = { - u"array": list, - u"boolean": bool, - u"integer": int_types, - u"null": type(None), - u"number": numbers.Number, - u"object": dict, - u"string": str_types, -} -_TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES = _types.TypeChecker( - type_checkers=_generate_legacy_type_checks(_DEPRECATED_DEFAULT_TYPES), -) - - -def validates(version): - """ - Register the decorated validator for a ``version`` of the specification. - - Registered validators and their meta schemas will be considered when - parsing ``$schema`` properties' URIs. - - Arguments: - - version (str): - - An identifier to use as the version's name - - Returns: - - collections.Callable: - - a class decorator to decorate the validator with the version - """ - - def _validates(cls): - validators[version] = cls - meta_schema_id = cls.ID_OF(cls.META_SCHEMA) - if meta_schema_id: - meta_schemas[meta_schema_id] = cls - return cls - return _validates - - -def _DEFAULT_TYPES(self): - if self._CREATED_WITH_DEFAULT_TYPES is None: - raise _DontDoThat() - - warn( - ( - "The DEFAULT_TYPES attribute is deprecated. " - "See the type checker attached to this validator instead." - ), - DeprecationWarning, - stacklevel=2, - ) - return self._DEFAULT_TYPES - - -class _DefaultTypesDeprecatingMetaClass(type): - DEFAULT_TYPES = property(_DEFAULT_TYPES) - - -def _id_of(schema): - if schema is True or schema is False: - return u"" - return schema.get(u"$id", u"") - - -def create( - meta_schema, - validators=(), - version=None, - default_types=None, - type_checker=None, - id_of=_id_of, -): - """ - Create a new validator class. 
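A minimal sketch of the `create()` API that the deleted module above implements, written against the published `jsonschema` package rather than this vendored copy; the `minWords` keyword and its callable are hypothetical, invented for illustration:

```python
from jsonschema import exceptions
from jsonschema.validators import create

def min_words(validator, value, instance, schema):
    # Hypothetical keyword: a string must contain at least `value` words.
    if validator.is_type(instance, "string") and len(instance.split()) < value:
        yield exceptions.ValidationError(f"{instance!r} has fewer than {value} words")

MyValidator = create(
    meta_schema={"type": ["object", "boolean"]},
    validators={"minWords": min_words},
)

print(MyValidator({"minWords": 2}).is_valid("hello world"))  # True
print(MyValidator({"minWords": 2}).is_valid("hi"))           # False
```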
- - Arguments: - - meta_schema (collections.Mapping): - - the meta schema for the new validator class - - validators (collections.Mapping): - - a mapping from names to callables, where each callable will - validate the schema property with the given name. - - Each callable should take 4 arguments: - - 1. a validator instance, - 2. the value of the property being validated within the - instance - 3. the instance - 4. the schema - - version (str): - - an identifier for the version that this validator class will - validate. If provided, the returned validator class will - have its ``__name__`` set to include the version, and also - will have `jsonschema.validators.validates` automatically - called for the given version. - - type_checker (jsonschema.TypeChecker): - - a type checker, used when applying the :validator:`type` validator. - - If unprovided, a `jsonschema.TypeChecker` will be created - with a set of default types typical of JSON Schema drafts. - - default_types (collections.Mapping): - - .. deprecated:: 3.0.0 - - Please use the type_checker argument instead. - - If set, it provides mappings of JSON types to Python types - that will be converted to functions and redefined in this - object's `jsonschema.TypeChecker`. - - id_of (collections.Callable): - - A function that given a schema, returns its ID. - - Returns: - - a new `jsonschema.IValidator` class - """ - - if default_types is not None: - if type_checker is not None: - raise TypeError( - "Do not specify default_types when providing a type checker.", - ) - _created_with_default_types = True - warn( - ( - "The default_types argument is deprecated. " - "Use the type_checker argument instead." - ), - DeprecationWarning, - stacklevel=2, - ) - type_checker = _types.TypeChecker( - type_checkers=_generate_legacy_type_checks(default_types), - ) - else: - default_types = _DEPRECATED_DEFAULT_TYPES - if type_checker is None: - _created_with_default_types = False - type_checker = _TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES - elif type_checker is _TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES: - _created_with_default_types = False - else: - _created_with_default_types = None - - @add_metaclass(_DefaultTypesDeprecatingMetaClass) - class Validator(object): - - VALIDATORS = dict(validators) - META_SCHEMA = dict(meta_schema) - TYPE_CHECKER = type_checker - ID_OF = staticmethod(id_of) - - DEFAULT_TYPES = property(_DEFAULT_TYPES) - _DEFAULT_TYPES = dict(default_types) - _CREATED_WITH_DEFAULT_TYPES = _created_with_default_types - - def __init__( - self, - schema, - types=(), - resolver=None, - format_checker=None, - ): - if types: - warn( - ( - "The types argument is deprecated. Provide " - "a type_checker to jsonschema.validators.extend " - "instead." 
- ), - DeprecationWarning, - stacklevel=2, - ) - - self.TYPE_CHECKER = self.TYPE_CHECKER.redefine_many( - _generate_legacy_type_checks(types), - ) - - if resolver is None: - resolver = RefResolver.from_schema(schema, id_of=id_of) - - self.resolver = resolver - self.format_checker = format_checker - self.schema = schema - - @classmethod - def check_schema(cls, schema): - for error in cls(cls.META_SCHEMA).iter_errors(schema): - raise exceptions.SchemaError.create_from(error) - - def iter_errors(self, instance, _schema=None): - if _schema is None: - _schema = self.schema - - if _schema is True: - return - elif _schema is False: - yield exceptions.ValidationError( - "False schema does not allow %r" % (instance,), - validator=None, - validator_value=None, - instance=instance, - schema=_schema, - ) - return - - scope = id_of(_schema) - if scope: - self.resolver.push_scope(scope) - try: - ref = _schema.get(u"$ref") - if ref is not None: - validators = [(u"$ref", ref)] - else: - validators = iteritems(_schema) - - for k, v in validators: - validator = self.VALIDATORS.get(k) - if validator is None: - continue - - errors = validator(self, v, instance, _schema) or () - for error in errors: - # set details if not already set by the called fn - error._set( - validator=k, - validator_value=v, - instance=instance, - schema=_schema, - ) - if k != u"$ref": - error.schema_path.appendleft(k) - yield error - finally: - if scope: - self.resolver.pop_scope() - - def descend(self, instance, schema, path=None, schema_path=None): - for error in self.iter_errors(instance, schema): - if path is not None: - error.path.appendleft(path) - if schema_path is not None: - error.schema_path.appendleft(schema_path) - yield error - - def validate(self, *args, **kwargs): - for error in self.iter_errors(*args, **kwargs): - raise error - - def is_type(self, instance, type): - try: - return self.TYPE_CHECKER.is_type(instance, type) - except exceptions.UndefinedTypeCheck: - raise exceptions.UnknownType(type, instance, self.schema) - - def is_valid(self, instance, _schema=None): - error = next(self.iter_errors(instance, _schema), None) - return error is None - - if version is not None: - Validator = validates(version)(Validator) - Validator.__name__ = version.title().replace(" ", "") + "Validator" - - return Validator - - -def extend(validator, validators=(), version=None, type_checker=None): - """ - Create a new validator class by extending an existing one. - - Arguments: - - validator (jsonschema.IValidator): - - an existing validator class - - validators (collections.Mapping): - - a mapping of new validator callables to extend with, whose - structure is as in `create`. - - .. note:: - - Any validator callables with the same name as an - existing one will (silently) replace the old validator - callable entirely, effectively overriding any validation - done in the "parent" validator class. - - If you wish to instead extend the behavior of a parent's - validator callable, delegate and call it directly in - the new validator function by retrieving it using - ``OldValidator.VALIDATORS["validator_name"]``. - - version (str): - - a version for the new validator class - - type_checker (jsonschema.TypeChecker): - - a type checker, used when applying the :validator:`type` validator. - - If unprovided, the type checker of the extended - `jsonschema.IValidator` will be carried along.` - - Returns: - - a new `jsonschema.IValidator` class extending the one provided - - .. 
note:: Meta Schemas - - The new validator class will have its parent's meta schema. - - If you wish to change or extend the meta schema in the new - validator class, modify ``META_SCHEMA`` directly on the returned - class. Note that no implicit copying is done, so a copy should - likely be made before modifying it, in order to not affect the - old validator. - """ - - all_validators = dict(validator.VALIDATORS) - all_validators.update(validators) - - if type_checker is None: - type_checker = validator.TYPE_CHECKER - elif validator._CREATED_WITH_DEFAULT_TYPES: - raise TypeError( - "Cannot extend a validator created with default_types " - "with a type_checker. Update the validator to use a " - "type_checker when created." - ) - return create( - meta_schema=validator.META_SCHEMA, - validators=all_validators, - version=version, - type_checker=type_checker, - id_of=validator.ID_OF, - ) - - -Draft3Validator = create( - meta_schema=_utils.load_schema("draft3"), - validators={ - u"$ref": _validators.ref, - u"additionalItems": _validators.additionalItems, - u"additionalProperties": _validators.additionalProperties, - u"dependencies": _legacy_validators.dependencies_draft3, - u"disallow": _legacy_validators.disallow_draft3, - u"divisibleBy": _validators.multipleOf, - u"enum": _validators.enum, - u"extends": _legacy_validators.extends_draft3, - u"format": _validators.format, - u"items": _legacy_validators.items_draft3_draft4, - u"maxItems": _validators.maxItems, - u"maxLength": _validators.maxLength, - u"maximum": _legacy_validators.maximum_draft3_draft4, - u"minItems": _validators.minItems, - u"minLength": _validators.minLength, - u"minimum": _legacy_validators.minimum_draft3_draft4, - u"pattern": _validators.pattern, - u"patternProperties": _validators.patternProperties, - u"properties": _legacy_validators.properties_draft3, - u"type": _legacy_validators.type_draft3, - u"uniqueItems": _validators.uniqueItems, - }, - type_checker=_types.draft3_type_checker, - version="draft3", - id_of=lambda schema: schema.get(u"id", ""), -) - -Draft4Validator = create( - meta_schema=_utils.load_schema("draft4"), - validators={ - u"$ref": _validators.ref, - u"additionalItems": _validators.additionalItems, - u"additionalProperties": _validators.additionalProperties, - u"allOf": _validators.allOf, - u"anyOf": _validators.anyOf, - u"dependencies": _validators.dependencies, - u"enum": _validators.enum, - u"format": _validators.format, - u"items": _legacy_validators.items_draft3_draft4, - u"maxItems": _validators.maxItems, - u"maxLength": _validators.maxLength, - u"maxProperties": _validators.maxProperties, - u"maximum": _legacy_validators.maximum_draft3_draft4, - u"minItems": _validators.minItems, - u"minLength": _validators.minLength, - u"minProperties": _validators.minProperties, - u"minimum": _legacy_validators.minimum_draft3_draft4, - u"multipleOf": _validators.multipleOf, - u"not": _validators.not_, - u"oneOf": _validators.oneOf, - u"pattern": _validators.pattern, - u"patternProperties": _validators.patternProperties, - u"properties": _validators.properties, - u"required": _validators.required, - u"type": _validators.type, - u"uniqueItems": _validators.uniqueItems, - }, - type_checker=_types.draft4_type_checker, - version="draft4", - id_of=lambda schema: schema.get(u"id", ""), -) - -Draft6Validator = create( - meta_schema=_utils.load_schema("draft6"), - validators={ - u"$ref": _validators.ref, - u"additionalItems": _validators.additionalItems, - u"additionalProperties": _validators.additionalProperties, - u"allOf": 
_validators.allOf, - u"anyOf": _validators.anyOf, - u"const": _validators.const, - u"contains": _validators.contains, - u"dependencies": _validators.dependencies, - u"enum": _validators.enum, - u"exclusiveMaximum": _validators.exclusiveMaximum, - u"exclusiveMinimum": _validators.exclusiveMinimum, - u"format": _validators.format, - u"items": _validators.items, - u"maxItems": _validators.maxItems, - u"maxLength": _validators.maxLength, - u"maxProperties": _validators.maxProperties, - u"maximum": _validators.maximum, - u"minItems": _validators.minItems, - u"minLength": _validators.minLength, - u"minProperties": _validators.minProperties, - u"minimum": _validators.minimum, - u"multipleOf": _validators.multipleOf, - u"not": _validators.not_, - u"oneOf": _validators.oneOf, - u"pattern": _validators.pattern, - u"patternProperties": _validators.patternProperties, - u"properties": _validators.properties, - u"propertyNames": _validators.propertyNames, - u"required": _validators.required, - u"type": _validators.type, - u"uniqueItems": _validators.uniqueItems, - }, - type_checker=_types.draft6_type_checker, - version="draft6", -) - -Draft7Validator = create( - meta_schema=_utils.load_schema("draft7"), - validators={ - u"$ref": _validators.ref, - u"additionalItems": _validators.additionalItems, - u"additionalProperties": _validators.additionalProperties, - u"allOf": _validators.allOf, - u"anyOf": _validators.anyOf, - u"const": _validators.const, - u"contains": _validators.contains, - u"dependencies": _validators.dependencies, - u"enum": _validators.enum, - u"exclusiveMaximum": _validators.exclusiveMaximum, - u"exclusiveMinimum": _validators.exclusiveMinimum, - u"format": _validators.format, - u"if": _validators.if_, - u"items": _validators.items, - u"maxItems": _validators.maxItems, - u"maxLength": _validators.maxLength, - u"maxProperties": _validators.maxProperties, - u"maximum": _validators.maximum, - u"minItems": _validators.minItems, - u"minLength": _validators.minLength, - u"minProperties": _validators.minProperties, - u"minimum": _validators.minimum, - u"multipleOf": _validators.multipleOf, - u"oneOf": _validators.oneOf, - u"not": _validators.not_, - u"pattern": _validators.pattern, - u"patternProperties": _validators.patternProperties, - u"properties": _validators.properties, - u"propertyNames": _validators.propertyNames, - u"required": _validators.required, - u"type": _validators.type, - u"uniqueItems": _validators.uniqueItems, - }, - type_checker=_types.draft7_type_checker, - version="draft7", -) - -_LATEST_VERSION = Draft7Validator - - -class RefResolver(object): - """ - Resolve JSON References. - - Arguments: - - base_uri (str): - - The URI of the referring document - - referrer: - - The actual referring document - - store (dict): - - A mapping from URIs to documents to cache - - cache_remote (bool): - - Whether remote refs should be cached after first resolution - - handlers (dict): - - A mapping from URI schemes to functions that should be used - to retrieve them - - urljoin_cache (:func:`functools.lru_cache`): - - A cache that will be used for caching the results of joining - the resolution scope to subscopes. - - remote_cache (:func:`functools.lru_cache`): - - A cache that will be used for caching the results of - resolved remote URLs. 
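One reason each draft above gets its own validator table is that keyword semantics change between drafts. A small sketch against the published `jsonschema` package, using the well-known `exclusiveMaximum` change (a boolean modifier in draft 4, a standalone number from draft 6 on):

```python
from jsonschema import Draft4Validator, Draft6Validator

# Draft 4: exclusiveMaximum is a boolean that modifies "maximum".
d4 = Draft4Validator({"maximum": 10, "exclusiveMaximum": True})
# Draft 6+: exclusiveMaximum is itself a number.
d6 = Draft6Validator({"exclusiveMaximum": 10})

print(d4.is_valid(10))  # False -- 10 is excluded
print(d6.is_valid(10))  # False -- 10 is excluded
print(d6.is_valid(9))   # True
```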
- - Attributes: - - cache_remote (bool): - - Whether remote refs should be cached after first resolution - """ - - def __init__( - self, - base_uri, - referrer, - store=(), - cache_remote=True, - handlers=(), - urljoin_cache=None, - remote_cache=None, - ): - if urljoin_cache is None: - urljoin_cache = lru_cache(1024)(urljoin) - if remote_cache is None: - remote_cache = lru_cache(1024)(self.resolve_from_url) - - self.referrer = referrer - self.cache_remote = cache_remote - self.handlers = dict(handlers) - - self._scopes_stack = [base_uri] - self.store = _utils.URIDict( - (id, validator.META_SCHEMA) - for id, validator in iteritems(meta_schemas) - ) - self.store.update(store) - self.store[base_uri] = referrer - - self._urljoin_cache = urljoin_cache - self._remote_cache = remote_cache - - @classmethod - def from_schema(cls, schema, id_of=_id_of, *args, **kwargs): - """ - Construct a resolver from a JSON schema object. - - Arguments: - - schema: - - the referring schema - - Returns: - - `RefResolver` - """ - - return cls(base_uri=id_of(schema), referrer=schema, *args, **kwargs) - - def push_scope(self, scope): - """ - Enter a given sub-scope. - - Treats further dereferences as being performed underneath the - given scope. - """ - self._scopes_stack.append( - self._urljoin_cache(self.resolution_scope, scope), - ) - - def pop_scope(self): - """ - Exit the most recent entered scope. - - Treats further dereferences as being performed underneath the - original scope. - - Don't call this method more times than `push_scope` has been - called. - """ - try: - self._scopes_stack.pop() - except IndexError: - raise exceptions.RefResolutionError( - "Failed to pop the scope from an empty stack. " - "`pop_scope()` should only be called once for every " - "`push_scope()`" - ) - - @property - def resolution_scope(self): - """ - Retrieve the current resolution scope. - """ - return self._scopes_stack[-1] - - @property - def base_uri(self): - """ - Retrieve the current base URI, not including any fragment. - """ - uri, _ = urldefrag(self.resolution_scope) - return uri - - @contextlib.contextmanager - def in_scope(self, scope): - """ - Temporarily enter the given scope for the duration of the context. - """ - self.push_scope(scope) - try: - yield - finally: - self.pop_scope() - - @contextlib.contextmanager - def resolving(self, ref): - """ - Resolve the given ``ref`` and enter its resolution scope. - - Exits the scope on exit of this context manager. - - Arguments: - - ref (str): - - The reference to resolve - """ - - url, resolved = self.resolve(ref) - self.push_scope(url) - try: - yield resolved - finally: - self.pop_scope() - - def resolve(self, ref): - """ - Resolve the given reference. - """ - url = self._urljoin_cache(self.resolution_scope, ref) - return url, self._remote_cache(url) - - def resolve_from_url(self, url): - """ - Resolve the given remote URL. - """ - url, fragment = urldefrag(url) - try: - document = self.store[url] - except KeyError: - try: - document = self.resolve_remote(url) - except Exception as exc: - raise exceptions.RefResolutionError(exc) - - return self.resolve_fragment(document, fragment) - - def resolve_fragment(self, document, fragment): - """ - Resolve a ``fragment`` within the referenced ``document``. 
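To make the resolver's scope handling concrete, a hedged sketch of local `$ref` resolution via the same `RefResolver` API in the published `jsonschema` package (deprecated upstream in favor of the `referencing` library, but matching the code removed here); the schema and `$id` are invented:

```python
from jsonschema import RefResolver

schema = {
    "$id": "http://example.com/root.json",
    "definitions": {
        "port": {"type": "integer", "minimum": 1, "maximum": 65535},
    },
}

resolver = RefResolver.from_schema(schema)  # base_uri comes from $id
url, resolved = resolver.resolve("#/definitions/port")
print(url)       # http://example.com/root.json#/definitions/port
print(resolved)  # {'type': 'integer', 'minimum': 1, 'maximum': 65535}
```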
- - Arguments: - - document: - - The referent document - - fragment (str): - - a URI fragment to resolve within it - """ - - fragment = fragment.lstrip(u"/") - parts = unquote(fragment).split(u"/") if fragment else [] - - for part in parts: - part = part.replace(u"~1", u"/").replace(u"~0", u"~") - - if isinstance(document, Sequence): - # Array indexes should be turned into integers - try: - part = int(part) - except ValueError: - pass - try: - document = document[part] - except (TypeError, LookupError): - raise exceptions.RefResolutionError( - "Unresolvable JSON pointer: %r" % fragment - ) - - return document - - def resolve_remote(self, uri): - """ - Resolve a remote ``uri``. - - If called directly, does not check the store first, but after - retrieving the document at the specified URI it will be saved in - the store if :attr:`cache_remote` is True. - - .. note:: - - If the requests_ library is present, ``jsonschema`` will use it to - request the remote ``uri``, so that the correct encoding is - detected and used. - - If it isn't, or if the scheme of the ``uri`` is not ``http`` or - ``https``, UTF-8 is assumed. - - Arguments: - - uri (str): - - The URI to resolve - - Returns: - - The retrieved document - - .. _requests: https://pypi.org/project/requests/ - """ - try: - import requests - except ImportError: - requests = None - - scheme = urlsplit(uri).scheme - - if scheme in self.handlers: - result = self.handlers[scheme](uri) - elif scheme in [u"http", u"https"] and requests: - # Requests has support for detecting the correct encoding of - # json over http - result = requests.get(uri).json() - else: - # Otherwise, pass off to urllib and assume utf-8 - with urlopen(uri) as url: - result = json.loads(url.read().decode("utf-8")) - - if self.cache_remote: - self.store[uri] = result - return result - - -def validate(instance, schema, cls=None, *args, **kwargs): - """ - Validate an instance under the given schema. - - >>> validate([2, 3, 4], {"maxItems": 2}) - Traceback (most recent call last): - ... - ValidationError: [2, 3, 4] is too long - - :func:`validate` will first verify that the provided schema is - itself valid, since not doing so can lead to less obvious error - messages and fail in less obvious or consistent ways. - - If you know you have a valid schema already, especially if you - intend to validate multiple instances with the same schema, you - likely would prefer using the `IValidator.validate` method directly - on a specific validator (e.g. ``Draft7Validator.validate``). - - - Arguments: - - instance: - - The instance to validate - - schema: - - The schema to validate with - - cls (IValidator): - - The class that will be used to validate the instance. - - If the ``cls`` argument is not provided, two things will happen - in accordance with the specification. First, if the schema has a - :validator:`$schema` property containing a known meta-schema [#]_ - then the proper validator will be used. The specification recommends - that all schemas contain :validator:`$schema` properties for this - reason. If no :validator:`$schema` property is found, the default - validator class is the latest released draft. - - Any other provided positional and keyword arguments will be passed - on when instantiating the ``cls``. - - Raises: - - `jsonschema.exceptions.ValidationError` if the instance - is invalid - - `jsonschema.exceptions.SchemaError` if the schema itself - is invalid - - .. rubric:: Footnotes - .. 
[#] known by a validator registered with - `jsonschema.validators.validates` - """ - if cls is None: - cls = validator_for(schema) - - cls.check_schema(schema) - validator = cls(schema, *args, **kwargs) - error = exceptions.best_match(validator.iter_errors(instance)) - if error is not None: - raise error - - -def validator_for(schema, default=_LATEST_VERSION): - """ - Retrieve the validator class appropriate for validating the given schema. - - Uses the :validator:`$schema` property that should be present in the - given schema to look up the appropriate validator class. - - Arguments: - - schema (collections.Mapping or bool): - - the schema to look at - - default: - - the default to return if the appropriate validator class - cannot be determined. - - If unprovided, the default is to return the latest supported - draft. - """ - if schema is True or schema is False or u"$schema" not in schema: - return default - if schema[u"$schema"] not in meta_schemas: - warn( - ( - "The metaschema specified by $schema was not found. " - "Using the latest draft to validate, but this will raise " - "an error in the future." - ), - DeprecationWarning, - stacklevel=2, - ) - return meta_schemas.get(schema[u"$schema"], _LATEST_VERSION) diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark-parser.LICENSE b/conda_lock/_vendor/poetry/core/_vendor/lark/LICENSE similarity index 99% rename from conda_lock/_vendor/poetry/core/_vendor/lark-parser.LICENSE rename to conda_lock/_vendor/poetry/core/_vendor/lark/LICENSE index efcb9665..aaf210b1 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark-parser.LICENSE +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/LICENSE @@ -16,4 +16,3 @@ FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
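Putting the removed module's pieces together, a short sketch of the documented flow -- `validator_for()` picks a class from the `$schema` registry, `check_schema()` validates the schema itself, and `iter_errors()` reports instance errors -- runnable against the published `jsonschema` package:

```python
from jsonschema import Draft7Validator
from jsonschema.validators import validator_for

schema = {"$schema": "http://json-schema.org/draft-07/schema#", "maxItems": 2}

cls = validator_for(schema)   # resolved via the $schema URI
assert cls is Draft7Validator
cls.check_schema(schema)      # verify the schema itself first

for error in cls(schema).iter_errors([1, 2, 3]):
    print(error.message)      # [1, 2, 3] is too long
```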
- diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/__init__.py b/conda_lock/_vendor/poetry/core/_vendor/lark/__init__.py index 8ddab96a..a13c7b3b 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/__init__.py +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/__init__.py @@ -1,9 +1,38 @@ -from .tree import Tree -from .visitors import Transformer, Visitor, v_args, Discard -from .visitors import InlineTransformer, inline_args # XXX Deprecated -from .exceptions import (ParseError, LexError, GrammarError, UnexpectedToken, - UnexpectedInput, UnexpectedCharacters, LarkError) -from .lexer import Token +from .exceptions import ( + GrammarError, + LarkError, + LexError, + ParseError, + UnexpectedCharacters, + UnexpectedEOF, + UnexpectedInput, + UnexpectedToken, +) from .lark import Lark +from .lexer import Token +from .tree import ParseTree, Tree +from .utils import logger +from .visitors import Discard, Transformer, Transformer_NonRecursive, Visitor, v_args + +__version__: str = "1.1.9" -__version__ = "0.9.0" +__all__ = ( + "GrammarError", + "LarkError", + "LexError", + "ParseError", + "UnexpectedCharacters", + "UnexpectedEOF", + "UnexpectedInput", + "UnexpectedToken", + "Lark", + "Token", + "ParseTree", + "Tree", + "logger", + "Discard", + "Transformer", + "Transformer_NonRecursive", + "Visitor", + "v_args", +) diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/__pyinstaller/__init__.py b/conda_lock/_vendor/poetry/core/_vendor/lark/__pyinstaller/__init__.py index fa02fc92..9da62a33 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/__pyinstaller/__init__.py +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/__pyinstaller/__init__.py @@ -3,4 +3,4 @@ import os def get_hook_dirs(): - return [os.path.dirname(__file__)] \ No newline at end of file + return [os.path.dirname(__file__)] diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/ast_utils.py b/conda_lock/_vendor/poetry/core/_vendor/lark/ast_utils.py new file mode 100644 index 00000000..a5460f35 --- /dev/null +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/ast_utils.py @@ -0,0 +1,59 @@ +""" + Module of utilities for transforming a lark.Tree into a custom Abstract Syntax Tree (AST defined in classes) +""" + +import inspect, re +import types +from typing import Optional, Callable + +from lark import Transformer, v_args + +class Ast: + """Abstract class + + Subclasses will be collected by `create_transformer()` + """ + pass + +class AsList: + """Abstract class + + Subclasses will be instantiated with the parse results as a single list, instead of as arguments. + """ + +class WithMeta: + """Abstract class + + Subclasses will be instantiated with the Meta instance of the tree. (see ``v_args`` for more detail) + """ + pass + +def camel_to_snake(name): + return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower() + +def create_transformer(ast_module: types.ModuleType, + transformer: Optional[Transformer]=None, + decorator_factory: Callable=v_args) -> Transformer: + """Collects `Ast` subclasses from the given module, and creates a Lark transformer that builds the AST. + + For each class, we create a corresponding rule in the transformer, with a matching name. + CamelCase names will be converted into snake_case. Example: "CodeBlock" -> "code_block". + + Classes starting with an underscore (`_`) will be skipped. + + Parameters: + ast_module: A Python module containing all the subclasses of ``ast_utils.Ast`` + transformer (Optional[Transformer]): An initial transformer. Its attributes may be overwritten. + decorator_factory (Callable): An optional callable accepting two booleans, inline, and meta, + and returning a decorator for the methods of ``transformer``. (default: ``v_args``).
+ """ + t = transformer or Transformer() + + for name, obj in inspect.getmembers(ast_module): + if not name.startswith('_') and inspect.isclass(obj): + if issubclass(obj, Ast): + wrapper = decorator_factory(inline=not issubclass(obj, AsList), meta=issubclass(obj, WithMeta)) + obj = wrapper(obj).__get__(t) + setattr(t, camel_to_snake(name), obj) + + return t diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/common.py b/conda_lock/_vendor/poetry/core/_vendor/lark/common.py index c44f9cef..d6be890a 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/common.py +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/common.py @@ -1,29 +1,89 @@ +from copy import deepcopy +import sys +from types import ModuleType +from typing import Callable, Collection, Dict, Optional, TYPE_CHECKING, List + +if TYPE_CHECKING: + from .lark import PostLex + from .lexer import Lexer + from .grammar import Rule + from typing import Union, Type + if sys.version_info >= (3, 8): + from typing import Literal + else: + from typing_extensions import Literal + if sys.version_info >= (3, 10): + from typing import TypeAlias + else: + from typing_extensions import TypeAlias + from .utils import Serialize -from .lexer import TerminalDef +from .lexer import TerminalDef, Token ###{standalone +_ParserArgType: 'TypeAlias' = 'Literal["earley", "lalr", "cyk", "auto"]' +_LexerArgType: 'TypeAlias' = 'Union[Literal["auto", "basic", "contextual", "dynamic", "dynamic_complete"], Type[Lexer]]' +_LexerCallback = Callable[[Token], Token] +ParserCallbacks = Dict[str, Callable] + class LexerConf(Serialize): - __serialize_fields__ = 'tokens', 'ignore', 'g_regex_flags' + __serialize_fields__ = 'terminals', 'ignore', 'g_regex_flags', 'use_bytes', 'lexer_type' __serialize_namespace__ = TerminalDef, - def __init__(self, tokens, ignore=(), postlex=None, callbacks=None, g_regex_flags=0): - self.tokens = tokens + terminals: Collection[TerminalDef] + re_module: ModuleType + ignore: Collection[str] + postlex: 'Optional[PostLex]' + callbacks: Dict[str, _LexerCallback] + g_regex_flags: int + skip_validation: bool + use_bytes: bool + lexer_type: Optional[_LexerArgType] + strict: bool + + def __init__(self, terminals: Collection[TerminalDef], re_module: ModuleType, ignore: Collection[str]=(), postlex: 'Optional[PostLex]'=None, + callbacks: Optional[Dict[str, _LexerCallback]]=None, g_regex_flags: int=0, skip_validation: bool=False, use_bytes: bool=False, strict: bool=False): + self.terminals = terminals + self.terminals_by_name = {t.name: t for t in self.terminals} + assert len(self.terminals) == len(self.terminals_by_name) self.ignore = ignore self.postlex = postlex self.callbacks = callbacks or {} self.g_regex_flags = g_regex_flags + self.re_module = re_module + self.skip_validation = skip_validation + self.use_bytes = use_bytes + self.strict = strict + self.lexer_type = None def _deserialize(self): - self.callbacks = {} # TODO + self.terminals_by_name = {t.name: t for t in self.terminals} -###} + def __deepcopy__(self, memo=None): + return type(self)( + deepcopy(self.terminals, memo), + self.re_module, + deepcopy(self.ignore, memo), + deepcopy(self.postlex, memo), + deepcopy(self.callbacks, memo), + deepcopy(self.g_regex_flags, memo), + deepcopy(self.skip_validation, memo), + deepcopy(self.use_bytes, memo), + ) -class ParserConf: - def __init__(self, rules, callbacks, start): +class ParserConf(Serialize): + __serialize_fields__ = 'rules', 'start', 'parser_type' + + rules: List['Rule'] + callbacks: ParserCallbacks + start: List[str] + parser_type: 
_ParserArgType + + def __init__(self, rules: List['Rule'], callbacks: ParserCallbacks, start: List[str]): assert isinstance(start, list) self.rules = rules self.callbacks = callbacks self.start = start - +###} diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/exceptions.py b/conda_lock/_vendor/poetry/core/_vendor/lark/exceptions.py index 1c5e533e..e099d596 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/exceptions.py +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/exceptions.py @@ -1,119 +1,292 @@ -from .utils import STRING_TYPE +from .utils import logger, NO_VALUE +from typing import Mapping, Iterable, Callable, Union, TypeVar, Tuple, Any, List, Set, Optional, Collection, TYPE_CHECKING + +if TYPE_CHECKING: + from .lexer import Token + from .parsers.lalr_interactive_parser import InteractiveParser + from .tree import Tree ###{standalone + class LarkError(Exception): pass + +class ConfigurationError(LarkError, ValueError): + pass + + +def assert_config(value, options: Collection, msg='Got %r, expected one of %s'): + if value not in options: + raise ConfigurationError(msg % (value, options)) + + class GrammarError(LarkError): pass + class ParseError(LarkError): pass + class LexError(LarkError): pass -class UnexpectedEOF(ParseError): - def __init__(self, expected): - self.expected = expected +T = TypeVar('T') - message = ("Unexpected end-of-input. Expected one of: \n\t* %s\n" % '\n\t* '.join(x.name for x in self.expected)) - super(UnexpectedEOF, self).__init__(message) +class UnexpectedInput(LarkError): + """UnexpectedInput Error. + Used as a base class for the following exceptions: -class UnexpectedInput(LarkError): + - ``UnexpectedCharacters``: The lexer encountered an unexpected string + - ``UnexpectedToken``: The parser received an unexpected token + - ``UnexpectedEOF``: The parser expected a token, but the input ended + + After catching one of these exceptions, you may call the following helper methods to create a nicer error message. + """ + line: int + column: int pos_in_stream = None + state: Any + _terminals_by_name = None + interactive_parser: 'InteractiveParser' - def get_context(self, text, span=40): + def get_context(self, text: str, span: int=40) -> str: + """Returns a pretty string pinpointing the error in the text, + with span amount of context characters around it. + + Note: + The parser doesn't hold a copy of the text it has to parse, + so you have to provide it again + """ + assert self.pos_in_stream is not None, self pos = self.pos_in_stream start = max(pos - span, 0) end = pos + span - before = text[start:pos].rsplit('\n', 1)[-1] - after = text[pos:end].split('\n', 1)[0] - return before + after + '\n' + ' ' * len(before) + '^\n' - - def match_examples(self, parse_fn, examples, token_type_match_fallback=False): - """ Given a parser instance and a dictionary mapping some label with - some malformed syntax examples, it'll return the label for the - example that bests matches the current error. 
+ if not isinstance(text, bytes): + before = text[start:pos].rsplit('\n', 1)[-1] + after = text[pos:end].split('\n', 1)[0] + return before + after + '\n' + ' ' * len(before.expandtabs()) + '^\n' + else: + before = text[start:pos].rsplit(b'\n', 1)[-1] + after = text[pos:end].split(b'\n', 1)[0] + return (before + after + b'\n' + b' ' * len(before.expandtabs()) + b'^\n').decode("ascii", "backslashreplace") + + def match_examples(self, parse_fn: 'Callable[[str], Tree]', + examples: Union[Mapping[T, Iterable[str]], Iterable[Tuple[T, Iterable[str]]]], + token_type_match_fallback: bool=False, + use_accepts: bool=True + ) -> Optional[T]: + """Allows you to detect what's wrong in the input text by matching + against example errors. + + Given a parser instance and a dictionary mapping some label with + some malformed syntax examples, it'll return the label for the + example that bests matches the current error. The function will + iterate the dictionary until it finds a matching error, and + return the corresponding value. + + For an example usage, see `examples/error_reporting_lalr.py` + + Parameters: + parse_fn: parse function (usually ``lark_instance.parse``) + examples: dictionary of ``{'example_string': value}``. + use_accepts: Recommended to keep this as ``use_accepts=True``. """ assert self.state is not None, "Not supported for this exception" + if isinstance(examples, Mapping): + examples = examples.items() + candidate = (None, False) - for label, example in examples.items(): - assert not isinstance(example, STRING_TYPE) + for i, (label, example) in enumerate(examples): + assert not isinstance(example, str), "Expecting a list" - for malformed in example: + for j, malformed in enumerate(example): try: parse_fn(malformed) except UnexpectedInput as ut: if ut.state == self.state: - try: + if ( + use_accepts + and isinstance(self, UnexpectedToken) + and isinstance(ut, UnexpectedToken) + and ut.accepts != self.accepts + ): + logger.debug("Different accepts with same state[%d]: %s != %s at example [%s][%s]" % + (self.state, self.accepts, ut.accepts, i, j)) + continue + if ( + isinstance(self, (UnexpectedToken, UnexpectedEOF)) + and isinstance(ut, (UnexpectedToken, UnexpectedEOF)) + ): if ut.token == self.token: # Try exact match first + logger.debug("Exact Match at example [%s][%s]" % (i, j)) return label if token_type_match_fallback: # Fallback to token types match if (ut.token.type == self.token.type) and not candidate[-1]: + logger.debug("Token Type Fallback at example [%s][%s]" % (i, j)) candidate = label, True - except AttributeError: - pass - if not candidate[0]: + if candidate[0] is None: + logger.debug("Same State match at example [%s][%s]" % (i, j)) candidate = label, False return candidate[0] + def _format_expected(self, expected): + if self._terminals_by_name: + d = self._terminals_by_name + expected = [d[t_name].user_repr() if t_name in d else t_name for t_name in expected] + return "Expected one of: \n\t* %s\n" % '\n\t* '.join(expected) + + +class UnexpectedEOF(ParseError, UnexpectedInput): + """An exception that is raised by the parser, when the input ends while it still expects a token. 
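A hedged sketch of the `match_examples()` workflow documented above, assuming the published `lark` package; the two-token grammar and the example labels are invented for illustration:

```python
from lark import Lark, UnexpectedInput

parser = Lark('start: "a" "b"', parser="lalr")

error_examples = {
    "missing second token": ["a"],
    "wrong first token":    ["b"],
}

try:
    parser.parse("a")
except UnexpectedInput as u:
    # Replays each malformed example through the parser and returns the
    # label whose failure state matches ours.
    print(u.match_examples(parser.parse, error_examples))  # missing second token
```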
+ """ + expected: 'List[Token]' + + def __init__(self, expected, state=None, terminals_by_name=None): + super(UnexpectedEOF, self).__init__() + + self.expected = expected + self.state = state + from .lexer import Token + self.token = Token("", "") # , line=-1, column=-1, pos_in_stream=-1) + self.pos_in_stream = -1 + self.line = -1 + self.column = -1 + self._terminals_by_name = terminals_by_name + + + def __str__(self): + message = "Unexpected end-of-input. " + message += self._format_expected(self.expected) + return message + class UnexpectedCharacters(LexError, UnexpectedInput): - def __init__(self, seq, lex_pos, line, column, allowed=None, considered_tokens=None, state=None, token_history=None): - message = "No terminal defined for '%s' at line %d col %d" % (seq[lex_pos], line, column) + """An exception that is raised by the lexer, when it cannot match the next + string of characters to any of its terminals. + """ + + allowed: Set[str] + considered_tokens: Set[Any] + def __init__(self, seq, lex_pos, line, column, allowed=None, considered_tokens=None, state=None, token_history=None, + terminals_by_name=None, considered_rules=None): + super(UnexpectedCharacters, self).__init__() + + # TODO considered_tokens and allowed can be figured out using state self.line = line self.column = column - self.allowed = allowed - self.considered_tokens = considered_tokens self.pos_in_stream = lex_pos self.state = state + self._terminals_by_name = terminals_by_name - message += '\n\n' + self.get_context(seq) - if allowed: - message += '\nExpecting: %s\n' % allowed - if token_history: - message += '\nPrevious tokens: %s\n' % ', '.join(repr(t) for t in token_history) + self.allowed = allowed + self.considered_tokens = considered_tokens + self.considered_rules = considered_rules + self.token_history = token_history + + if isinstance(seq, bytes): + self.char = seq[lex_pos:lex_pos + 1].decode("ascii", "backslashreplace") + else: + self.char = seq[lex_pos] + self._context = self.get_context(seq) - super(UnexpectedCharacters, self).__init__(message) + def __str__(self): + message = "No terminal matches '%s' in the current parser context, at line %d col %d" % (self.char, self.line, self.column) + message += '\n\n' + self._context + if self.allowed: + message += self._format_expected(self.allowed) + if self.token_history: + message += '\nPrevious tokens: %s\n' % ', '.join(repr(t) for t in self.token_history) + return message class UnexpectedToken(ParseError, UnexpectedInput): - def __init__(self, token, expected, considered_rules=None, state=None, puppet=None): - self.token = token - self.expected = expected # XXX str shouldn't necessary + """An exception that is raised by the parser, when the token it received + doesn't match any valid step forward. + + Parameters: + token: The mismatched token + expected: The set of expected tokens + considered_rules: Which rules were considered, to deduce the expected tokens + state: A value representing the parser state. Do not rely on its value or type. + interactive_parser: An instance of ``InteractiveParser``, that is initialized to the point of failure, + and can be used for debugging and error handling. + + Note: These parameters are available as attributes of the instance. 
+ """ + + expected: Set[str] + considered_rules: Set[str] + + def __init__(self, token, expected, considered_rules=None, state=None, interactive_parser=None, terminals_by_name=None, token_history=None): + super(UnexpectedToken, self).__init__() + + # TODO considered_rules and expected can be figured out using state self.line = getattr(token, 'line', '?') self.column = getattr(token, 'column', '?') - self.considered_rules = considered_rules + self.pos_in_stream = getattr(token, 'start_pos', None) self.state = state - self.pos_in_stream = getattr(token, 'pos_in_stream', None) - self.puppet = puppet - message = ("Unexpected token %r at line %s, column %s.\n" - "Expected one of: \n\t* %s\n" - % (token, self.line, self.column, '\n\t* '.join(self.expected))) + self.token = token + self.expected = expected # XXX deprecate? `accepts` is better + self._accepts = NO_VALUE + self.considered_rules = considered_rules + self.interactive_parser = interactive_parser + self._terminals_by_name = terminals_by_name + self.token_history = token_history + + + @property + def accepts(self) -> Set[str]: + if self._accepts is NO_VALUE: + self._accepts = self.interactive_parser and self.interactive_parser.accepts() + return self._accepts + + def __str__(self): + message = ("Unexpected token %r at line %s, column %s.\n%s" + % (self.token, self.line, self.column, self._format_expected(self.accepts or self.expected))) + if self.token_history: + message += "Previous tokens: %r\n" % self.token_history + + return message + - super(UnexpectedToken, self).__init__(message) class VisitError(LarkError): """VisitError is raised when visitors are interrupted by an exception It provides the following attributes for inspection: - - obj: the tree node or token it was processing when the exception was raised - - orig_exc: the exception that cause it to fail + + Parameters: + rule: the name of the visit rule that failed + obj: the tree-node or token that was being processed + orig_exc: the exception that cause it to fail + + Note: These parameters are available as attributes """ + + obj: 'Union[Tree, Token]' + orig_exc: Exception + def __init__(self, rule, obj, orig_exc): + message = 'Error trying to process rule "%s":\n\n%s' % (rule, orig_exc) + super(VisitError, self).__init__(message) + + self.rule = rule self.obj = obj self.orig_exc = orig_exc - message = 'Error trying to process rule "%s":\n\n%s' % (rule, orig_exc) - super(VisitError, self).__init__(message) + +class MissingVariableError(LarkError): + pass + ###} diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/grammar.py b/conda_lock/_vendor/poetry/core/_vendor/lark/grammar.py index bb843513..1d226d9e 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/grammar.py +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/grammar.py @@ -1,13 +1,18 @@ +from typing import Optional, Tuple, ClassVar, Sequence + from .utils import Serialize ###{standalone +TOKEN_DEFAULT_PRIORITY = 0 + class Symbol(Serialize): __slots__ = ('name',) - is_term = NotImplemented + name: str + is_term: ClassVar[bool] = NotImplemented - def __init__(self, name): + def __init__(self, name: str) -> None: self.name = name def __eq__(self, other): @@ -25,11 +30,14 @@ def __repr__(self): fullrepr = property(__repr__) + def renamed(self, f): + return type(self)(f(self.name)) + class Terminal(Symbol): __serialize_fields__ = 'name', 'filter_out' - is_term = True + is_term: ClassVar[bool] = True def __init__(self, name, filter_out=False): self.name = name @@ -39,19 +47,26 @@ def __init__(self, name, 
filter_out=False): def fullrepr(self): return '%s(%r, %r)' % (type(self).__name__, self.name, self.filter_out) + def renamed(self, f): + return type(self)(f(self.name), self.filter_out) class NonTerminal(Symbol): __serialize_fields__ = 'name', - is_term = False - + is_term: ClassVar[bool] = False class RuleOptions(Serialize): __serialize_fields__ = 'keep_all_tokens', 'expand1', 'priority', 'template_source', 'empty_indices' - def __init__(self, keep_all_tokens=False, expand1=False, priority=None, template_source=None, empty_indices=()): + keep_all_tokens: bool + expand1: bool + priority: Optional[int] + template_source: Optional[str] + empty_indices: Tuple[bool, ...] + + def __init__(self, keep_all_tokens: bool=False, expand1: bool=False, priority: Optional[int]=None, template_source: Optional[str]=None, empty_indices: Tuple[bool, ...]=()) -> None: self.keep_all_tokens = keep_all_tokens self.expand1 = expand1 self.priority = priority @@ -78,7 +93,15 @@ class Rule(Serialize): __serialize_fields__ = 'origin', 'expansion', 'order', 'alias', 'options' __serialize_namespace__ = Terminal, NonTerminal, RuleOptions - def __init__(self, origin, expansion, order=0, alias=None, options=None): + origin: NonTerminal + expansion: Sequence[Symbol] + order: int + alias: Optional[str] + options: RuleOptions + _hash: int + + def __init__(self, origin: NonTerminal, expansion: Sequence[Symbol], + order: int=0, alias: Optional[str]=None, options: Optional[RuleOptions]=None): self.origin = origin self.expansion = expansion self.alias = alias @@ -104,5 +127,4 @@ def __eq__(self, other): return self.origin == other.origin and self.expansion == other.expansion - ###} diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/grammars/__init__.py b/conda_lock/_vendor/poetry/core/_vendor/lark/grammars/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/grammars/common.lark b/conda_lock/_vendor/poetry/core/_vendor/lark/grammars/common.lark index a675ca41..d2e86d17 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/grammars/common.lark +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/grammars/common.lark @@ -1,3 +1,6 @@ +// Basic terminals for common use + + // // Numbers // @@ -21,7 +24,7 @@ SIGNED_NUMBER: ["+"|"-"] NUMBER // Strings // _STRING_INNER: /.*?/ -_STRING_ESC_INNER: _STRING_INNER /(? ignore + | "%import" import_path ["->" name] -> import + | "%import" import_path name_list -> multi_import + | "%override" rule -> override_rule + | "%declare" name+ -> declare + +!import_path: "."? name ("." name)* +name_list: "(" name ("," name)* ")" + +?expansions: alias (_VBAR alias)* + +?alias: expansion ["->" RULE] + +?expansion: expr* + +?expr: atom [OP | "~" NUMBER [".." NUMBER]] + +?atom: "(" expansions ")" + | "[" expansions "]" -> maybe + | value + +?value: STRING ".." STRING -> literal_range + | name + | (REGEXP | STRING) -> literal + | name "{" value ("," value)* "}" -> template_usage + +name: RULE + | TOKEN + +_VBAR: _NL? "|" +OP: /[+*]|[?](?![a-z])/ +RULE: /!?[_?]?[a-z][_a-z0-9]*/ +TOKEN: /_?[A-Z][_A-Z0-9]*/ +STRING: _STRING "i"? 
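The `RULE` and `TOKEN` terminals above encode lark's naming convention: rules are lower snake_case, terminals are UPPER_SNAKE_CASE. A tiny grammar following that convention, as a sketch assuming the published `lark` package:

```python
from lark import Lark

grammar = r"""
start: pair ("," pair)*
pair: NAME "=" NUMBER
NAME: /[a-z_]+/
NUMBER: /\d+/
%ignore /\s+/
"""

tree = Lark(grammar, parser="lalr").parse("x = 1, y = 2")
print(tree.pretty())
```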
+REGEXP: /\/(?!\/)(\\\/|\\\\|[^\/])*?\/[imslux]*/ +_NL: /(\r?\n)+\s*/ + +%import common.ESCAPED_STRING -> _STRING +%import common.SIGNED_INT -> NUMBER +%import common.WS_INLINE + +COMMENT: /\s*/ "//" /[^\n]/* | /\s*/ "#" /[^\n]/* + +%ignore WS_INLINE +%ignore COMMENT diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/grammars/python.lark b/conda_lock/_vendor/poetry/core/_vendor/lark/grammars/python.lark new file mode 100644 index 00000000..8a75966b --- /dev/null +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/grammars/python.lark @@ -0,0 +1,302 @@ +// Python 3 grammar for Lark + +// This grammar should parse all python 3.x code successfully. + +// Adapted from: https://docs.python.org/3/reference/grammar.html + +// Start symbols for the grammar: +// single_input is a single interactive statement; +// file_input is a module or sequence of commands read from an input file; +// eval_input is the input for the eval() functions. +// NB: compound_stmt in single_input is followed by extra NEWLINE! +// + +single_input: _NEWLINE | simple_stmt | compound_stmt _NEWLINE +file_input: (_NEWLINE | stmt)* +eval_input: testlist _NEWLINE* + +decorator: "@" dotted_name [ "(" [arguments] ")" ] _NEWLINE +decorators: decorator+ +decorated: decorators (classdef | funcdef | async_funcdef) + +async_funcdef: "async" funcdef +funcdef: "def" name "(" [parameters] ")" ["->" test] ":" suite + +parameters: paramvalue ("," paramvalue)* ["," SLASH ("," paramvalue)*] ["," [starparams | kwparams]] + | starparams + | kwparams + +SLASH: "/" // Otherwise the it will completely disappear and it will be undisguisable in the result +starparams: (starparam | starguard) poststarparams +starparam: "*" typedparam +starguard: "*" +poststarparams: ("," paramvalue)* ["," kwparams] +kwparams: "**" typedparam ","? + +?paramvalue: typedparam ("=" test)? +?typedparam: name (":" test)? + + +lambdef: "lambda" [lambda_params] ":" test +lambdef_nocond: "lambda" [lambda_params] ":" test_nocond +lambda_params: lambda_paramvalue ("," lambda_paramvalue)* ["," [lambda_starparams | lambda_kwparams]] + | lambda_starparams + | lambda_kwparams +?lambda_paramvalue: name ("=" test)? +lambda_starparams: "*" [name] ("," lambda_paramvalue)* ["," [lambda_kwparams]] +lambda_kwparams: "**" name ","? + + +?stmt: simple_stmt | compound_stmt +?simple_stmt: small_stmt (";" small_stmt)* [";"] _NEWLINE +?small_stmt: (expr_stmt | assign_stmt | del_stmt | pass_stmt | flow_stmt | import_stmt | global_stmt | nonlocal_stmt | assert_stmt) +expr_stmt: testlist_star_expr +assign_stmt: annassign | augassign | assign + +annassign: testlist_star_expr ":" test ["=" test] +assign: testlist_star_expr ("=" (yield_expr|testlist_star_expr))+ +augassign: testlist_star_expr augassign_op (yield_expr|testlist) +!augassign_op: "+=" | "-=" | "*=" | "@=" | "/=" | "%=" | "&=" | "|=" | "^=" | "<<=" | ">>=" | "**=" | "//=" +?testlist_star_expr: test_or_star_expr + | test_or_star_expr ("," test_or_star_expr)+ ","? -> tuple + | test_or_star_expr "," -> tuple + +// For normal and annotated assignments, additional restrictions enforced by the interpreter +del_stmt: "del" exprlist +pass_stmt: "pass" +?flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt +break_stmt: "break" +continue_stmt: "continue" +return_stmt: "return" [testlist] +yield_stmt: yield_expr +raise_stmt: "raise" [test ["from" test]] +import_stmt: import_name | import_from +import_name: "import" dotted_as_names +// note below: the ("." | "...") is necessary because "..." 
is tokenized as ELLIPSIS +import_from: "from" (dots? dotted_name | dots) "import" ("*" | "(" import_as_names ")" | import_as_names) +!dots: "."+ +import_as_name: name ["as" name] +dotted_as_name: dotted_name ["as" name] +import_as_names: import_as_name ("," import_as_name)* [","] +dotted_as_names: dotted_as_name ("," dotted_as_name)* +dotted_name: name ("." name)* +global_stmt: "global" name ("," name)* +nonlocal_stmt: "nonlocal" name ("," name)* +assert_stmt: "assert" test ["," test] + +?compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | match_stmt + | with_stmt | funcdef | classdef | decorated | async_stmt +async_stmt: "async" (funcdef | with_stmt | for_stmt) +if_stmt: "if" test ":" suite elifs ["else" ":" suite] +elifs: elif_* +elif_: "elif" test ":" suite +while_stmt: "while" test ":" suite ["else" ":" suite] +for_stmt: "for" exprlist "in" testlist ":" suite ["else" ":" suite] +try_stmt: "try" ":" suite except_clauses ["else" ":" suite] [finally] + | "try" ":" suite finally -> try_finally +finally: "finally" ":" suite +except_clauses: except_clause+ +except_clause: "except" [test ["as" name]] ":" suite +// NB compile.c makes sure that the default except clause is last + + +with_stmt: "with" with_items ":" suite +with_items: with_item ("," with_item)* +with_item: test ["as" name] + +match_stmt: "match" test ":" _NEWLINE _INDENT case+ _DEDENT + +case: "case" pattern ["if" test] ":" suite + +?pattern: sequence_item_pattern "," _sequence_pattern -> sequence_pattern + | as_pattern +?as_pattern: or_pattern ("as" NAME)? +?or_pattern: closed_pattern ("|" closed_pattern)* +?closed_pattern: literal_pattern + | NAME -> capture_pattern + | "_" -> any_pattern + | attr_pattern + | "(" as_pattern ")" + | "[" _sequence_pattern "]" -> sequence_pattern + | "(" (sequence_item_pattern "," _sequence_pattern)? ")" -> sequence_pattern + | "{" (mapping_item_pattern ("," mapping_item_pattern)* ","?)?"}" -> mapping_pattern + | "{" (mapping_item_pattern ("," mapping_item_pattern)* ",")? "**" NAME ","? "}" -> mapping_star_pattern + | class_pattern + +literal_pattern: inner_literal_pattern + +?inner_literal_pattern: "None" -> const_none + | "True" -> const_true + | "False" -> const_false + | STRING -> string + | number + +attr_pattern: NAME ("." NAME)+ -> value + +name_or_attr_pattern: NAME ("." NAME)* -> value + +mapping_item_pattern: (literal_pattern|attr_pattern) ":" as_pattern + +_sequence_pattern: (sequence_item_pattern ("," sequence_item_pattern)* ","?)? +?sequence_item_pattern: as_pattern + | "*" NAME -> star_pattern + +class_pattern: name_or_attr_pattern "(" [arguments_pattern ","?] ")" +arguments_pattern: pos_arg_pattern ["," keyws_arg_pattern] + | keyws_arg_pattern -> no_pos_arguments + +pos_arg_pattern: as_pattern ("," as_pattern)* +keyws_arg_pattern: keyw_arg_pattern ("," keyw_arg_pattern)* +keyw_arg_pattern: NAME "=" as_pattern + + + +suite: simple_stmt | _NEWLINE _INDENT stmt+ _DEDENT + +?test: or_test ("if" or_test "else" test)? 
+ | lambdef + | assign_expr + +assign_expr: name ":=" test + +?test_nocond: or_test | lambdef_nocond + +?or_test: and_test ("or" and_test)* +?and_test: not_test_ ("and" not_test_)* +?not_test_: "not" not_test_ -> not_test + | comparison +?comparison: expr (comp_op expr)* +star_expr: "*" expr + +?expr: or_expr +?or_expr: xor_expr ("|" xor_expr)* +?xor_expr: and_expr ("^" and_expr)* +?and_expr: shift_expr ("&" shift_expr)* +?shift_expr: arith_expr (_shift_op arith_expr)* +?arith_expr: term (_add_op term)* +?term: factor (_mul_op factor)* +?factor: _unary_op factor | power + +!_unary_op: "+"|"-"|"~" +!_add_op: "+"|"-" +!_shift_op: "<<"|">>" +!_mul_op: "*"|"@"|"/"|"%"|"//" +// <> isn't actually a valid comparison operator in Python. It's here for the +// sake of a __future__ import described in PEP 401 (which really works :-) +!comp_op: "<"|">"|"=="|">="|"<="|"<>"|"!="|"in"|"not" "in"|"is"|"is" "not" + +?power: await_expr ("**" factor)? +?await_expr: AWAIT? atom_expr +AWAIT: "await" + +?atom_expr: atom_expr "(" [arguments] ")" -> funccall + | atom_expr "[" subscriptlist "]" -> getitem + | atom_expr "." name -> getattr + | atom + +?atom: "(" yield_expr ")" + | "(" _tuple_inner? ")" -> tuple + | "(" comprehension{test_or_star_expr} ")" -> tuple_comprehension + | "[" _exprlist? "]" -> list + | "[" comprehension{test_or_star_expr} "]" -> list_comprehension + | "{" _dict_exprlist? "}" -> dict + | "{" comprehension{key_value} "}" -> dict_comprehension + | "{" _exprlist "}" -> set + | "{" comprehension{test} "}" -> set_comprehension + | name -> var + | number + | string_concat + | "(" test ")" + | "..." -> ellipsis + | "None" -> const_none + | "True" -> const_true + | "False" -> const_false + + +?string_concat: string+ + +_tuple_inner: test_or_star_expr (("," test_or_star_expr)+ [","] | ",") + +?test_or_star_expr: test + | star_expr + +?subscriptlist: subscript + | subscript (("," subscript)+ [","] | ",") -> subscript_tuple +?subscript: test | ([test] ":" [test] [sliceop]) -> slice +sliceop: ":" [test] +?exprlist: (expr|star_expr) + | (expr|star_expr) (("," (expr|star_expr))+ [","]|",") +?testlist: test | testlist_tuple +testlist_tuple: test (("," test)+ [","] | ",") +_dict_exprlist: (key_value | "**" expr) ("," (key_value | "**" expr))* [","] + +key_value: test ":" test + +_exprlist: test_or_star_expr ("," test_or_star_expr)* [","] + +classdef: "class" name ["(" [arguments] ")"] ":" suite + + + +arguments: argvalue ("," argvalue)* ("," [ starargs | kwargs])? + | starargs + | kwargs + | comprehension{test} + +starargs: stararg ("," stararg)* ("," argvalue)* ["," kwargs] +stararg: "*" test +kwargs: "**" test ("," argvalue)* + +?argvalue: test ("=" test)? + + +comprehension{comp_result}: comp_result comp_fors [comp_if] +comp_fors: comp_for+ +comp_for: [ASYNC] "for" exprlist "in" or_test +ASYNC: "async" +?comp_if: "if" test_nocond + +// not used in grammar, but may appear in "node" passed from Parser to Compiler +encoding_decl: name + +yield_expr: "yield" [testlist] + | "yield" "from" test -> yield_from + +number: DEC_NUMBER | HEX_NUMBER | BIN_NUMBER | OCT_NUMBER | FLOAT_NUMBER | IMAG_NUMBER +string: STRING | LONG_STRING + +// Other terminals + +_NEWLINE: ( /\r?\n[\t ]*/ | COMMENT )+ + +%ignore /[\t \f]+/ // WS +%ignore /\\[\t \f]*\r?\n/ // LINE_CONT +%ignore COMMENT +%declare _INDENT _DEDENT + + +// Python terminals + +!name: NAME | "match" | "case" +NAME: /[^\W\d]\w*/ +COMMENT: /#[^\n]*/ + +STRING: /([ubf]?r?|r[ubf])("(?!"").*?(? 
None: + self.paren_level = 0 + self.indent_level = [0] assert self.tab_len > 0 - def handle_NL(self, token): + def handle_NL(self, token: Token) -> Iterator[Token]: if self.paren_level > 0: return @@ -26,13 +38,13 @@ def handle_NL(self, token): self.indent_level.pop() yield Token.new_borrow_pos(self.DEDENT_type, indent_str, token) - assert indent == self.indent_level[-1], '%s != %s' % (indent, self.indent_level[-1]) + if indent != self.indent_level[-1]: + raise DedentError('Unexpected dedent to column %s. Expected dedent to %s' % (indent, self.indent_level[-1])) def _process(self, stream): for token in stream: if token.type == self.NL_type: - for t in self.handle_NL(token): - yield t + yield from self.handle_NL(token) else: yield token @@ -58,4 +70,43 @@ def process(self, stream): def always_accept(self): return (self.NL_type,) + @property + @abstractmethod + def NL_type(self) -> str: + raise NotImplementedError() + + @property + @abstractmethod + def OPEN_PAREN_types(self) -> List[str]: + raise NotImplementedError() + + @property + @abstractmethod + def CLOSE_PAREN_types(self) -> List[str]: + raise NotImplementedError() + + @property + @abstractmethod + def INDENT_type(self) -> str: + raise NotImplementedError() + + @property + @abstractmethod + def DEDENT_type(self) -> str: + raise NotImplementedError() + + @property + @abstractmethod + def tab_len(self) -> int: + raise NotImplementedError() + + +class PythonIndenter(Indenter): + NL_type = '_NEWLINE' + OPEN_PAREN_types = ['LPAR', 'LSQB', 'LBRACE'] + CLOSE_PAREN_types = ['RPAR', 'RSQB', 'RBRACE'] + INDENT_type = '_INDENT' + DEDENT_type = '_DEDENT' + tab_len = 8 + ###} diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/lark.py b/conda_lock/_vendor/poetry/core/_vendor/lark/lark.py index 2b783cb2..6d34aa62 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/lark.py +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/lark.py @@ -1,94 +1,170 @@ -from __future__ import absolute_import - -import sys, os, pickle, hashlib, logging -from io import open - - -from .utils import STRING_TYPE, Serialize, SerializeMemoizer, FS -from .load_grammar import load_grammar +from abc import ABC, abstractmethod +import getpass +import sys, os, pickle +import tempfile +import types +import re +from typing import ( + TypeVar, Type, List, Dict, Iterator, Callable, Union, Optional, Sequence, + Tuple, Iterable, IO, Any, TYPE_CHECKING, Collection +) +if TYPE_CHECKING: + from .parsers.lalr_interactive_parser import InteractiveParser + from .tree import ParseTree + from .visitors import Transformer + if sys.version_info >= (3, 8): + from typing import Literal + else: + from typing_extensions import Literal + from .parser_frontends import ParsingFrontend + +from .exceptions import ConfigurationError, assert_config, UnexpectedInput +from .utils import Serialize, SerializeMemoizer, FS, isascii, logger +from .load_grammar import load_grammar, FromPackageLoader, Grammar, verify_used_files, PackageResource, sha256_digest from .tree import Tree -from .common import LexerConf, ParserConf +from .common import LexerConf, ParserConf, _ParserArgType, _LexerArgType -from .lexer import Lexer, TraditionalLexer, TerminalDef, UnexpectedToken +from .lexer import Lexer, BasicLexer, TerminalDef, LexerThread, Token from .parse_tree_builder import ParseTreeBuilder -from .parser_frontends import get_frontend +from .parser_frontends import _validate_frontend_args, _get_lexer_callbacks, _deserialize_parsing_frontend, _construct_parsing_frontend from .grammar import Rule -import re + 
try: import regex + _has_regex = True except ImportError: - regex = None + _has_regex = False + ###{standalone + +class PostLex(ABC): + @abstractmethod + def process(self, stream: Iterator[Token]) -> Iterator[Token]: + return stream + + always_accept: Iterable[str] = () + class LarkOptions(Serialize): """Specifies the options for Lark """ - OPTIONS_DOC = """ -# General - - start - The start symbol. Either a string, or a list of strings for - multiple possible starts (Default: "start") - debug - Display debug information, such as warnings (default: False) - transformer - Applies the transformer to every parse tree (equivlent to - applying it after the parse, but faster) - propagate_positions - Propagates (line, column, end_line, end_column) - attributes into all tree branches. - maybe_placeholders - When True, the `[]` operator returns `None` when not matched. - When `False`, `[]` behaves like the `?` operator, - and returns no value at all. - (default=`False`. Recommended to set to `True`) - regex - When True, uses the `regex` module instead of the stdlib `re`. - cache - Cache the results of the Lark grammar analysis, for x2 to x3 faster loading. - LALR only for now. - When `False`, does nothing (default) - When `True`, caches to a temporary file in the local directory - When given a string, caches to the path pointed by the string - - g_regex_flags - Flags that are applied to all terminals - (both regex and strings) - keep_all_tokens - Prevent the tree builder from automagically - removing "punctuation" tokens (default: False) - -# Algorithm - - parser - Decides which parser engine to use - Accepts "earley" or "lalr". (Default: "earley") - (there is also a "cyk" option for legacy) - - lexer - Decides whether or not to use a lexer stage - "auto" (default): Choose for me based on the parser - "standard": Use a standard lexer - "contextual": Stronger lexer (only works with parser="lalr") - "dynamic": Flexible and powerful (only with parser="earley") - "dynamic_complete": Same as dynamic, but tries *every* variation - of tokenizing possible. - - ambiguity - Decides how to handle ambiguity in the parse. - Only relevant if parser="earley" - "resolve": The parser will automatically choose the simplest - derivation (it chooses consistently: greedy for - tokens, non-greedy for rules) - "explicit": The parser will return all derivations wrapped - in "_ambig" tree nodes (i.e. a forest). - -# Domain Specific - - postlex - Lexer post-processing (Default: None) Only works with the - standard and contextual lexers. - priority - How priorities should be evaluated - auto, none, normal, - invert (Default: auto) - lexer_callbacks - Dictionary of callbacks for the lexer. May alter - tokens during lexing. Use with caution. 
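The `PostLex` ABC just added is the same hook the indentation post-lexer plugs into. A minimal sketch of a custom implementation; the `COMMENT` terminal name and the `lark.lark` import path are assumptions for illustration:

    from typing import Iterator
    from lark import Token
    from lark.lark import PostLex

    class CommentStripper(PostLex):
        # Terminals the contextual lexer should keep matching even in
        # parser states that do not expect them.
        always_accept = ("COMMENT",)

        def process(self, stream: Iterator[Token]) -> Iterator[Token]:
            # Drop comment tokens before they ever reach the parser.
            return (tok for tok in stream if tok.type != "COMMENT")

Such a post-lexer is attached with `Lark(..., postlex=CommentStripper())`; as the rewritten options doc notes, it only works with the basic and contextual lexers.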
- edit_terminals - A callback + + start: List[str] + debug: bool + strict: bool + transformer: 'Optional[Transformer]' + propagate_positions: Union[bool, str] + maybe_placeholders: bool + cache: Union[bool, str] + regex: bool + g_regex_flags: int + keep_all_tokens: bool + tree_class: Optional[Callable[[str, List], Any]] + parser: _ParserArgType + lexer: _LexerArgType + ambiguity: 'Literal["auto", "resolve", "explicit", "forest"]' + postlex: Optional[PostLex] + priority: 'Optional[Literal["auto", "normal", "invert"]]' + lexer_callbacks: Dict[str, Callable[[Token], Token]] + use_bytes: bool + ordered_sets: bool + edit_terminals: Optional[Callable[[TerminalDef], TerminalDef]] + import_paths: 'List[Union[str, Callable[[Union[None, str, PackageResource], str], Tuple[str, str]]]]' + source_path: Optional[str] + + OPTIONS_DOC = r""" + **=== General Options ===** + + start + The start symbol. Either a string, or a list of strings for multiple possible starts (Default: "start") + debug + Display debug information and extra warnings. Use only when debugging (Default: ``False``) + When used with Earley, it generates a forest graph as "sppf.png", if 'dot' is installed. + strict + Throw an exception on any potential ambiguity, including shift/reduce conflicts, and regex collisions. + transformer + Applies the transformer to every parse tree (equivalent to applying it after the parse, but faster) + propagate_positions + Propagates positional attributes into the 'meta' attribute of all tree branches. + Sets attributes: (line, column, end_line, end_column, start_pos, end_pos, + container_line, container_column, container_end_line, container_end_column) + Accepts ``False``, ``True``, or a callable, which will filter which nodes to ignore when propagating. + maybe_placeholders + When ``True``, the ``[]`` operator returns ``None`` when not matched. + When ``False``, ``[]`` behaves like the ``?`` operator, and returns no value at all. + (default= ``True``) + cache + Cache the results of the Lark grammar analysis, for x2 to x3 faster loading. LALR only for now. + + - When ``False``, does nothing (default) + - When ``True``, caches to a temporary file in the local directory + - When given a string, caches to the path pointed by the string + regex + When True, uses the ``regex`` module instead of the stdlib ``re``. + g_regex_flags + Flags that are applied to all terminals (both regex and strings) + keep_all_tokens + Prevent the tree builder from automagically removing "punctuation" tokens (Default: ``False``) + tree_class + Lark will produce trees comprised of instances of this class instead of the default ``lark.Tree``. + + **=== Algorithm Options ===** + + parser + Decides which parser engine to use. Accepts "earley" or "lalr". (Default: "earley"). + (there is also a "cyk" option for legacy) + lexer + Decides whether or not to use a lexer stage + + - "auto" (default): Choose for me based on the parser + - "basic": Use a basic lexer + - "contextual": Stronger lexer (only works with parser="lalr") + - "dynamic": Flexible and powerful (only with parser="earley") + - "dynamic_complete": Same as dynamic, but tries *every* variation of tokenizing possible. + ambiguity + Decides how to handle ambiguity in the parse. Only relevant if parser="earley" + + - "resolve": The parser will automatically choose the simplest derivation + (it chooses consistently: greedy for tokens, non-greedy for rules) + - "explicit": The parser will return all derivations wrapped in "_ambig" tree nodes (i.e. a forest). 
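The ambiguity modes documented above are easiest to see on a grammar that gives one input two derivations. A minimal sketch with an invented grammar:

    from lark import Lark

    grammar = '''
        start: a b
        a: "x" | "x" "y"
        b: "y" "z" | "z"
    '''
    parser = Lark(grammar, parser='earley', ambiguity='explicit')
    # "xyz" parses as (a="x", b="yz") or (a="xy", b="z"); with
    # ambiguity='explicit' both derivations come back under an _ambig node.
    print(parser.parse("xyz").pretty())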
+ - "forest": The parser will return the root of the shared packed parse forest. + + **=== Misc. / Domain Specific Options ===** + + postlex + Lexer post-processing (Default: ``None``) Only works with the basic and contextual lexers. + priority + How priorities should be evaluated - "auto", ``None``, "normal", "invert" (Default: "auto") + lexer_callbacks + Dictionary of callbacks for the lexer. May alter tokens during lexing. Use with caution. + use_bytes + Accept an input of type ``bytes`` instead of ``str``. + ordered_sets + Should Earley use ordered-sets to achieve stable output (~10% slower than regular sets. Default: True) + edit_terminals + A callback for editing the terminals before parse. + import_paths + A List of either paths or loader functions to specify from where grammars are imported + source_path + Override the source of from where the grammar was loaded. Useful for relative imports and unconventional grammar loading + **=== End of Options ===** """ if __doc__: __doc__ += OPTIONS_DOC - _defaults = { + + # Adding a new option needs to be done in multiple places: + # - In the dictionary below. This is the primary truth of which options `Lark.__init__` accepts + # - In the docstring above. It is used both for the docstring of `LarkOptions` and `Lark`, and in readthedocs + # - As an attribute of `LarkOptions` above + # - Potentially in `_LOAD_ALLOWED_OPTIONS` below this class, when the option doesn't change how the grammar is loaded + # - Potentially in `lark.tools.__init__`, if it makes sense, and it can easily be passed as a cmd argument + _defaults: Dict[str, Any] = { 'debug': False, + 'strict': False, 'keep_all_tokens': False, 'tree_class': None, 'cache': False, @@ -102,181 +178,265 @@ class LarkOptions(Serialize): 'regex': False, 'propagate_positions': False, 'lexer_callbacks': {}, - 'maybe_placeholders': False, + 'maybe_placeholders': True, 'edit_terminals': None, 'g_regex_flags': 0, + 'use_bytes': False, + 'ordered_sets': True, + 'import_paths': [], + 'source_path': None, + '_plugins': {}, } - def __init__(self, options_dict): + def __init__(self, options_dict: Dict[str, Any]) -> None: o = dict(options_dict) options = {} for name, default in self._defaults.items(): if name in o: value = o.pop(name) - if isinstance(default, bool) and name != 'cache': + if isinstance(default, bool) and name not in ('cache', 'use_bytes', 'propagate_positions'): value = bool(value) else: value = default options[name] = value - if isinstance(options['start'], STRING_TYPE): + if isinstance(options['start'], str): options['start'] = [options['start']] self.__dict__['options'] = options - assert self.parser in ('earley', 'lalr', 'cyk', None) + + assert_config(self.parser, ('earley', 'lalr', 'cyk', None)) if self.parser == 'earley' and self.transformer: - raise ValueError('Cannot specify an embedded transformer when using the Earley algorithm.' + raise ConfigurationError('Cannot specify an embedded transformer when using the Earley algorithm. ' 'Please use your transformer on the resulting parse tree, or use a different algorithm (i.e. 
LALR)') if o: - raise ValueError("Unknown options: %s" % o.keys()) + raise ConfigurationError("Unknown options: %s" % o.keys()) - def __getattr__(self, name): + def __getattr__(self, name: str) -> Any: try: - return self.options[name] + return self.__dict__['options'][name] except KeyError as e: raise AttributeError(e) - def __setattr__(self, name, value): - assert name in self.options + def __setattr__(self, name: str, value: str) -> None: + assert_config(name, self.options.keys(), "%r isn't a valid option. Expected one of: %s") self.options[name] = value - def serialize(self, memo): + def serialize(self, memo = None) -> Dict[str, Any]: return self.options @classmethod - def deserialize(cls, data, memo): + def deserialize(cls, data: Dict[str, Any], memo: Dict[int, Union[TerminalDef, Rule]]) -> "LarkOptions": return cls(data) +# Options that can be passed to the Lark parser, even when it was loaded from cache/standalone. +# These options are only used outside of `load_grammar`. +_LOAD_ALLOWED_OPTIONS = {'postlex', 'transformer', 'lexer_callbacks', 'use_bytes', 'debug', 'g_regex_flags', 'regex', 'propagate_positions', 'tree_class', '_plugins'} + +_VALID_PRIORITY_OPTIONS = ('auto', 'normal', 'invert', None) +_VALID_AMBIGUITY_OPTIONS = ('auto', 'resolve', 'explicit', 'forest') + + +_T = TypeVar('_T', bound="Lark") + class Lark(Serialize): - def __init__(self, grammar, **options): - """ - grammar : a string or file-object containing the grammar spec (using Lark's ebnf syntax) - options : a dictionary controlling various aspects of Lark. - """ + """Main interface for the library. + + It's mostly a thin wrapper for the many different parsers, and for the tree constructor. + Parameters: + grammar: a string or file-object containing the grammar spec (using Lark's ebnf syntax) + options: a dictionary controlling various aspects of Lark. + + Example: + >>> Lark(r'''start: "foo" ''') + Lark(...) + """ + + source_path: str + source_grammar: str + grammar: 'Grammar' + options: LarkOptions + lexer: Lexer + parser: 'ParsingFrontend' + terminals: Collection[TerminalDef] + + def __init__(self, grammar: 'Union[Grammar, str, IO[str]]', **options) -> None: self.options = LarkOptions(options) + re_module: types.ModuleType # Set regex or re module use_regex = self.options.regex if use_regex: - if regex: - self.re = regex + if _has_regex: + re_module = regex else: raise ImportError('`regex` module must be installed if calling `Lark(regex=True)`.') else: - self.re = re + re_module = re # Some, but not all file-like objects have a 'name' attribute - try: - self.source = grammar.name - except AttributeError: - self.source = '' + if self.options.source_path is None: + try: + self.source_path = grammar.name # type: ignore[union-attr] + except AttributeError: + self.source_path = '' + else: + self.source_path = self.options.source_path # Drain file-like objects to get their contents try: - read = grammar.read + read = grammar.read # type: ignore[union-attr] except AttributeError: pass else: grammar = read() - assert isinstance(grammar, STRING_TYPE) - cache_fn = None - if self.options.cache: - if self.options.parser != 'lalr': - raise NotImplementedError("cache only works with parser='lalr' for now") - if isinstance(self.options.cache, STRING_TYPE): - cache_fn = self.options.cache - else: - if self.options.cache is not True: - raise ValueError("cache must be bool or str") - unhashable = ('transformer', 'postlex', 'lexer_callbacks', 'edit_terminals') - from . 
import __version__ + cache_sha256 = None + if isinstance(grammar, str): + self.source_grammar = grammar + if self.options.use_bytes: + if not isascii(grammar): + raise ConfigurationError("Grammar must be ascii only, when use_bytes=True") + + if self.options.cache: + if self.options.parser != 'lalr': + raise ConfigurationError("cache only works with parser='lalr' for now") + + unhashable = ('transformer', 'postlex', 'lexer_callbacks', 'edit_terminals', '_plugins') options_str = ''.join(k+str(v) for k, v in options.items() if k not in unhashable) - s = grammar + options_str + __version__ - md5 = hashlib.md5(s.encode()).hexdigest() - cache_fn = '.lark_cache_%s.tmp' % md5 + from . import __version__ + s = grammar + options_str + __version__ + str(sys.version_info[:2]) + cache_sha256 = sha256_digest(s) + + if isinstance(self.options.cache, str): + cache_fn = self.options.cache + else: + if self.options.cache is not True: + raise ConfigurationError("cache argument must be bool or str") + + try: + username = getpass.getuser() + except Exception: + # The exception raised may be ImportError or OSError in + # the future. For the cache, we don't care about the + # specific reason - we just want a username. + username = "unknown" + + cache_fn = tempfile.gettempdir() + "/.lark_cache_%s_%s_%s_%s.tmp" % (username, cache_sha256, *sys.version_info[:2]) + + old_options = self.options + try: + with FS.open(cache_fn, 'rb') as f: + logger.debug('Loading grammar from cache: %s', cache_fn) + # Remove options that aren't relevant for loading from cache + for name in (set(options) - _LOAD_ALLOWED_OPTIONS): + del options[name] + file_sha256 = f.readline().rstrip(b'\n') + cached_used_files = pickle.load(f) + if file_sha256 == cache_sha256.encode('utf8') and verify_used_files(cached_used_files): + cached_parser_data = pickle.load(f) + self._load(cached_parser_data, **options) + return + except FileNotFoundError: + # The cache file doesn't exist; parse and compose the grammar as normal + pass + except Exception: # We should probably narrow down which errors we catch here. + logger.exception("Failed to load Lark from cache: %r. We will try to carry on.", cache_fn) + + # In theory, the Lark instance might have been messed up by the call to `_load`. + # In practice the only relevant thing that might have been overwritten should be `options` + self.options = old_options + + + # Parse the grammar file and compose the grammars + self.grammar, used_files = load_grammar(grammar, self.source_path, self.options.import_paths, self.options.keep_all_tokens) + else: + assert isinstance(grammar, Grammar) + self.grammar = grammar - if FS.exists(cache_fn): - logging.debug('Loading grammar from cache: %s', cache_fn) - with FS.open(cache_fn, 'rb') as f: - self._load(f, self.options.transformer, self.options.postlex) - return if self.options.lexer == 'auto': if self.options.parser == 'lalr': self.options.lexer = 'contextual' elif self.options.parser == 'earley': - self.options.lexer = 'dynamic' + if self.options.postlex is not None: + logger.info("postlex can't be used with the dynamic lexer, so we use 'basic' instead. 
" + "Consider using lalr with contextual instead of earley") + self.options.lexer = 'basic' + else: + self.options.lexer = 'dynamic' elif self.options.parser == 'cyk': - self.options.lexer = 'standard' + self.options.lexer = 'basic' else: assert False, self.options.parser lexer = self.options.lexer - assert lexer in ('standard', 'contextual', 'dynamic', 'dynamic_complete') or issubclass(lexer, Lexer) + if isinstance(lexer, type): + assert issubclass(lexer, Lexer) # XXX Is this really important? Maybe just ensure interface compliance + else: + assert_config(lexer, ('basic', 'contextual', 'dynamic', 'dynamic_complete')) + if self.options.postlex is not None and 'dynamic' in lexer: + raise ConfigurationError("Can't use postlex with a dynamic lexer. Use basic or contextual instead") if self.options.ambiguity == 'auto': if self.options.parser == 'earley': self.options.ambiguity = 'resolve' else: - disambig_parsers = ['earley', 'cyk'] - assert self.options.parser in disambig_parsers, ( - 'Only %s supports disambiguation right now') % ', '.join(disambig_parsers) + assert_config(self.options.parser, ('earley', 'cyk'), "%r doesn't support disambiguation. Use one of these parsers instead: %s") if self.options.priority == 'auto': - if self.options.parser in ('earley', 'cyk', ): - self.options.priority = 'normal' - elif self.options.parser in ('lalr', ): - self.options.priority = None - elif self.options.priority in ('invert', 'normal'): - assert self.options.parser in ('earley', 'cyk'), "priorities are not supported for LALR at this time" + self.options.priority = 'normal' - assert self.options.priority in ('auto', None, 'normal', 'invert'), 'invalid priority option specified: {}. options are auto, none, normal, invert.'.format(self.options.priority) - assert self.options.ambiguity not in ('resolve__antiscore_sum', ), 'resolve__antiscore_sum has been replaced with the option priority="invert"' - assert self.options.ambiguity in ('resolve', 'explicit', 'auto', ) + if self.options.priority not in _VALID_PRIORITY_OPTIONS: + raise ConfigurationError("invalid priority option: %r. Must be one of %r" % (self.options.priority, _VALID_PRIORITY_OPTIONS)) + if self.options.ambiguity not in _VALID_AMBIGUITY_OPTIONS: + raise ConfigurationError("invalid ambiguity option: %r. Must be one of %r" % (self.options.ambiguity, _VALID_AMBIGUITY_OPTIONS)) - # Parse the grammar file and compose the grammars (TODO) - self.grammar = load_grammar(grammar, self.source, self.re) + if self.options.parser is None: + terminals_to_keep = '*' + elif self.options.postlex is not None: + terminals_to_keep = set(self.options.postlex.always_accept) + else: + terminals_to_keep = set() # Compile the EBNF grammar into BNF - self.terminals, self.rules, self.ignore_tokens = self.grammar.compile(self.options.start) + self.terminals, self.rules, self.ignore_tokens = self.grammar.compile(self.options.start, terminals_to_keep) if self.options.edit_terminals: for t in self.terminals: self.options.edit_terminals(t) - self._terminals_dict = {t.name:t for t in self.terminals} + self._terminals_dict = {t.name: t for t in self.terminals} # If the user asked to invert the priorities, negate them all here. - # This replaces the old 'resolve__antiscore_sum' option. if self.options.priority == 'invert': for rule in self.rules: if rule.options.priority is not None: rule.options.priority = -rule.options.priority + for term in self.terminals: + term.priority = -term.priority # Else, if the user asked to disable priorities, strip them from the - # rules. 
This allows the Earley parsers to skip an extra forest walk + # rules and terminals. This allows the Earley parsers to skip an extra forest walk # for improved performance, if you don't need them (or didn't specify any). - elif self.options.priority == None: + elif self.options.priority is None: for rule in self.rules: if rule.options.priority is not None: rule.options.priority = None - - # TODO Deprecate lexer_callbacks? - lexer_callbacks = dict(self.options.lexer_callbacks) - if self.options.transformer: - t = self.options.transformer for term in self.terminals: - if hasattr(t, term.name): - lexer_callbacks[term.name] = getattr(t, term.name) + term.priority = 0 - self.lexer_conf = LexerConf(self.terminals, self.ignore_tokens, self.options.postlex, lexer_callbacks, self.options.g_regex_flags) + # TODO Deprecate lexer_callbacks? + self.lexer_conf = LexerConf( + self.terminals, re_module, self.ignore_tokens, self.options.postlex, + self.options.lexer_callbacks, self.options.g_regex_flags, use_bytes=self.options.use_bytes, strict=self.options.strict + ) if self.options.parser: self.parser = self._build_parser() @@ -284,70 +444,128 @@ def __init__(self, grammar, **options): self.lexer = self._build_lexer() if cache_fn: - logging.debug('Saving grammar to cache: %s', cache_fn) - with FS.open(cache_fn, 'wb') as f: - self.save(f) + logger.debug('Saving grammar to cache: %s', cache_fn) + try: + with FS.open(cache_fn, 'wb') as f: + assert cache_sha256 is not None + f.write(cache_sha256.encode('utf8') + b'\n') + pickle.dump(used_files, f) + self.save(f, _LOAD_ALLOWED_OPTIONS) + except IOError as e: + logger.exception("Failed to save Lark to cache: %r.", cache_fn, e) - if __init__.__doc__: - __init__.__doc__ += "\nOptions:\n" + LarkOptions.OPTIONS_DOC + if __doc__: + __doc__ += "\n\n" + LarkOptions.OPTIONS_DOC __serialize_fields__ = 'parser', 'rules', 'options' - def _build_lexer(self): - return TraditionalLexer(self.lexer_conf.tokens, ignore=self.lexer_conf.ignore, user_callbacks=self.lexer_conf.callbacks, g_regex_flags=self.lexer_conf.g_regex_flags) - - def _prepare_callbacks(self): - self.parser_class = get_frontend(self.options.parser, self.options.lexer) - self._parse_tree_builder = ParseTreeBuilder(self.rules, self.options.tree_class or Tree, self.options.propagate_positions, self.options.keep_all_tokens, self.options.parser!='lalr' and self.options.ambiguity=='explicit', self.options.maybe_placeholders) - self._callbacks = self._parse_tree_builder.create_callback(self.options.transformer) - - def _build_parser(self): + def _build_lexer(self, dont_ignore: bool=False) -> BasicLexer: + lexer_conf = self.lexer_conf + if dont_ignore: + from copy import copy + lexer_conf = copy(lexer_conf) + lexer_conf.ignore = () + return BasicLexer(lexer_conf) + + def _prepare_callbacks(self) -> None: + self._callbacks = {} + # we don't need these callbacks if we aren't building a tree + if self.options.ambiguity != 'forest': + self._parse_tree_builder = ParseTreeBuilder( + self.rules, + self.options.tree_class or Tree, + self.options.propagate_positions, + self.options.parser != 'lalr' and self.options.ambiguity == 'explicit', + self.options.maybe_placeholders + ) + self._callbacks = self._parse_tree_builder.create_callback(self.options.transformer) + self._callbacks.update(_get_lexer_callbacks(self.options.transformer, self.terminals)) + + def _build_parser(self) -> "ParsingFrontend": self._prepare_callbacks() + _validate_frontend_args(self.options.parser, self.options.lexer) parser_conf = ParserConf(self.rules, 
self._callbacks, self.options.start) - return self.parser_class(self.lexer_conf, parser_conf, self.re, options=self.options) - - def save(self, f): + return _construct_parsing_frontend( + self.options.parser, + self.options.lexer, + self.lexer_conf, + parser_conf, + options=self.options + ) + + def save(self, f, exclude_options: Collection[str] = ()) -> None: + """Saves the instance into the given file object + + Useful for caching and multiprocessing. + """ + if self.options.parser != 'lalr': + raise NotImplementedError("Lark.save() is only implemented for the LALR(1) parser.") data, m = self.memo_serialize([TerminalDef, Rule]) - pickle.dump({'data': data, 'memo': m}, f) + if exclude_options: + data["options"] = {n: v for n, v in data["options"].items() if n not in exclude_options} + pickle.dump({'data': data, 'memo': m}, f, protocol=pickle.HIGHEST_PROTOCOL) @classmethod - def load(cls, f): + def load(cls: Type[_T], f) -> _T: + """Loads an instance from the given file object + + Useful for caching and multiprocessing. + """ inst = cls.__new__(cls) return inst._load(f) - def _load(self, f, transformer=None, postlex=None): + def _deserialize_lexer_conf(self, data: Dict[str, Any], memo: Dict[int, Union[TerminalDef, Rule]], options: LarkOptions) -> LexerConf: + lexer_conf = LexerConf.deserialize(data['lexer_conf'], memo) + lexer_conf.callbacks = options.lexer_callbacks or {} + lexer_conf.re_module = regex if options.regex else re + lexer_conf.use_bytes = options.use_bytes + lexer_conf.g_regex_flags = options.g_regex_flags + lexer_conf.skip_validation = True + lexer_conf.postlex = options.postlex + return lexer_conf + + def _load(self: _T, f: Any, **kwargs) -> _T: if isinstance(f, dict): d = f else: d = pickle.load(f) - memo = d['memo'] + memo_json = d['memo'] data = d['data'] - assert memo - memo = SerializeMemoizer.deserialize(memo, {'Rule': Rule, 'TerminalDef': TerminalDef}, {}) + assert memo_json + memo = SerializeMemoizer.deserialize(memo_json, {'Rule': Rule, 'TerminalDef': TerminalDef}, {}) options = dict(data['options']) - if transformer is not None: - options['transformer'] = transformer - if postlex is not None: - options['postlex'] = postlex + if (set(kwargs) - _LOAD_ALLOWED_OPTIONS) & set(LarkOptions._defaults): + raise ConfigurationError("Some options are not allowed when loading a Parser: {}" + .format(set(kwargs) - _LOAD_ALLOWED_OPTIONS)) + options.update(kwargs) self.options = LarkOptions.deserialize(options, memo) - self.re = regex if self.options.regex else re self.rules = [Rule.deserialize(r, memo) for r in data['rules']] - self.source = '' + self.source_path = '' + _validate_frontend_args(self.options.parser, self.options.lexer) + self.lexer_conf = self._deserialize_lexer_conf(data['parser'], memo, self.options) + self.terminals = self.lexer_conf.terminals self._prepare_callbacks() - self.parser = self.parser_class.deserialize(data['parser'], memo, self._callbacks, self.options.postlex, self.re) + self._terminals_dict = {t.name: t for t in self.terminals} + self.parser = _deserialize_parsing_frontend( + data['parser'], + memo, + self.lexer_conf, + self._callbacks, + self.options, # Not all, but multiple attributes are used + ) return self @classmethod - def _load_from_dict(cls, data, memo, transformer=None, postlex=None): + def _load_from_dict(cls, data, memo, **kwargs): inst = cls.__new__(cls) - return inst._load({'data': data, 'memo': memo}, transformer, postlex) + return inst._load({'data': data, 'memo': memo}, **kwargs) @classmethod - def open(cls, grammar_filename, 
rel_to=None, **options): + def open(cls: Type[_T], grammar_filename: str, rel_to: Optional[str]=None, **options) -> _T: """Create an instance of Lark with the grammar given by its filename - If rel_to is provided, the function will find the grammar filename in relation to it. + If ``rel_to`` is provided, the function will find the grammar filename in relation to it. Example: @@ -361,45 +579,83 @@ def open(cls, grammar_filename, rel_to=None, **options): with open(grammar_filename, encoding='utf8') as f: return cls(f, **options) + @classmethod + def open_from_package(cls: Type[_T], package: str, grammar_path: str, search_paths: 'Sequence[str]'=[""], **options) -> _T: + """Create an instance of Lark with the grammar loaded from within the package `package`. + This allows grammar loading from zipapps. + + Imports in the grammar will use the `package` and `search_paths` provided, through `FromPackageLoader` + + Example: + + Lark.open_from_package(__name__, "example.lark", ("grammars",), parser=...) + """ + package_loader = FromPackageLoader(package, search_paths) + full_path, text = package_loader(None, grammar_path) + options.setdefault('source_path', full_path) + options.setdefault('import_paths', []) + options['import_paths'].append(package_loader) + return cls(text, **options) + def __repr__(self): - return 'Lark(open(%r), parser=%r, lexer=%r, ...)' % (self.source, self.options.parser, self.options.lexer) + return 'Lark(open(%r), parser=%r, lexer=%r, ...)' % (self.source_path, self.options.parser, self.options.lexer) - def lex(self, text): - "Only lex (and postlex) the text, without parsing it. Only relevant when lexer='standard'" - if not hasattr(self, 'lexer'): - self.lexer = self._build_lexer() - stream = self.lexer.lex(text) + def lex(self, text: str, dont_ignore: bool=False) -> Iterator[Token]: + """Only lex (and postlex) the text, without parsing it. Only relevant when lexer='basic' + + When dont_ignore=True, the lexer will return all tokens, even those marked for %ignore. + + :raises UnexpectedCharacters: In case the lexer cannot find a suitable match. + """ + lexer: Lexer + if not hasattr(self, 'lexer') or dont_ignore: + lexer = self._build_lexer(dont_ignore) + else: + lexer = self.lexer + lexer_thread = LexerThread.from_text(lexer, text) + stream = lexer_thread.lex(None) if self.options.postlex: return self.options.postlex.process(stream) return stream - def get_terminal(self, name): - "Get information about a terminal" + def get_terminal(self, name: str) -> TerminalDef: + """Get information about a terminal""" return self._terminals_dict[name] - def parse(self, text, start=None, on_error=None): + def parse_interactive(self, text: Optional[str]=None, start: Optional[str]=None) -> 'InteractiveParser': + """Start an interactive parsing session. + + Parameters: + text (str, optional): Text to be parsed. Required for ``resume_parse()``. + start (str, optional): Start symbol + + Returns: + A new InteractiveParser instance. + + See Also: ``Lark.parse()`` + """ + return self.parser.parse_interactive(text, start=start) + + def parse(self, text: str, start: Optional[str]=None, on_error: 'Optional[Callable[[UnexpectedInput], bool]]'=None) -> 'ParseTree': """Parse the given text, according to the options provided. Parameters: - start: str - required if Lark was given multiple possible start symbols (using the start option). - on_error: function - if provided, will be called on UnexpectedToken error. Return true to resume parsing. LALR only. + text (str): Text to be parsed. 
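A sketch of the interactive session that `parse_interactive` starts (LALR only). The tiny grammar is invented; `exhaust_lexer`, `accepts`, `feed_token` and `feed_eof` are methods of lark's InteractiveParser, and the terminal name 'C' assumes lark's usual auto-naming of the anonymous string "c":

    from lark import Lark, Token

    parser = Lark('start: "a" "b" "c"', parser="lalr")
    ip = parser.parse_interactive("ab")
    ip.exhaust_lexer()                # feed every token lexed from "ab"
    print(ip.accepts())               # terminals the parser can take next, e.g. {'C'}
    ip.feed_token(Token("C", "c"))    # inject the missing token by hand
    tree = ip.feed_eof()              # finish the parse and collect the tree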
+ start (str, optional): Required if Lark was given multiple possible start symbols (using the start option). + on_error (function, optional): if provided, will be called on UnexpectedToken error. Return true to resume parsing. + LALR only. See examples/advanced/error_handling.py for an example of how to use on_error. + + Returns: + If a transformer is supplied to ``__init__``, returns whatever is the + result of the transformation. Otherwise, returns a Tree instance. + + :raises UnexpectedInput: On a parse error, one of these sub-exceptions will rise: + ``UnexpectedCharacters``, ``UnexpectedToken``, or ``UnexpectedEOF``. + For convenience, these sub-exceptions also inherit from ``ParserError`` and ``LexerError``. - Returns a tree, unless specified otherwise. """ - try: - return self.parser.parse(text, start=start) - except UnexpectedToken as e: - if on_error is None: - raise - - while True: - if not on_error(e): - raise e - try: - return e.puppet.resume_parse() - except UnexpectedToken as e2: - e = e2 + return self.parser.parse(text, start=start, on_error=on_error) ###} diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/lexer.py b/conda_lock/_vendor/poetry/core/_vendor/lark/lexer.py index bff5de9e..9061d600 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/lexer.py +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/lexer.py @@ -1,17 +1,47 @@ -## Lexer Implementation +# Lexer Implementation +from abc import abstractmethod, ABC import re - -from .utils import Str, classify, get_regexp_width, Py36, Serialize +from contextlib import suppress +from typing import ( + TypeVar, Type, Dict, Iterator, Collection, Callable, Optional, FrozenSet, Any, + ClassVar, TYPE_CHECKING, overload +) +from types import ModuleType +import warnings +try: + import interegular +except ImportError: + pass +if TYPE_CHECKING: + from .common import LexerConf + from .parsers.lalr_parser_state import ParserState + +from .utils import classify, get_regexp_width, Serialize, logger from .exceptions import UnexpectedCharacters, LexError, UnexpectedToken +from .grammar import TOKEN_DEFAULT_PRIORITY + ###{standalone +from copy import copy + +try: # For the standalone parser, we need to make sure that has_interegular is False to avoid NameErrors later on + has_interegular = bool(interegular) +except NameError: + has_interegular = False -class Pattern(Serialize): +class Pattern(Serialize, ABC): + "An abstraction over regular expressions." - def __init__(self, value, flags=()): + value: str + flags: Collection[str] + raw: Optional[str] + type: ClassVar[str] + + def __init__(self, value: str, flags: Collection[str] = (), raw: Optional[str] = None) -> None: self.value = value self.flags = frozenset(flags) + self.raw = raw def __repr__(self): return repr(self.to_regexp()) @@ -19,45 +49,53 @@ def __repr__(self): # Pattern Hashing assumes all subclasses have a different priority! 
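The two concrete Pattern subclasses defined in this hunk can be exercised directly; a short sketch using only names introduced here:

    from lark.lexer import PatternRE, PatternStr, TerminalDef

    kw = TerminalDef("IF", PatternStr("if"))
    num = TerminalDef("NUMBER", PatternRE(r"[0-9]+"))
    print(kw.pattern.to_regexp())     # the literal, escaped for the re module
    print(num.pattern.min_width)      # 1 -- width bounds derived from the regexp
    print(num.pattern.max_width)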
def __hash__(self): return hash((type(self), self.value, self.flags)) + def __eq__(self, other): return type(self) == type(other) and self.value == other.value and self.flags == other.flags - def to_regexp(self): + @abstractmethod + def to_regexp(self) -> str: + raise NotImplementedError() + + @property + @abstractmethod + def min_width(self) -> int: raise NotImplementedError() - if Py36: - # Python 3.6 changed syntax for flags in regular expression - def _get_flags(self, value): - for f in self.flags: - value = ('(?%s:%s)' % (f, value)) - return value + @property + @abstractmethod + def max_width(self) -> int: + raise NotImplementedError() - else: - def _get_flags(self, value): - for f in self.flags: - value = ('(?%s)' % f) + value - return value + def _get_flags(self, value): + for f in self.flags: + value = ('(?%s:%s)' % (f, value)) + return value class PatternStr(Pattern): - __serialize_fields__ = 'value', 'flags' + __serialize_fields__ = 'value', 'flags', 'raw' - type = "str" + type: ClassVar[str] = "str" - def to_regexp(self): + def to_regexp(self) -> str: return self._get_flags(re.escape(self.value)) @property - def min_width(self): + def min_width(self) -> int: + return len(self.value) + + @property + def max_width(self) -> int: return len(self.value) - max_width = min_width + class PatternRE(Pattern): - __serialize_fields__ = 'value', 'flags', '_width' + __serialize_fields__ = 'value', 'flags', 'raw', '_width' - type = "re" + type: ClassVar[str] = "re" - def to_regexp(self): + def to_regexp(self) -> str: return self._get_flags(self.value) _width = None @@ -67,18 +105,24 @@ def _get_width(self): return self._width @property - def min_width(self): + def min_width(self) -> int: return self._get_width()[0] + @property - def max_width(self): + def max_width(self) -> int: return self._get_width()[1] class TerminalDef(Serialize): + "A definition of a terminal" __serialize_fields__ = 'name', 'pattern', 'priority' __serialize_namespace__ = PatternStr, PatternRE - def __init__(self, name, pattern, priority=1): + name: str + pattern: Pattern + priority: int + + def __init__(self, name: str, pattern: Pattern, priority: int = TOKEN_DEFAULT_PRIORITY) -> None: assert isinstance(pattern, Pattern), pattern self.name = name self.pattern = pattern @@ -87,69 +131,168 @@ def __init__(self, name, pattern, priority=1): def __repr__(self): return '%s(%r, %r)' % (type(self).__name__, self.name, self.pattern) + def user_repr(self) -> str: + if self.name.startswith('__'): # We represent a generated terminal + return self.pattern.raw or self.name + else: + return self.name + +_T = TypeVar('_T', bound="Token") + +class Token(str): + """A string with meta-information, that is produced by the lexer. + + When parsing text, the resulting chunks of the input that haven't been discarded, + will end up in the tree as Token instances. The Token class inherits from Python's ``str``, + so normal string comparisons and operations will work as expected. + + Attributes: + type: Name of the token (as specified in grammar) + value: Value of the token (redundant, as ``token.value == token`` will always be true) + start_pos: The index of the token in the text + line: The line of the token in the text (starting with 1) + column: The column of the token in the text (starting with 1) + end_line: The line where the token ends + end_column: The next column after the end of the token. For example, + if the token is a single character with a column value of 4, + end_column will be 5. 
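Because Token subclasses `str`, the metadata documented above travels along with ordinary string values; a short sketch grounded in the constructor and `update` defined in this hunk:

    from lark import Token

    t = Token("NAME", "foo", start_pos=0, line=1, column=1)
    assert t == "foo" and t.type == "NAME"    # compares like a plain string
    t2 = t.update(value="bar")                # new Token; positions borrowed from t
    assert t2 == "bar" and t2.line == 1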
+ end_pos: the index where the token ends (basically ``start_pos + len(token)``) + """ + __slots__ = ('type', 'start_pos', 'value', 'line', 'column', 'end_line', 'end_column', 'end_pos') + + __match_args__ = ('type', 'value') + + type: str + start_pos: Optional[int] + value: Any + line: Optional[int] + column: Optional[int] + end_line: Optional[int] + end_column: Optional[int] + end_pos: Optional[int] + + + @overload + def __new__( + cls, + type: str, + value: Any, + start_pos: Optional[int] = None, + line: Optional[int] = None, + column: Optional[int] = None, + end_line: Optional[int] = None, + end_column: Optional[int] = None, + end_pos: Optional[int] = None + ) -> 'Token': + ... + + @overload + def __new__( + cls, + type_: str, + value: Any, + start_pos: Optional[int] = None, + line: Optional[int] = None, + column: Optional[int] = None, + end_line: Optional[int] = None, + end_column: Optional[int] = None, + end_pos: Optional[int] = None + ) -> 'Token': ... + + def __new__(cls, *args, **kwargs): + if "type_" in kwargs: + warnings.warn("`type_` is deprecated use `type` instead", DeprecationWarning) + + if "type" in kwargs: + raise TypeError("Error: using both 'type' and the deprecated 'type_' as arguments.") + kwargs["type"] = kwargs.pop("type_") + + return cls._future_new(*args, **kwargs) -class Token(Str): - __slots__ = ('type', 'pos_in_stream', 'value', 'line', 'column', 'end_line', 'end_column', 'end_pos') - - def __new__(cls, type_, value, pos_in_stream=None, line=None, column=None, end_line=None, end_column=None, end_pos=None): - try: - self = super(Token, cls).__new__(cls, value) - except UnicodeDecodeError: - value = value.decode('latin1') - self = super(Token, cls).__new__(cls, value) - - self.type = type_ - self.pos_in_stream = pos_in_stream - self.value = value - self.line = line - self.column = column - self.end_line = end_line - self.end_column = end_column - self.end_pos = end_pos - return self - - def update(self, type_=None, value=None): + @classmethod + def _future_new(cls, type, value, start_pos=None, line=None, column=None, end_line=None, end_column=None, end_pos=None): + inst = super(Token, cls).__new__(cls, value) + + inst.type = type + inst.start_pos = start_pos + inst.value = value + inst.line = line + inst.column = column + inst.end_line = end_line + inst.end_column = end_column + inst.end_pos = end_pos + return inst + + @overload + def update(self, type: Optional[str] = None, value: Optional[Any] = None) -> 'Token': + ... + + @overload + def update(self, type_: Optional[str] = None, value: Optional[Any] = None) -> 'Token': + ... 
+ + def update(self, *args, **kwargs): + if "type_" in kwargs: + warnings.warn("`type_` is deprecated use `type` instead", DeprecationWarning) + + if "type" in kwargs: + raise TypeError("Error: using both 'type' and the deprecated 'type_' as arguments.") + kwargs["type"] = kwargs.pop("type_") + + return self._future_update(*args, **kwargs) + + def _future_update(self, type: Optional[str] = None, value: Optional[Any] = None) -> 'Token': return Token.new_borrow_pos( - type_ if type_ is not None else self.type, + type if type is not None else self.type, value if value is not None else self.value, self ) @classmethod - def new_borrow_pos(cls, type_, value, borrow_t): - return cls(type_, value, borrow_t.pos_in_stream, borrow_t.line, borrow_t.column, borrow_t.end_line, borrow_t.end_column, borrow_t.end_pos) + def new_borrow_pos(cls: Type[_T], type_: str, value: Any, borrow_t: 'Token') -> _T: + return cls(type_, value, borrow_t.start_pos, borrow_t.line, borrow_t.column, borrow_t.end_line, borrow_t.end_column, borrow_t.end_pos) def __reduce__(self): - return (self.__class__, (self.type, self.value, self.pos_in_stream, self.line, self.column, )) + return (self.__class__, (self.type, self.value, self.start_pos, self.line, self.column)) def __repr__(self): - return 'Token(%s, %r)' % (self.type, self.value) + return 'Token(%r, %r)' % (self.type, self.value) def __deepcopy__(self, memo): - return Token(self.type, self.value, self.pos_in_stream, self.line, self.column) + return Token(self.type, self.value, self.start_pos, self.line, self.column) def __eq__(self, other): if isinstance(other, Token) and self.type != other.type: return False - return Str.__eq__(self, other) + return str.__eq__(self, other) - __hash__ = Str.__hash__ + __hash__ = str.__hash__ class LineCounter: - def __init__(self): - self.newline_char = '\n' + "A utility class for keeping track of line & column information" + + __slots__ = 'char_pos', 'line', 'column', 'line_start_pos', 'newline_char' + + def __init__(self, newline_char): + self.newline_char = newline_char self.char_pos = 0 self.line = 1 self.column = 1 self.line_start_pos = 0 - def feed(self, token, test_newline=True): + def __eq__(self, other): + if not isinstance(other, LineCounter): + return NotImplemented + + return self.char_pos == other.char_pos and self.newline_char == other.newline_char + + def feed(self, token: Token, test_newline=True): """Consume a token and calculate the new line & column. - As an optional optimization, set test_newline=False is token doesn't contain a newline. + As an optional optimization, set test_newline=False if token doesn't contain a newline. 
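A short sketch of the LineCounter bookkeeping defined here, checking its fields after consuming a newline:

    from lark.lexer import LineCounter

    lc = LineCounter("\n")
    lc.feed("ab\nc")
    # char_pos advanced by 4, one newline seen, 'c' sits in column 2 of line 2
    assert (lc.line, lc.column, lc.char_pos) == (2, 2, 4)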
""" if test_newline: newlines = token.count(self.newline_char) @@ -160,62 +303,18 @@ def feed(self, token, test_newline=True): self.char_pos += len(token) self.column = self.char_pos - self.line_start_pos + 1 -class _Lex: - "Built to serve both Lexer and ContextualLexer" - def __init__(self, lexer, state=None): - self.lexer = lexer - self.state = state - - def lex(self, stream, newline_types, ignore_types): - newline_types = frozenset(newline_types) - ignore_types = frozenset(ignore_types) - line_ctr = LineCounter() - last_token = None - - while line_ctr.char_pos < len(stream): - lexer = self.lexer - res = lexer.match(stream, line_ctr.char_pos) - if not res: - allowed = {v for m, tfi in lexer.mres for v in tfi.values()} - ignore_types - if not allowed: - allowed = {""} - raise UnexpectedCharacters(stream, line_ctr.char_pos, line_ctr.line, line_ctr.column, allowed=allowed, state=self.state, token_history=last_token and [last_token]) - - value, type_ = res - - if type_ not in ignore_types: - t = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column) - line_ctr.feed(value, type_ in newline_types) - t.end_line = line_ctr.line - t.end_column = line_ctr.column - t.end_pos = line_ctr.char_pos - if t.type in lexer.callback: - t = lexer.callback[t.type](t) - if not isinstance(t, Token): - raise ValueError("Callbacks must return a token (returned %r)" % t) - yield t - last_token = t - else: - if type_ in lexer.callback: - t2 = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column) - lexer.callback[type_](t2) - line_ctr.feed(value, type_ in newline_types) - - - class UnlessCallback: - def __init__(self, mres): - self.mres = mres + def __init__(self, scanner): + self.scanner = scanner def __call__(self, t): - for mre, type_from_index in self.mres: - m = mre.match(t.value) - if m: - t.type = type_from_index[m.lastindex] - break + res = self.scanner.match(t.value, 0) + if res: + _value, t.type = res return t + class CallChain: def __init__(self, callback1, callback2, cond): self.callback1 = callback1 @@ -227,53 +326,72 @@ def __call__(self, t): return self.callback2(t) if self.cond(t2) else t2 +def _get_match(re_, regexp, s, flags): + m = re_.match(regexp, s, flags) + if m: + return m.group(0) - - -def _create_unless(terminals, g_regex_flags, re_): +def _create_unless(terminals, g_regex_flags, re_, use_bytes): tokens_by_type = classify(terminals, lambda t: type(t.pattern)) assert len(tokens_by_type) <= 2, tokens_by_type.keys() embedded_strs = set() callback = {} for retok in tokens_by_type.get(PatternRE, []): - unless = [] # {} + unless = [] for strtok in tokens_by_type.get(PatternStr, []): - if strtok.priority > retok.priority: + if strtok.priority != retok.priority: continue s = strtok.pattern.value - m = re_.match(retok.pattern.to_regexp(), s, g_regex_flags) - if m and m.group(0) == s: + if s == _get_match(re_, retok.pattern.to_regexp(), s, g_regex_flags): unless.append(strtok) if strtok.pattern.flags <= retok.pattern.flags: embedded_strs.add(strtok) if unless: - callback[retok.name] = UnlessCallback(build_mres(unless, g_regex_flags, re_, match_whole=True)) + callback[retok.name] = UnlessCallback(Scanner(unless, g_regex_flags, re_, match_whole=True, use_bytes=use_bytes)) - terminals = [t for t in terminals if t not in embedded_strs] - return terminals, callback + new_terminals = [t for t in terminals if t not in embedded_strs] + return new_terminals, callback -def _build_mres(terminals, max_size, g_regex_flags, match_whole, re_): - # Python sets an unreasonable group 
limit (currently 100) in its re module - # Worse, the only way to know we reached it is by catching an AssertionError! - # This function recursively tries less and less groups until it's successful. - postfix = '$' if match_whole else '' - mres = [] - while terminals: - try: - mre = re_.compile(u'|'.join(u'(?P<%s>%s)'%(t.name, t.pattern.to_regexp()+postfix) for t in terminals[:max_size]), g_regex_flags) - except AssertionError: # Yes, this is what Python provides us.. :/ - return _build_mres(terminals, max_size//2, g_regex_flags, match_whole, re_) +class Scanner: + def __init__(self, terminals, g_regex_flags, re_, use_bytes, match_whole=False): + self.terminals = terminals + self.g_regex_flags = g_regex_flags + self.re_ = re_ + self.use_bytes = use_bytes + self.match_whole = match_whole + + self.allowed_types = {t.name for t in self.terminals} + + self._mres = self._build_mres(terminals, len(terminals)) + + def _build_mres(self, terminals, max_size): + # Python sets an unreasonable group limit (currently 100) in its re module + # Worse, the only way to know we reached it is by catching an AssertionError! + # This function recursively tries less and less groups until it's successful. + postfix = '$' if self.match_whole else '' + mres = [] + while terminals: + pattern = u'|'.join(u'(?P<%s>%s)' % (t.name, t.pattern.to_regexp() + postfix) for t in terminals[:max_size]) + if self.use_bytes: + pattern = pattern.encode('latin-1') + try: + mre = self.re_.compile(pattern, self.g_regex_flags) + except AssertionError: # Yes, this is what Python provides us.. :/ + return self._build_mres(terminals, max_size // 2) + + mres.append(mre) + terminals = terminals[max_size:] + return mres - # terms_from_name = {t.name: t for t in terminals[:max_size]} - mres.append((mre, {i:n for n,i in mre.groupindex.items()} )) - terminals = terminals[max_size:] - return mres + def match(self, text, pos): + for mre in self._mres: + m = mre.match(text, pos) + if m: + return m.group(0), m.lastgroup -def build_mres(terminals, g_regex_flags, re_, match_whole=False): - return _build_mres(terminals, len(terminals), g_regex_flags, match_whole, re_) -def _regexp_has_newline(r): +def _regexp_has_newline(r: str): r"""Expressions that may indicate newlines in a regexp: - newlines (\n) - escaped newline (\\n) @@ -283,46 +401,172 @@ def _regexp_has_newline(r): """ return '\n' in r or '\\n' in r or '\\s' in r or '[^' in r or ('(?s' in r and '.' 
in r) -class Lexer(object): - """Lexer interface - Method Signatures: - lex(self, stream) -> Iterator[Token] +class LexerState: + """Represents the current state of the lexer as it scans the text + (Lexer objects are only instantiated per grammar, not per text) """ - lex = NotImplemented + __slots__ = 'text', 'line_ctr', 'last_token' -class TraditionalLexer(Lexer): + text: str + line_ctr: LineCounter + last_token: Optional[Token] + + def __init__(self, text: str, line_ctr: Optional[LineCounter]=None, last_token: Optional[Token]=None): + self.text = text + self.line_ctr = line_ctr or LineCounter(b'\n' if isinstance(text, bytes) else '\n') + self.last_token = last_token + + def __eq__(self, other): + if not isinstance(other, LexerState): + return NotImplemented + + return self.text is other.text and self.line_ctr == other.line_ctr and self.last_token == other.last_token + + def __copy__(self): + return type(self)(self.text, copy(self.line_ctr), self.last_token) + + +class LexerThread: + """A thread that ties a lexer instance and a lexer state, to be used by the parser + """ + + def __init__(self, lexer: 'Lexer', lexer_state: LexerState): + self.lexer = lexer + self.state = lexer_state + + @classmethod + def from_text(cls, lexer: 'Lexer', text: str) -> 'LexerThread': + return cls(lexer, LexerState(text)) + + def lex(self, parser_state): + return self.lexer.lex(self.state, parser_state) + + def __copy__(self): + return type(self)(self.lexer, copy(self.state)) + + _Token = Token - def __init__(self, terminals, re_, ignore=(), user_callbacks={}, g_regex_flags=0): - assert all(isinstance(t, TerminalDef) for t in terminals), terminals - terminals = list(terminals) +_Callback = Callable[[Token], Token] - self.re = re_ - # Sanitization - for t in terminals: +class Lexer(ABC): + """Lexer interface + + Method Signatures: + lex(self, lexer_state, parser_state) -> Iterator[Token] + """ + @abstractmethod + def lex(self, lexer_state: LexerState, parser_state: Any) -> Iterator[Token]: + return NotImplemented + + def make_lexer_state(self, text): + "Deprecated" + return LexerState(text) + + +def _check_regex_collisions(terminal_to_regexp: Dict[TerminalDef, str], comparator, strict_mode, max_collisions_to_show=8): + if not comparator: + comparator = interegular.Comparator.from_regexes(terminal_to_regexp) + + # When in strict mode, we only ever try to provide one example, so taking + # a long time for that should be fine + max_time = 2 if strict_mode else 0.2 + + # We don't want to show too many collisions. + if comparator.count_marked_pairs() >= max_collisions_to_show: + return + for group in classify(terminal_to_regexp, lambda t: t.priority).values(): + for a, b in comparator.check(group, skip_marked=True): + assert a.priority == b.priority + # Mark this pair to not repeat warnings when multiple different BasicLexers see the same collision + comparator.mark(a, b) + + # Notify the user + message = f"Collision between Terminals {a.name} and {b.name}. " try: - self.re.compile(t.pattern.to_regexp(), g_regex_flags) - except self.re.error: - raise LexError("Cannot compile token %s: %s" % (t.name, t.pattern)) + example = comparator.get_example_overlap(a, b, max_time).format_multiline() + except ValueError: + # Couldn't find an example within max_time steps. + example = "No example could be found fast enough. 
However, the collision does still exist" + if strict_mode: + raise LexError(f"{message}\n{example}") + logger.warning("%s The lexer will choose between them arbitrarily.\n%s", message, example) + if comparator.count_marked_pairs() >= max_collisions_to_show: + logger.warning("Found 8 regex collisions, will not check for more.") + return + + +class AbstractBasicLexer(Lexer): + terminals_by_name: Dict[str, TerminalDef] + + @abstractmethod + def __init__(self, conf: 'LexerConf', comparator=None) -> None: + ... + + @abstractmethod + def next_token(self, lex_state: LexerState, parser_state: Any = None) -> Token: + ... + + def lex(self, state: LexerState, parser_state: Any) -> Iterator[Token]: + with suppress(EOFError): + while True: + yield self.next_token(state, parser_state) + + +class BasicLexer(AbstractBasicLexer): + terminals: Collection[TerminalDef] + ignore_types: FrozenSet[str] + newline_types: FrozenSet[str] + user_callbacks: Dict[str, _Callback] + callback: Dict[str, _Callback] + re: ModuleType + + def __init__(self, conf: 'LexerConf', comparator=None) -> None: + terminals = list(conf.terminals) + assert all(isinstance(t, TerminalDef) for t in terminals), terminals + + self.re = conf.re_module - if t.pattern.min_width == 0: - raise LexError("Lexer does not allow zero-width terminals. (%s: %s)" % (t.name, t.pattern)) + if not conf.skip_validation: + # Sanitization + terminal_to_regexp = {} + for t in terminals: + regexp = t.pattern.to_regexp() + try: + self.re.compile(regexp, conf.g_regex_flags) + except self.re.error: + raise LexError("Cannot compile token %s: %s" % (t.name, t.pattern)) - assert set(ignore) <= {t.name for t in terminals} + if t.pattern.min_width == 0: + raise LexError("Lexer does not allow zero-width terminals. (%s: %s)" % (t.name, t.pattern)) + if t.pattern.type == "re": + terminal_to_regexp[t] = regexp + + if not (set(conf.ignore) <= {t.name for t in terminals}): + raise LexError("Ignore terminals are not defined: %s" % (set(conf.ignore) - {t.name for t in terminals})) + + if has_interegular: + _check_regex_collisions(terminal_to_regexp, comparator, conf.strict) + elif conf.strict: + raise LexError("interegular must be installed for strict mode. 
Use `pip install 'lark[interegular]'`.") # Init - self.newline_types = [t.name for t in terminals if _regexp_has_newline(t.pattern.to_regexp())] - self.ignore_types = list(ignore) + self.newline_types = frozenset(t.name for t in terminals if _regexp_has_newline(t.pattern.to_regexp())) + self.ignore_types = frozenset(conf.ignore) - terminals.sort(key=lambda x:(-x.priority, -x.pattern.max_width, -len(x.pattern.value), x.name)) + terminals.sort(key=lambda x: (-x.priority, -x.pattern.max_width, -len(x.pattern.value), x.name)) self.terminals = terminals - self.user_callbacks = user_callbacks - self.build(g_regex_flags) + self.user_callbacks = conf.callbacks + self.g_regex_flags = conf.g_regex_flags + self.use_bytes = conf.use_bytes + self.terminals_by_name = conf.terminals_by_name + + self._scanner = None - def build(self, g_regex_flags=0): - terminals, self.callback = _create_unless(self.terminals, g_regex_flags, re_=self.re) + def _build_scanner(self): + terminals, self.callback = _create_unless(self.terminals, self.g_regex_flags, self.re, self.use_bytes) assert all(self.callback.values()) for type_, f in self.user_callbacks.items(): @@ -332,64 +576,103 @@ def build(self, g_regex_flags=0): else: self.callback[type_] = f - self.mres = build_mres(terminals, g_regex_flags, self.re) + self._scanner = Scanner(terminals, self.g_regex_flags, self.re, self.use_bytes) - def match(self, stream, pos): - for mre, type_from_index in self.mres: - m = mre.match(stream, pos) - if m: - return m.group(0), type_from_index[m.lastindex] + @property + def scanner(self): + if self._scanner is None: + self._build_scanner() + return self._scanner + + def match(self, text, pos): + return self.scanner.match(text, pos) + + def next_token(self, lex_state: LexerState, parser_state: Any = None) -> Token: + line_ctr = lex_state.line_ctr + while line_ctr.char_pos < len(lex_state.text): + res = self.match(lex_state.text, line_ctr.char_pos) + if not res: + allowed = self.scanner.allowed_types - self.ignore_types + if not allowed: + allowed = {""} + raise UnexpectedCharacters(lex_state.text, line_ctr.char_pos, line_ctr.line, line_ctr.column, + allowed=allowed, token_history=lex_state.last_token and [lex_state.last_token], + state=parser_state, terminals_by_name=self.terminals_by_name) - def lex(self, stream): - return _Lex(self).lex(stream, self.newline_types, self.ignore_types) + value, type_ = res + ignored = type_ in self.ignore_types + t = None + if not ignored or type_ in self.callback: + t = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column) + line_ctr.feed(value, type_ in self.newline_types) + if t is not None: + t.end_line = line_ctr.line + t.end_column = line_ctr.column + t.end_pos = line_ctr.char_pos + if t.type in self.callback: + t = self.callback[t.type](t) + if not ignored: + if not isinstance(t, Token): + raise LexError("Callbacks must return a token (returned %r)" % t) + lex_state.last_token = t + return t + # EOF + raise EOFError(self) class ContextualLexer(Lexer): + lexers: Dict[int, AbstractBasicLexer] + root_lexer: AbstractBasicLexer + + BasicLexer: Type[AbstractBasicLexer] = BasicLexer - def __init__(self, terminals, states, re_, ignore=(), always_accept=(), user_callbacks={}, g_regex_flags=0): - self.re = re_ - tokens_by_name = {} - for t in terminals: - assert t.name not in tokens_by_name, t - tokens_by_name[t.name] = t + def __init__(self, conf: 'LexerConf', states: Dict[int, Collection[str]], always_accept: Collection[str]=()) -> None: + terminals = list(conf.terminals) + 
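A minimal self-contained sketch of the scanning loop that next_token implements above (illustrative only, not lark code; a plain list stands in for Scanner, and the terminal order mimics the priority/width sort in __init__):

import re

TERMINALS = [('NUMBER', re.compile(r'\d+')), ('WS', re.compile(r'[ \t]+'))]
IGNORE = {'WS'}

def tokens(text):
    pos = 0
    while pos < len(text):
        for type_, pattern in TERMINALS:      # already sorted: first match wins
            m = pattern.match(text, pos)
            if m:
                break
        else:
            raise ValueError("No terminal matches %r at %d" % (text[pos:], pos))
        pos = m.end()
        if type_ not in IGNORE:               # ignored terminals advance pos silently
            yield type_, m.group(0)

print(list(tokens("12 34")))    # [('NUMBER', '12'), ('NUMBER', '34')]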

 class ContextualLexer(Lexer):
+    lexers: Dict[int, AbstractBasicLexer]
+    root_lexer: AbstractBasicLexer
+
+    BasicLexer: Type[AbstractBasicLexer] = BasicLexer

-    def __init__(self, terminals, states, re_, ignore=(), always_accept=(), user_callbacks={}, g_regex_flags=0):
-        self.re = re_
-        tokens_by_name = {}
-        for t in terminals:
-            assert t.name not in tokens_by_name, t
-            tokens_by_name[t.name] = t
+    def __init__(self, conf: 'LexerConf', states: Dict[int, Collection[str]], always_accept: Collection[str]=()) -> None:
+        terminals = list(conf.terminals)
+        terminals_by_name = conf.terminals_by_name

-        lexer_by_tokens = {}
+        trad_conf = copy(conf)
+        trad_conf.terminals = terminals
+
+        if has_interegular and not conf.skip_validation:
+            comparator = interegular.Comparator.from_regexes({t: t.pattern.to_regexp() for t in terminals})
+        else:
+            comparator = None
+        lexer_by_tokens: Dict[FrozenSet[str], AbstractBasicLexer] = {}
         self.lexers = {}
         for state, accepts in states.items():
             key = frozenset(accepts)
             try:
                 lexer = lexer_by_tokens[key]
             except KeyError:
-                accepts = set(accepts) | set(ignore) | set(always_accept)
-                state_tokens = [tokens_by_name[n] for n in accepts if n and n in tokens_by_name]
-                lexer = TraditionalLexer(state_tokens, re_=self.re, ignore=ignore, user_callbacks=user_callbacks, g_regex_flags=g_regex_flags)
+                accepts = set(accepts) | set(conf.ignore) | set(always_accept)
+                lexer_conf = copy(trad_conf)
+                lexer_conf.terminals = [terminals_by_name[n] for n in accepts if n in terminals_by_name]
+                lexer = self.BasicLexer(lexer_conf, comparator)
                 lexer_by_tokens[key] = lexer

             self.lexers[state] = lexer

-        self.root_lexer = TraditionalLexer(terminals, re_=self.re, ignore=ignore, user_callbacks=user_callbacks, g_regex_flags=g_regex_flags)
+        assert trad_conf.terminals is terminals
+        trad_conf.skip_validation = True  # We don't need to verify all terminals again
+        self.root_lexer = self.BasicLexer(trad_conf, comparator)

-    def lex(self, stream, get_parser_state):
-        parser_state = get_parser_state()
-        l = _Lex(self.lexers[parser_state], parser_state)
+    def lex(self, lexer_state: LexerState, parser_state: 'ParserState') -> Iterator[Token]:
         try:
-            for x in l.lex(stream, self.root_lexer.newline_types, self.root_lexer.ignore_types):
-                yield x
-                parser_state = get_parser_state()
-                l.lexer = self.lexers[parser_state]
-                l.state = parser_state  # For debug only, no need to worry about multithreading
+            while True:
+                lexer = self.lexers[parser_state.position]
+                yield lexer.next_token(lexer_state, parser_state)
+        except EOFError:
+            pass
         except UnexpectedCharacters as e:
-            # In the contextual lexer, UnexpectedCharacters can mean that the terminal is defined,
-            # but not in the current context.
+            # In the contextual lexer, UnexpectedCharacters can mean that the terminal is defined, but not in the current context.
             # This tests the input against the global context, to provide a nicer error.
-            root_match = self.root_lexer.match(stream, e.pos_in_stream)
-            if not root_match:
-                raise
-
-            value, type_ = root_match
-            t = Token(type_, value, e.pos_in_stream, e.line, e.column)
-            raise UnexpectedToken(t, e.allowed, state=e.state)
+            try:
+                last_token = lexer_state.last_token  # Save last_token. Calling root_lexer.next_token will change this to the wrong token
+                token = self.root_lexer.next_token(lexer_state, parser_state)
+                raise UnexpectedToken(token, e.allowed, state=parser_state, token_history=[last_token], terminals_by_name=self.root_lexer.terminals_by_name)
+            except UnexpectedCharacters:
+                raise e  # Raise the original UnexpectedCharacters. The root lexer raises it with the wrong expected set.

 ###}
diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/load_grammar.py b/conda_lock/_vendor/poetry/core/_vendor/lark/load_grammar.py
index 407d8d16..362a845d 100644
--- a/conda_lock/_vendor/poetry/core/_vendor/lark/load_grammar.py
+++ b/conda_lock/_vendor/poetry/core/_vendor/lark/load_grammar.py
@@ -1,26 +1,31 @@
-"Parses and creates Grammar objects"
+"""Parses and compiles Lark grammars into an internal representation.
+"""

+import hashlib
 import os.path
 import sys
+from collections import namedtuple
 from copy import copy, deepcopy
-from io import open
+import pkgutil
+from ast import literal_eval
+from contextlib import suppress
+from typing import List, Tuple, Union, Callable, Dict, Optional, Sequence, Generator

-from .utils import bfs, eval_escaping
-from .lexer import Token, TerminalDef, PatternStr, PatternRE
+from .utils import bfs, logger, classify_bool, is_id_continue, is_id_start, bfs_all_unique, small_factors, OrderedSet
+from .lexer import Token, TerminalDef, PatternStr, PatternRE, Pattern

 from .parse_tree_builder import ParseTreeBuilder
-from .parser_frontends import LALR_TraditionalLexer
+from .parser_frontends import ParsingFrontend
 from .common import LexerConf, ParserConf
-from .grammar import RuleOptions, Rule, Terminal, NonTerminal, Symbol
-from .utils import classify, suppress, dedup_list, Str
-from .exceptions import GrammarError, UnexpectedCharacters, UnexpectedToken
+from .grammar import RuleOptions, Rule, Terminal, NonTerminal, Symbol, TOKEN_DEFAULT_PRIORITY
+from .utils import classify, dedup_list
+from .exceptions import GrammarError, UnexpectedCharacters, UnexpectedToken, ParseError, UnexpectedInput
 from .tree import Tree, SlottedTree as ST
 from .visitors import Transformer, Visitor, v_args, Transformer_InPlace, Transformer_NonRecursive
 inline_args = v_args(inline=True)

-__path__ = os.path.dirname(__file__)
-IMPORT_PATHS = [os.path.join(__path__, 'grammars')]
+IMPORT_PATHS = ['grammars']

 EXT = '.lark'
@@ -75,23 +80,28 @@
     '_RBRA': r'\]',
     '_LBRACE': r'\{',
     '_RBRACE': r'\}',
-    'OP': '[+*]|[?](?![a-z])',
+    'OP': '[+*]|[?](?![a-z_])',
     '_COLON': ':',
     '_COMMA': ',',
     '_OR': r'\|',
    '_DOT': r'\.(?!\.)',
     '_DOTDOT': r'\.\.',
     'TILDE': '~',
-    'RULE': '!?[_?]?[a-z][_a-z0-9]*',
+    'RULE_MODIFIERS': '(!|![?]?|[?]!?)(?=[_a-z])',
+    'RULE': '_?[a-z][_a-z0-9]*',
     'TERMINAL': '_?[A-Z][_A-Z0-9]*',
     'STRING': r'"(\\"|\\\\|[^"\n])*?"i?',
-    'REGEXP': r'/(?!/)(\\/|\\\\|[^/\n])*?/[%s]*' % _RE_FLAGS,
+    'REGEXP': r'/(?!/)(\\/|\\\\|[^/])*?/[%s]*' % _RE_FLAGS,
     '_NL': r'(\r?\n)+\s*',
+    '_NL_OR': r'(\r?\n)+\s*\|',
     'WS': r'[ \t]+',
-    'COMMENT': r'\s*//[^\n]*',
+    'COMMENT': r'\s*//[^\n]*|\s*#[^\n]*',
+    'BACKSLASH': r'\\[ ]*\n',
     '_TO': '->',
     '_IGNORE': r'%ignore',
+    '_OVERRIDE': r'%override',
     '_DECLARE': r'%declare',
+    '_EXTEND': r'%extend',
     '_IMPORT': r'%import',
     'NUMBER': r'[+-]?\d+',
 }
@@ -99,19 +109,23 @@
 RULES = {
     'start': ['_list'],
     '_list':  ['_item', '_list _item'],
-    '_item':  ['rule', 'term', 'statement', '_NL'],
+    '_item':  ['rule', 'term', 'ignore', 'import', 'declare', 'override', 'extend', '_NL'],

-    'rule': ['RULE template_params _COLON expansions _NL',
-             'RULE template_params _DOT NUMBER _COLON expansions _NL'],
+    'rule': ['rule_modifiers RULE template_params priority _COLON expansions _NL'],
+    'rule_modifiers': ['RULE_MODIFIERS',
+                       ''],
+    'priority': ['_DOT NUMBER',
+                 ''],
     'template_params': ['_LBRACE _template_params _RBRACE',
                         ''],
     '_template_params': ['RULE',
                          '_template_params _COMMA RULE'],
-    'expansions': ['alias',
-                   'expansions _OR alias',
-                   'expansions _NL _OR alias'],
+    'expansions': ['_expansions'],
+    '_expansions': ['alias',
+                    '_expansions _OR alias',
+                    '_expansions _NL_OR alias'],

-    '?alias': ['expansion _TO RULE', 'expansion'],
+    '?alias': ['expansion _TO nonterminal', 'expansion'],
     'expansion': ['_expansion'],

     '_expansion': ['', '_expansion expr'],
@@ -136,17 +150,21 @@
     'nonterminal': ['RULE'],

     '?name': ['RULE', 'TERMINAL'],
+    '?symbol': ['terminal', 'nonterminal'],

     'maybe': ['_LBRA expansions _RBRA'],
     'range': ['STRING _DOTDOT STRING'],

-    'template_usage': ['RULE _LBRACE _template_args _RBRACE'],
+    'template_usage': ['nonterminal _LBRACE _template_args _RBRACE'],
     '_template_args': ['value',
                        '_template_args _COMMA value'],

     'term': ['TERMINAL _COLON expansions _NL',
              'TERMINAL _DOT NUMBER _COLON expansions _NL'],

-    'statement': ['ignore', 'import', 'declare'],
+    'override': ['_OVERRIDE rule',
+                 '_OVERRIDE term'],
+    'extend': ['_EXTEND rule',
+               '_EXTEND term'],
     'ignore': ['_IGNORE expansions _NL'],
     'declare': ['_DECLARE _declare_args _NL'],
     'import': ['_IMPORT _import_path _NL',
@@ -161,32 +179,171 @@
     'name_list': ['_name_list'],
     '_name_list': ['name', '_name_list _COMMA name'],

-    '_declare_args': ['name', '_declare_args name'],
+    '_declare_args': ['symbol', '_declare_args symbol'],
     'literal': ['REGEXP', 'STRING'],
 }

+
+# Value 5 keeps the number of states in the lalr parser somewhat minimal.
+# It isn't optimal, but close to it. See PR #949
+SMALL_FACTOR_THRESHOLD = 5
+# The threshold above which repeats via ~ are split up into different rules.
+# 50 is chosen since it keeps the number of states low, and therefore lalr analysis time low,
+# while not being too aggressive and unnecessarily creating rules that might create shift/reduce conflicts.
+# (See PR #949)
+REPEAT_BREAK_THRESHOLD = 50
+
+
+class FindRuleSize(Transformer):
+    def __init__(self, keep_all_tokens: bool):
+        self.keep_all_tokens = keep_all_tokens
+
+    def _will_not_get_removed(self, sym: Symbol) -> bool:
+        if isinstance(sym, NonTerminal):
+            return not sym.name.startswith('_')
+        if isinstance(sym, Terminal):
+            return self.keep_all_tokens or not sym.filter_out
+        if sym is _EMPTY:
+            return False
+        assert False, sym
+
+    def _args_as_int(self, args: List[Union[int, Symbol]]) -> Generator[int, None, None]:
+        for a in args:
+            if isinstance(a, int):
+                yield a
+            elif isinstance(a, Symbol):
+                yield 1 if self._will_not_get_removed(a) else 0
+            else:
+                assert False
+
+    def expansion(self, args) -> int:
+        return sum(self._args_as_int(args))
+
+    def expansions(self, args) -> int:
+        return max(self._args_as_int(args))
+
+
 @inline_args
 class EBNF_to_BNF(Transformer_InPlace):
     def __init__(self):
         self.new_rules = []
-        self.rules_by_expr = {}
+        self.rules_cache = {}
         self.prefix = 'anon'
         self.i = 0
         self.rule_options = None

-    def _add_recurse_rule(self, type_, expr):
-        if expr in self.rules_by_expr:
-            return self.rules_by_expr[expr]
-
-        new_name = '__%s_%s_%d' % (self.prefix, type_, self.i)
+    def _name_rule(self, inner: str):
+        new_name = '__%s_%s_%d' % (self.prefix, inner, self.i)
         self.i += 1
-        t = NonTerminal(new_name)
-        tree = ST('expansions', [ST('expansion', [expr]), ST('expansion', [t, expr])])
-        self.new_rules.append((new_name, tree, self.rule_options))
-        self.rules_by_expr[expr] = t
+        return new_name
+
+    def _add_rule(self, key, name, expansions):
+        t = NonTerminal(name)
+        self.new_rules.append((name, expansions, self.rule_options))
+        self.rules_cache[key] = t
         return t

-    def expr(self, rule, op, *args):
+    def _add_recurse_rule(self, type_: str, expr: Tree):
+        try:
+            return self.rules_cache[expr]
+        except KeyError:
+            new_name = self._name_rule(type_)
+            t = NonTerminal(new_name)
+            tree = ST('expansions', [
+                ST('expansion', [expr]),
+                ST('expansion', [t, expr])
+            ])
+            return self._add_rule(expr, new_name, tree)
+
+    def _add_repeat_rule(self, a, b, target, atom):
+        """Generate a rule that repeats target ``a`` times, and repeats atom ``b`` times.
+
+        When called recursively (into target), it repeats atom for x(n) times, where:
+            x(0) = 1
+            x(n) = a(n) * x(n-1) + b
+
+        Example rule when a=3, b=4:
+
+            new_rule: target target target atom atom atom atom
+        """
+        key = (a, b, target, atom)
+        try:
+            return self.rules_cache[key]
+        except KeyError:
+            new_name = self._name_rule('repeat_a%d_b%d' % (a, b))
+            tree = ST('expansions', [ST('expansion', [target] * a + [atom] * b)])
+            return self._add_rule(key, new_name, tree)
+
+    def _add_repeat_opt_rule(self, a, b, target, target_opt, atom):
+        """Creates a rule that matches atom 0 to (a*n+b)-1 times.
+
+        When target matches atom n times, and target_opt matches atom 0 to n-1 times,
+
+        first we generate target * i followed by target_opt, for i from 0 to a-1.
+        These match 0 to n*a - 1 times atom.
+
+        Then we generate target * a followed by atom * i, for i from 0 to b-1.
+        These match n*a to n*a + b-1 times atom.
+
+        The created rule will not have any shift/reduce conflicts so that it can be used with lalr
+
+        Example rule when a=3, b=4:
+
+            new_rule: target_opt
+                    | target target_opt
+                    | target target target_opt
+
+                    | target target target
+                    | target target target atom
+                    | target target target atom atom
+                    | target target target atom atom atom
+        """
+        key = (a, b, target, atom, "opt")
+        try:
+            return self.rules_cache[key]
+        except KeyError:
+            new_name = self._name_rule('repeat_a%d_b%d_opt' % (a, b))
+            tree = ST('expansions', [
+                ST('expansion', [target]*i + [target_opt]) for i in range(a)
+            ] + [
+                ST('expansion', [target]*a + [atom]*i) for i in range(b)
+            ])
+            return self._add_rule(key, new_name, tree)
+
+    def _generate_repeats(self, rule: Tree, mn: int, mx: int):
+        """Generates a rule tree that repeats ``rule`` exactly between ``mn`` to ``mx`` times.
+        """
+        # For a small number of repeats, we can take the naive approach
+        if mx < REPEAT_BREAK_THRESHOLD:
+            return ST('expansions', [ST('expansion', [rule] * n) for n in range(mn, mx + 1)])
+
+        # For large repeat values, we break the repetition into sub-rules.
+        # We treat ``rule~mn..mx`` as ``rule~mn rule~0..(diff=mx-mn)``.
+        # We then use small_factors to split mn and diff into factor pairs [(a, b), ...].
+        # These values are used, with the help of _add_repeat_rule and _add_repeat_opt_rule,
+        # to generate a complete rule/expression that matches the corresponding number of repeats.
+        mn_target = rule
+        for a, b in small_factors(mn, SMALL_FACTOR_THRESHOLD):
+            mn_target = self._add_repeat_rule(a, b, mn_target, rule)
+        if mx == mn:
+            return mn_target
+
+        diff = mx - mn + 1  # We add one because _add_repeat_opt_rule generates rules that match one less
+        diff_factors = small_factors(diff, SMALL_FACTOR_THRESHOLD)
+        diff_target = rule  # Match rule 1 times
+        diff_opt_target = ST('expansion', [])  # match rule 0 times (i.e. up to 1-1 times)
+        for a, b in diff_factors[:-1]:
+            diff_opt_target = self._add_repeat_opt_rule(a, b, diff_target, diff_opt_target, rule)
+            diff_target = self._add_repeat_rule(a, b, diff_target, rule)
+
+        a, b = diff_factors[-1]
+        diff_opt_target = self._add_repeat_opt_rule(a, b, diff_target, diff_opt_target, rule)
+
+        return ST('expansions', [ST('expansion', [mn_target] + [diff_opt_target])])
+
+    def expr(self, rule: Tree, op: Token, *args):
         if op.value == '?':
             empty = ST('expansion', [])
             return ST('expansions', [rule, empty])
@@ -210,39 +367,26 @@ def expr(self, rule, op, *args):
             mn, mx = map(int, args)
             if mx < mn or mn < 0:
                 raise GrammarError("Bad Range for %s (%d..%d isn't allowed)" % (rule, mn, mx))
-            return ST('expansions', [ST('expansion', [rule] * n) for n in range(mn, mx+1)])
-        assert False, op

-    def maybe(self, rule):
-        keep_all_tokens = self.rule_options and self.rule_options.keep_all_tokens
-
-        def will_not_get_removed(sym):
-            if isinstance(sym, NonTerminal):
-                return not sym.name.startswith('_')
-            if isinstance(sym, Terminal):
-                return keep_all_tokens or not sym.filter_out
-            assert False
+            return self._generate_repeats(rule, mn, mx)

-        if any(rule.scan_values(will_not_get_removed)):
-            empty = _EMPTY
-        else:
-            empty = ST('expansion', [])
+        assert False, op

+    def maybe(self, rule: Tree):
+        keep_all_tokens = self.rule_options and self.rule_options.keep_all_tokens
+        rule_size = FindRuleSize(keep_all_tokens).transform(rule)
+        empty = ST('expansion', [_EMPTY] * rule_size)
         return ST('expansions', [rule, empty])
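A worked illustration of the factoring above. The recurrence in _add_repeat_rule's docstring composes factor pairs back into a repeat count; the pairs below are hypothetical outputs of small_factors (whose exact values live in lark's utils, not shown here):

# Illustrative only: composing assumed small_factors() pairs back into a
# repeat count, per the recurrence x(0) = 1; x(n) = a * x(n-1) + b.
def compose(pairs):
    x = 1
    for a, b in pairs:
        x = a * x + b
    return x

# With SMALL_FACTOR_THRESHOLD = 5, rule~20 could factor as [(5, 0), (4, 0)]:
assert compose([(5, 0), (4, 0)]) == 20    # rule -> r5: rule*5 -> r20: r5*4
# and rule~23 would need an additive tail, e.g. [(5, 0), (4, 3)]:
assert compose([(5, 0), (4, 3)]) == 23    # r20 plus 3 extra atoms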

 class SimplifyRule_Visitor(Visitor):

     @staticmethod
-    def _flatten(tree):
-        while True:
-            to_expand = [i for i, child in enumerate(tree.children)
-                         if isinstance(child, Tree) and child.data == tree.data]
-            if not to_expand:
-                break
-            tree.expand_kids_by_index(*to_expand)
+    def _flatten(tree: Tree):
+        while tree.expand_kids_by_data(tree.data):
+            pass

-    def expansion(self, tree):
+    def expansion(self, tree: Tree):
         # rules_list unpacking
         # a : b (c|d) e
         #  -->
@@ -258,9 +402,9 @@ def expansion(self, tree):
         for i, child in enumerate(tree.children):
             if isinstance(child, Tree) and child.data == 'expansions':
                 tree.data = 'expansions'
-                tree.children = [self.visit(ST('expansion', [option if i==j else other
-                                                for j, other in enumerate(tree.children)]))
-                                    for option in dedup_list(child.children)]
+                tree.children = [self.visit(ST('expansion', [option if i == j else other
+                                                             for j, other in enumerate(tree.children)]))
+                                 for option in dedup_list(child.children)]
                 self._flatten(tree)
                 break

@@ -273,7 +417,7 @@ def alias(self, tree):
         tree.data = 'expansions'
         tree.children = aliases

-    def expansions(self, tree):
+    def expansions(self, tree: Tree):
         self._flatten(tree)
         # Ensure all children are unique
         if len(set(tree.children)) != len(tree.children):
@@ -283,31 +427,25 @@
 class RuleTreeToText(Transformer):
     def expansions(self, x):
         return x
+
     def expansion(self, symbols):
         return symbols, None
+
     def alias(self, x):
         (expansion, _alias), alias = x
         assert _alias is None, (alias, expansion, '-', _alias)  # Double alias not allowed
-        return expansion, alias.value
+        return expansion, alias.name


-@inline_args
-class CanonizeTree(Transformer_InPlace):
-    def tokenmods(self, *args):
-        if len(args) == 1:
-            return list(args)
-        tokenmods, value = args
-        return tokenmods + [value]

 class PrepareAnonTerminals(Transformer_InPlace):
-    "Create a unique list of anonymous terminals. Attempt to give meaningful names to them when we add them"
+    """Create a unique list of anonymous terminals. Attempt to give meaningful names to them when we add them"""

     def __init__(self, terminals):
         self.terminals = terminals
         self.term_set = {td.name for td in self.terminals}
         self.term_reverse = {td.pattern: td for td in terminals}
         self.i = 0
-
+        self.rule_options = None

     @inline_args
     def pattern(self, p):
@@ -326,16 +464,14 @@ def pattern(self, p):
             try:
                 term_name = _TERMINAL_NAMES[value]
             except KeyError:
-                if value.isalnum() and value[0].isalpha() and value.upper() not in self.term_set:
-                    with suppress(UnicodeEncodeError):
-                        value.upper().encode('ascii')  # Make sure we don't have unicode in our terminal names
-                        term_name = value.upper()
+                if value and is_id_continue(value) and is_id_start(value[0]) and value.upper() not in self.term_set:
+                    term_name = value.upper()

             if term_name in self.term_set:
                 term_name = None

         elif isinstance(p, PatternRE):
-            if p in self.term_reverse:  # Kind of a wierd placement.name
+            if p in self.term_reverse:  # Kind of a weird placement
                 term_name = self.term_reverse[p].name
         else:
             assert False, p
@@ -351,26 +487,31 @@ def pattern(self, p):
             self.term_reverse[p] = termdef
             self.terminals.append(termdef)

-        return Terminal(term_name, filter_out=isinstance(p, PatternStr))
+        filter_out = False if self.rule_options and self.rule_options.keep_all_tokens else isinstance(p, PatternStr)
+
+        return Terminal(term_name, filter_out=filter_out)
+

 class _ReplaceSymbols(Transformer_InPlace):
-    " Helper for ApplyTemplates "
+    """Helper for ApplyTemplates"""

     def __init__(self):
         self.names = {}

     def value(self, c):
-        if len(c) == 1 and isinstance(c[0], Token) and c[0].value in self.names:
-            return self.names[c[0].value]
+        if len(c) == 1 and isinstance(c[0], Symbol) and c[0].name in self.names:
+            return self.names[c[0].name]
         return self.__default__('value', c, None)

     def template_usage(self, c):
-        if c[0] in self.names:
-            return self.__default__('template_usage', [self.names[c[0]].name] + c[1:], None)
+        name = c[0].name
+        if name in self.names:
+            return self.__default__('template_usage', [self.names[name]] + c[1:], None)
        return self.__default__('template_usage', c, None)
+

 class ApplyTemplates(Transformer_InPlace):
-    " Apply the templates, creating new rules that represent the used templates "
+    """Apply the templates, creating new rules that represent the used templates"""

     def __init__(self, rule_defs):
         self.rule_defs = rule_defs
@@ -378,7 +519,7 @@ def __init__(self, rule_defs):
         self.created_templates = set()

     def template_usage(self, c):
-        name = c[0]
+        name = c[0].name
         args = c[1:]
         result_name = "%s{%s}" % (name, ",".join(a.name for a in args))
         if result_name not in self.created_templates:
@@ -396,26 +537,63 @@ def _rfind(s, choices):
     return max(s.rfind(c) for c in choices)

+def eval_escaping(s):
+    w = ''
+    i = iter(s)
+    for n in i:
+        w += n
+        if n == '\\':
+            try:
+                n2 = next(i)
+            except StopIteration:
+                raise GrammarError("Literal ended unexpectedly (bad escaping): `%r`" % s)
+            if n2 == '\\':
+                w += '\\\\'
+            elif n2 not in 'Uuxnftr':
+                w += '\\'
+            w += n2
+    w = w.replace('\\"', '"').replace("'", "\\'")
+
+    to_eval = "u'''%s'''" % w
+    try:
+        s = literal_eval(to_eval)
+    except SyntaxError as e:
+        raise GrammarError(s, e)
+
+    return s

 def _literal_to_pattern(literal):
+    assert isinstance(literal, Token)
     v = literal.value
     flag_start = _rfind(v, '/"')+1
     assert flag_start > 0
     flags = v[flag_start:]
     assert all(f in _RE_FLAGS for f in flags), flags

+    if literal.type == 'STRING' and '\n' in v:
+        raise GrammarError('You cannot put newlines in string literals')
+
+    if literal.type == 'REGEXP' and '\n' in v and 'x' not in flags:
+        raise GrammarError('You can only use newlines in regular expressions '
+                           'with the `x` (verbose) flag')
+
     v = v[:flag_start]
     assert v[0] == v[-1] and v[0] in '"/'
     x = v[1:-1]

     s = eval_escaping(x)

+    if s == "":
+        raise GrammarError("Empty terminals are not allowed (%s)" % literal)
+
     if literal.type == 'STRING':
         s = s.replace('\\\\', '\\')
-
-    return { 'STRING': PatternStr,
-             'REGEXP': PatternRE }[literal.type](s, flags)
+        return PatternStr(s, flags, raw=literal.value)
+    elif literal.type == 'REGEXP':
+        return PatternRE(s, flags, raw=literal.value)
+    else:
+        assert False, 'Invariant failed: literal.type not in ["STRING", "REGEXP"]'

 @inline_args
@@ -427,32 +605,42 @@ def range(self, start, end):
         assert start.type == end.type == 'STRING'
         start = start.value[1:-1]
         end = end.value[1:-1]
-        assert len(eval_escaping(start)) == len(eval_escaping(end)) == 1, (start, end, len(eval_escaping(start)), len(eval_escaping(end)))
+        assert len(eval_escaping(start)) == len(eval_escaping(end)) == 1
         regexp = '[%s-%s]' % (start, end)
         return ST('pattern', [PatternRE(regexp)])

-class TerminalTreeToPattern(Transformer):
+def _make_joined_pattern(regexp, flags_set) -> PatternRE:
+    return PatternRE(regexp, ())
+
+class TerminalTreeToPattern(Transformer_NonRecursive):
     def pattern(self, ps):
         p ,= ps
         return p

-    def expansion(self, items):
-        assert items
+    def expansion(self, items: List[Pattern]) -> Pattern:
+        if not items:
+            return PatternStr('')
+
         if len(items) == 1:
             return items[0]
-        if len({i.flags for i in items}) > 1:
-            raise GrammarError("Lark doesn't support joining terminals with conflicting flags!")
-        return PatternRE(''.join(i.to_regexp() for i in items), items[0].flags if items else ())

-    def expansions(self, exps):
+        pattern = ''.join(i.to_regexp() for i in items)
+        return _make_joined_pattern(pattern, {i.flags for i in items})
+
+    def expansions(self, exps: List[Pattern]) -> Pattern:
         if len(exps) == 1:
             return exps[0]
-        if len({i.flags for i in exps}) > 1:
-            raise GrammarError("Lark doesn't support joining terminals with conflicting flags!")
-        return PatternRE('(?:%s)' % ('|'.join(i.to_regexp() for i in exps)), exps[0].flags)

-    def expr(self, args):
+        # Do a bit of sorting to make sure that the longest option is returned
+        # (Python's re module otherwise prefers just 'l' when given (l|ll) and both could match)
+        exps.sort(key=lambda x: (-x.max_width, -x.min_width, -len(x.value)))
+
+        pattern = '(?:%s)' % ('|'.join(i.to_regexp() for i in exps))
+        return _make_joined_pattern(pattern, {i.flags for i in exps})
+
+    def expr(self, args) -> Pattern:
+        inner: Pattern
         inner, op = args[:2]
         if op == '~':
             if len(args) == 3:
@@ -475,35 +663,35 @@ def alias(self, t):
     def value(self, v):
         return v[0]

-class PrepareSymbols(Transformer_InPlace):
+
+class ValidateSymbols(Transformer_InPlace):
     def value(self, v):
         v ,= v
-        if isinstance(v, Tree):
-            return v
-        elif v.type == 'RULE':
-            return NonTerminal(Str(v.value))
-        elif v.type == 'TERMINAL':
-            return Terminal(Str(v.value), filter_out=v.startswith('_'))
-        assert False
+        assert isinstance(v, (Tree, Symbol))
+        return v

-def _choice_of_rules(rules):
-    return ST('expansions', [ST('expansion', [Token('RULE', name)]) for name in rules])

 def nr_deepcopy_tree(t):
-    "Deepcopy tree `t` without recursion"
+    """Deepcopy tree `t` without recursion"""
     return Transformer_NonRecursive(False).transform(t)
+
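The sorting comment in TerminalTreeToPattern.expansions above can be verified directly; Python's re alternation is first-match, not longest-match:

import re

print(re.match('(?:l|ll)', 'll').group(0))    # 'l'  - the shorter branch wins
print(re.match('(?:ll|l)', 'll').group(0))    # 'll' - sorted longest-first, as above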

 class Grammar:
-    def __init__(self, rule_defs, term_defs, ignore):
+
+    term_defs: List[Tuple[str, Tuple[Tree, int]]]
+    rule_defs: List[Tuple[str, Tuple[str, ...], Tree, RuleOptions]]
+    ignore: List[str]
+
+    def __init__(self, rule_defs: List[Tuple[str, Tuple[str, ...], Tree, RuleOptions]], term_defs: List[Tuple[str, Tuple[Tree, int]]], ignore: List[str]) -> None:
         self.term_defs = term_defs
         self.rule_defs = rule_defs
         self.ignore = ignore

-    def compile(self, start):
+    def compile(self, start, terminals_to_keep) -> Tuple[List[TerminalDef], List[Rule], List[str]]:
         # We change the trees in-place (to support huge grammars)
         # So deepcopy allows calling compile more than once.
-        term_defs = deepcopy(list(self.term_defs))
-        rule_defs = [(n,p,nr_deepcopy_tree(t),o) for n,p,t,o in self.rule_defs]
+        term_defs = [(n, (nr_deepcopy_tree(t), p)) for n, (t, p) in self.term_defs]
+        rule_defs = [(n, p, nr_deepcopy_tree(t), o) for n, p, t, o in self.rule_defs]

         # ===================
         # Compile Terminals
@@ -519,7 +707,7 @@ def compile(self, start):
                 raise GrammarError("Terminals cannot be empty (%s)" % name)

         transformer = PrepareLiterals() * TerminalTreeToPattern()
-        terminals = [TerminalDef(name, transformer.transform( term_tree ), priority)
+        terminals = [TerminalDef(name, transformer.transform(term_tree), priority)
                      for name, (term_tree, priority) in term_defs if term_tree]

         # =================
         # Compile Rules
         # =================

         # 1. Pre-process terminals
-        transformer = PrepareLiterals() * PrepareSymbols() * PrepareAnonTerminals(terminals)  # Adds to terminals
+        anon_tokens_transf = PrepareAnonTerminals(terminals)
+        transformer = PrepareLiterals() * ValidateSymbols() * anon_tokens_transf  # Adds to terminals

         # 2. Inline Templates
@@ -537,15 +726,17 @@
         ebnf_to_bnf = EBNF_to_BNF()
         rules = []
         i = 0
-        while i < len(rule_defs):  # We have to do it like this because rule_defs might grow due to templates
+        while i < len(rule_defs):  # We have to do it like this because rule_defs might grow due to templates
             name, params, rule_tree, options = rule_defs[i]
             i += 1
-            if len(params) != 0:  # Dont transform templates
+            if len(params) != 0:  # Don't transform templates
                 continue
-            ebnf_to_bnf.rule_options = RuleOptions(keep_all_tokens=True) if options.keep_all_tokens else None
+            rule_options = RuleOptions(keep_all_tokens=True) if options and options.keep_all_tokens else None
+            ebnf_to_bnf.rule_options = rule_options
             ebnf_to_bnf.prefix = name
+            anon_tokens_transf.rule_options = rule_options
             tree = transformer.transform(rule_tree)
-            res = ebnf_to_bnf.transform(tree)
+            res: Tree = ebnf_to_bnf.transform(tree)
             rules.append((name, res, options))
         rules += ebnf_to_bnf.new_rules

@@ -555,7 +746,7 @@
         rule_tree_to_text = RuleTreeToText()
         simplify_rule = SimplifyRule_Visitor()

-        compiled_rules = []
+        compiled_rules: List[Rule] = []
         for rule_content in rules:
             name, tree, options = rule_content
             simplify_rule.visit(tree)
@@ -563,9 +754,9 @@
             for i, (expansion, alias) in enumerate(expansions):
                 if alias and name.startswith('_'):
-                    raise GrammarError("Rule %s is marked for expansion (it starts with an underscore) and isn't allowed to have aliases (alias=%s)" % (name, alias))
+                    raise GrammarError("Rule %s is marked for expansion (it starts with an underscore) and isn't allowed to have aliases (alias=%s)" % (name, alias))

-                empty_indices = [x==_EMPTY for x in expansion]
+                empty_indices = tuple(x==_EMPTY for x in expansion)
                 if any(empty_indices):
                     exp_options = copy(options) or RuleOptions()
                     exp_options.empty_indices = empty_indices
@@ -573,7 +764,11 @@
                 else:
                     exp_options = options

-                assert all(isinstance(x, Symbol) for x in expansion), expansion
+                for sym in expansion:
+                    assert isinstance(sym, Symbol)
+                    if sym.is_term and exp_options and exp_options.keep_all_tokens:
+                        assert isinstance(sym, Terminal)
+                        sym.filter_out = False
                 rule = Rule(NonTerminal(name), expansion, i, alias, exp_options)
                 compiled_rules.append(rule)
@@ -590,125 +785,108 @@
                 assert len({(r.alias, r.order, r.options) for r in dups}) == len(dups)

         # Remove duplicates
-        compiled_rules = list(set(compiled_rules))
+        compiled_rules = list(OrderedSet(compiled_rules))

         # Filter out unused rules
         while True:
             c = len(compiled_rules)
             used_rules = {s for r in compiled_rules
-                            for s in r.expansion
-                            if isinstance(s, NonTerminal)
-                            and s != r.origin}
+                          for s in r.expansion
+                          if isinstance(s, NonTerminal)
+                          and s != r.origin}
             used_rules |= {NonTerminal(s) for s in start}
-            compiled_rules = [r for r in compiled_rules if r.origin in used_rules]
+            compiled_rules, unused = classify_bool(compiled_rules, lambda r: r.origin in used_rules)
+            for r in unused:
+                logger.debug("Unused rule: %s", r)
             if len(compiled_rules) == c:
                 break

         # Filter out unused terminals
-        used_terms = {t.name for r in compiled_rules
-                             for t in r.expansion
-                             if isinstance(t, Terminal)}
-        terminals = [t for t in terminals if t.name in used_terms or t.name in self.ignore]
+        if terminals_to_keep != '*':
+            used_terms = {t.name for r in compiled_rules
+                          for t in r.expansion
+                          if isinstance(t, Terminal)}
+            terminals, unused = classify_bool(terminals, lambda t: t.name in used_terms or t.name in self.ignore or t.name in terminals_to_keep)
+            if unused:
+                logger.debug("Unused terminals: %s", [t.name for t in unused])

         return terminals, compiled_rules, self.ignore

+PackageResource = namedtuple('PackageResource', 'pkg_name path')

-_imported_grammars = {}
-def import_grammar(grammar_path, re_, base_paths=[]):
-    if grammar_path not in _imported_grammars:
-        import_paths = base_paths + IMPORT_PATHS
-        for import_path in import_paths:
-            with suppress(IOError):
-                joined_path = os.path.join(import_path, grammar_path)
-                with open(joined_path, encoding='utf8') as f:
-                    text = f.read()
-                grammar = load_grammar(text, joined_path, re_)
-                _imported_grammars[grammar_path] = grammar
-                break
-        else:
-            open(grammar_path, encoding='utf8')
-            assert False
-    return _imported_grammars[grammar_path]
-
-def import_from_grammar_into_namespace(grammar, namespace, aliases):
-    """Returns all rules and terminals of grammar, prepended
-    with a 'namespace' prefix, except for those which are aliased.
+class FromPackageLoader:
     """
+    Provides a simple way of creating custom import loaders that load from packages via ``pkgutil.get_data`` instead of using `open`.
+    This allows them to be compatible even from within zip files.

-    imported_terms = dict(grammar.term_defs)
-    imported_rules = {n:(n,p,deepcopy(t),o) for n,p,t,o in grammar.rule_defs}
+    Relative imports are handled, so you can just freely use them.

-    term_defs = []
-    rule_defs = []
+    pkg_name: The name of the package. You can probably provide `__name__` most of the time
+    search_paths: All the paths that will be searched on absolute imports.
+ """ - def rule_dependencies(symbol): - if symbol.type != 'RULE': - return [] - try: - _, params, tree,_ = imported_rules[symbol] - except KeyError: - raise GrammarError("Missing symbol '%s' in grammar %s" % (symbol, namespace)) - return _find_used_symbols(tree) - set(params) + pkg_name: str + search_paths: Sequence[str] + def __init__(self, pkg_name: str, search_paths: Sequence[str]=("", )) -> None: + self.pkg_name = pkg_name + self.search_paths = search_paths + def __repr__(self): + return "%s(%r, %r)" % (type(self).__name__, self.pkg_name, self.search_paths) - def get_namespace_name(name, params): - if params is not None: - try: - return params[name] - except KeyError: - pass - try: - return aliases[name].value - except KeyError: - if name[0] == '_': - return '_%s__%s' % (namespace, name[1:]) - return '%s__%s' % (namespace, name) - - to_import = list(bfs(aliases, rule_dependencies)) - for symbol in to_import: - if symbol.type == 'TERMINAL': - term_defs.append([get_namespace_name(symbol, None), imported_terms[symbol]]) + def __call__(self, base_path: Union[None, str, PackageResource], grammar_path: str) -> Tuple[PackageResource, str]: + if base_path is None: + to_try = self.search_paths else: - assert symbol.type == 'RULE' - _, params, tree, options = imported_rules[symbol] - params_map = {p: ('%s__%s' if p[0]!='_' else '_%s__%s' ) % (namespace, p) for p in params} - for t in tree.iter_subtrees(): - for i, c in enumerate(t.children): - if isinstance(c, Token) and c.type in ('RULE', 'TERMINAL'): - t.children[i] = Token(c.type, get_namespace_name(c, params_map)) - params = [params_map[p] for p in params] # We can not rely on ordered dictionaries - rule_defs.append((get_namespace_name(symbol, params_map), params, tree, options)) + # Check whether or not the importing grammar was loaded by this module. + if not isinstance(base_path, PackageResource) or base_path.pkg_name != self.pkg_name: + # Technically false, but FileNotFound doesn't exist in python2.7, and this message should never reach the end user anyway + raise IOError() + to_try = [base_path.path] + + err = None + for path in to_try: + full_path = os.path.join(path, grammar_path) + try: + text: Optional[bytes] = pkgutil.get_data(self.pkg_name, full_path) + except IOError as e: + err = e + continue + else: + return PackageResource(self.pkg_name, full_path), (text.decode() if text else '') + raise IOError('Cannot find grammar in given paths') from err - return term_defs, rule_defs +stdlib_loader = FromPackageLoader('lark', IMPORT_PATHS) -def resolve_term_references(term_defs): - # TODO Solve with transitive closure (maybe) - term_dict = {k:t for k, (t,_p) in term_defs} - assert len(term_dict) == len(term_defs), "Same name defined twice?" 
+def resolve_term_references(term_dict):
     # TODO Solve with transitive closure (maybe)

     while True:
         changed = False
-        for name, (token_tree, _p) in term_defs:
+        for name, token_tree in term_dict.items():
             if token_tree is None:  # Terminal added through %declare
                 continue
             for exp in token_tree.find_data('value'):
                 item ,= exp.children
-                if isinstance(item, Token):
-                    if item.type == 'RULE':
-                        raise GrammarError("Rules aren't allowed inside terminals (%s in %s)" % (item, name))
-                    if item.type == 'TERMINAL':
-                        term_value = term_dict[item]
-                        assert term_value is not None
-                        exp.children[0] = term_value
-                        changed = True
+                if isinstance(item, NonTerminal):
+                    raise GrammarError("Rules aren't allowed inside terminals (%s in %s)" % (item, name))
+                elif isinstance(item, Terminal):
+                    try:
+                        term_value = term_dict[item.name]
+                    except KeyError:
+                        raise GrammarError("Terminal used but not defined: %s" % item.name)
+                    assert term_value is not None
+                    exp.children[0] = term_value
+                    changed = True
+                else:
+                    assert isinstance(item, Tree)
         if not changed:
             break

@@ -720,228 +898,531 @@ def resolve_term_references(term_defs):
             raise GrammarError("Recursion in terminal '%s' (recursion is only allowed in rules, not terminals)" % name)

-def options_from_rule(name, params, *x):
-    if len(x) > 1:
-        priority, expansions = x
-        priority = int(priority)
-    else:
-        expansions ,= x
-        priority = None
-    params = [t.value for t in params.children] if params is not None else []  # For the grammar parser
-
-    keep_all_tokens = name.startswith('!')
-    name = name.lstrip('!')
-    expand1 = name.startswith('?')
-    name = name.lstrip('?')
-
-    return name, params, expansions, RuleOptions(keep_all_tokens, expand1, priority=priority,
-                                                 template_source=(name if params else None))

-def symbols_from_strcase(expansion):
-    return [Terminal(x, filter_out=x.startswith('_')) if x.isupper() else NonTerminal(x) for x in expansion]
+def symbol_from_strcase(s):
+    assert isinstance(s, str)
+    return Terminal(s, filter_out=s.startswith('_')) if s.isupper() else NonTerminal(s)

 @inline_args
 class PrepareGrammar(Transformer_InPlace):
     def terminal(self, name):
-        return name
+        return Terminal(str(name), filter_out=name.startswith('_'))
+
     def nonterminal(self, name):
-        return name
+        return NonTerminal(name.value)


 def _find_used_symbols(tree):
     assert tree.data == 'expansions'
-    return {t for x in tree.find_data('expansion')
-            for t in x.scan_values(lambda t: t.type in ('RULE', 'TERMINAL'))}
+    return {t.name for x in tree.find_data('expansion')
+            for t in x.scan_values(lambda t: isinstance(t, Symbol))}


-class GrammarLoader:
-    def __init__(self, re_):
-        self.re = re_
+def _get_parser():
+    try:
+        return _get_parser.cache
+    except AttributeError:
         terminals = [TerminalDef(name, PatternRE(value)) for name, value in TERMINALS.items()]

-        rules = [options_from_rule(name, None, x) for name, x in RULES.items()]
-        rules = [Rule(NonTerminal(r), symbols_from_strcase(x.split()), i, None, o) for r, _p, xs, o in rules for i, x in enumerate(xs)]
-        callback = ParseTreeBuilder(rules, ST).create_callback()
-        lexer_conf = LexerConf(terminals, ['WS', 'COMMENT'])
+        rules = [(name.lstrip('?'), x, RuleOptions(expand1=name.startswith('?')))
+                 for name, x in RULES.items()]
+        rules = [Rule(NonTerminal(r), [symbol_from_strcase(s) for s in x.split()], i, None, o)
+                 for r, xs, o in rules for i, x in enumerate(xs)]
+        callback = ParseTreeBuilder(rules, ST).create_callback()
+        import re
+        lexer_conf = LexerConf(terminals, re, ['WS', 'COMMENT', 'BACKSLASH'])
         parser_conf = ParserConf(rules, callback, ['start'])
-        self.parser = LALR_TraditionalLexer(lexer_conf, parser_conf, re_)
+        lexer_conf.lexer_type = 'basic'
+        parser_conf.parser_type = 'lalr'
+        _get_parser.cache = ParsingFrontend(lexer_conf, parser_conf, None)
+        return _get_parser.cache
+
+GRAMMAR_ERRORS = [
+    ('Incorrect type of value', ['a: 1\n']),
+    ('Unclosed parenthesis', ['a: (\n']),
+    ('Unmatched closing parenthesis', ['a: )\n', 'a: [)\n', 'a: (]\n']),
+    ('Expecting rule or terminal definition (missing colon)', ['a\n', 'A\n', 'a->\n', 'A->\n', 'a A\n']),
+    ('Illegal name for rules or terminals', ['Aa:\n']),
+    ('Alias expects lowercase name', ['a: -> "a"\n']),
+    ('Unexpected colon', ['a::\n', 'a: b:\n', 'a: B:\n', 'a: "a":\n']),
+    ('Misplaced operator', ['a: b??', 'a: b(?)', 'a:+\n', 'a:?\n', 'a:*\n', 'a:|*\n']),
+    ('Expecting option ("|") or a new rule or terminal definition', ['a:a\n()\n']),
+    ('Terminal names cannot contain dots', ['A.B\n']),
+    ('Expecting rule or terminal definition', ['"a"\n']),
+    ('%import expects a name', ['%import "a"\n']),
+    ('%ignore expects a value', ['%ignore %import\n']),
+]
+
+def _translate_parser_exception(parse, e):
+    error = e.match_examples(parse, GRAMMAR_ERRORS, use_accepts=True)
+    if error:
+        return error
+    elif 'STRING' in e.expected:
+        return "Expecting a value"
+
+def _parse_grammar(text, name, start='start'):
+    try:
+        tree = _get_parser().parse(text + '\n', start)
+    except UnexpectedCharacters as e:
+        context = e.get_context(text)
+        raise GrammarError("Unexpected input at line %d column %d in %s: \n\n%s" %
+                           (e.line, e.column, name, context))
+    except UnexpectedToken as e:
+        context = e.get_context(text)
+        error = _translate_parser_exception(_get_parser().parse, e)
+        if error:
+            raise GrammarError("%s, at line %s column %s\n\n%s" % (error, e.line, e.column, context))
+        raise
+
+    return PrepareGrammar().transform(tree)
+
+
+def _error_repr(error):
+    if isinstance(error, UnexpectedToken):
+        error2 = _translate_parser_exception(_get_parser().parse, error)
+        if error2:
+            return error2
+        expected = ', '.join(error.accepts or error.expected)
+        return "Unexpected token %r. Expected one of: {%s}" % (str(error.token), expected)
+    else:
+        return str(error)

-        self.canonize_tree = CanonizeTree()
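How this machinery surfaces to users, sketched with the public lark API (the exact message text may differ): a malformed grammar is replayed against the GRAMMAR_ERRORS examples via match_examples, and the matching label becomes a friendly GrammarError.

from lark import Lark
from lark.exceptions import GrammarError

try:
    Lark("a: (\n")            # unclosed parenthesis in the grammar text
except GrammarError as e:
    print(e)                   # e.g. "Unclosed parenthesis, at line 1 column 5 ..."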
+def _search_interactive_parser(interactive_parser, predicate):
+    def expand(node):
+        path, p = node
+        for choice in p.choices():
+            t = Token(choice, '')
+            try:
+                new_p = p.feed_token(t)
+            except ParseError:    # Illegal
+                pass
+            else:
+                yield path + (choice,), new_p

-    def load_grammar(self, grammar_text, grammar_name='<?>'):
-        "Parse grammar_text, verify, and create Grammar object. Display nice messages on error."
+    for path, p in bfs_all_unique([((), interactive_parser)], expand):
+        if predicate(p):
+            return path, p

-        try:
-            tree = self.canonize_tree.transform( self.parser.parse(grammar_text+'\n') )
-        except UnexpectedCharacters as e:
-            context = e.get_context(grammar_text)
-            raise GrammarError("Unexpected input at line %d column %d in %s: \n\n%s" %
-                               (e.line, e.column, grammar_name, context))
-        except UnexpectedToken as e:
-            context = e.get_context(grammar_text)
-            error = e.match_examples(self.parser.parse, {
-                'Unclosed parenthesis': ['a: (\n'],
-                'Umatched closing parenthesis': ['a: )\n', 'a: [)\n', 'a: (]\n'],
-                'Expecting rule or terminal definition (missing colon)': ['a\n', 'a->\n', 'A->\n', 'a A\n'],
-                'Alias expects lowercase name': ['a: -> "a"\n'],
-                'Unexpected colon': ['a::\n', 'a: b:\n', 'a: B:\n', 'a: "a":\n'],
-                'Misplaced operator': ['a: b??', 'a: b(?)', 'a:+\n', 'a:?\n', 'a:*\n', 'a:|*\n'],
-                'Expecting option ("|") or a new rule or terminal definition': ['a:a\n()\n'],
-                '%import expects a name': ['%import "a"\n'],
-                '%ignore expects a value': ['%ignore %import\n'],
-            })
-            if error:
-                raise GrammarError("%s at line %s column %s\n\n%s" % (error, e.line, e.column, context))
-            elif 'STRING' in e.expected:
-                raise GrammarError("Expecting a value at line %s column %s\n\n%s" % (e.line, e.column, context))
-            raise
-
-        tree = PrepareGrammar().transform(tree)
-
-        # Extract grammar items
-        defs = classify(tree.children, lambda c: c.data, lambda c: c.children)
-        term_defs = defs.pop('term', [])
-        rule_defs = defs.pop('rule', [])
-        statements = defs.pop('statement', [])
-        assert not defs
-
-        term_defs = [td if len(td)==3 else (td[0], 1, td[1]) for td in term_defs]
-        term_defs = [(name.value, (t, int(p))) for name, p, t in term_defs]
-        rule_defs = [options_from_rule(*x) for x in rule_defs]
-
-        # Execute statements
-        ignore, imports = [], {}
-        for (stmt,) in statements:
-            if stmt.data == 'ignore':
-                t ,= stmt.children
-                ignore.append(t)
-            elif stmt.data == 'import':
-                if len(stmt.children) > 1:
-                    path_node, arg1 = stmt.children
-                else:
-                    path_node, = stmt.children
-                    arg1 = None
-
-                if isinstance(arg1, Tree):  # Multi import
-                    dotted_path = tuple(path_node.children)
-                    names = arg1.children
-                    aliases = dict(zip(names, names))  # Can't have aliased multi import, so all aliases will be the same as names
-                else:  # Single import
-                    dotted_path = tuple(path_node.children[:-1])
-                    name = path_node.children[-1]  # Get name from dotted path
-                    aliases = {name: arg1 or name}  # Aliases if exist
-
-                if path_node.data == 'import_lib':  # Import from library
-                    base_paths = []
-                else:  # Relative import
-                    if grammar_name == '<string>':  # Import relative to script file path if grammar is coded in script
-                        try:
-                            base_file = os.path.abspath(sys.modules['__main__'].__file__)
-                        except AttributeError:
-                            base_file = None
-                    else:
-                        base_file = grammar_name  # Import relative to grammar file path if external grammar file
-                    if base_file:
-                        base_paths = [os.path.split(base_file)[0]]
-                    else:
-                        base_paths = [os.path.abspath(os.path.curdir)]

+def find_grammar_errors(text: str, start: str='start') -> List[Tuple[UnexpectedInput, str]]:
+    errors = []
+    def on_error(e):
+        errors.append((e, _error_repr(e)))

-                try:
-                    import_base_paths, import_aliases = imports[dotted_path]
-                    assert base_paths == import_base_paths, 'Inconsistent base_paths for %s.' % '.'.join(dotted_path)
-                    import_aliases.update(aliases)
-                except KeyError:
-                    imports[dotted_path] = base_paths, aliases
+        # recover to a new line
+        token_path, _ = _search_interactive_parser(e.interactive_parser.as_immutable(), lambda p: '_NL' in p.choices())
+        for token_type in token_path:
+            e.interactive_parser.feed_token(Token(token_type, ''))
+        e.interactive_parser.feed_token(Token('_NL', '\n'))
+        return True

-            elif stmt.data == 'declare':
-                for t in stmt.children:
-                    term_defs.append([t.value, (None, None)])
+    _tree = _get_parser().parse(text + '\n', start, on_error=on_error)
+
+    errors_by_line = classify(errors, lambda e: e[0].line)
+    errors = [el[0] for el in errors_by_line.values()]  # already sorted
+
+    for e in errors:
+        e[0].interactive_parser = None
+    return errors
+
+
+def _get_mangle(prefix, aliases, base_mangle=None):
+    def mangle(s):
+        if s in aliases:
+            s = aliases[s]
+        else:
+            if s[0] == '_':
+                s = '_%s__%s' % (prefix, s[1:])
             else:
-                assert False, stmt
+                s = '%s__%s' % (prefix, s)
+        if base_mangle is not None:
+            s = base_mangle(s)
+        return s
+    return mangle
+
+def _mangle_definition_tree(exp, mangle):
+    if mangle is None:
+        return exp
+    exp = deepcopy(exp)  # TODO: is this needed?
+    for t in exp.iter_subtrees():
+        for i, c in enumerate(t.children):
+            if isinstance(c, Symbol):
+                t.children[i] = c.renamed(mangle)
+
+    return exp
+
+def _make_rule_tuple(modifiers_tree, name, params, priority_tree, expansions):
+    if modifiers_tree.children:
+        m ,= modifiers_tree.children
+        expand1 = '?' in m
+        if expand1 and name.startswith('_'):
+            raise GrammarError("Inlined rules (_rule) cannot use the ?rule modifier.")
+        keep_all_tokens = '!' in m
+    else:
+        keep_all_tokens = False
+        expand1 = False
+
+    if priority_tree.children:
+        p ,= priority_tree.children
+        priority = int(p)
+    else:
+        priority = None
+
+    if params is not None:
+        params = [t.value for t in params.children]  # For the grammar parser
+
+    return name, params, expansions, RuleOptions(keep_all_tokens, expand1, priority=priority,
+                                                 template_source=(name if params else None))
+
+
+class Definition:
+    def __init__(self, is_term, tree, params=(), options=None):
+        self.is_term = is_term
+        self.tree = tree
+        self.params = tuple(params)
+        self.options = options
+
+class GrammarBuilder:
+
+    global_keep_all_tokens: bool
+    import_paths: List[Union[str, Callable]]
+    used_files: Dict[str, str]
+
+    _definitions: Dict[str, Definition]
+    _ignore_names: List[str]
+
+    def __init__(self, global_keep_all_tokens: bool=False, import_paths: Optional[List[Union[str, Callable]]]=None, used_files: Optional[Dict[str, str]]=None) -> None:
+        self.global_keep_all_tokens = global_keep_all_tokens
+        self.import_paths = import_paths or []
+        self.used_files = used_files or {}
+
+        self._definitions: Dict[str, Definition] = {}
+        self._ignore_names: List[str] = []
+
+    def _grammar_error(self, is_term, msg, *names):
+        args = {}
+        for i, name in enumerate(names, start=1):
+            postfix = '' if i == 1 else str(i)
+            args['name' + postfix] = name
+            args['type' + postfix] = lowercase_type = ("rule", "terminal")[is_term]
+            args['Type' + postfix] = lowercase_type.title()
+        raise GrammarError(msg.format(**args))
+
+    def _check_options(self, is_term, options):
+        if is_term:
+            if options is None:
+                options = 1
+            elif not isinstance(options, int):
+                raise GrammarError("Terminals require a single int as 'options' (e.g. priority), got %s" % (type(options),))
+        else:
+            if options is None:
+                options = RuleOptions()
+            elif not isinstance(options, RuleOptions):
+                raise GrammarError("Rules require a RuleOptions instance as 'options'")
+            if self.global_keep_all_tokens:
+                options.keep_all_tokens = True
+        return options

-        # import grammars
-        for dotted_path, (base_paths, aliases) in imports.items():
-            grammar_path = os.path.join(*dotted_path) + EXT
-            g = import_grammar(grammar_path, self.re, base_paths=base_paths)
-            new_td, new_rd = import_from_grammar_into_namespace(g, '__'.join(dotted_path), aliases)
-
-            term_defs += new_td
-            rule_defs += new_rd
-
-        # Verify correctness 1
-        for name, _ in term_defs:
-            if name.startswith('__'):
-                raise GrammarError('Names starting with double-underscore are reserved (Error at %s)' % name)
-
-        # Handle ignore tokens
-        # XXX A slightly hacky solution. Recognition of %ignore TERMINAL as separate comes from the lexer's
-        #     inability to handle duplicate terminals (two names, one value)
-        ignore_names = []
-        for t in ignore:
-            if t.data=='expansions' and len(t.children) == 1:
+
+    def _define(self, name, is_term, exp, params=(), options=None, *, override=False):
+        if name in self._definitions:
+            if not override:
+                self._grammar_error(is_term, "{Type} '{name}' defined more than once", name)
+        elif override:
+            self._grammar_error(is_term, "Cannot override a nonexistent {type} {name}", name)
+
+        if name.startswith('__'):
+            self._grammar_error(is_term, 'Names starting with double-underscore are reserved (Error at {name})', name)
+
+        self._definitions[name] = Definition(is_term, exp, params, self._check_options(is_term, options))
+
+    def _extend(self, name, is_term, exp, params=(), options=None):
+        if name not in self._definitions:
+            self._grammar_error(is_term, "Can't extend {type} {name} as it wasn't defined before", name)
+
+        d = self._definitions[name]
+
+        if is_term != d.is_term:
+            self._grammar_error(is_term, "Cannot extend {type} {name} - one is a terminal, while the other is not.", name)
+        if tuple(params) != d.params:
+            self._grammar_error(is_term, "Cannot extend {type} with different parameters: {name}", name)
+
+        if d.tree is None:
+            self._grammar_error(is_term, "Can't extend {type} {name} - it is abstract.", name)
+
+        # TODO: think about what to do with 'options'
+        base = d.tree
+
+        assert isinstance(base, Tree) and base.data == 'expansions'
+        base.children.insert(0, exp)
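The grammar-level behaviour that _define(override=True) and _extend implement, sketched with a hypothetical grammar (assumes the public lark package; %override replaces an imported definition outright, while %extend prepends one more alternative to its expansions tree, exactly as _extend does above):

from lark import Lark

grammar = r'''
start: INT+

%import common (INT, WS)
%override INT: /[0-9_]+/       // replace the imported definition
%extend INT: "unknown"         // add one more alternative to INT
%ignore WS
'''

parser = Lark(grammar)
print([t.value for t in parser.parse("12 unknown 3_4").children])
# expected: ['12', 'unknown', '3_4']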

+    def _ignore(self, exp_or_name):
+        if isinstance(exp_or_name, str):
+            self._ignore_names.append(exp_or_name)
+        else:
+            assert isinstance(exp_or_name, Tree)
+            t = exp_or_name
+            if t.data == 'expansions' and len(t.children) == 1:
                 t2 ,= t.children
                 if t2.data=='expansion' and len(t2.children) == 1:
                     item ,= t2.children
                     if item.data == 'value':
                         item ,= item.children
-                        if isinstance(item, Token) and item.type == 'TERMINAL':
-                            ignore_names.append(item.value)
-                            continue
+                        if isinstance(item, Terminal):
+                            # Keep terminal name, no need to create a new definition
+                            self._ignore_names.append(item.name)
+                            return
+
+            name = '__IGNORE_%d'% len(self._ignore_names)
+            self._ignore_names.append(name)
+            self._definitions[name] = Definition(True, t, options=TOKEN_DEFAULT_PRIORITY)
+
+    def _unpack_import(self, stmt, grammar_name):
+        if len(stmt.children) > 1:
+            path_node, arg1 = stmt.children
+        else:
+            path_node, = stmt.children
+            arg1 = None
+
+        if isinstance(arg1, Tree):  # Multi import
+            dotted_path = tuple(path_node.children)
+            names = arg1.children
+            aliases = dict(zip(names, names))  # Can't have aliased multi import, so all aliases will be the same as names
+        else:  # Single import
+            dotted_path = tuple(path_node.children[:-1])
+            if not dotted_path:
+                name ,= path_node.children
+                raise GrammarError("Nothing was imported from grammar `%s`" % name)
+            name = path_node.children[-1]  # Get name from dotted path
+            aliases = {name.value: (arg1 or name).value}  # Aliases if exist
+
+        if path_node.data == 'import_lib':  # Import from library
+            base_path = None
+        else:  # Relative import
+            if grammar_name == '<string>':  # Import relative to script file path if grammar is coded in script
+                try:
+                    base_file = os.path.abspath(sys.modules['__main__'].__file__)
+                except AttributeError:
+                    base_file = None
+            else:
+                base_file = grammar_name  # Import relative to grammar file path if external grammar file
+            if base_file:
+                if isinstance(base_file, PackageResource):
+                    base_path = PackageResource(base_file.pkg_name, os.path.split(base_file.path)[0])
+                else:
+                    base_path = os.path.split(base_file)[0]
+            else:
+                base_path = os.path.abspath(os.path.curdir)
+
+        return dotted_path, base_path, aliases

-            name = '__IGNORE_%d'% len(ignore_names)
-            ignore_names.append(name)
-            term_defs.append((name, (t, 1)))
+    def _unpack_definition(self, tree, mangle):

-        # Verify correctness 2
-        terminal_names = set()
-        for name, _ in term_defs:
-            if name in terminal_names:
-                raise GrammarError("Terminal '%s' defined more than once" % name)
-            terminal_names.add(name)
+        if tree.data == 'rule':
+            name, params, exp, opts = _make_rule_tuple(*tree.children)
+            is_term = False
+        else:
+            name = tree.children[0].value
+            params = ()     # TODO terminal templates
+            opts = int(tree.children[1]) if len(tree.children) == 3 else TOKEN_DEFAULT_PRIORITY  # priority
+            exp = tree.children[-1]
+            is_term = True
+
+        if mangle is not None:
+            params = tuple(mangle(p) for p in params)
+            name = mangle(name)
+
+        exp = _mangle_definition_tree(exp, mangle)
+        return name, is_term, exp, params, opts

-        if set(ignore_names) > terminal_names:
-            raise GrammarError("Terminals %s were marked to ignore but were not defined!" % (set(ignore_names) - terminal_names))
+    def load_grammar(self, grammar_text: str, grammar_name: str="<?>", mangle: Optional[Callable[[str], str]]=None) -> None:
+        tree = _parse_grammar(grammar_text, grammar_name)
+
+        imports: Dict[Tuple[str, ...], Tuple[Optional[str], Dict[str, str]]] = {}
+
+        for stmt in tree.children:
+            if stmt.data == 'import':
+                dotted_path, base_path, aliases = self._unpack_import(stmt, grammar_name)
+                try:
+                    import_base_path, import_aliases = imports[dotted_path]
+                    assert base_path == import_base_path, 'Inconsistent base_path for %s.' % '.'.join(dotted_path)
+                    import_aliases.update(aliases)
+                except KeyError:
+                    imports[dotted_path] = base_path, aliases
+
+        for dotted_path, (base_path, aliases) in imports.items():
+            self.do_import(dotted_path, base_path, aliases, mangle)
+
+        for stmt in tree.children:
+            if stmt.data in ('term', 'rule'):
+                self._define(*self._unpack_definition(stmt, mangle))
+            elif stmt.data == 'override':
+                r ,= stmt.children
+                self._define(*self._unpack_definition(r, mangle), override=True)
+            elif stmt.data == 'extend':
+                r ,= stmt.children
+                self._extend(*self._unpack_definition(r, mangle))
+            elif stmt.data == 'ignore':
+                # if mangle is not None, we shouldn't apply ignore, since we aren't in a toplevel grammar
+                if mangle is None:
+                    self._ignore(*stmt.children)
+            elif stmt.data == 'declare':
+                for symbol in stmt.children:
+                    assert isinstance(symbol, Symbol), symbol
+                    is_term = isinstance(symbol, Terminal)
+                    if mangle is None:
+                        name = symbol.name
+                    else:
+                        name = mangle(symbol.name)
+                    self._define(name, is_term, None)
+            elif stmt.data == 'import':
+                pass
+            else:
+                assert False, stmt
+
+        term_defs = { name: d.tree
+                      for name, d in self._definitions.items()
+                      if d.is_term
+                    }
         resolve_term_references(term_defs)

-        rules = rule_defs
-        rule_names = {}
-        for name, params, _x, _o in rules:
-            if name.startswith('__'):
-                raise GrammarError('Names starting with double-underscore are reserved (Error at %s)' % name)
-            if name in rule_names:
-                raise GrammarError("Rule '%s' defined more than once" % name)
-            rule_names[name] = len(params)
+    def _remove_unused(self, used):
+        def rule_dependencies(symbol):
+            try:
+                d = self._definitions[symbol]
+            except KeyError:
+                return []
+            if d.is_term:
+                return []
+            return _find_used_symbols(d.tree) - set(d.params)
+
+        _used = set(bfs(used, rule_dependencies))
+        self._definitions = {k: v for k, v in self._definitions.items() if k in _used}
+
+
+    def do_import(self, dotted_path: Tuple[str, ...], base_path: Optional[str], aliases: Dict[str, str], base_mangle: Optional[Callable[[str], str]]=None) -> None:
+        assert dotted_path
+        mangle = _get_mangle('__'.join(dotted_path), aliases, base_mangle)
+        grammar_path = os.path.join(*dotted_path) + EXT
+        to_try = self.import_paths + ([base_path] if base_path is not None else []) + [stdlib_loader]
+        for source in to_try:
+            try:
+                if callable(source):
+                    joined_path, text = source(base_path, grammar_path)
+                else:
+                    joined_path = os.path.join(source, grammar_path)
+                    with open(joined_path, encoding='utf8') as f:
+                        text = f.read()
+            except IOError:
+                continue
+            else:
+                h = sha256_digest(text)
+                if self.used_files.get(joined_path, h) != h:
+                    raise RuntimeError("Grammar file was changed during importing")
+                self.used_files[joined_path] = h
+
+                gb = GrammarBuilder(self.global_keep_all_tokens, self.import_paths, self.used_files)
+                gb.load_grammar(text, joined_path, mangle)
+                gb._remove_unused(map(mangle, aliases))
+                for name in gb._definitions:
+                    if name in self._definitions:
+                        raise GrammarError("Cannot import '%s' from '%s': Symbol already defined." % (name, grammar_path))
+
+                self._definitions.update(**gb._definitions)
+                break
+        else:
+            # Search failed. Make Python throw a nice error.
+            open(grammar_path, encoding='utf8')
+            assert False, "Couldn't import grammar %s, but a corresponding file was found at a place where lark doesn't search for it" % (dotted_path,)
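A small sketch of the name mangling do_import applies via _get_mangle, for an import like `%import common.NUMBER -> NUM` (prefix 'common'); the printed values follow directly from the function body above:

mangle = _get_mangle('common', {'NUMBER': 'NUM'})

print(mangle('NUMBER'))   # 'NUM'            - aliased names keep the alias
print(mangle('DIGIT'))    # 'common__DIGIT'  - other names get namespaced
print(mangle('_ws'))      # '_common__ws'    - a leading '_' stays in front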
+ open(grammar_path, encoding='utf8') + assert False, "Couldn't import grammar %s, but a corresponding file was found at a place where lark doesn't search for it" % (dotted_path,) + + + def validate(self) -> None: + for name, d in self._definitions.items(): + params = d.params + exp = d.tree - for name, params , expansions, _o in rules: for i, p in enumerate(params): - if p in rule_names: + if p in self._definitions: raise GrammarError("Template Parameter conflicts with rule %s (in template %s)" % (p, name)) if p in params[:i]: raise GrammarError("Duplicate Template Parameter %s (in template %s)" % (p, name)) - for temp in expansions.find_data('template_usage'): - sym = temp.children[0] + + if exp is None: # Remaining checks don't apply to abstract rules/terminals (created with %declare) + continue + + for temp in exp.find_data('template_usage'): + sym = temp.children[0].name args = temp.children[1:] if sym not in params: - if sym not in rule_names: - raise GrammarError("Template '%s' used but not defined (in rule %s)" % (sym, name)) - if len(args) != rule_names[sym]: - raise GrammarError("Wrong number of template arguments used for %s " - "(expected %s, got %s) (in rule %s)"%(sym, rule_names[sym], len(args), name)) - for sym in _find_used_symbols(expansions): - if sym.type == 'TERMINAL': - if sym not in terminal_names: - raise GrammarError("Token '%s' used but not defined (in rule %s)" % (sym, name)) - else: - if sym not in rule_names and sym not in params: - raise GrammarError("Rule '%s' used but not defined (in rule %s)" % (sym, name)) + if sym not in self._definitions: + self._grammar_error(d.is_term, "Template '%s' used but not defined (in {type} {name})" % sym, name) + if len(args) != len(self._definitions[sym].params): + expected, actual = len(self._definitions[sym].params), len(args) + self._grammar_error(d.is_term, "Wrong number of template arguments used for {name} " + "(expected %s, got %s) (in {type2} {name2})" % (expected, actual), sym, name) + + for sym in _find_used_symbols(exp): + if sym not in self._definitions and sym not in params: + self._grammar_error(d.is_term, "{Type} '{name}' used but not defined (in {type2} {name2})", sym, name) + + if not set(self._definitions).issuperset(self._ignore_names): + raise GrammarError("Terminals %s were marked to ignore but were not defined!" % (set(self._ignore_names) - set(self._definitions))) + + def build(self) -> Grammar: + self.validate() + rule_defs = [] + term_defs = [] + for name, d in self._definitions.items(): + (params, exp, options) = d.params, d.tree, d.options + if d.is_term: + assert len(params) == 0 + term_defs.append((name, (exp, options))) + else: + rule_defs.append((name, params, exp, options)) + # resolve_term_references(term_defs) + return Grammar(rule_defs, term_defs, self._ignore_names) + + +def verify_used_files(file_hashes): + for path, old in file_hashes.items(): + text = None + if isinstance(path, str) and os.path.exists(path): + with open(path, encoding='utf8') as f: + text = f.read() + elif isinstance(path, PackageResource): + with suppress(IOError): + text = pkgutil.get_data(*path).decode('utf-8') + if text is None: # We don't know how to load the path. ignore it. 
+ continue + + current = sha256_digest(text) + if old != current: + logger.info("File %r changed, rebuilding Parser" % path) + return False + return True +def list_grammar_imports(grammar, import_paths=[]): + "Returns a list of paths to the lark grammars imported by the given grammar (recursively)" + builder = GrammarBuilder(False, import_paths) + builder.load_grammar(grammar, '') + return list(builder.used_files.keys()) - return Grammar(rules, term_defs, ignore_names) +def load_grammar(grammar, source, import_paths, global_keep_all_tokens): + builder = GrammarBuilder(global_keep_all_tokens, import_paths) + builder.load_grammar(grammar, source) + return builder.build(), builder.used_files +def sha256_digest(s: str) -> str: + """Get the sha256 digest of a string -def load_grammar(grammar, source, re_): - return GrammarLoader(re_).load_grammar(grammar, source) + Supports the `usedforsecurity` argument for Python 3.9+ to allow running on + a FIPS-enabled system. + """ + if sys.version_info >= (3, 9): + return hashlib.sha256(s.encode('utf8'), usedforsecurity=False).hexdigest() + else: + return hashlib.sha256(s.encode('utf8')).hexdigest() diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/parse_tree_builder.py b/conda_lock/_vendor/poetry/core/_vendor/lark/parse_tree_builder.py index 5a7c5d70..e3a41718 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/parse_tree_builder.py +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/parse_tree_builder.py @@ -1,13 +1,16 @@ -from .exceptions import GrammarError +"""Provides functions for the automatic building and shaping of the parse-tree.""" + +from typing import List + +from .exceptions import GrammarError, ConfigurationError from .lexer import Token from .tree import Tree -from .visitors import InlineTransformer # XXX Deprecated from .visitors import Transformer_InPlace from .visitors import _vargs_meta, _vargs_meta_inline ###{standalone from functools import partial, wraps -from itertools import repeat, product +from itertools import product class ExpandSingleChild: @@ -20,50 +23,73 @@ def __call__(self, children): else: return self.node_builder(children) + + class PropagatePositions: - def __init__(self, node_builder): + def __init__(self, node_builder, node_filter=None): self.node_builder = node_builder + self.node_filter = node_filter def __call__(self, children): res = self.node_builder(children) - # local reference to Tree.meta reduces number of presence checks if isinstance(res, Tree): + # Calculate positions while the tree is streaming, according to the rule: + # - nodes start at the start of their first child's container, + # and end at the end of their last child's container. + # Containers are nodes that take up space in text, but have been inlined in the tree. + res_meta = res.meta - for c in children: - if isinstance(c, Tree): - child_meta = c.meta - if not child_meta.empty: - res_meta.line = child_meta.line - res_meta.column = child_meta.column - res_meta.start_pos = child_meta.start_pos - res_meta.empty = False - break - elif isinstance(c, Token): - res_meta.line = c.line - res_meta.column = c.column - res_meta.start_pos = c.pos_in_stream + + first_meta = self._pp_get_meta(children) + if first_meta is not None: + if not hasattr(res_meta, 'line'): + # meta was already set, probably because the rule has been inlined (e.g. 
`?rule`) + res_meta.line = getattr(first_meta, 'container_line', first_meta.line) + res_meta.column = getattr(first_meta, 'container_column', first_meta.column) + res_meta.start_pos = getattr(first_meta, 'container_start_pos', first_meta.start_pos) res_meta.empty = False - break - - for c in reversed(children): - if isinstance(c, Tree): - child_meta = c.meta - if not child_meta.empty: - res_meta.end_line = child_meta.end_line - res_meta.end_column = child_meta.end_column - res_meta.end_pos = child_meta.end_pos - res_meta.empty = False - break - elif isinstance(c, Token): - res_meta.end_line = c.end_line - res_meta.end_column = c.end_column - res_meta.end_pos = c.end_pos + + res_meta.container_line = getattr(first_meta, 'container_line', first_meta.line) + res_meta.container_column = getattr(first_meta, 'container_column', first_meta.column) + res_meta.container_start_pos = getattr(first_meta, 'container_start_pos', first_meta.start_pos) + + last_meta = self._pp_get_meta(reversed(children)) + if last_meta is not None: + if not hasattr(res_meta, 'end_line'): + res_meta.end_line = getattr(last_meta, 'container_end_line', last_meta.end_line) + res_meta.end_column = getattr(last_meta, 'container_end_column', last_meta.end_column) + res_meta.end_pos = getattr(last_meta, 'container_end_pos', last_meta.end_pos) res_meta.empty = False - break + + res_meta.container_end_line = getattr(last_meta, 'container_end_line', last_meta.end_line) + res_meta.container_end_column = getattr(last_meta, 'container_end_column', last_meta.end_column) + res_meta.container_end_pos = getattr(last_meta, 'container_end_pos', last_meta.end_pos) return res + def _pp_get_meta(self, children): + for c in children: + if self.node_filter is not None and not self.node_filter(c): + continue + if isinstance(c, Tree): + if not c.meta.empty: + return c.meta + elif isinstance(c, Token): + return c + elif hasattr(c, '__lark_meta__'): + return c.__lark_meta__() + +def make_propagate_positions(option): + if callable(option): + return partial(PropagatePositions, node_filter=option) + elif option is True: + return PropagatePositions + elif option is False: + return None + + raise ConfigurationError('Invalid option for propagate_positions: %r' % option) + class ChildFilter: def __init__(self, to_include, append_none, node_builder): @@ -87,8 +113,9 @@ def __call__(self, children): return self.node_builder(filtered) + class ChildFilterLALR(ChildFilter): - "Optimized childfilter for LALR (assumes no duplication in parse tree, so it's safe to change it)" + """Optimized childfilter for LALR (assumes no duplication in parse tree, so it's safe to change it)""" def __call__(self, children): filtered = [] @@ -108,6 +135,7 @@ def __call__(self, children): return self.node_builder(filtered) + class ChildFilterLALR_NoPlaceholders(ChildFilter): "Optimized childfilter for LALR (assumes no duplication in parse tree, so it's safe to change it)" def __init__(self, to_include, node_builder): @@ -126,10 +154,12 @@ def __call__(self, children): filtered.append(children[i]) return self.node_builder(filtered) + def _should_expand(sym): return not sym.is_term and sym.name.startswith('_') -def maybe_create_child_filter(expansion, keep_all_tokens, ambiguous, _empty_indices): + +def maybe_create_child_filter(expansion, keep_all_tokens, ambiguous, _empty_indices: List[bool]): # Prepare empty_indices as: How many Nones to insert at each index? 
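The comment just above states the question the placeholder logic has to answer: `_empty_indices` is, as I read the surrounding code, a per-position list of booleans (True where an optional item matched nothing), and it must be folded into counts of Nones to insert around each kept child. One way to perform that fold, shown as a hypothetical standalone helper since the actual body sits outside this hunk:

    def nones_per_index(empty_indices):
        # [True, False, True, True, False] -> [1, 2, 0]:
        # one None before the first kept child, two between the two kept
        # children, none after the last.
        # len(result) == empty_indices.count(False) + 1.
        counts, run = [], 0
        for is_empty in empty_indices:
            if is_empty:
                run += 1
            else:
                counts.append(run)
                run = 0
        counts.append(run)
        return counts

    assert nones_per_index([True, False, True, True, False]) == [1, 2, 0]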
if _empty_indices: assert _empty_indices.count(False) == len(expansion) @@ -156,21 +186,22 @@ def maybe_create_child_filter(expansion, keep_all_tokens, ambiguous, _empty_indi # LALR without placeholders return partial(ChildFilterLALR_NoPlaceholders, [(i, x) for i,x,_ in to_include]) + class AmbiguousExpander: """Deal with the case where we're expanding children ('_rule') into a parent but the children are ambiguous. i.e. (parent->_ambig->_expand_this_rule). In this case, make the parent itself - ambiguous with as many copies as their are ambiguous children, and then copy the ambiguous children - into the right parents in the right places, essentially shifting the ambiguiuty up the tree.""" + ambiguous with as many copies as there are ambiguous children, and then copy the ambiguous children + into the right parents in the right places, essentially shifting the ambiguity up the tree.""" def __init__(self, to_expand, tree_class, node_builder): self.node_builder = node_builder self.tree_class = tree_class self.to_expand = to_expand def __call__(self, children): - def _is_ambig_tree(child): - return hasattr(child, 'data') and child.data == '_ambig' + def _is_ambig_tree(t): + return hasattr(t, 'data') and t.data == '_ambig' - #### When we're repeatedly expanding ambiguities we can end up with nested ambiguities. + # -- When we're repeatedly expanding ambiguities we can end up with nested ambiguities. # All children of an _ambig node should be a derivation of that ambig node, hence # it is safe to assume that if we see an _ambig node nested within an ambig node # it is safe to simply expand it into the parent _ambig node as an alternative derivation. @@ -180,14 +211,14 @@ def _is_ambig_tree(child): if i in self.to_expand: ambiguous.append(i) - to_expand = [j for j, grandchild in enumerate(child.children) if _is_ambig_tree(grandchild)] - child.expand_kids_by_index(*to_expand) + child.expand_kids_by_data('_ambig') if not ambiguous: return self.node_builder(children) - expand = [ iter(child.children) if i in ambiguous else repeat(child) for i, child in enumerate(children) ] - return self.tree_class('_ambig', [self.node_builder(list(f[0])) for f in product(zip(*expand))]) + expand = [child.children if i in ambiguous else (child,) for i, child in enumerate(children)] + return self.tree_class('_ambig', [self.node_builder(list(f)) for f in product(*expand)]) + def maybe_create_ambiguous_expander(tree_class, expansion, keep_all_tokens): to_expand = [i for i, sym in enumerate(expansion) @@ -195,11 +226,88 @@ def maybe_create_ambiguous_expander(tree_class, expansion, keep_all_tokens): if to_expand: return partial(AmbiguousExpander, to_expand, tree_class) -def ptb_inline_args(func): - @wraps(func) - def f(children): - return func(*children) - return f + +class AmbiguousIntermediateExpander: + """ + Propagate ambiguous intermediate nodes and their derivations up to the + current rule. + + In general, converts + + rule + _iambig + _inter + someChildren1 + ... + _inter + someChildren2 + ... + someChildren3 + ... + + to + + _ambig + rule + someChildren1 + ... + someChildren3 + ... + rule + someChildren2 + ... + someChildren3 + ... + rule + childrenFromNestedIambigs + ... + someChildren3 + ... + ... + + propagating up any nested '_iambig' nodes along the way. 
+ """ + + def __init__(self, tree_class, node_builder): + self.node_builder = node_builder + self.tree_class = tree_class + + def __call__(self, children): + def _is_iambig_tree(child): + return hasattr(child, 'data') and child.data == '_iambig' + + def _collapse_iambig(children): + """ + Recursively flatten the derivations of the parent of an '_iambig' + node. Returns a list of '_inter' nodes guaranteed not + to contain any nested '_iambig' nodes, or None if children does + not contain an '_iambig' node. + """ + + # Due to the structure of the SPPF, + # an '_iambig' node can only appear as the first child + if children and _is_iambig_tree(children[0]): + iambig_node = children[0] + result = [] + for grandchild in iambig_node.children: + collapsed = _collapse_iambig(grandchild.children) + if collapsed: + for child in collapsed: + child.children += children[1:] + result += collapsed + else: + new_tree = self.tree_class('_inter', grandchild.children + children[1:]) + result.append(new_tree) + return result + + collapsed = _collapse_iambig(children) + if collapsed: + processed_nodes = [self.node_builder(c.children) for c in collapsed] + return self.tree_class('_ambig', processed_nodes) + + return self.node_builder(children) + + def inplace_transformer(func): @wraps(func) @@ -209,9 +317,11 @@ def f(children): return func(tree) return f + def apply_visit_wrapper(func, name, wrapper): if wrapper is _vargs_meta or wrapper is _vargs_meta_inline: raise NotImplementedError("Meta args not supported for internal transformer") + @wraps(func) def f(children): return wrapper(func, name, children, None) @@ -219,50 +329,54 @@ def f(children): class ParseTreeBuilder: - def __init__(self, rules, tree_class, propagate_positions=False, keep_all_tokens=False, ambiguous=False, maybe_placeholders=False): + def __init__(self, rules, tree_class, propagate_positions=False, ambiguous=False, maybe_placeholders=False): self.tree_class = tree_class self.propagate_positions = propagate_positions - self.always_keep_all_tokens = keep_all_tokens self.ambiguous = ambiguous self.maybe_placeholders = maybe_placeholders self.rule_builders = list(self._init_builders(rules)) def _init_builders(self, rules): + propagate_positions = make_propagate_positions(self.propagate_positions) + for rule in rules: options = rule.options - keep_all_tokens = self.always_keep_all_tokens or options.keep_all_tokens + keep_all_tokens = options.keep_all_tokens expand_single_child = options.expand1 wrapper_chain = list(filter(None, [ (expand_single_child and not rule.alias) and ExpandSingleChild, maybe_create_child_filter(rule.expansion, keep_all_tokens, self.ambiguous, options.empty_indices if self.maybe_placeholders else None), - self.propagate_positions and PropagatePositions, + propagate_positions, self.ambiguous and maybe_create_ambiguous_expander(self.tree_class, rule.expansion, keep_all_tokens), + self.ambiguous and partial(AmbiguousIntermediateExpander, self.tree_class) ])) yield rule, wrapper_chain - def create_callback(self, transformer=None): callbacks = {} + default_handler = getattr(transformer, '__default__', None) + if default_handler: + def default_callback(data, children): + return default_handler(data, children, None) + else: + default_callback = self.tree_class + for rule, wrapper_chain in self.rule_builders: user_callback_name = rule.alias or rule.options.template_source or rule.origin.name try: f = getattr(transformer, user_callback_name) - # XXX InlineTransformer is deprecated! 
wrapper = getattr(f, 'visit_wrapper', None) if wrapper is not None: f = apply_visit_wrapper(f, user_callback_name, wrapper) - else: - if isinstance(transformer, InlineTransformer): - f = ptb_inline_args(f) - elif isinstance(transformer, Transformer_InPlace): - f = inplace_transformer(f) + elif isinstance(transformer, Transformer_InPlace): + f = inplace_transformer(f) except AttributeError: - f = partial(self.tree_class, user_callback_name) + f = partial(default_callback, user_callback_name) for w in wrapper_chain: f = w(f) diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/parser_frontends.py b/conda_lock/_vendor/poetry/core/_vendor/lark/parser_frontends.py index c453ab67..186058a6 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/parser_frontends.py +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/parser_frontends.py @@ -1,225 +1,232 @@ -from functools import partial +from typing import Any, Callable, Dict, Optional, Collection, Union, TYPE_CHECKING +from .exceptions import ConfigurationError, GrammarError, assert_config from .utils import get_regexp_width, Serialize -from .parsers.grammar_analysis import GrammarAnalyzer -from .lexer import TraditionalLexer, ContextualLexer, Lexer, Token +from .lexer import LexerThread, BasicLexer, ContextualLexer, Lexer from .parsers import earley, xearley, cyk from .parsers.lalr_parser import LALR_Parser -from .grammar import Rule from .tree import Tree -from .common import LexerConf +from .common import LexerConf, ParserConf, _ParserArgType, _LexerArgType + +if TYPE_CHECKING: + from .parsers.lalr_analysis import ParseTableBase + ###{standalone -def get_frontend(parser, lexer): - if parser=='lalr': - if lexer is None: - raise ValueError('The LALR parser requires use of a lexer') - elif lexer == 'standard': - return LALR_TraditionalLexer - elif lexer == 'contextual': - return LALR_ContextualLexer - elif issubclass(lexer, Lexer): - return partial(LALR_CustomLexer, lexer) - else: - raise ValueError('Unknown lexer: %s' % lexer) - elif parser=='earley': - if lexer=='standard': - return Earley - elif lexer=='dynamic': - return XEarley - elif lexer=='dynamic_complete': - return XEarley_CompleteLex - elif lexer=='contextual': - raise ValueError('The Earley parser does not support the contextual parser') - else: - raise ValueError('Unknown lexer: %s' % lexer) - elif parser == 'cyk': - if lexer == 'standard': - return CYK - else: - raise ValueError('CYK parser requires using standard parser.') +def _wrap_lexer(lexer_class): + future_interface = getattr(lexer_class, '__future_interface__', False) + if future_interface: + return lexer_class else: - raise ValueError('Unknown parser: %s' % parser) + class CustomLexerWrapper(Lexer): + def __init__(self, lexer_conf): + self.lexer = lexer_class(lexer_conf) + def lex(self, lexer_state, parser_state): + return self.lexer.lex(lexer_state.text) + return CustomLexerWrapper -class _ParserFrontend(Serialize): - def _parse(self, input, start, *args): - if start is None: - start = self.start - if len(start) > 1: - raise ValueError("Lark initialized with more than 1 possible start rule. 
Must specify which start rule to parse", start) - start ,= start - return self.parser.parse(input, start, *args) +def _deserialize_parsing_frontend(data, memo, lexer_conf, callbacks, options): + parser_conf = ParserConf.deserialize(data['parser_conf'], memo) + cls = (options and options._plugins.get('LALR_Parser')) or LALR_Parser + parser = cls.deserialize(data['parser'], memo, callbacks, options.debug) + parser_conf.callbacks = callbacks + return ParsingFrontend(lexer_conf, parser_conf, options, parser=parser) + +_parser_creators: 'Dict[str, Callable[[LexerConf, Any, Any], Any]]' = {} -class WithLexer(_ParserFrontend): - lexer = None - parser = None - lexer_conf = None - start = None - __serialize_fields__ = 'parser', 'lexer_conf', 'start' - __serialize_namespace__ = LexerConf, +class ParsingFrontend(Serialize): + __serialize_fields__ = 'lexer_conf', 'parser_conf', 'parser' - def __init__(self, lexer_conf, parser_conf, re_, options=None): + lexer_conf: LexerConf + parser_conf: ParserConf + options: Any + + def __init__(self, lexer_conf: LexerConf, parser_conf: ParserConf, options, parser=None): + self.parser_conf = parser_conf self.lexer_conf = lexer_conf - self.start = parser_conf.start - self.postlex = lexer_conf.postlex - self.re = re_ - - @classmethod - def deserialize(cls, data, memo, callbacks, postlex, re_): - inst = super(WithLexer, cls).deserialize(data, memo) - inst.re = re_ - inst.postlex = postlex - inst.parser = LALR_Parser.deserialize(inst.parser, memo, callbacks) - inst.init_lexer() - return inst - - def _serialize(self, data, memo): - data['parser'] = data['parser'].serialize(memo) - - def lex(self, *args): - stream = self.lexer.lex(*args) - return self.postlex.process(stream) if self.postlex else stream - - def parse(self, text, start=None): - token_stream = self.lex(text) - return self._parse(token_stream, start) - - def init_traditional_lexer(self): - self.lexer = TraditionalLexer(self.lexer_conf.tokens, re_=self.re, ignore=self.lexer_conf.ignore, user_callbacks=self.lexer_conf.callbacks, g_regex_flags=self.lexer_conf.g_regex_flags) - -class LALR_WithLexer(WithLexer): - def __init__(self, lexer_conf, parser_conf, re_, options=None): - debug = options.debug if options else False - self.re = re_ - self.parser = LALR_Parser(parser_conf, debug=debug) - WithLexer.__init__(self, lexer_conf, parser_conf, re_, options) - - self.init_lexer() - - def init_lexer(self): - raise NotImplementedError() - -class LALR_TraditionalLexer(LALR_WithLexer): - def init_lexer(self): - self.init_traditional_lexer() - -class LALR_ContextualLexer(LALR_WithLexer): - def init_lexer(self): - states = {idx:list(t.keys()) for idx, t in self.parser._parse_table.states.items()} - always_accept = self.postlex.always_accept if self.postlex else () - self.lexer = ContextualLexer(self.lexer_conf.tokens, states, - re_=self.re, - ignore=self.lexer_conf.ignore, - always_accept=always_accept, - user_callbacks=self.lexer_conf.callbacks, - g_regex_flags=self.lexer_conf.g_regex_flags) - - - def parse(self, text, start=None): - parser_state = [None] - def set_parser_state(s): - parser_state[0] = s - - token_stream = self.lex(text, lambda: parser_state[0]) - return self._parse(token_stream, start, set_parser_state) -###} + self.options = options -class LALR_CustomLexer(LALR_WithLexer): - def __init__(self, lexer_cls, lexer_conf, parser_conf, re_, options=None): - self.lexer = lexer_cls(lexer_conf, re_=re_) - debug = options.debug if options else False - self.parser = LALR_Parser(parser_conf, debug=debug) - 
WithLexer.__init__(self, lexer_conf, parser_conf, re_, options) - - -def tokenize_text(text): - line = 1 - col_start_pos = 0 - for i, ch in enumerate(text): - if '\n' in ch: - line += ch.count('\n') - col_start_pos = i + ch.rindex('\n') - yield Token('CHAR', ch, line=line, column=i - col_start_pos) - -class Earley(WithLexer): - def __init__(self, lexer_conf, parser_conf, re_, options=None): - WithLexer.__init__(self, lexer_conf, parser_conf, re_, options) - self.init_traditional_lexer() - - resolve_ambiguity = options.ambiguity == 'resolve' - debug = options.debug if options else False - self.parser = earley.Parser(parser_conf, self.match, resolve_ambiguity=resolve_ambiguity, debug=debug) - - def match(self, term, token): - return term.name == token.type - - -class XEarley(_ParserFrontend): - def __init__(self, lexer_conf, parser_conf, re_, options=None, **kw): - self.re = re_ - - self.token_by_name = {t.name:t for t in lexer_conf.tokens} - self.start = parser_conf.start - - self._prepare_match(lexer_conf) - resolve_ambiguity = options.ambiguity == 'resolve' - debug = options.debug if options else False - self.parser = xearley.Parser(parser_conf, - self.match, - ignore=lexer_conf.ignore, - resolve_ambiguity=resolve_ambiguity, - debug=debug, - **kw - ) + # Set-up parser + if parser: # From cache + self.parser = parser + else: + create_parser = _parser_creators.get(parser_conf.parser_type) + assert create_parser is not None, "{} is not supported in standalone mode".format( + parser_conf.parser_type + ) + self.parser = create_parser(lexer_conf, parser_conf, options) + + # Set-up lexer + lexer_type = lexer_conf.lexer_type + self.skip_lexer = False + if lexer_type in ('dynamic', 'dynamic_complete'): + assert lexer_conf.postlex is None + self.skip_lexer = True + return + + if isinstance(lexer_type, type): + assert issubclass(lexer_type, Lexer) + self.lexer = _wrap_lexer(lexer_type)(lexer_conf) + elif isinstance(lexer_type, str): + create_lexer = { + 'basic': create_basic_lexer, + 'contextual': create_contextual_lexer, + }[lexer_type] + self.lexer = create_lexer(lexer_conf, self.parser, lexer_conf.postlex, options) + else: + raise TypeError("Bad value for lexer_type: {lexer_type}") - def match(self, term, text, index=0): - return self.regexps[term.name].match(text, index) + if lexer_conf.postlex: + self.lexer = PostLexConnector(self.lexer, lexer_conf.postlex) + + def _verify_start(self, start=None): + if start is None: + start_decls = self.parser_conf.start + if len(start_decls) > 1: + raise ConfigurationError("Lark initialized with more than 1 possible start rule. Must specify which start rule to parse", start_decls) + start ,= start_decls + elif start not in self.parser_conf.start: + raise ConfigurationError("Unknown start rule %s. Must be one of %r" % (start, self.parser_conf.start)) + return start + + def _make_lexer_thread(self, text: str) -> Union[str, LexerThread]: + cls = (self.options and self.options._plugins.get('LexerThread')) or LexerThread + return text if self.skip_lexer else cls.from_text(self.lexer, text) + + def parse(self, text: str, start=None, on_error=None): + chosen_start = self._verify_start(start) + kw = {} if on_error is None else {'on_error': on_error} + stream = self._make_lexer_thread(text) + return self.parser.parse(stream, chosen_start, **kw) + + def parse_interactive(self, text: Optional[str]=None, start=None): + # TODO BREAK - Change text from Optional[str] to text: str = ''. 
+ # Would break behavior of exhaust_lexer(), which currently raises TypeError, and after the change would just return [] + chosen_start = self._verify_start(start) + if self.parser_conf.parser_type != 'lalr': + raise ConfigurationError("parse_interactive() currently only works with parser='lalr' ") + stream = self._make_lexer_thread(text) # type: ignore[arg-type] + return self.parser.parse_interactive(stream, chosen_start) + + +def _validate_frontend_args(parser, lexer) -> None: + assert_config(parser, ('lalr', 'earley', 'cyk')) + if not isinstance(lexer, type): # not custom lexer? + expected = { + 'lalr': ('basic', 'contextual'), + 'earley': ('basic', 'dynamic', 'dynamic_complete'), + 'cyk': ('basic', ), + }[parser] + assert_config(lexer, expected, 'Parser %r does not support lexer %%r, expected one of %%s' % parser) + + +def _get_lexer_callbacks(transformer, terminals): + result = {} + for terminal in terminals: + callback = getattr(transformer, terminal.name, None) + if callback is not None: + result[terminal.name] = callback + return result + +class PostLexConnector: + def __init__(self, lexer, postlexer): + self.lexer = lexer + self.postlexer = postlexer + + def lex(self, lexer_state, parser_state): + i = self.lexer.lex(lexer_state, parser_state) + return self.postlexer.process(i) + + + +def create_basic_lexer(lexer_conf, parser, postlex, options) -> BasicLexer: + cls = (options and options._plugins.get('BasicLexer')) or BasicLexer + return cls(lexer_conf) + +def create_contextual_lexer(lexer_conf: LexerConf, parser, postlex, options) -> ContextualLexer: + cls = (options and options._plugins.get('ContextualLexer')) or ContextualLexer + parse_table: ParseTableBase[int] = parser._parse_table + states: Dict[int, Collection[str]] = {idx:list(t.keys()) for idx, t in parse_table.states.items()} + always_accept: Collection[str] = postlex.always_accept if postlex else () + return cls(lexer_conf, states, always_accept=always_accept) + +def create_lalr_parser(lexer_conf: LexerConf, parser_conf: ParserConf, options=None) -> LALR_Parser: + debug = options.debug if options else False + strict = options.strict if options else False + cls = (options and options._plugins.get('LALR_Parser')) or LALR_Parser + return cls(parser_conf, debug=debug, strict=strict) + +_parser_creators['lalr'] = create_lalr_parser + +###} - def _prepare_match(self, lexer_conf): +class EarleyRegexpMatcher: + def __init__(self, lexer_conf): self.regexps = {} - for t in lexer_conf.tokens: - if t.priority != 1: - raise ValueError("Dynamic Earley doesn't support weights on terminals", t, t.priority) + for t in lexer_conf.terminals: regexp = t.pattern.to_regexp() try: width = get_regexp_width(regexp)[0] except ValueError: - raise ValueError("Bad regexp in token %s: %s" % (t.name, regexp)) + raise GrammarError("Bad regexp in token %s: %s" % (t.name, regexp)) else: if width == 0: - raise ValueError("Dynamic Earley doesn't allow zero-width regexps", t) + raise GrammarError("Dynamic Earley doesn't allow zero-width regexps", t) + if lexer_conf.use_bytes: + regexp = regexp.encode('utf-8') + + self.regexps[t.name] = lexer_conf.re_module.compile(regexp, lexer_conf.g_regex_flags) + + def match(self, term, text, index=0): + return self.regexps[term.name].match(text, index) - self.regexps[t.name] = self.re.compile(regexp, lexer_conf.g_regex_flags) - def parse(self, text, start): - return self._parse(text, start) +def create_earley_parser__dynamic(lexer_conf: LexerConf, parser_conf: ParserConf, **kw): + if lexer_conf.callbacks: + raise 
GrammarError("Earley's dynamic lexer doesn't support lexer_callbacks.") -class XEarley_CompleteLex(XEarley): - def __init__(self, *args, **kw): - XEarley.__init__(self, *args, complete_lex=True, **kw) + earley_matcher = EarleyRegexpMatcher(lexer_conf) + return xearley.Parser(lexer_conf, parser_conf, earley_matcher.match, **kw) +def _match_earley_basic(term, token): + return term.name == token.type +def create_earley_parser__basic(lexer_conf: LexerConf, parser_conf: ParserConf, **kw): + return earley.Parser(lexer_conf, parser_conf, _match_earley_basic, **kw) + +def create_earley_parser(lexer_conf: LexerConf, parser_conf: ParserConf, options) -> earley.Parser: + resolve_ambiguity = options.ambiguity == 'resolve' + debug = options.debug if options else False + tree_class = options.tree_class or Tree if options.ambiguity != 'forest' else None + + extra = {} + if lexer_conf.lexer_type == 'dynamic': + f = create_earley_parser__dynamic + elif lexer_conf.lexer_type == 'dynamic_complete': + extra['complete_lex'] = True + f = create_earley_parser__dynamic + else: + f = create_earley_parser__basic + + return f(lexer_conf, parser_conf, resolve_ambiguity=resolve_ambiguity, + debug=debug, tree_class=tree_class, ordered_sets=options.ordered_sets, **extra) -class CYK(WithLexer): - def __init__(self, lexer_conf, parser_conf, re_, options=None): - WithLexer.__init__(self, lexer_conf, parser_conf, re_, options) - self.init_traditional_lexer() - self._analysis = GrammarAnalyzer(parser_conf) +class CYK_FrontEnd: + def __init__(self, lexer_conf, parser_conf, options=None): self.parser = cyk.Parser(parser_conf.rules) self.callbacks = parser_conf.callbacks - def parse(self, text, start): - tokens = list(self.lex(text)) - parse = self._parse(tokens, start) - parse = self._transform(parse) - return parse + def parse(self, lexer_thread, start): + tokens = list(lexer_thread.lex(None)) + tree = self.parser.parse(tokens, start) + return self._transform(tree) def _transform(self, tree): subtrees = list(tree.iter_subtrees()) @@ -231,3 +238,20 @@ def _transform(self, tree): def _apply_callback(self, tree): return self.callbacks[tree.rule](tree.children) + +_parser_creators['earley'] = create_earley_parser +_parser_creators['cyk'] = CYK_FrontEnd + + +def _construct_parsing_frontend( + parser_type: _ParserArgType, + lexer_type: _LexerArgType, + lexer_conf, + parser_conf, + options +): + assert isinstance(lexer_conf, LexerConf) + assert isinstance(parser_conf, ParserConf) + parser_conf.parser_type = parser_type + lexer_conf.lexer_type = lexer_type + return ParsingFrontend(lexer_conf, parser_conf, options) diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/cyk.py b/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/cyk.py index ff0924f2..b5334f90 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/cyk.py +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/cyk.py @@ -13,17 +13,12 @@ from ..tree import Tree from ..grammar import Terminal as T, NonTerminal as NT, Symbol -try: - xrange -except NameError: - xrange = range - def match(t, s): assert isinstance(t, T) return t.name == s.type -class Rule(object): +class Rule: """Context-free grammar rule.""" def __init__(self, lhs, rhs, weight, alias): @@ -51,7 +46,7 @@ def __ne__(self, other): return not (self == other) -class Grammar(object): +class Grammar: """Context-free grammar.""" def __init__(self, rules): @@ -68,7 +63,7 @@ def __repr__(self): # Parse tree data structures -class RuleNode(object): +class RuleNode: """A node in the parse tree, 
which also contains the full rhs rule.""" def __init__(self, rule, children, weight=0): @@ -81,7 +76,7 @@ def __repr__(self): -class Parser(object): +class Parser: """Parser wrapper.""" def __init__(self, rules): @@ -153,11 +148,11 @@ def _parse(s, g): trees[(i, i)][rule.lhs] = RuleNode(rule, [T(w)], weight=rule.weight) # Iterate over lengths of sub-sentences - for l in xrange(2, len(s) + 1): + for l in range(2, len(s) + 1): # Iterate over sub-sentences with the given length - for i in xrange(len(s) - l + 1): + for i in range(len(s) - l + 1): # Choose partition of the sub-sentence in [1, l) - for p in xrange(i + 1, i + l): + for p in range(i + 1, i + l): span1 = (i, p - 1) span2 = (p, i + l - 1) for r1, r2 in itertools.product(table[span1], table[span2]): @@ -186,7 +181,7 @@ def _parse(s, g): # * Empty rules (epsilon rules) -class CnfWrapper(object): +class CnfWrapper: """CNF wrapper for grammar. Validates that the input grammar is CNF and provides helper data structures. @@ -250,7 +245,7 @@ def get_any_nt_unit_rule(g): def _remove_unit_rule(g, rule): - """Removes 'rule' from 'g' without changing the langugage produced by 'g'.""" + """Removes 'rule' from 'g' without changing the language produced by 'g'.""" new_rules = [x for x in g.rules if x != rule] refs = [x for x in g.rules if x.lhs == rule.rhs[0]] new_rules += [build_unit_skiprule(rule, ref) for ref in refs] @@ -262,7 +257,7 @@ def _split(rule): rule_str = str(rule.lhs) + '__' + '_'.join(str(x) for x in rule.rhs) rule_name = '__SP_%s' % (rule_str) + '_%d' yield Rule(rule.lhs, [rule.rhs[0], NT(rule_name % 1)], weight=rule.weight, alias=rule.alias) - for i in xrange(1, len(rule.rhs) - 2): + for i in range(1, len(rule.rhs) - 2): yield Rule(NT(rule_name % i), [rule.rhs[i], NT(rule_name % (i + 1))], weight=0, alias='Split') yield Rule(NT(rule_name % (len(rule.rhs) - 2)), rule.rhs[-2:], weight=0, alias='Split') diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/earley.py b/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/earley.py index 59e9a06a..bc858860 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/earley.py +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/earley.py @@ -1,4 +1,4 @@ -"""This module implements an scanerless Earley parser. +"""This module implements an Earley parser. The core Earley algorithm used here is based on Elizabeth Scott's implementation, here: https://www.sciencedirect.com/science/article/pii/S1571066108001497 @@ -6,31 +6,46 @@ That is probably the best reference for understanding the algorithm here. The Earley parser outputs an SPPF-tree as per that document. 
The SPPF tree format -is better documented here: - http://www.bramvandersanden.com/post/2014/06/shared-packed-parse-forest/ +is explained here: https://lark-parser.readthedocs.io/en/latest/_static/sppf/sppf.html """ -import logging +from typing import TYPE_CHECKING, Callable, Optional, List, Any from collections import deque -from ..visitors import Transformer_InPlace, v_args +from ..lexer import Token +from ..tree import Tree from ..exceptions import UnexpectedEOF, UnexpectedToken +from ..utils import logger, OrderedSet from .grammar_analysis import GrammarAnalyzer from ..grammar import NonTerminal -from .earley_common import Item, TransitiveItem -from .earley_forest import ForestToTreeVisitor, ForestSumVisitor, SymbolNode, ForestToAmbiguousTreeVisitor +from .earley_common import Item +from .earley_forest import ForestSumVisitor, SymbolNode, StableSymbolNode, TokenNode, ForestToParseTree + +if TYPE_CHECKING: + from ..common import LexerConf, ParserConf class Parser: - def __init__(self, parser_conf, term_matcher, resolve_ambiguity=True, debug=False): + lexer_conf: 'LexerConf' + parser_conf: 'ParserConf' + debug: bool + + def __init__(self, lexer_conf: 'LexerConf', parser_conf: 'ParserConf', term_matcher: Callable, + resolve_ambiguity: bool=True, debug: bool=False, + tree_class: Optional[Callable[[str, List], Any]]=Tree, ordered_sets: bool=True): analysis = GrammarAnalyzer(parser_conf) + self.lexer_conf = lexer_conf self.parser_conf = parser_conf self.resolve_ambiguity = resolve_ambiguity self.debug = debug + self.Tree = tree_class + self.Set = OrderedSet if ordered_sets else set + self.SymbolNode = StableSymbolNode if ordered_sets else SymbolNode self.FIRST = analysis.FIRST self.NULLABLE = analysis.NULLABLE self.callbacks = parser_conf.callbacks - self.predictions = {} + # TODO add typing info + self.predictions = {} # type: ignore[var-annotated] ## These could be moved to the grammar analyzer. Pre-computing these is *much* faster than # the slow 'isupper' in is_terminal. @@ -42,13 +57,21 @@ def __init__(self, parser_conf, term_matcher, resolve_ambiguity=True, debug=Fals if rule.origin not in self.predictions: self.predictions[rule.origin] = [x.rule for x in analysis.expand_rule(rule.origin)] - ## Detect if any rules have priorities set. If the user specified priority = "none" then - # the priorities will be stripped from all rules before they reach us, allowing us to + ## Detect if any rules/terminals have priorities set. If the user specified priority = None, then + # the priorities will be stripped from all rules/terminals before they reach us, allowing us to # skip the extra tree walk. We'll also skip this if the user just didn't specify priorities - # on any rules. + # on any rules/terminals. 
if self.forest_sum_visitor is None and rule.options.priority is not None: self.forest_sum_visitor = ForestSumVisitor + # Check terminals for priorities + # Ignore terminal priorities if the basic lexer is used + if self.lexer_conf.lexer_type != 'basic' and self.forest_sum_visitor is None: + for term in self.lexer_conf.terminals: + if term.priority: + self.forest_sum_visitor = ForestSumVisitor + break + self.term_matcher = term_matcher @@ -74,7 +97,7 @@ def predict_and_complete(self, i, to_scan, columns, transitives): if item.is_complete: ### (item.s == string) if item.node is None: label = (item.s, item.start, i) - item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label)) + item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, self.SymbolNode(*label)) item.node.add_family(item.s, item.rule, item.start, None, None) # create_leo_transitives(item.rule.origin, item.start) @@ -89,7 +112,7 @@ def predict_and_complete(self, i, to_scan, columns, transitives): new_item = Item(transitive.rule, transitive.ptr, transitive.start) label = (root_transitive.s, root_transitive.start, i) - new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label)) + new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, self.SymbolNode(*label)) new_item.node.add_path(root_transitive, item.node) if new_item.expect in self.TERMINALS: # Add (B :: aC.B, h, y) to Q @@ -113,7 +136,7 @@ def predict_and_complete(self, i, to_scan, columns, transitives): for originator in originators: new_item = originator.advance() label = (new_item.s, originator.start, i) - new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label)) + new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, self.SymbolNode(*label)) new_item.node.add_family(new_item.s, new_item.rule, i, originator.node, item.node) if new_item.expect in self.TERMINALS: # Add (B :: aC.B, h, y) to Q @@ -134,7 +157,7 @@ def predict_and_complete(self, i, to_scan, columns, transitives): if item.expect in held_completions: new_item = item.advance() label = (new_item.s, item.start, i) - new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label)) + new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, self.SymbolNode(*label)) new_item.node.add_family(new_item.s, new_item.rule, new_item.start, item.node, held_completions[item.expect]) new_items.append(new_item) @@ -145,7 +168,7 @@ def predict_and_complete(self, i, to_scan, columns, transitives): column.add(new_item) items.append(new_item) - def _parse(self, stream, columns, to_scan, start_symbol=None): + def _parse(self, lexer, columns, to_scan, start_symbol=None): def is_quasi_complete(item): if item.is_complete: return True @@ -159,60 +182,8 @@ def is_quasi_complete(item): quasi = quasi.advance() return True - def create_leo_transitives(origin, start): - visited = set() - to_create = [] - trule = None - previous = None - - ### Recursively walk backwards through the Earley sets until we find the - # first transitive candidate. If this is done continuously, we shouldn't - # have to walk more than 1 hop. 
- while True: - if origin in transitives[start]: - previous = trule = transitives[start][origin] - break - - is_empty_rule = not self.FIRST[origin] - if is_empty_rule: - break - - candidates = [ candidate for candidate in columns[start] if candidate.expect is not None and origin == candidate.expect ] - if len(candidates) != 1: - break - originator = next(iter(candidates)) - - if originator is None or originator in visited: - break - - visited.add(originator) - if not is_quasi_complete(originator): - break - - trule = originator.advance() - if originator.start != start: - visited.clear() - - to_create.append((origin, start, originator)) - origin = originator.rule.origin - start = originator.start - - # If a suitable Transitive candidate is not found, bail. - if trule is None: - return - - #### Now walk forwards and create Transitive Items in each set we walked through; and link - # each transitive item to the next set forwards. - while to_create: - origin, start, originator = to_create.pop() - titem = None - if previous is not None: - titem = previous.next_titem = TransitiveItem(origin, trule, originator, previous.column) - else: - titem = TransitiveItem(origin, trule, originator, start) - previous = transitives[start][origin] = titem - - + # def create_leo_transitives(origin, start): + # ... # removed at commit 4c1cfb2faf24e8f8bff7112627a00b94d261b420 def scan(i, token, to_scan): """The core Earley Scanner. @@ -222,18 +193,27 @@ def scan(i, token, to_scan): Earley predictor, based on the previously completed tokens. This ensures that at each phase of the parse we have a custom lexer context, allowing for more complex ambiguities.""" - next_to_scan = set() - next_set = set() + next_to_scan = self.Set() + next_set = self.Set() columns.append(next_set) transitives.append({}) node_cache = {} - for item in set(to_scan): + for item in self.Set(to_scan): if match(item.expect, token): new_item = item.advance() label = (new_item.s, new_item.start, i) - new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label)) - new_item.node.add_family(new_item.s, item.rule, new_item.start, item.node, token) + # 'terminals' may not contain token.type when using %declare + # Additionally, token is not always a Token + # For example, it can be a Tree when using TreeMatcher + term = terminals.get(token.type) if isinstance(token, Token) else None + # Set the priority of the token node to 0 so that the + # terminal priorities do not affect the Tree chosen by + # ForestSumVisitor after the basic lexer has already + # "used up" the terminal priorities + token_node = TokenNode(token, term, priority=0) + new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, self.SymbolNode(*label)) + new_item.node.add_family(new_item.s, item.rule, new_item.start, item.node, token_node) if new_item.expect in self.TERMINALS: # add (B ::= Aai+1.B, h, y) to Q' @@ -244,7 +224,7 @@ def scan(i, token, to_scan): if not next_set and not next_to_scan: expect = {i.expect.name for i in to_scan} - raise UnexpectedToken(token, expect, considered_rules = set(to_scan)) + raise UnexpectedToken(token, expect, considered_rules=set(to_scan), state=frozenset(i.s for i in to_scan)) return next_to_scan @@ -252,6 +232,8 @@ def scan(i, token, to_scan): # Define parser functions match = self.term_matcher + terminals = self.lexer_conf.terminals_by_name + # Cache for nodes & tokens created in a particular parse step. 
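The cache the comment above introduces is what keeps the SPPF "shared": every lookup goes through a label of (symbol, start, end), so all items deriving the same symbol over the same span receive the same node object. The idiom, reduced to a stand-in node class:

    class SymbolNode:
        def __init__(self, s, start, end):
            self.s, self.start, self.end = s, start, end

    node_cache = {}

    def shared_node(label):
        # same effect as:
        #   cache[label] if label in cache else cache.setdefault(label, SymbolNode(*label))
        if label not in node_cache:
            node_cache[label] = SymbolNode(*label)
        return node_cache[label]

    a = shared_node(('expr', 0, 3))
    b = shared_node(('expr', 0, 3))
    assert a is b          # one shared node per (symbol, start, end) span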
transitives = [{}] @@ -260,25 +242,29 @@ def scan(i, token, to_scan): # Completions will be added to the SPPF tree, and predictions will be recursively # processed down to terminals/empty nodes to be added to the scanner for the next # step. + expects = {i.expect for i in to_scan} i = 0 - for token in stream: + for token in lexer.lex(expects): self.predict_and_complete(i, to_scan, columns, transitives) to_scan = scan(i, token, to_scan) i += 1 + expects.clear() + expects |= {i.expect for i in to_scan} + self.predict_and_complete(i, to_scan, columns, transitives) ## Column is now the final column in the parse. assert i == len(columns)-1 return to_scan - def parse(self, stream, start): + def parse(self, lexer, start): assert start, start start_symbol = NonTerminal(start) - columns = [set()] - to_scan = set() # The scan buffer. 'Q' in E.Scott's paper. + columns = [self.Set()] + to_scan = self.Set() # The scan buffer. 'Q' in E.Scott's paper. ## Predict for the start_symbol. # Add predicted items to the first Earley set (for the predictor) if they @@ -290,39 +276,33 @@ def parse(self, stream, start): else: columns[0].add(item) - to_scan = self._parse(stream, columns, to_scan, start_symbol) + to_scan = self._parse(lexer, columns, to_scan, start_symbol) # If the parse was successful, the start # symbol should have been completed in the last step of the Earley cycle, and will be in # this column. Find the item for the start_symbol, which is the root of the SPPF tree. solutions = [n.node for n in columns[-1] if n.is_complete and n.node is not None and n.s == start_symbol and n.start == 0] + if not solutions: + expected_terminals = [t.expect.name for t in to_scan] + raise UnexpectedEOF(expected_terminals, state=frozenset(i.s for i in to_scan)) + if self.debug: from .earley_forest import ForestToPyDotVisitor try: debug_walker = ForestToPyDotVisitor() except ImportError: - logging.warning("Cannot find dependency 'pydot', will not generate sppf debug image") + logger.warning("Cannot find dependency 'pydot', will not generate sppf debug image") else: debug_walker.visit(solutions[0], "sppf.png") - if not solutions: - expected_tokens = [t.expect for t in to_scan] - raise UnexpectedEOF(expected_tokens) - elif len(solutions) > 1: + if len(solutions) > 1: assert False, 'Earley should not generate multiple start symbol items!' - # Perform our SPPF -> AST conversion using the right ForestVisitor. 
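The rewritten main loop above also hands the lexer a live set of expected terminals (expects), rebuilt from to_scan after every step, so a lexer that honors it can restrict matching to terminals the parser can currently accept. A toy model of that handshake (the lexer and item types here are hypothetical):

    class Item:
        def __init__(self, expect):
            self.expect = expect

    class ToyLexer:
        def __init__(self, tokens):
            self.tokens = tokens
        def lex(self, expects):
            # `expects` is mutated by the caller between yields
            for tok in self.tokens:
                assert tok in expects, '%r is not scannable here' % tok
                yield tok

    to_scan = {Item('NAME')}
    expects = {i.expect for i in to_scan}
    for token in ToyLexer(['NAME', 'NUMBER']).lex(expects):
        to_scan = {Item('NUMBER')}           # stand-in for predict/complete + scan
        expects.clear()
        expects |= {i.expect for i in to_scan}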
- forest_tree_visitor_cls = ForestToTreeVisitor if self.resolve_ambiguity else ForestToAmbiguousTreeVisitor - forest_tree_visitor = forest_tree_visitor_cls(self.callbacks, self.forest_sum_visitor and self.forest_sum_visitor()) - - return forest_tree_visitor.visit(solutions[0]) - - -class ApplyCallbacks(Transformer_InPlace): - def __init__(self, postprocess): - self.postprocess = postprocess + if self.Tree is not None: + # Perform our SPPF -> AST conversion + transformer = ForestToParseTree(self.Tree, self.callbacks, self.forest_sum_visitor and self.forest_sum_visitor(), self.resolve_ambiguity) + return transformer.transform(solutions[0]) - @v_args(meta=True) - def drv(self, children, meta): - return self.postprocess[meta.rule](children) + # return the root of the SPPF + return solutions[0] diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/earley_common.py b/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/earley_common.py index 6bd614ba..46e242b4 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/earley_common.py +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/earley_common.py @@ -1,21 +1,8 @@ -"This module implements an Earley Parser" +"""This module implements useful building blocks for the Earley parser +""" -# The parser uses a parse-forest to keep track of derivations and ambiguations. -# When the parse ends successfully, a disambiguation stage resolves all ambiguity -# (right now ambiguity resolution is not developed beyond the needs of lark) -# Afterwards the parse tree is reduced (transformed) according to user callbacks. -# I use the no-recursion version of Transformer, because the tree might be -# deeper than Python's recursion limit (a bit absurd, but that's life) -# -# The algorithm keeps track of each state set, using a corresponding Column instance. -# Column keeps track of new items using NewsList instances. -# -# Author: Erez Shinan (2017) -# Email : erezshin@gmail.com -from ..grammar import NonTerminal, Terminal - -class Item(object): +class Item: "An Earley Item, the atom of the algorithm." __slots__ = ('s', 'rule', 'ptr', 'start', 'is_complete', 'expect', 'previous', 'node', '_hash') @@ -51,25 +38,5 @@ def __repr__(self): return '%s (%d)' % (symbol, self.start) -class TransitiveItem(Item): - __slots__ = ('recognized', 'reduction', 'column', 'next_titem') - def __init__(self, recognized, trule, originator, start): - super(TransitiveItem, self).__init__(trule.rule, trule.ptr, trule.start) - self.recognized = recognized - self.reduction = originator - self.column = start - self.next_titem = None - self._hash = hash((self.s, self.start, self.recognized)) - - def __eq__(self, other): - if not isinstance(other, TransitiveItem): - return False - return self is other or (type(self.s) == type(other.s) and self.s == other.s and self.start == other.start and self.recognized == other.recognized) - - def __hash__(self): - return self._hash - - def __repr__(self): - before = ( expansion.name for expansion in self.rule.expansion[:self.ptr] ) - after = ( expansion.name for expansion in self.rule.expansion[self.ptr:] ) - return '{} : {} -> {}* {} ({}, {})'.format(self.recognized.name, self.rule.origin.name, ' '.join(before), ' '.join(after), self.column, self.start) +# class TransitiveItem(Item): +# ... 
# removed at commit 4c1cfb2faf24e8f8bff7112627a00b94d261b420 diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/earley_forest.py b/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/earley_forest.py index c8b4f253..301f4d1d 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/earley_forest.py +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/earley_forest.py @@ -4,19 +4,22 @@ in order to store complex ambiguities. Full reference and more details is here: -http://www.bramvandersanden.com/post/2014/06/shared-packed-parse-forest/ +https://web.archive.org/web/20190616123959/http://www.bramvandersanden.com/post/2014/06/shared-packed-parse-forest/ """ +from typing import Type, AbstractSet from random import randint -from math import isinf from collections import deque from operator import attrgetter from importlib import import_module +from functools import partial +from ..parse_tree_builder import AmbiguousIntermediateExpander +from ..visitors import Discard +from ..utils import logger, OrderedSet from ..tree import Tree -from ..exceptions import ParseError -class ForestNode(object): +class ForestNode: pass class SymbolNode(ForestNode): @@ -32,14 +35,24 @@ class SymbolNode(ForestNode): with each Packed Node child representing a single derivation of a production. Hence a Symbol Node with a single child is unambiguous. + + Parameters: + s: A Symbol, or a tuple of (rule, ptr) for an intermediate node. + start: The index of the start of the substring matched by this symbol (inclusive). + end: The index of the end of the substring matched by this symbol (exclusive). + + Properties: + is_intermediate: True if this node is an intermediate node. + priority: The priority of the node's symbol. """ + Set: Type[AbstractSet] = set # Overridden by StableSymbolNode __slots__ = ('s', 'start', 'end', '_children', 'paths', 'paths_loaded', 'priority', 'is_intermediate', '_hash') def __init__(self, s, start, end): self.s = s self.start = start self.end = end - self._children = set() - self.paths = set() + self._children = self.Set() + self.paths = self.Set() self.paths_loaded = False ### We use inf here as it can be safely negated without resorting to conditionals, @@ -57,7 +70,7 @@ def add_path(self, transitive, node): def load_paths(self): for transitive, node in self.paths: if transitive.next_titem is not None: - vn = SymbolNode(transitive.next_titem.s, transitive.next_titem.start, self.end) + vn = type(self)(transitive.next_titem.s, transitive.next_titem.start, self.end) vn.add_path(transitive.next_titem, node) self.add_family(transitive.reduction.rule.origin, transitive.reduction.rule, transitive.reduction.start, transitive.reduction.node, vn) else: @@ -66,11 +79,15 @@ def load_paths(self): @property def is_ambiguous(self): + """Returns True if this node is ambiguous.""" return len(self.children) > 1 @property def children(self): - if not self.paths_loaded: self.load_paths() + """Returns a list of this node's children sorted from greatest to + least priority.""" + if not self.paths_loaded: + self.load_paths() return sorted(self._children, key=attrgetter('sort_key')) def __iter__(self): @@ -95,9 +112,20 @@ def __repr__(self): symbol = self.s.name return "({}, {}, {}, {})".format(symbol, self.start, self.end, self.priority) +class StableSymbolNode(SymbolNode): + "A version of SymbolNode that uses OrderedSet for output stability" + Set = OrderedSet + class PackedNode(ForestNode): """ A Packed Node represents a single derivation in a symbol node. 
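StableSymbolNode, defined just above, changes nothing but the Set class attribute: with OrderedSet backing _children and paths, ambiguous derivations come back in insertion order rather than hash order, which is what the ordered_sets parser option buys. The override pattern in isolation (with a minimal stand-in for lark's OrderedSet):

    class OrderedSet(dict):
        # stand-in: a dict preserves insertion order; lark ships its own OrderedSet
        def add(self, item):
            self[item] = None

    class SymbolNode:
        Set = set                       # overridden by StableSymbolNode
        def __init__(self):
            self._children = self.Set()

    class StableSymbolNode(SymbolNode):
        Set = OrderedSet

    assert isinstance(SymbolNode()._children, set)
    assert isinstance(StableSymbolNode()._children, OrderedSet)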
+ + Parameters: + rule: The rule associated with this node. + parent: The parent of this node. + left: The left child of this node. ``None`` if one does not exist. + right: The right child of this node. ``None`` if one does not exist. + priority: The priority of this node. """ __slots__ = ('parent', 's', 'rule', 'start', 'left', 'right', 'priority', '_hash') def __init__(self, parent, s, rule, start, left, right): @@ -124,8 +152,14 @@ def sort_key(self): """ return self.is_empty, -self.priority, self.rule.order + @property + def children(self): + """Returns a list of this node's children.""" + return [x for x in [self.left, self.right] if x is not None] + def __iter__(self): - return iter([self.left, self.right]) + yield self.left + yield self.right def __eq__(self, other): if not isinstance(other, PackedNode): @@ -146,22 +180,107 @@ def __repr__(self): symbol = self.s.name return "({}, {}, {}, {})".format(symbol, self.start, self.priority, self.rule.order) -class ForestVisitor(object): +class TokenNode(ForestNode): + """ + A Token Node represents a matched terminal and is always a leaf node. + + Parameters: + token: The Token associated with this node. + term: The TerminalDef matched by the token. + priority: The priority of this node. + """ + __slots__ = ('token', 'term', 'priority', '_hash') + def __init__(self, token, term, priority=None): + self.token = token + self.term = term + if priority is not None: + self.priority = priority + else: + self.priority = term.priority if term is not None else 0 + self._hash = hash(token) + + def __eq__(self, other): + if not isinstance(other, TokenNode): + return False + return self is other or (self.token == other.token) + + def __hash__(self): + return self._hash + + def __repr__(self): + return repr(self.token) + +class ForestVisitor: """ An abstract base class for building forest visitors. - Use this as a base when you need to walk the forest. + This class performs a controllable depth-first walk of an SPPF. + The visitor will not enter cycles and will backtrack if one is encountered. + Subclasses are notified of cycles through the ``on_cycle`` method. + + Behavior for visit events is defined by overriding the + ``visit*node*`` functions. + + The walk is controlled by the return values of the ``visit*node_in`` + methods. Returning a node(s) will schedule them to be visited. The visitor + will begin to backtrack if no nodes are returned. + + Parameters: + single_visit: If ``True``, non-Token nodes will only be visited once. """ - __slots__ = ['result'] - def visit_token_node(self, node): pass - def visit_symbol_node_in(self, node): pass - def visit_symbol_node_out(self, node): pass - def visit_packed_node_in(self, node): pass - def visit_packed_node_out(self, node): pass + def __init__(self, single_visit=False): + self.single_visit = single_visit + + def visit_token_node(self, node): + """Called when a ``Token`` is visited. ``Token`` nodes are always leaves.""" + pass + + def visit_symbol_node_in(self, node): + """Called when a symbol node is visited. Nodes that are returned + will be scheduled to be visited. If ``visit_intermediate_node_in`` + is not implemented, this function will be called for intermediate + nodes as well.""" + pass + + def visit_symbol_node_out(self, node): + """Called after all nodes returned from a corresponding ``visit_symbol_node_in`` + call have been visited. 
If ``visit_intermediate_node_out`` + is not implemented, this function will be called for intermediate + nodes as well.""" + pass + + def visit_packed_node_in(self, node): + """Called when a packed node is visited. Nodes that are returned + will be scheduled to be visited. """ + pass + + def visit_packed_node_out(self, node): + """Called after all nodes returned from a corresponding ``visit_packed_node_in`` + call have been visited.""" + pass + + def on_cycle(self, node, path): + """Called when a cycle is encountered. + + Parameters: + node: The node that causes a cycle. + path: The list of nodes being visited: nodes that have been + entered but not exited. The first element is the root in a forest + visit, and the last element is the node visited most recently. + ``path`` should be treated as read-only. + """ + pass + + def get_cycle_in_path(self, node, path): + """A utility function for use in ``on_cycle`` to obtain a slice of + ``path`` that only contains the nodes that make up the cycle.""" + index = len(path) - 1 + while id(path[index]) != id(node): + index -= 1 + return path[index:] def visit(self, root): - self.result = None # Visiting is a list of IDs of all symbol/intermediate nodes currently in # the stack. It serves two purposes: to detect when we 'recurse' in and out # of a symbol/intermediate so that we can process both up and down. Also, @@ -169,6 +288,13 @@ def visit(self, root): # to recurse into a node that's already on the stack (infinite recursion). visiting = set() + # set of all nodes that have been visited + visited = set() + + # a list of nodes that are currently being visited + # used for the `on_cycle` callback + path = [] + # We do not use recursion here to walk the Forest due to the limited # stack size in python. Therefore input_stack is essentially our stack. input_stack = deque([root]) @@ -179,7 +305,11 @@ def visit(self, root): vpni = getattr(self, 'visit_packed_node_in') vsno = getattr(self, 'visit_symbol_node_out') vsni = getattr(self, 'visit_symbol_node_in') + vino = getattr(self, 'visit_intermediate_node_out', vsno) + vini = getattr(self, 'visit_intermediate_node_in', vsni) vtn = getattr(self, 'visit_token_node') + oc = getattr(self, 'on_cycle') + while input_stack: current = next(reversed(input_stack)) try: @@ -195,37 +325,131 @@ def visit(self, root): continue if id(next_node) in visiting: - raise ParseError("Infinite recursion in grammar, in rule '%s'!" 
% next_node.s.name) + oc(next_node, path) + continue input_stack.append(next_node) continue - if not isinstance(current, ForestNode): - vtn(current) + if isinstance(current, TokenNode): + vtn(current.token) input_stack.pop() continue current_id = id(current) if current_id in visiting: - if isinstance(current, PackedNode): vpno(current) - else: vsno(current) + if isinstance(current, PackedNode): + vpno(current) + elif current.is_intermediate: + vino(current) + else: + vsno(current) input_stack.pop() + path.pop() visiting.remove(current_id) - continue + visited.add(current_id) + elif self.single_visit and current_id in visited: + input_stack.pop() else: visiting.add(current_id) + path.append(current) - if isinstance(current, PackedNode): next_node = vpni(current) - else: next_node = vsni(current) + if isinstance(current, PackedNode): + next_node = vpni(current) + elif current.is_intermediate: + next_node = vini(current) + else: + next_node = vsni(current) if next_node is None: continue - if id(next_node) in visiting: - raise ParseError("Infinite recursion in grammar!") + if not isinstance(next_node, ForestNode): + next_node = iter(next_node) + elif id(next_node) in visiting: + oc(next_node, path) + continue input_stack.append(next_node) - continue - return self.result +class ForestTransformer(ForestVisitor): + """The base class for a bottom-up forest transformation. Most users will + want to use ``TreeForestTransformer`` instead as it has a friendlier + interface and covers most use cases. + + Transformations are applied via inheritance and overriding of the + ``transform*node`` methods. + + ``transform_token_node`` receives a ``Token`` as an argument. + All other methods receive the node that is being transformed and + a list of the results of the transformations of that node's children. + The return values of these methods are the resulting transformations. + + If ``Discard`` is raised in a node's transformation, no data from that node + will be passed to its parent's transformation.
+ """ + + def __init__(self): + super(ForestTransformer, self).__init__() + # results of transformations + self.data = dict() + # used to track parent nodes + self.node_stack = deque() + + def transform(self, root): + """Perform a transformation on an SPPF.""" + self.node_stack.append('result') + self.data['result'] = [] + self.visit(root) + assert len(self.data['result']) <= 1 + if self.data['result']: + return self.data['result'][0] + + def transform_symbol_node(self, node, data): + """Transform a symbol node.""" + return node + + def transform_intermediate_node(self, node, data): + """Transform an intermediate node.""" + return node + + def transform_packed_node(self, node, data): + """Transform a packed node.""" + return node + + def transform_token_node(self, node): + """Transform a ``Token``.""" + return node + + def visit_symbol_node_in(self, node): + self.node_stack.append(id(node)) + self.data[id(node)] = [] + return node.children + + def visit_packed_node_in(self, node): + self.node_stack.append(id(node)) + self.data[id(node)] = [] + return node.children + + def visit_token_node(self, node): + transformed = self.transform_token_node(node) + if transformed is not Discard: + self.data[self.node_stack[-1]].append(transformed) + + def _visit_node_out_helper(self, node, method): + self.node_stack.pop() + transformed = method(node, self.data[id(node)]) + if transformed is not Discard: + self.data[self.node_stack[-1]].append(transformed) + del self.data[id(node)] + + def visit_symbol_node_out(self, node): + self._visit_node_out_helper(node, self.transform_symbol_node) + + def visit_intermediate_node_out(self, node): + self._visit_node_out_helper(node, self.transform_intermediate_node) + + def visit_packed_node_out(self, node): + self._visit_node_out_helper(node, self.transform_packed_node) + class ForestSumVisitor(ForestVisitor): """ @@ -243,8 +467,12 @@ class ForestSumVisitor(ForestVisitor): items created during parsing than there are SPPF nodes in the final tree. """ + def __init__(self): + super(ForestSumVisitor, self).__init__(single_visit=True) + def visit_packed_node_in(self, node): - return iter([node.left, node.right]) + yield node.left + yield node.right def visit_symbol_node_in(self, node): return iter(node.children) @@ -258,102 +486,249 @@ def visit_packed_node_out(self, node): def visit_symbol_node_out(self, node): node.priority = max(child.priority for child in node.children) -class ForestToTreeVisitor(ForestVisitor): +class PackedData(): + """Used in transformationss of packed nodes to distinguish the data + that comes from the left child and the right child. """ - A Forest visitor which converts an SPPF forest to an unambiguous AST. - - The implementation in this visitor walks only the first ambiguous child - of each symbol node. When it finds an ambiguous symbol node it first - calls the forest_sum_visitor implementation to sort the children - into preference order using the algorithms defined there; so the first - child should always be the highest preference. The forest_sum_visitor - implementation should be another ForestVisitor which sorts the children - according to some priority mechanism. 
+ + class _NoData(): + pass + + NO_DATA = _NoData() + + def __init__(self, node, data): + self.left = self.NO_DATA + self.right = self.NO_DATA + if data: + if node.left is not None: + self.left = data[0] + if len(data) > 1: + self.right = data[1] + else: + self.right = data[0] + +class ForestToParseTree(ForestTransformer): + """Used by the earley parser when ambiguity equals 'resolve' or + 'explicit'. Transforms an SPPF into an (ambiguous) parse tree. + + Parameters: + tree_class: The tree class to use for construction + callbacks: A dictionary of rules to functions that output a tree + prioritizer: A ``ForestVisitor`` that manipulates the priorities of ForestNodes + resolve_ambiguity: If True, ambiguities will be resolved based on + priorities. Otherwise, `_ambig` nodes will be in the resulting tree. + use_cache: If True, the results of packed node transformations will be cached. """ - __slots__ = ['forest_sum_visitor', 'callbacks', 'output_stack'] - def __init__(self, callbacks, forest_sum_visitor = None): - assert callbacks - self.forest_sum_visitor = forest_sum_visitor + + def __init__(self, tree_class=Tree, callbacks=dict(), prioritizer=ForestSumVisitor(), resolve_ambiguity=True, use_cache=True): + super(ForestToParseTree, self).__init__() + self.tree_class = tree_class self.callbacks = callbacks + self.prioritizer = prioritizer + self.resolve_ambiguity = resolve_ambiguity + self._use_cache = use_cache + self._cache = {} + self._on_cycle_retreat = False + self._cycle_node = None + self._successful_visits = set() def visit(self, root): - self.output_stack = deque() - return super(ForestToTreeVisitor, self).visit(root) + if self.prioritizer: + self.prioritizer.visit(root) + super(ForestToParseTree, self).visit(root) + self._cache = {} + + def on_cycle(self, node, path): + logger.debug("Cycle encountered in the SPPF at node: %s. " + "As infinite ambiguities cannot be represented in a tree, " + "this family of derivations will be discarded.", node) + self._cycle_node = node + self._on_cycle_retreat = True + + def _check_cycle(self, node): + if self._on_cycle_retreat: + if id(node) == id(self._cycle_node) or id(node) in self._successful_visits: + self._cycle_node = None + self._on_cycle_retreat = False + else: + return Discard - def visit_token_node(self, node): - self.output_stack[-1].append(node) + def _collapse_ambig(self, children): + new_children = [] + for child in children: + if hasattr(child, 'data') and child.data == '_ambig': + new_children += child.children + else: + new_children.append(child) + return new_children + + def _call_rule_func(self, node, data): + # called when transforming children of symbol nodes + # data is a list of trees or tokens that correspond to the + # symbol's rule expansion + return self.callbacks[node.rule](data) + + def _call_ambig_func(self, node, data): + # called when transforming a symbol node + # data is a list of trees where each tree's data is + # equal to the name of the symbol or one of its aliases. 
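+        # e.g. two surviving derivations of a rule arrive here as
+        # [Tree('start', ...), Tree('start', ...)] and are wrapped below into a
+        # single Tree('_ambig', [...]); a lone derivation is returned unwrapped,
+        # and an empty list is discarded.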
+ if len(data) > 1: + return self.tree_class('_ambig', data) + elif data: + return data[0] + return Discard + + def transform_symbol_node(self, node, data): + if id(node) not in self._successful_visits: + return Discard + r = self._check_cycle(node) + if r is Discard: + return r + self._successful_visits.remove(id(node)) + data = self._collapse_ambig(data) + return self._call_ambig_func(node, data) + + def transform_intermediate_node(self, node, data): + if id(node) not in self._successful_visits: + return Discard + r = self._check_cycle(node) + if r is Discard: + return r + self._successful_visits.remove(id(node)) + if len(data) > 1: + children = [self.tree_class('_inter', c) for c in data] + return self.tree_class('_iambig', children) + return data[0] + + def transform_packed_node(self, node, data): + r = self._check_cycle(node) + if r is Discard: + return r + if self.resolve_ambiguity and id(node.parent) in self._successful_visits: + return Discard + if self._use_cache and id(node) in self._cache: + return self._cache[id(node)] + children = [] + assert len(data) <= 2 + data = PackedData(node, data) + if data.left is not PackedData.NO_DATA: + if node.left.is_intermediate and isinstance(data.left, list): + children += data.left + else: + children.append(data.left) + if data.right is not PackedData.NO_DATA: + children.append(data.right) + if node.parent.is_intermediate: + return self._cache.setdefault(id(node), children) + return self._cache.setdefault(id(node), self._call_rule_func(node, children)) def visit_symbol_node_in(self, node): - if self.forest_sum_visitor and node.is_ambiguous and isinf(node.priority): - self.forest_sum_visitor.visit(node) - return next(iter(node.children)) + super(ForestToParseTree, self).visit_symbol_node_in(node) + if self._on_cycle_retreat: + return + return node.children def visit_packed_node_in(self, node): - if not node.parent.is_intermediate: - self.output_stack.append([]) - return iter([node.left, node.right]) + self._on_cycle_retreat = False + to_visit = super(ForestToParseTree, self).visit_packed_node_in(node) + if not self.resolve_ambiguity or id(node.parent) not in self._successful_visits: + if not self._use_cache or id(node) not in self._cache: + return to_visit def visit_packed_node_out(self, node): - if not node.parent.is_intermediate: - result = self.callbacks[node.rule](self.output_stack.pop()) - if self.output_stack: - self.output_stack[-1].append(result) - else: - self.result = result - -class ForestToAmbiguousTreeVisitor(ForestToTreeVisitor): - """ - A Forest visitor which converts an SPPF forest to an ambiguous AST. - - Because of the fundamental disparity between what can be stored in - an SPPF and what can be stored in a Tree; this implementation is not - complete. It correctly deals with ambiguities that occur on symbol nodes only, - and cannot deal with ambiguities that occur on intermediate nodes. - - Usually, most parsers can be rewritten to avoid intermediate node - ambiguities. Also, this implementation could be fixed, however - the code to handle intermediate node ambiguities is messy and - would not be performant. It is much better not to use this and - instead to correctly disambiguate the forest and only store unambiguous - parses in Trees. It is here just to provide some parity with the - old ambiguity='explicit'. - - This is mainly used by the test framework, to make it simpler to write - tests ensuring the SPPF contains the right results. 
+ super(ForestToParseTree, self).visit_packed_node_out(node) + if not self._on_cycle_retreat: + self._successful_visits.add(id(node.parent)) + +def handles_ambiguity(func): + """Decorator for methods of subclasses of ``TreeForestTransformer``. + Denotes that the method should receive a list of transformed derivations.""" + func.handles_ambiguity = True + return func + +class TreeForestTransformer(ForestToParseTree): + """A ``ForestTransformer`` with a tree ``Transformer``-like interface. + By default, it will construct a tree. + + Methods provided via inheritance are called based on the rule/symbol + names of nodes in the forest. + + Methods that act on rules will receive a list of the results of the + transformations of the rule's children. By default, trees and tokens. + + Methods that act on tokens will receive a token. + + Alternatively, methods that act on rules may be annotated with + ``handles_ambiguity``. In this case, the function will receive a list + of all the transformations of all the derivations of the rule. + By default, a list of trees where each tree.data is equal to the + rule name or one of its aliases. + + Non-tree transformations are made possible by override of + ``__default__``, ``__default_token__``, and ``__default_ambig__``. + + Note: + Tree shaping features such as inlined rules and token filtering are + not built into the transformation. Positions are also not propagated. + + Parameters: + tree_class: The tree class to use for construction + prioritizer: A ``ForestVisitor`` that manipulates the priorities of nodes in the SPPF. + resolve_ambiguity: If True, ambiguities will be resolved based on priorities. + use_cache (bool): If True, caches the results of some transformations, + potentially improving performance when ``resolve_ambiguity==False``. + Only use if you know what you are doing: i.e. All transformation + functions are pure and referentially transparent. """ - def __init__(self, callbacks, forest_sum_visitor = ForestSumVisitor): - super(ForestToAmbiguousTreeVisitor, self).__init__(callbacks, forest_sum_visitor) - def visit_token_node(self, node): - self.output_stack[-1].children.append(node) + def __init__(self, tree_class=Tree, prioritizer=ForestSumVisitor(), resolve_ambiguity=True, use_cache=False): + super(TreeForestTransformer, self).__init__(tree_class, dict(), prioritizer, resolve_ambiguity, use_cache) - def visit_symbol_node_in(self, node): - if self.forest_sum_visitor and node.is_ambiguous and isinf(node.priority): - self.forest_sum_visitor.visit(node) - if not node.is_intermediate and node.is_ambiguous: - self.output_stack.append(Tree('_ambig', [])) - return iter(node.children) + def __default__(self, name, data): + """Default operation on tree (for override). - def visit_symbol_node_out(self, node): - if not node.is_intermediate and node.is_ambiguous: - result = self.output_stack.pop() - if self.output_stack: - self.output_stack[-1].children.append(result) - else: - self.result = result + Returns a tree with name with data as children. + """ + return self.tree_class(name, data) - def visit_packed_node_in(self, node): - if not node.parent.is_intermediate: - self.output_stack.append(Tree('drv', [])) - return iter([node.left, node.right]) + def __default_ambig__(self, name, data): + """Default operation on ambiguous rule (for override). 
- def visit_packed_node_out(self, node): - if not node.parent.is_intermediate: - result = self.callbacks[node.rule](self.output_stack.pop().children) - if self.output_stack: - self.output_stack[-1].children.append(result) - else: - self.result = result + Wraps data in an '_ambig' node if it contains more than + one element. + """ + if len(data) > 1: + return self.tree_class('_ambig', data) + elif data: + return data[0] + return Discard + + def __default_token__(self, node): + """Default operation on ``Token`` (for override). + + Returns ``node``. + """ + return node + + def transform_token_node(self, node): + return getattr(self, node.type, self.__default_token__)(node) + + def _call_rule_func(self, node, data): + name = node.rule.alias or node.rule.options.template_source or node.rule.origin.name + user_func = getattr(self, name, self.__default__) + if user_func == self.__default__ or hasattr(user_func, 'handles_ambiguity'): + user_func = partial(self.__default__, name) + if not self.resolve_ambiguity: + wrapper = partial(AmbiguousIntermediateExpander, self.tree_class) + user_func = wrapper(user_func) + return user_func(data) + + def _call_ambig_func(self, node, data): + name = node.s.name + user_func = getattr(self, name, self.__default_ambig__) + if user_func == self.__default_ambig__ or not hasattr(user_func, 'handles_ambiguity'): + user_func = partial(self.__default_ambig__, name) + return user_func(data) class ForestToPyDotVisitor(ForestVisitor): """ @@ -365,12 +740,16 @@ class ForestToPyDotVisitor(ForestVisitor): is structured. """ def __init__(self, rankdir="TB"): + super(ForestToPyDotVisitor, self).__init__(single_visit=True) self.pydot = import_module('pydot') self.graph = self.pydot.Dot(graph_type='digraph', rankdir=rankdir) def visit(self, root, filename): super(ForestToPyDotVisitor, self).visit(root) - self.graph.write_png(filename) + try: + self.graph.write_png(filename) + except FileNotFoundError as e: + logger.error("Could not write png: %s", e) def visit_token_node(self, node): graph_node_id = str(id(node)) @@ -389,14 +768,15 @@ def visit_packed_node_in(self, node): graph_node_shape = "diamond" graph_node = self.pydot.Node(graph_node_id, style=graph_node_style, fillcolor="#{:06x}".format(graph_node_color), shape=graph_node_shape, label=graph_node_label) self.graph.add_node(graph_node) - return iter([node.left, node.right]) + yield node.left + yield node.right def visit_packed_node_out(self, node): graph_node_id = str(id(node)) graph_node = self.graph.get_node(graph_node_id)[0] for child in [node.left, node.right]: if child is not None: - child_graph_node_id = str(id(child)) + child_graph_node_id = str(id(child.token if isinstance(child, TokenNode) else child)) child_graph_node = self.graph.get_node(child_graph_node_id)[0] self.graph.add_edge(self.pydot.Edge(graph_node, child_graph_node)) else: diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/grammar_analysis.py b/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/grammar_analysis.py index 94c32ccc..b52e50d5 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/grammar_analysis.py +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/grammar_analysis.py @@ -1,14 +1,20 @@ +"Provides for superficial grammar analysis."
+ from collections import Counter, defaultdict +from typing import List, Dict, Iterator, FrozenSet, Set from ..utils import bfs, fzset, classify from ..exceptions import GrammarError -from ..grammar import Rule, Terminal, NonTerminal +from ..grammar import Rule, Terminal, NonTerminal, Symbol +from ..common import ParserConf -class RulePtr(object): +class RulePtr: __slots__ = ('rule', 'index') + rule: Rule + index: int - def __init__(self, rule, index): + def __init__(self, rule: Rule, index: int): assert isinstance(rule, Rule) assert index <= len(rule.expansion) self.rule = rule @@ -20,27 +26,37 @@ def __repr__(self): return '<%s : %s * %s>' % (self.rule.origin.name, ' '.join(before), ' '.join(after)) @property - def next(self): + def next(self) -> Symbol: return self.rule.expansion[self.index] - def advance(self, sym): + def advance(self, sym: Symbol) -> 'RulePtr': assert self.next == sym return RulePtr(self.rule, self.index+1) @property - def is_satisfied(self): + def is_satisfied(self) -> bool: return self.index == len(self.rule.expansion) - def __eq__(self, other): + def __eq__(self, other) -> bool: + if not isinstance(other, RulePtr): + return NotImplemented return self.rule == other.rule and self.index == other.index - def __hash__(self): + + def __hash__(self) -> int: return hash((self.rule, self.index)) +State = FrozenSet[RulePtr] + # state generation ensures no duplicate LR0ItemSets -class LR0ItemSet(object): +class LR0ItemSet: __slots__ = ('kernel', 'closure', 'transitions', 'lookaheads') + kernel: State + closure: State + transitions: Dict[Symbol, 'LR0ItemSet'] + lookaheads: Dict[Symbol, Set[Rule]] + def __init__(self, kernel, closure): self.kernel = fzset(kernel) self.closure = fzset(closure) @@ -121,15 +137,16 @@ def calculate_sets(rules): return FIRST, FOLLOW, NULLABLE -class GrammarAnalyzer(object): - def __init__(self, parser_conf, debug=False): +class GrammarAnalyzer: + def __init__(self, parser_conf: ParserConf, debug: bool=False, strict: bool=False): self.debug = debug + self.strict = strict root_rules = {start: Rule(NonTerminal('$root_' + start), [NonTerminal(start), Terminal('$END')]) for start in parser_conf.start} rules = parser_conf.rules + list(root_rules.values()) - self.rules_by_origin = classify(rules, lambda r: r.origin) + self.rules_by_origin: Dict[NonTerminal, List[Rule]] = classify(rules, lambda r: r.origin) if len(rules) != len(set(rules)): duplicates = [item for item, count in Counter(rules).items() if count > 1] @@ -138,7 +155,7 @@ def __init__(self, parser_conf, debug=False): for r in rules: for sym in r.expansion: if not (sym.is_term or sym in self.rules_by_origin): - raise GrammarError("Using an undefined rule: %s" % sym) # TODO test validation + raise GrammarError("Using an undefined rule: %s" % sym) self.start_states = {start: self.expand_rule(root_rule.origin) for start, root_rule in root_rules.items()} @@ -160,14 +177,14 @@ def __init__(self, parser_conf, debug=False): self.FIRST, self.FOLLOW, self.NULLABLE = calculate_sets(rules) - def expand_rule(self, source_rule, rules_by_origin=None): + def expand_rule(self, source_rule: NonTerminal, rules_by_origin=None) -> State: "Returns all init_ptrs accessible by rule (recursive)" if rules_by_origin is None: rules_by_origin = self.rules_by_origin init_ptrs = set() - def _expand_rule(rule): + def _expand_rule(rule: NonTerminal) -> Iterator[NonTerminal]: assert not rule.is_term, rule for r in rules_by_origin[rule]: @@ -177,6 +194,7 @@ def _expand_rule(rule): if r.expansion: # if not empty rule new_r = 
init_ptr.next if not new_r.is_term: + assert isinstance(new_r, NonTerminal) yield new_r for _ in bfs([source_rule], _expand_rule): diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/lalr_analysis.py b/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/lalr_analysis.py index 8890c3cd..b7b3fdfc 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/lalr_analysis.py +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/lalr_analysis.py @@ -6,14 +6,15 @@ # Author: Erez Shinan (2017) # Email : erezshin@gmail.com -import logging -from collections import defaultdict, deque +from typing import Dict, Set, Iterator, Tuple, List, TypeVar, Generic +from collections import defaultdict -from ..utils import classify, classify_bool, bfs, fzset, Serialize, Enumerator +from ..utils import classify, classify_bool, bfs, fzset, Enumerator, logger from ..exceptions import GrammarError -from .grammar_analysis import GrammarAnalyzer, Terminal, LR0ItemSet -from ..grammar import Rule +from .grammar_analysis import GrammarAnalyzer, Terminal, LR0ItemSet, RulePtr, State +from ..grammar import Rule, Symbol +from ..common import ParserConf ###{standalone @@ -28,8 +29,13 @@ def __repr__(self): Shift = Action('Shift') Reduce = Action('Reduce') +StateT = TypeVar("StateT") + +class ParseTableBase(Generic[StateT]): + states: Dict[StateT, Dict[str, Tuple]] + start_states: Dict[str, StateT] + end_states: Dict[str, StateT] -class ParseTable: def __init__(self, states, start_states, end_states): self.states = states self.start_states = start_states @@ -37,7 +43,6 @@ def __init__(self, states, start_states, end_states): def serialize(self, memo): tokens = Enumerator() - rules = Enumerator() states = { state: {tokens.get(token): ((1, arg.serialize(memo)) if action is Reduce else (0, arg)) @@ -62,13 +67,21 @@ def deserialize(cls, data, memo): } return cls(states, data['start_states'], data['end_states']) +class ParseTable(ParseTableBase['State']): + """Parse-table whose key is State, i.e. set[RulePtr] + + Slower than IntParseTable, but useful for debugging + """ + pass + -class IntParseTable(ParseTable): +class IntParseTable(ParseTableBase[int]): + """Parse-table whose key is int. 
Best for performance.""" @classmethod - def from_ParseTable(cls, parse_table): + def from_ParseTable(cls, parse_table: ParseTable): enum = list(parse_table.states) - state_to_idx = {s:i for i,s in enumerate(enum)} + state_to_idx: Dict['State', int] = {s:i for i,s in enumerate(enum)} int_states = {} for s, la in parse_table.states.items(): @@ -93,9 +106,7 @@ def from_ParseTable(cls, parse_table): def digraph(X, R, G): F = {} S = [] - N = {} - for x in X: - N[x] = 0 + N = dict.fromkeys(X, 0) for x in X: # this is always true for the first iteration, but N[x] may be updated in traverse below if N[x] == 0: @@ -135,8 +146,16 @@ def traverse(x, S, N, X, R, G, F): class LALR_Analyzer(GrammarAnalyzer): - def __init__(self, parser_conf, debug=False): - GrammarAnalyzer.__init__(self, parser_conf, debug) + lr0_itemsets: Set[LR0ItemSet] + nonterminal_transitions: List[Tuple[LR0ItemSet, Symbol]] + lookback: Dict[Tuple[LR0ItemSet, Symbol], Set[Tuple[LR0ItemSet, Rule]]] + includes: Dict[Tuple[LR0ItemSet, Symbol], Set[Tuple[LR0ItemSet, Symbol]]] + reads: Dict[Tuple[LR0ItemSet, Symbol], Set[Tuple[LR0ItemSet, Symbol]]] + directly_reads: Dict[Tuple[LR0ItemSet, Symbol], Set[Symbol]] + + + def __init__(self, parser_conf: ParserConf, debug: bool=False, strict: bool=False): + GrammarAnalyzer.__init__(self, parser_conf, debug, strict) self.nonterminal_transitions = [] self.directly_reads = defaultdict(set) self.reads = defaultdict(set) @@ -144,12 +163,12 @@ def __init__(self, parser_conf, debug=False): self.lookback = defaultdict(set) - def compute_lr0_states(self): - self.lr0_states = set() + def compute_lr0_states(self) -> None: + self.lr0_itemsets = set() # map of kernels to LR0ItemSets - cache = {} + cache: Dict['State', LR0ItemSet] = {} - def step(state): + def step(state: LR0ItemSet) -> Iterator[LR0ItemSet]: _, unsat = classify_bool(state.closure, lambda rp: rp.is_satisfied) d = classify(unsat, lambda rp: rp.next) @@ -167,7 +186,7 @@ def step(state): state.transitions[sym] = new_state yield new_state - self.lr0_states.add(state) + self.lr0_itemsets.add(state) for _ in bfs(self.lr0_start_states.values(), step): pass @@ -180,7 +199,7 @@ def compute_reads_relations(self): assert(rp.index == 0) self.directly_reads[(root, rp.next)] = set([ Terminal('$END') ]) - for state in self.lr0_states: + for state in self.lr0_itemsets: seen = set() for rp in state.closure: if rp.is_satisfied: @@ -224,7 +243,7 @@ def compute_includes_lookback(self): if nt2 not in self.reads: continue for j in range(i + 1, len(rp.rule.expansion)): - if not rp.rule.expansion[j] in self.NULLABLE: + if rp.rule.expansion[j] not in self.NULLABLE: break else: includes.append(nt2) @@ -245,35 +264,60 @@ def compute_lookaheads(self): for s in follow_sets[nt]: state.lookaheads[s].add(rule) - def compute_lalr1_states(self): - m = {} - for state in self.lr0_states: - actions = {} - for la, next_state in state.transitions.items(): - actions[la] = (Shift, next_state.closure) - for la, rules in state.lookaheads.items(): + def compute_lalr1_states(self) -> None: + m: Dict[LR0ItemSet, Dict[str, Tuple]] = {} + reduce_reduce = [] + for itemset in self.lr0_itemsets: + actions: Dict[Symbol, Tuple] = {la: (Shift, next_state.closure) + for la, next_state in itemset.transitions.items()} + for la, rules in itemset.lookaheads.items(): if len(rules) > 1: - raise GrammarError('Reduce/Reduce collision in %s between the following rules: %s' % (la, ''.join([ '\n\t\t- ' + str(r) for r in rules ]))) + # Try to resolve conflict based on priority + p = [(r.options.priority or 
0, r) for r in rules] + p.sort(key=lambda r: r[0], reverse=True) + best, second_best = p[:2] + if best[0] > second_best[0]: + rules = {best[1]} + else: + reduce_reduce.append((itemset, la, rules)) + continue + + rule ,= rules if la in actions: - if self.debug: - logging.warning('Shift/Reduce conflict for terminal %s: (resolving as shift)', la.name) - logging.warning(' * %s', list(rules)[0]) + if self.strict: + raise GrammarError(f"Shift/Reduce conflict for terminal {la.name}. [strict-mode]\n ") + elif self.debug: + logger.warning('Shift/Reduce conflict for terminal %s: (resolving as shift)', la.name) + logger.warning(' * %s', rule) + else: + logger.debug('Shift/Reduce conflict for terminal %s: (resolving as shift)', la.name) + logger.debug(' * %s', rule) else: - actions[la] = (Reduce, list(rules)[0]) - m[state] = { k.name: v for k, v in actions.items() } + actions[la] = (Reduce, rule) + m[itemset] = { k.name: v for k, v in actions.items() } + + if reduce_reduce: + msgs = [] + for itemset, la, rules in reduce_reduce: + msg = 'Reduce/Reduce collision in %s between the following rules: %s' % (la, ''.join([ '\n\t- ' + str(r) for r in rules ])) + if self.debug: + msg += '\n collision occurred in state: {%s\n }' % ''.join(['\n\t' + str(x) for x in itemset.closure]) + msgs.append(msg) + raise GrammarError('\n\n'.join(msgs)) states = { k.closure: v for k, v in m.items() } # compute end states - end_states = {} + end_states: Dict[str, 'State'] = {} for state in states: for rp in state: for start in self.lr0_start_states: if rp.rule.origin.name == ('$root_' + start) and rp.is_satisfied: - assert(not start in end_states) + assert start not in end_states end_states[start] = state - _parse_table = ParseTable(states, { start: state.closure for start, state in self.lr0_start_states.items() }, end_states) + start_states = { start: state.closure for start, state in self.lr0_start_states.items() } + _parse_table = ParseTable(states, start_states, end_states) if self.debug: self.parse_table = _parse_table @@ -285,4 +329,4 @@ def compute_lalr(self): self.compute_reads_relations() self.compute_includes_lookback() self.compute_lookaheads() - self.compute_lalr1_states() \ No newline at end of file + self.compute_lalr1_states() diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/lalr_interactive_parser.py b/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/lalr_interactive_parser.py new file mode 100644 index 00000000..d5a2152f --- /dev/null +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/lalr_interactive_parser.py @@ -0,0 +1,157 @@ +# This module provides a LALR interactive parser, which is used for debugging and error handling + +from typing import Iterator, List +from copy import copy +import warnings + +from lark.exceptions import UnexpectedToken +from lark.lexer import Token, LexerThread + +###{standalone + +class InteractiveParser: + """InteractiveParser gives you advanced control over parsing and error handling when parsing with LALR. + + For a simpler interface, see the ``on_error`` argument to ``Lark.parse()``. + """ + def __init__(self, parser, parser_state, lexer_thread: LexerThread): + self.parser = parser + self.parser_state = parser_state + self.lexer_thread = lexer_thread + self.result = None + + @property + def lexer_state(self) -> LexerThread: + warnings.warn("lexer_state will be removed in subsequent releases. 
Use lexer_thread instead.", DeprecationWarning) + return self.lexer_thread + + def feed_token(self, token: Token): + """Feed the parser with a token, and advance it to the next state, as if it received it from the lexer. + + Note that ``token`` has to be an instance of ``Token``. + """ + return self.parser_state.feed_token(token, token.type == '$END') + + def iter_parse(self) -> Iterator[Token]: + """Step through the different stages of the parse, by reading tokens from the lexer + and feeding them to the parser, one per iteration. + + Returns an iterator of the tokens it encounters. + + When the parse is over, the resulting tree can be found in ``InteractiveParser.result``. + """ + for token in self.lexer_thread.lex(self.parser_state): + yield token + self.result = self.feed_token(token) + + def exhaust_lexer(self) -> List[Token]: + """Try to feed the rest of the lexer state into the interactive parser. + + Note that this modifies the instance in place and does not feed an '$END' Token + """ + return list(self.iter_parse()) + + + def feed_eof(self, last_token=None): + """Feed a '$END' Token. Borrows from 'last_token' if given.""" + eof = Token.new_borrow_pos('$END', '', last_token) if last_token is not None else self.lexer_thread._Token('$END', '', 0, 1, 1) + return self.feed_token(eof) + + + def __copy__(self): + """Create a new interactive parser with a separate state. + + Calls to feed_token() won't affect the old instance, and vice-versa. + """ + return type(self)( + self.parser, + copy(self.parser_state), + copy(self.lexer_thread), + ) + + def copy(self): + return copy(self) + + def __eq__(self, other): + if not isinstance(other, InteractiveParser): + return False + + return self.parser_state == other.parser_state and self.lexer_thread == other.lexer_thread + + def as_immutable(self): + """Convert to an ``ImmutableInteractiveParser``.""" + p = copy(self) + return ImmutableInteractiveParser(p.parser, p.parser_state, p.lexer_thread) + + def pretty(self): + """Print the output of ``choices()`` in a way that's easier to read.""" + out = ["Parser choices:"] + for k, v in self.choices().items(): + out.append('\t- %s -> %r' % (k, v)) + out.append('stack size: %s' % len(self.parser_state.state_stack)) + return '\n'.join(out) + + def choices(self): + """Returns a dictionary of token types, matched to their action in the parser. + + Only returns token types that are accepted by the current state. + + Updated by ``feed_token()``. + """ + return self.parser_state.parse_conf.parse_table.states[self.parser_state.position] + + def accepts(self): + """Returns the set of possible tokens that will advance the parser into a new valid state.""" + accepts = set() + conf_no_callbacks = copy(self.parser_state.parse_conf) + # We don't want to call callbacks here since those might have arbitrary side effects + # and are unnecessarily slow. + conf_no_callbacks.callbacks = {} + for t in self.choices(): + if t.isupper(): # is terminal? + new_cursor = copy(self) + new_cursor.parser_state.parse_conf = conf_no_callbacks + try: + new_cursor.feed_token(self.lexer_thread._Token(t, '')) + except UnexpectedToken: + pass + else: + accepts.add(t) + return accepts + + def resume_parse(self): + """Resume automated parsing from the current state. + """ + return self.parser.parse_from_state(self.parser_state, last_token=self.lexer_thread.state.last_token) + + + +class ImmutableInteractiveParser(InteractiveParser): + """Same as ``InteractiveParser``, but operations create a new instance instead + of changing it in-place. 
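+    Because it is immutable, it also defines ``__hash__``, so instances
+    can be stored in sets and used as dictionary keys.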
+ """ + + result = None + + def __hash__(self): + return hash((self.parser_state, self.lexer_thread)) + + def feed_token(self, token): + c = copy(self) + c.result = InteractiveParser.feed_token(c, token) + return c + + def exhaust_lexer(self): + """Try to feed the rest of the lexer state into the parser. + + Note that this returns a new ImmutableInteractiveParser and does not feed an '$END' Token""" + cursor = self.as_mutable() + cursor.exhaust_lexer() + return cursor.as_immutable() + + def as_mutable(self): + """Convert to an ``InteractiveParser``.""" + p = copy(self) + return InteractiveParser(p.parser, p.parser_state, p.lexer_thread) + +###} diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/lalr_parser.py b/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/lalr_parser.py index f26cbc5b..6ae2a04f 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/lalr_parser.py +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/lalr_parser.py @@ -2,19 +2,21 @@ """ # Author: Erez Shinan (2017) # Email : erezshin@gmail.com -from ..exceptions import UnexpectedToken -from ..lexer import Token -from ..utils import Enumerator, Serialize +from typing import Dict, Any, Optional +from ..lexer import Token, LexerThread +from ..utils import Serialize +from ..common import ParserConf, ParserCallbacks -from .lalr_analysis import LALR_Analyzer, Shift, Reduce, IntParseTable -from .lalr_puppet import ParserPuppet +from .lalr_analysis import LALR_Analyzer, IntParseTable, ParseTableBase +from .lalr_interactive_parser import InteractiveParser +from lark.exceptions import UnexpectedCharacters, UnexpectedInput, UnexpectedToken +from .lalr_parser_state import ParserState, ParseConf ###{standalone -class LALR_Parser(object): - def __init__(self, parser_conf, debug=False): - assert all(r.options.priority is None for r in parser_conf.rules), "LALR doesn't yet support prioritization" - analysis = LALR_Analyzer(parser_conf, debug=debug) +class LALR_Parser(Serialize): + def __init__(self, parser_conf: ParserConf, debug: bool=False, strict: bool=False): + analysis = LALR_Analyzer(parser_conf, debug=debug, strict=strict) analysis.compute_lalr() callbacks = parser_conf.callbacks @@ -23,97 +25,98 @@ def __init__(self, parser_conf, debug=False): self.parser = _Parser(analysis.parse_table, callbacks, debug) @classmethod - def deserialize(cls, data, memo, callbacks): + def deserialize(cls, data, memo, callbacks, debug=False): inst = cls.__new__(cls) inst._parse_table = IntParseTable.deserialize(data, memo) - inst.parser = _Parser(inst._parse_table, callbacks) + inst.parser = _Parser(inst._parse_table, callbacks, debug) return inst - def serialize(self, memo): + def serialize(self, memo: Any = None) -> Dict[str, Any]: return self._parse_table.serialize(memo) - def parse(self, *args): - return self.parser.parse(*args) + def parse_interactive(self, lexer: LexerThread, start: str): + return self.parser.parse(lexer, start, start_interactive=True) + + def parse(self, lexer, start, on_error=None): + try: + return self.parser.parse(lexer, start) + except UnexpectedInput as e: + if on_error is None: + raise + + while True: + if isinstance(e, UnexpectedCharacters): + s = e.interactive_parser.lexer_thread.state + p = s.line_ctr.char_pos + + if not on_error(e): + raise e + + if isinstance(e, UnexpectedCharacters): + # If user didn't change the character position, then we should + if p == s.line_ctr.char_pos: + s.line_ctr.feed(s.text[p:p+1]) + + try: + return e.interactive_parser.resume_parse() + except 
UnexpectedToken as e2: + if (isinstance(e, UnexpectedToken) + and e.token.type == e2.token.type == '$END' + and e.interactive_parser == e2.interactive_parser): + # Prevent infinite loop + raise e2 + e = e2 + except UnexpectedCharacters as e2: + e = e2 class _Parser: - def __init__(self, parse_table, callbacks, debug=False): + parse_table: ParseTableBase + callbacks: ParserCallbacks + debug: bool + + def __init__(self, parse_table: ParseTableBase, callbacks: ParserCallbacks, debug: bool=False): self.parse_table = parse_table self.callbacks = callbacks self.debug = debug - def parse(self, seq, start, set_state=None, value_stack=None, state_stack=None): - token = None - stream = iter(seq) - states = self.parse_table.states - start_state = self.parse_table.start_states[start] - end_state = self.parse_table.end_states[start] + def parse(self, lexer: LexerThread, start: str, value_stack=None, state_stack=None, start_interactive=False): + parse_conf = ParseConf(self.parse_table, self.callbacks, start) + parser_state = ParserState(parse_conf, lexer, state_stack, value_stack) + if start_interactive: + return InteractiveParser(self, parser_state, parser_state.lexer) + return self.parse_from_state(parser_state) - state_stack = state_stack or [start_state] - value_stack = value_stack or [] - if set_state: set_state(start_state) + def parse_from_state(self, state: ParserState, last_token: Optional[Token]=None): + """Run the main LALR parser loop - def get_action(token): - state = state_stack[-1] - try: - return states[state][token.type] - except KeyError: - expected = [s for s in states[state].keys() if s.isupper()] - try: - puppet = ParserPuppet(self, state_stack, value_stack, start, stream, set_state) - except NameError: - puppet = None - raise UnexpectedToken(token, expected, state=state, puppet=puppet) - - def reduce(rule): - size = len(rule.expansion) - if size: - s = value_stack[-size:] - del state_stack[-size:] - del value_stack[-size:] - else: - s = [] - - value = self.callbacks[rule](s) - - _action, new_state = states[state_stack[-1]][rule.origin.name] - assert _action is Shift - state_stack.append(new_state) - value_stack.append(value) - - # Main LALR-parser loop + Parameters: + state - the initial state. Changed in-place. + last_token - Used only for line information in case of an empty lexer. 
+ """ try: - for token in stream: - while True: - action, arg = get_action(token) - assert arg != end_state - - if action is Shift: - state_stack.append(arg) - value_stack.append(token) - if set_state: set_state(arg) - break # next token - else: - reduce(arg) + token = last_token + for token in state.lexer.lex(state): + assert token is not None + state.feed_token(token) + + end_token = Token.new_borrow_pos('$END', '', token) if token else Token('$END', '', 0, 1, 1) + return state.feed_token(end_token, True) + except UnexpectedInput as e: + try: + e.interactive_parser = InteractiveParser(self, state, state.lexer) + except NameError: + pass + raise e except Exception as e: if self.debug: print("") print("STATE STACK DUMP") print("----------------") - for i, s in enumerate(state_stack): + for i, s in enumerate(state.state_stack): print('%d)' % i , s) print("") raise - - token = Token.new_borrow_pos('$END', '', token) if token else Token('$END', '', 0, 1, 1) - while True: - _action, arg = get_action(token) - assert(_action is Reduce) - reduce(arg) - if state_stack[-1] == end_state: - return value_stack[-1] - ###} - diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/lalr_parser_state.py b/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/lalr_parser_state.py new file mode 100644 index 00000000..35056976 --- /dev/null +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/lalr_parser_state.py @@ -0,0 +1,110 @@ +from copy import deepcopy, copy +from typing import Dict, Any, Generic, List +from ..lexer import Token, LexerThread +from ..common import ParserCallbacks + +from .lalr_analysis import Shift, ParseTableBase, StateT +from lark.exceptions import UnexpectedToken + +###{standalone + +class ParseConf(Generic[StateT]): + __slots__ = 'parse_table', 'callbacks', 'start', 'start_state', 'end_state', 'states' + + parse_table: ParseTableBase[StateT] + callbacks: ParserCallbacks + start: str + + start_state: StateT + end_state: StateT + states: Dict[StateT, Dict[str, tuple]] + + def __init__(self, parse_table: ParseTableBase[StateT], callbacks: ParserCallbacks, start: str): + self.parse_table = parse_table + + self.start_state = self.parse_table.start_states[start] + self.end_state = self.parse_table.end_states[start] + self.states = self.parse_table.states + + self.callbacks = callbacks + self.start = start + +class ParserState(Generic[StateT]): + __slots__ = 'parse_conf', 'lexer', 'state_stack', 'value_stack' + + parse_conf: ParseConf[StateT] + lexer: LexerThread + state_stack: List[StateT] + value_stack: list + + def __init__(self, parse_conf: ParseConf[StateT], lexer: LexerThread, state_stack=None, value_stack=None): + self.parse_conf = parse_conf + self.lexer = lexer + self.state_stack = state_stack or [self.parse_conf.start_state] + self.value_stack = value_stack or [] + + @property + def position(self) -> StateT: + return self.state_stack[-1] + + # Necessary for match_examples() to work + def __eq__(self, other) -> bool: + if not isinstance(other, ParserState): + return NotImplemented + return len(self.state_stack) == len(other.state_stack) and self.position == other.position + + def __copy__(self): + return type(self)( + self.parse_conf, + self.lexer, # XXX copy + copy(self.state_stack), + deepcopy(self.value_stack), + ) + + def copy(self) -> 'ParserState[StateT]': + return copy(self) + + def feed_token(self, token: Token, is_end=False) -> Any: + state_stack = self.state_stack + value_stack = self.value_stack + states = self.parse_conf.states + end_state = 
self.parse_conf.end_state + callbacks = self.parse_conf.callbacks + + while True: + state = state_stack[-1] + try: + action, arg = states[state][token.type] + except KeyError: + expected = {s for s in states[state].keys() if s.isupper()} + raise UnexpectedToken(token, expected, state=self, interactive_parser=None) + + assert arg != end_state + + if action is Shift: + # shift once and return + assert not is_end + state_stack.append(arg) + value_stack.append(token if token.type not in callbacks else callbacks[token.type](token)) + return + else: + # reduce+shift as many times as necessary + rule = arg + size = len(rule.expansion) + if size: + s = value_stack[-size:] + del state_stack[-size:] + del value_stack[-size:] + else: + s = [] + + value = callbacks[rule](s) if callbacks else s + + _action, new_state = states[state_stack[-1]][rule.origin.name] + assert _action is Shift + state_stack.append(new_state) + value_stack.append(value) + + if is_end and state_stack[-1] == end_state: + return value_stack[-1] +###} diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/lalr_puppet.py b/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/lalr_puppet.py deleted file mode 100644 index 968783cc..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/lalr_puppet.py +++ /dev/null @@ -1,79 +0,0 @@ -# This module provide a LALR puppet, which is used to debugging and error handling - -from copy import deepcopy - -from .lalr_analysis import Shift, Reduce - -class ParserPuppet: - def __init__(self, parser, state_stack, value_stack, start, stream, set_state): - self.parser = parser - self._state_stack = state_stack - self._value_stack = value_stack - self._start = start - self._stream = stream - self._set_state = set_state - - self.result = None - - def feed_token(self, token): - """Advance the parser state, as if it just recieved `token` from the lexer - - """ - end_state = self.parser.parse_table.end_states[self._start] - state_stack = self._state_stack - value_stack = self._value_stack - - state = state_stack[-1] - action, arg = self.parser.parse_table.states[state][token.type] - assert arg != end_state - - while action is Reduce: - rule = arg - size = len(rule.expansion) - if size: - s = value_stack[-size:] - del state_stack[-size:] - del value_stack[-size:] - else: - s = [] - - value = self.parser.callbacks[rule](s) - - _action, new_state = self.parser.parse_table.states[state_stack[-1]][rule.origin.name] - assert _action is Shift - state_stack.append(new_state) - value_stack.append(value) - - if state_stack[-1] == end_state: - self.result = value_stack[-1] - return self.result - - state = state_stack[-1] - action, arg = self.parser.parse_table.states[state][token.type] - assert arg != end_state - - assert action is Shift - state_stack.append(arg) - value_stack.append(token) - - def copy(self): - return type(self)( - self.parser, - list(self._state_stack), - deepcopy(self._value_stack), - self._start, - self._stream, - self._set_state, - ) - - def pretty(): - print("Puppet choices:") - for k, v in self.choices.items(): - print('\t-', k, '->', v) - print('stack size:', len(self._state_stack)) - - def choices(self): - return self.parser.parse_table.states[self._state_stack[-1]] - - def resume_parse(self): - return self.parser.parse(self._stream, self._start, self._set_state, self._value_stack, self._state_stack) diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/xearley.py b/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/xearley.py index 855625a9..d9748dfe 100644 
--- a/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/xearley.py +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/parsers/xearley.py @@ -1,4 +1,4 @@ -"""This module implements an experimental Earley parser with a dynamic lexer +"""This module implements an Earley parser with a dynamic lexer The core Earley algorithm used here is based on Elizabeth Scott's implementation, here: https://www.sciencedirect.com/science/article/pii/S1571066108001497 @@ -14,19 +14,26 @@ Earley's power in parsing any CFG. """ +from typing import TYPE_CHECKING, Callable, Optional, List, Any from collections import defaultdict +from ..tree import Tree from ..exceptions import UnexpectedCharacters from ..lexer import Token from ..grammar import Terminal from .earley import Parser as BaseParser -from .earley_forest import SymbolNode +from .earley_forest import TokenNode +if TYPE_CHECKING: + from ..common import LexerConf, ParserConf class Parser(BaseParser): - def __init__(self, parser_conf, term_matcher, resolve_ambiguity=True, ignore = (), complete_lex = False, debug=False): - BaseParser.__init__(self, parser_conf, term_matcher, resolve_ambiguity, debug) - self.ignore = [Terminal(t) for t in ignore] + def __init__(self, lexer_conf: 'LexerConf', parser_conf: 'ParserConf', term_matcher: Callable, + resolve_ambiguity: bool=True, complete_lex: bool=False, debug: bool=False, + tree_class: Optional[Callable[[str, List], Any]]=Tree, ordered_sets: bool=True): + BaseParser.__init__(self, lexer_conf, parser_conf, term_matcher, resolve_ambiguity, + debug, tree_class, ordered_sets) + self.ignore = [Terminal(t) for t in lexer_conf.ignore] self.complete_lex = complete_lex def _parse(self, stream, columns, to_scan, start_symbol=None): @@ -48,7 +55,7 @@ def scan(i, to_scan): # they complete, we push all tokens into a buffer (delayed_matches), to # be held possibly for a later parse step when we reach the point in the # input stream at which they complete. - for item in set(to_scan): + for item in self.Set(to_scan): m = match(item.expect, stream, i) if m: t = Token(item.expect.name, m.group(0), i, text_line, text_column) @@ -62,9 +69,10 @@ def scan(i, to_scan): t = Token(item.expect.name, m.group(0), i, text_line, text_column) delayed_matches[i+m.end()].append( (item, i, t) ) - # Remove any items that successfully matched in this pass from the to_scan buffer. - # This ensures we don't carry over tokens that already matched, if we're ignoring below. - to_scan.remove(item) + # XXX The following 3 lines were commented out for causing a bug. See issue #768 + # # Remove any items that successfully matched in this pass from the to_scan buffer. + # # This ensures we don't carry over tokens that already matched, if we're ignoring below. + # to_scan.remove(item) # 3) Process any ignores. This is typically used for e.g. whitespace. # We carry over any unmatched items from the to_scan buffer to be matched again after @@ -79,8 +87,8 @@ def scan(i, to_scan): # If we're ignoring up to the end of the file, # carry over the start symbol if it already completed. 
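+                        # e.g. if whitespace is ignored and the input ends with trailing
+                        # whitespace, the already-completed start symbol is re-queued just
+                        # past the ignored match, so the parse still succeeds at end of input.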
delayed_matches[m.end()].extend([(item, i, None) for item in columns[i] if item.is_complete and item.s == start_symbol]) - next_to_scan = set() - next_set = set() + next_to_scan = self.Set() + next_set = self.Set() columns.append(next_set) transitives.append({}) @@ -97,8 +105,9 @@ def scan(i, to_scan): new_item = item.advance() label = (new_item.s, new_item.start, i) - new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label)) - new_item.node.add_family(new_item.s, item.rule, new_item.start, item.node, token) + token_node = TokenNode(token, terminals[token.type]) + new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, self.SymbolNode(*label)) + new_item.node.add_family(new_item.s, item.rule, new_item.start, item.node, token_node) else: new_item = item @@ -112,13 +121,18 @@ def scan(i, to_scan): del delayed_matches[i+1] # No longer needed, so unburden memory if not next_set and not delayed_matches and not next_to_scan: - raise UnexpectedCharacters(stream, i, text_line, text_column, {item.expect.name for item in to_scan}, set(to_scan)) + considered_rules = list(sorted(to_scan, key=lambda key: key.rule.origin.name)) + raise UnexpectedCharacters(stream, i, text_line, text_column, {item.expect.name for item in to_scan}, + set(to_scan), state=frozenset(i.s for i in to_scan), + considered_rules=considered_rules + ) return next_to_scan delayed_matches = defaultdict(list) match = self.term_matcher + terminals = self.lexer_conf.terminals_by_name # Cache for nodes & tokens created in a particular parse step. transitives = [{}] @@ -148,4 +162,4 @@ def scan(i, to_scan): ## Column is now the final column in the parse. assert i == len(columns)-1 - return to_scan \ No newline at end of file + return to_scan diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/py.typed b/conda_lock/_vendor/poetry/core/_vendor/lark/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/reconstruct.py b/conda_lock/_vendor/poetry/core/_vendor/lark/reconstruct.py index 1e3adc77..2d8423ae 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/reconstruct.py +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/reconstruct.py @@ -1,16 +1,16 @@ -from collections import defaultdict - -from .tree import Tree -from .visitors import Transformer_InPlace -from .common import ParserConf -from .lexer import Token, PatternStr -from .parsers import earley -from .grammar import Rule, Terminal, NonTerminal +"""This is an experimental tool for reconstructing text from a shaped tree, based on a Lark grammar. 
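+It matches the tree back onto the grammar's rules, re-inserts tokens that
+tree shaping discarded, and joins the resulting fragments into text.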
+""" +from typing import Dict, Callable, Iterable, Optional +from .lark import Lark +from .tree import Tree, ParseTree +from .visitors import Transformer_InPlace +from .lexer import Token, PatternStr, TerminalDef +from .grammar import Terminal, NonTerminal, Symbol -def is_discarded_terminal(t): - return t.is_term and t.filter_out +from .tree_matcher import TreeMatcher, is_discarded_terminal +from .utils import is_id_continue def is_iter_empty(i): try: @@ -23,7 +23,10 @@ def is_iter_empty(i): class WriteTokensTransformer(Transformer_InPlace): "Inserts discarded tokens into their correct place, according to the rules of grammar" - def __init__(self, tokens, term_subs): + tokens: Dict[str, TerminalDef] + term_subs: Dict[str, Callable[[Symbol], str]] + + def __init__(self, tokens: Dict[str, TerminalDef], term_subs: Dict[str, Callable[[Symbol], str]]) -> None: self.tokens = tokens self.term_subs = term_subs @@ -59,105 +62,45 @@ def __default__(self, data, children, meta): return to_write -class MatchTree(Tree): - pass - -class MakeMatchTree: - def __init__(self, name, expansion): - self.name = name - self.expansion = expansion - - def __call__(self, args): - t = MatchTree(self.name, args) - t.meta.match_tree = True - t.meta.orig_expansion = self.expansion - return t - -def best_from_group(seq, group_key, cmp_key): - d = {} - for item in seq: - key = group_key(item) - if key in d: - v1 = cmp_key(item) - v2 = cmp_key(d[key]) - if v2 > v1: - d[key] = item - else: - d[key] = item - return list(d.values()) - -class Reconstructor: - def __init__(self, parser, term_subs={}): - # XXX TODO calling compile twice returns different results! - assert parser.options.maybe_placeholders == False - tokens, rules, _grammar_extra = parser.grammar.compile(parser.options.start) - - self.write_tokens = WriteTokensTransformer({t.name:t for t in tokens}, term_subs) - self.rules = list(self._build_recons_rules(rules)) - self.rules.reverse() - - # Choose the best rule from each group of {rule => [rule.alias]}, since we only really need one derivation. - self.rules = best_from_group(self.rules, lambda r: r, lambda r: -len(r.expansion)) - - self.rules.sort(key=lambda r: len(r.expansion)) - callbacks = {rule: rule.alias for rule in self.rules} # TODO pass callbacks through dict, instead of alias? 
- self.parser = earley.Parser(ParserConf(self.rules, callbacks, parser.options.start), - self._match, resolve_ambiguity=True) - - def _build_recons_rules(self, rules): - expand1s = {r.origin for r in rules if r.options.expand1} - - aliases = defaultdict(list) - for r in rules: - if r.alias: - aliases[r.origin].append( r.alias ) - - rule_names = {r.origin for r in rules} - nonterminals = {sym for sym in rule_names - if sym.name.startswith('_') or sym in expand1s or sym in aliases } - - for r in rules: - recons_exp = [sym if sym in nonterminals else Terminal(sym.name) - for sym in r.expansion if not is_discarded_terminal(sym)] - - # Skip self-recursive constructs - if recons_exp == [r.origin]: - continue - - sym = NonTerminal(r.alias) if r.alias else r.origin - - yield Rule(sym, recons_exp, alias=MakeMatchTree(sym.name, r.expansion)) - - for origin, rule_aliases in aliases.items(): - for alias in rule_aliases: - yield Rule(origin, [Terminal(alias)], alias=MakeMatchTree(origin.name, [NonTerminal(alias)])) - yield Rule(origin, [Terminal(origin.name)], alias=MakeMatchTree(origin.name, [origin])) - - def _match(self, term, token): - if isinstance(token, Tree): - return Terminal(token.data) == term - elif isinstance(token, Token): - return term == Terminal(token.type) - assert False +class Reconstructor(TreeMatcher): + """ + A Reconstructor that will, given a full parse Tree, generate source code. + + Note: + The reconstructor cannot generate values from regexps. If you need to produce discarded + regexes, such as newlines, use `term_subs` and provide default values for them. + + Parameters: + parser: a Lark instance + term_subs: a dictionary of [Terminal name as str] to [output text as str] + """ + + write_tokens: WriteTokensTransformer + + def __init__(self, parser: Lark, term_subs: Optional[Dict[str, Callable[[Symbol], str]]]=None) -> None: + TreeMatcher.__init__(self, parser) + + self.write_tokens = WriteTokensTransformer({t.name:t for t in self.tokens}, term_subs or {}) def _reconstruct(self, tree): - # TODO: ambiguity? 
- unreduced_tree = self.parser.parse(tree.children, tree.data) # find a full derivation - assert unreduced_tree.data == tree.data + unreduced_tree = self.match_tree(tree, tree.data) + res = self.write_tokens.transform(unreduced_tree) for item in res: if isinstance(item, Tree): - for x in self._reconstruct(item): - yield x + # TODO use orig_expansion.rulename to support templates + yield from self._reconstruct(item) else: yield item - def reconstruct(self, tree): + def reconstruct(self, tree: ParseTree, postproc: Optional[Callable[[Iterable[str]], Iterable[str]]]=None, insert_spaces: bool=True) -> str: x = self._reconstruct(tree) + if postproc: + x = postproc(x) y = [] prev_item = '' for item in x: - if prev_item and item and prev_item[-1].isalnum() and item[0].isalnum(): + if insert_spaces and prev_item and item and is_id_continue(prev_item[-1]) and is_id_continue(item[0]): y.append(' ') y.append(item) prev_item = item diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/reconstruct2.py b/conda_lock/_vendor/poetry/core/_vendor/lark/reconstruct2.py deleted file mode 100644 index c7300a06..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/reconstruct2.py +++ /dev/null @@ -1,155 +0,0 @@ -from collections import defaultdict - -from .tree import Tree -from .visitors import Transformer_InPlace -from .common import ParserConf -from .lexer import Token, PatternStr -from .parsers import earley -from .grammar import Rule, Terminal, NonTerminal - - - -def is_discarded_terminal(t): - return t.is_term and t.filter_out - -def is_iter_empty(i): - try: - _ = next(i) - return False - except StopIteration: - return True - -class WriteTokensTransformer(Transformer_InPlace): - def __init__(self, tokens): - self.tokens = tokens - - def __default__(self, data, children, meta): - # if not isinstance(t, MatchTree): - # return t - if not getattr(meta, 'match_tree', False): - return Tree(data, children) - - iter_args = iter(children) - print('@@@', children, meta.orig_expansion) - to_write = [] - for sym in meta.orig_expansion: - if is_discarded_terminal(sym): - t = self.tokens[sym.name] - value = t.pattern.value - if not isinstance(t.pattern, PatternStr): - if t.name == "_NEWLINE": - value = "\n" - else: - raise NotImplementedError("Reconstructing regexps not supported yet: %s" % t) - to_write.append(value) - else: - x = next(iter_args) - if isinstance(x, list): - to_write += x - else: - if isinstance(x, Token): - assert Terminal(x.type) == sym, x - else: - assert NonTerminal(x.data) == sym, (sym, x) - to_write.append(x) - - assert is_iter_empty(iter_args) - return to_write - - -class MatchTree(Tree): - pass - -class MakeMatchTree: - def __init__(self, name, expansion): - self.name = name - self.expansion = expansion - - def __call__(self, args): - t = MatchTree(self.name, args) - t.meta.match_tree = True - t.meta.orig_expansion = self.expansion - return t - -from lark.load_grammar import SimplifyRule_Visitor, RuleTreeToText -class Reconstructor: - def __init__(self, parser): - # XXX TODO calling compile twice returns different results! - assert parser.options.maybe_placeholders == False - tokens, rules, _grammar_extra = parser.grammar.compile(parser.options.start) - - self.write_tokens = WriteTokensTransformer({t.name:t for t in tokens}) - self.rules = list(set(list(self._build_recons_rules(rules)))) - callbacks = {rule: rule.alias for rule in self.rules} # TODO pass callbacks through dict, instead of alias? 
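# ----------------------------------------------------------------------------
# [Editor's note] Illustrative sketch, not part of the vendored patch: how the
# new TreeMatcher-based Reconstructor above might be used. The grammar and the
# input string are made up, and the imports assume plain `lark` rather than
# the vendored `conda_lock._vendor.poetry.core._vendor.lark` path.
from lark import Lark
from lark.reconstruct import Reconstructor

demo_parser = Lark(r"""
    start: WORD ","? WORD
    WORD: /\w+/
    %ignore " "
""", maybe_placeholders=False)  # Reconstructor asserts this option is disabled

demo_tree = demo_parser.parse("hello, world")
text = Reconstructor(demo_parser).reconstruct(demo_tree)
assert demo_parser.parse(text) == demo_tree  # re-parses to an equivalent tree
# ----------------------------------------------------------------------------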
- for r in self.rules: - print("*", r) - self.parser = earley.Parser(ParserConf(self.rules, callbacks, parser.options.start), - self._match, resolve_ambiguity=True) - - def _build_recons_rules(self, rules): - expand1s = {r.origin for r in rules if r.options.expand1} - - aliases = defaultdict(list) - for r in rules: - if r.alias: - aliases[r.origin].append( r.alias ) - - rule_names = {r.origin for r in rules} - nonterminals = {sym for sym in rule_names - if sym.name.startswith('_') or sym in expand1s or sym in aliases } - - for r in rules: - _recons_exp = [] - for sym in r.expansion: - if not is_discarded_terminal(sym): - if sym in nonterminals: - if sym in expand1s: - v = Tree('expansions', [sym, Terminal(sym.name.upper())]) - else: - v = sym - else: - v = Terminal(sym.name.upper()) - _recons_exp.append(v) - - simplify_rule = SimplifyRule_Visitor() - rule_tree_to_text = RuleTreeToText() - tree = Tree('expansions', [Tree('expansion', _recons_exp)]) - simplify_rule.visit(tree) - expansions = rule_tree_to_text.transform(tree) - - for recons_exp, alias in expansions: - - # Skip self-recursive constructs - if recons_exp == [r.origin]: - continue - - sym = NonTerminal(r.alias) if r.alias else r.origin - - yield Rule(sym, recons_exp, alias=MakeMatchTree(sym.name, r.expansion)) - - for origin, rule_aliases in aliases.items(): - for alias in rule_aliases: - yield Rule(origin, [Terminal(alias.upper())], alias=MakeMatchTree(origin.name, [NonTerminal(alias)])) - yield Rule(origin, [Terminal(origin.name.upper())], alias=MakeMatchTree(origin.name, [origin])) - - def _match(self, term, token): - if isinstance(token, Tree): - return Terminal(token.data.upper()) == term - elif isinstance(token, Token): - return term == Terminal(token.type.upper()) - assert False - - def _reconstruct(self, tree): - # TODO: ambiguity? 
- unreduced_tree = self.parser.parse(tree.children, tree.data) # find a full derivation - assert unreduced_tree.data == tree.data - res = self.write_tokens.transform(unreduced_tree) - for item in res: - if isinstance(item, Tree): - for x in self._reconstruct(item): - yield x - else: - yield item - - def reconstruct(self, tree): - return ''.join(self._reconstruct(tree)) diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/tools/__init__.py b/conda_lock/_vendor/poetry/core/_vendor/lark/tools/__init__.py index e69de29b..c6995c69 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/tools/__init__.py +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/tools/__init__.py @@ -0,0 +1,71 @@ +import sys +from argparse import ArgumentParser, FileType +from textwrap import indent +from logging import DEBUG, INFO, WARN, ERROR +from typing import Optional +import warnings + +from lark import Lark, logger +try: + from interegular import logger as interegular_logger + has_interegular = True +except ImportError: + has_interegular = False + +lalr_argparser = ArgumentParser(add_help=False, epilog='Look at the Lark documentation for more info on the options') + +flags = [ + ('d', 'debug'), + 'keep_all_tokens', + 'regex', + 'propagate_positions', + 'maybe_placeholders', + 'use_bytes' +] + +options = ['start', 'lexer'] + +lalr_argparser.add_argument('-v', '--verbose', action='count', default=0, help="Increase Logger output level, up to three times") +lalr_argparser.add_argument('-s', '--start', action='append', default=[]) +lalr_argparser.add_argument('-l', '--lexer', default='contextual', choices=('basic', 'contextual')) +encoding: Optional[str] = 'utf-8' if sys.version_info > (3, 4) else None +lalr_argparser.add_argument('-o', '--out', type=FileType('w', encoding=encoding), default=sys.stdout, help='the output file (default=stdout)') +lalr_argparser.add_argument('grammar_file', type=FileType('r', encoding=encoding), help='A valid .lark file') + +for flag in flags: + if isinstance(flag, tuple): + options.append(flag[1]) + lalr_argparser.add_argument('-' + flag[0], '--' + flag[1], action='store_true') + elif isinstance(flag, str): + options.append(flag) + lalr_argparser.add_argument('--' + flag, action='store_true') + else: + raise NotImplementedError("flags must only contain strings or tuples of strings") + + +def build_lalr(namespace): + logger.setLevel((ERROR, WARN, INFO, DEBUG)[min(namespace.verbose, 3)]) + if has_interegular: + interegular_logger.setLevel(logger.getEffectiveLevel()) + if len(namespace.start) == 0: + namespace.start.append('start') + kwargs = {n: getattr(namespace, n) for n in options} + return Lark(namespace.grammar_file, parser='lalr', **kwargs), namespace.out + + +def showwarning_as_comment(message, category, filename, lineno, file=None, line=None): + # Based on warnings._showwarnmsg_impl + text = warnings.formatwarning(message, category, filename, lineno, line) + text = indent(text, '# ') + if file is None: + file = sys.stderr + if file is None: + return + try: + file.write(text) + except OSError: + pass + + +def make_warnings_comments(): + warnings.showwarning = showwarning_as_comment diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/tools/nearley.py b/conda_lock/_vendor/poetry/core/_vendor/lark/tools/nearley.py index 0b04fb55..1fc27d56 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/tools/nearley.py +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/tools/nearley.py @@ -1,11 +1,12 @@ -"Converts between Lark and Nearley grammars. Work in progress!" 
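# ----------------------------------------------------------------------------
# [Editor's note] Illustrative sketch, not part of the vendored patch: reusing
# the shared `lalr_argparser`/`build_lalr` helpers added above from a custom
# tool, the same way serialize.py and standalone.py do below. The file name
# 'my_grammar.lark' is a made-up assumption (it must exist on disk, since
# argparse's FileType opens it during parse_args).
from argparse import ArgumentParser
from lark.tools import lalr_argparser, build_lalr

cli = ArgumentParser(prog='my-lalr-tool', parents=[lalr_argparser])
ns = cli.parse_args(['my_grammar.lark', '-s', 'start'])
lark_inst, out = build_lalr(ns)   # -> (Lark instance, output stream from -o)
# ----------------------------------------------------------------------------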
+"Converts Nearley grammars to Lark" import os.path import sys import codecs +import argparse -from lark import Lark, InlineTransformer +from lark import Lark, Transformer, v_args nearley_grammar = r""" start: (ruledef|directive)+ @@ -34,20 +35,23 @@ COMMENT: /#[^\n]*/ REGEXP: /\[.*?\]/ - %import common.ESCAPED_STRING -> STRING + STRING: _STRING "i"? + + %import common.ESCAPED_STRING -> _STRING %import common.WS %ignore WS %ignore COMMENT """ -nearley_grammar_parser = Lark(nearley_grammar, parser='earley', lexer='standard') +nearley_grammar_parser = Lark(nearley_grammar, parser='earley', lexer='basic') def _get_rulename(name): - name = {'_': '_ws_maybe', '__':'_ws'}.get(name, name) + name = {'_': '_ws_maybe', '__': '_ws'}.get(name, name) return 'n_' + name.replace('$', '__DOLLAR__').lower() -class NearleyToLark(InlineTransformer): +@v_args(inline=True) +class NearleyToLark(Transformer): def __init__(self): self._count = 0 self.extra_rules = {} @@ -130,14 +134,14 @@ def _nearley_to_lark(g, builtin_path, n2l, js_code, folder_path, includes): elif statement.data == 'macro': pass # TODO Add support for macros! elif statement.data == 'ruledef': - rule_defs.append( n2l.transform(statement) ) + rule_defs.append(n2l.transform(statement)) else: raise Exception("Unknown statement: %s" % statement) return rule_defs -def create_code_for_nearley_grammar(g, start, builtin_path, folder_path): +def create_code_for_nearley_grammar(g, start, builtin_path, folder_path, es6=False): import js2py emit_code = [] @@ -160,7 +164,10 @@ def emit(x=None): for alias, code in n2l.alias_js_code.items(): js_code.append('%s = (%s);' % (alias, code)) - emit(js2py.translate_js('\n'.join(js_code))) + if es6: + emit(js2py.translate_js6('\n'.join(js_code))) + else: + emit(js2py.translate_js('\n'.join(js_code))) emit('class TransformNearley(Transformer):') for alias in n2l.alias_js_code: emit(" %s = var.get('%s').to_python()" % (alias, alias)) @@ -173,18 +180,23 @@ def emit(x=None): return ''.join(emit_code) -def main(fn, start, nearley_lib): +def main(fn, start, nearley_lib, es6=False): with codecs.open(fn, encoding='utf8') as f: grammar = f.read() - return create_code_for_nearley_grammar(grammar, start, os.path.join(nearley_lib, 'builtin'), os.path.abspath(os.path.dirname(fn))) + return create_code_for_nearley_grammar(grammar, start, os.path.join(nearley_lib, 'builtin'), os.path.abspath(os.path.dirname(fn)), es6=es6) +def get_arg_parser(): + parser = argparse.ArgumentParser(description='Reads a Nearley grammar (with js functions), and outputs an equivalent lark parser.') + parser.add_argument('nearley_grammar', help='Path to the file containing the nearley grammar') + parser.add_argument('start_rule', help='Rule within the nearley grammar to make the base rule') + parser.add_argument('nearley_lib', help='Path to root directory of nearley codebase (used for including builtins)') + parser.add_argument('--es6', help='Enable experimental ES6 support', action='store_true') + return parser if __name__ == '__main__': - if len(sys.argv) < 4: - print("Reads Nearley grammar (with js functions) outputs an equivalent lark parser.") - print("Usage: %s " % sys.argv[0]) + parser = get_arg_parser() + if len(sys.argv) == 1: + parser.print_help(sys.stderr) sys.exit(1) - - fn, start, nearley_lib = sys.argv[1:] - - print(main(fn, start, nearley_lib)) + args = parser.parse_args() + print(main(fn=args.nearley_grammar, start=args.start_rule, nearley_lib=args.nearley_lib, es6=args.es6)) diff --git 
a/conda_lock/_vendor/poetry/core/_vendor/lark/tools/serialize.py b/conda_lock/_vendor/poetry/core/_vendor/lark/tools/serialize.py index fb69d35a..eb28824b 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/tools/serialize.py +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/tools/serialize.py @@ -1,24 +1,18 @@ -import codecs import sys import json -from lark import Lark -from lark.grammar import RuleOptions, Rule +from lark.grammar import Rule from lark.lexer import TerminalDef +from lark.tools import lalr_argparser, build_lalr import argparse -argparser = argparse.ArgumentParser(prog='python -m lark.tools.serialize') #description='''Lark Serialization Tool -- Stores Lark's internal state & LALR analysis as a convenient JSON file''') +argparser = argparse.ArgumentParser(prog='python -m lark.tools.serialize', parents=[lalr_argparser], + description="Lark Serialization Tool - Stores Lark's internal state & LALR analysis as a JSON file", + epilog='Look at the Lark documentation for more info on the options') -argparser.add_argument('grammar_file', type=argparse.FileType('r'), help='A valid .lark file') -argparser.add_argument('-o', '--out', type=argparse.FileType('w'), default=sys.stdout, help='json file path to create (default=stdout)') -argparser.add_argument('-s', '--start', default='start', help='start symbol (default="start")', nargs='+') -argparser.add_argument('-l', '--lexer', default='standard', choices=['standard', 'contextual'], help='lexer type (default="standard")') - - -def serialize(infile, outfile, lexer, start): - lark_inst = Lark(infile, parser="lalr", lexer=lexer, start=start) # TODO contextual +def serialize(lark_inst, outfile): data, memo = lark_inst.memo_serialize([TerminalDef, Rule]) outfile.write('{\n') outfile.write(' "data": %s,\n' % json.dumps(data)) @@ -27,13 +21,12 @@ def serialize(infile, outfile, lexer, start): def main(): - if len(sys.argv) == 1 or '-h' in sys.argv or '--help' in sys.argv: - print("Lark Serialization Tool - Stores Lark's internal state & LALR analysis as a JSON file") - print("") - argparser.print_help() - else: - args = argparser.parse_args() - serialize(args.grammar_file, args.out, args.lexer, args.start) + if len(sys.argv)==1: + argparser.print_help(sys.stderr) + sys.exit(1) + ns = argparser.parse_args() + serialize(*build_lalr(ns)) + if __name__ == '__main__': - main() \ No newline at end of file + main() diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/tools/standalone.py b/conda_lock/_vendor/poetry/core/_vendor/lark/tools/standalone.py index 72042cda..9940ccbf 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/tools/standalone.py +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/tools/standalone.py @@ -3,7 +3,7 @@ # # Lark Stand-alone Generator Tool # ---------------------------------- -# Generates a stand-alone LALR(1) parser with a standard lexer +# Generates a stand-alone LALR(1) parser # # Git: https://github.com/erezsh/lark # Author: Erez Shinan (erezshin@gmail.com) @@ -24,23 +24,29 @@ # # -import os -from io import open +from copy import deepcopy +from abc import ABC, abstractmethod +from types import ModuleType +from typing import ( + TypeVar, Generic, Type, Tuple, List, Dict, Iterator, Collection, Callable, Optional, FrozenSet, Any, + Union, Iterable, IO, TYPE_CHECKING, overload, Sequence, + Pattern as REPattern, ClassVar, Set, Mapping +) ###} -import codecs import sys +import token, tokenize import os -from pprint import pprint from os import path from collections import defaultdict +from functools import 
partial +from argparse import ArgumentParser import lark -from lark import Lark -from lark.parsers.lalr_analysis import Reduce +from lark.tools import lalr_argparser, build_lalr, make_warnings_comments -from lark.grammar import RuleOptions, Rule +from lark.grammar import Rule from lark.lexer import TerminalDef _dir = path.dirname(__file__) @@ -53,75 +59,138 @@ 'utils.py', 'tree.py', 'visitors.py', - 'indenter.py', 'grammar.py', 'lexer.py', 'common.py', 'parse_tree_builder.py', - 'parsers/lalr_parser.py', 'parsers/lalr_analysis.py', + 'parsers/lalr_parser_state.py', + 'parsers/lalr_parser.py', + 'parsers/lalr_interactive_parser.py', 'parser_frontends.py', 'lark.py', + 'indenter.py', ] def extract_sections(lines): section = None text = [] sections = defaultdict(list) - for l in lines: - if l.startswith('###'): - if l[3] == '{': - section = l[4:].strip() - elif l[3] == '}': + for line in lines: + if line.startswith('###'): + if line[3] == '{': + section = line[4:].strip() + elif line[3] == '}': sections[section] += text section = None text = [] else: - raise ValueError(l) + raise ValueError(line) elif section: - text.append(l) + text.append(line) + + return {name: ''.join(text) for name, text in sections.items()} + + +def strip_docstrings(line_gen): + """ Strip comments and docstrings from a file. + Based on code from: https://stackoverflow.com/questions/1769332/script-to-remove-python-comments-docstrings + """ + res = [] + + prev_toktype = token.INDENT + last_lineno = -1 + last_col = 0 + + tokgen = tokenize.generate_tokens(line_gen) + for toktype, ttext, (slineno, scol), (elineno, ecol), ltext in tokgen: + if slineno > last_lineno: + last_col = 0 + if scol > last_col: + res.append(" " * (scol - last_col)) + if toktype == token.STRING and prev_toktype == token.INDENT: + # Docstring + res.append("#--") + elif toktype == tokenize.COMMENT: + # Comment + res.append("##\n") + else: + res.append(ttext) + prev_toktype = toktype + last_col = ecol + last_lineno = elineno + + return ''.join(res) + + +def gen_standalone(lark_inst, output=None, out=sys.stdout, compress=False): + if output is None: + output = partial(print, file=out) + + import pickle, zlib, base64 + def compressed_output(obj): + s = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL) + c = zlib.compress(s) + output(repr(base64.b64encode(c))) + + def output_decompress(name): + output('%(name)s = pickle.loads(zlib.decompress(base64.b64decode(%(name)s)))' % locals()) + + output('# The file was automatically generated by Lark v%s' % lark.__version__) + output('__version__ = "%s"' % lark.__version__) + output() + + for i, pyfile in enumerate(EXTRACT_STANDALONE_FILES): + with open(os.path.join(_larkdir, pyfile)) as f: + code = extract_sections(f)['standalone'] + if i: # if not this file + code = strip_docstrings(partial(next, iter(code.splitlines(True)))) + output(code) - return {name:''.join(text) for name, text in sections.items()} + data, m = lark_inst.memo_serialize([TerminalDef, Rule]) + output('import pickle, zlib, base64') + if compress: + output('DATA = (') + compressed_output(data) + output(')') + output_decompress('DATA') + output('MEMO = (') + compressed_output(m) + output(')') + output_decompress('MEMO') + else: + output('DATA = (') + output(data) + output(')') + output('MEMO = (') + output(m) + output(')') -def main(fobj, start): - lark_inst = Lark(fobj, parser="lalr", lexer="contextual", start=start) + output('Shift = 0') + output('Reduce = 1') + output("def Lark_StandAlone(**kwargs):") + output(" return Lark._load_from_dict(DATA, MEMO, 
**kwargs)") - print('# The file was automatically generated by Lark v%s' % lark.__version__) - for pyfile in EXTRACT_STANDALONE_FILES: - with open(os.path.join(_larkdir, pyfile)) as f: - print (extract_sections(f)['standalone']) - data, m = lark_inst.memo_serialize([TerminalDef, Rule]) - print( 'DATA = (' ) - # pprint(data, width=160) - print(data) - print(')') - print( 'MEMO = (') - print(m) - print(')') +def main(): + make_warnings_comments() + parser = ArgumentParser(prog="prog='python -m lark.tools.standalone'", description="Lark Stand-alone Generator Tool", + parents=[lalr_argparser], epilog='Look at the Lark documentation for more info on the options') + parser.add_argument('-c', '--compress', action='store_true', default=0, help="Enable compression") + if len(sys.argv) == 1: + parser.print_help(sys.stderr) + sys.exit(1) + ns = parser.parse_args() - print('Shift = 0') - print('Reduce = 1') - print("def Lark_StandAlone(transformer=None, postlex=None):") - print(" return Lark._load_from_dict(DATA, MEMO, transformer=transformer, postlex=postlex)") + lark_inst, out = build_lalr(ns) + gen_standalone(lark_inst, out=out, compress=ns.compress) + ns.out.close() + ns.grammar_file.close() if __name__ == '__main__': - if len(sys.argv) < 2: - print("Lark Stand-alone Generator Tool") - print("Usage: python -m lark.tools.standalone []") - sys.exit(1) - - if len(sys.argv) == 3: - fn, start = sys.argv[1:] - elif len(sys.argv) == 2: - fn, start = sys.argv[1], 'start' - else: - assert False, sys.argv - - with codecs.open(fn, encoding='utf8') as f: - main(f, start) + main() diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/tree.py b/conda_lock/_vendor/poetry/core/_vendor/lark/tree.py index f9767e43..438837eb 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/tree.py +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/tree.py @@ -1,50 +1,126 @@ -try: - from future_builtins import filter -except ImportError: - pass - +import sys from copy import deepcopy -from collections import OrderedDict +from typing import List, Callable, Iterator, Union, Optional, Generic, TypeVar, TYPE_CHECKING + +if TYPE_CHECKING: + from .lexer import TerminalDef, Token + try: + import rich + except ImportError: + pass + if sys.version_info >= (3, 8): + from typing import Literal + else: + from typing_extensions import Literal ###{standalone +from collections import OrderedDict + class Meta: + + empty: bool + line: int + column: int + start_pos: int + end_line: int + end_column: int + end_pos: int + orig_expansion: 'List[TerminalDef]' + match_tree: bool + def __init__(self): self.empty = True -class Tree(object): - def __init__(self, data, children, meta=None): + +_Leaf_T = TypeVar("_Leaf_T") +Branch = Union[_Leaf_T, 'Tree[_Leaf_T]'] + + +class Tree(Generic[_Leaf_T]): + """The main tree class. + + Creates a new tree, and stores "data" and "children" in attributes of the same name. + Trees can be hashed and compared. + + Parameters: + data: The name of the rule or alias + children: List of matched sub-rules and terminals + meta: Line & Column numbers (if ``propagate_positions`` is enabled). + meta attributes: (line, column, end_line, end_column, start_pos, end_pos, + container_line, container_column, container_end_line, container_end_column) + container_* attributes consider all symbols, including those that have been inlined in the tree. + For example, in the rule 'a: _A B _C', the regular attributes will mark the start and end of B, + but the container_* attributes will also include _A and _C in the range. 
However, rules that + contain 'a' will consider it in full, including _A and _C for all attributes. + """ + + data: str + children: 'List[Branch[_Leaf_T]]' + + def __init__(self, data: str, children: 'List[Branch[_Leaf_T]]', meta: Optional[Meta]=None) -> None: self.data = data self.children = children self._meta = meta @property - def meta(self): + def meta(self) -> Meta: if self._meta is None: self._meta = Meta() return self._meta def __repr__(self): - return 'Tree(%s, %s)' % (self.data, self.children) + return 'Tree(%r, %r)' % (self.data, self.children) def _pretty_label(self): return self.data def _pretty(self, level, indent_str): + yield f'{indent_str*level}{self._pretty_label()}' if len(self.children) == 1 and not isinstance(self.children[0], Tree): - return [ indent_str*level, self._pretty_label(), '\t', '%s' % (self.children[0],), '\n'] + yield f'\t{self.children[0]}\n' + else: + yield '\n' + for n in self.children: + if isinstance(n, Tree): + yield from n._pretty(level+1, indent_str) + else: + yield f'{indent_str*(level+1)}{n}\n' + + def pretty(self, indent_str: str=' ') -> str: + """Returns an indented string representation of the tree. + + Great for debugging. + """ + return ''.join(self._pretty(0, indent_str)) - l = [ indent_str*level, self._pretty_label(), '\n' ] - for n in self.children: - if isinstance(n, Tree): - l += n._pretty(level+1, indent_str) - else: - l += [ indent_str*(level+1), '%s' % (n,), '\n' ] + def __rich__(self, parent:Optional['rich.tree.Tree']=None) -> 'rich.tree.Tree': + """Returns a tree widget for the 'rich' library. - return l + Example: + :: + from rich import print + from lark import Tree - def pretty(self, indent_str=' '): - return ''.join(self._pretty(0, indent_str)) + tree = Tree('root', ['node1', 'node2']) + print(tree) + """ + return self._rich(parent) + + def _rich(self, parent): + if parent: + tree = parent.add(f'[bold]{self.data}[/bold]') + else: + import rich.tree + tree = rich.tree.Tree(self.data) + + for c in self.children: + if isinstance(c, Tree): + c._rich(tree) + else: + tree.add(f'[green]{c}[/green]') + + return tree def __eq__(self, other): try: @@ -55,37 +131,70 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) - def __hash__(self): + def __hash__(self) -> int: return hash((self.data, tuple(self.children))) - def iter_subtrees(self): + def iter_subtrees(self) -> 'Iterator[Tree[_Leaf_T]]': + """Depth-first iteration. + + Iterates over all the subtrees, never returning to the same node twice (Lark's parse-tree is actually a DAG). + """ queue = [self] subtrees = OrderedDict() for subtree in queue: subtrees[id(subtree)] = subtree - queue += [c for c in reversed(subtree.children) + # Reason for type ignore https://github.com/python/mypy/issues/10999 + queue += [c for c in reversed(subtree.children) # type: ignore[misc] if isinstance(c, Tree) and id(c) not in subtrees] del queue return reversed(list(subtrees.values())) - def find_pred(self, pred): - "Find all nodes where pred(tree) == True" + def iter_subtrees_topdown(self): + """Breadth-first iteration. + + Iterates over all the subtrees, return nodes in order like pretty() does. 
+ """ + stack = [self] + stack_append = stack.append + stack_pop = stack.pop + while stack: + node = stack_pop() + if not isinstance(node, Tree): + continue + yield node + for child in reversed(node.children): + stack_append(child) + + def find_pred(self, pred: 'Callable[[Tree[_Leaf_T]], bool]') -> 'Iterator[Tree[_Leaf_T]]': + """Returns all nodes of the tree that evaluate pred(node) as true.""" return filter(pred, self.iter_subtrees()) - def find_data(self, data): - "Find all nodes where tree.data == data" + def find_data(self, data: str) -> 'Iterator[Tree[_Leaf_T]]': + """Returns all nodes of the tree whose data equals the given data.""" return self.find_pred(lambda t: t.data == data) ###} - def expand_kids_by_index(self, *indices): - "Expand (inline) children at the given indices" - for i in sorted(indices, reverse=True): # reverse so that changing tail won't affect indices - kid = self.children[i] - self.children[i:i+1] = kid.children + def expand_kids_by_data(self, *data_values): + """Expand (inline) children with any of the given data values. Returns True if anything changed""" + changed = False + for i in range(len(self.children)-1, -1, -1): + child = self.children[i] + if isinstance(child, Tree) and child.data in data_values: + self.children[i:i+1] = child.children + changed = True + return changed + + + def scan_values(self, pred: 'Callable[[Branch[_Leaf_T]], bool]') -> Iterator[_Leaf_T]: + """Return all values in the tree that evaluate pred(value) as true. - def scan_values(self, pred): + This can be used to find all the tokens in the tree. + + Example: + >>> all_tokens = tree.scan_values(lambda v: isinstance(v, Token)) + """ for c in self.children: if isinstance(c, Tree): for t in c.scan_values(pred): @@ -94,46 +203,35 @@ def scan_values(self, pred): if pred(c): yield c - def iter_subtrees_topdown(self): - stack = [self] - while stack: - node = stack.pop() - if not isinstance(node, Tree): - continue - yield node - for n in reversed(node.children): - stack.append(n) - def __deepcopy__(self, memo): return type(self)(self.data, deepcopy(self.children, memo), meta=self._meta) - def copy(self): + def copy(self) -> 'Tree[_Leaf_T]': return type(self)(self.data, self.children) - def set(self, data, children): + def set(self, data: str, children: 'List[Branch[_Leaf_T]]') -> None: self.data = data self.children = children - # XXX Deprecated! 
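# ----------------------------------------------------------------------------
# [Editor's note] Illustrative sketch, not part of the vendored patch:
# exercising the Tree traversal helpers above on a hand-built tree (imports
# assume plain `lark` rather than the vendored path).
from lark import Tree, Token

demo = Tree('start', [
    Tree('expr', [Token('NUMBER', '1'), Token('NUMBER', '2')]),
    Tree('expr', [Token('NUMBER', '3')]),
])

print(demo.pretty())                       # indented, human-readable rendering
exprs = list(demo.find_data('expr'))       # every subtree whose data == 'expr'
leaves = list(demo.scan_values(lambda v: isinstance(v, Token)))
assert [str(leaf) for leaf in leaves] == ['1', '2', '3']
# ----------------------------------------------------------------------------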
Here for backwards compatibility <0.6.0 - @property - def line(self): - return self.meta.line - @property - def column(self): - return self.meta.column - @property - def end_line(self): - return self.meta.end_line - @property - def end_column(self): - return self.meta.end_column + +ParseTree = Tree['Token'] class SlottedTree(Tree): __slots__ = 'data', 'children', 'rule', '_meta' -def pydot__tree_to_png(tree, filename, rankdir="LR", **kwargs): +def pydot__tree_to_png(tree: Tree, filename: str, rankdir: 'Literal["TB", "LR", "BT", "RL"]'="LR", **kwargs) -> None: + graph = pydot__tree_to_graph(tree, rankdir, **kwargs) + graph.write_png(filename) + + +def pydot__tree_to_dot(tree: Tree, filename, rankdir="LR", **kwargs): + graph = pydot__tree_to_graph(tree, rankdir, **kwargs) + graph.write(filename) + + +def pydot__tree_to_graph(tree: Tree, rankdir="LR", **kwargs): """Creates a colorful image that represents the tree (data+children, without meta) Possible values for `rankdir` are "TB", "LR", "BT", "RL", corresponding to @@ -144,7 +242,7 @@ def pydot__tree_to_png(tree, filename, rankdir="LR", **kwargs): possible attributes, see https://www.graphviz.org/doc/info/attrs.html. """ - import pydot + import pydot # type: ignore[import] graph = pydot.Dot(graph_type='digraph', rankdir=rankdir, **kwargs) i = [0] @@ -161,7 +259,7 @@ def _to_pydot(subtree): subnodes = [_to_pydot(child) if isinstance(child, Tree) else new_leaf(child) for child in subtree.children] - node = pydot.Node(i[0], style="filled", fillcolor="#%x"%color, label=subtree.data) + node = pydot.Node(i[0], style="filled", fillcolor="#%x" % color, label=subtree.data) i[0] += 1 graph.add_node(node) @@ -171,5 +269,4 @@ def _to_pydot(subtree): return node _to_pydot(tree) - graph.write_png(filename) - + return graph diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/tree_matcher.py b/conda_lock/_vendor/poetry/core/_vendor/lark/tree_matcher.py new file mode 100644 index 00000000..0f42652e --- /dev/null +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/tree_matcher.py @@ -0,0 +1,186 @@ +"""Tree matcher based on Lark grammar""" + +import re +from collections import defaultdict + +from . 
import Tree, Token +from .common import ParserConf +from .parsers import earley +from .grammar import Rule, Terminal, NonTerminal + + +def is_discarded_terminal(t): + return t.is_term and t.filter_out + + +class _MakeTreeMatch: + def __init__(self, name, expansion): + self.name = name + self.expansion = expansion + + def __call__(self, args): + t = Tree(self.name, args) + t.meta.match_tree = True + t.meta.orig_expansion = self.expansion + return t + + +def _best_from_group(seq, group_key, cmp_key): + d = {} + for item in seq: + key = group_key(item) + if key in d: + v1 = cmp_key(item) + v2 = cmp_key(d[key]) + if v2 > v1: + d[key] = item + else: + d[key] = item + return list(d.values()) + + +def _best_rules_from_group(rules): + rules = _best_from_group(rules, lambda r: r, lambda r: -len(r.expansion)) + rules.sort(key=lambda r: len(r.expansion)) + return rules + + +def _match(term, token): + if isinstance(token, Tree): + name, _args = parse_rulename(term.name) + return token.data == name + elif isinstance(token, Token): + return term == Terminal(token.type) + assert False, (term, token) + + +def make_recons_rule(origin, expansion, old_expansion): + return Rule(origin, expansion, alias=_MakeTreeMatch(origin.name, old_expansion)) + + +def make_recons_rule_to_term(origin, term): + return make_recons_rule(origin, [Terminal(term.name)], [term]) + + +def parse_rulename(s): + "Parse rule names that may contain a template syntax (like rule{a, b, ...})" + name, args_str = re.match(r'(\w+)(?:{(.+)})?', s).groups() + args = args_str and [a.strip() for a in args_str.split(',')] + return name, args + + + +class ChildrenLexer: + def __init__(self, children): + self.children = children + + def lex(self, parser_state): + return self.children + +class TreeMatcher: + """Match the elements of a tree node, based on an ontology + provided by a Lark grammar. + + Supports templates and inlined rules (`rule{a, b,..}` and `_rule`) + + Initialize with an instance of Lark. + """ + + def __init__(self, parser): + # XXX TODO calling compile twice returns different results! + assert not parser.options.maybe_placeholders + # XXX TODO: we just ignore the potential existence of a postlexer + self.tokens, rules, _extra = parser.grammar.compile(parser.options.start, set()) + + self.rules_for_root = defaultdict(list) + + self.rules = list(self._build_recons_rules(rules)) + self.rules.reverse() + + # Choose the best rule from each group of {rule => [rule.alias]}, since we only really need one derivation. 
+ self.rules = _best_rules_from_group(self.rules) + + self.parser = parser + self._parser_cache = {} + + def _build_recons_rules(self, rules): + "Convert tree-parsing/construction rules to tree-matching rules" + expand1s = {r.origin for r in rules if r.options.expand1} + + aliases = defaultdict(list) + for r in rules: + if r.alias: + aliases[r.origin].append(r.alias) + + rule_names = {r.origin for r in rules} + nonterminals = {sym for sym in rule_names + if sym.name.startswith('_') or sym in expand1s or sym in aliases} + + seen = set() + for r in rules: + recons_exp = [sym if sym in nonterminals else Terminal(sym.name) + for sym in r.expansion if not is_discarded_terminal(sym)] + + # Skip self-recursive constructs + if recons_exp == [r.origin] and r.alias is None: + continue + + sym = NonTerminal(r.alias) if r.alias else r.origin + rule = make_recons_rule(sym, recons_exp, r.expansion) + + if sym in expand1s and len(recons_exp) != 1: + self.rules_for_root[sym.name].append(rule) + + if sym.name not in seen: + yield make_recons_rule_to_term(sym, sym) + seen.add(sym.name) + else: + if sym.name.startswith('_') or sym in expand1s: + yield rule + else: + self.rules_for_root[sym.name].append(rule) + + for origin, rule_aliases in aliases.items(): + for alias in rule_aliases: + yield make_recons_rule_to_term(origin, NonTerminal(alias)) + yield make_recons_rule_to_term(origin, origin) + + def match_tree(self, tree, rulename): + """Match the elements of `tree` to the symbols of rule `rulename`. + + Parameters: + tree (Tree): the tree node to match + rulename (str): The expected full rule name (including template args) + + Returns: + Tree: an unreduced tree that matches `rulename` + + Raises: + UnexpectedToken: If no match was found. + + Note: + It's the callers' responsibility match the tree recursively. + """ + if rulename: + # validate + name, _args = parse_rulename(rulename) + assert tree.data == name + else: + rulename = tree.data + + # TODO: ambiguity? + try: + parser = self._parser_cache[rulename] + except KeyError: + rules = self.rules + _best_rules_from_group(self.rules_for_root[rulename]) + + # TODO pass callbacks through dict, instead of alias? + callbacks = {rule: rule.alias for rule in rules} + conf = ParserConf(rules, callbacks, [rulename]) + parser = earley.Parser(self.parser.lexer_conf, conf, _match, resolve_ambiguity=True) + self._parser_cache[rulename] = parser + + # find a full derivation + unreduced_tree = parser.parse(ChildrenLexer(tree.children), rulename) + assert unreduced_tree.data == rulename + return unreduced_tree diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/tree_templates.py b/conda_lock/_vendor/poetry/core/_vendor/lark/tree_templates.py new file mode 100644 index 00000000..6ec7323f --- /dev/null +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/tree_templates.py @@ -0,0 +1,180 @@ +"""This module defines utilities for matching and translation tree templates. + +A tree templates is a tree that contains nodes that are template variables. + +""" + +from typing import Union, Optional, Mapping, Dict, Tuple, Iterator + +from lark import Tree, Transformer +from lark.exceptions import MissingVariableError + +Branch = Union[Tree[str], str] +TreeOrCode = Union[Tree[str], str] +MatchResult = Dict[str, Tree] +_TEMPLATE_MARKER = '$' + + +class TemplateConf: + """Template Configuration + + Allows customization for different uses of Template + + parse() must return a Tree instance. 
+ """ + + def __init__(self, parse=None): + self._parse = parse + + def test_var(self, var: Union[Tree[str], str]) -> Optional[str]: + """Given a tree node, if it is a template variable return its name. Otherwise, return None. + + This method may be overridden for customization + + Parameters: + var: Tree | str - The tree node to test + + """ + if isinstance(var, str): + return _get_template_name(var) + + if ( + isinstance(var, Tree) + and var.data == "var" + and len(var.children) > 0 + and isinstance(var.children[0], str) + ): + return _get_template_name(var.children[0]) + + return None + + def _get_tree(self, template: TreeOrCode) -> Tree[str]: + if isinstance(template, str): + assert self._parse + template = self._parse(template) + + if not isinstance(template, Tree): + raise TypeError("template parser must return a Tree instance") + + return template + + def __call__(self, template: Tree[str]) -> 'Template': + return Template(template, conf=self) + + def _match_tree_template(self, template: TreeOrCode, tree: Branch) -> Optional[MatchResult]: + """Returns dict of {var: match} if found a match, else None + """ + template_var = self.test_var(template) + if template_var: + if not isinstance(tree, Tree): + raise TypeError(f"Template variables can only match Tree instances. Not {tree!r}") + return {template_var: tree} + + if isinstance(template, str): + if template == tree: + return {} + return None + + assert isinstance(template, Tree) and isinstance(tree, Tree), f"template={template} tree={tree}" + + if template.data == tree.data and len(template.children) == len(tree.children): + res = {} + for t1, t2 in zip(template.children, tree.children): + matches = self._match_tree_template(t1, t2) + if matches is None: + return None + + res.update(matches) + + return res + + return None + + +class _ReplaceVars(Transformer[str, Tree[str]]): + def __init__(self, conf: TemplateConf, vars: Mapping[str, Tree[str]]) -> None: + super().__init__() + self._conf = conf + self._vars = vars + + def __default__(self, data, children, meta) -> Tree[str]: + tree = super().__default__(data, children, meta) + + var = self._conf.test_var(tree) + if var: + try: + return self._vars[var] + except KeyError: + raise MissingVariableError(f"No mapping for template variable ({var})") + return tree + + +class Template: + """Represents a tree template, tied to a specific configuration + + A tree template is a tree that contains nodes that are template variables. + Those variables will match any tree. + (future versions may support annotations on the variables, to allow more complex templates) + """ + + def __init__(self, tree: Tree[str], conf: TemplateConf = TemplateConf()): + self.conf = conf + self.tree = conf._get_tree(tree) + + def match(self, tree: TreeOrCode) -> Optional[MatchResult]: + """Match a tree template to a tree. + + A tree template without variables will only match ``tree`` if it is equal to the template. + + Parameters: + tree (Tree): The tree to match to the template + + Returns: + Optional[Dict[str, Tree]]: If match is found, returns a dictionary mapping + template variable names to their matching tree nodes. + If no match was found, returns None. + """ + tree = self.conf._get_tree(tree) + return self.conf._match_tree_template(self.tree, tree) + + def search(self, tree: TreeOrCode) -> Iterator[Tuple[Tree[str], MatchResult]]: + """Search for all occurrences of the tree template inside ``tree``. 
+ """ + tree = self.conf._get_tree(tree) + for subtree in tree.iter_subtrees(): + res = self.match(subtree) + if res: + yield subtree, res + + def apply_vars(self, vars: Mapping[str, Tree[str]]) -> Tree[str]: + """Apply vars to the template tree + """ + return _ReplaceVars(self.conf, vars).transform(self.tree) + + +def translate(t1: Template, t2: Template, tree: TreeOrCode): + """Search tree and translate each occurrence of t1 into t2. + """ + tree = t1.conf._get_tree(tree) # ensure it's a tree, parse if necessary and possible + for subtree, vars in t1.search(tree): + res = t2.apply_vars(vars) + subtree.set(res.data, res.children) + return tree + + +class TemplateTranslator: + """Utility class for translating a collection of patterns + """ + + def __init__(self, translations: Mapping[Template, Template]): + assert all(isinstance(k, Template) and isinstance(v, Template) for k, v in translations.items()) + self.translations = translations + + def translate(self, tree: Tree[str]): + for k, v in self.translations.items(): + tree = translate(k, v, tree) + return tree + + +def _get_template_name(value: str) -> Optional[str]: + return value.lstrip(_TEMPLATE_MARKER) if value.startswith(_TEMPLATE_MARKER) else None diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/utils.py b/conda_lock/_vendor/poetry/core/_vendor/lark/utils.py index 36f50d1e..70ac27e2 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/utils.py +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/utils.py @@ -1,69 +1,40 @@ -import sys +import unicodedata import os -from functools import reduce -from ast import literal_eval +from itertools import product from collections import deque +from typing import Callable, Iterator, List, Optional, Tuple, Type, TypeVar, Union, Dict, Any, Sequence, Iterable, AbstractSet -class fzset(frozenset): - def __repr__(self): - return '{%s}' % ', '.join(map(repr, self)) - - -def classify_bool(seq, pred): - true_elems = [] - false_elems = [] +###{standalone +import sys, re +import logging - for elem in seq: - if pred(elem): - true_elems.append(elem) - else: - false_elems.append(elem) +logger: logging.Logger = logging.getLogger("lark") +logger.addHandler(logging.StreamHandler()) +# Set to highest level, since we have some warnings amongst the code +# By default, we should not output any log messages +logger.setLevel(logging.CRITICAL) - return true_elems, false_elems +NO_VALUE = object() +T = TypeVar("T") -def bfs(initial, expand): - open_q = deque(list(initial)) - visited = set(open_q) - while open_q: - node = open_q.popleft() - yield node - for next_node in expand(node): - if next_node not in visited: - visited.add(next_node) - open_q.append(next_node) - - - -def _serialize(value, memo): - if isinstance(value, Serialize): - return value.serialize(memo) - elif isinstance(value, list): - return [_serialize(elem, memo) for elem in value] - elif isinstance(value, frozenset): - return list(value) # TODO reversible? 
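# ----------------------------------------------------------------------------
# [Editor's note] Illustrative sketch, not part of the vendored patch: Template
# matching from tree_templates.py above, on hand-built trees (no parse
# callback). Note that template variables only match Tree instances; all names
# here are made up.
from lark import Tree
from lark.tree_templates import TemplateConf

conf = TemplateConf()                        # no parser: pass Trees directly
template = conf(Tree('add', ['$x', '$y']))   # '$'-prefixed leaves are variables

found = template.match(Tree('add', [Tree('num', ['1']), Tree('num', ['2'])]))
assert found == {'x': Tree('num', ['1']), 'y': Tree('num', ['2'])}
# ----------------------------------------------------------------------------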
- elif isinstance(value, dict): - return {key:_serialize(elem, memo) for key, elem in value.items()} - return value - -###{standalone -def classify(seq, key=None, value=None): - d = {} +def classify(seq: Iterable, key: Optional[Callable] = None, value: Optional[Callable] = None) -> Dict: + d: Dict[Any, Any] = {} for item in seq: k = key(item) if (key is not None) else item v = value(item) if (value is not None) else item - if k in d: + try: d[k].append(v) - else: + except KeyError: d[k] = [v] return d -def _deserialize(data, namespace, memo): +def _deserialize(data: Any, namespace: Dict[str, Any], memo: Dict) -> Any: if isinstance(data, dict): - if '__type__' in data: # Object + if '__type__' in data: # Object class_ = namespace[data['__type__']] return class_.deserialize(data, memo) elif '@' in data: @@ -74,26 +45,35 @@ def _deserialize(data, namespace, memo): return data -class Serialize(object): - def memo_serialize(self, types_to_memoize): +_T = TypeVar("_T", bound="Serialize") + +class Serialize: + """Safe-ish serialization interface that doesn't rely on Pickle + + Attributes: + __serialize_fields__ (List[str]): Fields (aka attributes) to serialize. + __serialize_namespace__ (list): List of classes that deserialization is allowed to instantiate. + Should include all field types that aren't builtin types. + """ + + def memo_serialize(self, types_to_memoize: List) -> Any: memo = SerializeMemoizer(types_to_memoize) return self.serialize(memo), memo.serialize() - def serialize(self, memo=None): + def serialize(self, memo = None) -> Dict[str, Any]: if memo and memo.in_types(self): return {'@': memo.memoized.get(self)} fields = getattr(self, '__serialize_fields__') res = {f: _serialize(getattr(self, f), memo) for f in fields} res['__type__'] = type(self).__name__ - postprocess = getattr(self, '_serialize', None) - if postprocess: - postprocess(res, memo) + if hasattr(self, '_serialize'): + self._serialize(res, memo) # type: ignore[attr-defined] return res @classmethod - def deserialize(cls, data, memo): - namespace = getattr(cls, '__serialize_namespace__', {}) + def deserialize(cls: Type[_T], data: Dict[str, Any], memo: Dict[int, Any]) -> _T: + namespace = getattr(cls, '__serialize_namespace__', []) namespace = {c.__name__:c for c in namespace} fields = getattr(cls, '__serialize_fields__') @@ -107,77 +87,50 @@ def deserialize(cls, data, memo): setattr(inst, f, _deserialize(data[f], namespace, memo)) except KeyError as e: raise KeyError("Cannot find key for class", cls, e) - postprocess = getattr(inst, '_deserialize', None) - if postprocess: - postprocess() + + if hasattr(inst, '_deserialize'): + inst._deserialize() # type: ignore[attr-defined] + return inst class SerializeMemoizer(Serialize): + "A version of serialize that memoizes objects to reduce space" + __serialize_fields__ = 'memoized', - def __init__(self, types_to_memoize): + def __init__(self, types_to_memoize: List) -> None: self.types_to_memoize = tuple(types_to_memoize) self.memoized = Enumerator() - def in_types(self, value): + def in_types(self, value: Serialize) -> bool: return isinstance(value, self.types_to_memoize) - def serialize(self): + def serialize(self) -> Dict[int, Any]: # type: ignore[override] return _serialize(self.memoized.reversed(), None) @classmethod - def deserialize(cls, data, namespace, memo): + def deserialize(cls, data: Dict[int, Any], namespace: Dict[str, Any], memo: Dict[Any, Any]) -> Dict[int, Any]: # type: ignore[override] return _deserialize(data, namespace, memo) - -try: - STRING_TYPE = basestring 
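# ----------------------------------------------------------------------------
# [Editor's note] Illustrative sketch, not part of the vendored patch: a
# round-trip through the internal Serialize helper documented above. `Point`
# is a made-up class, and the import assumes plain `lark` rather than the
# vendored path.
from lark.utils import Serialize

class Point(Serialize):
    __serialize_fields__ = 'x', 'y'

    def __init__(self, x: int, y: int) -> None:
        self.x, self.y = x, y

data = Point(1, 2).serialize()           # {'x': 1, 'y': 2, '__type__': 'Point'}
restored = Point.deserialize(data, {})   # rebuilt without relying on pickle
assert (restored.x, restored.y) == (1, 2)
# ----------------------------------------------------------------------------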
-except NameError: # Python 3 - STRING_TYPE = str - - -import types -from functools import wraps, partial -from contextlib import contextmanager - -Str = type(u'') -try: - classtype = types.ClassType # Python2 -except AttributeError: - classtype = type # Python3 - -def smart_decorator(f, create_decorator): - if isinstance(f, types.FunctionType): - return wraps(f)(create_decorator(f, True)) - - elif isinstance(f, (classtype, type, types.BuiltinFunctionType)): - return wraps(f)(create_decorator(f, False)) - - elif isinstance(f, types.MethodType): - return wraps(f)(create_decorator(f.__func__, True)) - - elif isinstance(f, partial): - # wraps does not work for partials in 2.7: https://bugs.python.org/issue3445 - return wraps(f.func)(create_decorator(lambda *args, **kw: f(*args[1:], **kw), True)) - - else: - return create_decorator(f.__func__.__call__, True) - try: import regex + _has_regex = True except ImportError: - regex = None + _has_regex = False -import sys, re -Py36 = (sys.version_info[:2] >= (3, 6)) +if sys.version_info >= (3, 11): + import re._parser as sre_parse + import re._constants as sre_constants +else: + import sre_parse + import sre_constants -import sre_parse -import sre_constants categ_pattern = re.compile(r'\\p{[A-Za-z_]+}') -def get_regexp_width(expr): - if regex: + +def get_regexp_width(expr: str) -> Union[Tuple[int, int], List[int]]: + if _has_regex: # Since `sre_parse` cannot deal with Unicode categories of the form `\p{Mn}`, we replace these with # a simple letter, which makes no difference as we are only trying to get the possible lengths of the regex # match here below. @@ -187,61 +140,66 @@ def get_regexp_width(expr): raise ImportError('`regex` module must be installed in order to use Unicode categories.', expr) regexp_final = expr try: - return [int(x) for x in sre_parse.parse(regexp_final).getwidth()] + # Fixed in next version (past 0.960) of typeshed + return [int(x) for x in sre_parse.parse(regexp_final).getwidth()] # type: ignore[attr-defined] except sre_constants.error: - raise ValueError(expr) + if not _has_regex: + raise ValueError(expr) + else: + # sre_parse does not support the new features in regex. To not completely fail in that case, + # we manually test for the most important info (whether the empty string is matched) + c = regex.compile(regexp_final) + # Python 3.11.7 introducded sre_parse.MAXWIDTH that is used instead of MAXREPEAT + # See lark-parser/lark#1376 and python/cpython#109859 + MAXWIDTH = getattr(sre_parse, "MAXWIDTH", sre_constants.MAXREPEAT) + if c.match('') is None: + # MAXREPEAT is a none pickable subclass of int, therefore needs to be converted to enable caching + return 1, int(MAXWIDTH) + else: + return 0, int(MAXWIDTH) ###} -def dedup_list(l): - """Given a list (l) will removing duplicates from the list, - preserving the original order of the list. Assumes that - the list entries are hashable.""" - dedup = set() - return [ x for x in l if not (x in dedup or dedup.add(x))] - - - - -try: - from contextlib import suppress # Python 3 -except ImportError: - @contextmanager - def suppress(*excs): - '''Catch and dismiss the provided exception - - >>> x = 'hello' - >>> with suppress(IndexError): - ... 
x = x[10] - >>> x - 'hello' - ''' - try: - yield - except excs: - pass +_ID_START = 'Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Mn', 'Mc', 'Pc' +_ID_CONTINUE = _ID_START + ('Nd', 'Nl',) +def _test_unicode_category(s: str, categories: Sequence[str]) -> bool: + if len(s) != 1: + return all(_test_unicode_category(char, categories) for char in s) + return s == '_' or unicodedata.category(s) in categories +def is_id_continue(s: str) -> bool: + """ + Checks if all characters in `s` are alphanumeric characters (Unicode standard, so diacritics, indian vowels, non-latin + numbers, etc. all pass). Synonymous with a Python `ID_CONTINUE` identifier. See PEP 3131 for details. + """ + return _test_unicode_category(s, _ID_CONTINUE) +def is_id_start(s: str) -> bool: + """ + Checks if all characters in `s` are alphabetic characters (Unicode standard, so diacritics, indian vowels, non-latin + numbers, etc. all pass). Synonymous with a Python `ID_START` identifier. See PEP 3131 for details. + """ + return _test_unicode_category(s, _ID_START) -try: - compare = cmp -except NameError: - def compare(a, b): - if a == b: - return 0 - elif a > b: - return 1 - return -1 +def dedup_list(l: Sequence[T]) -> List[T]: + """Given a list (l) will removing duplicates from the list, + preserving the original order of the list. Assumes that + the list entries are hashable.""" + dedup = set() + # This returns None, but that's expected + return [x for x in l if not (x in dedup or dedup.add(x))] # type: ignore[func-returns-value] + # 2x faster (ordered in PyPy and CPython 3.6+, guaranteed to be ordered in Python 3.7+) + # return list(dict.fromkeys(l)) class Enumerator(Serialize): - def __init__(self): - self.enums = {} + def __init__(self) -> None: + self.enums: Dict[Any, int] = {} - def get(self, item): + def get(self, item) -> int: if item not in self.enums: self.enums[item] = len(self.enums) return self.enums[item] @@ -249,41 +207,16 @@ def get(self, item): def __len__(self): return len(self.enums) - def reversed(self): + def reversed(self) -> Dict[int, Any]: r = {v: k for k, v in self.enums.items()} assert len(r) == len(self.enums) return r -def eval_escaping(s): - w = '' - i = iter(s) - for n in i: - w += n - if n == '\\': - try: - n2 = next(i) - except StopIteration: - raise ValueError("Literal ended unexpectedly (bad escaping): `%r`" % s) - if n2 == '\\': - w += '\\\\' - elif n2 not in 'uxnftr': - w += '\\' - w += n2 - w = w.replace('\\"', '"').replace("'", "\\'") - - to_eval = "u'''%s'''" % w - try: - s = literal_eval(to_eval) - except SyntaxError as e: - raise ValueError(s, e) - - return s - def combine_alternatives(lists): """ - Accepts a list of alternatives, and enumerates all their possible concatinations. + Accepts a list of alternatives, and enumerates all their possible concatenations. 
Examples: >>> combine_alternatives([range(2), [4,5]]) @@ -298,11 +231,131 @@ def combine_alternatives(lists): if not lists: return [[]] assert all(l for l in lists), lists - init = [[x] for x in lists[0]] - return reduce(lambda a,b: [i+[j] for i in a for j in b], lists[1:], init) - + return list(product(*lists)) +try: + # atomicwrites doesn't have type bindings + import atomicwrites # type: ignore[import] + _has_atomicwrites = True +except ImportError: + _has_atomicwrites = False class FS: - open = open - exists = os.path.exists \ No newline at end of file + exists = staticmethod(os.path.exists) + + @staticmethod + def open(name, mode="r", **kwargs): + if _has_atomicwrites and "w" in mode: + return atomicwrites.atomic_write(name, mode=mode, overwrite=True, **kwargs) + else: + return open(name, mode, **kwargs) + + + +def isascii(s: str) -> bool: + """ str.isascii only exists in python3.7+ """ + if sys.version_info >= (3, 7): + return s.isascii() + else: + try: + s.encode('ascii') + return True + except (UnicodeDecodeError, UnicodeEncodeError): + return False + + +class fzset(frozenset): + def __repr__(self): + return '{%s}' % ', '.join(map(repr, self)) + + +def classify_bool(seq: Iterable, pred: Callable) -> Any: + false_elems = [] + true_elems = [elem for elem in seq if pred(elem) or false_elems.append(elem)] # type: ignore[func-returns-value] + return true_elems, false_elems + + +def bfs(initial: Iterable, expand: Callable) -> Iterator: + open_q = deque(list(initial)) + visited = set(open_q) + while open_q: + node = open_q.popleft() + yield node + for next_node in expand(node): + if next_node not in visited: + visited.add(next_node) + open_q.append(next_node) + +def bfs_all_unique(initial, expand): + "bfs, but doesn't keep track of visited (aka seen), because there can be no repetitions" + open_q = deque(list(initial)) + while open_q: + node = open_q.popleft() + yield node + open_q += expand(node) + + +def _serialize(value: Any, memo: Optional[SerializeMemoizer]) -> Any: + if isinstance(value, Serialize): + return value.serialize(memo) + elif isinstance(value, list): + return [_serialize(elem, memo) for elem in value] + elif isinstance(value, frozenset): + return list(value) # TODO reversible? + elif isinstance(value, dict): + return {key:_serialize(elem, memo) for key, elem in value.items()} + # assert value is None or isinstance(value, (int, float, str, tuple)), value + return value + + + + +def small_factors(n: int, max_factor: int) -> List[Tuple[int, int]]: + """ + Splits n up into smaller factors and summands <= max_factor. + Returns a list of [(a, b), ...] + so that the following code returns n: + + n = 1 + for a, b in values: + n = n * a + b + + Currently, we also keep a + b <= max_factor, but that might change + """ + assert n >= 0 + assert max_factor > 2 + if n <= max_factor: + return [(n, 0)] + + for a in range(max_factor, 1, -1): + r, b = divmod(n, a) + if a + b <= max_factor: + return small_factors(r, max_factor) + [(a, b)] + assert False, "Failed to factorize %s" % n + + +class OrderedSet(AbstractSet[T]): + """A minimal OrderedSet implementation, using a dictionary. 
+ + (relies on the dictionary being ordered) + """ + def __init__(self, items: Iterable[T] =()): + self.d = dict.fromkeys(items) + + def __contains__(self, item: Any) -> bool: + return item in self.d + + def add(self, item: T): + self.d[item] = None + + def __iter__(self) -> Iterator[T]: + return iter(self.d) + + def remove(self, item: T): + del self.d[item] + + def __bool__(self): + return bool(self.d) + + def __len__(self) -> int: + return len(self.d) diff --git a/conda_lock/_vendor/poetry/core/_vendor/lark/visitors.py b/conda_lock/_vendor/poetry/core/_vendor/lark/visitors.py index c9f0e2dd..ae9d128c 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/lark/visitors.py +++ b/conda_lock/_vendor/poetry/core/_vendor/lark/visitors.py @@ -1,21 +1,54 @@ -from functools import wraps +from typing import TypeVar, Tuple, List, Callable, Generic, Type, Union, Optional, Any, cast +from abc import ABC -from .utils import smart_decorator, combine_alternatives -from .tree import Tree +from .utils import combine_alternatives +from .tree import Tree, Branch from .exceptions import VisitError, GrammarError from .lexer import Token ###{standalone +from functools import wraps, update_wrapper from inspect import getmembers, getmro -class Discard(Exception): - pass +_Return_T = TypeVar('_Return_T') +_Return_V = TypeVar('_Return_V') +_Leaf_T = TypeVar('_Leaf_T') +_Leaf_U = TypeVar('_Leaf_U') +_R = TypeVar('_R') +_FUNC = Callable[..., _Return_T] +_DECORATED = Union[_FUNC, type] + +class _DiscardType: + """When the Discard value is returned from a transformer callback, + that node is discarded and won't appear in the parent. + + Note: + This feature is disabled when the transformer is provided to Lark + using the ``transformer`` keyword (aka Tree-less LALR mode). + + Example: + :: + + class T(Transformer): + def ignore_tree(self, children): + return Discard + + def IGNORE_TOKEN(self, token): + return Discard + """ + + def __repr__(self): + return "lark.visitors.Discard" + +Discard = _DiscardType() # Transformers class _Decoratable: + "Provides support for decorating methods with @v_args" + @classmethod - def _apply_decorator(cls, decorator, **kwargs): + def _apply_v_args(cls, visit_wrapper): mro = getmro(cls) assert mro[0] is cls libmembers = {name for _cls in mro[1:] for name, _ in getmembers(_cls)} @@ -28,28 +61,51 @@ def _apply_decorator(cls, decorator, **kwargs): continue # Skip if v_args already applied (at the function level) - if hasattr(cls.__dict__[name], 'vargs_applied') or hasattr(value, 'vargs_applied'): + if isinstance(cls.__dict__[name], _VArgsWrapper): continue - static = isinstance(cls.__dict__[name], (staticmethod, classmethod)) - setattr(cls, name, decorator(value, static=static, **kwargs)) + setattr(cls, name, _VArgsWrapper(cls.__dict__[name], visit_wrapper)) return cls def __class_getitem__(cls, _): return cls -class Transformer(_Decoratable): - """Visits the tree recursively, starting with the leaves and finally the root (bottom-up) +class Transformer(_Decoratable, ABC, Generic[_Leaf_T, _Return_T]): + """Transformers work bottom-up (or depth-first), starting with visiting the leaves and working + their way up until ending at the root of the tree. + + For each node visited, the transformer will call the appropriate method (callbacks), according to the + node's ``data``, and use the returned value to replace the node, thereby creating a new tree structure. + + Transformers can be used to implement map & reduce patterns. 
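The `Discard` change above replaces an exception with a singleton sentinel: callbacks now *return* `Discard` instead of raising it, and `_transform_children` filters those results out with an identity check. The sentinel pattern in isolation (hypothetical names):

    class _SentinelType:
        def __repr__(self):
            return "Discard"

    Discard = _SentinelType()  # compare with `is`, never by equality

    def keep_transformed(children, callback):
        # Drop any child whose callback returned the sentinel.
        return [r for r in map(callback, children) if r is not Discard]

    assert keep_transformed([1, 2, 3], lambda c: Discard if c == 2 else c * 10) == [10, 30]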
Because nodes are reduced from leaf to root, + at any point the callbacks may assume the children have already been transformed (if applicable). + + If the transformer cannot find a method with the right name, it will instead call ``__default__``, which by + default creates a copy of the node. + + To discard a node, return Discard (``lark.visitors.Discard``). - Calls its methods (provided by user via inheritance) according to tree.data - The returned value replaces the old one in the structure. + ``Transformer`` can do anything ``Visitor`` can do, but because it reconstructs the tree, + it is slightly less efficient. + + A transformer without methods essentially performs a non-memoized partial deepcopy. + + All these classes implement the transformer interface: + + - ``Transformer`` - Recursively transforms the tree. This is the one you probably want. + - ``Transformer_InPlace`` - Non-recursive. Changes the tree in-place instead of returning new instances + - ``Transformer_InPlaceRecursive`` - Recursive. Changes the tree in-place instead of returning new instances + + Parameters: + visit_tokens (bool, optional): Should the transformer visit tokens in addition to rules. + Setting this to ``False`` is slightly faster. Defaults to ``True``. + (For processing ignored tokens, use the ``lexer_callbacks`` options) - Can be used to implement map or reduce. """ __visit_tokens__ = True # For backwards compatibility - def __init__(self, visit_tokens=True): + def __init__(self, visit_tokens: bool=True) -> None: self.__visit_tokens__ = visit_tokens def _call_userfunc(self, tree, new_children=None): @@ -66,7 +122,7 @@ def _call_userfunc(self, tree, new_children=None): return f.visit_wrapper(f, tree.data, children, tree.meta) else: return f(children) - except (GrammarError, Discard): + except GrammarError: raise except Exception as e: raise VisitError(tree.data, tree, e) @@ -79,43 +135,106 @@ def _call_userfunc_token(self, token): else: try: return f(token) - except (GrammarError, Discard): + except GrammarError: raise except Exception as e: raise VisitError(token.type, token, e) - def _transform_children(self, children): for c in children: - try: - if isinstance(c, Tree): - yield self._transform_tree(c) - elif self.__visit_tokens__ and isinstance(c, Token): - yield self._call_userfunc_token(c) - else: - yield c - except Discard: - pass + if isinstance(c, Tree): + res = self._transform_tree(c) + elif self.__visit_tokens__ and isinstance(c, Token): + res = self._call_userfunc_token(c) + else: + res = c + + if res is not Discard: + yield res def _transform_tree(self, tree): children = list(self._transform_children(tree.children)) return self._call_userfunc(tree, children) - def transform(self, tree): + def transform(self, tree: Tree[_Leaf_T]) -> _Return_T: + "Transform the given tree, and return the final result" return self._transform_tree(tree) - def __mul__(self, other): + def __mul__( + self: 'Transformer[_Leaf_T, Tree[_Leaf_U]]', + other: 'Union[Transformer[_Leaf_U, _Return_V], TransformerChain[_Leaf_U, _Return_V,]]' + ) -> 'TransformerChain[_Leaf_T, _Return_V]': + """Chain two transformers together, returning a new transformer. + """ return TransformerChain(self, other) def __default__(self, data, children, meta): - "Default operation on tree (for override)" + """Default function that is called if there is no attribute matching ``data`` + + Can be overridden. Defaults to creating a new copy of the tree node (i.e. 
``return Tree(data, children, meta)``) + """ return Tree(data, children, meta) def __default_token__(self, token): - "Default operation on token (for override)" + """Default function that is called if there is no attribute matching ``token.type`` + + Can be overridden. Defaults to returning the token as-is. + """ return token +def merge_transformers(base_transformer=None, **transformers_to_merge): + """Merge a collection of transformers into the base_transformer, each into its own 'namespace'. + + When called, it will collect the methods from each transformer, and assign them to base_transformer, + with their name prefixed with the given keyword, as ``prefix__methodname``. + + This function is especially useful for processing grammars that import other grammars, + thereby creating some of their rules in a 'namespace'. (i.e with a consistent name prefix). + In this case, the key for the transformer should match the name of the imported grammar. + + Parameters: + base_transformer (Transformer, optional): The transformer that all other transformers will be added to. + **transformers_to_merge: Keyword arguments, in the form of ``name_prefix = transformer``. + + Raises: + AttributeError: In case of a name collision in the merged methods + + Example: + :: + + class TBase(Transformer): + def start(self, children): + return children[0] + 'bar' + + class TImportedGrammar(Transformer): + def foo(self, children): + return "foo" + + composed_transformer = merge_transformers(TBase(), imported=TImportedGrammar()) + + t = Tree('start', [ Tree('imported__foo', []) ]) + + assert composed_transformer.transform(t) == 'foobar' + + """ + if base_transformer is None: + base_transformer = Transformer() + for prefix, transformer in transformers_to_merge.items(): + for method_name in dir(transformer): + method = getattr(transformer, method_name) + if not callable(method): + continue + if method_name.startswith("_") or method_name == "transform": + continue + prefixed_method = prefix + "__" + method_name + if hasattr(base_transformer, prefixed_method): + raise AttributeError("Cannot merge: method '%s' appears more than once" % prefixed_method) + + setattr(base_transformer, prefixed_method, method) + + return base_transformer + class InlineTransformer(Transformer): # XXX Deprecated def _call_userfunc(self, tree, new_children=None): @@ -129,46 +248,60 @@ def _call_userfunc(self, tree, new_children=None): return f(*children) -class TransformerChain(object): - def __init__(self, *transformers): +class TransformerChain(Generic[_Leaf_T, _Return_T]): + + transformers: 'Tuple[Union[Transformer, TransformerChain], ...]' + + def __init__(self, *transformers: 'Union[Transformer, TransformerChain]') -> None: self.transformers = transformers - def transform(self, tree): + def transform(self, tree: Tree[_Leaf_T]) -> _Return_T: for t in self.transformers: tree = t.transform(tree) - return tree + return cast(_Return_T, tree) - def __mul__(self, other): + def __mul__( + self: 'TransformerChain[_Leaf_T, Tree[_Leaf_U]]', + other: 'Union[Transformer[_Leaf_U, _Return_V], TransformerChain[_Leaf_U, _Return_V]]' + ) -> 'TransformerChain[_Leaf_T, _Return_V]': return TransformerChain(*self.transformers + (other,)) -class Transformer_InPlace(Transformer): - "Non-recursive. Changes the tree in-place instead of returning new instances" +class Transformer_InPlace(Transformer[_Leaf_T, _Return_T]): + """Same as Transformer, but non-recursive, and changes the tree in-place instead of returning new instances + + Useful for huge trees. 
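The `__mul__` defined above makes `t1 * t2` build a `TransformerChain` that applies `t1` first and feeds its output tree to `t2`. A sketch of the chaining, assuming the standalone `lark` package (the vendored copy behaves the same way):

    from lark import Transformer, Tree

    class Doubler(Transformer):
        def num(self, children):
            return Tree('num', [c * 2 for c in children])

    class Summer(Transformer):
        def start(self, children):
            return sum(t.children[0] for t in children)

    chain = Doubler() * Summer()          # a TransformerChain, applied left to right
    tree = Tree('start', [Tree('num', [1]), Tree('num', [2])])
    assert chain.transform(tree) == 6     # (1*2) + (2*2)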
Conservative in memory. + """ def _transform_tree(self, tree): # Cancel recursion return self._call_userfunc(tree) - def transform(self, tree): + def transform(self, tree: Tree[_Leaf_T]) -> _Return_T: for subtree in tree.iter_subtrees(): subtree.children = list(self._transform_children(subtree.children)) return self._transform_tree(tree) -class Transformer_NonRecursive(Transformer): - "Non-recursive. Doesn't change the original tree." +class Transformer_NonRecursive(Transformer[_Leaf_T, _Return_T]): + """Same as Transformer but non-recursive. + + Like Transformer, it doesn't change the original tree. - def transform(self, tree): + Useful for huge trees. + """ + + def transform(self, tree: Tree[_Leaf_T]) -> _Return_T: # Tree to postfix rev_postfix = [] - q = [tree] + q: List[Branch[_Leaf_T]] = [tree] while q: t = q.pop() - rev_postfix.append( t ) + rev_postfix.append(t) if isinstance(t, Tree): q += t.children # Postfix to tree - stack = [] + stack: List = [] for x in reversed(rev_postfix): if isinstance(x, Tree): size = len(x.children) @@ -177,23 +310,32 @@ def transform(self, tree): del stack[-size:] else: args = [] - stack.append(self._call_userfunc(x, args)) + + res = self._call_userfunc(x, args) + if res is not Discard: + stack.append(res) + + elif self.__visit_tokens__ and isinstance(x, Token): + res = self._call_userfunc_token(x) + if res is not Discard: + stack.append(res) else: stack.append(x) - t ,= stack # We should have only one tree remaining - return t - + result, = stack # We should have only one tree remaining + # There are no guarantees on the type of the value produced by calling a user func for a + # child will produce. This means type system can't statically know that the final result is + # _Return_T. As a result a cast is required. + return cast(_Return_T, result) class Transformer_InPlaceRecursive(Transformer): - "Recursive. Changes the tree in-place instead of returning new instances" + "Same as Transformer, recursive, but changes the tree in-place instead of returning new instances" def _transform_tree(self, tree): tree.children = list(self._transform_children(tree.children)) return self._call_userfunc(tree) - # Visitors class VisitorBase: @@ -201,38 +343,45 @@ def _call_userfunc(self, tree): return getattr(self, tree.data, self.__default__)(tree) def __default__(self, tree): - "Default operation on tree (for override)" + """Default function that is called if there is no attribute matching ``tree.data`` + + Can be overridden. Defaults to doing nothing. + """ return tree def __class_getitem__(cls, _): return cls -class Visitor(VisitorBase): - """Bottom-up visitor, non-recursive +class Visitor(VisitorBase, ABC, Generic[_Leaf_T]): + """Tree visitor, non-recursive (can handle huge trees). 
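`Transformer_NonRecursive.transform` above sidesteps Python's recursion limit by flattening the tree into reverse post-order with an explicit stack, then rebuilding bottom-up from the reversed list. The traversal idea in isolation, with plain tuples standing in for `Tree`:

    def postorder(root, children_of):
        rev = []
        stack = [root]
        while stack:
            node = stack.pop()
            rev.append(node)
            stack.extend(children_of(node))
        # Reversed, every child comes before its parent -- safe to reduce bottom-up.
        return list(reversed(rev))

    tree = ('a', [('b', []), ('c', [('d', [])])])
    assert [n[0] for n in postorder(tree, lambda n: n[1])] == ['b', 'd', 'c', 'a']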
- Visits the tree, starting with the leaves and finally the root (bottom-up) - Calls its methods (provided by user via inheritance) according to tree.data + Visiting a node calls its methods (provided by the user via inheritance) according to ``tree.data`` """ - def visit(self, tree): + def visit(self, tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]: + "Visits the tree, starting with the leaves and finally the root (bottom-up)" for subtree in tree.iter_subtrees(): self._call_userfunc(subtree) return tree - def visit_topdown(self,tree): + def visit_topdown(self, tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]: + "Visit the tree, starting at the root, and ending at the leaves (top-down)" for subtree in tree.iter_subtrees_topdown(): self._call_userfunc(subtree) return tree -class Visitor_Recursive(VisitorBase): - """Bottom-up visitor, recursive - Visits the tree, starting with the leaves and finally the root (bottom-up) - Calls its methods (provided by user via inheritance) according to tree.data +class Visitor_Recursive(VisitorBase, Generic[_Leaf_T]): + """Bottom-up visitor, recursive. + + Visiting a node calls its methods (provided by the user via inheritance) according to ``tree.data`` + + Slightly faster than the non-recursive version. """ - def visit(self, tree): + def visit(self, tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]: + "Visits the tree, starting with the leaves and finally the root (bottom-up)" for child in tree.children: if isinstance(child, Tree): self.visit(child) @@ -240,7 +389,8 @@ def visit(self, tree): self._call_userfunc(tree) return tree - def visit_topdown(self,tree): + def visit_topdown(self,tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]: + "Visit the tree, starting at the root, and ending at the leaves (top-down)" self._call_userfunc(tree) for child in tree.children: @@ -250,27 +400,25 @@ def visit_topdown(self,tree): return tree - -def visit_children_decor(func): - "See Interpreter" - @wraps(func) - def inner(cls, tree): - values = cls.visit_children(tree) - return func(cls, values) - return inner - - -class Interpreter(_Decoratable): - """Top-down visitor, recursive +class Interpreter(_Decoratable, ABC, Generic[_Leaf_T, _Return_T]): + """Interpreter walks the tree starting at the root. Visits the tree, starting with the root and finally the leaves (top-down) - Calls its methods (provided by user via inheritance) according to tree.data - Unlike Transformer and Visitor, the Interpreter doesn't automatically visit its sub-branches. - The user has to explicitly call visit_children, or use the @visit_children_decor + For each tree node, it calls its methods (provided by user via inheritance) according to ``tree.data``. + + Unlike ``Transformer`` and ``Visitor``, the Interpreter doesn't automatically visit its sub-branches. + The user has to explicitly call ``visit``, ``visit_children``, or use the ``@visit_children_decor``. + This allows the user to implement branching and loops. """ - def visit(self, tree): + def visit(self, tree: Tree[_Leaf_T]) -> _Return_T: + # There are no guarantees on the type of the value produced by calling a user func for a + # child will produce. So only annotate the public method and use an internal method when + # visiting child trees. 
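Because `Interpreter` (below) never visits children automatically, a callback can decide which subtrees get evaluated at all -- the basis for short-circuiting and loops. A sketch, assuming the standalone `lark` package:

    from lark import Tree
    from lark.visitors import Interpreter

    class IfInterp(Interpreter):
        def if_expr(self, tree):
            cond, then, other = tree.children
            # Only one branch is ever visited; a Transformer would evaluate both.
            return self.visit(then if self.visit(cond) else other)

        def lit(self, tree):
            return tree.children[0]

    t = Tree('if_expr', [Tree('lit', [True]), Tree('lit', ['yes']), Tree('lit', ['no'])])
    assert IfInterp().visit(t) == 'yes'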
+ return self._visit_tree(tree) + + def _visit_tree(self, tree: Tree[_Leaf_T]): f = getattr(self, tree.data) wrapper = getattr(f, 'visit_wrapper', None) if wrapper is not None: @@ -278,8 +426,8 @@ def visit(self, tree): else: return f(tree) - def visit_children(self, tree): - return [self.visit(child) if isinstance(child, Tree) else child + def visit_children(self, tree: Tree[_Leaf_T]) -> List: + return [self._visit_tree(child) if isinstance(child, Tree) else child for child in tree.children] def __getattr__(self, name): @@ -289,69 +437,113 @@ def __default__(self, tree): return self.visit_children(tree) +_InterMethod = Callable[[Type[Interpreter], _Return_T], _R] +def visit_children_decor(func: _InterMethod) -> _InterMethod: + "See Interpreter" + @wraps(func) + def inner(cls, tree): + values = cls.visit_children(tree) + return func(cls, values) + return inner # Decorators -def _apply_decorator(obj, decorator, **kwargs): +def _apply_v_args(obj, visit_wrapper): try: - _apply = obj._apply_decorator + _apply = obj._apply_v_args except AttributeError: - return decorator(obj, **kwargs) + return _VArgsWrapper(obj, visit_wrapper) else: - return _apply(decorator, **kwargs) - - - -def _inline_args__func(func): - @wraps(func) - def create_decorator(_f, with_self): - if with_self: - def f(self, children): - return _f(self, *children) - else: - def f(self, children): - return _f(*children) - return f - - return smart_decorator(func, create_decorator) + return _apply(visit_wrapper) -def inline_args(obj): # XXX Deprecated - return _apply_decorator(obj, _inline_args__func) +class _VArgsWrapper: + """ + A wrapper around a Callable. It delegates `__call__` to the Callable. + If the Callable has a `__get__`, that is also delegate and the resulting function is wrapped. + Otherwise, we use the original function mirroring the behaviour without a __get__. + We also have the visit_wrapper attribute to be used by Transformers. 
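`_VArgsWrapper` below must behave like the function it wraps even as a *class attribute*, which is why it forwards `__get__`: method binding happens through the descriptor protocol, not through `__call__`. The pattern in isolation (hypothetical names, a simplified sketch of the same idea):

    from functools import update_wrapper

    class BindingWrapper:
        def __init__(self, func):
            self.func = func
            update_wrapper(self, func)

        def __call__(self, *args, **kwargs):
            return self.func(*args, **kwargs)

        def __get__(self, instance, owner=None):
            get = getattr(type(self.func), '__get__', None)
            if get is None:                 # wrapped object is not a descriptor
                return self
            # Delegate binding to the wrapped function, then re-wrap the result.
            return BindingWrapper(get(self.func, instance, owner))

    class C:
        @BindingWrapper
        def method(self):
            return 42

    assert C().method() == 42   # binds like a normal method through the wrapper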
+ """ + base_func: Callable + def __init__(self, func: Callable, visit_wrapper: Callable[[Callable, str, list, Any], Any]): + if isinstance(func, _VArgsWrapper): + func = func.base_func + # https://github.com/python/mypy/issues/708 + self.base_func = func # type: ignore[assignment] + self.visit_wrapper = visit_wrapper + update_wrapper(self, func) + def __call__(self, *args, **kwargs): + return self.base_func(*args, **kwargs) -def _visitor_args_func_dec(func, visit_wrapper=None, static=False): - def create_decorator(_f, with_self): - if with_self: - def f(self, *args, **kwargs): - return _f(self, *args, **kwargs) + def __get__(self, instance, owner=None): + try: + # Use the __get__ attribute of the type instead of the instance + # to fully mirror the behavior of getattr + g = type(self.base_func).__get__ + except AttributeError: + return self else: - def f(self, *args, **kwargs): - return _f(*args, **kwargs) - return f + return _VArgsWrapper(g(self.base_func, instance, owner), self.visit_wrapper) - if static: - f = wraps(func)(create_decorator(func, False)) - else: - f = smart_decorator(func, create_decorator) - f.vargs_applied = True - f.visit_wrapper = visit_wrapper - return f + def __set_name__(self, owner, name): + try: + f = type(self.base_func).__set_name__ + except AttributeError: + return + else: + f(self.base_func, owner, name) -def _vargs_inline(f, data, children, meta): +def _vargs_inline(f, _data, children, _meta): return f(*children) -def _vargs_meta_inline(f, data, children, meta): +def _vargs_meta_inline(f, _data, children, meta): return f(meta, *children) -def _vargs_meta(f, data, children, meta): - return f(children, meta) # TODO swap these for consistency? Backwards incompatible! +def _vargs_meta(f, _data, children, meta): + return f(meta, children) def _vargs_tree(f, data, children, meta): return f(Tree(data, children, meta)) -def v_args(inline=False, meta=False, tree=False, wrapper=None): - "A convenience decorator factory, for modifying the behavior of user-supplied visitor methods" + +def v_args(inline: bool = False, meta: bool = False, tree: bool = False, wrapper: Optional[Callable] = None) -> Callable[[_DECORATED], _DECORATED]: + """A convenience decorator factory for modifying the behavior of user-supplied visitor methods. + + By default, callback methods of transformers/visitors accept one argument - a list of the node's children. + + ``v_args`` can modify this behavior. When used on a transformer/visitor class definition, + it applies to all the callback methods inside it. + + ``v_args`` can be applied to a single method, or to an entire class. When applied to both, + the options given to the method take precedence. + + Parameters: + inline (bool, optional): Children are provided as ``*args`` instead of a list argument (not recommended for very long lists). + meta (bool, optional): Provides two arguments: ``meta`` and ``children`` (instead of just the latter) + tree (bool, optional): Provides the entire tree as the argument, instead of the children. + wrapper (function, optional): Provide a function to decorate all methods. 
+ + Example: + :: + + @v_args(inline=True) + class SolveArith(Transformer): + def add(self, left, right): + return left + right + + @v_args(meta=True) + def mul(self, meta, children): + logger.info(f'mul at line {meta.line}') + left, right = children + return left * right + + + class ReverseNotation(Transformer_InPlace): + @v_args(tree=True) + def tree_node(self, tree): + tree.children = tree.children[::-1] + """ if tree and (meta or inline): raise ValueError("Visitor functions cannot combine 'tree' with 'meta' or 'inline'.") @@ -372,14 +564,14 @@ def v_args(inline=False, meta=False, tree=False, wrapper=None): func = wrapper def _visitor_args_dec(obj): - return _apply_decorator(obj, _visitor_args_func_dec, visit_wrapper=func) + return _apply_v_args(obj, func) return _visitor_args_dec ###} -#--- Visitor Utilities --- +# --- Visitor Utilities --- class CollapseAmbiguities(Transformer): """ @@ -393,7 +585,9 @@ class CollapseAmbiguities(Transformer): """ def _ambig(self, options): return sum(options, []) + def __default__(self, data, children_lists, meta): return [Tree(data, children, meta) for children in combine_alternatives(children_lists)] + def __default_token__(self, t): return [t] diff --git a/conda_lock/_vendor/poetry/core/_vendor/packaging/__about__.py b/conda_lock/_vendor/poetry/core/_vendor/packaging/__about__.py deleted file mode 100644 index 4c43a968..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/packaging/__about__.py +++ /dev/null @@ -1,27 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -__all__ = [ - "__title__", - "__summary__", - "__uri__", - "__version__", - "__author__", - "__email__", - "__license__", - "__copyright__", -] - -__title__ = "packaging" -__summary__ = "Core utilities for Python packages" -__uri__ = "https://github.com/pypa/packaging" - -__version__ = "20.9" - -__author__ = "Donald Stufft and individual contributors" -__email__ = "donald@stufft.io" - -__license__ = "BSD-2-Clause or Apache-2.0" -__copyright__ = "2014-2019 %s" % __author__ diff --git a/conda_lock/_vendor/poetry/core/_vendor/packaging/__init__.py b/conda_lock/_vendor/poetry/core/_vendor/packaging/__init__.py index a0cf67df..22809cfd 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/packaging/__init__.py +++ b/conda_lock/_vendor/poetry/core/_vendor/packaging/__init__.py @@ -1,26 +1,15 @@ # This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. 
-from __future__ import absolute_import, division, print_function -from .__about__ import ( - __author__, - __copyright__, - __email__, - __license__, - __summary__, - __title__, - __uri__, - __version__, -) +__title__ = "packaging" +__summary__ = "Core utilities for Python packages" +__uri__ = "https://github.com/pypa/packaging" -__all__ = [ - "__title__", - "__summary__", - "__uri__", - "__version__", - "__author__", - "__email__", - "__license__", - "__copyright__", -] +__version__ = "23.2" + +__author__ = "Donald Stufft and individual contributors" +__email__ = "donald@stufft.io" + +__license__ = "BSD-2-Clause or Apache-2.0" +__copyright__ = "2014 %s" % __author__ diff --git a/conda_lock/_vendor/poetry/core/_vendor/packaging/_compat.py b/conda_lock/_vendor/poetry/core/_vendor/packaging/_compat.py deleted file mode 100644 index e54bd4ed..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/packaging/_compat.py +++ /dev/null @@ -1,38 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -import sys - -from ._typing import TYPE_CHECKING - -if TYPE_CHECKING: # pragma: no cover - from typing import Any, Dict, Tuple, Type - - -PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 - -# flake8: noqa - -if PY3: - string_types = (str,) -else: - string_types = (basestring,) - - -def with_metaclass(meta, *bases): - # type: (Type[Any], Tuple[Type[Any], ...]) -> Any - """ - Create a base class with a metaclass. - """ - # This requires a bit of explanation: the basic idea is to make a dummy - # metaclass for one level of class instantiation that replaces itself with - # the actual metaclass. - class metaclass(meta): # type: ignore - def __new__(cls, name, this_bases, d): - # type: (Type[Any], str, Tuple[Any], Dict[Any, Any]) -> Any - return meta(name, bases, d) - - return type.__new__(metaclass, "temporary_class", (), {}) diff --git a/conda_lock/_vendor/poetry/core/_vendor/packaging/_elffile.py b/conda_lock/_vendor/poetry/core/_vendor/packaging/_elffile.py new file mode 100644 index 00000000..6fb19b30 --- /dev/null +++ b/conda_lock/_vendor/poetry/core/_vendor/packaging/_elffile.py @@ -0,0 +1,108 @@ +""" +ELF file parser. + +This provides a class ``ELFFile`` that parses an ELF executable in a similar +interface to ``ZipFile``. Only the read interface is implemented. + +Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca +ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html +""" + +import enum +import os +import struct +from typing import IO, Optional, Tuple + + +class ELFInvalid(ValueError): + pass + + +class EIClass(enum.IntEnum): + C32 = 1 + C64 = 2 + + +class EIData(enum.IntEnum): + Lsb = 1 + Msb = 2 + + +class EMachine(enum.IntEnum): + I386 = 3 + S390 = 22 + Arm = 40 + X8664 = 62 + AArc64 = 183 + + +class ELFFile: + """ + Representation of an ELF executable. + """ + + def __init__(self, f: IO[bytes]) -> None: + self._f = f + + try: + ident = self._read("16B") + except struct.error: + raise ELFInvalid("unable to parse identification") + magic = bytes(ident[:4]) + if magic != b"\x7fELF": + raise ELFInvalid(f"invalid magic: {magic!r}") + + self.capacity = ident[4] # Format for program header (bitness). + self.encoding = ident[5] # Data structure encoding (endianness). + + try: + # e_fmt: Format for program header. 
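The `ELFFile` constructor above dispatches its `struct` formats on two bytes of the 16-byte identification block: byte 4 (32- vs 64-bit) and byte 5 (endianness). A minimal standalone reader of just that block (the path in the trailing comment is only an example):

    import struct

    def elf_ident(path):
        """Return (bits, byte_order) for an ELF file, or None if not ELF."""
        with open(path, "rb") as f:
            try:
                ident = struct.unpack("16B", f.read(16))
            except struct.error:
                return None
        if bytes(ident[:4]) != b"\x7fELF":
            return None
        bits = {1: 32, 2: 64}.get(ident[4])            # EI_CLASS
        order = {1: "little", 2: "big"}.get(ident[5])  # EI_DATA
        return bits, order

    # On a typical x86-64 Linux box: elf_ident("/bin/ls") -> (64, 'little')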
+ # p_fmt: Format for section header. + # p_idx: Indexes to find p_type, p_offset, and p_filesz. + e_fmt, self._p_fmt, self._p_idx = { + (1, 1): ("HHIIIIIHHH", ">IIIIIIII", (0, 1, 4)), # 32-bit MSB. + (2, 1): ("HHIQQQIHHH", ">IIQQQQQQ", (0, 2, 5)), # 64-bit MSB. + }[(self.capacity, self.encoding)] + except KeyError: + raise ELFInvalid( + f"unrecognized capacity ({self.capacity}) or " + f"encoding ({self.encoding})" + ) + + try: + ( + _, + self.machine, # Architecture type. + _, + _, + self._e_phoff, # Offset of program header. + _, + self.flags, # Processor-specific flags. + _, + self._e_phentsize, # Size of section. + self._e_phnum, # Number of sections. + ) = self._read(e_fmt) + except struct.error as e: + raise ELFInvalid("unable to parse machine and section information") from e + + def _read(self, fmt: str) -> Tuple[int, ...]: + return struct.unpack(fmt, self._f.read(struct.calcsize(fmt))) + + @property + def interpreter(self) -> Optional[str]: + """ + The path recorded in the ``PT_INTERP`` section header. + """ + for index in range(self._e_phnum): + self._f.seek(self._e_phoff + self._e_phentsize * index) + try: + data = self._read(self._p_fmt) + except struct.error: + continue + if data[self._p_idx[0]] != 3: # Not PT_INTERP. + continue + self._f.seek(data[self._p_idx[1]]) + return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip("\0") + return None diff --git a/conda_lock/_vendor/poetry/core/_vendor/packaging/_manylinux.py b/conda_lock/_vendor/poetry/core/_vendor/packaging/_manylinux.py new file mode 100644 index 00000000..3705d50d --- /dev/null +++ b/conda_lock/_vendor/poetry/core/_vendor/packaging/_manylinux.py @@ -0,0 +1,252 @@ +import collections +import contextlib +import functools +import os +import re +import sys +import warnings +from typing import Dict, Generator, Iterator, NamedTuple, Optional, Sequence, Tuple + +from ._elffile import EIClass, EIData, ELFFile, EMachine + +EF_ARM_ABIMASK = 0xFF000000 +EF_ARM_ABI_VER5 = 0x05000000 +EF_ARM_ABI_FLOAT_HARD = 0x00000400 + + +# `os.PathLike` not a generic type until Python 3.9, so sticking with `str` +# as the type for `path` until then. +@contextlib.contextmanager +def _parse_elf(path: str) -> Generator[Optional[ELFFile], None, None]: + try: + with open(path, "rb") as f: + yield ELFFile(f) + except (OSError, TypeError, ValueError): + yield None + + +def _is_linux_armhf(executable: str) -> bool: + # hard-float ABI can be detected from the ELF header of the running + # process + # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf + with _parse_elf(executable) as f: + return ( + f is not None + and f.capacity == EIClass.C32 + and f.encoding == EIData.Lsb + and f.machine == EMachine.Arm + and f.flags & EF_ARM_ABIMASK == EF_ARM_ABI_VER5 + and f.flags & EF_ARM_ABI_FLOAT_HARD == EF_ARM_ABI_FLOAT_HARD + ) + + +def _is_linux_i686(executable: str) -> bool: + with _parse_elf(executable) as f: + return ( + f is not None + and f.capacity == EIClass.C32 + and f.encoding == EIData.Lsb + and f.machine == EMachine.I386 + ) + + +def _have_compatible_abi(executable: str, archs: Sequence[str]) -> bool: + if "armv7l" in archs: + return _is_linux_armhf(executable) + if "i686" in archs: + return _is_linux_i686(executable) + allowed_archs = {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x", "loongarch64"} + return any(arch in allowed_archs for arch in archs) + + +# If glibc ever changes its major version, we need to know what the last +# minor version was, so we can build the complete list of all versions. 
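A usage sketch for the pieces above: the `interpreter` property walks the program headers looking for `PT_INTERP` (type 3), which is how `_musllinux` later tells glibc apart from musl. Linux-only, and the import path below mirrors the vendored location (an assumption; the standalone `packaging._elffile` works the same):

    import sys
    from conda_lock._vendor.poetry.core._vendor.packaging._elffile import ELFFile

    with open(sys.executable, "rb") as f:
        elf = ELFFile(f)
        # e.g. '/lib64/ld-linux-x86-64.so.2' on glibc, '/lib/ld-musl-*.so.1' on musl
        print(elf.capacity, elf.encoding, elf.interpreter)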
+# For now, guess what the highest minor version might be, assume it will +# be 50 for testing. Once this actually happens, update the dictionary +# with the actual value. +_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50) + + +class _GLibCVersion(NamedTuple): + major: int + minor: int + + +def _glibc_version_string_confstr() -> Optional[str]: + """ + Primary implementation of glibc_version_string using os.confstr. + """ + # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely + # to be broken or missing. This strategy is used in the standard library + # platform module. + # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183 + try: + # Should be a string like "glibc 2.17". + version_string: str = getattr(os, "confstr")("CS_GNU_LIBC_VERSION") + assert version_string is not None + _, version = version_string.rsplit() + except (AssertionError, AttributeError, OSError, ValueError): + # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)... + return None + return version + + +def _glibc_version_string_ctypes() -> Optional[str]: + """ + Fallback implementation of glibc_version_string using ctypes. + """ + try: + import ctypes + except ImportError: + return None + + # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen + # manpage says, "If filename is NULL, then the returned handle is for the + # main program". This way we can let the linker do the work to figure out + # which libc our process is actually using. + # + # We must also handle the special case where the executable is not a + # dynamically linked executable. This can occur when using musl libc, + # for example. In this situation, dlopen() will error, leading to an + # OSError. Interestingly, at least in the case of musl, there is no + # errno set on the OSError. The single string argument used to construct + # OSError comes from libc itself and is therefore not portable to + # hard code here. In any case, failure to call dlopen() means we + # can proceed, so we bail on our attempt. + try: + process_namespace = ctypes.CDLL(None) + except OSError: + return None + + try: + gnu_get_libc_version = process_namespace.gnu_get_libc_version + except AttributeError: + # Symbol doesn't exist -> therefore, we are not linked to + # glibc. + return None + + # Call gnu_get_libc_version, which returns a string like "2.5" + gnu_get_libc_version.restype = ctypes.c_char_p + version_str: str = gnu_get_libc_version() + # py2 / py3 compatibility: + if not isinstance(version_str, str): + version_str = version_str.decode("ascii") + + return version_str + + +def _glibc_version_string() -> Optional[str]: + """Returns glibc version string, or None if not using glibc.""" + return _glibc_version_string_confstr() or _glibc_version_string_ctypes() + + +def _parse_glibc_version(version_str: str) -> Tuple[int, int]: + """Parse glibc version. + + We use a regexp instead of str.split because we want to discard any + random junk that might come after the minor version -- this might happen + in patched/forked versions of glibc (e.g. Linaro's version of glibc + uses version strings like "2.20-2014.11"). See gh-3588. 
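Putting the confstr strategy above into one best-effort standalone helper, with the same parsing rule (keep major.minor, ignore vendor suffixes like "-2014.11"):

    import os
    import re

    def glibc_version():
        try:
            raw = os.confstr("CS_GNU_LIBC_VERSION")    # e.g. "glibc 2.17"
        except (AttributeError, OSError, ValueError):
            return None                                # not Unix, or name unknown
        if not raw:
            return None
        m = re.match(r"glibc (\d+)\.(\d+)", raw)
        return (int(m.group(1)), int(m.group(2))) if m else None

    # e.g. glibc_version() -> (2, 31) on Ubuntu 20.04; None on musl or Windows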
+ """ + m = re.match(r"(?P[0-9]+)\.(?P[0-9]+)", version_str) + if not m: + warnings.warn( + f"Expected glibc version with 2 components major.minor," + f" got: {version_str}", + RuntimeWarning, + ) + return -1, -1 + return int(m.group("major")), int(m.group("minor")) + + +@functools.lru_cache() +def _get_glibc_version() -> Tuple[int, int]: + version_str = _glibc_version_string() + if version_str is None: + return (-1, -1) + return _parse_glibc_version(version_str) + + +# From PEP 513, PEP 600 +def _is_compatible(arch: str, version: _GLibCVersion) -> bool: + sys_glibc = _get_glibc_version() + if sys_glibc < version: + return False + # Check for presence of _manylinux module. + try: + import _manylinux # noqa + except ImportError: + return True + if hasattr(_manylinux, "manylinux_compatible"): + result = _manylinux.manylinux_compatible(version[0], version[1], arch) + if result is not None: + return bool(result) + return True + if version == _GLibCVersion(2, 5): + if hasattr(_manylinux, "manylinux1_compatible"): + return bool(_manylinux.manylinux1_compatible) + if version == _GLibCVersion(2, 12): + if hasattr(_manylinux, "manylinux2010_compatible"): + return bool(_manylinux.manylinux2010_compatible) + if version == _GLibCVersion(2, 17): + if hasattr(_manylinux, "manylinux2014_compatible"): + return bool(_manylinux.manylinux2014_compatible) + return True + + +_LEGACY_MANYLINUX_MAP = { + # CentOS 7 w/ glibc 2.17 (PEP 599) + (2, 17): "manylinux2014", + # CentOS 6 w/ glibc 2.12 (PEP 571) + (2, 12): "manylinux2010", + # CentOS 5 w/ glibc 2.5 (PEP 513) + (2, 5): "manylinux1", +} + + +def platform_tags(archs: Sequence[str]) -> Iterator[str]: + """Generate manylinux tags compatible to the current platform. + + :param archs: Sequence of compatible architectures. + The first one shall be the closest to the actual architecture and be the part of + platform tag after the ``linux_`` prefix, e.g. ``x86_64``. + The ``linux_`` prefix is assumed as a prerequisite for the current platform to + be manylinux-compatible. + + :returns: An iterator of compatible manylinux tags. + """ + if not _have_compatible_abi(sys.executable, archs): + return + # Oldest glibc to be supported regardless of architecture is (2, 17). + too_old_glibc2 = _GLibCVersion(2, 16) + if set(archs) & {"x86_64", "i686"}: + # On x86/i686 also oldest glibc to be supported is (2, 5). + too_old_glibc2 = _GLibCVersion(2, 4) + current_glibc = _GLibCVersion(*_get_glibc_version()) + glibc_max_list = [current_glibc] + # We can assume compatibility across glibc major versions. + # https://sourceware.org/bugzilla/show_bug.cgi?id=24636 + # + # Build a list of maximum glibc versions so that we can + # output the canonical list of all glibc from current_glibc + # down to too_old_glibc2, including all intermediary versions. + for glibc_major in range(current_glibc.major - 1, 1, -1): + glibc_minor = _LAST_GLIBC_MINOR[glibc_major] + glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor)) + for arch in archs: + for glibc_max in glibc_max_list: + if glibc_max.major == too_old_glibc2.major: + min_minor = too_old_glibc2.minor + else: + # For other glibc major versions oldest supported is (x, 0). + min_minor = -1 + for glibc_minor in range(glibc_max.minor, min_minor, -1): + glibc_version = _GLibCVersion(glibc_max.major, glibc_minor) + tag = "manylinux_{}_{}".format(*glibc_version) + if _is_compatible(arch, glibc_version): + yield f"{tag}_{arch}" + # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags. 
+ if glibc_version in _LEGACY_MANYLINUX_MAP: + legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version] + if _is_compatible(arch, glibc_version): + yield f"{legacy_tag}_{arch}" diff --git a/conda_lock/_vendor/poetry/core/_vendor/packaging/_musllinux.py b/conda_lock/_vendor/poetry/core/_vendor/packaging/_musllinux.py new file mode 100644 index 00000000..86419df9 --- /dev/null +++ b/conda_lock/_vendor/poetry/core/_vendor/packaging/_musllinux.py @@ -0,0 +1,83 @@ +"""PEP 656 support. + +This module implements logic to detect if the currently running Python is +linked against musl, and what musl version is used. +""" + +import functools +import re +import subprocess +import sys +from typing import Iterator, NamedTuple, Optional, Sequence + +from ._elffile import ELFFile + + +class _MuslVersion(NamedTuple): + major: int + minor: int + + +def _parse_musl_version(output: str) -> Optional[_MuslVersion]: + lines = [n for n in (n.strip() for n in output.splitlines()) if n] + if len(lines) < 2 or lines[0][:4] != "musl": + return None + m = re.match(r"Version (\d+)\.(\d+)", lines[1]) + if not m: + return None + return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2))) + + +@functools.lru_cache() +def _get_musl_version(executable: str) -> Optional[_MuslVersion]: + """Detect currently-running musl runtime version. + + This is done by checking the specified executable's dynamic linking + information, and invoking the loader to parse its output for a version + string. If the loader is musl, the output would be something like:: + + musl libc (x86_64) + Version 1.2.2 + Dynamic Program Loader + """ + try: + with open(executable, "rb") as f: + ld = ELFFile(f).interpreter + except (OSError, TypeError, ValueError): + return None + if ld is None or "musl" not in ld: + return None + proc = subprocess.run([ld], stderr=subprocess.PIPE, text=True) + return _parse_musl_version(proc.stderr) + + +def platform_tags(archs: Sequence[str]) -> Iterator[str]: + """Generate musllinux tags compatible to the current platform. + + :param archs: Sequence of compatible architectures. + The first one shall be the closest to the actual architecture and be the part of + platform tag after the ``linux_`` prefix, e.g. ``x86_64``. + The ``linux_`` prefix is assumed as a prerequisite for the current platform to + be musllinux-compatible. + + :returns: An iterator of compatible musllinux tags. + """ + sys_musl = _get_musl_version(sys.executable) + if sys_musl is None: # Python not dynamically linked against musl. + return + for arch in archs: + for minor in range(sys_musl.minor, -1, -1): + yield f"musllinux_{sys_musl.major}_{minor}_{arch}" + + +if __name__ == "__main__": # pragma: no cover + import sysconfig + + plat = sysconfig.get_platform() + assert plat.startswith("linux-"), "not linux" + + print("plat:", plat) + print("musl:", _get_musl_version(sys.executable)) + print("tags:", end=" ") + for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])): + print(t, end="\n ") diff --git a/conda_lock/_vendor/poetry/core/_vendor/packaging/_parser.py b/conda_lock/_vendor/poetry/core/_vendor/packaging/_parser.py new file mode 100644 index 00000000..4576981c --- /dev/null +++ b/conda_lock/_vendor/poetry/core/_vendor/packaging/_parser.py @@ -0,0 +1,359 @@ +"""Handwritten parser of dependency specifiers. + +The docstring for each __parse_* function contains ENBF-inspired grammar representing +the implementation. 
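The musl probe above shells out to the dynamic loader itself (musl prints its banner to stderr when run with no arguments) and parses the second line. The parsing step, runnable against a canned banner:

    import re

    def parse_musl_version(output):
        lines = [ln.strip() for ln in output.splitlines() if ln.strip()]
        if len(lines) < 2 or not lines[0].startswith("musl"):
            return None
        m = re.match(r"Version (\d+)\.(\d+)", lines[1])
        return (int(m.group(1)), int(m.group(2))) if m else None

    banner = "musl libc (x86_64)\nVersion 1.2.2\nDynamic Program Loader"
    assert parse_musl_version(banner) == (1, 2)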
+""" + +import ast +from typing import Any, List, NamedTuple, Optional, Tuple, Union + +from ._tokenizer import DEFAULT_RULES, Tokenizer + + +class Node: + def __init__(self, value: str) -> None: + self.value = value + + def __str__(self) -> str: + return self.value + + def __repr__(self) -> str: + return f"<{self.__class__.__name__}('{self}')>" + + def serialize(self) -> str: + raise NotImplementedError + + +class Variable(Node): + def serialize(self) -> str: + return str(self) + + +class Value(Node): + def serialize(self) -> str: + return f'"{self}"' + + +class Op(Node): + def serialize(self) -> str: + return str(self) + + +MarkerVar = Union[Variable, Value] +MarkerItem = Tuple[MarkerVar, Op, MarkerVar] +# MarkerAtom = Union[MarkerItem, List["MarkerAtom"]] +# MarkerList = List[Union["MarkerList", MarkerAtom, str]] +# mypy does not support recursive type definition +# https://github.com/python/mypy/issues/731 +MarkerAtom = Any +MarkerList = List[Any] + + +class ParsedRequirement(NamedTuple): + name: str + url: str + extras: List[str] + specifier: str + marker: Optional[MarkerList] + + +# -------------------------------------------------------------------------------------- +# Recursive descent parser for dependency specifier +# -------------------------------------------------------------------------------------- +def parse_requirement(source: str) -> ParsedRequirement: + return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES)) + + +def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement: + """ + requirement = WS? IDENTIFIER WS? extras WS? requirement_details + """ + tokenizer.consume("WS") + + name_token = tokenizer.expect( + "IDENTIFIER", expected="package name at the start of dependency specifier" + ) + name = name_token.text + tokenizer.consume("WS") + + extras = _parse_extras(tokenizer) + tokenizer.consume("WS") + + url, specifier, marker = _parse_requirement_details(tokenizer) + tokenizer.expect("END", expected="end of dependency specifier") + + return ParsedRequirement(name, url, extras, specifier, marker) + + +def _parse_requirement_details( + tokenizer: Tokenizer, +) -> Tuple[str, str, Optional[MarkerList]]: + """ + requirement_details = AT URL (WS requirement_marker?)? + | specifier WS? (requirement_marker)? + """ + + specifier = "" + url = "" + marker = None + + if tokenizer.check("AT"): + tokenizer.read() + tokenizer.consume("WS") + + url_start = tokenizer.position + url = tokenizer.expect("URL", expected="URL after @").text + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + tokenizer.expect("WS", expected="whitespace after URL") + + # The input might end after whitespace. + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + marker = _parse_requirement_marker( + tokenizer, span_start=url_start, after="URL and whitespace" + ) + else: + specifier_start = tokenizer.position + specifier = _parse_specifier(tokenizer) + tokenizer.consume("WS") + + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + marker = _parse_requirement_marker( + tokenizer, + span_start=specifier_start, + after=( + "version specifier" + if specifier + else "name and no valid version specifier" + ), + ) + + return (url, specifier, marker) + + +def _parse_requirement_marker( + tokenizer: Tokenizer, *, span_start: int, after: str +) -> MarkerList: + """ + requirement_marker = SEMICOLON marker WS? 
+ """ + + if not tokenizer.check("SEMICOLON"): + tokenizer.raise_syntax_error( + f"Expected end or semicolon (after {after})", + span_start=span_start, + ) + tokenizer.read() + + marker = _parse_marker(tokenizer) + tokenizer.consume("WS") + + return marker + + +def _parse_extras(tokenizer: Tokenizer) -> List[str]: + """ + extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)? + """ + if not tokenizer.check("LEFT_BRACKET", peek=True): + return [] + + with tokenizer.enclosing_tokens( + "LEFT_BRACKET", + "RIGHT_BRACKET", + around="extras", + ): + tokenizer.consume("WS") + extras = _parse_extras_list(tokenizer) + tokenizer.consume("WS") + + return extras + + +def _parse_extras_list(tokenizer: Tokenizer) -> List[str]: + """ + extras_list = identifier (wsp* ',' wsp* identifier)* + """ + extras: List[str] = [] + + if not tokenizer.check("IDENTIFIER"): + return extras + + extras.append(tokenizer.read().text) + + while True: + tokenizer.consume("WS") + if tokenizer.check("IDENTIFIER", peek=True): + tokenizer.raise_syntax_error("Expected comma between extra names") + elif not tokenizer.check("COMMA"): + break + + tokenizer.read() + tokenizer.consume("WS") + + extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma") + extras.append(extra_token.text) + + return extras + + +def _parse_specifier(tokenizer: Tokenizer) -> str: + """ + specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS + | WS? version_many WS? + """ + with tokenizer.enclosing_tokens( + "LEFT_PARENTHESIS", + "RIGHT_PARENTHESIS", + around="version specifier", + ): + tokenizer.consume("WS") + parsed_specifiers = _parse_version_many(tokenizer) + tokenizer.consume("WS") + + return parsed_specifiers + + +def _parse_version_many(tokenizer: Tokenizer) -> str: + """ + version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)? + """ + parsed_specifiers = "" + while tokenizer.check("SPECIFIER"): + span_start = tokenizer.position + parsed_specifiers += tokenizer.read().text + if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True): + tokenizer.raise_syntax_error( + ".* suffix can only be used with `==` or `!=` operators", + span_start=span_start, + span_end=tokenizer.position + 1, + ) + if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True): + tokenizer.raise_syntax_error( + "Local version label can only be used with `==` or `!=` operators", + span_start=span_start, + span_end=tokenizer.position, + ) + tokenizer.consume("WS") + if not tokenizer.check("COMMA"): + break + parsed_specifiers += tokenizer.read().text + tokenizer.consume("WS") + + return parsed_specifiers + + +# -------------------------------------------------------------------------------------- +# Recursive descent parser for marker expression +# -------------------------------------------------------------------------------------- +def parse_marker(source: str) -> MarkerList: + return _parse_full_marker(Tokenizer(source, rules=DEFAULT_RULES)) + + +def _parse_full_marker(tokenizer: Tokenizer) -> MarkerList: + retval = _parse_marker(tokenizer) + tokenizer.expect("END", expected="end of marker expression") + return retval + + +def _parse_marker(tokenizer: Tokenizer) -> MarkerList: + """ + marker = marker_atom (BOOLOP marker_atom)+ + """ + expression = [_parse_marker_atom(tokenizer)] + while tokenizer.check("BOOLOP"): + token = tokenizer.read() + expr_right = _parse_marker_atom(tokenizer) + expression.extend((token.text, expr_right)) + return expression + + +def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom: + """ + marker_atom = WS? 
LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS? + | WS? marker_item WS? + """ + + tokenizer.consume("WS") + if tokenizer.check("LEFT_PARENTHESIS", peek=True): + with tokenizer.enclosing_tokens( + "LEFT_PARENTHESIS", + "RIGHT_PARENTHESIS", + around="marker expression", + ): + tokenizer.consume("WS") + marker: MarkerAtom = _parse_marker(tokenizer) + tokenizer.consume("WS") + else: + marker = _parse_marker_item(tokenizer) + tokenizer.consume("WS") + return marker + + +def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem: + """ + marker_item = WS? marker_var WS? marker_op WS? marker_var WS? + """ + tokenizer.consume("WS") + marker_var_left = _parse_marker_var(tokenizer) + tokenizer.consume("WS") + marker_op = _parse_marker_op(tokenizer) + tokenizer.consume("WS") + marker_var_right = _parse_marker_var(tokenizer) + tokenizer.consume("WS") + return (marker_var_left, marker_op, marker_var_right) + + +def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar: + """ + marker_var = VARIABLE | QUOTED_STRING + """ + if tokenizer.check("VARIABLE"): + return process_env_var(tokenizer.read().text.replace(".", "_")) + elif tokenizer.check("QUOTED_STRING"): + return process_python_str(tokenizer.read().text) + else: + tokenizer.raise_syntax_error( + message="Expected a marker variable or quoted string" + ) + + +def process_env_var(env_var: str) -> Variable: + if ( + env_var == "platform_python_implementation" + or env_var == "python_implementation" + ): + return Variable("platform_python_implementation") + else: + return Variable(env_var) + + +def process_python_str(python_str: str) -> Value: + value = ast.literal_eval(python_str) + return Value(str(value)) + + +def _parse_marker_op(tokenizer: Tokenizer) -> Op: + """ + marker_op = IN | NOT IN | OP + """ + if tokenizer.check("IN"): + tokenizer.read() + return Op("in") + elif tokenizer.check("NOT"): + tokenizer.read() + tokenizer.expect("WS", expected="whitespace after 'not'") + tokenizer.expect("IN", expected="'in' after 'not'") + return Op("not in") + elif tokenizer.check("OP"): + return Op(tokenizer.read().text) + else: + return tokenizer.raise_syntax_error( + "Expected marker operator, one of " + "<=, <, !=, ==, >=, >, ~=, ===, in, not in" + ) diff --git a/conda_lock/_vendor/poetry/core/_vendor/packaging/_structures.py b/conda_lock/_vendor/poetry/core/_vendor/packaging/_structures.py index 800d5c55..90a6465f 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/packaging/_structures.py +++ b/conda_lock/_vendor/poetry/core/_vendor/packaging/_structures.py @@ -1,85 +1,60 @@ # This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. 
-from __future__ import absolute_import, division, print_function -class InfinityType(object): - def __repr__(self): - # type: () -> str +class InfinityType: + def __repr__(self) -> str: return "Infinity" - def __hash__(self): - # type: () -> int + def __hash__(self) -> int: return hash(repr(self)) - def __lt__(self, other): - # type: (object) -> bool + def __lt__(self, other: object) -> bool: return False - def __le__(self, other): - # type: (object) -> bool + def __le__(self, other: object) -> bool: return False - def __eq__(self, other): - # type: (object) -> bool + def __eq__(self, other: object) -> bool: return isinstance(other, self.__class__) - def __ne__(self, other): - # type: (object) -> bool - return not isinstance(other, self.__class__) - - def __gt__(self, other): - # type: (object) -> bool + def __gt__(self, other: object) -> bool: return True - def __ge__(self, other): - # type: (object) -> bool + def __ge__(self, other: object) -> bool: return True - def __neg__(self): - # type: (object) -> NegativeInfinityType + def __neg__(self: object) -> "NegativeInfinityType": return NegativeInfinity Infinity = InfinityType() -class NegativeInfinityType(object): - def __repr__(self): - # type: () -> str +class NegativeInfinityType: + def __repr__(self) -> str: return "-Infinity" - def __hash__(self): - # type: () -> int + def __hash__(self) -> int: return hash(repr(self)) - def __lt__(self, other): - # type: (object) -> bool + def __lt__(self, other: object) -> bool: return True - def __le__(self, other): - # type: (object) -> bool + def __le__(self, other: object) -> bool: return True - def __eq__(self, other): - # type: (object) -> bool + def __eq__(self, other: object) -> bool: return isinstance(other, self.__class__) - def __ne__(self, other): - # type: (object) -> bool - return not isinstance(other, self.__class__) - - def __gt__(self, other): - # type: (object) -> bool + def __gt__(self, other: object) -> bool: return False - def __ge__(self, other): - # type: (object) -> bool + def __ge__(self, other: object) -> bool: return False - def __neg__(self): - # type: (object) -> InfinityType + def __neg__(self: object) -> InfinityType: return Infinity diff --git a/conda_lock/_vendor/poetry/core/_vendor/packaging/_tokenizer.py b/conda_lock/_vendor/poetry/core/_vendor/packaging/_tokenizer.py new file mode 100644 index 00000000..dd0d648d --- /dev/null +++ b/conda_lock/_vendor/poetry/core/_vendor/packaging/_tokenizer.py @@ -0,0 +1,192 @@ +import contextlib +import re +from dataclasses import dataclass +from typing import Dict, Iterator, NoReturn, Optional, Tuple, Union + +from .specifiers import Specifier + + +@dataclass +class Token: + name: str + text: str + position: int + + +class ParserSyntaxError(Exception): + """The provided source text could not be parsed correctly.""" + + def __init__( + self, + message: str, + *, + source: str, + span: Tuple[int, int], + ) -> None: + self.span = span + self.message = message + self.source = source + + super().__init__() + + def __str__(self) -> str: + marker = " " * self.span[0] + "~" * (self.span[1] - self.span[0]) + "^" + return "\n ".join([self.message, self.source, marker]) + + +DEFAULT_RULES: "Dict[str, Union[str, re.Pattern[str]]]" = { + "LEFT_PARENTHESIS": r"\(", + "RIGHT_PARENTHESIS": r"\)", + "LEFT_BRACKET": r"\[", + "RIGHT_BRACKET": r"\]", + "SEMICOLON": r";", + "COMMA": r",", + "QUOTED_STRING": re.compile( + r""" + ( + ('[^']*') + | + ("[^"]*") + ) + """, + re.VERBOSE, + ), + "OP": r"(===|==|~=|!=|<=|>=|<|>)", + "BOOLOP": 
r"\b(or|and)\b", + "IN": r"\bin\b", + "NOT": r"\bnot\b", + "VARIABLE": re.compile( + r""" + \b( + python_version + |python_full_version + |os[._]name + |sys[._]platform + |platform_(release|system) + |platform[._](version|machine|python_implementation) + |python_implementation + |implementation_(name|version) + |extra + )\b + """, + re.VERBOSE, + ), + "SPECIFIER": re.compile( + Specifier._operator_regex_str + Specifier._version_regex_str, + re.VERBOSE | re.IGNORECASE, + ), + "AT": r"\@", + "URL": r"[^ \t]+", + "IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b", + "VERSION_PREFIX_TRAIL": r"\.\*", + "VERSION_LOCAL_LABEL_TRAIL": r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*", + "WS": r"[ \t]+", + "END": r"$", +} + + +class Tokenizer: + """Context-sensitive token parsing. + + Provides methods to examine the input stream to check whether the next token + matches. + """ + + def __init__( + self, + source: str, + *, + rules: "Dict[str, Union[str, re.Pattern[str]]]", + ) -> None: + self.source = source + self.rules: Dict[str, re.Pattern[str]] = { + name: re.compile(pattern) for name, pattern in rules.items() + } + self.next_token: Optional[Token] = None + self.position = 0 + + def consume(self, name: str) -> None: + """Move beyond provided token name, if at current position.""" + if self.check(name): + self.read() + + def check(self, name: str, *, peek: bool = False) -> bool: + """Check whether the next token has the provided name. + + By default, if the check succeeds, the token *must* be read before + another check. If `peek` is set to `True`, the token is not loaded and + would need to be checked again. + """ + assert ( + self.next_token is None + ), f"Cannot check for {name!r}, already have {self.next_token!r}" + assert name in self.rules, f"Unknown token name: {name!r}" + + expression = self.rules[name] + + match = expression.match(self.source, self.position) + if match is None: + return False + if not peek: + self.next_token = Token(name, match[0], self.position) + return True + + def expect(self, name: str, *, expected: str) -> Token: + """Expect a certain token name next, failing with a syntax error otherwise. + + The token is *not* read. 
+ """ + if not self.check(name): + raise self.raise_syntax_error(f"Expected {expected}") + return self.read() + + def read(self) -> Token: + """Consume the next token and return it.""" + token = self.next_token + assert token is not None + + self.position += len(token.text) + self.next_token = None + + return token + + def raise_syntax_error( + self, + message: str, + *, + span_start: Optional[int] = None, + span_end: Optional[int] = None, + ) -> NoReturn: + """Raise ParserSyntaxError at the given position.""" + span = ( + self.position if span_start is None else span_start, + self.position if span_end is None else span_end, + ) + raise ParserSyntaxError( + message, + source=self.source, + span=span, + ) + + @contextlib.contextmanager + def enclosing_tokens( + self, open_token: str, close_token: str, *, around: str + ) -> Iterator[None]: + if self.check(open_token): + open_position = self.position + self.read() + else: + open_position = None + + yield + + if open_position is None: + return + + if not self.check(close_token): + self.raise_syntax_error( + f"Expected matching {close_token} for {open_token}, after {around}", + span_start=open_position, + ) + + self.read() diff --git a/conda_lock/_vendor/poetry/core/_vendor/packaging/_typing.py b/conda_lock/_vendor/poetry/core/_vendor/packaging/_typing.py deleted file mode 100644 index 77a8b918..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/packaging/_typing.py +++ /dev/null @@ -1,48 +0,0 @@ -"""For neatly implementing static typing in packaging. - -`mypy` - the static type analysis tool we use - uses the `typing` module, which -provides core functionality fundamental to mypy's functioning. - -Generally, `typing` would be imported at runtime and used in that fashion - -it acts as a no-op at runtime and does not have any run-time overhead by -design. - -As it turns out, `typing` is not vendorable - it uses separate sources for -Python 2/Python 3. Thus, this codebase can not expect it to be present. -To work around this, mypy allows the typing import to be behind a False-y -optional to prevent it from running at runtime and type-comments can be used -to remove the need for the types to be accessible directly during runtime. - -This module provides the False-y guard in a nicely named fashion so that a -curious maintainer can reach here to read this. - -In packaging, all static-typing related imports should be guarded as follows: - - from packaging._typing import TYPE_CHECKING - - if TYPE_CHECKING: - from typing import ... - -Ref: https://github.com/python/mypy/issues/3216 -""" - -__all__ = ["TYPE_CHECKING", "cast"] - -# The TYPE_CHECKING constant defined by the typing module is False at runtime -# but True while type checking. -if False: # pragma: no cover - from typing import TYPE_CHECKING -else: - TYPE_CHECKING = False - -# typing's cast syntax requires calling typing.cast at runtime, but we don't -# want to import typing at runtime. Here, we inform the type checkers that -# we're importing `typing.cast` as `cast` and re-implement typing.cast's -# runtime behavior in a block that is ignored by type checkers. 
-if TYPE_CHECKING: # pragma: no cover - # not executed at runtime - from typing import cast -else: - # executed at runtime - def cast(type_, value): # noqa - return value diff --git a/conda_lock/_vendor/poetry/core/_vendor/packaging/markers.py b/conda_lock/_vendor/poetry/core/_vendor/packaging/markers.py index e0330ab6..8b98fca7 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/packaging/markers.py +++ b/conda_lock/_vendor/poetry/core/_vendor/packaging/markers.py @@ -1,34 +1,24 @@ # This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. -from __future__ import absolute_import, division, print_function import operator import os import platform import sys - -from pyparsing import ( # noqa: N817 - Forward, - Group, - Literal as L, - ParseException, - ParseResults, - QuotedString, - ZeroOrMore, - stringEnd, - stringStart, +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +from ._parser import ( + MarkerAtom, + MarkerList, + Op, + Value, + Variable, + parse_marker as _parse_marker, ) - -from ._compat import string_types -from ._typing import TYPE_CHECKING +from ._tokenizer import ParserSyntaxError from .specifiers import InvalidSpecifier, Specifier - -if TYPE_CHECKING: # pragma: no cover - from typing import Any, Callable, Dict, List, Optional, Tuple, Union - - Operator = Callable[[str, str], bool] - +from .utils import canonicalize_name __all__ = [ "InvalidMarker", @@ -38,6 +28,8 @@ "default_environment", ] +Operator = Callable[[str, str], bool] + class InvalidMarker(ValueError): """ @@ -58,111 +50,27 @@ class UndefinedEnvironmentName(ValueError): """ -class Node(object): - def __init__(self, value): - # type: (Any) -> None - self.value = value - - def __str__(self): - # type: () -> str - return str(self.value) - - def __repr__(self): - # type: () -> str - return "<{0}({1!r})>".format(self.__class__.__name__, str(self)) - - def serialize(self): - # type: () -> str - raise NotImplementedError - - -class Variable(Node): - def serialize(self): - # type: () -> str - return str(self) - - -class Value(Node): - def serialize(self): - # type: () -> str - return '"{0}"'.format(self) - - -class Op(Node): - def serialize(self): - # type: () -> str - return str(self) - - -VARIABLE = ( - L("implementation_version") - | L("platform_python_implementation") - | L("implementation_name") - | L("python_full_version") - | L("platform_release") - | L("platform_version") - | L("platform_machine") - | L("platform_system") - | L("python_version") - | L("sys_platform") - | L("os_name") - | L("os.name") # PEP-345 - | L("sys.platform") # PEP-345 - | L("platform.version") # PEP-345 - | L("platform.machine") # PEP-345 - | L("platform.python_implementation") # PEP-345 - | L("python_implementation") # undocumented setuptools legacy - | L("extra") # PEP-508 -) -ALIASES = { - "os.name": "os_name", - "sys.platform": "sys_platform", - "platform.version": "platform_version", - "platform.machine": "platform_machine", - "platform.python_implementation": "platform_python_implementation", - "python_implementation": "platform_python_implementation", -} -VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0]))) - -VERSION_CMP = ( - L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<") -) - -MARKER_OP = VERSION_CMP | L("not in") | L("in") -MARKER_OP.setParseAction(lambda s, l, t: Op(t[0])) - -MARKER_VALUE = QuotedString("'") | QuotedString('"') 
-MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0])) - -BOOLOP = L("and") | L("or") - -MARKER_VAR = VARIABLE | MARKER_VALUE - -MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR) -MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0])) - -LPAREN = L("(").suppress() -RPAREN = L(")").suppress() - -MARKER_EXPR = Forward() -MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN) -MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR) - -MARKER = stringStart + MARKER_EXPR + stringEnd - - -def _coerce_parse_result(results): - # type: (Union[ParseResults, List[Any]]) -> List[Any] - if isinstance(results, ParseResults): - return [_coerce_parse_result(i) for i in results] - else: - return results +def _normalize_extra_values(results: Any) -> Any: + """ + Normalize extra values. + """ + if isinstance(results[0], tuple): + lhs, op, rhs = results[0] + if isinstance(lhs, Variable) and lhs.value == "extra": + normalized_extra = canonicalize_name(rhs.value) + rhs = Value(normalized_extra) + elif isinstance(rhs, Variable) and rhs.value == "extra": + normalized_extra = canonicalize_name(lhs.value) + lhs = Value(normalized_extra) + results[0] = lhs, op, rhs + return results -def _format_marker(marker, first=True): - # type: (Union[List[str], Tuple[Node, ...], str], Optional[bool]) -> str +def _format_marker( + marker: Union[List[str], MarkerAtom, str], first: Optional[bool] = True +) -> str: - assert isinstance(marker, (list, tuple, string_types)) + assert isinstance(marker, (list, tuple, str)) # Sometimes we have a structure like [[...]] which is a single item list # where the single item is itself its own list. In that case we want to skip @@ -187,7 +95,7 @@ def _format_marker(marker, first=True): return marker -_operators = { +_operators: Dict[str, Operator] = { "in": lambda lhs, rhs: lhs in rhs, "not in": lambda lhs, rhs: lhs not in rhs, "<": operator.lt, @@ -196,52 +104,41 @@ def _format_marker(marker, first=True): "!=": operator.ne, ">=": operator.ge, ">": operator.gt, -} # type: Dict[str, Operator] +} -def _eval_op(lhs, op, rhs): - # type: (str, Op, str) -> bool +def _eval_op(lhs: str, op: Op, rhs: str) -> bool: try: spec = Specifier("".join([op.serialize(), rhs])) except InvalidSpecifier: pass else: - return spec.contains(lhs) + return spec.contains(lhs, prereleases=True) - oper = _operators.get(op.serialize()) # type: Optional[Operator] + oper: Optional[Operator] = _operators.get(op.serialize()) if oper is None: - raise UndefinedComparison( - "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs) - ) + raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.") return oper(lhs, rhs) -class Undefined(object): - pass - - -_undefined = Undefined() +def _normalize(*values: str, key: str) -> Tuple[str, ...]: + # PEP 685 – Comparison of extra names for optional distribution dependencies + # https://peps.python.org/pep-0685/ + # > When comparing extra names, tools MUST normalize the names being + # > compared using the semantics outlined in PEP 503 for names + if key == "extra": + return tuple(canonicalize_name(v) for v in values) + # other environment markers don't have such standards + return values -def _get_env(environment, name): - # type: (Dict[str, str], str) -> str - value = environment.get(name, _undefined) # type: Union[str, Undefined] - if isinstance(value, Undefined): - raise UndefinedEnvironmentName( - "{0!r} does not exist in evaluation environment.".format(name) - ) - - return value - - -def _evaluate_markers(markers, environment): - # type:
(List[Any], Dict[str, str]) -> bool - groups = [[]] # type: List[List[bool]] +def _evaluate_markers(markers: MarkerList, environment: Dict[str, str]) -> bool: + groups: List[List[bool]] = [[]] for marker in markers: - assert isinstance(marker, (list, tuple, string_types)) + assert isinstance(marker, (list, tuple, str)) if isinstance(marker, list): groups[-1].append(_evaluate_markers(marker, environment)) @@ -249,12 +146,15 @@ def _evaluate_markers(markers, environment): lhs, op, rhs = marker if isinstance(lhs, Variable): - lhs_value = _get_env(environment, lhs.value) + environment_key = lhs.value + lhs_value = environment[environment_key] rhs_value = rhs.value else: lhs_value = lhs.value - rhs_value = _get_env(environment, rhs.value) + environment_key = rhs.value + rhs_value = environment[environment_key] + lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key) groups[-1].append(_eval_op(lhs_value, op, rhs_value)) else: assert marker in ["and", "or"] @@ -264,8 +164,7 @@ def _evaluate_markers(markers, environment): return any(all(item) for item in groups) -def format_full_version(info): - # type: (sys._version_info) -> str +def format_full_version(info: "sys._version_info") -> str: version = "{0.major}.{0.minor}.{0.micro}".format(info) kind = info.releaselevel if kind != "final": @@ -273,18 +172,9 @@ def format_full_version(info): return version -def default_environment(): - # type: () -> Dict[str, str] - if hasattr(sys, "implementation"): - # Ignoring the `sys.implementation` reference for type checking due to - # mypy not liking that the attribute doesn't exist in Python 2.7 when - # run with the `--py27` flag. - iver = format_full_version(sys.implementation.version) # type: ignore - implementation_name = sys.implementation.name # type: ignore - else: - iver = "0" - implementation_name = "" - +def default_environment() -> Dict[str, str]: + iver = format_full_version(sys.implementation.version) + implementation_name = sys.implementation.name return { "implementation_name": implementation_name, "implementation_version": iver, @@ -300,27 +190,48 @@ def default_environment(): } -class Marker(object): - def __init__(self, marker): - # type: (str) -> None +class Marker: + def __init__(self, marker: str) -> None: + # Note: We create a Marker object without calling this constructor in + # packaging.requirements.Requirement. If any additional logic is + # added here, make sure to mirror/adapt Requirement. 
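+        # A quick orientation (an illustrative aside using the public packaging
+        # API; the results depend on the running interpreter):
+        #     m = Marker('python_version >= "3.8" and os_name == "posix"')
+        #     str(m)        # round-trips: 'python_version >= "3.8" and os_name == "posix"'
+        #     m.evaluate()  # True on a POSIX CPython 3.8 or newer
+        #     m.evaluate({"os_name": "nt"})  # overrides merge into default_environment()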
try: - self._markers = _coerce_parse_result(MARKER.parseString(marker)) - except ParseException as e: - err_str = "Invalid marker: {0!r}, parse error at {1!r}".format( - marker, marker[e.loc : e.loc + 8] - ) - raise InvalidMarker(err_str) - - def __str__(self): - # type: () -> str + self._markers = _normalize_extra_values(_parse_marker(marker)) + # The attribute `_markers` can be described in terms of a recursive type: + # MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]] + # + # For example, the following expression: + # python_version > "3.6" or (python_version == "3.6" and os_name == "unix") + # + # is parsed into: + # [ + # (<Variable('python_version')>, <Op('>')>, <Value('3.6')>), + # 'and', + # [ + # (<Variable('python_version')>, <Op('==')>, <Value('3.6')>), + # 'or', + # (<Variable('os_name')>, <Op('==')>, <Value('unix')>) + # ] + # ] + except ParserSyntaxError as e: + raise InvalidMarker(str(e)) from e + + def __str__(self) -> str: return _format_marker(self._markers) - def __repr__(self): - # type: () -> str - return "<Marker({0!r})>".format(str(self)) + def __repr__(self) -> str: + return f"<Marker({str(self)!r})>" + + def __hash__(self) -> int: + return hash((self.__class__.__name__, str(self))) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, Marker): + return NotImplemented + + return str(self) == str(other) - def evaluate(self, environment=None): - # type: (Optional[Dict[str, str]]) -> bool + def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool: """Evaluate a marker. Return the boolean from evaluating the given marker against the @@ -330,7 +241,12 @@ def evaluate(self, environment=None): The environment is determined from the current Python process. """ current_environment = default_environment() + current_environment["extra"] = "" if environment is not None: current_environment.update(environment) + # The API used to allow setting extra to None. We need to handle this + # case for backwards compatibility. + if current_environment["extra"] is None: + current_environment["extra"] = "" return _evaluate_markers(self._markers, current_environment) diff --git a/conda_lock/_vendor/poetry/core/_vendor/packaging/metadata.py b/conda_lock/_vendor/poetry/core/_vendor/packaging/metadata.py new file mode 100644 index 00000000..7b0e6a9c --- /dev/null +++ b/conda_lock/_vendor/poetry/core/_vendor/packaging/metadata.py @@ -0,0 +1,822 @@ +import email.feedparser +import email.header +import email.message +import email.parser +import email.policy +import sys +import typing +from typing import ( + Any, + Callable, + Dict, + Generic, + List, + Optional, + Tuple, + Type, + Union, + cast, +) + +from . import requirements, specifiers, utils, version as version_module + +T = typing.TypeVar("T") +if sys.version_info[:2] >= (3, 8): # pragma: no cover + from typing import Literal, TypedDict +else: # pragma: no cover + if typing.TYPE_CHECKING: + from typing_extensions import Literal, TypedDict + else: + try: + from typing_extensions import Literal, TypedDict + except ImportError: + + class Literal: + def __init_subclass__(*_args, **_kwargs): + pass + + class TypedDict: + def __init_subclass__(*_args, **_kwargs): + pass + + +try: + ExceptionGroup = __builtins__.ExceptionGroup # type: ignore[attr-defined] +except AttributeError: + + class ExceptionGroup(Exception): # type: ignore[no-redef] # noqa: N818 + """A minimal implementation of :external:exc:`ExceptionGroup` from Python 3.11. + + If :external:exc:`ExceptionGroup` is already defined by Python itself, + that version is used instead.
+ """ + + message: str + exceptions: List[Exception] + + def __init__(self, message: str, exceptions: List[Exception]) -> None: + self.message = message + self.exceptions = exceptions + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.message!r}, {self.exceptions!r})" + + +class InvalidMetadata(ValueError): + """A metadata field contains invalid data.""" + + field: str + """The name of the field that contains invalid data.""" + + def __init__(self, field: str, message: str) -> None: + self.field = field + super().__init__(message) + + +# The RawMetadata class attempts to make as few assumptions about the underlying +# serialization formats as possible. The idea is that as long as a serialization +# formats offer some very basic primitives in *some* way then we can support +# serializing to and from that format. +class RawMetadata(TypedDict, total=False): + """A dictionary of raw core metadata. + + Each field in core metadata maps to a key of this dictionary (when data is + provided). The key is lower-case and underscores are used instead of dashes + compared to the equivalent core metadata field. Any core metadata field that + can be specified multiple times or can hold multiple values in a single + field have a key with a plural name. See :class:`Metadata` whose attributes + match the keys of this dictionary. + + Core metadata fields that can be specified multiple times are stored as a + list or dict depending on which is appropriate for the field. Any fields + which hold multiple values in a single field are stored as a list. + + """ + + # Metadata 1.0 - PEP 241 + metadata_version: str + name: str + version: str + platforms: List[str] + summary: str + description: str + keywords: List[str] + home_page: str + author: str + author_email: str + license: str + + # Metadata 1.1 - PEP 314 + supported_platforms: List[str] + download_url: str + classifiers: List[str] + requires: List[str] + provides: List[str] + obsoletes: List[str] + + # Metadata 1.2 - PEP 345 + maintainer: str + maintainer_email: str + requires_dist: List[str] + provides_dist: List[str] + obsoletes_dist: List[str] + requires_python: str + requires_external: List[str] + project_urls: Dict[str, str] + + # Metadata 2.0 + # PEP 426 attempted to completely revamp the metadata format + # but got stuck without ever being able to build consensus on + # it and ultimately ended up withdrawn. + # + # However, a number of tools had started emitting METADATA with + # `2.0` Metadata-Version, so for historical reasons, this version + # was skipped. + + # Metadata 2.1 - PEP 566 + description_content_type: str + provides_extra: List[str] + + # Metadata 2.2 - PEP 643 + dynamic: List[str] + + # Metadata 2.3 - PEP 685 + # No new fields were added in PEP 685, just some edge case were + # tightened up to provide better interoptability. 
+ + +_STRING_FIELDS = { + "author", + "author_email", + "description", + "description_content_type", + "download_url", + "home_page", + "license", + "maintainer", + "maintainer_email", + "metadata_version", + "name", + "requires_python", + "summary", + "version", +} + +_LIST_FIELDS = { + "classifiers", + "dynamic", + "obsoletes", + "obsoletes_dist", + "platforms", + "provides", + "provides_dist", + "provides_extra", + "requires", + "requires_dist", + "requires_external", + "supported_platforms", +} + +_DICT_FIELDS = { + "project_urls", +} + + +def _parse_keywords(data: str) -> List[str]: + """Split a string of comma-separated keywords into a list of keywords.""" + return [k.strip() for k in data.split(",")] + + +def _parse_project_urls(data: List[str]) -> Dict[str, str]: + """Parse a list of label/URL string pairings separated by a comma.""" + urls = {} + for pair in data: + # Our logic is slightly tricky here as we want to try and do + # *something* reasonable with malformed data. + # + # The main thing that we have to worry about, is data that does + # not have a ',' at all to split the label from the value. There + # isn't a singular right answer here, and we will fail validation + # later on (if the caller is validating) so it doesn't *really* + # matter, but since the missing value has to be an empty str + # and our return value is dict[str, str], if we let the key + # be the missing value, then they'd have multiple '' values that + # overwrite each other in an accumulating dict. + # + # The other potential issue is that it's possible to have the + # same label multiple times in the metadata, with no solid "right" + # answer for what to do in that case. As such, we'll do the only + # thing we can, which is treat the field as unparseable and add it + # to our list of unparsed fields. + parts = [p.strip() for p in pair.split(",", 1)] + parts.extend([""] * (max(0, 2 - len(parts)))) # Ensure 2 items + + # TODO: The spec doesn't say anything about if the keys should be + # considered case sensitive or not... logically they should + # be case-preserving and case-insensitive, but doing that + # would open up more cases where we might have duplicate + # entries. + label, url = parts + if label in urls: + # The label already exists in our set of urls, so this field + # is unparseable, and we can just add the whole thing to our + # unparseable data and stop processing it. + raise KeyError("duplicate labels in project urls") + urls[label] = url + + return urls + + +def _get_payload(msg: email.message.Message, source: Union[bytes, str]) -> str: + """Get the body of the message.""" + # If our source is a str, then our caller has managed encodings for us, + # and we don't need to deal with it. + if isinstance(source, str): + payload: str = msg.get_payload() + return payload + # If our source is a bytes, then we're managing the encoding and we need + # to deal with it. + else: + bpayload: bytes = msg.get_payload(decode=True) + try: + return bpayload.decode("utf8", "strict") + except UnicodeDecodeError: + raise ValueError("payload in an invalid encoding") + + +# The various parse_FORMAT functions here are intended to be as lenient as +# possible in their parsing, while still returning a correctly typed +# RawMetadata. +# +# To aid in this, we also generally want to do as little touching of the +# data as possible, except where there are possibly some historic holdovers +# that make valid data awkward to work with.
+# +# While this is a lower level, intermediate format than our ``Metadata`` +# class, some light touch ups can make a massive difference in usability. + +# Map METADATA fields to RawMetadata. +_EMAIL_TO_RAW_MAPPING = { + "author": "author", + "author-email": "author_email", + "classifier": "classifiers", + "description": "description", + "description-content-type": "description_content_type", + "download-url": "download_url", + "dynamic": "dynamic", + "home-page": "home_page", + "keywords": "keywords", + "license": "license", + "maintainer": "maintainer", + "maintainer-email": "maintainer_email", + "metadata-version": "metadata_version", + "name": "name", + "obsoletes": "obsoletes", + "obsoletes-dist": "obsoletes_dist", + "platform": "platforms", + "project-url": "project_urls", + "provides": "provides", + "provides-dist": "provides_dist", + "provides-extra": "provides_extra", + "requires": "requires", + "requires-dist": "requires_dist", + "requires-external": "requires_external", + "requires-python": "requires_python", + "summary": "summary", + "supported-platform": "supported_platforms", + "version": "version", +} +_RAW_TO_EMAIL_MAPPING = {raw: email for email, raw in _EMAIL_TO_RAW_MAPPING.items()} + + +def parse_email(data: Union[bytes, str]) -> Tuple[RawMetadata, Dict[str, List[str]]]: + """Parse a distribution's metadata stored as email headers (e.g. from ``METADATA``). + + This function returns a two-item tuple of dicts. The first dict is of + recognized fields from the core metadata specification. Fields that can be + parsed and translated into Python's built-in types are converted + appropriately. All other fields are left as-is. Fields that are allowed to + appear multiple times are stored as lists. + + The second dict contains all other fields from the metadata. This includes + any unrecognized fields. It also includes any fields which are expected to + be parsed into a built-in type but were not formatted appropriately. Finally, + any fields that are expected to appear only once but are repeated are + included in this dict. + + """ + raw: Dict[str, Union[str, List[str], Dict[str, str]]] = {} + unparsed: Dict[str, List[str]] = {} + + if isinstance(data, str): + parsed = email.parser.Parser(policy=email.policy.compat32).parsestr(data) + else: + parsed = email.parser.BytesParser(policy=email.policy.compat32).parsebytes(data) + + # We have to wrap parsed.keys() in a set, because in the case of multiple + # values for a key (a list), the key will appear multiple times in the + # list of keys, but we're avoiding that by using get_all(). + for name in frozenset(parsed.keys()): + # Header names in RFC are case insensitive, so we'll normalize to all + # lower case to make comparisons easier. + name = name.lower() + + # We use get_all() here, even for fields that aren't multiple use, + # because otherwise someone could have e.g. two Name fields, and we + # would just silently ignore it rather than doing something about it. + headers = parsed.get_all(name) or [] + + # The way the email module works when parsing bytes is that it + # unconditionally decodes the bytes as ascii using the surrogateescape + # handler. When you pull that data back out (such as with get_all() ), + # it looks to see if the str has any surrogate escapes, and if it does + # it wraps it in a Header object instead of returning the string. + # + # As such, we'll look for those Header objects, and fix up the encoding. 
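+        # (Illustrative aside, not upstream commentary: a METADATA file whose
+        # bytes hold latin-1 "Author: José" comes back from get_all() as a
+        # Header object rather than a plain str; decode_header() below then
+        # hands us the raw bytes so we can try utf-8 first and fall back to
+        # latin1.)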
+ value = [] + # Flag if we have run into any issues processing the headers, thus + # signalling that the data belongs in 'unparsed'. + valid_encoding = True + for h in headers: + # It's unclear if this can return more types than just a Header or + # a str, so we'll just assert here to make sure. + assert isinstance(h, (email.header.Header, str)) + + # If it's a header object, we need to do our little dance to get + # the real data out of it. In cases where there is invalid data + # we're going to end up with mojibake, but there's no obvious, good + # way around that without reimplementing parts of the Header object + # ourselves. + # + # That should be fine since, if mojibake happens, this key is + # going into the unparsed dict anyway. + if isinstance(h, email.header.Header): + # The Header object stores its data as chunks, and each chunk + # can be independently encoded, so we'll need to check each + # of them. + chunks: List[Tuple[bytes, Optional[str]]] = [] + for bin, encoding in email.header.decode_header(h): + try: + bin.decode("utf8", "strict") + except UnicodeDecodeError: + # Enable mojibake. + encoding = "latin1" + valid_encoding = False + else: + encoding = "utf8" + chunks.append((bin, encoding)) + + # Turn our chunks back into a Header object, then let that + # Header object do the right thing to turn them into a + # string for us. + value.append(str(email.header.make_header(chunks))) + # This is already a string, so just add it. + else: + value.append(h) + + # We've processed all of our values to get them into a list of str, + # but we may have mojibake data, in which case this is an unparsed + # field. + if not valid_encoding: + unparsed[name] = value + continue + + raw_name = _EMAIL_TO_RAW_MAPPING.get(name) + if raw_name is None: + # This is a bit of a weird situation, we've encountered a key that + # we don't know what it means, so we don't know whether it's meant + # to be a list or not. + # + # Since we can't really tell one way or another, we'll just leave it + # as a list, even though it may be a single item list, because that's + # what makes the most sense for email headers. + unparsed[name] = value + continue + + # If this is one of our string fields, then we'll check to see if our + # value is a list of a single item. If it is then we'll assume that + # it was emitted as a single string, and unwrap the str from inside + # the list. + # + # If it's any other kind of data, then we haven't the faintest clue + # what we should parse it as, and we have to just add it to our list + # of unparsed stuff. + if raw_name in _STRING_FIELDS and len(value) == 1: + raw[raw_name] = value[0] + # If this is one of our list of string fields, then we can just assign + # the value, since email *only* has strings, and our get_all() call + # above ensures that this is a list. + elif raw_name in _LIST_FIELDS: + raw[raw_name] = value + # Special Case: Keywords + # The keywords field is implemented in the metadata spec as a str, + # but it conceptually is a list of strings, and is serialized using + # ", ".join(keywords), so we'll do some light data massaging to turn + # this into what it logically is.
+ elif raw_name == "keywords" and len(value) == 1: + raw[raw_name] = _parse_keywords(value[0]) + # Special Case: Project-URL + # The project urls field is implemented in the metadata spec as a list of + # specially-formatted strings that represent a key and a value, which + # is fundamentally a mapping, however the email format doesn't support + # mappings in a sane way, so it was crammed into a list of strings + # instead. + # + # We will do a little light data massaging to turn this into a map as + # it logically should be. + elif raw_name == "project_urls": + try: + raw[raw_name] = _parse_project_urls(value) + except KeyError: + unparsed[name] = value + # Nothing that we've done has managed to parse this, so it'll just + # throw it in our unparseable data and move on. + else: + unparsed[name] = value + + # We need to support getting the Description from the message payload in + # addition to getting it from the headers. This does mean, though, there + # is the possibility of it being set both ways, in which case we put both + # in 'unparsed' since we don't know which is right. + try: + payload = _get_payload(parsed, data) + except ValueError: + unparsed.setdefault("description", []).append( + parsed.get_payload(decode=isinstance(data, bytes)) + ) + else: + if payload: + # Check to see if we've already got a description, if so then both + # it, and this body move to unparseable. + if "description" in raw: + description_header = cast(str, raw.pop("description")) + unparsed.setdefault("description", []).extend( + [description_header, payload] + ) + elif "description" in unparsed: + unparsed["description"].append(payload) + else: + raw["description"] = payload + + # We need to cast our `raw` to a RawMetadata, because a TypedDict only supports + # literal key names while we're computing our key names on purpose, but the + # way this function is implemented ensures our `TypedDict` can only have valid + # key names. + return cast(RawMetadata, raw), unparsed + + +_NOT_FOUND = object() + + +# Keep the two values in sync. +_VALID_METADATA_VERSIONS = ["1.0", "1.1", "1.2", "2.1", "2.2", "2.3"] +_MetadataVersion = Literal["1.0", "1.1", "1.2", "2.1", "2.2", "2.3"] + +_REQUIRED_ATTRS = frozenset(["metadata_version", "name", "version"]) + + +class _Validator(Generic[T]): + """Validate a metadata field. + + All _process_*() methods correspond to a core metadata field. The method is + called with the field's raw value. If the raw value is valid it is returned + in its "enriched" form (e.g. ``version.Version`` for the ``Version`` field). + If the raw value is invalid, :exc:`InvalidMetadata` is raised (with a cause + as appropriate). + """ + + name: str + raw_name: str + added: _MetadataVersion + + def __init__( + self, + *, + added: _MetadataVersion = "1.0", + ) -> None: + self.added = added + + def __set_name__(self, _owner: "Metadata", name: str) -> None: + self.name = name + self.raw_name = _RAW_TO_EMAIL_MAPPING[name] + + def __get__(self, instance: "Metadata", _owner: Type["Metadata"]) -> T: + # With Python 3.8, the caching can be replaced with functools.cached_property(). + # No need to check the cache as attribute lookup will resolve into the + # instance's __dict__ before __get__ is called.
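+        # (Illustrative aside: _Validator only defines __get__, so it is a
+        # non-data descriptor; the first lookup runs __get__ and stores the
+        # enriched value in the instance __dict__, and later lookups never
+        # reach the descriptor again:
+        #     meta = Metadata.from_raw({"name": "spam"}, validate=False)
+        #     meta.name   # runs __get__, caches "spam" in meta.__dict__
+        #     meta.name   # served straight from meta.__dict__)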
+ cache = instance.__dict__ + try: + value = instance._raw[self.name] # type: ignore[literal-required] + except KeyError: + if self.name in _STRING_FIELDS: + value = "" + elif self.name in _LIST_FIELDS: + value = [] + elif self.name in _DICT_FIELDS: + value = {} + else: # pragma: no cover + assert False + + try: + converter: Callable[[Any], T] = getattr(self, f"_process_{self.name}") + except AttributeError: + pass + else: + value = converter(value) + + cache[self.name] = value + try: + del instance._raw[self.name] # type: ignore[misc] + except KeyError: + pass + + return cast(T, value) + + def _invalid_metadata( + self, msg: str, cause: Optional[Exception] = None + ) -> InvalidMetadata: + exc = InvalidMetadata( + self.raw_name, msg.format_map({"field": repr(self.raw_name)}) + ) + exc.__cause__ = cause + return exc + + def _process_metadata_version(self, value: str) -> _MetadataVersion: + # Implicitly makes Metadata-Version required. + if value not in _VALID_METADATA_VERSIONS: + raise self._invalid_metadata(f"{value!r} is not a valid metadata version") + return cast(_MetadataVersion, value) + + def _process_name(self, value: str) -> str: + if not value: + raise self._invalid_metadata("{field} is a required field") + # Validate the name as a side-effect. + try: + utils.canonicalize_name(value, validate=True) + except utils.InvalidName as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) + else: + return value + + def _process_version(self, value: str) -> version_module.Version: + if not value: + raise self._invalid_metadata("{field} is a required field") + try: + return version_module.parse(value) + except version_module.InvalidVersion as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) + + def _process_summary(self, value: str) -> str: + """Check the field contains no newlines.""" + if "\n" in value: + raise self._invalid_metadata("{field} must be a single line") + return value + + def _process_description_content_type(self, value: str) -> str: + content_types = {"text/plain", "text/x-rst", "text/markdown"} + message = email.message.EmailMessage() + message["content-type"] = value + + content_type, parameters = ( + # Defaults to `text/plain` if parsing failed. + message.get_content_type().lower(), + message["content-type"].params, + ) + # Check if content-type is valid or defaulted to `text/plain` and thus was + # not parseable. + if content_type not in content_types or content_type not in value.lower(): + raise self._invalid_metadata( + f"{{field}} must be one of {list(content_types)}, not {value!r}" + ) + + charset = parameters.get("charset", "UTF-8") + if charset != "UTF-8": + raise self._invalid_metadata( + f"{{field}} can only specify the UTF-8 charset, not {list(charset)}" + ) + + markdown_variants = {"GFM", "CommonMark"} + variant = parameters.get("variant", "GFM") # Use an acceptable default. 
+ if content_type == "text/markdown" and variant not in markdown_variants: + raise self._invalid_metadata( + f"valid Markdown variants for {{field}} are {list(markdown_variants)}, " + f"not {variant!r}", + ) + return value + + def _process_dynamic(self, value: List[str]) -> List[str]: + for dynamic_field in map(str.lower, value): + if dynamic_field in {"name", "version", "metadata-version"}: + raise self._invalid_metadata( + f"{value!r} is not allowed as a dynamic field" + ) + elif dynamic_field not in _EMAIL_TO_RAW_MAPPING: + raise self._invalid_metadata(f"{value!r} is not a valid dynamic field") + return list(map(str.lower, value)) + + def _process_provides_extra( + self, + value: List[str], + ) -> List[utils.NormalizedName]: + normalized_names = [] + try: + for name in value: + normalized_names.append(utils.canonicalize_name(name, validate=True)) + except utils.InvalidName as exc: + raise self._invalid_metadata( + f"{name!r} is invalid for {{field}}", cause=exc + ) + else: + return normalized_names + + def _process_requires_python(self, value: str) -> specifiers.SpecifierSet: + try: + return specifiers.SpecifierSet(value) + except specifiers.InvalidSpecifier as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) + + def _process_requires_dist( + self, + value: List[str], + ) -> List[requirements.Requirement]: + reqs = [] + try: + for req in value: + reqs.append(requirements.Requirement(req)) + except requirements.InvalidRequirement as exc: + raise self._invalid_metadata(f"{req!r} is invalid for {{field}}", cause=exc) + else: + return reqs + + +class Metadata: + """Representation of distribution metadata. + + Compared to :class:`RawMetadata`, this class provides objects representing + metadata fields instead of only using built-in types. Any invalid metadata + will cause :exc:`InvalidMetadata` to be raised (with a + :py:attr:`~BaseException.__cause__` attribute as appropriate). + """ + + _raw: RawMetadata + + @classmethod + def from_raw(cls, data: RawMetadata, *, validate: bool = True) -> "Metadata": + """Create an instance from :class:`RawMetadata`. + + If *validate* is true, all metadata will be validated. All exceptions + related to validation will be gathered and raised as an :class:`ExceptionGroup`. + """ + ins = cls() + ins._raw = data.copy() # Mutations occur due to caching enriched values. + + if validate: + exceptions: List[InvalidMetadata] = [] + try: + metadata_version = ins.metadata_version + metadata_age = _VALID_METADATA_VERSIONS.index(metadata_version) + except InvalidMetadata as metadata_version_exc: + exceptions.append(metadata_version_exc) + metadata_version = None + + # Make sure to check for the fields that are present, the required + # fields (so their absence can be reported). + fields_to_check = frozenset(ins._raw) | _REQUIRED_ATTRS + # Remove fields that have already been checked. + fields_to_check -= {"metadata_version"} + + for key in fields_to_check: + try: + if metadata_version: + # Can't use getattr() as that triggers descriptor protocol which + # will fail due to no value for the instance argument. 
+ try: + field_metadata_version = cls.__dict__[key].added + except KeyError: + exc = InvalidMetadata(key, f"unrecognized field: {key!r}") + exceptions.append(exc) + continue + field_age = _VALID_METADATA_VERSIONS.index( + field_metadata_version + ) + if field_age > metadata_age: + field = _RAW_TO_EMAIL_MAPPING[key] + exc = InvalidMetadata( + field, + "{field} introduced in metadata version " + "{field_metadata_version}, not {metadata_version}", + ) + exceptions.append(exc) + continue + getattr(ins, key) + except InvalidMetadata as exc: + exceptions.append(exc) + + if exceptions: + raise ExceptionGroup("invalid metadata", exceptions) + + return ins + + @classmethod + def from_email( + cls, data: Union[bytes, str], *, validate: bool = True + ) -> "Metadata": + """Parse metadata from email headers. + + If *validate* is true, the metadata will be validated. All exceptions + related to validation will be gathered and raised as an :class:`ExceptionGroup`. + """ + exceptions: list[InvalidMetadata] = [] + raw, unparsed = parse_email(data) + + if validate: + for unparsed_key in unparsed: + if unparsed_key in _EMAIL_TO_RAW_MAPPING: + message = f"{unparsed_key!r} has invalid data" + else: + message = f"unrecognized field: {unparsed_key!r}" + exceptions.append(InvalidMetadata(unparsed_key, message)) + + if exceptions: + raise ExceptionGroup("unparsed", exceptions) + + try: + return cls.from_raw(raw, validate=validate) + except ExceptionGroup as exc_group: + exceptions.extend(exc_group.exceptions) + raise ExceptionGroup("invalid or unparsed metadata", exceptions) from None + + metadata_version: _Validator[_MetadataVersion] = _Validator() + """:external:ref:`core-metadata-metadata-version` + (required; validated to be a valid metadata version)""" + name: _Validator[str] = _Validator() + """:external:ref:`core-metadata-name` + (required; validated using :func:`~packaging.utils.canonicalize_name` and its + *validate* parameter)""" + version: _Validator[version_module.Version] = _Validator() + """:external:ref:`core-metadata-version` (required)""" + dynamic: _Validator[List[str]] = _Validator( + added="2.2", + ) + """:external:ref:`core-metadata-dynamic` + (validated against core metadata field names and lowercased)""" + platforms: _Validator[List[str]] = _Validator() + """:external:ref:`core-metadata-platform`""" + supported_platforms: _Validator[List[str]] = _Validator(added="1.1") + """:external:ref:`core-metadata-supported-platform`""" + summary: _Validator[str] = _Validator() + """:external:ref:`core-metadata-summary` (validated to contain no newlines)""" + description: _Validator[str] = _Validator() # TODO 2.1: can be in body + """:external:ref:`core-metadata-description`""" + description_content_type: _Validator[str] = _Validator(added="2.1") + """:external:ref:`core-metadata-description-content-type` (validated)""" + keywords: _Validator[List[str]] = _Validator() + """:external:ref:`core-metadata-keywords`""" + home_page: _Validator[str] = _Validator() + """:external:ref:`core-metadata-home-page`""" + download_url: _Validator[str] = _Validator(added="1.1") + """:external:ref:`core-metadata-download-url`""" + author: _Validator[str] = _Validator() + """:external:ref:`core-metadata-author`""" + author_email: _Validator[str] = _Validator() + """:external:ref:`core-metadata-author-email`""" + maintainer: _Validator[str] = _Validator(added="1.2") + """:external:ref:`core-metadata-maintainer`""" + maintainer_email: _Validator[str] = _Validator(added="1.2") + 
""":external:ref:`core-metadata-maintainer-email`""" + license: _Validator[str] = _Validator() + """:external:ref:`core-metadata-license`""" + classifiers: _Validator[List[str]] = _Validator(added="1.1") + """:external:ref:`core-metadata-classifier`""" + requires_dist: _Validator[List[requirements.Requirement]] = _Validator(added="1.2") + """:external:ref:`core-metadata-requires-dist`""" + requires_python: _Validator[specifiers.SpecifierSet] = _Validator(added="1.2") + """:external:ref:`core-metadata-requires-python`""" + # Because `Requires-External` allows for non-PEP 440 version specifiers, we + # don't do any processing on the values. + requires_external: _Validator[List[str]] = _Validator(added="1.2") + """:external:ref:`core-metadata-requires-external`""" + project_urls: _Validator[Dict[str, str]] = _Validator(added="1.2") + """:external:ref:`core-metadata-project-url`""" + # PEP 685 lets us raise an error if an extra doesn't pass `Name` validation + # regardless of metadata version. + provides_extra: _Validator[List[utils.NormalizedName]] = _Validator( + added="2.1", + ) + """:external:ref:`core-metadata-provides-extra`""" + provides_dist: _Validator[List[str]] = _Validator(added="1.2") + """:external:ref:`core-metadata-provides-dist`""" + obsoletes_dist: _Validator[List[str]] = _Validator(added="1.2") + """:external:ref:`core-metadata-obsoletes-dist`""" + requires: _Validator[List[str]] = _Validator(added="1.1") + """``Requires`` (deprecated)""" + provides: _Validator[List[str]] = _Validator(added="1.1") + """``Provides`` (deprecated)""" + obsoletes: _Validator[List[str]] = _Validator(added="1.1") + """``Obsoletes`` (deprecated)""" diff --git a/conda_lock/_vendor/poetry/core/_vendor/packaging/requirements.py b/conda_lock/_vendor/poetry/core/_vendor/packaging/requirements.py index aa69d50d..0c00eba3 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/packaging/requirements.py +++ b/conda_lock/_vendor/poetry/core/_vendor/packaging/requirements.py @@ -1,37 +1,14 @@ # This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. 
-from __future__ import absolute_import, division, print_function -import re -import string -import sys +from typing import Any, Iterator, Optional, Set -from pyparsing import ( # noqa: N817 - Combine, - Literal as L, - Optional, - ParseException, - Regex, - Word, - ZeroOrMore, - originalTextFor, - stringEnd, - stringStart, -) - -from ._typing import TYPE_CHECKING -from .markers import MARKER_EXPR, Marker -from .specifiers import LegacySpecifier, Specifier, SpecifierSet - -if sys.version_info[0] >= 3: - from urllib import parse as urlparse # pragma: no cover -else: # pragma: no cover - import urlparse - - -if TYPE_CHECKING: # pragma: no cover - from typing import List, Optional as TOptional, Set +from ._parser import parse_requirement as _parse_requirement +from ._tokenizer import ParserSyntaxError +from .markers import Marker, _normalize_extra_values +from .specifiers import SpecifierSet +from .utils import canonicalize_name class InvalidRequirement(ValueError): @@ -40,61 +17,7 @@ class InvalidRequirement(ValueError): """ -ALPHANUM = Word(string.ascii_letters + string.digits) - -LBRACKET = L("[").suppress() -RBRACKET = L("]").suppress() -LPAREN = L("(").suppress() -RPAREN = L(")").suppress() -COMMA = L(",").suppress() -SEMICOLON = L(";").suppress() -AT = L("@").suppress() - -PUNCTUATION = Word("-_.") -IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM) -IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END)) - -NAME = IDENTIFIER("name") -EXTRA = IDENTIFIER - -URI = Regex(r"[^ ]+")("url") -URL = AT + URI - -EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA) -EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras") - -VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE) -VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE) - -VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY -VERSION_MANY = Combine( - VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False -)("_raw_spec") -_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)) -_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "") - -VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier") -VERSION_SPEC.setParseAction(lambda s, l, t: t[1]) - -MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker") -MARKER_EXPR.setParseAction( - lambda s, l, t: Marker(s[t._original_start : t._original_end]) -) -MARKER_SEPARATOR = SEMICOLON -MARKER = MARKER_SEPARATOR + MARKER_EXPR - -VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER) -URL_AND_MARKER = URL + Optional(MARKER) - -NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER) - -REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd -# pyparsing isn't thread safe during initialization, so we do it eagerly, see -# issue #104 -REQUIREMENT.parseString("x[]") - - -class Requirement(object): +class Requirement: """Parse a requirement. Parse a given requirement string into its parts, such as name, specifier, @@ -107,54 +30,61 @@ class Requirement(object): # the thing as well as the version? What about the markers? # TODO: Can we normalize the name and extra name? 
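# For example (illustrative, mirroring the example in the packaging docs):
#     req = Requirement('requests [security] >= 2.8.1, == 2.8.* ; python_version < "2.7"')
#     req.name       # 'requests'
#     req.extras     # {'security'}
#     req.specifier  # <SpecifierSet('==2.8.*,>=2.8.1')>
#     req.marker     # <Marker('python_version < "2.7"')>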
- def __init__(self, requirement_string): - # type: (str) -> None + def __init__(self, requirement_string: str) -> None: try: - req = REQUIREMENT.parseString(requirement_string) - except ParseException as e: - raise InvalidRequirement( - 'Parse error at "{0!r}": {1}'.format( - requirement_string[e.loc : e.loc + 8], e.msg - ) - ) - - self.name = req.name # type: str - if req.url: - parsed_url = urlparse.urlparse(req.url) - if parsed_url.scheme == "file": - if urlparse.urlunparse(parsed_url) != req.url: - raise InvalidRequirement("Invalid URL given") - elif not (parsed_url.scheme and parsed_url.netloc) or ( - not parsed_url.scheme and not parsed_url.netloc - ): - raise InvalidRequirement("Invalid URL: {0}".format(req.url)) - self.url = req.url # type: TOptional[str] - else: - self.url = None - self.extras = set(req.extras.asList() if req.extras else []) # type: Set[str] - self.specifier = SpecifierSet(req.specifier) # type: SpecifierSet - self.marker = req.marker if req.marker else None # type: TOptional[Marker] - - def __str__(self): - # type: () -> str - parts = [self.name] # type: List[str] + parsed = _parse_requirement(requirement_string) + except ParserSyntaxError as e: + raise InvalidRequirement(str(e)) from e + + self.name: str = parsed.name + self.url: Optional[str] = parsed.url or None + self.extras: Set[str] = set(parsed.extras if parsed.extras else []) + self.specifier: SpecifierSet = SpecifierSet(parsed.specifier) + self.marker: Optional[Marker] = None + if parsed.marker is not None: + self.marker = Marker.__new__(Marker) + self.marker._markers = _normalize_extra_values(parsed.marker) + + def _iter_parts(self, name: str) -> Iterator[str]: + yield name if self.extras: - parts.append("[{0}]".format(",".join(sorted(self.extras)))) + formatted_extras = ",".join(sorted(self.extras)) + yield f"[{formatted_extras}]" if self.specifier: - parts.append(str(self.specifier)) + yield str(self.specifier) if self.url: - parts.append("@ {0}".format(self.url)) + yield f"@ {self.url}" if self.marker: - parts.append(" ") + yield " " if self.marker: - parts.append("; {0}".format(self.marker)) + yield f"; {self.marker}" - return "".join(parts) + def __str__(self) -> str: + return "".join(self._iter_parts(self.name)) - def __repr__(self): - # type: () -> str - return "<Requirement({0!r})>".format(str(self)) + def __repr__(self) -> str: + return f"<Requirement({str(self)!r})>" + + def __hash__(self) -> int: + return hash( + ( + self.__class__.__name__, + *self._iter_parts(canonicalize_name(self.name)), + ) + ) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, Requirement): + return NotImplemented + + return ( + canonicalize_name(self.name) == canonicalize_name(other.name) + and self.extras == other.extras + and self.specifier == other.specifier + and self.url == other.url + and self.marker == other.marker + ) diff --git a/conda_lock/_vendor/poetry/core/_vendor/packaging/specifiers.py b/conda_lock/_vendor/poetry/core/_vendor/packaging/specifiers.py index a6a83c1f..ba8fe37b 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/packaging/specifiers.py +++ b/conda_lock/_vendor/poetry/core/_vendor/packaging/specifiers.py @@ -1,340 +1,123 @@ # This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. -from __future__ import absolute_import, division, print_function +""" +..
testsetup:: + + from packaging.specifiers import Specifier, SpecifierSet, InvalidSpecifier + from packaging.version import Version +""" import abc -import functools import itertools import re -import warnings +from typing import ( + Callable, + Iterable, + Iterator, + List, + Optional, + Set, + Tuple, + TypeVar, + Union, +) -from ._compat import string_types, with_metaclass -from ._typing import TYPE_CHECKING from .utils import canonicalize_version -from .version import LegacyVersion, Version, parse +from .version import Version + +UnparsedVersion = Union[Version, str] +UnparsedVersionVar = TypeVar("UnparsedVersionVar", bound=UnparsedVersion) +CallableOperator = Callable[[Version, str], bool] -if TYPE_CHECKING: # pragma: no cover - from typing import Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union - ParsedVersion = Union[Version, LegacyVersion] - UnparsedVersion = Union[Version, LegacyVersion, str] - CallableOperator = Callable[[ParsedVersion, str], bool] +def _coerce_version(version: UnparsedVersion) -> Version: + if not isinstance(version, Version): + version = Version(version) + return version class InvalidSpecifier(ValueError): """ - An invalid specifier was found, users should refer to PEP 440. + Raised when attempting to create a :class:`Specifier` with a specifier + string that is invalid. + + >>> Specifier("lolwat") + Traceback (most recent call last): + ... + packaging.specifiers.InvalidSpecifier: Invalid specifier: 'lolwat' """ -class BaseSpecifier(with_metaclass(abc.ABCMeta, object)): # type: ignore +class BaseSpecifier(metaclass=abc.ABCMeta): @abc.abstractmethod - def __str__(self): - # type: () -> str + def __str__(self) -> str: """ - Returns the str representation of this Specifier like object. This + Returns the str representation of this Specifier-like object. This should be representative of the Specifier itself. """ @abc.abstractmethod - def __hash__(self): - # type: () -> int + def __hash__(self) -> int: """ - Returns a hash value for this Specifier like object. + Returns a hash value for this Specifier-like object. """ @abc.abstractmethod - def __eq__(self, other): - # type: (object) -> bool + def __eq__(self, other: object) -> bool: """ - Returns a boolean representing whether or not the two Specifier like + Returns a boolean representing whether or not the two Specifier-like objects are equal. + + :param other: The other object to check against. """ + @property @abc.abstractmethod - def __ne__(self, other): - # type: (object) -> bool - """ - Returns a boolean representing whether or not the two Specifier like - objects are not equal. - """ + def prereleases(self) -> Optional[bool]: + """Whether or not pre-releases as a whole are allowed. - @abc.abstractproperty - def prereleases(self): - # type: () -> Optional[bool] - """ - Returns whether or not pre-releases as a whole are allowed by this - specifier. + This can be set to either ``True`` or ``False`` to explicitly enable or disable + prereleases or it can be set to ``None`` (the default) to use default semantics. """ @prereleases.setter - def prereleases(self, value): - # type: (bool) -> None - """ - Sets whether or not pre-releases as a whole are allowed by this - specifier. + def prereleases(self, value: bool) -> None: + """Setter for :attr:`prereleases`. + + :param value: The value to set. 
""" @abc.abstractmethod - def contains(self, item, prereleases=None): - # type: (str, Optional[bool]) -> bool + def contains(self, item: str, prereleases: Optional[bool] = None) -> bool: """ Determines if the given item is contained within this specifier. """ @abc.abstractmethod - def filter(self, iterable, prereleases=None): - # type: (Iterable[UnparsedVersion], Optional[bool]) -> Iterable[UnparsedVersion] + def filter( + self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None + ) -> Iterator[UnparsedVersionVar]: """ Takes an iterable of items and filters them so that only items which are contained within this specifier are allowed in it. """ -class _IndividualSpecifier(BaseSpecifier): - - _operators = {} # type: Dict[str, str] +class Specifier(BaseSpecifier): + """This class abstracts handling of version specifiers. - def __init__(self, spec="", prereleases=None): - # type: (str, Optional[bool]) -> None - match = self._regex.search(spec) - if not match: - raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec)) - - self._spec = ( - match.group("operator").strip(), - match.group("version").strip(), - ) # type: Tuple[str, str] - - # Store whether or not this Specifier should accept prereleases - self._prereleases = prereleases - - def __repr__(self): - # type: () -> str - pre = ( - ", prereleases={0!r}".format(self.prereleases) - if self._prereleases is not None - else "" - ) - - return "<{0}({1!r}{2})>".format(self.__class__.__name__, str(self), pre) - - def __str__(self): - # type: () -> str - return "{0}{1}".format(*self._spec) - - @property - def _canonical_spec(self): - # type: () -> Tuple[str, Union[Version, str]] - return self._spec[0], canonicalize_version(self._spec[1]) - - def __hash__(self): - # type: () -> int - return hash(self._canonical_spec) - - def __eq__(self, other): - # type: (object) -> bool - if isinstance(other, string_types): - try: - other = self.__class__(str(other)) - except InvalidSpecifier: - return NotImplemented - elif not isinstance(other, self.__class__): - return NotImplemented - - return self._canonical_spec == other._canonical_spec - - def __ne__(self, other): - # type: (object) -> bool - if isinstance(other, string_types): - try: - other = self.__class__(str(other)) - except InvalidSpecifier: - return NotImplemented - elif not isinstance(other, self.__class__): - return NotImplemented + .. tip:: - return self._spec != other._spec - - def _get_operator(self, op): - # type: (str) -> CallableOperator - operator_callable = getattr( - self, "_compare_{0}".format(self._operators[op]) - ) # type: CallableOperator - return operator_callable - - def _coerce_version(self, version): - # type: (UnparsedVersion) -> ParsedVersion - if not isinstance(version, (LegacyVersion, Version)): - version = parse(version) - return version - - @property - def operator(self): - # type: () -> str - return self._spec[0] - - @property - def version(self): - # type: () -> str - return self._spec[1] - - @property - def prereleases(self): - # type: () -> Optional[bool] - return self._prereleases - - @prereleases.setter - def prereleases(self, value): - # type: (bool) -> None - self._prereleases = value - - def __contains__(self, item): - # type: (str) -> bool - return self.contains(item) - - def contains(self, item, prereleases=None): - # type: (UnparsedVersion, Optional[bool]) -> bool - - # Determine if prereleases are to be allowed or not. 
- if prereleases is None: - prereleases = self.prereleases - - # Normalize item to a Version or LegacyVersion, this allows us to have - # a shortcut for ``"2.0" in Specifier(">=2") - normalized_item = self._coerce_version(item) - - # Determine if we should be supporting prereleases in this specifier - # or not, if we do not support prereleases than we can short circuit - # logic if this version is a prereleases. - if normalized_item.is_prerelease and not prereleases: - return False - - # Actually do the comparison to determine if this item is contained - # within this Specifier or not. - operator_callable = self._get_operator(self.operator) # type: CallableOperator - return operator_callable(normalized_item, self.version) - - def filter(self, iterable, prereleases=None): - # type: (Iterable[UnparsedVersion], Optional[bool]) -> Iterable[UnparsedVersion] - - yielded = False - found_prereleases = [] - - kw = {"prereleases": prereleases if prereleases is not None else True} - - # Attempt to iterate over all the values in the iterable and if any of - # them match, yield them. - for version in iterable: - parsed_version = self._coerce_version(version) - - if self.contains(parsed_version, **kw): - # If our version is a prerelease, and we were not set to allow - # prereleases, then we'll store it for later incase nothing - # else matches this specifier. - if parsed_version.is_prerelease and not ( - prereleases or self.prereleases - ): - found_prereleases.append(version) - # Either this is not a prerelease, or we should have been - # accepting prereleases from the beginning. - else: - yielded = True - yield version - - # Now that we've iterated over everything, determine if we've yielded - # any values, and if we have not and we have any prereleases stored up - # then we will go ahead and yield the prereleases. - if not yielded and found_prereleases: - for version in found_prereleases: - yield version - - -class LegacySpecifier(_IndividualSpecifier): - - _regex_str = r""" - (?P(==|!=|<=|>=|<|>)) - \s* - (?P - [^,;\s)]* # Since this is a "legacy" specifier, and the version - # string can be just about anything, we match everything - # except for whitespace, a semi-colon for marker support, - # a closing paren since versions can be enclosed in - # them, and a comma since it's a version separator. 
- ) - """ - - _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) - - _operators = { - "==": "equal", - "!=": "not_equal", - "<=": "less_than_equal", - ">=": "greater_than_equal", - "<": "less_than", - ">": "greater_than", - } - - def __init__(self, spec="", prereleases=None): - # type: (str, Optional[bool]) -> None - super(LegacySpecifier, self).__init__(spec, prereleases) - - warnings.warn( - "Creating a LegacyVersion has been deprecated and will be " - "removed in the next major release", - DeprecationWarning, - ) - - def _coerce_version(self, version): - # type: (Union[ParsedVersion, str]) -> LegacyVersion - if not isinstance(version, LegacyVersion): - version = LegacyVersion(str(version)) - return version - - def _compare_equal(self, prospective, spec): - # type: (LegacyVersion, str) -> bool - return prospective == self._coerce_version(spec) - - def _compare_not_equal(self, prospective, spec): - # type: (LegacyVersion, str) -> bool - return prospective != self._coerce_version(spec) - - def _compare_less_than_equal(self, prospective, spec): - # type: (LegacyVersion, str) -> bool - return prospective <= self._coerce_version(spec) - - def _compare_greater_than_equal(self, prospective, spec): - # type: (LegacyVersion, str) -> bool - return prospective >= self._coerce_version(spec) - - def _compare_less_than(self, prospective, spec): - # type: (LegacyVersion, str) -> bool - return prospective < self._coerce_version(spec) - - def _compare_greater_than(self, prospective, spec): - # type: (LegacyVersion, str) -> bool - return prospective > self._coerce_version(spec) - - -def _require_version_compare( - fn, # type: (Callable[[Specifier, ParsedVersion, str], bool]) -): - # type: (...) -> Callable[[Specifier, ParsedVersion, str], bool] - @functools.wraps(fn) - def wrapped(self, prospective, spec): - # type: (Specifier, ParsedVersion, str) -> bool - if not isinstance(prospective, Version): - return False - return fn(self, prospective, spec) - - return wrapped - - -class Specifier(_IndividualSpecifier): + It is generally not required to instantiate this manually. You should instead + prefer to work with :class:`SpecifierSet` instead, which can parse + comma-separated version specifiers (which is what package metadata contains). + """ - _regex_str = r""" + _operator_regex_str = r""" (?P(~=|==|!=|<=|>=|<|>|===)) + """ + _version_regex_str = r""" (?P (?: # The identity operators allow for an escape hatch that will @@ -344,8 +127,10 @@ class Specifier(_IndividualSpecifier): # but included entirely as an escape hatch. (?<====) # Only match for the identity operator \s* - [^\s]* # We just match everything, except for whitespace - # since we are only testing for strict identity. + [^\s;)]* # The arbitrary version can be just about anything, + # we match everything except for whitespace, a + # semi-colon for marker support, and a closing paren + # since versions can be enclosed in them. ) | (?: @@ -358,23 +143,23 @@ class Specifier(_IndividualSpecifier): v? (?:[0-9]+!)? # epoch [0-9]+(?:\.[0-9]+)* # release - (?: # pre release - [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) - [-_\.]? - [0-9]* - )? - (?: # post release - (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) - )? - # You cannot use a wild card and a dev or local version - # together so group them with a | and make them optional. + # You cannot use a wild card and a pre-release, post-release, a dev or + # local version together so group them with a | and make them optional. 
(?: + \.\* # Wild card syntax of .* + | + (?: # pre release + [-_\.]? + (alpha|beta|preview|pre|a|b|c|rc) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local - | - \.\* # Wild card syntax of .* )? ) | @@ -389,7 +174,7 @@ class Specifier(_IndividualSpecifier): [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) (?: # pre release [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) + (alpha|beta|preview|pre|a|b|c|rc) [-_\.]? [0-9]* )? @@ -414,7 +199,7 @@ class Specifier(_IndividualSpecifier): [0-9]+(?:\.[0-9]+)* # release (?: # pre release [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) + (alpha|beta|preview|pre|a|b|c|rc) [-_\.]? [0-9]* )? @@ -426,7 +211,10 @@ class Specifier(_IndividualSpecifier): ) """ - _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) + _regex = re.compile( + r"^\s*" + _operator_regex_str + _version_regex_str + r"\s*$", + re.VERBOSE | re.IGNORECASE, + ) _operators = { "~=": "compatible", @@ -439,9 +227,153 @@ class Specifier(_IndividualSpecifier): "===": "arbitrary", } - @_require_version_compare - def _compare_compatible(self, prospective, spec): - # type: (ParsedVersion, str) -> bool + def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: + """Initialize a Specifier instance. + + :param spec: + The string representation of a specifier which will be parsed and + normalized before use. + :param prereleases: + This tells the specifier if it should accept prerelease versions if + applicable or not. The default of ``None`` will autodetect it from the + given specifiers. + :raises InvalidSpecifier: + If the given specifier is invalid (i.e. bad syntax). + """ + match = self._regex.search(spec) + if not match: + raise InvalidSpecifier(f"Invalid specifier: '{spec}'") + + self._spec: Tuple[str, str] = ( + match.group("operator").strip(), + match.group("version").strip(), + ) + + # Store whether or not this Specifier should accept prereleases + self._prereleases = prereleases + + # https://github.com/python/mypy/pull/13475#pullrequestreview-1079784515 + @property # type: ignore[override] + def prereleases(self) -> bool: + # If there is an explicit prereleases set for this, then we'll just + # blindly use that. + if self._prereleases is not None: + return self._prereleases + + # Look at all of our specifiers and determine if they are inclusive + # operators, and if they are if they are including an explicit + # prerelease. + operator, version = self._spec + if operator in ["==", ">=", "<=", "~=", "==="]: + # The == specifier can include a trailing .*, if it does we + # want to remove before parsing. + if operator == "==" and version.endswith(".*"): + version = version[:-2] + + # Parse the version, and if it is a pre-release than this + # specifier allows pre-releases. + if Version(version).is_prerelease: + return True + + return False + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + + @property + def operator(self) -> str: + """The operator of this specifier. + + >>> Specifier("==1.2.3").operator + '==' + """ + return self._spec[0] + + @property + def version(self) -> str: + """The version of this specifier. + + >>> Specifier("==1.2.3").version + '1.2.3' + """ + return self._spec[1] + + def __repr__(self) -> str: + """A representation of the Specifier that shows all internal state. 
+ + >>> Specifier('>=1.0.0') + <Specifier('>=1.0.0')> + >>> Specifier('>=1.0.0', prereleases=False) + <Specifier('>=1.0.0', prereleases=False)> + >>> Specifier('>=1.0.0', prereleases=True) + <Specifier('>=1.0.0', prereleases=True)> + """ + pre = ( + f", prereleases={self.prereleases!r}" + if self._prereleases is not None + else "" + ) + + return f"<{self.__class__.__name__}({str(self)!r}{pre})>" + + def __str__(self) -> str: + """A string representation of the Specifier that can be round-tripped. + + >>> str(Specifier('>=1.0.0')) + '>=1.0.0' + >>> str(Specifier('>=1.0.0', prereleases=False)) + '>=1.0.0' + """ + return "{}{}".format(*self._spec) + + @property + def _canonical_spec(self) -> Tuple[str, str]: + canonical_version = canonicalize_version( + self._spec[1], + strip_trailing_zero=(self._spec[0] != "~="), + ) + return self._spec[0], canonical_version + + def __hash__(self) -> int: + return hash(self._canonical_spec) + + def __eq__(self, other: object) -> bool: + """Whether or not the two Specifier-like objects are equal. + + :param other: The other object to check against. + + The value of :attr:`prereleases` is ignored. + + >>> Specifier("==1.2.3") == Specifier("== 1.2.3.0") + True + >>> (Specifier("==1.2.3", prereleases=False) == + ... Specifier("==1.2.3", prereleases=True)) + True + >>> Specifier("==1.2.3") == "==1.2.3" + True + >>> Specifier("==1.2.3") == Specifier("==1.2.4") + False + >>> Specifier("==1.2.3") == Specifier("~=1.2.3") + False + """ + if isinstance(other, str): + try: + other = self.__class__(str(other)) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._canonical_spec == other._canonical_spec + + def _get_operator(self, op: str) -> CallableOperator: + operator_callable: CallableOperator = getattr( + self, f"_compare_{self._operators[op]}" + ) + return operator_callable + + def _compare_compatible(self, prospective: Version, spec: str) -> bool: # Compatible releases have an equivalent combination of >= and ==. That # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to @@ -450,15 +382,9 @@ def _compare_compatible(self, prospective, spec): # the other specifiers. # We want everything but the last item in the version, but we want to - # ignore post and dev releases and we want to treat the pre-release as - # it's own separate segment. + # ignore suffix segments. prefix = ".".join( - list( - itertools.takewhile( - lambda x: (not x.startswith("post") and not x.startswith("dev")), - _version_split(spec), - ) - )[:-1] + list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1] ) # Add the prefix notation to the end of our string @@ -468,35 +394,35 @@ def _compare_compatible(self, prospective, spec): prospective, prefix ) - @_require_version_compare - def _compare_equal(self, prospective, spec): - # type: (ParsedVersion, str) -> bool + def _compare_equal(self, prospective: Version, spec: str) -> bool: # We need special logic to handle prefix matching if spec.endswith(".*"): # In the case of prefix matching we want to ignore local segment. - prospective = Version(prospective.public) + normalized_prospective = canonicalize_version( + prospective.public, strip_trailing_zero=False + ) + # Get the normalized version string ignoring the trailing .* + normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False) # Split the spec out by dots, and pretend that there is an implicit # dot in between a release segment and a pre-release segment.
- split_spec = _version_split(spec[:-2]) # Remove the trailing .* + split_spec = _version_split(normalized_spec) # Split the prospective version out by dots, and pretend that there # is an implicit dot in between a release segment and a pre-release # segment. - split_prospective = _version_split(str(prospective)) + split_prospective = _version_split(normalized_prospective) + + # 0-pad the prospective version before shortening it to get the correct + # shortened version. + padded_prospective, _ = _pad_version(split_prospective, split_spec) # Shorten the prospective version to be the same length as the spec # so that we can determine if the specifier is a prefix of the # prospective version or not. - shortened_prospective = split_prospective[: len(split_spec)] + shortened_prospective = padded_prospective[: len(split_spec)] - # Pad out our two sides with zeros so that they both equal the same - # length. - padded_spec, padded_prospective = _pad_version( - split_spec, shortened_prospective - ) - - return padded_prospective == padded_spec + return shortened_prospective == split_spec else: # Convert our spec string into a Version spec_version = Version(spec) @@ -509,32 +435,24 @@ def _compare_equal(self, prospective, spec): return prospective == spec_version - @_require_version_compare - def _compare_not_equal(self, prospective, spec): - # type: (ParsedVersion, str) -> bool + def _compare_not_equal(self, prospective: Version, spec: str) -> bool: return not self._compare_equal(prospective, spec) - @_require_version_compare - def _compare_less_than_equal(self, prospective, spec): - # type: (ParsedVersion, str) -> bool + def _compare_less_than_equal(self, prospective: Version, spec: str) -> bool: # NB: Local version identifiers are NOT permitted in the version # specifier, so local version labels can be universally removed from # the prospective version. return Version(prospective.public) <= Version(spec) - @_require_version_compare - def _compare_greater_than_equal(self, prospective, spec): - # type: (ParsedVersion, str) -> bool + def _compare_greater_than_equal(self, prospective: Version, spec: str) -> bool: # NB: Local version identifiers are NOT permitted in the version # specifier, so local version labels can be universally removed from # the prospective version. return Version(prospective.public) >= Version(spec) - @_require_version_compare - def _compare_less_than(self, prospective, spec_str): - # type: (ParsedVersion, str) -> bool + def _compare_less_than(self, prospective: Version, spec_str: str) -> bool: # Convert our spec to a Version instance, since we'll want to work with # it as a version. @@ -559,9 +477,7 @@ def _compare_less_than(self, prospective, spec_str): # version in the spec. return True - @_require_version_compare - def _compare_greater_than(self, prospective, spec_str): - # type: (ParsedVersion, str) -> bool + def _compare_greater_than(self, prospective: Version, spec_str: str) -> bool: # Convert our spec to a Version instance, since we'll want to work with # it as a version. @@ -592,48 +508,143 @@ def _compare_greater_than(self, prospective, spec_str): # same version in the spec. 
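The `_compare_equal` hunk above changes prefix matching (`==1.0.*`) to zero-pad the prospective version before truncating it, instead of padding both sides afterwards. A toy illustration of why the order matters, ignoring the epoch and pre-release handling done by the real `_pad_version`:

    # Hypothetical helper, not part of packaging: release-segment prefix match.
    def prefix_match(spec: str, candidate: str) -> bool:
        spec_parts = spec.split(".")
        cand_parts = candidate.split(".")
        # Pad the candidate with zeros first, so "1" can match "==1.0.*" ...
        cand_parts += ["0"] * (len(spec_parts) - len(cand_parts))
        # ... then truncate it to the spec's length and compare.
        return cand_parts[: len(spec_parts)] == spec_parts

    print(prefix_match("1.0", "1"))      # True  ("1" pads to ['1', '0'])
    print(prefix_match("1.0", "1.0.1"))  # True  (truncates to ['1', '0'])
    print(prefix_match("1.1", "1.0.1"))  # False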
return True - def _compare_arbitrary(self, prospective, spec): - # type: (Version, str) -> bool + def _compare_arbitrary(self, prospective: Version, spec: str) -> bool: return str(prospective).lower() == str(spec).lower() - @property - def prereleases(self): - # type: () -> bool + def __contains__(self, item: Union[str, Version]) -> bool: + """Return whether or not the item is contained in this specifier. - # If there is an explicit prereleases set for this, then we'll just - # blindly use that. - if self._prereleases is not None: - return self._prereleases + :param item: The item to check for. - # Look at all of our specifiers and determine if they are inclusive - # operators, and if they are if they are including an explicit - # prerelease. - operator, version = self._spec - if operator in ["==", ">=", "<=", "~=", "==="]: - # The == specifier can include a trailing .*, if it does we - # want to remove before parsing. - if operator == "==" and version.endswith(".*"): - version = version[:-2] + This is used for the ``in`` operator and behaves the same as + :meth:`contains` with no ``prereleases`` argument passed. - # Parse the version, and if it is a pre-release than this - # specifier allows pre-releases. - if parse(version).is_prerelease: - return True + >>> "1.2.3" in Specifier(">=1.2.3") + True + >>> Version("1.2.3") in Specifier(">=1.2.3") + True + >>> "1.0.0" in Specifier(">=1.2.3") + False + >>> "1.3.0a1" in Specifier(">=1.2.3") + False + >>> "1.3.0a1" in Specifier(">=1.2.3", prereleases=True) + True + """ + return self.contains(item) - return False + def contains( + self, item: UnparsedVersion, prereleases: Optional[bool] = None + ) -> bool: + """Return whether or not the item is contained in this specifier. + + :param item: + The item to check for, which can be a version string or a + :class:`Version` instance. + :param prereleases: + Whether or not to match prereleases with this Specifier. If set to + ``None`` (the default), it uses :attr:`prereleases` to determine + whether or not prereleases are allowed. + + >>> Specifier(">=1.2.3").contains("1.2.3") + True + >>> Specifier(">=1.2.3").contains(Version("1.2.3")) + True + >>> Specifier(">=1.2.3").contains("1.0.0") + False + >>> Specifier(">=1.2.3").contains("1.3.0a1") + False + >>> Specifier(">=1.2.3", prereleases=True).contains("1.3.0a1") + True + >>> Specifier(">=1.2.3").contains("1.3.0a1", prereleases=True) + True + """ - @prereleases.setter - def prereleases(self, value): - # type: (bool) -> None - self._prereleases = value + # Determine if prereleases are to be allowed or not. + if prereleases is None: + prereleases = self.prereleases + + # Normalize item to a Version; this allows us to have a shortcut for + # "2.0" in Specifier(">=2") + normalized_item = _coerce_version(item) + + # Determine if we should be supporting prereleases in this specifier + # or not; if we do not support prereleases then we can short circuit + # logic if this version is a prerelease. + if normalized_item.is_prerelease and not prereleases: + return False + + # Actually do the comparison to determine if this item is contained + # within this Specifier or not. + operator_callable: CallableOperator = self._get_operator(self.operator) + return operator_callable(normalized_item, self.version) + + def filter( + self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None + ) -> Iterator[UnparsedVersionVar]: + """Filter items in the given iterable that match the specifier.
+ + :param iterable: + An iterable that can contain version strings and :class:`Version` instances. + The items in the iterable will be filtered according to the specifier. + :param prereleases: + Whether or not to allow prereleases in the returned iterator. If set to + ``None`` (the default), it will intelligently decide whether to allow + prereleases or not (based on the :attr:`prereleases` attribute, and + whether the only versions matching are prereleases). + + This method is smarter than just ``filter(Specifier().contains, [...])`` + because it implements the rule from :pep:`440` that a prerelease item + SHOULD be accepted if no other versions match the given specifier. + + >>> list(Specifier(">=1.2.3").filter(["1.2", "1.3", "1.5a1"])) + ['1.3'] + >>> list(Specifier(">=1.2.3").filter(["1.2", "1.2.3", "1.3", Version("1.4")])) + ['1.2.3', '1.3', <Version('1.4')>] + >>> list(Specifier(">=1.2.3").filter(["1.2", "1.5a1"])) + ['1.5a1'] + >>> list(Specifier(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True)) + ['1.3', '1.5a1'] + >>> list(Specifier(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"])) + ['1.3', '1.5a1'] + """ + + yielded = False + found_prereleases = [] + + kw = {"prereleases": prereleases if prereleases is not None else True} + + # Attempt to iterate over all the values in the iterable and if any of + # them match, yield them. + for version in iterable: + parsed_version = _coerce_version(version) + + if self.contains(parsed_version, **kw): + # If our version is a prerelease, and we were not set to allow + # prereleases, then we'll store it for later in case nothing + # else matches this specifier. + if parsed_version.is_prerelease and not ( + prereleases or self.prereleases + ): + found_prereleases.append(version) + # Either this is not a prerelease, or we should have been + # accepting prereleases from the beginning. + else: + yielded = True + yield version + + # Now that we've iterated over everything, determine if we've yielded + # any values, and if we have not and we have any prereleases stored up + # then we will go ahead and yield the prereleases. + if not yielded and found_prereleases: + for version in found_prereleases: + yield version _prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") -def _version_split(version): - # type: (str) -> List[str] - result = [] # type: List[str] +def _version_split(version: str) -> List[str]: + result: List[str] = [] for item in version.split("."): match = _prefix_regex.search(item) if match: @@ -643,8 +654,13 @@ def _version_split(version): return result -def _pad_version(left, right): - # type: (List[str], List[str]) -> Tuple[List[str], List[str]] +def _is_not_suffix(segment: str) -> bool: + return not any( + segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post") + ) + + +def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]: left_split, right_split = [], [] # Get the release segment of our versions @@ -663,21 +679,39 @@ def _pad_version(left, right): class SpecifierSet(BaseSpecifier): - def __init__(self, specifiers="", prereleases=None): - # type: (str, Optional[bool]) -> None + """This class abstracts handling of a set of version specifiers. + + It can be passed a single specifier (``>=3.0``), a comma-separated list of + specifiers (``>=3.0,!=3.1``), or no specifier at all.
+ """ - # Split on , to break each individual specifier into it's own item, and + def __init__( + self, specifiers: str = "", prereleases: Optional[bool] = None + ) -> None: + """Initialize a SpecifierSet instance. + + :param specifiers: + The string representation of a specifier or a comma-separated list of + specifiers which will be parsed and normalized before use. + :param prereleases: + This tells the SpecifierSet if it should accept prerelease versions if + applicable or not. The default of ``None`` will autodetect it from the + given specifiers. + + :raises InvalidSpecifier: + If the given ``specifiers`` are not parseable than this exception will be + raised. + """ + + # Split on `,` to break each individual specifier into it's own item, and # strip each item to remove leading/trailing whitespace. split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] # Parsed each individual specifier, attempting first to make it a - # Specifier and falling back to a LegacySpecifier. - parsed = set() + # Specifier. + parsed: Set[Specifier] = set() for specifier in split_specifiers: - try: - parsed.add(Specifier(specifier)) - except InvalidSpecifier: - parsed.add(LegacySpecifier(specifier)) + parsed.add(Specifier(specifier)) # Turn our parsed specifiers into a frozen set and save them for later. self._specs = frozenset(parsed) @@ -686,27 +720,75 @@ def __init__(self, specifiers="", prereleases=None): # we accept prereleases or not. self._prereleases = prereleases - def __repr__(self): - # type: () -> str + @property + def prereleases(self) -> Optional[bool]: + # If we have been given an explicit prerelease modifier, then we'll + # pass that through here. + if self._prereleases is not None: + return self._prereleases + + # If we don't have any specifiers, and we don't have a forced value, + # then we'll just return None since we don't know if this should have + # pre-releases or not. + if not self._specs: + return None + + # Otherwise we'll see if any of the given specifiers accept + # prereleases, if any of them do we'll return True, otherwise False. + return any(s.prereleases for s in self._specs) + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + + def __repr__(self) -> str: + """A representation of the specifier set that shows all internal state. + + Note that the ordering of the individual specifiers within the set may not + match the input string. + + >>> SpecifierSet('>=1.0.0,!=2.0.0') + =1.0.0')> + >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=False) + =1.0.0', prereleases=False)> + >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=True) + =1.0.0', prereleases=True)> + """ pre = ( - ", prereleases={0!r}".format(self.prereleases) + f", prereleases={self.prereleases!r}" if self._prereleases is not None else "" ) - return "".format(str(self), pre) + return f"" - def __str__(self): - # type: () -> str + def __str__(self) -> str: + """A string representation of the specifier set that can be round-tripped. + + Note that the ordering of the individual specifiers within the set may not + match the input string. 
+ + >>> str(SpecifierSet(">=1.0.0,!=1.0.1")) + '!=1.0.1,>=1.0.0' + >>> str(SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False)) + '!=1.0.1,>=1.0.0' + """ return ",".join(sorted(str(s) for s in self._specs)) - def __hash__(self): - # type: () -> int + def __hash__(self) -> int: return hash(self._specs) - def __and__(self, other): - # type: (Union[SpecifierSet, str]) -> SpecifierSet - if isinstance(other, string_types): + def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet": + """Return a SpecifierSet which is a combination of the two sets. + + :param other: The other object to combine with. + + >>> SpecifierSet(">=1.0.0,!=1.0.1") & '<=2.0.0,!=2.0.1' + <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')> + >>> SpecifierSet(">=1.0.0,!=1.0.1") & SpecifierSet('<=2.0.0,!=2.0.1') + <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')> + """ + if isinstance(other, str): other = SpecifierSet(other) elif not isinstance(other, SpecifierSet): return NotImplemented @@ -728,66 +810,99 @@ def __and__(self, other): return specifier - def __eq__(self, other): - # type: (object) -> bool - if isinstance(other, (string_types, _IndividualSpecifier)): - other = SpecifierSet(str(other)) - elif not isinstance(other, SpecifierSet): - return NotImplemented + def __eq__(self, other: object) -> bool: + """Whether or not the two SpecifierSet-like objects are equal. - return self._specs == other._specs + :param other: The other object to check against. + + The value of :attr:`prereleases` is ignored. - def __ne__(self, other): - # type: (object) -> bool - if isinstance(other, (string_types, _IndividualSpecifier)): + >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.1") + True + >>> (SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False) == + ... SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True)) + True + >>> SpecifierSet(">=1.0.0,!=1.0.1") == ">=1.0.0,!=1.0.1" + True + >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0") + False + >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.2") + False + """ + if isinstance(other, (str, Specifier)): other = SpecifierSet(str(other)) elif not isinstance(other, SpecifierSet): return NotImplemented - return self._specs != other._specs + return self._specs == other._specs - def __len__(self): - # type: () -> int + def __len__(self) -> int: + """Returns the number of specifiers in this specifier set.""" return len(self._specs) - def __iter__(self): - # type: () -> Iterator[_IndividualSpecifier] - return iter(self._specs) - - @property - def prereleases(self): - # type: () -> Optional[bool] + def __iter__(self) -> Iterator[Specifier]: + """ + Returns an iterator over all the underlying :class:`Specifier` instances + in this specifier set. - # If we have been given an explicit prerelease modifier, then we'll - # pass that through here. - if self._prereleases is not None: - return self._prereleases + >>> sorted(SpecifierSet(">=1.0.0,!=1.0.1"), key=str) + [<Specifier('!=1.0.1')>, <Specifier('>=1.0.0')>] + """ + return iter(self._specs) - # If we don't have any specifiers, and we don't have a forced value, - # then we'll just return None since we don't know if this should have - # pre-releases or not. - if not self._specs: - return None + def __contains__(self, item: UnparsedVersion) -> bool: + """Return whether or not the item is contained in this specifier. - # Otherwise we'll see if any of the given specifiers accept - # prereleases, if any of them do we'll return True, otherwise False. - return any(s.prereleases for s in self._specs) + :param item: The item to check for.
- @prereleases.setter - def prereleases(self, value): - # type: (bool) -> None - self._prereleases = value + This is used for the ``in`` operator and behaves the same as + :meth:`contains` with no ``prereleases`` argument passed. - def __contains__(self, item): - # type: (Union[ParsedVersion, str]) -> bool + >>> "1.2.3" in SpecifierSet(">=1.0.0,!=1.0.1") + True + >>> Version("1.2.3") in SpecifierSet(">=1.0.0,!=1.0.1") + True + >>> "1.0.1" in SpecifierSet(">=1.0.0,!=1.0.1") + False + >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1") + False + >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True) + True + """ return self.contains(item) - def contains(self, item, prereleases=None): - # type: (Union[ParsedVersion, str], Optional[bool]) -> bool - - # Ensure that our item is a Version or LegacyVersion instance. - if not isinstance(item, (LegacyVersion, Version)): - item = parse(item) + def contains( + self, + item: UnparsedVersion, + prereleases: Optional[bool] = None, + installed: Optional[bool] = None, + ) -> bool: + """Return whether or not the item is contained in this SpecifierSet. + + :param item: + The item to check for, which can be a version string or a + :class:`Version` instance. + :param prereleases: + Whether or not to match prereleases with this SpecifierSet. If set to + ``None`` (the default), it uses :attr:`prereleases` to determine + whether or not prereleases are allowed. + :param installed: + If ``True``, prerelease versions are matched against their base + version, so that an already-installed prerelease can satisfy the set. + + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.2.3") + True + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains(Version("1.2.3")) + True + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.0.1") + False + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1") + False + >>> SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True).contains("1.3.0a1") + True + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1", prereleases=True) + True + """ + # Ensure that our item is a Version instance. + if not isinstance(item, Version): + item = Version(item) # Determine if we're forcing a prerelease or not, if we're not forcing # one for this particular filter call, then we'll use whatever the @@ -804,6 +919,9 @@ def contains(self, item, prereleases=None): if not prereleases and item.is_prerelease: return False + if installed and item.is_prerelease: + item = Version(item.base_version) + # We simply dispatch to the underlying specs here to make sure that the # given version is contained within all of them. # Note: This use of all() here means that an empty set of specifiers @@ -811,12 +929,46 @@ def contains(self, item, prereleases=None): return all(s.contains(item, prereleases=prereleases) for s in self._specs) def filter( - self, - iterable, # type: Iterable[Union[ParsedVersion, str]] - prereleases=None, # type: Optional[bool] - ): - # type: (...) -> Iterable[Union[ParsedVersion, str]] - + self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None + ) -> Iterator[UnparsedVersionVar]: + """Filter items in the given iterable that match the specifiers in this set. + + :param iterable: + An iterable that can contain version strings and :class:`Version` instances. + The items in the iterable will be filtered according to the specifier. + :param prereleases: + Whether or not to allow prereleases in the returned iterator. If set to + ``None`` (the default), it will intelligently decide whether to allow + prereleases or not (based on the :attr:`prereleases` attribute, and + whether the only versions matching are prereleases).
+ + This method is smarter than just ``filter(SpecifierSet(...).contains, [...])`` + because it implements the rule from :pep:`440` that a prerelease item + SHOULD be accepted if no other versions match the given specifier. + + >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", "1.5a1"])) + ['1.3'] + >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", Version("1.4")])) + ['1.3', <Version('1.4')>] + >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.5a1"])) + [] + >>> list(SpecifierSet(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True)) + ['1.3', '1.5a1'] + >>> list(SpecifierSet(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"])) + ['1.3', '1.5a1'] + + An "empty" SpecifierSet will filter items based on the presence of prerelease + versions in the set. + + >>> list(SpecifierSet("").filter(["1.3", "1.5a1"])) + ['1.3'] + >>> list(SpecifierSet("").filter(["1.5a1"])) + ['1.5a1'] + >>> list(SpecifierSet("", prereleases=True).filter(["1.3", "1.5a1"])) + ['1.3', '1.5a1'] + >>> list(SpecifierSet("").filter(["1.3", "1.5a1"], prereleases=True)) + ['1.3', '1.5a1'] + """ # Determine if we're forcing a prerelease or not, if we're not forcing # one for this particular filter call, then we'll use whatever the # SpecifierSet thinks for whether or not we should support prereleases. @@ -829,24 +981,16 @@ def filter( if self._specs: for spec in self._specs: iterable = spec.filter(iterable, prereleases=bool(prereleases)) - return iterable + return iter(iterable) # If we do not have any specifiers, then we need to have a rough filter # which will filter out any pre-releases, unless there are no final - # releases, and which will filter out LegacyVersion in general. + # releases. else: - filtered = [] # type: List[Union[ParsedVersion, str]] - found_prereleases = [] # type: List[Union[ParsedVersion, str]] + filtered: List[UnparsedVersionVar] = [] + found_prereleases: List[UnparsedVersionVar] = [] for item in iterable: - # Ensure that we some kind of Version class for this item. - if not isinstance(item, (LegacyVersion, Version)): - parsed_version = parse(item) - else: - parsed_version = item - - # Filter out any item which is parsed as a LegacyVersion - if isinstance(parsed_version, LegacyVersion): - continue + parsed_version = _coerce_version(item) # Store any item which is a pre-release for later unless we've # already found a final version or we are accepting prereleases @@ -859,6 +1003,6 @@ def filter( # If we've found no items except for pre-releases, then we'll go # ahead and use the pre-releases if not filtered and found_prereleases and prereleases is None: - return found_prereleases + return iter(found_prereleases) - return filtered + return iter(filtered) diff --git a/conda_lock/_vendor/poetry/core/_vendor/packaging/tags.py b/conda_lock/_vendor/poetry/core/_vendor/packaging/tags.py index d637f1b6..37f33b1e 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/packaging/tags.py +++ b/conda_lock/_vendor/poetry/core/_vendor/packaging/tags.py @@ -2,81 +2,46 @@ # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details.
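That completes the specifiers.py rewrite. An end-to-end sketch of the modernized API (semantics as of packaging >= 22; outputs shown as comments), before the tags.py changes below:

    from packaging.specifiers import SpecifierSet
    from packaging.version import Version

    combined = SpecifierSet(">=1.0,!=1.0.1") & SpecifierSet("<2.0")
    print(str(combined))                                 # '!=1.0.1,<2.0,>=1.0'
    print("1.0.1" in combined)                           # False
    print(combined.contains("1.5b1", prereleases=True))  # True
    print(list(combined.filter(["0.9", "1.0", Version("1.4"), "1.5b1"])))
    # ['1.0', <Version('1.4')>]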
-from __future__ import absolute_import - -import distutils.util - -try: - from importlib.machinery import EXTENSION_SUFFIXES -except ImportError: # pragma: no cover - import imp - - EXTENSION_SUFFIXES = [x[0] for x in imp.get_suffixes()] - del imp -import collections import logging -import os import platform -import re import struct +import subprocess import sys import sysconfig -import warnings - -from ._typing import TYPE_CHECKING, cast - -if TYPE_CHECKING: # pragma: no cover - from typing import ( - IO, - Dict, - FrozenSet, - Iterable, - Iterator, - List, - Optional, - Sequence, - Tuple, - Union, - ) - - PythonVersion = Sequence[int] - MacVersion = Tuple[int, int] - GlibcVersion = Tuple[int, int] - +from importlib.machinery import EXTENSION_SUFFIXES +from typing import ( + Dict, + FrozenSet, + Iterable, + Iterator, + List, + Optional, + Sequence, + Tuple, + Union, + cast, +) + +from . import _manylinux, _musllinux logger = logging.getLogger(__name__) -INTERPRETER_SHORT_NAMES = { +PythonVersion = Sequence[int] +MacVersion = Tuple[int, int] + +INTERPRETER_SHORT_NAMES: Dict[str, str] = { "python": "py", # Generic. "cpython": "cp", "pypy": "pp", "ironpython": "ip", "jython": "jy", -} # type: Dict[str, str] - - -_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32 - - -_LEGACY_MANYLINUX_MAP = { - # CentOS 7 w/ glibc 2.17 (PEP 599) - (2, 17): "manylinux2014", - # CentOS 6 w/ glibc 2.12 (PEP 571) - (2, 12): "manylinux2010", - # CentOS 5 w/ glibc 2.5 (PEP 513) - (2, 5): "manylinux1", } -# If glibc ever changes its major version, we need to know what the last -# minor version was, so we can build the complete list of all versions. -# For now, guess what the highest minor version might be, assume it will -# be 50 for testing. Once this actually happens, update the dictionary -# with the actual value. -_LAST_GLIBC_MINOR = collections.defaultdict(lambda: 50) # type: Dict[int, int] -glibcVersion = collections.namedtuple("Version", ["major", "minor"]) +_32_BIT_INTERPRETER = struct.calcsize("P") == 4 -class Tag(object): + +class Tag: """ A representation of the tag triple for a wheel. @@ -86,8 +51,7 @@ class Tag(object): __slots__ = ["_interpreter", "_abi", "_platform", "_hash"] - def __init__(self, interpreter, abi, platform): - # type: (str, str, str) -> None + def __init__(self, interpreter: str, abi: str, platform: str) -> None: self._interpreter = interpreter.lower() self._abi = abi.lower() self._platform = platform.lower() @@ -99,46 +63,39 @@ def __init__(self, interpreter, abi, platform): self._hash = hash((self._interpreter, self._abi, self._platform)) @property - def interpreter(self): - # type: () -> str + def interpreter(self) -> str: return self._interpreter @property - def abi(self): - # type: () -> str + def abi(self) -> str: return self._abi @property - def platform(self): - # type: () -> str + def platform(self) -> str: return self._platform - def __eq__(self, other): - # type: (object) -> bool + def __eq__(self, other: object) -> bool: if not isinstance(other, Tag): return NotImplemented return ( - (self.platform == other.platform) - and (self.abi == other.abi) - and (self.interpreter == other.interpreter) + (self._hash == other._hash) # Short-circuit ASAP for perf reasons. 
+ and (self._platform == other._platform) + and (self._abi == other._abi) + and (self._interpreter == other._interpreter) ) - def __hash__(self): - # type: () -> int + def __hash__(self) -> int: return self._hash - def __str__(self): - # type: () -> str - return "{}-{}-{}".format(self._interpreter, self._abi, self._platform) + def __str__(self) -> str: + return f"{self._interpreter}-{self._abi}-{self._platform}" - def __repr__(self): - # type: () -> str - return "<{self} @ {self_id}>".format(self=self, self_id=id(self)) + def __repr__(self) -> str: + return f"<{self} @ {id(self)}>" -def parse_tag(tag): - # type: (str) -> FrozenSet[Tag] +def parse_tag(tag: str) -> FrozenSet[Tag]: """ Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances. @@ -154,25 +111,8 @@ def parse_tag(tag): return frozenset(tags) -def _warn_keyword_parameter(func_name, kwargs): - # type: (str, Dict[str, bool]) -> bool - """ - Backwards-compatibility with Python 2.7 to allow treating 'warn' as keyword-only. - """ - if not kwargs: - return False - elif len(kwargs) > 1 or "warn" not in kwargs: - kwargs.pop("warn", None) - arg = next(iter(kwargs.keys())) - raise TypeError( - "{}() got an unexpected keyword argument {!r}".format(func_name, arg) - ) - return kwargs["warn"] - - -def _get_config_var(name, warn=False): - # type: (str, bool) -> Union[int, str, None] - value = sysconfig.get_config_var(name) +def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]: + value: Union[int, str, None] = sysconfig.get_config_var(name) if value is None and warn: logger.debug( "Config variable '%s' is unset, Python ABI tag may be incorrect", name @@ -180,13 +120,11 @@ def _get_config_var(name, warn=False): return value -def _normalize_string(string): - # type: (str) -> str - return string.replace(".", "_").replace("-", "_") +def _normalize_string(string: str) -> str: + return string.replace(".", "_").replace("-", "_").replace(" ", "_") -def _abi3_applies(python_version): - # type: (PythonVersion) -> bool +def _abi3_applies(python_version: PythonVersion) -> bool: """ Determine if the Python version supports abi3. @@ -195,8 +133,7 @@ def _abi3_applies(python_version): return len(python_version) > 1 and tuple(python_version) >= (3, 2) -def _cpython_abis(py_version, warn=False): - # type: (PythonVersion, bool) -> List[str] +def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]: py_version = tuple(py_version) # To allow for version comparison. abis = [] version = _version_nodot(py_version[:2]) @@ -222,7 +159,7 @@ def _cpython_abis(py_version, warn=False): elif debug: # Debug builds can also load "normal" extension modules. # We can also assume no UCS-4 or pymalloc requirement. - abis.append("cp{version}".format(version=version)) + abis.append(f"cp{version}") abis.insert( 0, "cp{version}{debug}{pymalloc}{ucs4}".format( @@ -233,12 +170,12 @@ def _cpython_abis(py_version, warn=False): def cpython_tags( - python_version=None, # type: Optional[PythonVersion] - abis=None, # type: Optional[Iterable[str]] - platforms=None, # type: Optional[Iterable[str]] - **kwargs # type: bool -): - # type: (...) -> Iterator[Tag] + python_version: Optional[PythonVersion] = None, + abis: Optional[Iterable[str]] = None, + platforms: Optional[Iterable[str]] = None, + *, + warn: bool = False, +) -> Iterator[Tag]: """ Yields the tags for a CPython interpreter. 
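`Tag.__eq__` above now compares the precomputed hash first because tag objects are compared in tight loops during wheel resolution. Basic behaviour of the class and of `parse_tag`, as a quick sketch:

    from packaging.tags import Tag, parse_tag

    # Components are lower-cased on construction, so comparison is case-insensitive.
    print(Tag("CP310", "cp310", "linux_x86_64") == Tag("cp310", "cp310", "linux_x86_64"))  # True

    # A compressed tag string expands into one Tag per dotted component.
    print(sorted(str(t) for t in parse_tag("py2.py3-none-any")))
    # ['py2-none-any', 'py3-none-any']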
@@ -254,11 +191,10 @@ def cpython_tags( If 'abi3' or 'none' are specified in 'abis' then they will be yielded at their normal position and not at the beginning. """ - warn = _warn_keyword_parameter("cpython_tags", kwargs) if not python_version: python_version = sys.version_info[:2] - interpreter = "cp{}".format(_version_nodot(python_version[:2])) + interpreter = f"cp{_version_nodot(python_version[:2])}" if abis is None: if len(python_version) > 1: @@ -273,15 +209,13 @@ def cpython_tags( except ValueError: pass - platforms = list(platforms or _platform_tags()) + platforms = list(platforms or platform_tags()) for abi in abis: for platform_ in platforms: yield Tag(interpreter, abi, platform_) if _abi3_applies(python_version): - for tag in (Tag(interpreter, "abi3", platform_) for platform_ in platforms): - yield tag - for tag in (Tag(interpreter, "none", platform_) for platform_ in platforms): - yield tag + yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms) + yield from (Tag(interpreter, "none", platform_) for platform_ in platforms) if _abi3_applies(python_version): for minor_version in range(python_version[1] - 1, 1, -1): @@ -292,20 +226,54 @@ def cpython_tags( yield Tag(interpreter, "abi3", platform_) -def _generic_abi(): - # type: () -> Iterator[str] - abi = sysconfig.get_config_var("SOABI") - if abi: - yield _normalize_string(abi) +def _generic_abi() -> List[str]: + """ + Return the ABI tag based on EXT_SUFFIX. + """ + # The following are examples of `EXT_SUFFIX`. + # We want to keep the parts which are related to the ABI and remove the + # parts which are related to the platform: + # - linux: '.cpython-310-x86_64-linux-gnu.so' => cp310 + # - mac: '.cpython-310-darwin.so' => cp310 + # - win: '.cp310-win_amd64.pyd' => cp310 + # - win: '.pyd' => cp37 (uses _cpython_abis()) + # - pypy: '.pypy38-pp73-x86_64-linux-gnu.so' => pypy38_pp73 + # - graalpy: '.graalpy-38-native-x86_64-darwin.dylib' + # => graalpy_38_native + + ext_suffix = _get_config_var("EXT_SUFFIX", warn=True) + if not isinstance(ext_suffix, str) or ext_suffix[0] != ".": + raise SystemError("invalid sysconfig.get_config_var('EXT_SUFFIX')") + parts = ext_suffix.split(".") + if len(parts) < 3: + # CPython3.7 and earlier uses ".pyd" on Windows. + return _cpython_abis(sys.version_info[:2]) + soabi = parts[1] + if soabi.startswith("cpython"): + # non-windows + abi = "cp" + soabi.split("-")[1] + elif soabi.startswith("cp"): + # windows + abi = soabi.split("-")[0] + elif soabi.startswith("pypy"): + abi = "-".join(soabi.split("-")[:2]) + elif soabi.startswith("graalpy"): + abi = "-".join(soabi.split("-")[:3]) + elif soabi: + # pyston, ironpython, others? + abi = soabi + else: + return [] + return [_normalize_string(abi)] def generic_tags( - interpreter=None, # type: Optional[str] - abis=None, # type: Optional[Iterable[str]] - platforms=None, # type: Optional[Iterable[str]] - **kwargs # type: bool -): - # type: (...) -> Iterator[Tag] + interpreter: Optional[str] = None, + abis: Optional[Iterable[str]] = None, + platforms: Optional[Iterable[str]] = None, + *, + warn: bool = False, +) -> Iterator[Tag]: """ Yields the tags for a generic interpreter. @@ -314,15 +282,15 @@ def generic_tags( The "none" ABI will be added if it was not explicitly provided. 
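The `_generic_abi` rewrite above derives the ABI tag from `EXT_SUFFIX` rather than `SOABI`. A simplified restatement of that parsing (it omits the graalpy branch and the bare-`.pyd` fallback to `_cpython_abis()` that the real code handles):

    import sysconfig
    from typing import Optional

    def abi_from_ext_suffix(ext_suffix: str) -> Optional[str]:
        # '.cpython-310-x86_64-linux-gnu.so' -> ['', 'cpython-310-x86_64-linux-gnu', 'so']
        parts = ext_suffix.split(".")
        if len(parts) < 3:  # e.g. a plain '.pyd' on old Windows CPython
            return None
        soabi = parts[1]
        if soabi.startswith("cpython"):
            abi = "cp" + soabi.split("-")[1]      # -> 'cp310'
        elif soabi.startswith("cp"):
            abi = soabi.split("-")[0]             # '.cp310-win_amd64.pyd' -> 'cp310'
        elif soabi.startswith("pypy"):
            abi = "-".join(soabi.split("-")[:2])  # -> 'pypy38-pp73'
        else:
            abi = soabi
        return abi.replace(".", "_").replace("-", "_")

    print(abi_from_ext_suffix(".cpython-310-x86_64-linux-gnu.so"))           # cp310
    print(abi_from_ext_suffix(sysconfig.get_config_var("EXT_SUFFIX") or ""))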
""" - warn = _warn_keyword_parameter("generic_tags", kwargs) if not interpreter: interp_name = interpreter_name() interp_version = interpreter_version(warn=warn) interpreter = "".join([interp_name, interp_version]) if abis is None: abis = _generic_abi() - platforms = list(platforms or _platform_tags()) - abis = list(abis) + else: + abis = list(abis) + platforms = list(platforms or platform_tags()) if "none" not in abis: abis.append("none") for abi in abis: @@ -330,8 +298,7 @@ def generic_tags( yield Tag(interpreter, abi, platform_) -def _py_interpreter_range(py_version): - # type: (PythonVersion) -> Iterator[str] +def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]: """ Yields Python versions in descending order. @@ -339,19 +306,18 @@ def _py_interpreter_range(py_version): all previous versions of that major version. """ if len(py_version) > 1: - yield "py{version}".format(version=_version_nodot(py_version[:2])) - yield "py{major}".format(major=py_version[0]) + yield f"py{_version_nodot(py_version[:2])}" + yield f"py{py_version[0]}" if len(py_version) > 1: for minor in range(py_version[1] - 1, -1, -1): - yield "py{version}".format(version=_version_nodot((py_version[0], minor))) + yield f"py{_version_nodot((py_version[0], minor))}" def compatible_tags( - python_version=None, # type: Optional[PythonVersion] - interpreter=None, # type: Optional[str] - platforms=None, # type: Optional[Iterable[str]] -): - # type: (...) -> Iterator[Tag] + python_version: Optional[PythonVersion] = None, + interpreter: Optional[str] = None, + platforms: Optional[Iterable[str]] = None, +) -> Iterator[Tag]: """ Yields the sequence of tags that are compatible with a specific version of Python. @@ -362,7 +328,7 @@ def compatible_tags( """ if not python_version: python_version = sys.version_info[:2] - platforms = list(platforms or _platform_tags()) + platforms = list(platforms or platform_tags()) for version in _py_interpreter_range(python_version): for platform_ in platforms: yield Tag(version, "none", platform_) @@ -372,8 +338,7 @@ def compatible_tags( yield Tag(version, "none", "any") -def _mac_arch(arch, is_32bit=_32_BIT_INTERPRETER): - # type: (str, bool) -> str +def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str: if not is_32bit: return arch @@ -383,8 +348,7 @@ def _mac_arch(arch, is_32bit=_32_BIT_INTERPRETER): return "i386" -def _mac_binary_formats(version, cpu_arch): - # type: (MacVersion, str) -> List[str] +def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]: formats = [cpu_arch] if cpu_arch == "x86_64": if version < (10, 4): @@ -416,8 +380,9 @@ def _mac_binary_formats(version, cpu_arch): return formats -def mac_platforms(version=None, arch=None): - # type: (Optional[MacVersion], Optional[str]) -> Iterator[str] +def mac_platforms( + version: Optional[MacVersion] = None, arch: Optional[str] = None +) -> Iterator[str]: """ Yields the platform tags for a macOS system. @@ -426,9 +391,25 @@ def mac_platforms(version=None, arch=None): generate platform tags for. Both parameters default to the appropriate value for the current system. """ - version_str, _, cpu_arch = platform.mac_ver() # type: ignore + version_str, _, cpu_arch = platform.mac_ver() if version is None: version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2]))) + if version == (10, 16): + # When built against an older macOS SDK, Python will report macOS 10.16 + # instead of the real version. 
+ version_str = subprocess.run( + [ + sys.executable, + "-sS", + "-c", + "import platform; print(platform.mac_ver()[0])", + ], + check=True, + env={"SYSTEM_VERSION_COMPAT": "0"}, + stdout=subprocess.PIPE, + text=True, + ).stdout + version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2]))) else: version = version if arch is None: @@ -487,320 +468,30 @@ def mac_platforms(version=None, arch=None): ) -# From PEP 513, PEP 600 -def _is_manylinux_compatible(name, arch, glibc_version): - # type: (str, str, GlibcVersion) -> bool - sys_glibc = _get_glibc_version() - if sys_glibc < glibc_version: - return False - # Check for presence of _manylinux module. - try: - import _manylinux # noqa - except ImportError: - pass - else: - if hasattr(_manylinux, "manylinux_compatible"): - result = _manylinux.manylinux_compatible( - glibc_version[0], glibc_version[1], arch - ) - if result is not None: - return bool(result) - else: - if glibc_version == (2, 5): - if hasattr(_manylinux, "manylinux1_compatible"): - return bool(_manylinux.manylinux1_compatible) - if glibc_version == (2, 12): - if hasattr(_manylinux, "manylinux2010_compatible"): - return bool(_manylinux.manylinux2010_compatible) - if glibc_version == (2, 17): - if hasattr(_manylinux, "manylinux2014_compatible"): - return bool(_manylinux.manylinux2014_compatible) - return True - - -def _glibc_version_string(): - # type: () -> Optional[str] - # Returns glibc version string, or None if not using glibc. - return _glibc_version_string_confstr() or _glibc_version_string_ctypes() - - -def _glibc_version_string_confstr(): - # type: () -> Optional[str] - """ - Primary implementation of glibc_version_string using os.confstr. - """ - # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely - # to be broken or missing. This strategy is used in the standard library - # platform module. - # https://github.com/python/cpython/blob/fcf1d003bf4f0100c9d0921ff3d70e1127ca1b71/Lib/platform.py#L175-L183 - try: - # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17". - version_string = os.confstr( # type: ignore[attr-defined] # noqa: F821 - "CS_GNU_LIBC_VERSION" - ) - assert version_string is not None - _, version = version_string.split() # type: Tuple[str, str] - except (AssertionError, AttributeError, OSError, ValueError): - # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)... - return None - return version - - -def _glibc_version_string_ctypes(): - # type: () -> Optional[str] - """ - Fallback implementation of glibc_version_string using ctypes. - """ - try: - import ctypes - except ImportError: - return None - - # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen - # manpage says, "If filename is NULL, then the returned handle is for the - # main program". This way we can let the linker do the work to figure out - # which libc our process is actually using. - # - # We must also handle the special case where the executable is not a - # dynamically linked executable. This can occur when using musl libc, - # for example. In this situation, dlopen() will error, leading to an - # OSError. Interestingly, at least in the case of musl, there is no - # errno set on the OSError. The single string argument used to construct - # OSError comes from libc itself and is therefore not portable to - # hard code here. In any case, failure to call dlopen() means we - # can proceed, so we bail on our attempt. - try: - # Note: typeshed is wrong here so we are ignoring this line. 
- process_namespace = ctypes.CDLL(None) # type: ignore - except OSError: - return None - - try: - gnu_get_libc_version = process_namespace.gnu_get_libc_version - except AttributeError: - # Symbol doesn't exist -> therefore, we are not linked to - # glibc. - return None - - # Call gnu_get_libc_version, which returns a string like "2.5" - gnu_get_libc_version.restype = ctypes.c_char_p - version_str = gnu_get_libc_version() # type: str - # py2 / py3 compatibility: - if not isinstance(version_str, str): - version_str = version_str.decode("ascii") - - return version_str - - -def _parse_glibc_version(version_str): - # type: (str) -> Tuple[int, int] - # Parse glibc version. - # - # We use a regexp instead of str.split because we want to discard any - # random junk that might come after the minor version -- this might happen - # in patched/forked versions of glibc (e.g. Linaro's version of glibc - # uses version strings like "2.20-2014.11"). See gh-3588. - m = re.match(r"(?P[0-9]+)\.(?P[0-9]+)", version_str) - if not m: - warnings.warn( - "Expected glibc version with 2 components major.minor," - " got: %s" % version_str, - RuntimeWarning, - ) - return -1, -1 - return (int(m.group("major")), int(m.group("minor"))) - - -_glibc_version = [] # type: List[Tuple[int, int]] - - -def _get_glibc_version(): - # type: () -> Tuple[int, int] - if _glibc_version: - return _glibc_version[0] - version_str = _glibc_version_string() - if version_str is None: - _glibc_version.append((-1, -1)) - else: - _glibc_version.append(_parse_glibc_version(version_str)) - return _glibc_version[0] - - -# Python does not provide platform information at sufficient granularity to -# identify the architecture of the running executable in some cases, so we -# determine it dynamically by reading the information from the running -# process. This only applies on Linux, which uses the ELF format. -class _ELFFileHeader(object): - # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header - class _InvalidELFFileHeader(ValueError): - """ - An invalid ELF file header was found. 
- """ - - ELF_MAGIC_NUMBER = 0x7F454C46 - ELFCLASS32 = 1 - ELFCLASS64 = 2 - ELFDATA2LSB = 1 - ELFDATA2MSB = 2 - EM_386 = 3 - EM_S390 = 22 - EM_ARM = 40 - EM_X86_64 = 62 - EF_ARM_ABIMASK = 0xFF000000 - EF_ARM_ABI_VER5 = 0x05000000 - EF_ARM_ABI_FLOAT_HARD = 0x00000400 - - def __init__(self, file): - # type: (IO[bytes]) -> None - def unpack(fmt): - # type: (str) -> int - try: - (result,) = struct.unpack( - fmt, file.read(struct.calcsize(fmt)) - ) # type: (int, ) - except struct.error: - raise _ELFFileHeader._InvalidELFFileHeader() - return result - - self.e_ident_magic = unpack(">I") - if self.e_ident_magic != self.ELF_MAGIC_NUMBER: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_class = unpack("B") - if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_data = unpack("B") - if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_version = unpack("B") - self.e_ident_osabi = unpack("B") - self.e_ident_abiversion = unpack("B") - self.e_ident_pad = file.read(7) - format_h = "H" - format_i = "I" - format_q = "Q" - format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q - self.e_type = unpack(format_h) - self.e_machine = unpack(format_h) - self.e_version = unpack(format_i) - self.e_entry = unpack(format_p) - self.e_phoff = unpack(format_p) - self.e_shoff = unpack(format_p) - self.e_flags = unpack(format_i) - self.e_ehsize = unpack(format_h) - self.e_phentsize = unpack(format_h) - self.e_phnum = unpack(format_h) - self.e_shentsize = unpack(format_h) - self.e_shnum = unpack(format_h) - self.e_shstrndx = unpack(format_h) - - -def _get_elf_header(): - # type: () -> Optional[_ELFFileHeader] - try: - with open(sys.executable, "rb") as f: - elf_header = _ELFFileHeader(f) - except (IOError, OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader): - return None - return elf_header - - -def _is_linux_armhf(): - # type: () -> bool - # hard-float ABI can be detected from the ELF header of the running - # process - # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf - elf_header = _get_elf_header() - if elf_header is None: - return False - result = elf_header.e_ident_class == elf_header.ELFCLASS32 - result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB - result &= elf_header.e_machine == elf_header.EM_ARM - result &= ( - elf_header.e_flags & elf_header.EF_ARM_ABIMASK - ) == elf_header.EF_ARM_ABI_VER5 - result &= ( - elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD - ) == elf_header.EF_ARM_ABI_FLOAT_HARD - return result - - -def _is_linux_i686(): - # type: () -> bool - elf_header = _get_elf_header() - if elf_header is None: - return False - result = elf_header.e_ident_class == elf_header.ELFCLASS32 - result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB - result &= elf_header.e_machine == elf_header.EM_386 - return result - - -def _have_compatible_manylinux_abi(arch): - # type: (str) -> bool - if arch == "armv7l": - return _is_linux_armhf() - if arch == "i686": - return _is_linux_i686() - return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"} - - -def _manylinux_tags(linux, arch): - # type: (str, str) -> Iterator[str] - # Oldest glibc to be supported regardless of architecture is (2, 17). - too_old_glibc2 = glibcVersion(2, 16) - if arch in {"x86_64", "i686"}: - # On x86/i686 also oldest glibc to be supported is (2, 5). 
- too_old_glibc2 = glibcVersion(2, 4) - current_glibc = glibcVersion(*_get_glibc_version()) - glibc_max_list = [current_glibc] - # We can assume compatibility across glibc major versions. - # https://sourceware.org/bugzilla/show_bug.cgi?id=24636 - # - # Build a list of maximum glibc versions so that we can - # output the canonical list of all glibc from current_glibc - # down to too_old_glibc2, including all intermediary versions. - for glibc_major in range(current_glibc.major - 1, 1, -1): - glibc_max_list.append(glibcVersion(glibc_major, _LAST_GLIBC_MINOR[glibc_major])) - for glibc_max in glibc_max_list: - if glibc_max.major == too_old_glibc2.major: - min_minor = too_old_glibc2.minor - else: - # For other glibc major versions oldest supported is (x, 0). - min_minor = -1 - for glibc_minor in range(glibc_max.minor, min_minor, -1): - glibc_version = (glibc_max.major, glibc_minor) - tag = "manylinux_{}_{}".format(*glibc_version) - if _is_manylinux_compatible(tag, arch, glibc_version): - yield linux.replace("linux", tag) - # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags. - if glibc_version in _LEGACY_MANYLINUX_MAP: - legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version] - if _is_manylinux_compatible(legacy_tag, arch, glibc_version): - yield linux.replace("linux", legacy_tag) - - -def _linux_platforms(is_32bit=_32_BIT_INTERPRETER): - # type: (bool) -> Iterator[str] - linux = _normalize_string(distutils.util.get_platform()) +def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]: + linux = _normalize_string(sysconfig.get_platform()) + if not linux.startswith("linux_"): + # we should never be here, just yield the sysconfig one and return + yield linux + return if is_32bit: if linux == "linux_x86_64": linux = "linux_i686" elif linux == "linux_aarch64": - linux = "linux_armv7l" + linux = "linux_armv8l" _, arch = linux.split("_", 1) - if _have_compatible_manylinux_abi(arch): - for tag in _manylinux_tags(linux, arch): - yield tag - yield linux + archs = {"armv8l": ["armv8l", "armv7l"]}.get(arch, [arch]) + yield from _manylinux.platform_tags(archs) + yield from _musllinux.platform_tags(archs) + for arch in archs: + yield f"linux_{arch}" -def _generic_platforms(): - # type: () -> Iterator[str] - yield _normalize_string(distutils.util.get_platform()) +def _generic_platforms() -> Iterator[str]: + yield _normalize_string(sysconfig.get_platform()) -def _platform_tags(): - # type: () -> Iterator[str] +def platform_tags() -> Iterator[str]: """ Provides the platform tags for this installation. """ @@ -812,25 +503,21 @@ def _platform_tags(): return _generic_platforms() -def interpreter_name(): - # type: () -> str +def interpreter_name() -> str: """ Returns the name of the running interpreter. + + Some implementations have a reserved, two-letter abbreviation which will + be returned when appropriate. """ - try: - name = sys.implementation.name # type: ignore - except AttributeError: # pragma: no cover - # Python 2.7 compatibility. - name = platform.python_implementation().lower() + name = sys.implementation.name return INTERPRETER_SHORT_NAMES.get(name) or name -def interpreter_version(**kwargs): - # type: (bool) -> str +def interpreter_version(*, warn: bool = False) -> str: """ Returns the version of the running interpreter. 
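With `_linux_platforms` now delegating to the `_manylinux` and `_musllinux` helpers (and mapping 32-bit mode on aarch64 to `armv8l`, which also implies `armv7l`), enumerating the final tag priority order is a one-liner. A sketch; the output is host-dependent:

    from packaging import tags

    # Most specific (highest priority) tags come first.
    for tag in list(tags.sys_tags())[:3]:
        print(tag)
    # Illustrative output on a glibc x86_64 Linux with CPython 3.10:
    #   cp310-cp310-manylinux_2_35_x86_64
    #   cp310-cp310-manylinux_2_34_x86_64
    #   cp310-cp310-manylinux_2_33_x86_64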
""" - warn = _warn_keyword_parameter("interpreter_version", kwargs) version = _get_config_var("py_version_nodot", warn=warn) if version: version = str(version) @@ -839,28 +526,28 @@ def interpreter_version(**kwargs): return version -def _version_nodot(version): - # type: (PythonVersion) -> str +def _version_nodot(version: PythonVersion) -> str: return "".join(map(str, version)) -def sys_tags(**kwargs): - # type: (bool) -> Iterator[Tag] +def sys_tags(*, warn: bool = False) -> Iterator[Tag]: """ Returns the sequence of tag triples for the running interpreter. The order of the sequence corresponds to priority order for the interpreter, from most to least important. """ - warn = _warn_keyword_parameter("sys_tags", kwargs) interp_name = interpreter_name() if interp_name == "cp": - for tag in cpython_tags(warn=warn): - yield tag + yield from cpython_tags(warn=warn) else: - for tag in generic_tags(): - yield tag + yield from generic_tags() - for tag in compatible_tags(): - yield tag + if interp_name == "pp": + interp = "pp3" + elif interp_name == "cp": + interp = "cp" + interpreter_version(warn=warn) + else: + interp = None + yield from compatible_tags(interpreter=interp) diff --git a/conda_lock/_vendor/poetry/core/_vendor/packaging/utils.py b/conda_lock/_vendor/poetry/core/_vendor/packaging/utils.py index 6e8c2a3e..c2c2f75a 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/packaging/utils.py +++ b/conda_lock/_vendor/poetry/core/_vendor/packaging/utils.py @@ -1,22 +1,21 @@ # This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. -from __future__ import absolute_import, division, print_function import re +from typing import FrozenSet, NewType, Tuple, Union, cast -from ._typing import TYPE_CHECKING, cast from .tags import Tag, parse_tag from .version import InvalidVersion, Version -if TYPE_CHECKING: # pragma: no cover - from typing import FrozenSet, NewType, Tuple, Union +BuildTag = Union[Tuple[()], Tuple[int, str]] +NormalizedName = NewType("NormalizedName", str) - BuildTag = Union[Tuple[()], Tuple[int, str]] - NormalizedName = NewType("NormalizedName", str) -else: - BuildTag = tuple - NormalizedName = str + +class InvalidName(ValueError): + """ + An invalid distribution name; users should refer to the packaging user guide. + """ class InvalidWheelFilename(ValueError): @@ -31,87 +30,111 @@ class InvalidSdistFilename(ValueError): """ +# Core metadata spec for `Name` +_validate_regex = re.compile( + r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE +) _canonicalize_regex = re.compile(r"[-_.]+") +_normalized_regex = re.compile(r"^([a-z0-9]|[a-z0-9]([a-z0-9-](?!--))*[a-z0-9])$") # PEP 427: The build number must start with a digit. _build_tag_regex = re.compile(r"(\d+)(.*)") -def canonicalize_name(name): - # type: (str) -> NormalizedName +def canonicalize_name(name: str, *, validate: bool = False) -> NormalizedName: + if validate and not _validate_regex.match(name): + raise InvalidName(f"name is invalid: {name!r}") # This is taken from PEP 503. 
diff --git a/conda_lock/_vendor/poetry/core/_vendor/packaging/utils.py b/conda_lock/_vendor/poetry/core/_vendor/packaging/utils.py
index 6e8c2a3e..c2c2f75a 100644
--- a/conda_lock/_vendor/poetry/core/_vendor/packaging/utils.py
+++ b/conda_lock/_vendor/poetry/core/_vendor/packaging/utils.py
@@ -1,22 +1,21 @@
 # This file is dual licensed under the terms of the Apache License, Version
 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
 # for complete details.
-from __future__ import absolute_import, division, print_function
 
 import re
+from typing import FrozenSet, NewType, Tuple, Union, cast
 
-from ._typing import TYPE_CHECKING, cast
 from .tags import Tag, parse_tag
 from .version import InvalidVersion, Version
 
-if TYPE_CHECKING:  # pragma: no cover
-    from typing import FrozenSet, NewType, Tuple, Union
+BuildTag = Union[Tuple[()], Tuple[int, str]]
+NormalizedName = NewType("NormalizedName", str)
 
-    BuildTag = Union[Tuple[()], Tuple[int, str]]
-    NormalizedName = NewType("NormalizedName", str)
-else:
-    BuildTag = tuple
-    NormalizedName = str
+
+class InvalidName(ValueError):
+    """
+    An invalid distribution name; users should refer to the packaging user guide.
+    """
 
 
 class InvalidWheelFilename(ValueError):
@@ -31,87 +30,111 @@ class InvalidSdistFilename(ValueError):
     """
 
 
+# Core metadata spec for `Name`
+_validate_regex = re.compile(
+    r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE
+)
 _canonicalize_regex = re.compile(r"[-_.]+")
+_normalized_regex = re.compile(r"^([a-z0-9]|[a-z0-9]([a-z0-9-](?!--))*[a-z0-9])$")
 # PEP 427: The build number must start with a digit.
 _build_tag_regex = re.compile(r"(\d+)(.*)")
 
 
-def canonicalize_name(name):
-    # type: (str) -> NormalizedName
+def canonicalize_name(name: str, *, validate: bool = False) -> NormalizedName:
+    if validate and not _validate_regex.match(name):
+        raise InvalidName(f"name is invalid: {name!r}")
     # This is taken from PEP 503.
     value = _canonicalize_regex.sub("-", name).lower()
     return cast(NormalizedName, value)
 
 
-def canonicalize_version(version):
-    # type: (Union[Version, str]) -> Union[Version, str]
+def is_normalized_name(name: str) -> bool:
+    return _normalized_regex.match(name) is not None
+
+
+def canonicalize_version(
+    version: Union[Version, str], *, strip_trailing_zero: bool = True
+) -> str:
     """
     This is very similar to Version.__str__, but has one subtle difference
     with the way it handles the release segment.
     """
-    if not isinstance(version, Version):
+    if isinstance(version, str):
         try:
-            version = Version(version)
+            parsed = Version(version)
         except InvalidVersion:
             # Legacy versions cannot be normalized
             return version
+    else:
+        parsed = version
 
     parts = []
 
     # Epoch
-    if version.epoch != 0:
-        parts.append("{0}!".format(version.epoch))
+    if parsed.epoch != 0:
+        parts.append(f"{parsed.epoch}!")
 
     # Release segment
-    # NB: This strips trailing '.0's to normalize
-    parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in version.release)))
+    release_segment = ".".join(str(x) for x in parsed.release)
+    if strip_trailing_zero:
+        # NB: This strips trailing '.0's to normalize
+        release_segment = re.sub(r"(\.0)+$", "", release_segment)
+    parts.append(release_segment)
 
     # Pre-release
-    if version.pre is not None:
-        parts.append("".join(str(x) for x in version.pre))
+    if parsed.pre is not None:
+        parts.append("".join(str(x) for x in parsed.pre))
 
     # Post-release
-    if version.post is not None:
-        parts.append(".post{0}".format(version.post))
+    if parsed.post is not None:
+        parts.append(f".post{parsed.post}")
 
     # Development release
-    if version.dev is not None:
-        parts.append(".dev{0}".format(version.dev))
+    if parsed.dev is not None:
+        parts.append(f".dev{parsed.dev}")
 
     # Local version segment
-    if version.local is not None:
-        parts.append("+{0}".format(version.local))
+    if parsed.local is not None:
+        parts.append(f"+{parsed.local}")
 
     return "".join(parts)
 
 
-def parse_wheel_filename(filename):
-    # type: (str) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]
+def parse_wheel_filename(
+    filename: str,
+) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]:
     if not filename.endswith(".whl"):
         raise InvalidWheelFilename(
-            "Invalid wheel filename (extension must be '.whl'): {0}".format(filename)
+            f"Invalid wheel filename (extension must be '.whl'): {filename}"
         )
 
     filename = filename[:-4]
     dashes = filename.count("-")
     if dashes not in (4, 5):
         raise InvalidWheelFilename(
-            "Invalid wheel filename (wrong number of parts): {0}".format(filename)
+            f"Invalid wheel filename (wrong number of parts): {filename}"
        )
 
     parts = filename.split("-", dashes - 2)
     name_part = parts[0]
-    # See PEP 427 for the rules on escaping the project name
+    # See PEP 427 for the rules on escaping the project name.
if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None: - raise InvalidWheelFilename("Invalid project name: {0}".format(filename)) + raise InvalidWheelFilename(f"Invalid project name: {filename}") name = canonicalize_name(name_part) - version = Version(parts[1]) + + try: + version = Version(parts[1]) + except InvalidVersion as e: + raise InvalidWheelFilename( + f"Invalid wheel filename (invalid version): {filename}" + ) from e + if dashes == 5: build_part = parts[2] build_match = _build_tag_regex.match(build_part) if build_match is None: raise InvalidWheelFilename( - "Invalid build number: {0} in '{1}'".format(build_part, filename) + f"Invalid build number: {build_part} in '{filename}'" ) build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2))) else: @@ -120,19 +143,30 @@ def parse_wheel_filename(filename): return (name, version, build, tags) -def parse_sdist_filename(filename): - # type: (str) -> Tuple[NormalizedName, Version] - if not filename.endswith(".tar.gz"): +def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]: + if filename.endswith(".tar.gz"): + file_stem = filename[: -len(".tar.gz")] + elif filename.endswith(".zip"): + file_stem = filename[: -len(".zip")] + else: raise InvalidSdistFilename( - "Invalid sdist filename (extension must be '.tar.gz'): {0}".format(filename) + f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):" + f" {filename}" ) # We are requiring a PEP 440 version, which cannot contain dashes, # so we split on the last dash. - name_part, sep, version_part = filename[:-7].rpartition("-") + name_part, sep, version_part = file_stem.rpartition("-") if not sep: - raise InvalidSdistFilename("Invalid sdist filename: {0}".format(filename)) + raise InvalidSdistFilename(f"Invalid sdist filename: {filename}") name = canonicalize_name(name_part) - version = Version(version_part) + + try: + version = Version(version_part) + except InvalidVersion as e: + raise InvalidSdistFilename( + f"Invalid sdist filename (invalid version): {filename}" + ) from e + return (name, version) diff --git a/conda_lock/_vendor/poetry/core/_vendor/packaging/version.py b/conda_lock/_vendor/poetry/core/_vendor/packaging/version.py index 517d91f2..5faab9bd 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/packaging/version.py +++ b/conda_lock/_vendor/poetry/core/_vendor/packaging/version.py @@ -1,266 +1,125 @@ # This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. -from __future__ import absolute_import, division, print_function +""" +.. 
diff --git a/conda_lock/_vendor/poetry/core/_vendor/packaging/version.py b/conda_lock/_vendor/poetry/core/_vendor/packaging/version.py
index 517d91f2..5faab9bd 100644
--- a/conda_lock/_vendor/poetry/core/_vendor/packaging/version.py
+++ b/conda_lock/_vendor/poetry/core/_vendor/packaging/version.py
@@ -1,266 +1,125 @@
 # This file is dual licensed under the terms of the Apache License, Version
 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
 # for complete details.
-from __future__ import absolute_import, division, print_function
+"""
+.. testsetup::
+
+    from packaging.version import parse, Version
+"""
 
-import collections
 import itertools
 import re
-import warnings
-
-from ._structures import Infinity, NegativeInfinity
-from ._typing import TYPE_CHECKING
-
-if TYPE_CHECKING:  # pragma: no cover
-    from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union
-
-    from ._structures import InfinityType, NegativeInfinityType
-
-    InfiniteTypes = Union[InfinityType, NegativeInfinityType]
-    PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
-    SubLocalType = Union[InfiniteTypes, int, str]
-    LocalType = Union[
-        NegativeInfinityType,
-        Tuple[
-            Union[
-                SubLocalType,
-                Tuple[SubLocalType, str],
-                Tuple[NegativeInfinityType, SubLocalType],
-            ],
-            ...,
-        ],
-    ]
-    CmpKey = Tuple[
-        int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
-    ]
-    LegacyCmpKey = Tuple[int, Tuple[str, ...]]
-    VersionComparisonMethod = Callable[
-        [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
-    ]
-
-__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
-
-
-_Version = collections.namedtuple(
-    "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
-)
-
-
-def parse(version):
-    # type: (str) -> Union[LegacyVersion, Version]
-    """
-    Parse the given version string and return either a :class:`Version` object
-    or a :class:`LegacyVersion` object depending on if the given version is
-    a valid PEP 440 version or a legacy version.
+from typing import Any, Callable, NamedTuple, Optional, SupportsInt, Tuple, Union
+
+from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
+
+__all__ = ["VERSION_PATTERN", "parse", "Version", "InvalidVersion"]
+
+LocalType = Tuple[Union[int, str], ...]
+
+CmpPrePostDevType = Union[InfinityType, NegativeInfinityType, Tuple[str, int]]
+CmpLocalType = Union[
+    NegativeInfinityType,
+    Tuple[Union[Tuple[int, str], Tuple[NegativeInfinityType, Union[int, str]]], ...],
+]
+CmpKey = Tuple[
+    int,
+    Tuple[int, ...],
+    CmpPrePostDevType,
+    CmpPrePostDevType,
+    CmpPrePostDevType,
+    CmpLocalType,
+]
+VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool]
+
+
+class _Version(NamedTuple):
+    epoch: int
+    release: Tuple[int, ...]
+    dev: Optional[Tuple[str, int]]
+    pre: Optional[Tuple[str, int]]
+    post: Optional[Tuple[str, int]]
+    local: Optional[LocalType]
+
+
+def parse(version: str) -> "Version":
+    """Parse the given version string.
+
+    >>> parse('1.0.dev1')
+    <Version('1.0.dev1')>
+
+    :param version: The version string to parse.
+    :raises InvalidVersion: When the version string is not a valid version.
     """
-    try:
-        return Version(version)
-    except InvalidVersion:
-        return LegacyVersion(version)
+    return Version(version)
 
 
 class InvalidVersion(ValueError):
-    """
-    An invalid version was found, users should refer to PEP 440.
+    """Raised when a version string is not a valid version.
+
+    >>> Version("invalid")
+    Traceback (most recent call last):
+        ...
+    packaging.version.InvalidVersion: Invalid version: 'invalid'
     """
 
 
-class _BaseVersion(object):
-    _key = None  # type: Union[CmpKey, LegacyCmpKey]
+class _BaseVersion:
+    _key: Tuple[Any, ...]
 
-    def __hash__(self):
-        # type: () -> int
+    def __hash__(self) -> int:
         return hash(self._key)
 
     # Please keep the duplicated `isinstance` check
     # in the six comparisons hereunder
     # unless you find a way to avoid adding overhead function calls.
-    def __lt__(self, other):
-        # type: (_BaseVersion) -> bool
+    def __lt__(self, other: "_BaseVersion") -> bool:
         if not isinstance(other, _BaseVersion):
             return NotImplemented
 
         return self._key < other._key
 
-    def __le__(self, other):
-        # type: (_BaseVersion) -> bool
+    def __le__(self, other: "_BaseVersion") -> bool:
         if not isinstance(other, _BaseVersion):
             return NotImplemented
 
         return self._key <= other._key
 
-    def __eq__(self, other):
-        # type: (object) -> bool
+    def __eq__(self, other: object) -> bool:
         if not isinstance(other, _BaseVersion):
             return NotImplemented
 
         return self._key == other._key
 
-    def __ge__(self, other):
-        # type: (_BaseVersion) -> bool
+    def __ge__(self, other: "_BaseVersion") -> bool:
         if not isinstance(other, _BaseVersion):
             return NotImplemented
 
         return self._key >= other._key
 
-    def __gt__(self, other):
-        # type: (_BaseVersion) -> bool
+    def __gt__(self, other: "_BaseVersion") -> bool:
         if not isinstance(other, _BaseVersion):
             return NotImplemented
 
         return self._key > other._key
 
-    def __ne__(self, other):
-        # type: (object) -> bool
+    def __ne__(self, other: object) -> bool:
         if not isinstance(other, _BaseVersion):
             return NotImplemented
 
         return self._key != other._key
 
 
-class LegacyVersion(_BaseVersion):
-    def __init__(self, version):
-        # type: (str) -> None
-        self._version = str(version)
-        self._key = _legacy_cmpkey(self._version)
-
-        warnings.warn(
-            "Creating a LegacyVersion has been deprecated and will be "
-            "removed in the next major release",
-            DeprecationWarning,
-        )
-
-    def __str__(self):
-        # type: () -> str
-        return self._version
-
-    def __repr__(self):
-        # type: () -> str
-        return "<LegacyVersion({0})>".format(repr(str(self)))
-
-    @property
-    def public(self):
-        # type: () -> str
-        return self._version
-
-    @property
-    def base_version(self):
-        # type: () -> str
-        return self._version
-
-    @property
-    def epoch(self):
-        # type: () -> int
-        return -1
-
-    @property
-    def release(self):
-        # type: () -> None
-        return None
-
-    @property
-    def pre(self):
-        # type: () -> None
-        return None
-
-    @property
-    def post(self):
-        # type: () -> None
-        return None
-
-    @property
-    def dev(self):
-        # type: () -> None
-        return None
-
-    @property
-    def local(self):
-        # type: () -> None
-        return None
-
-    @property
-    def is_prerelease(self):
-        # type: () -> bool
-        return False
-
-    @property
-    def is_postrelease(self):
-        # type: () -> bool
-        return False
-
-    @property
-    def is_devrelease(self):
-        # type: () -> bool
-        return False
-
-
-_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
-
-_legacy_version_replacement_map = {
-    "pre": "c",
-    "preview": "c",
-    "-": "final-",
-    "rc": "c",
-    "dev": "@",
-}
-
-
-def _parse_version_parts(s):
-    # type: (str) -> Iterator[str]
-    for part in _legacy_version_component_re.split(s):
-        part = _legacy_version_replacement_map.get(part, part)
-
-        if not part or part == ".":
-            continue
-
-        if part[:1] in "0123456789":
-            # pad for numeric comparison
-            yield part.zfill(8)
-        else:
-            yield "*" + part
-
-    # ensure that alpha/beta/candidate are before final
-    yield "*final"
-
-
-def _legacy_cmpkey(version):
-    # type: (str) -> LegacyCmpKey
-
-    # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch
-    # greater than or equal to 0. This will effectively put the LegacyVersion,
-    # which uses the defacto standard originally implemented by setuptools,
-    # as before all PEP 440 versions.
-    epoch = -1
-
-    # This scheme is taken from pkg_resources.parse_version setuptools prior to
-    # it's adoption of the packaging library.
-    parts = []  # type: List[str]
-    for part in _parse_version_parts(version.lower()):
-        if part.startswith("*"):
-            # remove "-" before a prerelease tag
-            if part < "*final":
-                while parts and parts[-1] == "*final-":
-                    parts.pop()
-
-            # remove trailing zeros from each series of numeric parts
-            while parts and parts[-1] == "00000000":
-                parts.pop()
-
-        parts.append(part)
-
-    return epoch, tuple(parts)
-
-
 # Deliberately not anchored to the start and end of the string, to make it
 # easier for 3rd party code to reuse
-VERSION_PATTERN = r"""
+_VERSION_PATTERN = r"""
     v?
     (?:
         (?:(?P<epoch>[0-9]+)!)?                           # epoch
         (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
         (?P<pre>                                          # pre-release
             [-_\.]?
-            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            (?P<pre_l>alpha|a|beta|b|preview|pre|c|rc)
             [-_\.]?
             (?P<pre_n>[0-9]+)?
         )?
@@ -284,18 +143,61 @@ def _legacy_cmpkey(version):
     (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
 """
 
+VERSION_PATTERN = _VERSION_PATTERN
+"""
+A string containing the regular expression used to match a valid version.
+
+The pattern is not anchored at either end, and is intended for embedding in larger
+expressions (for example, matching a version number as part of a file name). The
+regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``
+flags set.
+
+:meta hide-value:
+"""
+
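
Since the docstring above requires the ``re.VERBOSE`` and ``re.IGNORECASE`` flags, a minimal sketch of embedding the pattern (the `_version_re` name is hypothetical; this mirrors the compile call in the class below)::

    import re
    from packaging.version import VERSION_PATTERN

    # The pattern is unanchored by design, so anchor it here.
    _version_re = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)

    match = _version_re.match("1.2.3rc1")
    assert match is not None and match.group("release") == "1.2.3"
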
 
 class Version(_BaseVersion):
+    """This class abstracts handling of a project's versions.
+
+    A :class:`Version` instance is comparison aware and can be compared and
+    sorted using the standard Python interfaces.
+
+    >>> v1 = Version("1.0a5")
+    >>> v2 = Version("1.0")
+    >>> v1
+    <Version('1.0a5')>
+    >>> v2
+    <Version('1.0')>
+    >>> v1 < v2
+    True
+    >>> v1 == v2
+    False
+    >>> v1 > v2
+    False
+    >>> v1 >= v2
+    False
+    >>> v1 <= v2
+    True
+    """
 
     _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+    _key: CmpKey
+
+    def __init__(self, version: str) -> None:
+        """Initialize a Version object.
 
-    def __init__(self, version):
-        # type: (str) -> None
+        :param version:
+            The string representation of a version which will be parsed and normalized
+            before use.
+        :raises InvalidVersion:
+            If the ``version`` does not conform to PEP 440 in any way then this
+            exception will be raised.
+        """
 
         # Validate the version and parse it into pieces
         match = self._regex.search(version)
         if not match:
-            raise InvalidVersion("Invalid version: '{0}'".format(version))
+            raise InvalidVersion(f"Invalid version: '{version}'")
 
         # Store the parsed out pieces of the version
         self._version = _Version(
@@ -319,17 +221,25 @@ def __init__(self, version):
             self._version.local,
         )
 
-    def __repr__(self):
-        # type: () -> str
-        return "".format(repr(str(self)))
+    def __repr__(self) -> str:
+        """A representation of the Version that shows all internal state.
+
+        >>> Version('1.0.0')
+        <Version('1.0.0')>
+        """
+        return f""
+
+    def __str__(self) -> str:
+        """A string representation of the version that can be rounded-tripped.
 
-    def __str__(self):
-        # type: () -> str
+        >>> str(Version("1.0a5"))
+        '1.0a5'
+        """
         parts = []
 
         # Epoch
         if self.epoch != 0:
-            parts.append("{0}!".format(self.epoch))
+            parts.append(f"{self.epoch}!")
 
         # Release segment
         parts.append(".".join(str(x) for x in self.release))
@@ -340,67 +250,128 @@ def __str__(self):
 
         # Post-release
         if self.post is not None:
-            parts.append(".post{0}".format(self.post))
+            parts.append(f".post{self.post}")
 
         # Development release
         if self.dev is not None:
-            parts.append(".dev{0}".format(self.dev))
+            parts.append(f".dev{self.dev}")
 
         # Local version segment
         if self.local is not None:
-            parts.append("+{0}".format(self.local))
+            parts.append(f"+{self.local}")
 
         return "".join(parts)
 
     @property
-    def epoch(self):
-        # type: () -> int
-        _epoch = self._version.epoch  # type: int
-        return _epoch
+    def epoch(self) -> int:
+        """The epoch of the version.
+
+        >>> Version("2.0.0").epoch
+        0
+        >>> Version("1!2.0.0").epoch
+        1
+        """
+        return self._version.epoch
 
     @property
-    def release(self):
-        # type: () -> Tuple[int, ...]
-        _release = self._version.release  # type: Tuple[int, ...]
-        return _release
+    def release(self) -> Tuple[int, ...]:
+        """The components of the "release" segment of the version.
+
+        >>> Version("1.2.3").release
+        (1, 2, 3)
+        >>> Version("2.0.0").release
+        (2, 0, 0)
+        >>> Version("1!2.0.0.post0").release
+        (2, 0, 0)
+
+        Includes trailing zeroes but not the epoch or any pre-release / development /
+        post-release suffixes.
+        """
+        return self._version.release
 
     @property
-    def pre(self):
-        # type: () -> Optional[Tuple[str, int]]
-        _pre = self._version.pre  # type: Optional[Tuple[str, int]]
-        return _pre
+    def pre(self) -> Optional[Tuple[str, int]]:
+        """The pre-release segment of the version.
+
+        >>> print(Version("1.2.3").pre)
+        None
+        >>> Version("1.2.3a1").pre
+        ('a', 1)
+        >>> Version("1.2.3b1").pre
+        ('b', 1)
+        >>> Version("1.2.3rc1").pre
+        ('rc', 1)
+        """
+        return self._version.pre
 
     @property
-    def post(self):
-        # type: () -> Optional[Tuple[str, int]]
+    def post(self) -> Optional[int]:
+        """The post-release number of the version.
+
+        >>> print(Version("1.2.3").post)
+        None
+        >>> Version("1.2.3.post1").post
+        1
+        """
         return self._version.post[1] if self._version.post else None
 
     @property
-    def dev(self):
-        # type: () -> Optional[Tuple[str, int]]
+    def dev(self) -> Optional[int]:
+        """The development number of the version.
+
+        >>> print(Version("1.2.3").dev)
+        None
+        >>> Version("1.2.3.dev1").dev
+        1
+        """
         return self._version.dev[1] if self._version.dev else None
 
     @property
-    def local(self):
-        # type: () -> Optional[str]
+    def local(self) -> Optional[str]:
+        """The local version segment of the version.
+
+        >>> print(Version("1.2.3").local)
+        None
+        >>> Version("1.2.3+abc").local
+        'abc'
+        """
         if self._version.local:
             return ".".join(str(x) for x in self._version.local)
         else:
             return None
 
     @property
-    def public(self):
-        # type: () -> str
+    def public(self) -> str:
+        """The public portion of the version.
+
+        >>> Version("1.2.3").public
+        '1.2.3'
+        >>> Version("1.2.3+abc").public
+        '1.2.3'
+        >>> Version("1.2.3+abc.dev1").public
+        '1.2.3'
+        """
         return str(self).split("+", 1)[0]
 
     @property
-    def base_version(self):
-        # type: () -> str
+    def base_version(self) -> str:
+        """The "base version" of the version.
+
+        >>> Version("1.2.3").base_version
+        '1.2.3'
+        >>> Version("1.2.3+abc").base_version
+        '1.2.3'
+        >>> Version("1!1.2.3+abc.dev1").base_version
+        '1!1.2.3'
+
+        The "base version" is the public version of the project without any pre or post
+        release markers.
+        """
         parts = []
 
         # Epoch
         if self.epoch != 0:
-            parts.append("{0}!".format(self.epoch))
+            parts.append(f"{self.epoch}!")
 
         # Release segment
         parts.append(".".join(str(x) for x in self.release))
@@ -408,41 +379,79 @@ def base_version(self):
         return "".join(parts)
 
     @property
-    def is_prerelease(self):
-        # type: () -> bool
+    def is_prerelease(self) -> bool:
+        """Whether this version is a pre-release.
+
+        >>> Version("1.2.3").is_prerelease
+        False
+        >>> Version("1.2.3a1").is_prerelease
+        True
+        >>> Version("1.2.3b1").is_prerelease
+        True
+        >>> Version("1.2.3rc1").is_prerelease
+        True
+        >>> Version("1.2.3dev1").is_prerelease
+        True
+        """
         return self.dev is not None or self.pre is not None
 
     @property
-    def is_postrelease(self):
-        # type: () -> bool
+    def is_postrelease(self) -> bool:
+        """Whether this version is a post-release.
+
+        >>> Version("1.2.3").is_postrelease
+        False
+        >>> Version("1.2.3.post1").is_postrelease
+        True
+        """
         return self.post is not None
 
     @property
-    def is_devrelease(self):
-        # type: () -> bool
+    def is_devrelease(self) -> bool:
+        """Whether this version is a development release.
+
+        >>> Version("1.2.3").is_devrelease
+        False
+        >>> Version("1.2.3.dev1").is_devrelease
+        True
+        """
         return self.dev is not None
 
     @property
-    def major(self):
-        # type: () -> int
+    def major(self) -> int:
+        """The first item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").major
+        1
+        """
         return self.release[0] if len(self.release) >= 1 else 0
 
     @property
-    def minor(self):
-        # type: () -> int
+    def minor(self) -> int:
+        """The second item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").minor
+        2
+        >>> Version("1").minor
+        0
+        """
         return self.release[1] if len(self.release) >= 2 else 0
 
     @property
-    def micro(self):
-        # type: () -> int
+    def micro(self) -> int:
+        """The third item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").micro
+        3
+        >>> Version("1").micro
+        0
+        """
         return self.release[2] if len(self.release) >= 3 else 0
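
Because `_BaseVersion` defines a total ordering over `_key`, parsed versions sort according to PEP 440; a small sketch::

    from packaging.version import Version

    versions = ["1.0.post1", "1.0", "1.0rc1", "1.0a1", "1.0.dev1"]
    print(sorted(versions, key=Version))
    # -> ['1.0.dev1', '1.0a1', '1.0rc1', '1.0', '1.0.post1']
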
 
 
 def _parse_letter_version(
-    letter,  # type: str
-    number,  # type: Union[str, bytes, SupportsInt]
-):
-    # type: (...) -> Optional[Tuple[str, int]]
+    letter: Optional[str], number: Union[str, bytes, SupportsInt, None]
+) -> Optional[Tuple[str, int]]:
 
     if letter:
         # We consider there to be an implicit 0 in a pre-release if there is
@@ -479,8 +488,7 @@ def _parse_letter_version(
 _local_version_separators = re.compile(r"[\._-]")
 
 
-def _parse_local_version(local):
-    # type: (str) -> Optional[LocalType]
+def _parse_local_version(local: Optional[str]) -> Optional[LocalType]:
     """
     Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
     """
@@ -493,14 +501,13 @@ def _parse_local_version(local):
 
 
 def _cmpkey(
-    epoch,  # type: int
-    release,  # type: Tuple[int, ...]
-    pre,  # type: Optional[Tuple[str, int]]
-    post,  # type: Optional[Tuple[str, int]]
-    dev,  # type: Optional[Tuple[str, int]]
-    local,  # type: Optional[Tuple[SubLocalType]]
-):
-    # type: (...) -> CmpKey
+    epoch: int,
+    release: Tuple[int, ...],
+    pre: Optional[Tuple[str, int]],
+    post: Optional[Tuple[str, int]],
+    dev: Optional[Tuple[str, int]],
+    local: Optional[LocalType],
+) -> CmpKey:
 
     # When we compare a release version, we want to compare it with all of the
     # trailing zeros removed. So we'll use a reverse the list, drop all the now
@@ -516,7 +523,7 @@ def _cmpkey(
     # if there is not a pre or a post segment. If we have one of those then
     # the normal sorting rules will handle this case correctly.
     if pre is None and post is None and dev is not None:
-        _pre = NegativeInfinity  # type: PrePostDevType
+        _pre: CmpPrePostDevType = NegativeInfinity
     # Versions without a pre-release (except as noted above) should sort after
     # those with one.
     elif pre is None:
@@ -526,21 +533,21 @@ def _cmpkey(
 
     # Versions without a post segment should sort before those with one.
     if post is None:
-        _post = NegativeInfinity  # type: PrePostDevType
+        _post: CmpPrePostDevType = NegativeInfinity
 
     else:
         _post = post
 
     # Versions without a development segment should sort after those with one.
     if dev is None:
-        _dev = Infinity  # type: PrePostDevType
+        _dev: CmpPrePostDevType = Infinity
 
     else:
         _dev = dev
 
     if local is None:
         # Versions without a local segment should sort before those with one.
-        _local = NegativeInfinity  # type: LocalType
+        _local: CmpLocalType = NegativeInfinity
     else:
         # Versions with a local segment need that segment parsed to implement
         # the sorting rules in PEP440.
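
A sketch of the local-segment ordering this key implements (assumed behavior per PEP 440: a local segment sorts after none, and numeric parts outrank alphanumeric ones)::

    from packaging.version import Version

    assert Version("1.0+abc") > Version("1.0")
    assert Version("1.0+5") > Version("1.0+abc")
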
diff --git a/conda_lock/_vendor/poetry/core/_vendor/pyparsing.LICENSE b/conda_lock/_vendor/poetry/core/_vendor/pyparsing.LICENSE
deleted file mode 100644
index 1bf98523..00000000
--- a/conda_lock/_vendor/poetry/core/_vendor/pyparsing.LICENSE
+++ /dev/null
@@ -1,18 +0,0 @@
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/conda_lock/_vendor/poetry/core/_vendor/pyparsing.py b/conda_lock/_vendor/poetry/core/_vendor/pyparsing.py
deleted file mode 100644
index 581d5bbb..00000000
--- a/conda_lock/_vendor/poetry/core/_vendor/pyparsing.py
+++ /dev/null
@@ -1,7107 +0,0 @@
-# -*- coding: utf-8 -*-
-# module pyparsing.py
-#
-# Copyright (c) 2003-2019  Paul T. McGuire
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-#
-
-__doc__ = \
-"""
-pyparsing module - Classes and methods to define and execute parsing grammars
-=============================================================================
-
-The pyparsing module is an alternative approach to creating and
-executing simple grammars, vs. the traditional lex/yacc approach, or the
-use of regular expressions.  With pyparsing, you don't need to learn
-a new syntax for defining grammars or matching expressions - the parsing
-module provides a library of classes that you use to construct the
-grammar directly in Python.
-
-Here is a program to parse "Hello, World!" (or any greeting of the form
-``", !"``), built up using :class:`Word`,
-:class:`Literal`, and :class:`And` elements
-(the :class:`'+'` operators create :class:`And` expressions,
-and the strings are auto-converted to :class:`Literal` expressions)::
-
-    from pyparsing import Word, alphas
-
-    # define grammar of a greeting
-    greet = Word(alphas) + "," + Word(alphas) + "!"
-
-    hello = "Hello, World!"
-    print (hello, "->", greet.parseString(hello))
-
-The program outputs the following::
-
-    Hello, World! -> ['Hello', ',', 'World', '!']
-
-The Python representation of the grammar is quite readable, owing to the
-self-explanatory class names, and the use of '+', '|' and '^' operators.
-
-The :class:`ParseResults` object returned from
-:class:`ParserElement.parseString` can be
-accessed as a nested list, a dictionary, or an object with named
-attributes.
-
-The pyparsing module handles some of the problems that are typically
-vexing when writing text parsers:
-
-  - extra or missing whitespace (the above program will also handle
-    "Hello,World!", "Hello  ,  World  !", etc.)
-  - quoted strings
-  - embedded comments
-
-
-Getting Started -
------------------
-Visit the classes :class:`ParserElement` and :class:`ParseResults` to
-see the base classes that most other pyparsing
-classes inherit from. Use the docstrings for examples of how to:
-
- - construct literal match expressions from :class:`Literal` and
-   :class:`CaselessLiteral` classes
- - construct character word-group expressions using the :class:`Word`
-   class
- - see how to create repetitive expressions using :class:`ZeroOrMore`
-   and :class:`OneOrMore` classes
- - use :class:`'+'`, :class:`'|'`, :class:`'^'`,
-   and :class:`'&'` operators to combine simple expressions into
-   more complex ones
- - associate names with your parsed results using
-   :class:`ParserElement.setResultsName`
- - access the parsed data, which is returned as a :class:`ParseResults`
-   object
- - find some helpful expression short-cuts like :class:`delimitedList`
-   and :class:`oneOf`
- - find more useful common expressions in the :class:`pyparsing_common`
-   namespace class
-"""
-
-__version__ = "2.4.7"
-__versionTime__ = "30 Mar 2020 00:43 UTC"
-__author__ = "Paul McGuire "
-
-import string
-from weakref import ref as wkref
-import copy
-import sys
-import warnings
-import re
-import sre_constants
-import collections
-import pprint
-import traceback
-import types
-from datetime import datetime
-from operator import itemgetter
-import itertools
-from functools import wraps
-from contextlib import contextmanager
-
-try:
-    # Python 3
-    from itertools import filterfalse
-except ImportError:
-    from itertools import ifilterfalse as filterfalse
-
-try:
-    from _thread import RLock
-except ImportError:
-    from threading import RLock
-
-try:
-    # Python 3
-    from collections.abc import Iterable
-    from collections.abc import MutableMapping, Mapping
-except ImportError:
-    # Python 2.7
-    from collections import Iterable
-    from collections import MutableMapping, Mapping
-
-try:
-    from collections import OrderedDict as _OrderedDict
-except ImportError:
-    try:
-        from ordereddict import OrderedDict as _OrderedDict
-    except ImportError:
-        _OrderedDict = None
-
-try:
-    from types import SimpleNamespace
-except ImportError:
-    class SimpleNamespace: pass
-
-# version compatibility configuration
-__compat__ = SimpleNamespace()
-__compat__.__doc__ = """
-    A cross-version compatibility configuration for pyparsing features that will be
-    released in a future version. By setting values in this configuration to True,
-    those features can be enabled in prior versions for compatibility development
-    and testing.
-
-     - collect_all_And_tokens - flag to enable fix for Issue #63 that fixes erroneous grouping
-       of results names when an And expression is nested within an Or or MatchFirst; set to
-       True to enable bugfix released in pyparsing 2.3.0, or False to preserve
-       pre-2.3.0 handling of named results
-"""
-__compat__.collect_all_And_tokens = True
-
-__diag__ = SimpleNamespace()
-__diag__.__doc__ = """
-Diagnostic configuration (all default to False)
-     - warn_multiple_tokens_in_named_alternation - flag to enable warnings when a results
-       name is defined on a MatchFirst or Or expression with one or more And subexpressions
-       (only warns if __compat__.collect_all_And_tokens is False)
-     - warn_ungrouped_named_tokens_in_collection - flag to enable warnings when a results
-       name is defined on a containing expression with ungrouped subexpressions that also
-       have results names
-     - warn_name_set_on_empty_Forward - flag to enable warnings whan a Forward is defined
-       with a results name, but has no contents defined
-     - warn_on_multiple_string_args_to_oneof - flag to enable warnings whan oneOf is
-       incorrectly called with multiple str arguments
-     - enable_debug_on_named_expressions - flag to auto-enable debug on all subsequent
-       calls to ParserElement.setName()
-"""
-__diag__.warn_multiple_tokens_in_named_alternation = False
-__diag__.warn_ungrouped_named_tokens_in_collection = False
-__diag__.warn_name_set_on_empty_Forward = False
-__diag__.warn_on_multiple_string_args_to_oneof = False
-__diag__.enable_debug_on_named_expressions = False
-__diag__._all_names = [nm for nm in vars(__diag__) if nm.startswith("enable_") or nm.startswith("warn_")]
-
-def _enable_all_warnings():
-    __diag__.warn_multiple_tokens_in_named_alternation = True
-    __diag__.warn_ungrouped_named_tokens_in_collection = True
-    __diag__.warn_name_set_on_empty_Forward = True
-    __diag__.warn_on_multiple_string_args_to_oneof = True
-__diag__.enable_all_warnings = _enable_all_warnings
-
-
-__all__ = ['__version__', '__versionTime__', '__author__', '__compat__', '__diag__',
-           'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
-           'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
-           'PrecededBy', 'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
-           'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
-           'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
-           'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',
-           'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', 'Char',
-           'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
-           'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
-           'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
-           'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
-           'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
-           'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
-           'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
-           'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
-           'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
-           'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation', 'locatedExpr', 'withClass',
-           'CloseMatch', 'tokenMap', 'pyparsing_common', 'pyparsing_unicode', 'unicode_set',
-           'conditionAsParseAction', 're',
-           ]
-
-system_version = tuple(sys.version_info)[:3]
-PY_3 = system_version[0] == 3
-if PY_3:
-    _MAX_INT = sys.maxsize
-    basestring = str
-    unichr = chr
-    unicode = str
-    _ustr = str
-
-    # build list of single arg builtins, that can be used as parse actions
-    singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
-
-else:
-    _MAX_INT = sys.maxint
-    range = xrange
-
-    def _ustr(obj):
-        """Drop-in replacement for str(obj) that tries to be Unicode
-        friendly. It first tries str(obj). If that fails with
-        a UnicodeEncodeError, then it tries unicode(obj). It then
-        < returns the unicode object | encodes it with the default
-        encoding | ... >.
-        """
-        if isinstance(obj, unicode):
-            return obj
-
-        try:
-            # If this works, then _ustr(obj) has the same behaviour as str(obj), so
-            # it won't break any existing code.
-            return str(obj)
-
-        except UnicodeEncodeError:
-            # Else encode it
-            ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
-            xmlcharref = Regex(r'&#\d+;')
-            xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
-            return xmlcharref.transformString(ret)
-
-    # build list of single arg builtins, tolerant of Python version, that can be used as parse actions
-    singleArgBuiltins = []
-    import __builtin__
-
-    for fname in "sum len sorted reversed list tuple set any all min max".split():
-        try:
-            singleArgBuiltins.append(getattr(__builtin__, fname))
-        except AttributeError:
-            continue
-
-_generatorType = type((y for y in range(1)))
-
-def _xml_escape(data):
-    """Escape &, <, >, ", ', etc. in a string of data."""
-
-    # ampersand must be replaced first
-    from_symbols = '&><"\''
-    to_symbols = ('&' + s + ';' for s in "amp gt lt quot apos".split())
-    for from_, to_ in zip(from_symbols, to_symbols):
-        data = data.replace(from_, to_)
-    return data
-
-alphas = string.ascii_uppercase + string.ascii_lowercase
-nums = "0123456789"
-hexnums = nums + "ABCDEFabcdef"
-alphanums = alphas + nums
-_bslash = chr(92)
-printables = "".join(c for c in string.printable if c not in string.whitespace)
-
-
-def conditionAsParseAction(fn, message=None, fatal=False):
-    msg = message if message is not None else "failed user-defined condition"
-    exc_type = ParseFatalException if fatal else ParseException
-    fn = _trim_arity(fn)
-
-    @wraps(fn)
-    def pa(s, l, t):
-        if not bool(fn(s, l, t)):
-            raise exc_type(s, l, msg)
-
-    return pa
-
-class ParseBaseException(Exception):
-    """base exception class for all parsing runtime exceptions"""
-    # Performance tuning: we construct a *lot* of these, so keep this
-    # constructor as small and fast as possible
-    def __init__(self, pstr, loc=0, msg=None, elem=None):
-        self.loc = loc
-        if msg is None:
-            self.msg = pstr
-            self.pstr = ""
-        else:
-            self.msg = msg
-            self.pstr = pstr
-        self.parserElement = elem
-        self.args = (pstr, loc, msg)
-
-    @classmethod
-    def _from_exception(cls, pe):
-        """
-        internal factory method to simplify creating one type of ParseException
-        from another - avoids having __init__ signature conflicts among subclasses
-        """
-        return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
-
-    def __getattr__(self, aname):
-        """supported attributes by name are:
-           - lineno - returns the line number of the exception text
-           - col - returns the column number of the exception text
-           - line - returns the line containing the exception text
-        """
-        if aname == "lineno":
-            return lineno(self.loc, self.pstr)
-        elif aname in ("col", "column"):
-            return col(self.loc, self.pstr)
-        elif aname == "line":
-            return line(self.loc, self.pstr)
-        else:
-            raise AttributeError(aname)
-
-    def __str__(self):
-        if self.pstr:
-            if self.loc >= len(self.pstr):
-                foundstr = ', found end of text'
-            else:
-                foundstr = (', found %r' % self.pstr[self.loc:self.loc + 1]).replace(r'\\', '\\')
-        else:
-            foundstr = ''
-        return ("%s%s  (at char %d), (line:%d, col:%d)" %
-                   (self.msg, foundstr, self.loc, self.lineno, self.column))
-    def __repr__(self):
-        return _ustr(self)
-    def markInputline(self, markerString=">!<"):
-        """Extracts the exception line from the input string, and marks
-           the location of the exception with a special symbol.
-        """
-        line_str = self.line
-        line_column = self.column - 1
-        if markerString:
-            line_str = "".join((line_str[:line_column],
-                                markerString, line_str[line_column:]))
-        return line_str.strip()
-    def __dir__(self):
-        return "lineno col line".split() + dir(type(self))
-
-class ParseException(ParseBaseException):
-    """
-    Exception thrown when parse expressions don't match class;
-    supported attributes by name are:
-    - lineno - returns the line number of the exception text
-    - col - returns the column number of the exception text
-    - line - returns the line containing the exception text
-
-    Example::
-
-        try:
-            Word(nums).setName("integer").parseString("ABC")
-        except ParseException as pe:
-            print(pe)
-            print("column: {}".format(pe.col))
-
-    prints::
-
-       Expected integer (at char 0), (line:1, col:1)
-        column: 1
-
-    """
-
-    @staticmethod
-    def explain(exc, depth=16):
-        """
-        Method to take an exception and translate the Python internal traceback into a list
-        of the pyparsing expressions that caused the exception to be raised.
-
-        Parameters:
-
-         - exc - exception raised during parsing (need not be a ParseException, in support
-           of Python exceptions that might be raised in a parse action)
-         - depth (default=16) - number of levels back in the stack trace to list expression
-           and function names; if None, the full stack trace names will be listed; if 0, only
-           the failing input line, marker, and exception string will be shown
-
-        Returns a multi-line string listing the ParserElements and/or function names in the
-        exception's stack trace.
-
-        Note: the diagnostic output will include string representations of the expressions
-        that failed to parse. These representations will be more helpful if you use `setName` to
-        give identifiable names to your expressions. Otherwise they will use the default string
-        forms, which may be cryptic to read.
-
-        explain() is only supported under Python 3.
-        """
-        import inspect
-
-        if depth is None:
-            depth = sys.getrecursionlimit()
-        ret = []
-        if isinstance(exc, ParseBaseException):
-            ret.append(exc.line)
-            ret.append(' ' * (exc.col - 1) + '^')
-        ret.append("{0}: {1}".format(type(exc).__name__, exc))
-
-        if depth > 0:
-            callers = inspect.getinnerframes(exc.__traceback__, context=depth)
-            seen = set()
-            for i, ff in enumerate(callers[-depth:]):
-                frm = ff[0]
-
-                f_self = frm.f_locals.get('self', None)
-                if isinstance(f_self, ParserElement):
-                    if frm.f_code.co_name not in ('parseImpl', '_parseNoCache'):
-                        continue
-                    if f_self in seen:
-                        continue
-                    seen.add(f_self)
-
-                    self_type = type(f_self)
-                    ret.append("{0}.{1} - {2}".format(self_type.__module__,
-                                                      self_type.__name__,
-                                                      f_self))
-                elif f_self is not None:
-                    self_type = type(f_self)
-                    ret.append("{0}.{1}".format(self_type.__module__,
-                                                self_type.__name__))
-                else:
-                    code = frm.f_code
-                    if code.co_name in ('wrapper', '<module>'):
-                        continue
-
-                    ret.append("{0}".format(code.co_name))
-
-                depth -= 1
-                if not depth:
-                    break
-
-        return '\n'.join(ret)
-
-
-class ParseFatalException(ParseBaseException):
-    """user-throwable exception thrown when inconsistent parse content
-       is found; stops all parsing immediately"""
-    pass
-
-class ParseSyntaxException(ParseFatalException):
-    """just like :class:`ParseFatalException`, but thrown internally
-    when an :class:`ErrorStop` ('-' operator) indicates
-    that parsing is to stop immediately because an unbacktrackable
-    syntax error has been found.
-    """
-    pass
-
-#~ class ReparseException(ParseBaseException):
-    #~ """Experimental class - parse actions can raise this exception to cause
-       #~ pyparsing to reparse the input string:
-        #~ - with a modified input string, and/or
-        #~ - with a modified start location
-       #~ Set the values of the ReparseException in the constructor, and raise the
-       #~ exception in a parse action to cause pyparsing to use the new string/location.
-       #~ Setting the values as None causes no change to be made.
-       #~ """
-    #~ def __init_( self, newstring, restartLoc ):
-        #~ self.newParseText = newstring
-        #~ self.reparseLoc = restartLoc
-
-class RecursiveGrammarException(Exception):
-    """exception thrown by :class:`ParserElement.validate` if the
-    grammar could be improperly recursive
-    """
-    def __init__(self, parseElementList):
-        self.parseElementTrace = parseElementList
-
-    def __str__(self):
-        return "RecursiveGrammarException: %s" % self.parseElementTrace
-
-class _ParseResultsWithOffset(object):
-    def __init__(self, p1, p2):
-        self.tup = (p1, p2)
-    def __getitem__(self, i):
-        return self.tup[i]
-    def __repr__(self):
-        return repr(self.tup[0])
-    def setOffset(self, i):
-        self.tup = (self.tup[0], i)
-
-class ParseResults(object):
-    """Structured parse results, to provide multiple means of access to
-    the parsed data:
-
-       - as a list (``len(results)``)
-       - by list index (``results[0], results[1]``, etc.)
-       - by attribute (``results.<resultsName>`` - see :class:`ParserElement.setResultsName`)
-
-    Example::
-
-        integer = Word(nums)
-        date_str = (integer.setResultsName("year") + '/'
-                        + integer.setResultsName("month") + '/'
-                        + integer.setResultsName("day"))
-        # equivalent form:
-        # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
-        # parseString returns a ParseResults object
-        result = date_str.parseString("1999/12/31")
-
-        def test(s, fn=repr):
-            print("%s -> %s" % (s, fn(eval(s))))
-        test("list(result)")
-        test("result[0]")
-        test("result['month']")
-        test("result.day")
-        test("'month' in result")
-        test("'minutes' in result")
-        test("result.dump()", str)
-
-    prints::
-
-        list(result) -> ['1999', '/', '12', '/', '31']
-        result[0] -> '1999'
-        result['month'] -> '12'
-        result.day -> '31'
-        'month' in result -> True
-        'minutes' in result -> False
-        result.dump() -> ['1999', '/', '12', '/', '31']
-        - day: 31
-        - month: 12
-        - year: 1999
-    """
-    def __new__(cls, toklist=None, name=None, asList=True, modal=True):
-        if isinstance(toklist, cls):
-            return toklist
-        retobj = object.__new__(cls)
-        retobj.__doinit = True
-        return retobj
-
-    # Performance tuning: we construct a *lot* of these, so keep this
-    # constructor as small and fast as possible
-    def __init__(self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance):
-        if self.__doinit:
-            self.__doinit = False
-            self.__name = None
-            self.__parent = None
-            self.__accumNames = {}
-            self.__asList = asList
-            self.__modal = modal
-            if toklist is None:
-                toklist = []
-            if isinstance(toklist, list):
-                self.__toklist = toklist[:]
-            elif isinstance(toklist, _generatorType):
-                self.__toklist = list(toklist)
-            else:
-                self.__toklist = [toklist]
-            self.__tokdict = dict()
-
-        if name is not None and name:
-            if not modal:
-                self.__accumNames[name] = 0
-            if isinstance(name, int):
-                name = _ustr(name)  # will always return a str, but use _ustr for consistency
-            self.__name = name
-            if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None, '', [])):
-                if isinstance(toklist, basestring):
-                    toklist = [toklist]
-                if asList:
-                    if isinstance(toklist, ParseResults):
-                        self[name] = _ParseResultsWithOffset(ParseResults(toklist.__toklist), 0)
-                    else:
-                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]), 0)
-                    self[name].__name = name
-                else:
-                    try:
-                        self[name] = toklist[0]
-                    except (KeyError, TypeError, IndexError):
-                        self[name] = toklist
-
-    def __getitem__(self, i):
-        if isinstance(i, (int, slice)):
-            return self.__toklist[i]
-        else:
-            if i not in self.__accumNames:
-                return self.__tokdict[i][-1][0]
-            else:
-                return ParseResults([v[0] for v in self.__tokdict[i]])
-
-    def __setitem__(self, k, v, isinstance=isinstance):
-        if isinstance(v, _ParseResultsWithOffset):
-            self.__tokdict[k] = self.__tokdict.get(k, list()) + [v]
-            sub = v[0]
-        elif isinstance(k, (int, slice)):
-            self.__toklist[k] = v
-            sub = v
-        else:
-            self.__tokdict[k] = self.__tokdict.get(k, list()) + [_ParseResultsWithOffset(v, 0)]
-            sub = v
-        if isinstance(sub, ParseResults):
-            sub.__parent = wkref(self)
-
-    def __delitem__(self, i):
-        if isinstance(i, (int, slice)):
-            mylen = len(self.__toklist)
-            del self.__toklist[i]
-
-            # convert int to slice
-            if isinstance(i, int):
-                if i < 0:
-                    i += mylen
-                i = slice(i, i + 1)
-            # get removed indices
-            removed = list(range(*i.indices(mylen)))
-            removed.reverse()
-            # fixup indices in token dictionary
-            for name, occurrences in self.__tokdict.items():
-                for j in removed:
-                    for k, (value, position) in enumerate(occurrences):
-                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
-        else:
-            del self.__tokdict[i]
-
-    def __contains__(self, k):
-        return k in self.__tokdict
-
-    def __len__(self):
-        return len(self.__toklist)
-
-    def __bool__(self):
-        return (not not self.__toklist)
-    __nonzero__ = __bool__
-
-    def __iter__(self):
-        return iter(self.__toklist)
-
-    def __reversed__(self):
-        return iter(self.__toklist[::-1])
-
-    def _iterkeys(self):
-        if hasattr(self.__tokdict, "iterkeys"):
-            return self.__tokdict.iterkeys()
-        else:
-            return iter(self.__tokdict)
-
-    def _itervalues(self):
-        return (self[k] for k in self._iterkeys())
-
-    def _iteritems(self):
-        return ((k, self[k]) for k in self._iterkeys())
-
-    if PY_3:
-        keys = _iterkeys
-        """Returns an iterator of all named result keys."""
-
-        values = _itervalues
-        """Returns an iterator of all named result values."""
-
-        items = _iteritems
-        """Returns an iterator of all named result key-value tuples."""
-
-    else:
-        iterkeys = _iterkeys
-        """Returns an iterator of all named result keys (Python 2.x only)."""
-
-        itervalues = _itervalues
-        """Returns an iterator of all named result values (Python 2.x only)."""
-
-        iteritems = _iteritems
-        """Returns an iterator of all named result key-value tuples (Python 2.x only)."""
-
-        def keys(self):
-            """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
-            return list(self.iterkeys())
-
-        def values(self):
-            """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
-            return list(self.itervalues())
-
-        def items(self):
-            """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
-            return list(self.iteritems())
-
-    def haskeys(self):
-        """Since keys() returns an iterator, this method is helpful in bypassing
-           code that looks for the existence of any defined results names."""
-        return bool(self.__tokdict)
-
-    def pop(self, *args, **kwargs):
-        """
-        Removes and returns item at specified index (default= ``last``).
-        Supports both ``list`` and ``dict`` semantics for ``pop()``. If
-        passed no argument or an integer argument, it will use ``list``
-        semantics and pop tokens from the list of parsed tokens. If passed
-        a non-integer argument (most likely a string), it will use ``dict``
-        semantics and pop the corresponding value from any defined results
-        names. A second default return value argument is supported, just as in
-        ``dict.pop()``.
-
-        Example::
-
-            def remove_first(tokens):
-                tokens.pop(0)
-            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
-            print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
-
-            label = Word(alphas)
-            patt = label("LABEL") + OneOrMore(Word(nums))
-            print(patt.parseString("AAB 123 321").dump())
-
-            # Use pop() in a parse action to remove named result (note that corresponding value is not
-            # removed from list form of results)
-            def remove_LABEL(tokens):
-                tokens.pop("LABEL")
-                return tokens
-            patt.addParseAction(remove_LABEL)
-            print(patt.parseString("AAB 123 321").dump())
-
-        prints::
-
-            ['AAB', '123', '321']
-            - LABEL: AAB
-
-            ['AAB', '123', '321']
-        """
-        if not args:
-            args = [-1]
-        for k, v in kwargs.items():
-            if k == 'default':
-                args = (args[0], v)
-            else:
-                raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
-        if (isinstance(args[0], int)
-                or len(args) == 1
-                or args[0] in self):
-            index = args[0]
-            ret = self[index]
-            del self[index]
-            return ret
-        else:
-            defaultvalue = args[1]
-            return defaultvalue
-
-    def get(self, key, defaultValue=None):
-        """
-        Returns named result matching the given key, or if there is no
-        such name, then returns the given ``defaultValue`` or ``None`` if no
-        ``defaultValue`` is specified.
-
-        Similar to ``dict.get()``.
-
-        Example::
-
-            integer = Word(nums)
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
-            result = date_str.parseString("1999/12/31")
-            print(result.get("year")) # -> '1999'
-            print(result.get("hour", "not specified")) # -> 'not specified'
-            print(result.get("hour")) # -> None
-        """
-        if key in self:
-            return self[key]
-        else:
-            return defaultValue
-
-    def insert(self, index, insStr):
-        """
-        Inserts new element at location index in the list of parsed tokens.
-
-        Similar to ``list.insert()``.
-
-        Example::
-
-            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
-
-            # use a parse action to insert the parse location in the front of the parsed results
-            def insert_locn(locn, tokens):
-                tokens.insert(0, locn)
-            print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
-        """
-        self.__toklist.insert(index, insStr)
-        # fixup indices in token dictionary
-        for name, occurrences in self.__tokdict.items():
-            for k, (value, position) in enumerate(occurrences):
-                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
-
-    def append(self, item):
-        """
-        Add single element to end of ParseResults list of elements.
-
-        Example::
-
-            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
-
-            # use a parse action to compute the sum of the parsed integers, and add it to the end
-            def append_sum(tokens):
-                tokens.append(sum(map(int, tokens)))
-            print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
-        """
-        self.__toklist.append(item)
-
-    def extend(self, itemseq):
-        """
-        Add sequence of elements to end of ParseResults list of elements.
-
-        Example::
-
-            patt = OneOrMore(Word(alphas))
-
-            # use a parse action to append the reverse of the matched strings, to make a palindrome
-            def make_palindrome(tokens):
-                tokens.extend(reversed([t[::-1] for t in tokens]))
-                return ''.join(tokens)
-            print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
-        """
-        if isinstance(itemseq, ParseResults):
-            self.__iadd__(itemseq)
-        else:
-            self.__toklist.extend(itemseq)
-
-    def clear(self):
-        """
-        Clear all elements and results names.
-        """
-        del self.__toklist[:]
-        self.__tokdict.clear()
-
-    def __getattr__(self, name):
-        try:
-            return self[name]
-        except KeyError:
-            return ""
-
-    def __add__(self, other):
-        ret = self.copy()
-        ret += other
-        return ret
-
-    def __iadd__(self, other):
-        if other.__tokdict:
-            offset = len(self.__toklist)
-            addoffset = lambda a: offset if a < 0 else a + offset
-            otheritems = other.__tokdict.items()
-            otherdictitems = [(k, _ParseResultsWithOffset(v[0], addoffset(v[1])))
-                              for k, vlist in otheritems for v in vlist]
-            for k, v in otherdictitems:
-                self[k] = v
-                if isinstance(v[0], ParseResults):
-                    v[0].__parent = wkref(self)
-
-        self.__toklist += other.__toklist
-        self.__accumNames.update(other.__accumNames)
-        return self
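-
-    # Illustrative sketch (not part of the original module): on concatenation,
-    # named-result positions from the right operand are shifted by the length
-    # of the left operand's token list:
-    #
-    #     a = Word(nums)("lhs").parseString("1")
-    #     a += Word(nums)("rhs").parseString("2")
-    #     a.asList()  # -> ['1', '2'];  'rhs' now refers to index 1, not 0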
-
-    def __radd__(self, other):
-        if isinstance(other, int) and other == 0:
-            # useful for merging many ParseResults using sum() builtin
-            return self.copy()
-        else:
-            # this may raise a TypeError - so be it
-            return other + self
-
-    def __repr__(self):
-        return "(%s, %s)" % (repr(self.__toklist), repr(self.__tokdict))
-
-    def __str__(self):
-        return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
-
-    def _asStringList(self, sep=''):
-        out = []
-        for item in self.__toklist:
-            if out and sep:
-                out.append(sep)
-            if isinstance(item, ParseResults):
-                out += item._asStringList()
-            else:
-                out.append(_ustr(item))
-        return out
-
-    def asList(self):
-        """
-        Returns the parse results as a nested list of matching tokens, all converted to strings.
-
-        Example::
-
-            patt = OneOrMore(Word(alphas))
-            result = patt.parseString("sldkj lsdkj sldkj")
-            # even though the result prints in string-like form, it is actually a pyparsing ParseResults
-            print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
-
-            # Use asList() to create an actual list
-            result_list = result.asList()
-            print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
-        """
-        return [res.asList() if isinstance(res, ParseResults) else res for res in self.__toklist]
-
-    def asDict(self):
-        """
-        Returns the named parse results as a nested dictionary.
-
-        Example::
-
-            integer = Word(nums)
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
-            result = date_str.parseString('12/31/1999')
-            print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
-
-            result_dict = result.asDict()
-            print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
-
-            # even though a ParseResults supports dict-like access, sometimes you just need to have a dict
-            import json
-            print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
-            print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
-        """
-        if PY_3:
-            item_fn = self.items
-        else:
-            item_fn = self.iteritems
-
-        def toItem(obj):
-            if isinstance(obj, ParseResults):
-                if obj.haskeys():
-                    return obj.asDict()
-                else:
-                    return [toItem(v) for v in obj]
-            else:
-                return obj
-
-        return dict((k, toItem(v)) for k, v in item_fn())
-
-    def copy(self):
-        """
-        Returns a new copy of a :class:`ParseResults` object.
-        """
-        ret = ParseResults(self.__toklist)
-        ret.__tokdict = dict(self.__tokdict.items())
-        ret.__parent = self.__parent
-        ret.__accumNames.update(self.__accumNames)
-        ret.__name = self.__name
-        return ret
-
-    def asXML(self, doctag=None, namedItemsOnly=False, indent="", formatted=True):
-        """
-        (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
-        """
-        nl = "\n"
-        out = []
-        namedItems = dict((v[1], k) for (k, vlist) in self.__tokdict.items()
-                          for v in vlist)
-        nextLevelIndent = indent + "  "
-
-        # collapse out indents if formatting is not desired
-        if not formatted:
-            indent = ""
-            nextLevelIndent = ""
-            nl = ""
-
-        selfTag = None
-        if doctag is not None:
-            selfTag = doctag
-        else:
-            if self.__name:
-                selfTag = self.__name
-
-        if not selfTag:
-            if namedItemsOnly:
-                return ""
-            else:
-                selfTag = "ITEM"
-
-        out += [nl, indent, "<", selfTag, ">"]
-
-        for i, res in enumerate(self.__toklist):
-            if isinstance(res, ParseResults):
-                if i in namedItems:
-                    out += [res.asXML(namedItems[i],
-                                      namedItemsOnly and doctag is None,
-                                      nextLevelIndent,
-                                      formatted)]
-                else:
-                    out += [res.asXML(None,
-                                      namedItemsOnly and doctag is None,
-                                      nextLevelIndent,
-                                      formatted)]
-            else:
-                # individual token, see if there is a name for it
-                resTag = None
-                if i in namedItems:
-                    resTag = namedItems[i]
-                if not resTag:
-                    if namedItemsOnly:
-                        continue
-                    else:
-                        resTag = "ITEM"
-                xmlBodyText = _xml_escape(_ustr(res))
-                out += [nl, nextLevelIndent, "<", resTag, ">",
-                        xmlBodyText,
-                                                ""]
-
-        out += [nl, indent, ""]
-        return "".join(out)
-
-    def __lookup(self, sub):
-        for k, vlist in self.__tokdict.items():
-            for v, loc in vlist:
-                if sub is v:
-                    return k
-        return None
-
-    def getName(self):
-        r"""
-        Returns the results name for this token expression. Useful when several
-        different expressions might match at a particular location.
-
-        Example::
-
-            integer = Word(nums)
-            ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
-            house_number_expr = Suppress('#') + Word(nums, alphanums)
-            user_data = (Group(house_number_expr)("house_number")
-                        | Group(ssn_expr)("ssn")
-                        | Group(integer)("age"))
-            user_info = OneOrMore(user_data)
-
-            result = user_info.parseString("22 111-22-3333 #221B")
-            for item in result:
-                print(item.getName(), ':', item[0])
-
-        prints::
-
-            age : 22
-            ssn : 111-22-3333
-            house_number : 221B
-        """
-        if self.__name:
-            return self.__name
-        elif self.__parent:
-            par = self.__parent()
-            if par:
-                return par.__lookup(self)
-            else:
-                return None
-        elif (len(self) == 1
-              and len(self.__tokdict) == 1
-              and next(iter(self.__tokdict.values()))[0][1] in (0, -1)):
-            return next(iter(self.__tokdict.keys()))
-        else:
-            return None
-
-    def dump(self, indent='', full=True, include_list=True, _depth=0):
-        """
-        Diagnostic method for listing out the contents of
-        a :class:`ParseResults`. Accepts an optional ``indent`` argument so
-        that this string can be embedded in a nested display of other data.
-
-        Example::
-
-            integer = Word(nums)
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
-            result = date_str.parseString('12/31/1999')
-            print(result.dump())
-
-        prints::
-
-            ['12', '/', '31', '/', '1999']
-            - day: 1999
-            - month: 31
-            - year: 12
-        """
-        out = []
-        NL = '\n'
-        if include_list:
-            out.append(indent + _ustr(self.asList()))
-        else:
-            out.append('')
-
-        if full:
-            if self.haskeys():
-                items = sorted((str(k), v) for k, v in self.items())
-                for k, v in items:
-                    if out:
-                        out.append(NL)
-                    out.append("%s%s- %s: " % (indent, ('  ' * _depth), k))
-                    if isinstance(v, ParseResults):
-                        if v:
-                            out.append(v.dump(indent=indent, full=full, include_list=include_list, _depth=_depth + 1))
-                        else:
-                            out.append(_ustr(v))
-                    else:
-                        out.append(repr(v))
-            elif any(isinstance(vv, ParseResults) for vv in self):
-                v = self
-                for i, vv in enumerate(v):
-                    if isinstance(vv, ParseResults):
-                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,
-                                                            ('  ' * (_depth)),
-                                                            i,
-                                                            indent,
-                                                            ('  ' * (_depth + 1)),
-                                                            vv.dump(indent=indent,
-                                                                    full=full,
-                                                                    include_list=include_list,
-                                                                    _depth=_depth + 1)))
-                    else:
-                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,
-                                                            ('  ' * (_depth)),
-                                                            i,
-                                                            indent,
-                                                            ('  ' * (_depth + 1)),
-                                                            _ustr(vv)))
-
-        return "".join(out)
-
-    def pprint(self, *args, **kwargs):
-        """
-        Pretty-printer for parsed results as a list, using the
-        `pprint <https://docs.python.org/3/library/pprint.html>`_ module.
-        Accepts additional positional or keyword args as defined for
-        `pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ .
-
-        Example::
-
-            ident = Word(alphas, alphanums)
-            num = Word(nums)
-            func = Forward()
-            term = ident | num | Group('(' + func + ')')
-            func <<= ident + Group(Optional(delimitedList(term)))
-            result = func.parseString("fna a,b,(fnb c,d,200),100")
-            result.pprint(width=40)
-
-        prints::
-
-            ['fna',
-             ['a',
-              'b',
-              ['(', 'fnb', ['c', 'd', '200'], ')'],
-              '100']]
-        """
-        pprint.pprint(self.asList(), *args, **kwargs)
-
-    # add support for pickle protocol
-    def __getstate__(self):
-        return (self.__toklist,
-                (self.__tokdict.copy(),
-                 self.__parent is not None and self.__parent() or None,
-                 self.__accumNames,
-                 self.__name))
-
-    def __setstate__(self, state):
-        self.__toklist = state[0]
-        self.__tokdict, par, inAccumNames, self.__name = state[1]
-        self.__accumNames = {}
-        self.__accumNames.update(inAccumNames)
-        if par is not None:
-            self.__parent = wkref(par)
-        else:
-            self.__parent = None
-
-    def __getnewargs__(self):
-        return self.__toklist, self.__name, self.__asList, self.__modal
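-
-    # Illustrative sketch (not part of the original module): the pickle hooks
-    # above let results round-trip through pickle (date_str as defined in the
-    # docstring examples):
-    #
-    #     import pickle
-    #     result = date_str.parseString("1999/12/31")
-    #     restored = pickle.loads(pickle.dumps(result))
-    #     restored["year"]  # -> '1999'; named results survive the round trip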
-
-    def __dir__(self):
-        return dir(type(self)) + list(self.keys())
-
-    @classmethod
-    def from_dict(cls, other, name=None):
-        """
-        Helper classmethod to construct a ParseResults from a dict, preserving the
-        name-value relations as results names. If an optional 'name' argument is
-        given, a nested ParseResults will be returned.
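-
-        Example (an illustrative sketch; nested dicts become nested results)::
-
-            result = ParseResults.from_dict(
-                {"name": "Bob", "address": {"city": "Springfield"}})
-            print(result.name)          # -> 'Bob'
-            print(result.address.city)  # -> 'Springfield'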
-        """
-        def is_iterable(obj):
-            try:
-                iter(obj)
-            except Exception:
-                return False
-            else:
-                if PY_3:
-                    return not isinstance(obj, (str, bytes))
-                else:
-                    return not isinstance(obj, basestring)
-
-        ret = cls([])
-        for k, v in other.items():
-            if isinstance(v, Mapping):
-                ret += cls.from_dict(v, name=k)
-            else:
-                ret += cls([v], name=k, asList=is_iterable(v))
-        if name is not None:
-            ret = cls([ret], name=name)
-        return ret
-
-MutableMapping.register(ParseResults)
-
-def col(loc, strg):
-    """Returns current column within a string, counting newlines as line separators.
-   The first column is number 1.
-
-   Note: the default parsing behavior is to expand tabs in the input string
-   before starting the parsing process.  See
-   :class:`ParserElement.parseString` for more
-   information on parsing strings containing ``<TAB>`` s, and suggested
-   methods to maintain a consistent view of the parsed string, the parse
-   location, and line and column positions within the parsed string.
-   """
-    s = strg
-    return 1 if 0 < loc < len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
-
-def lineno(loc, strg):
-    """Returns current line number within a string, counting newlines as line separators.
-    The first line is number 1.
-
-    Note - the default parsing behavior is to expand tabs in the input string
-    before starting the parsing process.  See :class:`ParserElement.parseString`
-    for more information on parsing strings containing ``<TAB>`` s, and
-    suggested methods to maintain a consistent view of the parsed string, the
-    parse location, and line and column positions within the parsed string.
-    """
-    return strg.count("\n", 0, loc) + 1
-
-def line(loc, strg):
-    """Returns the line of text containing loc within a string, counting newlines as line separators.
-       """
-    lastCR = strg.rfind("\n", 0, loc)
-    nextCR = strg.find("\n", loc)
-    if nextCR >= 0:
-        return strg[lastCR + 1:nextCR]
-    else:
-        return strg[lastCR + 1:]
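-
-# Illustrative sketch (not part of the original module) showing how col,
-# lineno, and line relate on a two-line input; _example_locn_helpers is a
-# hypothetical name used only for this demonstration.
-def _example_locn_helpers():
-    s = "abc\ndef"
-    assert lineno(5, s) == 2     # loc 5 is 'e', on the second line
-    assert col(5, s) == 2        # 'e' is the second column of its line
-    assert line(5, s) == "def"   # full text of the line containing loc 5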
-
-def _defaultStartDebugAction(instring, loc, expr):
-    print(("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % (lineno(loc, instring), col(loc, instring))))
-
-def _defaultSuccessDebugAction(instring, startloc, endloc, expr, toks):
-    print("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
-
-def _defaultExceptionDebugAction(instring, loc, expr, exc):
-    print("Exception raised:" + _ustr(exc))
-
-def nullDebugAction(*args):
-    """'Do-nothing' debug action, to suppress debugging output during parsing."""
-    pass
-
-# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
-#~ 'decorator to trim function calls to match the arity of the target'
-#~ def _trim_arity(func, maxargs=3):
-    #~ if func in singleArgBuiltins:
-        #~ return lambda s,l,t: func(t)
-    #~ limit = 0
-    #~ foundArity = False
-    #~ def wrapper(*args):
-        #~ nonlocal limit,foundArity
-        #~ while 1:
-            #~ try:
-                #~ ret = func(*args[limit:])
-                #~ foundArity = True
-                #~ return ret
-            #~ except TypeError:
-                #~ if limit == maxargs or foundArity:
-                    #~ raise
-                #~ limit += 1
-                #~ continue
-    #~ return wrapper
-
-# this version is Python 2.x-3.x cross-compatible
-'decorator to trim function calls to match the arity of the target'
-def _trim_arity(func, maxargs=2):
-    if func in singleArgBuiltins:
-        return lambda s, l, t: func(t)
-    limit = [0]
-    foundArity = [False]
-
-    # traceback return data structure changed in Py3.5 - normalize back to plain tuples
-    if system_version[:2] >= (3, 5):
-        def extract_stack(limit=0):
-            # special handling for Python 3.5.0 - extra deep call stack by 1
-            offset = -3 if system_version == (3, 5, 0) else -2
-            frame_summary = traceback.extract_stack(limit=-offset + limit - 1)[offset]
-            return [frame_summary[:2]]
-        def extract_tb(tb, limit=0):
-            frames = traceback.extract_tb(tb, limit=limit)
-            frame_summary = frames[-1]
-            return [frame_summary[:2]]
-    else:
-        extract_stack = traceback.extract_stack
-        extract_tb = traceback.extract_tb
-
-    # synthesize what would be returned by traceback.extract_stack at the call to
-    # user's parse action 'func', so that we don't incur call penalty at parse time
-
-    LINE_DIFF = 6
-    # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
-    # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
-    this_line = extract_stack(limit=2)[-1]
-    pa_call_line_synth = (this_line[0], this_line[1] + LINE_DIFF)
-
-    def wrapper(*args):
-        while 1:
-            try:
-                ret = func(*args[limit[0]:])
-                foundArity[0] = True
-                return ret
-            except TypeError:
-                # re-raise TypeErrors if they did not come from our arity testing
-                if foundArity[0]:
-                    raise
-                else:
-                    try:
-                        tb = sys.exc_info()[-1]
-                        if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
-                            raise
-                    finally:
-                        try:
-                            del tb
-                        except NameError:
-                            pass
-
-                if limit[0] <= maxargs:
-                    limit[0] += 1
-                    continue
-                raise
-
-    # copy func name to wrapper for sensible debug output
-    func_name = ""
-    try:
-        func_name = getattr(func, '__name__',
-                            getattr(func, '__class__').__name__)
-    except Exception:
-        func_name = str(func)
-    wrapper.__name__ = func_name
-
-    return wrapper
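-
-# Illustrative sketch (not part of the original module): after _trim_arity
-# wrapping, parse actions may be written with any supported signature
-# (assuming integer = Word(nums) as in the docstring examples); these are
-# all equivalent:
-#
-#     integer.setParseAction(lambda s, l, t: int(t[0]))
-#     integer.setParseAction(lambda l, t: int(t[0]))
-#     integer.setParseAction(lambda t: int(t[0]))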
-
-
-class ParserElement(object):
-    """Abstract base level parser element class."""
-    DEFAULT_WHITE_CHARS = " \n\t\r"
-    verbose_stacktrace = False
-
-    @staticmethod
-    def setDefaultWhitespaceChars(chars):
-        r"""
-        Overrides the default whitespace chars
-
-        Example::
-
-            # default whitespace chars are space, <TAB> and newline
-            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def', 'ghi', 'jkl']
-
-            # change to just treat newline as significant
-            ParserElement.setDefaultWhitespaceChars(" \t")
-            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def']
-        """
-        ParserElement.DEFAULT_WHITE_CHARS = chars
-
-    @staticmethod
-    def inlineLiteralsUsing(cls):
-        """
-        Set class to be used for inclusion of string literals into a parser.
-
-        Example::
-
-            # default literal class used is Literal
-            integer = Word(nums)
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
-            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']
-
-
-            # change to Suppress
-            ParserElement.inlineLiteralsUsing(Suppress)
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
-            date_str.parseString("1999/12/31")  # -> ['1999', '12', '31']
-        """
-        ParserElement._literalStringClass = cls
-
-    @classmethod
-    def _trim_traceback(cls, tb):
-        while tb.tb_next:
-            tb = tb.tb_next
-        return tb
-
-    def __init__(self, savelist=False):
-        self.parseAction = list()
-        self.failAction = None
-        # ~ self.name = ""  # don't define self.name, let subclasses try/except upcall
-        self.strRepr = None
-        self.resultsName = None
-        self.saveAsList = savelist
-        self.skipWhitespace = True
-        self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)
-        self.copyDefaultWhiteChars = True
-        self.mayReturnEmpty = False # used when checking for left-recursion
-        self.keepTabs = False
-        self.ignoreExprs = list()
-        self.debug = False
-        self.streamlined = False
-        self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
-        self.errmsg = ""
-        self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
-        self.debugActions = (None, None, None)  # custom debug actions
-        self.re = None
-        self.callPreparse = True # used to avoid redundant calls to preParse
-        self.callDuringTry = False
-
-    def copy(self):
-        """
-        Make a copy of this :class:`ParserElement`.  Useful for defining
-        different parse actions for the same parsing pattern, using copies of
-        the original parse element.
-
-        Example::
-
-            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
-            integerK = integer.copy().addParseAction(lambda toks: toks[0] * 1024) + Suppress("K")
-            integerM = integer.copy().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
-
-            print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
-
-        prints::
-
-            [5120, 100, 655360, 268435456]
-
-        Equivalent form of ``expr.copy()`` is just ``expr()``::
-
-            integerM = integer().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
-        """
-        cpy = copy.copy(self)
-        cpy.parseAction = self.parseAction[:]
-        cpy.ignoreExprs = self.ignoreExprs[:]
-        if self.copyDefaultWhiteChars:
-            cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
-        return cpy
-
-    def setName(self, name):
-        """
-        Define a name for this expression, making debugging and exception messages clearer.
-
-        Example::
-
-            Word(nums).parseString("ABC")  # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
-            Word(nums).setName("integer").parseString("ABC")  # -> Exception: Expected integer (at char 0), (line:1, col:1)
-        """
-        self.name = name
-        self.errmsg = "Expected " + self.name
-        if __diag__.enable_debug_on_named_expressions:
-            self.setDebug()
-        return self
-
-    def setResultsName(self, name, listAllMatches=False):
-        """
-        Define name for referencing matching tokens as a nested attribute
-        of the returned parse results.
-        NOTE: this returns a *copy* of the original :class:`ParserElement` object;
-        this is so that the client can define a basic element, such as an
-        integer, and reference it in multiple places with different names.
-
-        You can also set results names using the abbreviated syntax,
-        ``expr("name")`` in place of ``expr.setResultsName("name")``
-        - see :class:`__call__`.
-
-        Example::
-
-            date_str = (integer.setResultsName("year") + '/'
-                        + integer.setResultsName("month") + '/'
-                        + integer.setResultsName("day"))
-
-            # equivalent form:
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-        """
-        return self._setResultsName(name, listAllMatches)
-
-    def _setResultsName(self, name, listAllMatches=False):
-        newself = self.copy()
-        if name.endswith("*"):
-            name = name[:-1]
-            listAllMatches = True
-        newself.resultsName = name
-        newself.modalResults = not listAllMatches
-        return newself
-
-    def setBreak(self, breakFlag=True):
-        """Method to invoke the Python pdb debugger when this element is
-           about to be parsed. Set ``breakFlag`` to True to enable, False to
-           disable.
-        """
-        if breakFlag:
-            _parseMethod = self._parse
-            def breaker(instring, loc, doActions=True, callPreParse=True):
-                import pdb
-                # this call to pdb.set_trace() is intentional, not a checkin error
-                pdb.set_trace()
-                return _parseMethod(instring, loc, doActions, callPreParse)
-            breaker._originalParseMethod = _parseMethod
-            self._parse = breaker
-        else:
-            if hasattr(self._parse, "_originalParseMethod"):
-                self._parse = self._parse._originalParseMethod
-        return self
-
-    def setParseAction(self, *fns, **kwargs):
-        """
-        Define one or more actions to perform when successfully matching parse element definition.
-        Parse action fn is a callable method with 0-3 arguments, called as ``fn(s, loc, toks)`` ,
-        ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where:
-
-        - s   = the original string being parsed (see note below)
-        - loc = the location of the matching substring
-        - toks = a list of the matched tokens, packaged as a :class:`ParseResults` object
-
-        If the functions in fns modify the tokens, they can return them as the return
-        value from fn, and the modified list of tokens will replace the original.
-        Otherwise, fn does not need to return any value.
-
-        If None is passed as the parse action, all previously added parse actions for this
-        expression are cleared.
-
-        Optional keyword arguments:
-        - callDuringTry = (default= ``False``) indicate if parse action should be run during lookaheads and alternate testing
-
-        Note: the default parsing behavior is to expand tabs in the input string
-        before starting the parsing process.  See :class:`parseString <ParserElement.parseString>` for more
-        information on parsing strings containing ``<TAB>`` s, and suggested
-        methods to maintain a consistent view of the parsed string, the parse
-        location, and line and column positions within the parsed string.
-
-        Example::
-
-            integer = Word(nums)
-            date_str = integer + '/' + integer + '/' + integer
-
-            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']
-
-            # use parse action to convert to ints at parse time
-            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
-            date_str = integer + '/' + integer + '/' + integer
-
-            # note that integer fields are now ints, not strings
-            date_str.parseString("1999/12/31")  # -> [1999, '/', 12, '/', 31]
-        """
-        if list(fns) == [None,]:
-            self.parseAction = []
-        else:
-            if not all(callable(fn) for fn in fns):
-                raise TypeError("parse actions must be callable")
-            self.parseAction = list(map(_trim_arity, list(fns)))
-            self.callDuringTry = kwargs.get("callDuringTry", False)
-        return self
-
-    def addParseAction(self, *fns, **kwargs):
-        """
-        Add one or more parse actions to expression's list of parse actions. See :class:`setParseAction`.
-
-        See examples in :class:`copy`.
-        """
-        self.parseAction += list(map(_trim_arity, list(fns)))
-        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
-        return self
-
-    def addCondition(self, *fns, **kwargs):
-        """Add a boolean predicate function to expression's list of parse actions. See
-        :class:`setParseAction` for function call signatures. Unlike ``setParseAction``,
-        functions passed to ``addCondition`` need to return boolean success/fail of the condition.
-
-        Optional keyword arguments:
-        - message = define a custom message to be used in the raised exception
-        - fatal   = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
-
-        Example::
-
-            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
-            year_int = integer.copy()
-            year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
-            date_str = year_int + '/' + integer + '/' + integer
-
-            result = date_str.parseString("1999/12/31")  # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
-        """
-        for fn in fns:
-            self.parseAction.append(conditionAsParseAction(fn, message=kwargs.get('message'),
-                                                           fatal=kwargs.get('fatal', False)))
-
-        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
-        return self
-
-    def setFailAction(self, fn):
-        """Define action to perform if parsing fails at this expression.
-           Fail action fn is a callable function that takes the arguments
-           ``fn(s, loc, expr, err)`` where:
-           - s = string being parsed
-           - loc = location where expression match was attempted and failed
-           - expr = the parse expression that failed
-           - err = the exception thrown
-           The function returns no value.  It may throw :class:`ParseFatalException`
-           if it is desired to stop parsing immediately."""
-        self.failAction = fn
-        return self
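-
-    # Illustrative sketch (not part of the original module): a minimal fail
-    # action reporting where the match was attempted (expr is any element):
-    #
-    #     def report_failure(s, loc, expr, err):
-    #         print("failed to match %s at line %d" % (expr, lineno(loc, s)))
-    #     expr.setFailAction(report_failure)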
-
-    def _skipIgnorables(self, instring, loc):
-        exprsFound = True
-        while exprsFound:
-            exprsFound = False
-            for e in self.ignoreExprs:
-                try:
-                    while 1:
-                        loc, dummy = e._parse(instring, loc)
-                        exprsFound = True
-                except ParseException:
-                    pass
-        return loc
-
-    def preParse(self, instring, loc):
-        if self.ignoreExprs:
-            loc = self._skipIgnorables(instring, loc)
-
-        if self.skipWhitespace:
-            wt = self.whiteChars
-            instrlen = len(instring)
-            while loc < instrlen and instring[loc] in wt:
-                loc += 1
-
-        return loc
-
-    def parseImpl(self, instring, loc, doActions=True):
-        return loc, []
-
-    def postParse(self, instring, loc, tokenlist):
-        return tokenlist
-
-    # ~ @profile
-    def _parseNoCache(self, instring, loc, doActions=True, callPreParse=True):
-        TRY, MATCH, FAIL = 0, 1, 2
-        debugging = (self.debug)  # and doActions)
-
-        if debugging or self.failAction:
-            # ~ print ("Match", self, "at loc", loc, "(%d, %d)" % (lineno(loc, instring), col(loc, instring)))
-            if self.debugActions[TRY]:
-                self.debugActions[TRY](instring, loc, self)
-            try:
-                if callPreParse and self.callPreparse:
-                    preloc = self.preParse(instring, loc)
-                else:
-                    preloc = loc
-                tokensStart = preloc
-                if self.mayIndexError or preloc >= len(instring):
-                    try:
-                        loc, tokens = self.parseImpl(instring, preloc, doActions)
-                    except IndexError:
-                        raise ParseException(instring, len(instring), self.errmsg, self)
-                else:
-                    loc, tokens = self.parseImpl(instring, preloc, doActions)
-            except Exception as err:
-                # ~ print ("Exception raised:", err)
-                if self.debugActions[FAIL]:
-                    self.debugActions[FAIL](instring, tokensStart, self, err)
-                if self.failAction:
-                    self.failAction(instring, tokensStart, self, err)
-                raise
-        else:
-            if callPreParse and self.callPreparse:
-                preloc = self.preParse(instring, loc)
-            else:
-                preloc = loc
-            tokensStart = preloc
-            if self.mayIndexError or preloc >= len(instring):
-                try:
-                    loc, tokens = self.parseImpl(instring, preloc, doActions)
-                except IndexError:
-                    raise ParseException(instring, len(instring), self.errmsg, self)
-            else:
-                loc, tokens = self.parseImpl(instring, preloc, doActions)
-
-        tokens = self.postParse(instring, loc, tokens)
-
-        retTokens = ParseResults(tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults)
-        if self.parseAction and (doActions or self.callDuringTry):
-            if debugging:
-                try:
-                    for fn in self.parseAction:
-                        try:
-                            tokens = fn(instring, tokensStart, retTokens)
-                        except IndexError as parse_action_exc:
-                            exc = ParseException("exception raised in parse action")
-                            exc.__cause__ = parse_action_exc
-                            raise exc
-
-                        if tokens is not None and tokens is not retTokens:
-                            retTokens = ParseResults(tokens,
-                                                      self.resultsName,
-                                                      asList=self.saveAsList and isinstance(tokens, (ParseResults, list)),
-                                                      modal=self.modalResults)
-                except Exception as err:
-                    # ~ print "Exception raised in user parse action:", err
-                    if self.debugActions[FAIL]:
-                        self.debugActions[FAIL](instring, tokensStart, self, err)
-                    raise
-            else:
-                for fn in self.parseAction:
-                    try:
-                        tokens = fn(instring, tokensStart, retTokens)
-                    except IndexError as parse_action_exc:
-                        exc = ParseException("exception raised in parse action")
-                        exc.__cause__ = parse_action_exc
-                        raise exc
-
-                    if tokens is not None and tokens is not retTokens:
-                        retTokens = ParseResults(tokens,
-                                                  self.resultsName,
-                                                  asList=self.saveAsList and isinstance(tokens, (ParseResults, list)),
-                                                  modal=self.modalResults)
-        if debugging:
-            # ~ print ("Matched", self, "->", retTokens.asList())
-            if self.debugActions[MATCH]:
-                self.debugActions[MATCH](instring, tokensStart, loc, self, retTokens)
-
-        return loc, retTokens
-
-    def tryParse(self, instring, loc):
-        try:
-            return self._parse(instring, loc, doActions=False)[0]
-        except ParseFatalException:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-    def canParseNext(self, instring, loc):
-        try:
-            self.tryParse(instring, loc)
-        except (ParseException, IndexError):
-            return False
-        else:
-            return True
-
-    class _UnboundedCache(object):
-        def __init__(self):
-            cache = {}
-            self.not_in_cache = not_in_cache = object()
-
-            def get(self, key):
-                return cache.get(key, not_in_cache)
-
-            def set(self, key, value):
-                cache[key] = value
-
-            def clear(self):
-                cache.clear()
-
-            def cache_len(self):
-                return len(cache)
-
-            self.get = types.MethodType(get, self)
-            self.set = types.MethodType(set, self)
-            self.clear = types.MethodType(clear, self)
-            self.__len__ = types.MethodType(cache_len, self)
-
-    if _OrderedDict is not None:
-        class _FifoCache(object):
-            def __init__(self, size):
-                self.not_in_cache = not_in_cache = object()
-
-                cache = _OrderedDict()
-
-                def get(self, key):
-                    return cache.get(key, not_in_cache)
-
-                def set(self, key, value):
-                    cache[key] = value
-                    while len(cache) > size:
-                        try:
-                            cache.popitem(False)
-                        except KeyError:
-                            pass
-
-                def clear(self):
-                    cache.clear()
-
-                def cache_len(self):
-                    return len(cache)
-
-                self.get = types.MethodType(get, self)
-                self.set = types.MethodType(set, self)
-                self.clear = types.MethodType(clear, self)
-                self.__len__ = types.MethodType(cache_len, self)
-
-    else:
-        class _FifoCache(object):
-            def __init__(self, size):
-                self.not_in_cache = not_in_cache = object()
-
-                cache = {}
-                key_fifo = collections.deque([], size)
-
-                def get(self, key):
-                    return cache.get(key, not_in_cache)
-
-                def set(self, key, value):
-                    cache[key] = value
-                    while len(key_fifo) > size:
-                        cache.pop(key_fifo.popleft(), None)
-                    key_fifo.append(key)
-
-                def clear(self):
-                    cache.clear()
-                    key_fifo.clear()
-
-                def cache_len(self):
-                    return len(cache)
-
-                self.get = types.MethodType(get, self)
-                self.set = types.MethodType(set, self)
-                self.clear = types.MethodType(clear, self)
-                self.__len__ = types.MethodType(cache_len, self)
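-
-    # Illustrative sketch (not part of the original module): both variants
-    # expose the same get/set/clear interface consumed by _parseCache below;
-    # e.g. with the OrderedDict-backed version:
-    #
-    #     cache = ParserElement._FifoCache(2)
-    #     cache.set("a", 1); cache.set("b", 2); cache.set("c", 3)
-    #     cache.get("a") is cache.not_in_cache  # -> True, oldest entry evicted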
-
-    # argument cache for optimizing repeated calls when backtracking through recursive expressions
-    packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
-    packrat_cache_lock = RLock()
-    packrat_cache_stats = [0, 0]
-
-    # this method gets repeatedly called during backtracking with the same arguments -
-    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
-    def _parseCache(self, instring, loc, doActions=True, callPreParse=True):
-        HIT, MISS = 0, 1
-        lookup = (self, instring, loc, callPreParse, doActions)
-        with ParserElement.packrat_cache_lock:
-            cache = ParserElement.packrat_cache
-            value = cache.get(lookup)
-            if value is cache.not_in_cache:
-                ParserElement.packrat_cache_stats[MISS] += 1
-                try:
-                    value = self._parseNoCache(instring, loc, doActions, callPreParse)
-                except ParseBaseException as pe:
-                    # cache a copy of the exception, without the traceback
-                    cache.set(lookup, pe.__class__(*pe.args))
-                    raise
-                else:
-                    cache.set(lookup, (value[0], value[1].copy()))
-                    return value
-            else:
-                ParserElement.packrat_cache_stats[HIT] += 1
-                if isinstance(value, Exception):
-                    raise value
-                return value[0], value[1].copy()
-
-    _parse = _parseNoCache
-
-    @staticmethod
-    def resetCache():
-        ParserElement.packrat_cache.clear()
-        ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
-
-    _packratEnabled = False
-    @staticmethod
-    def enablePackrat(cache_size_limit=128):
-        """Enables "packrat" parsing, which adds memoizing to the parsing logic.
-           Repeated parse attempts at the same string location (which happens
-           often in many complex grammars) can immediately return a cached value,
-           instead of re-executing parsing/validating code.  Memoizing is done of
-           both valid results and parsing exceptions.
-
-           Parameters:
-
-           - cache_size_limit - (default= ``128``) - if an integer value is provided
-             will limit the size of the packrat cache; if None is passed, then
-             the cache size will be unbounded; if 0 is passed, the cache will
-             be effectively disabled.
-
-           This speedup may break existing programs that use parse actions that
-           have side-effects.  For this reason, packrat parsing is disabled when
-           you first import pyparsing.  To activate the packrat feature, your
-           program must call the class method :class:`ParserElement.enablePackrat`.
-           For best results, call ``enablePackrat()`` immediately after
-           importing pyparsing.
-
-           Example::
-
-               import pyparsing
-               pyparsing.ParserElement.enablePackrat()
-        """
-        if not ParserElement._packratEnabled:
-            ParserElement._packratEnabled = True
-            if cache_size_limit is None:
-                ParserElement.packrat_cache = ParserElement._UnboundedCache()
-            else:
-                ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
-            ParserElement._parse = ParserElement._parseCache
-
-    def parseString(self, instring, parseAll=False):
-        """
-        Execute the parse expression with the given string.
-        This is the main interface to the client code, once the complete
-        expression has been built.
-
-        Returns the parsed data as a :class:`ParseResults` object, which may be
-        accessed as a list, or as a dict or object with attributes if the given parser
-        includes results names.
-
-        If you want the grammar to require that the entire input string be
-        successfully parsed, then set ``parseAll`` to True (equivalent to ending
-        the grammar with ``StringEnd()``).
-
-        Note: ``parseString`` implicitly calls ``expandtabs()`` on the input string,
-        in order to report proper column numbers in parse actions.
-        If the input string contains tabs and
-        the grammar uses parse actions that use the ``loc`` argument to index into the
-        string being parsed, you can ensure you have a consistent view of the input
-        string by:
-
-        - calling ``parseWithTabs`` on your grammar before calling ``parseString``
-          (see :class:`parseWithTabs`)
-        - define your parse action using the full ``(s, loc, toks)`` signature, and
-          reference the input string using the parse action's ``s`` argument
-        - explicitly expand the tabs in your input string before calling
-          ``parseString``
-
-        Example::
-
-            Word('a').parseString('aaaaabaaa')  # -> ['aaaaa']
-            Word('a').parseString('aaaaabaaa', parseAll=True)  # -> Exception: Expected end of text
-        """
-        ParserElement.resetCache()
-        if not self.streamlined:
-            self.streamline()
-            # ~ self.saveAsList = True
-        for e in self.ignoreExprs:
-            e.streamline()
-        if not self.keepTabs:
-            instring = instring.expandtabs()
-        try:
-            loc, tokens = self._parse(instring, 0)
-            if parseAll:
-                loc = self.preParse(instring, loc)
-                se = Empty() + StringEnd()
-                se._parse(instring, loc)
-        except ParseBaseException as exc:
-            if ParserElement.verbose_stacktrace:
-                raise
-            else:
-                # catch and re-raise exception from here, clearing out pyparsing internal stack trace
-                if getattr(exc, '__traceback__', None) is not None:
-                    exc.__traceback__ = self._trim_traceback(exc.__traceback__)
-                raise exc
-        else:
-            return tokens
-
-    def scanString(self, instring, maxMatches=_MAX_INT, overlap=False):
-        """
-        Scan the input string for expression matches.  Each match will return the
-        matching tokens, start location, and end location.  May be called with optional
-        ``maxMatches`` argument, to clip scanning after 'n' matches are found.  If
-        ``overlap`` is specified, then overlapping matches will be reported.
-
-        Note that the start and end locations are reported relative to the string
-        being parsed.  See :class:`parseString` for more information on parsing
-        strings with embedded tabs.
-
-        Example::
-
-            source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
-            print(source)
-            for tokens, start, end in Word(alphas).scanString(source):
-                print(' '*start + '^'*(end-start))
-                print(' '*start + tokens[0])
-
-        prints::
-
-            sldjf123lsdjjkf345sldkjf879lkjsfd987
-            ^^^^^
-            sldjf
-                    ^^^^^^^
-                    lsdjjkf
-                              ^^^^^^
-                              sldkjf
-                                       ^^^^^^
-                                       lkjsfd
-        """
-        if not self.streamlined:
-            self.streamline()
-        for e in self.ignoreExprs:
-            e.streamline()
-
-        if not self.keepTabs:
-            instring = _ustr(instring).expandtabs()
-        instrlen = len(instring)
-        loc = 0
-        preparseFn = self.preParse
-        parseFn = self._parse
-        ParserElement.resetCache()
-        matches = 0
-        try:
-            while loc <= instrlen and matches < maxMatches:
-                try:
-                    preloc = preparseFn(instring, loc)
-                    nextLoc, tokens = parseFn(instring, preloc, callPreParse=False)
-                except ParseException:
-                    loc = preloc + 1
-                else:
-                    if nextLoc > loc:
-                        matches += 1
-                        yield tokens, preloc, nextLoc
-                        if overlap:
-                            nextloc = preparseFn(instring, loc)
-                            if nextloc > loc:
-                                loc = nextLoc
-                            else:
-                                loc += 1
-                        else:
-                            loc = nextLoc
-                    else:
-                        loc = preloc + 1
-        except ParseBaseException as exc:
-            if ParserElement.verbose_stacktrace:
-                raise
-            else:
-                # catch and re-raise exception from here, clearing out pyparsing internal stack trace
-                if getattr(exc, '__traceback__', None) is not None:
-                    exc.__traceback__ = self._trim_traceback(exc.__traceback__)
-                raise exc
-
-    def transformString(self, instring):
-        """
-        Extension to :class:`scanString`, to modify matching text with modified tokens that may
-        be returned from a parse action.  To use ``transformString``, define a grammar and
-        attach a parse action to it that modifies the returned token list.
-        Invoking ``transformString()`` on a target string will then scan for matches,
-        and replace the matched text patterns according to the logic in the parse
-        action.  ``transformString()`` returns the resulting transformed string.
-
-        Example::
-
-            wd = Word(alphas)
-            wd.setParseAction(lambda toks: toks[0].title())
-
-            print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
-
-        prints::
-
-            Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
-        """
-        out = []
-        lastE = 0
-        # force preservation of s, to minimize unwanted transformation of string, and to
-        # keep string locs straight between transformString and scanString
-        self.keepTabs = True
-        try:
-            for t, s, e in self.scanString(instring):
-                out.append(instring[lastE:s])
-                if t:
-                    if isinstance(t, ParseResults):
-                        out += t.asList()
-                    elif isinstance(t, list):
-                        out += t
-                    else:
-                        out.append(t)
-                lastE = e
-            out.append(instring[lastE:])
-            out = [o for o in out if o]
-            return "".join(map(_ustr, _flatten(out)))
-        except ParseBaseException as exc:
-            if ParserElement.verbose_stacktrace:
-                raise
-            else:
-                # catch and re-raise exception from here, clearing out pyparsing internal stack trace
-                if getattr(exc, '__traceback__', None) is not None:
-                    exc.__traceback__ = self._trim_traceback(exc.__traceback__)
-                raise exc
-
-    def searchString(self, instring, maxMatches=_MAX_INT):
-        """
-        Another extension to :class:`scanString`, simplifying the access to the tokens found
-        to match the given parse expression.  May be called with optional
-        ``maxMatches`` argument, to clip searching after 'n' matches are found.
-
-        Example::
-
-            # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
-            cap_word = Word(alphas.upper(), alphas.lower())
-
-            print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
-
-            # the sum() builtin can be used to merge results into a single ParseResults object
-            print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
-
-        prints::
-
-            [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
-            ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
-        """
-        try:
-            return ParseResults([t for t, s, e in self.scanString(instring, maxMatches)])
-        except ParseBaseException as exc:
-            if ParserElement.verbose_stacktrace:
-                raise
-            else:
-                # catch and re-raise exception from here, clearing out pyparsing internal stack trace
-                if getattr(exc, '__traceback__', None) is not None:
-                    exc.__traceback__ = self._trim_traceback(exc.__traceback__)
-                raise exc
-
-    def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
-        """
-        Generator method to split a string using the given expression as a separator.
-        May be called with optional ``maxsplit`` argument, to limit the number of splits;
-        and the optional ``includeSeparators`` argument (default= ``False``), to indicate whether
-        the separating matched text should be included in the split results.
-
-        Example::
-
-            punc = oneOf(list(".,;:/-!?"))
-            print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
-
-        prints::
-
-            ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
-        """
-        splits = 0
-        last = 0
-        for t, s, e in self.scanString(instring, maxMatches=maxsplit):
-            yield instring[last:s]
-            if includeSeparators:
-                yield t[0]
-            last = e
-        yield instring[last:]
-
-    def __add__(self, other):
-        """
-        Implementation of + operator - returns :class:`And`. Adding strings to a ParserElement
-        converts them to :class:`Literal`s by default.
-
-        Example::
-
-            greet = Word(alphas) + "," + Word(alphas) + "!"
-            hello = "Hello, World!"
-            print (hello, "->", greet.parseString(hello))
-
-        prints::
-
-            Hello, World! -> ['Hello', ',', 'World', '!']
-
-        ``...`` may be used as a parse expression as a short form of :class:`SkipTo`.
-
-            Literal('start') + ... + Literal('end')
-
-        is equivalent to:
-
-            Literal('start') + SkipTo('end')("_skipped*") + Literal('end')
-
-        Note that the skipped text is returned with '_skipped' as a results name,
-        and to support having multiple skips in the same parser, the value returned is
-        a list of all skipped text.
-        """
-        if other is Ellipsis:
-            return _PendingSkip(self)
-
-        if isinstance(other, basestring):
-            other = self._literalStringClass(other)
-        if not isinstance(other, ParserElement):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                          SyntaxWarning, stacklevel=2)
-            return None
-        return And([self, other])
-
-    def __radd__(self, other):
-        """
-        Implementation of + operator when left operand is not a :class:`ParserElement`
-        """
-        if other is Ellipsis:
-            return SkipTo(self)("_skipped*") + self
-
-        if isinstance(other, basestring):
-            other = self._literalStringClass(other)
-        if not isinstance(other, ParserElement):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                          SyntaxWarning, stacklevel=2)
-            return None
-        return other + self
-
-    def __sub__(self, other):
-        """
-        Implementation of - operator, returns :class:`And` with error stop
-        """
-        if isinstance(other, basestring):
-            other = self._literalStringClass(other)
-        if not isinstance(other, ParserElement):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                          SyntaxWarning, stacklevel=2)
-            return None
-        return self + And._ErrorStop() + other
-
-    def __rsub__(self, other):
-        """
-        Implementation of - operator when left operand is not a :class:`ParserElement`
-        """
-        if isinstance(other, basestring):
-            other = self._literalStringClass(other)
-        if not isinstance(other, ParserElement):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                          SyntaxWarning, stacklevel=2)
-            return None
-        return other - self
-
-    def __mul__(self, other):
-        """
-        Implementation of * operator, allows use of ``expr * 3`` in place of
-        ``expr + expr + expr``.  Expressions may also be multiplied by a 2-integer
-        tuple, similar to ``{min, max}`` multipliers in regular expressions.  Tuples
-        may also include ``None`` as in:
-         - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent
-              to ``expr*n + ZeroOrMore(expr)``
-              (read as "at least n instances of ``expr``")
-         - ``expr*(None, n)`` is equivalent to ``expr*(0, n)``
-              (read as "0 to n instances of ``expr``")
-         - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)``
-         - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)``
-
-        Note that ``expr*(None, n)`` does not raise an exception if
-        more than n exprs exist in the input stream; that is,
-        ``expr*(None, n)`` does not enforce a maximum number of expr
-        occurrences.  If this behavior is desired, then write
-        ``expr*(None, n) + ~expr``
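-
-        Example (illustrative sketch)::
-
-            counted = Word(alphas) * (1, 3)
-            counted.parseString("ab cd ef gh")  # -> ['ab', 'cd', 'ef']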
-        """
-        if other is Ellipsis:
-            other = (0, None)
-        elif isinstance(other, tuple) and other[:1] == (Ellipsis,):
-            other = ((0, ) + other[1:] + (None,))[:2]
-
-        if isinstance(other, int):
-            minElements, optElements = other, 0
-        elif isinstance(other, tuple):
-            other = tuple(o if o is not Ellipsis else None for o in other)
-            other = (other + (None, None))[:2]
-            if other[0] is None:
-                other = (0, other[1])
-            if isinstance(other[0], int) and other[1] is None:
-                if other[0] == 0:
-                    return ZeroOrMore(self)
-                if other[0] == 1:
-                    return OneOrMore(self)
-                else:
-                    return self * other[0] + ZeroOrMore(self)
-            elif isinstance(other[0], int) and isinstance(other[1], int):
-                minElements, optElements = other
-                optElements -= minElements
-            else:
-                raise TypeError("cannot multiply 'ParserElement' and ('%s', '%s') objects"
-                                % (type(other[0]), type(other[1])))
-        else:
-            raise TypeError("cannot multiply 'ParserElement' and '%s' objects" % type(other))
-
-        if minElements < 0:
-            raise ValueError("cannot multiply ParserElement by negative value")
-        if optElements < 0:
-            raise ValueError("second tuple value must be greater or equal to first tuple value")
-        if minElements == optElements == 0:
-            raise ValueError("cannot multiply ParserElement by 0 or (0, 0)")
-
-        if optElements:
-            def makeOptionalList(n):
-                if n > 1:
-                    return Optional(self + makeOptionalList(n - 1))
-                else:
-                    return Optional(self)
-            if minElements:
-                if minElements == 1:
-                    ret = self + makeOptionalList(optElements)
-                else:
-                    ret = And([self] * minElements) + makeOptionalList(optElements)
-            else:
-                ret = makeOptionalList(optElements)
-        else:
-            if minElements == 1:
-                ret = self
-            else:
-                ret = And([self] * minElements)
-        return ret
-
-    def __rmul__(self, other):
-        return self.__mul__(other)
-
-    def __or__(self, other):
-        """
-        Implementation of | operator - returns :class:`MatchFirst`
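-
-        Example (illustrative sketch)::
-
-            # alternatives are tried left to right; the first to match wins
-            number = Word(nums + ".") | Word(nums)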
-        """
-        if other is Ellipsis:
-            return _PendingSkip(self, must_skip=True)
-
-        if isinstance(other, basestring):
-            other = self._literalStringClass(other)
-        if not isinstance(other, ParserElement):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                          SyntaxWarning, stacklevel=2)
-            return None
-        return MatchFirst([self, other])
-
-    def __ror__(self, other):
-        """
-        Implementation of | operator when left operand is not a :class:`ParserElement`
-        """
-        if isinstance(other, basestring):
-            other = self._literalStringClass(other)
-        if not isinstance(other, ParserElement):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                          SyntaxWarning, stacklevel=2)
-            return None
-        return other | self
-
-    def __xor__(self, other):
-        """
-        Implementation of ^ operator - returns :class:`Or`
-        """
-        if isinstance(other, basestring):
-            other = self._literalStringClass(other)
-        if not isinstance(other, ParserElement):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                          SyntaxWarning, stacklevel=2)
-            return None
-        return Or([self, other])
-
-    def __rxor__(self, other):
-        """
-        Implementation of ^ operator when left operand is not a :class:`ParserElement`
-        """
-        if isinstance(other, basestring):
-            other = self._literalStringClass(other)
-        if not isinstance(other, ParserElement):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                          SyntaxWarning, stacklevel=2)
-            return None
-        return other ^ self
-
-    def __and__(self, other):
-        """
-        Implementation of & operator - returns :class:`Each`
-        """
-        if isinstance(other, basestring):
-            other = self._literalStringClass(other)
-        if not isinstance(other, ParserElement):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                          SyntaxWarning, stacklevel=2)
-            return None
-        return Each([self, other])
-
-    def __rand__(self, other):
-        """
-        Implementation of & operator when left operand is not a :class:`ParserElement`
-        """
-        if isinstance(other, basestring):
-            other = self._literalStringClass(other)
-        if not isinstance(other, ParserElement):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                          SyntaxWarning, stacklevel=2)
-            return None
-        return other & self
-
-    def __invert__(self):
-        """
-        Implementation of ~ operator - returns :class:`NotAny`
-        """
-        return NotAny(self)
-
-    def __iter__(self):
-        # must implement __iter__ to override legacy use of sequential access to __getitem__ to
-        # iterate over a sequence
-        raise TypeError('%r object is not iterable' % self.__class__.__name__)
-
-    def __getitem__(self, key):
-        """
-        use ``[]`` indexing notation as a short form for expression repetition:
-         - ``expr[n]`` is equivalent to ``expr*n``
-         - ``expr[m, n]`` is equivalent to ``expr*(m, n)``
-         - ``expr[n, ...]`` or ``expr[n,]`` is equivalent
-              to ``expr*n + ZeroOrMore(expr)``
-              (read as "at least n instances of ``expr``")
-         - ``expr[..., n]`` is equivalent to ``expr*(0, n)``
-              (read as "0 to n instances of ``expr``")
-         - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)``
-         - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)``
-         ``None`` may be used in place of ``...``.
-
-        Note that ``expr[..., n]`` and ``expr[m, n]`` do not raise an exception
-        if more than ``n`` ``expr``s exist in the input stream.  If this behavior is
-        desired, then write ``expr[..., n] + ~expr``.
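-
-        Example (illustrative sketch)::
-
-            Word(nums)[1, ...].parseString("12 34 567")  # -> ['12', '34', '567']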
-       """
-
-        # convert single arg keys to tuples
-        try:
-            if isinstance(key, str):
-                key = (key,)
-            iter(key)
-        except TypeError:
-            key = (key, key)
-
-        if len(key) > 2:
-            warnings.warn("only 1 or 2 index arguments supported ({0}{1})".format(key[:5],
-                                                                                '... [{0}]'.format(len(key))
-                                                                                if len(key) > 5 else ''))
-
-        # clip to 2 elements
-        ret = self * tuple(key[:2])
-        return ret
-
-    def __call__(self, name=None):
-        """
-        Shortcut for :class:`setResultsName`, with ``listAllMatches=False``.
-
-        If ``name`` is given with a trailing ``'*'`` character, then ``listAllMatches`` will be
-        passed as ``True``.
-
-        If ``name`` is omitted, same as calling :class:`copy`.
-
-        Example::
-
-            # these are equivalent
-            userdata = Word(alphas).setResultsName("name") + Word(nums + "-").setResultsName("socsecno")
-            userdata = Word(alphas)("name") + Word(nums + "-")("socsecno")
-        """
-        if name is not None:
-            return self._setResultsName(name)
-        else:
-            return self.copy()
-
-    def suppress(self):
-        """
-        Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from
-        cluttering up returned output.
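-
-        Example (illustrative sketch)::
-
-            wd = Word(alphas)
-            (wd + Literal(',').suppress() + wd).parseString("Hello, World")
-            # -> ['Hello', 'World']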
-        """
-        return Suppress(self)
-
-    def leaveWhitespace(self):
-        """
-        Disables the skipping of whitespace before matching the characters in the
-        :class:`ParserElement`'s defined pattern.  This is normally only used internally by
-        the pyparsing module, but may be needed in some whitespace-sensitive grammars.
-        """
-        self.skipWhitespace = False
-        return self
-
-    def setWhitespaceChars(self, chars):
-        """
-        Overrides the default whitespace chars
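-
-        Example (illustrative sketch)::
-
-            # treat only spaces and tabs as skippable, so newlines become significant
-            words = OneOrMore(Word(alphas).setWhitespaceChars(" \\t"))
-            words.parseString("abc def\\nghi")  # -> ['abc', 'def']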
-        """
-        self.skipWhitespace = True
-        self.whiteChars = chars
-        self.copyDefaultWhiteChars = False
-        return self
-
-    def parseWithTabs(self):
-        """
-        Overrides the default behavior of expanding ``<TAB>``s to spaces before parsing
-        the input string.  Must be called before ``parseString`` when the input grammar
-        contains elements that match ``<TAB>`` characters.
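-
-        Example (illustrative sketch)::
-
-            tab_sep = Word(alphas) + White("\\t").suppress() + Word(alphas)
-            tab_sep.parseWithTabs().parseString("abc\\tdef")  # -> ['abc', 'def']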
-        """
-        self.keepTabs = True
-        return self
-
-    def ignore(self, other):
-        """
-        Define expression to be ignored (e.g., comments) while doing pattern
-        matching; may be called repeatedly, to define multiple comment or other
-        ignorable patterns.
-
-        Example::
-
-            patt = OneOrMore(Word(alphas))
-            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
-
-            patt.ignore(cStyleComment)
-            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
-        """
-        if isinstance(other, basestring):
-            other = Suppress(other)
-
-        if isinstance(other, Suppress):
-            if other not in self.ignoreExprs:
-                self.ignoreExprs.append(other)
-        else:
-            self.ignoreExprs.append(Suppress(other.copy()))
-        return self
-
-    def setDebugActions(self, startAction, successAction, exceptionAction):
-        """
-        Enable display of debugging messages while doing pattern matching.
-        """
-        self.debugActions = (startAction or _defaultStartDebugAction,
-                             successAction or _defaultSuccessDebugAction,
-                             exceptionAction or _defaultExceptionDebugAction)
-        self.debug = True
-        return self
-
-    def setDebug(self, flag=True):
-        """
-        Enable display of debugging messages while doing pattern matching.
-        Set ``flag`` to True to enable, False to disable.
-
-        Example::
-
-            wd = Word(alphas).setName("alphaword")
-            integer = Word(nums).setName("numword")
-            term = wd | integer
-
-            # turn on debugging for wd
-            wd.setDebug()
-
-            OneOrMore(term).parseString("abc 123 xyz 890")
-
-        prints::
-
-            Match alphaword at loc 0(1,1)
-            Matched alphaword -> ['abc']
-            Match alphaword at loc 3(1,4)
-            Exception raised:Expected alphaword (at char 4), (line:1, col:5)
-            Match alphaword at loc 7(1,8)
-            Matched alphaword -> ['xyz']
-            Match alphaword at loc 11(1,12)
-            Exception raised:Expected alphaword (at char 12), (line:1, col:13)
-            Match alphaword at loc 15(1,16)
-            Exception raised:Expected alphaword (at char 15), (line:1, col:16)
-
-        The output shown is that produced by the default debug actions - custom debug actions can be
-        specified using :class:`setDebugActions`. Prior to attempting
-        to match the ``wd`` expression, the debugging message ``"Match <exprname> at loc <n>(<line>,<col>)"``
-        is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"``
-        message is shown. Also note the use of :class:`setName` to assign a human-readable name to the expression,
-        which makes debugging and exception messages easier to understand - for instance, the default
-        name created for the :class:`Word` expression without calling ``setName`` is ``"W:(ABCD...)"``.
-        """
-        if flag:
-            self.setDebugActions(_defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction)
-        else:
-            self.debug = False
-        return self
-
-    def __str__(self):
-        return self.name
-
-    def __repr__(self):
-        return _ustr(self)
-
-    def streamline(self):
-        self.streamlined = True
-        self.strRepr = None
-        return self
-
-    def checkRecursion(self, parseElementList):
-        pass
-
-    def validate(self, validateTrace=None):
-        """
-        Check defined expressions for valid structure, check for infinite recursive definitions.
-        """
-        self.checkRecursion([])
-
-    def parseFile(self, file_or_filename, parseAll=False):
-        """
-        Execute the parse expression on the given file or filename.
-        If a filename is specified (instead of a file object),
-        the entire file is opened, read, and closed before parsing.
-        """
-        try:
-            file_contents = file_or_filename.read()
-        except AttributeError:
-            with open(file_or_filename, "r") as f:
-                file_contents = f.read()
-        try:
-            return self.parseString(file_contents, parseAll)
-        except ParseBaseException as exc:
-            if ParserElement.verbose_stacktrace:
-                raise
-            else:
-                # catch and re-raise exception from here, clearing out pyparsing internal stack trace
-                if getattr(exc, '__traceback__', None) is not None:
-                    exc.__traceback__ = self._trim_traceback(exc.__traceback__)
-                raise exc
-
-    def __eq__(self, other):
-        if self is other:
-            return True
-        elif isinstance(other, basestring):
-            return self.matches(other)
-        elif isinstance(other, ParserElement):
-            return vars(self) == vars(other)
-        return False
-
-    def __ne__(self, other):
-        return not (self == other)
-
-    def __hash__(self):
-        return id(self)
-
-    def __req__(self, other):
-        return self == other
-
-    def __rne__(self, other):
-        return not (self == other)
-
-    def matches(self, testString, parseAll=True):
-        """
-        Method for quick testing of a parser against a test string. Good for simple
-        inline microtests of sub-expressions while building up a larger parser.
-
-        Parameters:
-         - testString - to test against this expression for a match
-         - parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests
-
-        Example::
-
-            expr = Word(nums)
-            assert expr.matches("100")
-        """
-        try:
-            self.parseString(_ustr(testString), parseAll=parseAll)
-            return True
-        except ParseBaseException:
-            return False
-
-    def runTests(self, tests, parseAll=True, comment='#',
-                 fullDump=True, printResults=True, failureTests=False, postParse=None,
-                 file=None):
-        """
-        Execute the parse expression on a series of test strings, showing each
-        test, the parsed results or where the parse failed. Quick and easy way to
-        run a parse expression against a list of sample strings.
-
-        Parameters:
-         - tests - a list of separate test strings, or a multiline string of test strings
-         - parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests
-         - comment - (default= ``'#'``) - expression for indicating embedded comments in the test
-              string; pass None to disable comment filtering
-         - fullDump - (default= ``True``) - dump results as list followed by results names in nested outline;
-              if False, only dump nested list
-         - printResults - (default= ``True``) prints test output to stdout
-         - failureTests - (default= ``False``) indicates if these tests are expected to fail parsing
-         - postParse - (default= ``None``) optional callback for successful parse results; called as
-              `fn(test_string, parse_results)` and returns a string to be added to the test output
-         - file - (default=``None``) optional file-like object to which test output will be written;
-              if None, will default to ``sys.stdout``
-
-        Returns: a (success, results) tuple, where success indicates that all tests succeeded
-        (or failed if ``failureTests`` is True), and the results contain a list of lines of each
-        test's output
-
-        Example::
-
-            number_expr = pyparsing_common.number.copy()
-
-            result = number_expr.runTests('''
-                # unsigned integer
-                100
-                # negative integer
-                -100
-                # float with scientific notation
-                6.02e23
-                # integer with scientific notation
-                1e-12
-                ''')
-            print("Success" if result[0] else "Failed!")
-
-            result = number_expr.runTests('''
-                # stray character
-                100Z
-                # missing leading digit before '.'
-                -.100
-                # too many '.'
-                3.14.159
-                ''', failureTests=True)
-            print("Success" if result[0] else "Failed!")
-
-        prints::
-
-            # unsigned integer
-            100
-            [100]
-
-            # negative integer
-            -100
-            [-100]
-
-            # float with scientific notation
-            6.02e23
-            [6.02e+23]
-
-            # integer with scientific notation
-            1e-12
-            [1e-12]
-
-            Success
-
-            # stray character
-            100Z
-               ^
-            FAIL: Expected end of text (at char 3), (line:1, col:4)
-
-            # missing leading digit before '.'
-            -.100
-            ^
-            FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
-
-            # too many '.'
-            3.14.159
-                ^
-            FAIL: Expected end of text (at char 4), (line:1, col:5)
-
-            Success
-
-        Each test string must be on a single line. If you want to test a string that spans multiple
-        lines, create a test like this::
-
-            expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")
-
-        (Note that this is a raw string literal, you must include the leading 'r'.)
-        """
-        if isinstance(tests, basestring):
-            tests = list(map(str.strip, tests.rstrip().splitlines()))
-        if isinstance(comment, basestring):
-            comment = Literal(comment)
-        if file is None:
-            file = sys.stdout
-        print_ = file.write
-
-        allResults = []
-        comments = []
-        success = True
-        NL = Literal(r'\n').addParseAction(replaceWith('\n')).ignore(quotedString)
-        BOM = u'\ufeff'
-        for t in tests:
-            if comment is not None and comment.matches(t, False) or comments and not t:
-                comments.append(t)
-                continue
-            if not t:
-                continue
-            out = ['\n' + '\n'.join(comments) if comments else '', t]
-            comments = []
-            try:
-                # convert newline marks to actual newlines, and strip leading BOM if present
-                t = NL.transformString(t.lstrip(BOM))
-                result = self.parseString(t, parseAll=parseAll)
-            except ParseBaseException as pe:
-                fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
-                if '\n' in t:
-                    out.append(line(pe.loc, t))
-                    out.append(' ' * (col(pe.loc, t) - 1) + '^' + fatal)
-                else:
-                    out.append(' ' * pe.loc + '^' + fatal)
-                out.append("FAIL: " + str(pe))
-                success = success and failureTests
-                result = pe
-            except Exception as exc:
-                out.append("FAIL-EXCEPTION: " + str(exc))
-                success = success and failureTests
-                result = exc
-            else:
-                success = success and not failureTests
-                if postParse is not None:
-                    try:
-                        pp_value = postParse(t, result)
-                        if pp_value is not None:
-                            if isinstance(pp_value, ParseResults):
-                                out.append(pp_value.dump())
-                            else:
-                                out.append(str(pp_value))
-                        else:
-                            out.append(result.dump())
-                    except Exception as e:
-                        out.append(result.dump(full=fullDump))
-                        out.append("{0} failed: {1}: {2}".format(postParse.__name__, type(e).__name__, e))
-                else:
-                    out.append(result.dump(full=fullDump))
-
-            if printResults:
-                if fullDump:
-                    out.append('')
-                print_('\n'.join(out))
-
-            allResults.append((t, result))
-
-        return success, allResults
-
-
-class _PendingSkip(ParserElement):
-    # internal placeholder class to hold a place where '...' is added to a parser element;
-    # once another ParserElement is added, this placeholder will be replaced with a SkipTo
-    def __init__(self, expr, must_skip=False):
-        super(_PendingSkip, self).__init__()
-        self.strRepr = str(expr + Empty()).replace('Empty', '...')
-        self.name = self.strRepr
-        self.anchor = expr
-        self.must_skip = must_skip
-
-    def __add__(self, other):
-        skipper = SkipTo(other).setName("...")("_skipped*")
-        if self.must_skip:
-            def must_skip(t):
-                if not t._skipped or t._skipped.asList() == ['']:
-                    del t[0]
-                    t.pop("_skipped", None)
-            def show_skip(t):
-                if t._skipped.asList()[-1:] == ['']:
-                    skipped = t.pop('_skipped')
-                    t['_skipped'] = 'missing <' + repr(self.anchor) + '>'
-            return (self.anchor + skipper().addParseAction(must_skip)
-                    | skipper().addParseAction(show_skip)) + other
-
-        return self.anchor + skipper + other
-
-    def __repr__(self):
-        return self.strRepr
-
-    def parseImpl(self, *args):
-        raise Exception("use of `...` expression without following SkipTo target expression")
-
-
-class Token(ParserElement):
-    """Abstract :class:`ParserElement` subclass, for defining atomic
-    matching patterns.
-    """
-    def __init__(self):
-        super(Token, self).__init__(savelist=False)
-
-
-class Empty(Token):
-    """An empty token, will always match.
-    """
-    def __init__(self):
-        super(Empty, self).__init__()
-        self.name = "Empty"
-        self.mayReturnEmpty = True
-        self.mayIndexError = False
-
-
-class NoMatch(Token):
-    """A token that will never match.
-    """
-    def __init__(self):
-        super(NoMatch, self).__init__()
-        self.name = "NoMatch"
-        self.mayReturnEmpty = True
-        self.mayIndexError = False
-        self.errmsg = "Unmatchable token"
-
-    def parseImpl(self, instring, loc, doActions=True):
-        raise ParseException(instring, loc, self.errmsg, self)
-
-
-class Literal(Token):
-    """Token to exactly match a specified string.
-
-    Example::
-
-        Literal('blah').parseString('blah')  # -> ['blah']
-        Literal('blah').parseString('blahfooblah')  # -> ['blah']
-        Literal('blah').parseString('bla')  # -> Exception: Expected "blah"
-
-    For case-insensitive matching, use :class:`CaselessLiteral`.
-
-    For keyword matching (force word break before and after the matched string),
-    use :class:`Keyword` or :class:`CaselessKeyword`.
-    """
-    def __init__(self, matchString):
-        super(Literal, self).__init__()
-        self.match = matchString
-        self.matchLen = len(matchString)
-        try:
-            self.firstMatchChar = matchString[0]
-        except IndexError:
-            warnings.warn("null string passed to Literal; use Empty() instead",
-                            SyntaxWarning, stacklevel=2)
-            self.__class__ = Empty
-        self.name = '"%s"' % _ustr(self.match)
-        self.errmsg = "Expected " + self.name
-        self.mayReturnEmpty = False
-        self.mayIndexError = False
-
-        # Performance tuning: modify __class__ to select
-        # a parseImpl optimized for single-character check
-        if self.matchLen == 1 and type(self) is Literal:
-            self.__class__ = _SingleCharLiteral
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if instring[loc] == self.firstMatchChar and instring.startswith(self.match, loc):
-            return loc + self.matchLen, self.match
-        raise ParseException(instring, loc, self.errmsg, self)
-
-class _SingleCharLiteral(Literal):
-    def parseImpl(self, instring, loc, doActions=True):
-        if instring[loc] == self.firstMatchChar:
-            return loc + 1, self.match
-        raise ParseException(instring, loc, self.errmsg, self)
-
-_L = Literal
-ParserElement._literalStringClass = Literal
-
-class Keyword(Token):
-    """Token to exactly match a specified string as a keyword, that is,
-    it must be immediately followed by a non-keyword character.  Compare
-    with :class:`Literal`:
-
-     - ``Literal("if")`` will match the leading ``'if'`` in
-       ``'ifAndOnlyIf'``.
-     - ``Keyword("if")`` will not; it will only match the leading
-       ``'if'`` in ``'if x=1'``, or ``'if(y==2)'``
-
-    Accepts two optional constructor arguments in addition to the
-    keyword string:
-
-     - ``identChars`` is a string of characters that would be valid
-       identifier characters, defaulting to all alphanumerics + "_" and
-       "$"
-     - ``caseless`` allows case-insensitive matching, default is ``False``.
-
-    Example::
-
-        Keyword("start").parseString("start")  # -> ['start']
-        Keyword("start").parseString("starting")  # -> Exception
-
-    For case-insensitive matching, use :class:`CaselessKeyword`.
-    """
-    DEFAULT_KEYWORD_CHARS = alphanums + "_$"
-
-    def __init__(self, matchString, identChars=None, caseless=False):
-        super(Keyword, self).__init__()
-        if identChars is None:
-            identChars = Keyword.DEFAULT_KEYWORD_CHARS
-        self.match = matchString
-        self.matchLen = len(matchString)
-        try:
-            self.firstMatchChar = matchString[0]
-        except IndexError:
-            warnings.warn("null string passed to Keyword; use Empty() instead",
-                          SyntaxWarning, stacklevel=2)
-        self.name = '"%s"' % self.match
-        self.errmsg = "Expected " + self.name
-        self.mayReturnEmpty = False
-        self.mayIndexError = False
-        self.caseless = caseless
-        if caseless:
-            self.caselessmatch = matchString.upper()
-            identChars = identChars.upper()
-        self.identChars = set(identChars)
-
-    def parseImpl(self, instring, loc, doActions=True):
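-        # match the keyword text itself, then verify that the characters immediately
-        # before and after the match are not valid identifier characters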
-        if self.caseless:
-            if ((instring[loc:loc + self.matchLen].upper() == self.caselessmatch)
-                    and (loc >= len(instring) - self.matchLen
-                         or instring[loc + self.matchLen].upper() not in self.identChars)
-                    and (loc == 0
-                         or instring[loc - 1].upper() not in self.identChars)):
-                return loc + self.matchLen, self.match
-
-        else:
-            if instring[loc] == self.firstMatchChar:
-                if ((self.matchLen == 1 or instring.startswith(self.match, loc))
-                        and (loc >= len(instring) - self.matchLen
-                             or instring[loc + self.matchLen] not in self.identChars)
-                        and (loc == 0 or instring[loc - 1] not in self.identChars)):
-                    return loc + self.matchLen, self.match
-
-        raise ParseException(instring, loc, self.errmsg, self)
-
-    def copy(self):
-        c = super(Keyword, self).copy()
-        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
-        return c
-
-    @staticmethod
-    def setDefaultKeywordChars(chars):
-        """Overrides the default Keyword chars
-        """
-        Keyword.DEFAULT_KEYWORD_CHARS = chars
-
-class CaselessLiteral(Literal):
-    """Token to match a specified string, ignoring case of letters.
-    Note: the matched results will always be in the case of the given
-    match string, NOT the case of the input text.
-
-    Example::
-
-        OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
-
-    (Contrast with example for :class:`CaselessKeyword`.)
-    """
-    def __init__(self, matchString):
-        super(CaselessLiteral, self).__init__(matchString.upper())
-        # Preserve the defining literal.
-        self.returnString = matchString
-        self.name = "'%s'" % self.returnString
-        self.errmsg = "Expected " + self.name
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if instring[loc:loc + self.matchLen].upper() == self.match:
-            return loc + self.matchLen, self.returnString
-        raise ParseException(instring, loc, self.errmsg, self)
-
-class CaselessKeyword(Keyword):
-    """
-    Caseless version of :class:`Keyword`.
-
-    Example::
-
-        OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
-
-    (Contrast with example for :class:`CaselessLiteral`.)
-    """
-    def __init__(self, matchString, identChars=None):
-        super(CaselessKeyword, self).__init__(matchString, identChars, caseless=True)
-
-class CloseMatch(Token):
-    """A variation on :class:`Literal` which matches "close" matches,
-    that is, strings with at most 'n' mismatching characters.
-    :class:`CloseMatch` takes parameters:
-
-     - ``match_string`` - string to be matched
-     - ``maxMismatches`` - (``default=1``) maximum number of
-       mismatches allowed to count as a match
-
-    The results from a successful parse will contain the matched text
-    from the input string and the following named results:
-
-     - ``mismatches`` - a list of the positions within the
-       match_string where mismatches were found
-     - ``original`` - the original match_string used to compare
-       against the input string
-
-    If ``mismatches`` is an empty list, then the match was an exact
-    match.
-
-    Example::
-
-        patt = CloseMatch("ATCATCGAATGGA")
-        patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
-        patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
-
-        # exact match
-        patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
-
-        # close match allowing up to 2 mismatches
-        patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
-        patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
-    """
-    def __init__(self, match_string, maxMismatches=1):
-        super(CloseMatch, self).__init__()
-        self.name = match_string
-        self.match_string = match_string
-        self.maxMismatches = maxMismatches
-        self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
-        self.mayIndexError = False
-        self.mayReturnEmpty = False
-
-    def parseImpl(self, instring, loc, doActions=True):
-        start = loc
-        instrlen = len(instring)
-        maxloc = start + len(self.match_string)
-
-        if maxloc <= instrlen:
-            match_string = self.match_string
-            match_stringloc = 0
-            mismatches = []
-            maxMismatches = self.maxMismatches
-
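-            # compare character by character, recording mismatch positions; the
-            # loop's 'else' branch runs only if maxMismatches was never exceeded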
-            for match_stringloc, s_m in enumerate(zip(instring[loc:maxloc], match_string)):
-                src, mat = s_m
-                if src != mat:
-                    mismatches.append(match_stringloc)
-                    if len(mismatches) > maxMismatches:
-                        break
-            else:
-                loc = match_stringloc + 1
-                results = ParseResults([instring[start:loc]])
-                results['original'] = match_string
-                results['mismatches'] = mismatches
-                return loc, results
-
-        raise ParseException(instring, loc, self.errmsg, self)
-
-
-class Word(Token):
-    """Token for matching words composed of allowed character sets.
-    Defined with string containing all allowed initial characters, an
-    optional string containing allowed body characters (if omitted,
-    defaults to the initial character set), and an optional minimum,
-    maximum, and/or exact length.  The default value for ``min`` is
-    1 (a minimum value < 1 is not valid); the default values for
-    ``max`` and ``exact`` are 0, meaning no maximum or exact
-    length restriction. An optional ``excludeChars`` parameter can
-    list characters that might be found in the input ``bodyChars``
-    string; useful to define a word of all printables except for one or
-    two characters, for instance.
-
-    :class:`srange` is useful for building custom character set strings
-    for ``Word`` expressions, using range notation from regular
-    expression character sets.
-
-    A common mistake is to use :class:`Word` to match a specific literal
-    string, as in ``Word("Address")``. Remember that :class:`Word`
-    uses the string argument to define *sets* of matchable characters.
-    This expression would match "Add", "AAA", "dAred", or any other word
-    made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an
-    exact literal string, use :class:`Literal` or :class:`Keyword`.
-
-    pyparsing includes helper strings for building Words:
-
-     - :class:`alphas`
-     - :class:`nums`
-     - :class:`alphanums`
-     - :class:`hexnums`
-     - :class:`alphas8bit` (alphabetic characters in the Latin-1 range 128-255
-       - accented, tilded, umlauted, etc.)
-     - :class:`punc8bit` (non-alphabetic characters in the Latin-1 range
-       128-255 - currency, symbols, superscripts, diacriticals, etc.)
-     - :class:`printables` (any non-whitespace character)
-
-    Example::
-
-        # a word composed of digits
-        integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
-
-        # a word with a leading capital, and zero or more lowercase
-        capital_word = Word(alphas.upper(), alphas.lower())
-
-        # hostnames are alphanumeric, with leading alpha, and '-'
-        hostname = Word(alphas, alphanums + '-')
-
-        # roman numeral (not a strict parser, accepts invalid mix of characters)
-        roman = Word("IVXLCDM")
-
-        # any string of non-whitespace characters, except for ','
-        csv_value = Word(printables, excludeChars=",")
-    """
-    def __init__(self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None):
-        super(Word, self).__init__()
-        if excludeChars:
-            excludeChars = set(excludeChars)
-            initChars = ''.join(c for c in initChars if c not in excludeChars)
-            if bodyChars:
-                bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
-        self.initCharsOrig = initChars
-        self.initChars = set(initChars)
-        if bodyChars:
-            self.bodyCharsOrig = bodyChars
-            self.bodyChars = set(bodyChars)
-        else:
-            self.bodyCharsOrig = initChars
-            self.bodyChars = set(initChars)
-
-        self.maxSpecified = max > 0
-
-        if min < 1:
-            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
-
-        self.minLen = min
-
-        if max > 0:
-            self.maxLen = max
-        else:
-            self.maxLen = _MAX_INT
-
-        if exact > 0:
-            self.maxLen = exact
-            self.minLen = exact
-
-        self.name = _ustr(self)
-        self.errmsg = "Expected " + self.name
-        self.mayIndexError = False
-        self.asKeyword = asKeyword
-
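-        # performance shortcut: when no explicit length constraints are given (and no
-        # space char is in the sets), compile an equivalent regular expression and
-        # switch to the faster _WordRegex implementation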
-        if ' ' not in self.initCharsOrig + self.bodyCharsOrig and (min == 1 and max == 0 and exact == 0):
-            if self.bodyCharsOrig == self.initCharsOrig:
-                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
-            elif len(self.initCharsOrig) == 1:
-                self.reString = "%s[%s]*" % (re.escape(self.initCharsOrig),
-                                             _escapeRegexRangeChars(self.bodyCharsOrig),)
-            else:
-                self.reString = "[%s][%s]*" % (_escapeRegexRangeChars(self.initCharsOrig),
-                                               _escapeRegexRangeChars(self.bodyCharsOrig),)
-            if self.asKeyword:
-                self.reString = r"\b" + self.reString + r"\b"
-
-            try:
-                self.re = re.compile(self.reString)
-            except Exception:
-                self.re = None
-            else:
-                self.re_match = self.re.match
-                self.__class__ = _WordRegex
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if instring[loc] not in self.initChars:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        start = loc
-        loc += 1
-        instrlen = len(instring)
-        bodychars = self.bodyChars
-        maxloc = start + self.maxLen
-        maxloc = min(maxloc, instrlen)
-        while loc < maxloc and instring[loc] in bodychars:
-            loc += 1
-
-        throwException = False
-        if loc - start < self.minLen:
-            throwException = True
-        elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
-            throwException = True
-        elif self.asKeyword:
-            if (start > 0 and instring[start - 1] in bodychars
-                    or loc < instrlen and instring[loc] in bodychars):
-                throwException = True
-
-        if throwException:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        return loc, instring[start:loc]
-
-    def __str__(self):
-        try:
-            return super(Word, self).__str__()
-        except Exception:
-            pass
-
-        if self.strRepr is None:
-
-            def charsAsStr(s):
-                if len(s) > 4:
-                    return s[:4] + "..."
-                else:
-                    return s
-
-            if self.initCharsOrig != self.bodyCharsOrig:
-                self.strRepr = "W:(%s, %s)" % (charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig))
-            else:
-                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
-
-        return self.strRepr
-
-class _WordRegex(Word):
-    def parseImpl(self, instring, loc, doActions=True):
-        result = self.re_match(instring, loc)
-        if not result:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        loc = result.end()
-        return loc, result.group()
-
-
-class Char(_WordRegex):
-    """A short-cut class for defining ``Word(characters, exact=1)``,
-    when defining a match of any single character in a string of
-    characters.
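-
-    Example (illustrative sketch)::
-
-        vowel = Char("aeiou")
-        OneOrMore(vowel).parseString("aei")  # -> ['a', 'e', 'i']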
-    """
-    def __init__(self, charset, asKeyword=False, excludeChars=None):
-        super(Char, self).__init__(charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars)
-        self.reString = "[%s]" % _escapeRegexRangeChars(''.join(self.initChars))
-        if asKeyword:
-            self.reString = r"\b%s\b" % self.reString
-        self.re = re.compile(self.reString)
-        self.re_match = self.re.match
-
-
-class Regex(Token):
-    r"""Token for matching strings that match a given regular
-    expression. Defined with string specifying the regular expression in
-    a form recognized by the stdlib Python `re module <https://docs.python.org/3/library/re.html>`_.
-    If the given regex contains named groups (defined using ``(?P<name>...)``),
-    these will be preserved as named parse results.
-
-    If instead of the Python stdlib re module you wish to use a different RE module
-    (such as the `regex` module), you can replace it by building your
-    Regex object with a compiled RE that was compiled using regex:
-
-    Example::
-
-        realnum = Regex(r"[+-]?\d+\.\d*")
-        date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
-        # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
-        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
-
-        # use regex module instead of stdlib re module to construct a Regex using
-        # a compiled regular expression
-        import regex
-        parser = pp.Regex(regex.compile(r'[0-9]'))
-
-    """
-    def __init__(self, pattern, flags=0, asGroupList=False, asMatch=False):
-        """The parameters ``pattern`` and ``flags`` are passed
-        to the ``re.compile()`` function as-is. See the Python
-        `re module <https://docs.python.org/3/library/re.html>`_ for an
-        explanation of the acceptable patterns and flags.
-        """
-        super(Regex, self).__init__()
-
-        if isinstance(pattern, basestring):
-            if not pattern:
-                warnings.warn("null string passed to Regex; use Empty() instead",
-                              SyntaxWarning, stacklevel=2)
-
-            self.pattern = pattern
-            self.flags = flags
-
-            try:
-                self.re = re.compile(self.pattern, self.flags)
-                self.reString = self.pattern
-            except sre_constants.error:
-                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
-                              SyntaxWarning, stacklevel=2)
-                raise
-
-        elif hasattr(pattern, 'pattern') and hasattr(pattern, 'match'):
-            self.re = pattern
-            self.pattern = self.reString = pattern.pattern
-            self.flags = flags
-
-        else:
-            raise TypeError("Regex may only be constructed with a string or a compiled RE object")
-
-        self.re_match = self.re.match
-
-        self.name = _ustr(self)
-        self.errmsg = "Expected " + self.name
-        self.mayIndexError = False
-        self.mayReturnEmpty = self.re_match("") is not None
-        self.asGroupList = asGroupList
-        self.asMatch = asMatch
-        if self.asGroupList:
-            self.parseImpl = self.parseImplAsGroupList
-        if self.asMatch:
-            self.parseImpl = self.parseImplAsMatch
-
-    def parseImpl(self, instring, loc, doActions=True):
-        result = self.re_match(instring, loc)
-        if not result:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        loc = result.end()
-        ret = ParseResults(result.group())
-        d = result.groupdict()
-        if d:
-            for k, v in d.items():
-                ret[k] = v
-        return loc, ret
-
-    def parseImplAsGroupList(self, instring, loc, doActions=True):
-        result = self.re_match(instring, loc)
-        if not result:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        loc = result.end()
-        ret = result.groups()
-        return loc, ret
-
-    def parseImplAsMatch(self, instring, loc, doActions=True):
-        result = self.re_match(instring, loc)
-        if not result:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        loc = result.end()
-        ret = result
-        return loc, ret
-
-    def __str__(self):
-        try:
-            return super(Regex, self).__str__()
-        except Exception:
-            pass
-
-        if self.strRepr is None:
-            self.strRepr = "Re:(%s)" % repr(self.pattern)
-
-        return self.strRepr
-
-    def sub(self, repl):
-        r"""
-        Return Regex with an attached parse action to transform the parsed
-        result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_.
-
-        Example::
-
-            make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>")
-            print(make_html.transformString("h1:main title:"))
-            # prints "<h1>main title</h1>"
-        """
-        if self.asGroupList:
-            warnings.warn("cannot use sub() with Regex(asGroupList=True)",
-                          SyntaxWarning, stacklevel=2)
-            raise SyntaxError()
-
-        if self.asMatch and callable(repl):
-            warnings.warn("cannot use sub() with a callable with Regex(asMatch=True)",
-                          SyntaxWarning, stacklevel=2)
-            raise SyntaxError()
-
-        if self.asMatch:
-            def pa(tokens):
-                return tokens[0].expand(repl)
-        else:
-            def pa(tokens):
-                return self.re.sub(repl, tokens[0])
-        return self.addParseAction(pa)
-
-class QuotedString(Token):
-    r"""
-    Token for matching strings that are delimited by quoting characters.
-
-    Defined with the following parameters:
-
-     - quoteChar - string of one or more characters defining the
-       quote delimiting string
-     - escChar - character to escape quotes, typically backslash
-       (default= ``None``)
-     - escQuote - special quote sequence to escape an embedded quote
-       string (such as SQL's ``""`` to escape an embedded ``"``)
-       (default= ``None``)
-     - multiline - boolean indicating whether quotes can span
-       multiple lines (default= ``False``)
-     - unquoteResults - boolean indicating whether the matched text
-       should be unquoted (default= ``True``)
-     - endQuoteChar - string of one or more characters defining the
-       end of the quote delimited string (default= ``None`` => same as
-       quoteChar)
-     - convertWhitespaceEscapes - convert escaped whitespace
-       (``'\t'``, ``'\n'``, etc.) to actual whitespace
-       (default= ``True``)
-
-    Example::
-
-        qs = QuotedString('"')
-        print(qs.searchString('lsjdf "This is the quote" sldjf'))
-        complex_qs = QuotedString('{{', endQuoteChar='}}')
-        print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
-        sql_qs = QuotedString('"', escQuote='""')
-        print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
-
-    prints::
-
-        [['This is the quote']]
-        [['This is the "quote"']]
-        [['This is the quote with "embedded" quotes']]
-    """
-    def __init__(self, quoteChar, escChar=None, escQuote=None, multiline=False,
-                 unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
-        super(QuotedString, self).__init__()
-
-        # remove white space from quote chars - wont work anyway
-        quoteChar = quoteChar.strip()
-        if not quoteChar:
-            warnings.warn("quoteChar cannot be the empty string", SyntaxWarning, stacklevel=2)
-            raise SyntaxError()
-
-        if endQuoteChar is None:
-            endQuoteChar = quoteChar
-        else:
-            endQuoteChar = endQuoteChar.strip()
-            if not endQuoteChar:
-                warnings.warn("endQuoteChar cannot be the empty string", SyntaxWarning, stacklevel=2)
-                raise SyntaxError()
-
-        self.quoteChar = quoteChar
-        self.quoteCharLen = len(quoteChar)
-        self.firstQuoteChar = quoteChar[0]
-        self.endQuoteChar = endQuoteChar
-        self.endQuoteCharLen = len(endQuoteChar)
-        self.escChar = escChar
-        self.escQuote = escQuote
-        self.unquoteResults = unquoteResults
-        self.convertWhitespaceEscapes = convertWhitespaceEscapes
-
-        if multiline:
-            self.flags = re.MULTILINE | re.DOTALL
-            self.pattern = r'%s(?:[^%s%s]' % (re.escape(self.quoteChar),
-                                              _escapeRegexRangeChars(self.endQuoteChar[0]),
-                                              (escChar is not None and _escapeRegexRangeChars(escChar) or ''))
-        else:
-            self.flags = 0
-            self.pattern = r'%s(?:[^%s\n\r%s]' % (re.escape(self.quoteChar),
-                                                  _escapeRegexRangeChars(self.endQuoteChar[0]),
-                                                  (escChar is not None and _escapeRegexRangeChars(escChar) or ''))
-        if len(self.endQuoteChar) > 1:
-            self.pattern += (
-                '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
-                                                   _escapeRegexRangeChars(self.endQuoteChar[i]))
-                                      for i in range(len(self.endQuoteChar) - 1, 0, -1)) + ')')
-
-        if escQuote:
-            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
-        if escChar:
-            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
-            self.escCharReplacePattern = re.escape(self.escChar) + "(.)"
-        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
-
-        try:
-            self.re = re.compile(self.pattern, self.flags)
-            self.reString = self.pattern
-            self.re_match = self.re.match
-        except sre_constants.error:
-            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
-                          SyntaxWarning, stacklevel=2)
-            raise
-
-        self.name = _ustr(self)
-        self.errmsg = "Expected " + self.name
-        self.mayIndexError = False
-        self.mayReturnEmpty = True
-
-    def parseImpl(self, instring, loc, doActions=True):
-        result = instring[loc] == self.firstQuoteChar and self.re_match(instring, loc) or None
-        if not result:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        loc = result.end()
-        ret = result.group()
-
-        if self.unquoteResults:
-
-            # strip off quotes
-            ret = ret[self.quoteCharLen: -self.endQuoteCharLen]
-
-            if isinstance(ret, basestring):
-                # replace escaped whitespace
-                if '\\' in ret and self.convertWhitespaceEscapes:
-                    ws_map = {
-                        r'\t': '\t',
-                        r'\n': '\n',
-                        r'\f': '\f',
-                        r'\r': '\r',
-                    }
-                    for wslit, wschar in ws_map.items():
-                        ret = ret.replace(wslit, wschar)
-
-                # replace escaped characters
-                if self.escChar:
-                    ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
-
-                # replace escaped quotes
-                if self.escQuote:
-                    ret = ret.replace(self.escQuote, self.endQuoteChar)
-
-        return loc, ret
-
-    def __str__(self):
-        try:
-            return super(QuotedString, self).__str__()
-        except Exception:
-            pass
-
-        if self.strRepr is None:
-            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
-
-        return self.strRepr
-
-
-class CharsNotIn(Token):
-    """Token for matching words composed of characters *not* in a given
-    set (will include whitespace in matched characters if not listed in
-    the provided exclusion set - see example). Defined with string
-    containing all disallowed characters, and an optional minimum,
-    maximum, and/or exact length.  The default value for ``min`` is
-    1 (a minimum value < 1 is not valid); the default values for
-    ``max`` and ``exact`` are 0, meaning no maximum or exact
-    length restriction.
-
-    Example::
-
-        # define a comma-separated-value as anything that is not a ','
-        csv_value = CharsNotIn(',')
-        print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
-
-    prints::
-
-        ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
-    """
-    def __init__(self, notChars, min=1, max=0, exact=0):
-        super(CharsNotIn, self).__init__()
-        self.skipWhitespace = False
-        self.notChars = notChars
-
-        if min < 1:
-            raise ValueError("cannot specify a minimum length < 1; use "
-                             "Optional(CharsNotIn()) if zero-length char group is permitted")
-
-        self.minLen = min
-
-        if max > 0:
-            self.maxLen = max
-        else:
-            self.maxLen = _MAX_INT
-
-        if exact > 0:
-            self.maxLen = exact
-            self.minLen = exact
-
-        self.name = _ustr(self)
-        self.errmsg = "Expected " + self.name
-        self.mayReturnEmpty = (self.minLen == 0)
-        self.mayIndexError = False
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if instring[loc] in self.notChars:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        start = loc
-        loc += 1
-        notchars = self.notChars
-        maxlen = min(start + self.maxLen, len(instring))
-        while loc < maxlen and instring[loc] not in notchars:
-            loc += 1
-
-        if loc - start < self.minLen:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        return loc, instring[start:loc]
-
-    def __str__(self):
-        try:
-            return super(CharsNotIn, self).__str__()
-        except Exception:
-            pass
-
-        if self.strRepr is None:
-            if len(self.notChars) > 4:
-                self.strRepr = "!W:(%s...)" % self.notChars[:4]
-            else:
-                self.strRepr = "!W:(%s)" % self.notChars
-
-        return self.strRepr
-
-class White(Token):
-    """Special matching class for matching whitespace.  Normally,
-    whitespace is ignored by pyparsing grammars.  This class is included
-    when some whitespace structures are significant.  Define with
-    a string containing the whitespace characters to be matched; default
-    is ``" \\t\\r\\n"``.  Also takes optional ``min``,
-    ``max``, and ``exact`` arguments, as defined for the
-    :class:`Word` class.
-    """
-    whiteStrs = {
-        ' ' : '<SP>',
-        '\t': '<TAB>',
-        '\n': '<LF>',
-        '\r': '<CR>',
-        '\f': '<FF>',
-        u'\u00A0': '<NBSP>',
-        u'\u1680': '<OGHAM_SPACE_MARK>',
-        u'\u180E': '<MONGOLIAN_VOWEL_SEPARATOR>',
-        u'\u2000': '<EN_QUAD>',
-        u'\u2001': '<EM_QUAD>',
-        u'\u2002': '<EN_SPACE>',
-        u'\u2003': '<EM_SPACE>',
-        u'\u2004': '<THREE-PER-EM_SPACE>',
-        u'\u2005': '<FOUR-PER-EM_SPACE>',
-        u'\u2006': '<SIX-PER-EM_SPACE>',
-        u'\u2007': '<FIGURE_SPACE>',
-        u'\u2008': '<PUNCTUATION_SPACE>',
-        u'\u2009': '<THIN_SPACE>',
-        u'\u200A': '<HAIR_SPACE>',
-        u'\u200B': '<ZERO_WIDTH_SPACE>',
-        u'\u202F': '<NNBSP>',
-        u'\u205F': '<MMSP>',
-        u'\u3000': '<IDEOGRAPHIC_SPACE>',
-        }
-    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
-        super(White, self).__init__()
-        self.matchWhite = ws
-        self.setWhitespaceChars("".join(c for c in self.whiteChars if c not in self.matchWhite))
-        # ~ self.leaveWhitespace()
-        self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
-        self.mayReturnEmpty = True
-        self.errmsg = "Expected " + self.name
-
-        self.minLen = min
-
-        if max > 0:
-            self.maxLen = max
-        else:
-            self.maxLen = _MAX_INT
-
-        if exact > 0:
-            self.maxLen = exact
-            self.minLen = exact
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if instring[loc] not in self.matchWhite:
-            raise ParseException(instring, loc, self.errmsg, self)
-        start = loc
-        loc += 1
-        maxloc = start + self.maxLen
-        maxloc = min(maxloc, len(instring))
-        while loc < maxloc and instring[loc] in self.matchWhite:
-            loc += 1
-
-        if loc - start < self.minLen:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        return loc, instring[start:loc]
-
-
-class _PositionToken(Token):
-    def __init__(self):
-        super(_PositionToken, self).__init__()
-        self.name = self.__class__.__name__
-        self.mayReturnEmpty = True
-        self.mayIndexError = False
-
-class GoToColumn(_PositionToken):
-    """Token to advance to a specific column of input text; useful for
-    tabular report scraping.
-    """
-    def __init__(self, colno):
-        super(GoToColumn, self).__init__()
-        self.col = colno
-
-    def preParse(self, instring, loc):
-        if col(loc, instring) != self.col:
-            instrlen = len(instring)
-            if self.ignoreExprs:
-                loc = self._skipIgnorables(instring, loc)
-            while loc < instrlen and instring[loc].isspace() and col(loc, instring) != self.col:
-                loc += 1
-        return loc
-
-    def parseImpl(self, instring, loc, doActions=True):
-        thiscol = col(loc, instring)
-        if thiscol > self.col:
-            raise ParseException(instring, loc, "Text not in expected column", self)
-        newloc = loc + self.col - thiscol
-        ret = instring[loc: newloc]
-        return newloc, ret
-
-
-class LineStart(_PositionToken):
-    r"""Matches if current position is at the beginning of a line within
-    the parse string
-
-    Example::
-
-        test = '''\
-        AAA this line
-        AAA and this line
-          AAA but not this one
-        B AAA and definitely not this one
-        '''
-
-        for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
-            print(t)
-
-    prints::
-
-        ['AAA', ' this line']
-        ['AAA', ' and this line']
-
-    """
-    def __init__(self):
-        super(LineStart, self).__init__()
-        self.errmsg = "Expected start of line"
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if col(loc, instring) == 1:
-            return loc, []
-        raise ParseException(instring, loc, self.errmsg, self)
-
-class LineEnd(_PositionToken):
-    """Matches if current position is at the end of a line within the
-    parse string
-    """
-    def __init__(self):
-        super(LineEnd, self).__init__()
-        self.setWhitespaceChars(ParserElement.DEFAULT_WHITE_CHARS.replace("\n", ""))
-        self.errmsg = "Expected end of line"
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if loc < len(instring):
-            if instring[loc] == "\n":
-                return loc + 1, "\n"
-            else:
-                raise ParseException(instring, loc, self.errmsg, self)
-        elif loc == len(instring):
-            return loc + 1, []
-        else:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-class StringStart(_PositionToken):
-    """Matches if current position is at the beginning of the parse
-    string
-    """
-    def __init__(self):
-        super(StringStart, self).__init__()
-        self.errmsg = "Expected start of text"
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if loc != 0:
-            # see if entire string up to here is just whitespace and ignoreables
-            if loc != self.preParse(instring, 0):
-                raise ParseException(instring, loc, self.errmsg, self)
-        return loc, []
-
-class StringEnd(_PositionToken):
-    """Matches if current position is at the end of the parse string
-    """
-    def __init__(self):
-        super(StringEnd, self).__init__()
-        self.errmsg = "Expected end of text"
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if loc < len(instring):
-            raise ParseException(instring, loc, self.errmsg, self)
-        elif loc == len(instring):
-            return loc + 1, []
-        elif loc > len(instring):
-            return loc, []
-        else:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-class WordStart(_PositionToken):
-    """Matches if the current position is at the beginning of a Word,
-    and is not preceded by any character in a given set of
-    ``wordChars`` (default= ``printables``). To emulate the
-    ``\b`` behavior of regular expressions, use
-    ``WordStart(alphanums)``. ``WordStart`` will also match at
-    the beginning of the string being parsed, or at the beginning of
-    a line.
-    """
-    def __init__(self, wordChars=printables):
-        super(WordStart, self).__init__()
-        self.wordChars = set(wordChars)
-        self.errmsg = "Not at the start of a word"
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if loc != 0:
-            if (instring[loc - 1] in self.wordChars
-                    or instring[loc] not in self.wordChars):
-                raise ParseException(instring, loc, self.errmsg, self)
-        return loc, []
-
-class WordEnd(_PositionToken):
-    """Matches if the current position is at the end of a Word, and is
-    not followed by any character in a given set of ``wordChars``
-    (default= ``printables``). To emulate the ``\b`` behavior of
-    regular expressions, use ``WordEnd(alphanums)``. ``WordEnd``
-    will also match at the end of the string being parsed, or at the end
-    of a line.
-    """
-    def __init__(self, wordChars=printables):
-        super(WordEnd, self).__init__()
-        self.wordChars = set(wordChars)
-        self.skipWhitespace = False
-        self.errmsg = "Not at the end of a word"
-
-    def parseImpl(self, instring, loc, doActions=True):
-        instrlen = len(instring)
-        if instrlen > 0 and loc < instrlen:
-            if (instring[loc] in self.wordChars or
-                    instring[loc - 1] not in self.wordChars):
-                raise ParseException(instring, loc, self.errmsg, self)
-        return loc, []
-
-
-class ParseExpression(ParserElement):
-    """Abstract subclass of ParserElement, for combining and
-    post-processing parsed tokens.
-    """
-    def __init__(self, exprs, savelist=False):
-        super(ParseExpression, self).__init__(savelist)
-        if isinstance(exprs, _generatorType):
-            exprs = list(exprs)
-
-        if isinstance(exprs, basestring):
-            self.exprs = [self._literalStringClass(exprs)]
-        elif isinstance(exprs, ParserElement):
-            self.exprs = [exprs]
-        elif isinstance(exprs, Iterable):
-            exprs = list(exprs)
-            # if sequence of strings provided, wrap with Literal
-            if any(isinstance(expr, basestring) for expr in exprs):
-                exprs = (self._literalStringClass(e) if isinstance(e, basestring) else e for e in exprs)
-            self.exprs = list(exprs)
-        else:
-            try:
-                self.exprs = list(exprs)
-            except TypeError:
-                self.exprs = [exprs]
-        self.callPreparse = False
-
-    def append(self, other):
-        self.exprs.append(other)
-        self.strRepr = None
-        return self
-
-    def leaveWhitespace(self):
-        """Extends ``leaveWhitespace`` defined in base class, and also invokes ``leaveWhitespace`` on
-           all contained expressions."""
-        self.skipWhitespace = False
-        self.exprs = [e.copy() for e in self.exprs]
-        for e in self.exprs:
-            e.leaveWhitespace()
-        return self
-
-    def ignore(self, other):
-        if isinstance(other, Suppress):
-            if other not in self.ignoreExprs:
-                super(ParseExpression, self).ignore(other)
-                for e in self.exprs:
-                    e.ignore(self.ignoreExprs[-1])
-        else:
-            super(ParseExpression, self).ignore(other)
-            for e in self.exprs:
-                e.ignore(self.ignoreExprs[-1])
-        return self
-
-    def __str__(self):
-        try:
-            return super(ParseExpression, self).__str__()
-        except Exception:
-            pass
-
-        if self.strRepr is None:
-            self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.exprs))
-        return self.strRepr
-
-    def streamline(self):
-        super(ParseExpression, self).streamline()
-
-        for e in self.exprs:
-            e.streamline()
-
-        # collapse nested And's of the form And(And(And(a, b), c), d) to And(a, b, c, d)
-        # but only if there are no parse actions or resultsNames on the nested And's
-        # (likewise for Or's and MatchFirst's)
-        if len(self.exprs) == 2:
-            other = self.exprs[0]
-            if (isinstance(other, self.__class__)
-                    and not other.parseAction
-                    and other.resultsName is None
-                    and not other.debug):
-                self.exprs = other.exprs[:] + [self.exprs[1]]
-                self.strRepr = None
-                self.mayReturnEmpty |= other.mayReturnEmpty
-                self.mayIndexError |= other.mayIndexError
-
-            other = self.exprs[-1]
-            if (isinstance(other, self.__class__)
-                    and not other.parseAction
-                    and other.resultsName is None
-                    and not other.debug):
-                self.exprs = self.exprs[:-1] + other.exprs[:]
-                self.strRepr = None
-                self.mayReturnEmpty |= other.mayReturnEmpty
-                self.mayIndexError |= other.mayIndexError
-
-        self.errmsg = "Expected " + _ustr(self)
-
-        return self
-
-    def validate(self, validateTrace=None):
-        tmp = (validateTrace if validateTrace is not None else [])[:] + [self]
-        for e in self.exprs:
-            e.validate(tmp)
-        self.checkRecursion([])
-
-    def copy(self):
-        ret = super(ParseExpression, self).copy()
-        ret.exprs = [e.copy() for e in self.exprs]
-        return ret
-
-    def _setResultsName(self, name, listAllMatches=False):
-        if __diag__.warn_ungrouped_named_tokens_in_collection:
-            for e in self.exprs:
-                if isinstance(e, ParserElement) and e.resultsName:
-                    warnings.warn("{0}: setting results name {1!r} on {2} expression "
-                                  "collides with {3!r} on contained expression".format("warn_ungrouped_named_tokens_in_collection",
-                                                                                       name,
-                                                                                       type(self).__name__,
-                                                                                       e.resultsName),
-                                  stacklevel=3)
-
-        return super(ParseExpression, self)._setResultsName(name, listAllMatches)
-
-
-class And(ParseExpression):
-    """
-    Requires all given
:class:`ParseExpression` s to be found in the given order. - Expressions may be separated by whitespace. - May be constructed using the ``'+'`` operator. - May also be constructed using the ``'-'`` operator, which will - suppress backtracking. - - Example:: - - integer = Word(nums) - name_expr = OneOrMore(Word(alphas)) - - expr = And([integer("id"), name_expr("name"), integer("age")]) - # more easily written as: - expr = integer("id") + name_expr("name") + integer("age") - """ - - class _ErrorStop(Empty): - def __init__(self, *args, **kwargs): - super(And._ErrorStop, self).__init__(*args, **kwargs) - self.name = '-' - self.leaveWhitespace() - - def __init__(self, exprs, savelist=True): - exprs = list(exprs) - if exprs and Ellipsis in exprs: - tmp = [] - for i, expr in enumerate(exprs): - if expr is Ellipsis: - if i < len(exprs) - 1: - skipto_arg = (Empty() + exprs[i + 1]).exprs[-1] - tmp.append(SkipTo(skipto_arg)("_skipped*")) - else: - raise Exception("cannot construct And with sequence ending in ...") - else: - tmp.append(expr) - exprs[:] = tmp - super(And, self).__init__(exprs, savelist) - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - self.setWhitespaceChars(self.exprs[0].whiteChars) - self.skipWhitespace = self.exprs[0].skipWhitespace - self.callPreparse = True - - def streamline(self): - # collapse any _PendingSkip's - if self.exprs: - if any(isinstance(e, ParseExpression) and e.exprs and isinstance(e.exprs[-1], _PendingSkip) - for e in self.exprs[:-1]): - for i, e in enumerate(self.exprs[:-1]): - if e is None: - continue - if (isinstance(e, ParseExpression) - and e.exprs and isinstance(e.exprs[-1], _PendingSkip)): - e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1] - self.exprs[i + 1] = None - self.exprs = [e for e in self.exprs if e is not None] - - super(And, self).streamline() - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - return self - - def parseImpl(self, instring, loc, doActions=True): - # pass False as last arg to _parse for first element, since we already - # pre-parsed the string as part of our And pre-parsing - loc, resultlist = self.exprs[0]._parse(instring, loc, doActions, callPreParse=False) - errorStop = False - for e in self.exprs[1:]: - if isinstance(e, And._ErrorStop): - errorStop = True - continue - if errorStop: - try: - loc, exprtokens = e._parse(instring, loc, doActions) - except ParseSyntaxException: - raise - except ParseBaseException as pe: - pe.__traceback__ = None - raise ParseSyntaxException._from_exception(pe) - except IndexError: - raise ParseSyntaxException(instring, len(instring), self.errmsg, self) - else: - loc, exprtokens = e._parse(instring, loc, doActions) - if exprtokens or exprtokens.haskeys(): - resultlist += exprtokens - return loc, resultlist - - def __iadd__(self, other): - if isinstance(other, basestring): - other = self._literalStringClass(other) - return self.append(other) # And([self, other]) - - def checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.exprs: - e.checkRecursion(subRecCheckList) - if not e.mayReturnEmpty: - break - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - -class Or(ParseExpression): - """Requires that at least one :class:`ParseExpression` is found. If - two expressions match, the expression that matches the longest - string will be used. May be constructed using the ``'^'`` - operator. 
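For context: the ``WordStart``/``WordEnd`` position tokens deleted above carry no usage example in their docstrings. A minimal sketch of the word-boundary behavior they implement, written against the public pyparsing 2.x API that this vendored copy mirrors (variable names are illustrative only)::

    from pyparsing import Word, WordEnd, alphanums, nums

    # accept a run of digits only when it ends at a word boundary
    integer = Word(nums) + WordEnd(alphanums)

    print(integer.parseString("123 abc"))  # -> ['123']
    # integer.parseString("123abc") raises ParseException: the '123'
    # inside '123abc' is glued to further word characters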
- - Example:: - - # construct Or using '^' operator - - number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums)) - print(number.searchString("123 3.1416 789")) - - prints:: - - [['123'], ['3.1416'], ['789']] - """ - def __init__(self, exprs, savelist=False): - super(Or, self).__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - else: - self.mayReturnEmpty = True - - def streamline(self): - super(Or, self).streamline() - if __compat__.collect_all_And_tokens: - self.saveAsList = any(e.saveAsList for e in self.exprs) - return self - - def parseImpl(self, instring, loc, doActions=True): - maxExcLoc = -1 - maxException = None - matches = [] - for e in self.exprs: - try: - loc2 = e.tryParse(instring, loc) - except ParseException as err: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException(instring, len(instring), e.errmsg, self) - maxExcLoc = len(instring) - else: - # save match among all matches, to retry longest to shortest - matches.append((loc2, e)) - - if matches: - # re-evaluate all matches in descending order of length of match, in case attached actions - # might change whether or how much they match of the input. - matches.sort(key=itemgetter(0), reverse=True) - - if not doActions: - # no further conditions or parse actions to change the selection of - # alternative, so the first match will be the best match - best_expr = matches[0][1] - return best_expr._parse(instring, loc, doActions) - - longest = -1, None - for loc1, expr1 in matches: - if loc1 <= longest[0]: - # already have a longer match than this one will deliver, we are done - return longest - - try: - loc2, toks = expr1._parse(instring, loc, doActions) - except ParseException as err: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - else: - if loc2 >= loc1: - return loc2, toks - # didn't match as much as before - elif loc2 > longest[0]: - longest = loc2, toks - - if longest != (-1, None): - return longest - - if maxException is not None: - maxException.msg = self.errmsg - raise maxException - else: - raise ParseException(instring, loc, "no defined alternatives to match", self) - - - def __ixor__(self, other): - if isinstance(other, basestring): - other = self._literalStringClass(other) - return self.append(other) # Or([self, other]) - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - def checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.exprs: - e.checkRecursion(subRecCheckList) - - def _setResultsName(self, name, listAllMatches=False): - if (not __compat__.collect_all_And_tokens - and __diag__.warn_multiple_tokens_in_named_alternation): - if any(isinstance(e, And) for e in self.exprs): - warnings.warn("{0}: setting results name {1!r} on {2} expression " - "may only return a single token for an And alternative, " - "in future will return the full list of tokens".format( - "warn_multiple_tokens_in_named_alternation", name, type(self).__name__), - stacklevel=3) - - return super(Or, self)._setResultsName(name, listAllMatches) - - -class MatchFirst(ParseExpression): - """Requires that at least one :class:`ParseExpression` is found. 
If - two expressions match, the first one listed is the one that will - match. May be constructed using the ``'|'`` operator. - - Example:: - - # construct MatchFirst using '|' operator - - # watch the order of expressions to match - number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) - print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] - - # put more selective expression first - number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums) - print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] - """ - def __init__(self, exprs, savelist=False): - super(MatchFirst, self).__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - else: - self.mayReturnEmpty = True - - def streamline(self): - super(MatchFirst, self).streamline() - if __compat__.collect_all_And_tokens: - self.saveAsList = any(e.saveAsList for e in self.exprs) - return self - - def parseImpl(self, instring, loc, doActions=True): - maxExcLoc = -1 - maxException = None - for e in self.exprs: - try: - ret = e._parse(instring, loc, doActions) - return ret - except ParseException as err: - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException(instring, len(instring), e.errmsg, self) - maxExcLoc = len(instring) - - # only got here if no expression matched, raise exception for match that made it the furthest - else: - if maxException is not None: - maxException.msg = self.errmsg - raise maxException - else: - raise ParseException(instring, loc, "no defined alternatives to match", self) - - def __ior__(self, other): - if isinstance(other, basestring): - other = self._literalStringClass(other) - return self.append(other) # MatchFirst([self, other]) - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - def checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.exprs: - e.checkRecursion(subRecCheckList) - - def _setResultsName(self, name, listAllMatches=False): - if (not __compat__.collect_all_And_tokens - and __diag__.warn_multiple_tokens_in_named_alternation): - if any(isinstance(e, And) for e in self.exprs): - warnings.warn("{0}: setting results name {1!r} on {2} expression " - "may only return a single token for an And alternative, " - "in future will return the full list of tokens".format( - "warn_multiple_tokens_in_named_alternation", name, type(self).__name__), - stacklevel=3) - - return super(MatchFirst, self)._setResultsName(name, listAllMatches) - - -class Each(ParseExpression): - """Requires all given :class:`ParseExpression` s to be found, but in - any order. Expressions may be separated by whitespace. - - May be constructed using the ``'&'`` operator. 
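For context on the two alternation classes deleted above: ``Or`` evaluates every alternative and keeps the longest match, while ``MatchFirst`` commits to the first alternative that matches at all. A minimal sketch against the public pyparsing 2.x API (illustrative names, not part of the removed source)::

    from pyparsing import Combine, Word, nums

    int_num = Word(nums)
    real_num = Combine(Word(nums) + "." + Word(nums))

    print((int_num ^ real_num).parseString("3.1416"))  # Or -> ['3.1416']
    print((int_num | real_num).parseString("3.1416"))  # MatchFirst -> ['3']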
- - Example:: - - color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") - shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") - integer = Word(nums) - shape_attr = "shape:" + shape_type("shape") - posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") - color_attr = "color:" + color("color") - size_attr = "size:" + integer("size") - - # use Each (using operator '&') to accept attributes in any order - # (shape and posn are required, color and size are optional) - shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr) - - shape_spec.runTests(''' - shape: SQUARE color: BLACK posn: 100, 120 - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - color:GREEN size:20 shape:TRIANGLE posn:20,40 - ''' - ) - - prints:: - - shape: SQUARE color: BLACK posn: 100, 120 - ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] - - color: BLACK - - posn: ['100', ',', '120'] - - x: 100 - - y: 120 - - shape: SQUARE - - - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] - - color: BLUE - - posn: ['50', ',', '80'] - - x: 50 - - y: 80 - - shape: CIRCLE - - size: 50 - - - color: GREEN size: 20 shape: TRIANGLE posn: 20,40 - ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] - - color: GREEN - - posn: ['20', ',', '40'] - - x: 20 - - y: 40 - - shape: TRIANGLE - - size: 20 - """ - def __init__(self, exprs, savelist=True): - super(Each, self).__init__(exprs, savelist) - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - self.skipWhitespace = True - self.initExprGroups = True - self.saveAsList = True - - def streamline(self): - super(Each, self).streamline() - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - return self - - def parseImpl(self, instring, loc, doActions=True): - if self.initExprGroups: - self.opt1map = dict((id(e.expr), e) for e in self.exprs if isinstance(e, Optional)) - opt1 = [e.expr for e in self.exprs if isinstance(e, Optional)] - opt2 = [e for e in self.exprs if e.mayReturnEmpty and not isinstance(e, (Optional, Regex))] - self.optionals = opt1 + opt2 - self.multioptionals = [e.expr for e in self.exprs if isinstance(e, ZeroOrMore)] - self.multirequired = [e.expr for e in self.exprs if isinstance(e, OneOrMore)] - self.required = [e for e in self.exprs if not isinstance(e, (Optional, ZeroOrMore, OneOrMore))] - self.required += self.multirequired - self.initExprGroups = False - tmpLoc = loc - tmpReqd = self.required[:] - tmpOpt = self.optionals[:] - matchOrder = [] - - keepMatching = True - while keepMatching: - tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired - failed = [] - for e in tmpExprs: - try: - tmpLoc = e.tryParse(instring, tmpLoc) - except ParseException: - failed.append(e) - else: - matchOrder.append(self.opt1map.get(id(e), e)) - if e in tmpReqd: - tmpReqd.remove(e) - elif e in tmpOpt: - tmpOpt.remove(e) - if len(failed) == len(tmpExprs): - keepMatching = False - - if tmpReqd: - missing = ", ".join(_ustr(e) for e in tmpReqd) - raise ParseException(instring, loc, "Missing one or more required elements (%s)" % missing) - - # add any unmatched Optionals, in case they have default values defined - matchOrder += [e for e in self.exprs if isinstance(e, Optional) and e.expr in tmpOpt] - - resultlist = [] - for e in matchOrder: - loc, results = e._parse(instring, loc, doActions) - resultlist.append(results) - - finalResults = sum(resultlist, 
ParseResults([])) - return loc, finalResults - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - def checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.exprs: - e.checkRecursion(subRecCheckList) - - -class ParseElementEnhance(ParserElement): - """Abstract subclass of :class:`ParserElement`, for combining and - post-processing parsed tokens. - """ - def __init__(self, expr, savelist=False): - super(ParseElementEnhance, self).__init__(savelist) - if isinstance(expr, basestring): - if issubclass(self._literalStringClass, Token): - expr = self._literalStringClass(expr) - else: - expr = self._literalStringClass(Literal(expr)) - self.expr = expr - self.strRepr = None - if expr is not None: - self.mayIndexError = expr.mayIndexError - self.mayReturnEmpty = expr.mayReturnEmpty - self.setWhitespaceChars(expr.whiteChars) - self.skipWhitespace = expr.skipWhitespace - self.saveAsList = expr.saveAsList - self.callPreparse = expr.callPreparse - self.ignoreExprs.extend(expr.ignoreExprs) - - def parseImpl(self, instring, loc, doActions=True): - if self.expr is not None: - return self.expr._parse(instring, loc, doActions, callPreParse=False) - else: - raise ParseException("", loc, self.errmsg, self) - - def leaveWhitespace(self): - self.skipWhitespace = False - self.expr = self.expr.copy() - if self.expr is not None: - self.expr.leaveWhitespace() - return self - - def ignore(self, other): - if isinstance(other, Suppress): - if other not in self.ignoreExprs: - super(ParseElementEnhance, self).ignore(other) - if self.expr is not None: - self.expr.ignore(self.ignoreExprs[-1]) - else: - super(ParseElementEnhance, self).ignore(other) - if self.expr is not None: - self.expr.ignore(self.ignoreExprs[-1]) - return self - - def streamline(self): - super(ParseElementEnhance, self).streamline() - if self.expr is not None: - self.expr.streamline() - return self - - def checkRecursion(self, parseElementList): - if self in parseElementList: - raise RecursiveGrammarException(parseElementList + [self]) - subRecCheckList = parseElementList[:] + [self] - if self.expr is not None: - self.expr.checkRecursion(subRecCheckList) - - def validate(self, validateTrace=None): - if validateTrace is None: - validateTrace = [] - tmp = validateTrace[:] + [self] - if self.expr is not None: - self.expr.validate(tmp) - self.checkRecursion([]) - - def __str__(self): - try: - return super(ParseElementEnhance, self).__str__() - except Exception: - pass - - if self.strRepr is None and self.expr is not None: - self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.expr)) - return self.strRepr - - -class FollowedBy(ParseElementEnhance): - """Lookahead matching of the given parse expression. - ``FollowedBy`` does *not* advance the parsing position within - the input string, it only verifies that the specified parse - expression matches at the current position. ``FollowedBy`` - always returns a null token list. If any results names are defined - in the lookahead expression, those *will* be returned for access by - name. 
- - Example:: - - # use FollowedBy to match a label only if it is followed by a ':' - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) - - OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint() - - prints:: - - [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] - """ - def __init__(self, expr): - super(FollowedBy, self).__init__(expr) - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - # by using self._expr.parse and deleting the contents of the returned ParseResults list - # we keep any named results that were defined in the FollowedBy expression - _, ret = self.expr._parse(instring, loc, doActions=doActions) - del ret[:] - - return loc, ret - - -class PrecededBy(ParseElementEnhance): - """Lookbehind matching of the given parse expression. - ``PrecededBy`` does not advance the parsing position within the - input string, it only verifies that the specified parse expression - matches prior to the current position. ``PrecededBy`` always - returns a null token list, but if a results name is defined on the - given expression, it is returned. - - Parameters: - - - expr - expression that must match prior to the current parse - location - - retreat - (default= ``None``) - (int) maximum number of characters - to lookbehind prior to the current parse location - - If the lookbehind expression is a string, Literal, Keyword, or - a Word or CharsNotIn with a specified exact or maximum length, then - the retreat parameter is not required. Otherwise, retreat must be - specified to give a maximum number of characters to look back from - the current parse position for a lookbehind match. 
- - Example:: - - # VB-style variable names with type prefixes - int_var = PrecededBy("#") + pyparsing_common.identifier - str_var = PrecededBy("$") + pyparsing_common.identifier - - """ - def __init__(self, expr, retreat=None): - super(PrecededBy, self).__init__(expr) - self.expr = self.expr().leaveWhitespace() - self.mayReturnEmpty = True - self.mayIndexError = False - self.exact = False - if isinstance(expr, str): - retreat = len(expr) - self.exact = True - elif isinstance(expr, (Literal, Keyword)): - retreat = expr.matchLen - self.exact = True - elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT: - retreat = expr.maxLen - self.exact = True - elif isinstance(expr, _PositionToken): - retreat = 0 - self.exact = True - self.retreat = retreat - self.errmsg = "not preceded by " + str(expr) - self.skipWhitespace = False - self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None))) - - def parseImpl(self, instring, loc=0, doActions=True): - if self.exact: - if loc < self.retreat: - raise ParseException(instring, loc, self.errmsg) - start = loc - self.retreat - _, ret = self.expr._parse(instring, start) - else: - # retreat specified a maximum lookbehind window, iterate - test_expr = self.expr + StringEnd() - instring_slice = instring[max(0, loc - self.retreat):loc] - last_expr = ParseException(instring, loc, self.errmsg) - for offset in range(1, min(loc, self.retreat + 1)+1): - try: - # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:])) - _, ret = test_expr._parse(instring_slice, len(instring_slice) - offset) - except ParseBaseException as pbe: - last_expr = pbe - else: - break - else: - raise last_expr - return loc, ret - - -class NotAny(ParseElementEnhance): - """Lookahead to disallow matching with the given parse expression. - ``NotAny`` does *not* advance the parsing position within the - input string, it only verifies that the specified parse expression - does *not* match at the current position. Also, ``NotAny`` does - *not* skip over leading whitespace. ``NotAny`` always returns - a null token list. May be constructed using the '~' operator. - - Example:: - - AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split()) - - # take care not to mistake keywords for identifiers - ident = ~(AND | OR | NOT) + Word(alphas) - boolean_term = Optional(NOT) + ident - - # very crude boolean expression - to support parenthesis groups and - # operation hierarchy, use infixNotation - boolean_expr = boolean_term + ZeroOrMore((AND | OR) + boolean_term) - - # integers that are followed by "." 
are actually floats - integer = Word(nums) + ~Char(".") - """ - def __init__(self, expr): - super(NotAny, self).__init__(expr) - # ~ self.leaveWhitespace() - self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs - self.mayReturnEmpty = True - self.errmsg = "Found unwanted token, " + _ustr(self.expr) - - def parseImpl(self, instring, loc, doActions=True): - if self.expr.canParseNext(instring, loc): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "~{" + _ustr(self.expr) + "}" - - return self.strRepr - -class _MultipleMatch(ParseElementEnhance): - def __init__(self, expr, stopOn=None): - super(_MultipleMatch, self).__init__(expr) - self.saveAsList = True - ender = stopOn - if isinstance(ender, basestring): - ender = self._literalStringClass(ender) - self.stopOn(ender) - - def stopOn(self, ender): - if isinstance(ender, basestring): - ender = self._literalStringClass(ender) - self.not_ender = ~ender if ender is not None else None - return self - - def parseImpl(self, instring, loc, doActions=True): - self_expr_parse = self.expr._parse - self_skip_ignorables = self._skipIgnorables - check_ender = self.not_ender is not None - if check_ender: - try_not_ender = self.not_ender.tryParse - - # must be at least one (but first see if we are the stopOn sentinel; - # if so, fail) - if check_ender: - try_not_ender(instring, loc) - loc, tokens = self_expr_parse(instring, loc, doActions, callPreParse=False) - try: - hasIgnoreExprs = (not not self.ignoreExprs) - while 1: - if check_ender: - try_not_ender(instring, loc) - if hasIgnoreExprs: - preloc = self_skip_ignorables(instring, loc) - else: - preloc = loc - loc, tmptokens = self_expr_parse(instring, preloc, doActions) - if tmptokens or tmptokens.haskeys(): - tokens += tmptokens - except (ParseException, IndexError): - pass - - return loc, tokens - - def _setResultsName(self, name, listAllMatches=False): - if __diag__.warn_ungrouped_named_tokens_in_collection: - for e in [self.expr] + getattr(self.expr, 'exprs', []): - if isinstance(e, ParserElement) and e.resultsName: - warnings.warn("{0}: setting results name {1!r} on {2} expression " - "collides with {3!r} on contained expression".format("warn_ungrouped_named_tokens_in_collection", - name, - type(self).__name__, - e.resultsName), - stacklevel=3) - - return super(_MultipleMatch, self)._setResultsName(name, listAllMatches) - - -class OneOrMore(_MultipleMatch): - """Repetition of one or more of the given expression. - - Parameters: - - expr - expression that must match one or more times - - stopOn - (default= ``None``) - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) - - Example:: - - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) - - text = "shape: SQUARE posn: upper left color: BLACK" - OneOrMore(attr_expr).parseString(text).pprint() # Fail! 
read 'color' as data instead of next label -> [['shape', 'SQUARE color']] - - # use stopOn attribute for OneOrMore to avoid reading label string as part of the data - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) - OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] - - # could also be written as - (attr_expr * (1,)).parseString(text).pprint() - """ - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + _ustr(self.expr) + "}..." - - return self.strRepr - -class ZeroOrMore(_MultipleMatch): - """Optional repetition of zero or more of the given expression. - - Parameters: - - expr - expression that must match zero or more times - - stopOn - (default= ``None``) - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) - - Example: similar to :class:`OneOrMore` - """ - def __init__(self, expr, stopOn=None): - super(ZeroOrMore, self).__init__(expr, stopOn=stopOn) - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - try: - return super(ZeroOrMore, self).parseImpl(instring, loc, doActions) - except (ParseException, IndexError): - return loc, [] - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "[" + _ustr(self.expr) + "]..." - - return self.strRepr - - -class _NullToken(object): - def __bool__(self): - return False - __nonzero__ = __bool__ - def __str__(self): - return "" - -class Optional(ParseElementEnhance): - """Optional matching of the given expression. - - Parameters: - - expr - expression that must match zero or more times - - default (optional) - value to be returned if the optional expression is not found. - - Example:: - - # US postal code can be a 5-digit zip, plus optional 4-digit qualifier - zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4))) - zip.runTests(''' - # traditional ZIP code - 12345 - - # ZIP+4 form - 12101-0001 - - # invalid ZIP - 98765- - ''') - - prints:: - - # traditional ZIP code - 12345 - ['12345'] - - # ZIP+4 form - 12101-0001 - ['12101-0001'] - - # invalid ZIP - 98765- - ^ - FAIL: Expected end of text (at char 5), (line:1, col:6) - """ - __optionalNotMatched = _NullToken() - - def __init__(self, expr, default=__optionalNotMatched): - super(Optional, self).__init__(expr, savelist=False) - self.saveAsList = self.expr.saveAsList - self.defaultValue = default - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - try: - loc, tokens = self.expr._parse(instring, loc, doActions, callPreParse=False) - except (ParseException, IndexError): - if self.defaultValue is not self.__optionalNotMatched: - if self.expr.resultsName: - tokens = ParseResults([self.defaultValue]) - tokens[self.expr.resultsName] = self.defaultValue - else: - tokens = [self.defaultValue] - else: - tokens = [] - return loc, tokens - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "[" + _ustr(self.expr) + "]" - - return self.strRepr - -class SkipTo(ParseElementEnhance): - """Token for skipping over all undefined text until the matched - expression is found. 
- - Parameters: - - expr - target expression marking the end of the data to be skipped - - include - (default= ``False``) if True, the target expression is also parsed - (the skipped text and target expression are returned as a 2-element list). - - ignore - (default= ``None``) used to define grammars (typically quoted strings and - comments) that might contain false matches to the target expression - - failOn - (default= ``None``) define expressions that are not allowed to be - included in the skipped test; if found before the target expression is found, - the SkipTo is not a match - - Example:: - - report = ''' - Outstanding Issues Report - 1 Jan 2000 - - # | Severity | Description | Days Open - -----+----------+-------------------------------------------+----------- - 101 | Critical | Intermittent system crash | 6 - 94 | Cosmetic | Spelling error on Login ('log|n') | 14 - 79 | Minor | System slow when running too many reports | 47 - ''' - integer = Word(nums) - SEP = Suppress('|') - # use SkipTo to simply match everything up until the next SEP - # - ignore quoted strings, so that a '|' character inside a quoted string does not match - # - parse action will call token.strip() for each matched token, i.e., the description body - string_data = SkipTo(SEP, ignore=quotedString) - string_data.setParseAction(tokenMap(str.strip)) - ticket_expr = (integer("issue_num") + SEP - + string_data("sev") + SEP - + string_data("desc") + SEP - + integer("days_open")) - - for tkt in ticket_expr.searchString(report): - print tkt.dump() - - prints:: - - ['101', 'Critical', 'Intermittent system crash', '6'] - - days_open: 6 - - desc: Intermittent system crash - - issue_num: 101 - - sev: Critical - ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14'] - - days_open: 14 - - desc: Spelling error on Login ('log|n') - - issue_num: 94 - - sev: Cosmetic - ['79', 'Minor', 'System slow when running too many reports', '47'] - - days_open: 47 - - desc: System slow when running too many reports - - issue_num: 79 - - sev: Minor - """ - def __init__(self, other, include=False, ignore=None, failOn=None): - super(SkipTo, self).__init__(other) - self.ignoreExpr = ignore - self.mayReturnEmpty = True - self.mayIndexError = False - self.includeMatch = include - self.saveAsList = False - if isinstance(failOn, basestring): - self.failOn = self._literalStringClass(failOn) - else: - self.failOn = failOn - self.errmsg = "No match found for " + _ustr(self.expr) - - def parseImpl(self, instring, loc, doActions=True): - startloc = loc - instrlen = len(instring) - expr = self.expr - expr_parse = self.expr._parse - self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None - self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None - - tmploc = loc - while tmploc <= instrlen: - if self_failOn_canParseNext is not None: - # break if failOn expression matches - if self_failOn_canParseNext(instring, tmploc): - break - - if self_ignoreExpr_tryParse is not None: - # advance past ignore expressions - while 1: - try: - tmploc = self_ignoreExpr_tryParse(instring, tmploc) - except ParseBaseException: - break - - try: - expr_parse(instring, tmploc, doActions=False, callPreParse=False) - except (ParseException, IndexError): - # no match, advance loc in string - tmploc += 1 - else: - # matched skipto expr, done - break - - else: - # ran off the end of the input string without matching skipto expr, fail - raise ParseException(instring, loc, self.errmsg, self) - - # build up return 
values - loc = tmploc - skiptext = instring[startloc:loc] - skipresult = ParseResults(skiptext) - - if self.includeMatch: - loc, mat = expr_parse(instring, loc, doActions, callPreParse=False) - skipresult += mat - - return loc, skipresult - -class Forward(ParseElementEnhance): - """Forward declaration of an expression to be defined later - - used for recursive grammars, such as algebraic infix notation. - When the expression is known, it is assigned to the ``Forward`` - variable using the '<<' operator. - - Note: take care when assigning to ``Forward`` not to overlook - precedence of operators. - - Specifically, '|' has a lower precedence than '<<', so that:: - - fwdExpr << a | b | c - - will actually be evaluated as:: - - (fwdExpr << a) | b | c - - thereby leaving b and c out as parseable alternatives. It is recommended that you - explicitly group the values inserted into the ``Forward``:: - - fwdExpr << (a | b | c) - - Converting to use the '<<=' operator instead will avoid this problem. - - See :class:`ParseResults.pprint` for an example of a recursive - parser created using ``Forward``. - """ - def __init__(self, other=None): - super(Forward, self).__init__(other, savelist=False) - - def __lshift__(self, other): - if isinstance(other, basestring): - other = self._literalStringClass(other) - self.expr = other - self.strRepr = None - self.mayIndexError = self.expr.mayIndexError - self.mayReturnEmpty = self.expr.mayReturnEmpty - self.setWhitespaceChars(self.expr.whiteChars) - self.skipWhitespace = self.expr.skipWhitespace - self.saveAsList = self.expr.saveAsList - self.ignoreExprs.extend(self.expr.ignoreExprs) - return self - - def __ilshift__(self, other): - return self << other - - def leaveWhitespace(self): - self.skipWhitespace = False - return self - - def streamline(self): - if not self.streamlined: - self.streamlined = True - if self.expr is not None: - self.expr.streamline() - return self - - def validate(self, validateTrace=None): - if validateTrace is None: - validateTrace = [] - - if self not in validateTrace: - tmp = validateTrace[:] + [self] - if self.expr is not None: - self.expr.validate(tmp) - self.checkRecursion([]) - - def __str__(self): - if hasattr(self, "name"): - return self.name - if self.strRepr is not None: - return self.strRepr - - # Avoid infinite recursion by setting a temporary strRepr - self.strRepr = ": ..." - - # Use the string representation of main expression. - retString = '...' - try: - if self.expr is not None: - retString = _ustr(self.expr)[:1000] - else: - retString = "None" - finally: - self.strRepr = self.__class__.__name__ + ": " + retString - return self.strRepr - - def copy(self): - if self.expr is not None: - return super(Forward, self).copy() - else: - ret = Forward() - ret <<= self - return ret - - def _setResultsName(self, name, listAllMatches=False): - if __diag__.warn_name_set_on_empty_Forward: - if self.expr is None: - warnings.warn("{0}: setting results name {0!r} on {1} expression " - "that has no contained expression".format("warn_name_set_on_empty_Forward", - name, - type(self).__name__), - stacklevel=3) - - return super(Forward, self)._setResultsName(name, listAllMatches) - -class TokenConverter(ParseElementEnhance): - """ - Abstract subclass of :class:`ParseExpression`, for converting parsed results. 
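For context: the ``Forward`` docstring above only points elsewhere for a worked example. A minimal recursive-grammar sketch using the ``'<<='`` form it recommends, against the public pyparsing 2.x API (illustrative names)::

    from pyparsing import Forward, Group, Suppress, Word, ZeroOrMore, nums

    # nested integer lists such as "(1 (2 3) 4)"
    expr = Forward()
    expr <<= Word(nums) | Group(Suppress("(") + ZeroOrMore(expr) + Suppress(")"))

    print(expr.parseString("(1 (2 3) 4)").asList())  # -> [['1', ['2', '3'], '4']]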
- """ - def __init__(self, expr, savelist=False): - super(TokenConverter, self).__init__(expr) # , savelist) - self.saveAsList = False - -class Combine(TokenConverter): - """Converter to concatenate all matching tokens to a single string. - By default, the matching patterns must also be contiguous in the - input string; this can be disabled by specifying - ``'adjacent=False'`` in the constructor. - - Example:: - - real = Word(nums) + '.' + Word(nums) - print(real.parseString('3.1416')) # -> ['3', '.', '1416'] - # will also erroneously match the following - print(real.parseString('3. 1416')) # -> ['3', '.', '1416'] - - real = Combine(Word(nums) + '.' + Word(nums)) - print(real.parseString('3.1416')) # -> ['3.1416'] - # no match when there are internal spaces - print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...) - """ - def __init__(self, expr, joinString="", adjacent=True): - super(Combine, self).__init__(expr) - # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself - if adjacent: - self.leaveWhitespace() - self.adjacent = adjacent - self.skipWhitespace = True - self.joinString = joinString - self.callPreparse = True - - def ignore(self, other): - if self.adjacent: - ParserElement.ignore(self, other) - else: - super(Combine, self).ignore(other) - return self - - def postParse(self, instring, loc, tokenlist): - retToks = tokenlist.copy() - del retToks[:] - retToks += ParseResults(["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults) - - if self.resultsName and retToks.haskeys(): - return [retToks] - else: - return retToks - -class Group(TokenConverter): - """Converter to return the matched tokens as a list - useful for - returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions. - - Example:: - - ident = Word(alphas) - num = Word(nums) - term = ident | num - func = ident + Optional(delimitedList(term)) - print(func.parseString("fn a, b, 100")) # -> ['fn', 'a', 'b', '100'] - - func = ident + Group(Optional(delimitedList(term))) - print(func.parseString("fn a, b, 100")) # -> ['fn', ['a', 'b', '100']] - """ - def __init__(self, expr): - super(Group, self).__init__(expr) - self.saveAsList = True - - def postParse(self, instring, loc, tokenlist): - return [tokenlist] - -class Dict(TokenConverter): - """Converter to return a repetitive expression as a list, but also - as a dictionary. Each element can also be referenced using the first - token in the expression as its key. Useful for tabular report - scraping when the first column can be used as a item key. 
-
-    Example::
-
-        data_word = Word(alphas)
-        label = data_word + FollowedBy(':')
-        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
-
-        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
-        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
-
-        # print attributes as plain groups
-        print(OneOrMore(attr_expr).parseString(text).dump())
-
-        # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
-        result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
-        print(result.dump())
-
-        # access named fields as dict entries, or output as dict
-        print(result['shape'])
-        print(result.asDict())
-
-    prints::
-
-        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
-        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
-        - color: light blue
-        - posn: upper left
-        - shape: SQUARE
-        - texture: burlap
-        SQUARE
-        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
-
-    See more examples at :class:`ParseResults` of accessing fields by results name.
-    """
-    def __init__(self, expr):
-        super(Dict, self).__init__(expr)
-        self.saveAsList = True
-
-    def postParse(self, instring, loc, tokenlist):
-        for i, tok in enumerate(tokenlist):
-            if len(tok) == 0:
-                continue
-            ikey = tok[0]
-            if isinstance(ikey, int):
-                ikey = _ustr(tok[0]).strip()
-            if len(tok) == 1:
-                tokenlist[ikey] = _ParseResultsWithOffset("", i)
-            elif len(tok) == 2 and not isinstance(tok[1], ParseResults):
-                tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i)
-            else:
-                dictvalue = tok.copy()  # ParseResults(i)
-                del dictvalue[0]
-                if len(dictvalue) != 1 or (isinstance(dictvalue, ParseResults) and dictvalue.haskeys()):
-                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i)
-                else:
-                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i)
-
-        if self.resultsName:
-            return [tokenlist]
-        else:
-            return tokenlist
-
-
-class Suppress(TokenConverter):
-    """Converter for ignoring the results of a parsed expression.
-
-    Example::
-
-        source = "a, b, c,d"
-        wd = Word(alphas)
-        wd_list1 = wd + ZeroOrMore(',' + wd)
-        print(wd_list1.parseString(source))
-
-        # often, delimiters that are useful during parsing are just in the
-        # way afterward - use Suppress to keep them out of the parsed output
-        wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
-        print(wd_list2.parseString(source))
-
-    prints::
-
-        ['a', ',', 'b', ',', 'c', ',', 'd']
-        ['a', 'b', 'c', 'd']
-
-    (See also :class:`delimitedList`.)
-    """
-    def postParse(self, instring, loc, tokenlist):
-        return []
-
-    def suppress(self):
-        return self
-
-
-class OnlyOnce(object):
-    """Wrapper for parse actions, to ensure they are only called once.
-    """
-    def __init__(self, methodCall):
-        self.callable = _trim_arity(methodCall)
-        self.called = False
-    def __call__(self, s, l, t):
-        if not self.called:
-            results = self.callable(s, l, t)
-            self.called = True
-            return results
-        raise ParseException(s, l, "")
-    def reset(self):
-        self.called = False
-
-def traceParseAction(f):
-    """Decorator for debugging parse actions.
-
-    When the parse action is called, this decorator will print
-    ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``.
-    When the parse action completes, the decorator will print
-    ``"<<"`` followed by the returned value, or any exception that the parse action raised.
-
-    Example::
-
-        wd = Word(alphas)
-
-        @traceParseAction
-        def remove_duplicate_chars(tokens):
-            return ''.join(sorted(set(''.join(tokens))))
-
-        wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
-        print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
-
-    prints::
-
-        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
-        <<leaving remove_duplicate_chars (ret: 'dfjkls')
-        ['dfjkls']
-    """
-    f = _trim_arity(f)
-    def z(*paArgs):
-        thisFunc = f.__name__
-        s, l, t = paArgs[-3:]
-        if len(paArgs) > 3:
-            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
-        sys.stderr.write(">>entering %s(line: '%s', %d, %r)\n" % (thisFunc, line(l, s), l, t))
-        try:
-            ret = f(*paArgs)
-        except Exception as exc:
-            sys.stderr.write("<<leaving %s (exception: %s)\n" % (thisFunc, exc))
-            raise
-        sys.stderr.write("<<leaving %s (ret: %r)\n" % (thisFunc, ret))
-        return ret
-    try:
-        z.__name__ = f.__name__
-    except AttributeError:
-        pass
-    return z
-
-#
-# global helpers
-#
-def delimitedList(expr, delim=",", combine=False):
-    """Helper to define a delimited list of expressions - the delimiter
-    defaults to ','. By default, the list elements and delimiters can
-    have intervening whitespace, and comments, but this can be
-    overridden by passing ``combine=True`` in the constructor. If
-    ``combine`` is set to ``True``, the matching tokens are
-    returned as a single token string, with the delimiters included;
-    otherwise, the matching tokens are returned as a list of tokens,
-    with the delimiters suppressed.
-
-    Example::
-
-        delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
-        delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
-    """
-    dlName = _ustr(expr) + " [" + _ustr(delim) + " " + _ustr(expr) + "]..."
-    if combine:
-        return Combine(expr + ZeroOrMore(delim + expr)).setName(dlName)
-    else:
-        return (expr + ZeroOrMore(Suppress(delim) + expr)).setName(dlName)
-
-def countedArray(expr, intExpr=None):
-    """Helper to define a counted list of expressions.
-
-    This helper defines a pattern of the form::
-
-        integer expr expr expr...
-
-    where the leading integer tells how many expr expressions follow.
-    The matched tokens returns the array of expr tokens as a list - the
-    leading count token is suppressed.
-
-    If ``intExpr`` is specified, it should be a pyparsing expression
-    that produces an integer value.
-
-    Example::
-
-        countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd']
-
-        # in this parser, the leading integer value is given in binary,
-        # '10' indicating that 2 values are in the array
-        binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
-        countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd']
-    """
-    arrayExpr = Forward()
-    def countFieldParseAction(s, l, t):
-        n = t[0]
-        arrayExpr << (n and Group(And([expr] * n)) or Group(empty))
-        return []
-    if intExpr is None:
-        intExpr = Word(nums).setParseAction(lambda t: int(t[0]))
-    else:
-        intExpr = intExpr.copy()
-    intExpr.setName("arrayLen")
-    intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
-    return (intExpr + arrayExpr).setName('(len) ' + _ustr(expr) + '...')
-
-def _flatten(L):
-    ret = []
-    for i in L:
-        if isinstance(i, list):
-            ret.extend(_flatten(i))
-        else:
-            ret.append(i)
-    return ret
-
-def matchPreviousLiteral(expr):
-    """Helper to define an expression that is indirectly defined from
-    the tokens matched in a previous expression, that is, it looks for
-    a 'repeat' of a previous expression. For example::
-
-        first = Word(nums)
-        second = matchPreviousLiteral(first)
-        matchExpr = first + ":" + second
-
-    will match ``"1:1"``, but not ``"1:2"``. Because this
-    matches a previous literal, will also match the leading
-    ``"1:1"`` in ``"1:10"``. If this is not desired, use
-    :class:`matchPreviousExpr`. Do *not* use with packrat parsing
-    enabled.
-    """
-    rep = Forward()
-    def copyTokenToRepeater(s, l, t):
-        if t:
-            if len(t) == 1:
-                rep << t[0]
-            else:
-                # flatten t tokens
-                tflat = _flatten(t.asList())
-                rep << And(Literal(tt) for tt in tflat)
-        else:
-            rep << Empty()
-    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
-    rep.setName('(prev) ' + _ustr(expr))
-    return rep
-
-def matchPreviousExpr(expr):
-    """Helper to define an expression that is indirectly defined from
-    the tokens matched in a previous expression, that is, it looks for
-    a 'repeat' of a previous expression.
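For context: ``matchPreviousLiteral`` above describes its matching rule without a runnable snippet. A minimal sketch against the public pyparsing 2.x API (illustrative names)::

    from pyparsing import Word, matchPreviousLiteral, nums

    first = Word(nums)
    second = matchPreviousLiteral(first)
    pair = first + ":" + second

    print(pair.parseString("12:12"))  # -> ['12', ':', '12']
    # pair.parseString("12:34") raises ParseException: '34' does not
    # repeat the previously matched '12'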
For example:: - - first = Word(nums) - second = matchPreviousExpr(first) - matchExpr = first + ":" + second - - will match ``"1:1"``, but not ``"1:2"``. Because this - matches by expressions, will *not* match the leading ``"1:1"`` - in ``"1:10"``; the expressions are evaluated first, and then - compared, so ``"1"`` is compared with ``"10"``. Do *not* use - with packrat parsing enabled. - """ - rep = Forward() - e2 = expr.copy() - rep <<= e2 - def copyTokenToRepeater(s, l, t): - matchTokens = _flatten(t.asList()) - def mustMatchTheseTokens(s, l, t): - theseTokens = _flatten(t.asList()) - if theseTokens != matchTokens: - raise ParseException('', 0, '') - rep.setParseAction(mustMatchTheseTokens, callDuringTry=True) - expr.addParseAction(copyTokenToRepeater, callDuringTry=True) - rep.setName('(prev) ' + _ustr(expr)) - return rep - -def _escapeRegexRangeChars(s): - # ~ escape these chars: ^-[] - for c in r"\^-[]": - s = s.replace(c, _bslash + c) - s = s.replace("\n", r"\n") - s = s.replace("\t", r"\t") - return _ustr(s) - -def oneOf(strs, caseless=False, useRegex=True, asKeyword=False): - """Helper to quickly define a set of alternative Literals, and makes - sure to do longest-first testing when there is a conflict, - regardless of the input order, but returns - a :class:`MatchFirst` for best performance. - - Parameters: - - - strs - a string of space-delimited literals, or a collection of - string literals - - caseless - (default= ``False``) - treat all literals as - caseless - - useRegex - (default= ``True``) - as an optimization, will - generate a Regex object; otherwise, will generate - a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if - creating a :class:`Regex` raises an exception) - - asKeyword - (default=``False``) - enforce Keyword-style matching on the - generated expressions - - Example:: - - comp_oper = oneOf("< = > <= >= !=") - var = Word(alphas) - number = Word(nums) - term = var | number - comparison_expr = term + comp_oper + term - print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12")) - - prints:: - - [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] - """ - if isinstance(caseless, basestring): - warnings.warn("More than one string argument passed to oneOf, pass " - "choices as a list or space-delimited string", stacklevel=2) - - if caseless: - isequal = (lambda a, b: a.upper() == b.upper()) - masks = (lambda a, b: b.upper().startswith(a.upper())) - parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral - else: - isequal = (lambda a, b: a == b) - masks = (lambda a, b: b.startswith(a)) - parseElementClass = Keyword if asKeyword else Literal - - symbols = [] - if isinstance(strs, basestring): - symbols = strs.split() - elif isinstance(strs, Iterable): - symbols = list(strs) - else: - warnings.warn("Invalid argument to oneOf, expected string or iterable", - SyntaxWarning, stacklevel=2) - if not symbols: - return NoMatch() - - if not asKeyword: - # if not producing keywords, need to reorder to take care to avoid masking - # longer choices with shorter ones - i = 0 - while i < len(symbols) - 1: - cur = symbols[i] - for j, other in enumerate(symbols[i + 1:]): - if isequal(other, cur): - del symbols[i + j + 1] - break - elif masks(cur, other): - del symbols[i + j + 1] - symbols.insert(i, other) - break - else: - i += 1 - - if not (caseless or asKeyword) and useRegex: - # ~ print (strs, "->", "|".join([_escapeRegexChars(sym) for sym in symbols])) - try: - if len(symbols) == len("".join(symbols)): - return 
Regex("[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols)).setName(' | '.join(symbols)) - else: - return Regex("|".join(re.escape(sym) for sym in symbols)).setName(' | '.join(symbols)) - except Exception: - warnings.warn("Exception creating Regex for oneOf, building MatchFirst", - SyntaxWarning, stacklevel=2) - - # last resort, just use MatchFirst - return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols)) - -def dictOf(key, value): - """Helper to easily and clearly define a dictionary by specifying - the respective patterns for the key and value. Takes care of - defining the :class:`Dict`, :class:`ZeroOrMore`, and - :class:`Group` tokens in the proper order. The key pattern - can include delimiting markers or punctuation, as long as they are - suppressed, thereby leaving the significant key text. The value - pattern can include named results, so that the :class:`Dict` results - can include named token fields. - - Example:: - - text = "shape: SQUARE posn: upper left color: light blue texture: burlap" - attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) - print(OneOrMore(attr_expr).parseString(text).dump()) - - attr_label = label - attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join) - - # similar to Dict, but simpler call format - result = dictOf(attr_label, attr_value).parseString(text) - print(result.dump()) - print(result['shape']) - print(result.shape) # object attribute access works too - print(result.asDict()) - - prints:: - - [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - - color: light blue - - posn: upper left - - shape: SQUARE - - texture: burlap - SQUARE - SQUARE - {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} - """ - return Dict(OneOrMore(Group(key + value))) - -def originalTextFor(expr, asString=True): - """Helper to return the original, untokenized text for a given - expression. Useful to restore the parsed fields of an HTML start - tag into the raw tag text itself, or to revert separate tokens with - intervening whitespace back to the original matching input text. By - default, returns astring containing the original parsed text. - - If the optional ``asString`` argument is passed as - ``False``, then the return value is - a :class:`ParseResults` containing any results names that - were originally matched, and a single token containing the original - matched text from the input string. So if the expression passed to - :class:`originalTextFor` contains expressions with defined - results names, you must set ``asString`` to ``False`` if you - want to preserve those results name values. 
- - Example:: - - src = "this is test bold text normal text " - for tag in ("b", "i"): - opener, closer = makeHTMLTags(tag) - patt = originalTextFor(opener + SkipTo(closer) + closer) - print(patt.searchString(src)[0]) - - prints:: - - [' bold text '] - ['text'] - """ - locMarker = Empty().setParseAction(lambda s, loc, t: loc) - endlocMarker = locMarker.copy() - endlocMarker.callPreparse = False - matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") - if asString: - extractText = lambda s, l, t: s[t._original_start: t._original_end] - else: - def extractText(s, l, t): - t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]] - matchExpr.setParseAction(extractText) - matchExpr.ignoreExprs = expr.ignoreExprs - return matchExpr - -def ungroup(expr): - """Helper to undo pyparsing's default grouping of And expressions, - even if all but one are non-empty. - """ - return TokenConverter(expr).addParseAction(lambda t: t[0]) - -def locatedExpr(expr): - """Helper to decorate a returned token with its starting and ending - locations in the input string. - - This helper adds the following results names: - - - locn_start = location where matched expression begins - - locn_end = location where matched expression ends - - value = the actual parsed results - - Be careful if the input text contains ```` characters, you - may want to call :class:`ParserElement.parseWithTabs` - - Example:: - - wd = Word(alphas) - for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"): - print(match) - - prints:: - - [[0, 'ljsdf', 5]] - [[8, 'lksdjjf', 15]] - [[18, 'lkkjj', 23]] - """ - locator = Empty().setParseAction(lambda s, l, t: l) - return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end")) - - -# convenience constants for positional expressions -empty = Empty().setName("empty") -lineStart = LineStart().setName("lineStart") -lineEnd = LineEnd().setName("lineEnd") -stringStart = StringStart().setName("stringStart") -stringEnd = StringEnd().setName("stringEnd") - -_escapedPunc = Word(_bslash, r"\[]-*.$+^?()~ ", exact=2).setParseAction(lambda s, l, t: t[0][1]) -_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s, l, t: unichr(int(t[0].lstrip(r'\0x'), 16))) -_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s, l, t: unichr(int(t[0][1:], 8))) -_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1) -_charRange = Group(_singleChar + Suppress("-") + _singleChar) -_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group(OneOrMore(_charRange | _singleChar)).setResultsName("body") + "]" - -def srange(s): - r"""Helper to easily define string ranges for use in Word - construction. Borrows syntax from regexp '[]' string range - definitions:: - - srange("[0-9]") -> "0123456789" - srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" - srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" - - The input string must be enclosed in []'s, and the returned string - is the expanded character set joined into a single string. 
-    values enclosed in the []'s may be:
-
-     - a single character
-     - an escaped character with a leading backslash (such as ``\-``
-       or ``\]``)
-     - an escaped hex character with a leading ``'\x'``
-       (``\x21``, which is a ``'!'`` character) (``\0x##``
-       is also supported for backwards compatibility)
-     - an escaped octal character with a leading ``'\0'``
-       (``\041``, which is a ``'!'`` character)
-     - a range of any of the above, separated by a dash (``'a-z'``,
-       etc.)
-     - any combination of the above (``'aeiouy'``,
-       ``'a-zA-Z0-9_$'``, etc.)
-    """
-    _expanded = lambda p: p if not isinstance(p, ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]), ord(p[1]) + 1))
-    try:
-        return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
-    except Exception:
-        return ""
-
-def matchOnlyAtCol(n):
-    """Helper method for defining parse actions that require matching at
-    a specific column in the input text.
-    """
-    def verifyCol(strg, locn, toks):
-        if col(locn, strg) != n:
-            raise ParseException(strg, locn, "matched token not at column %d" % n)
-    return verifyCol

-def replaceWith(replStr):
-    """Helper method for common parse actions that simply return
-    a literal value.  Especially useful when used with
-    :class:`transformString<ParserElement.transformString>` ().
-
-    Example::
-
-        num = Word(nums).setParseAction(lambda toks: int(toks[0]))
-        na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
-        term = na | num
-
-        OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
-    """
-    return lambda s, l, t: [replStr]
-
-def removeQuotes(s, l, t):
-    """Helper parse action for removing quotation marks from parsed
-    quoted strings.
-
-    Example::
-
-        # by default, quotation marks are included in parsed results
-        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
-
-        # use removeQuotes to strip quotation marks from parsed results
-        quotedString.setParseAction(removeQuotes)
-        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
-    """
-    return t[0][1:-1]
-
-def tokenMap(func, *args):
-    """Helper to define a parse action by mapping a function to all
-    elements of a ParseResults list. If any additional args are passed,
-    they are forwarded to the given function as additional arguments
-    after the token, as in
-    ``hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))``,
-    which will convert the parsed data to an integer using base 16.
-
-    Example (compare the last example to the one in :class:`ParserElement.transformString`)::
-
-        hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
-        hex_ints.runTests('''
-            00 11 22 aa FF 0a 0d 1a
-            ''')
-
-        upperword = Word(alphas).setParseAction(tokenMap(str.upper))
-        OneOrMore(upperword).runTests('''
-            my kingdom for a horse
-            ''')
-
-        wd = Word(alphas).setParseAction(tokenMap(str.title))
-        OneOrMore(wd).setParseAction(' '.join).runTests('''
-            now is the winter of our discontent made glorious summer by this sun of york
-            ''')
-
-    prints::
-
-        00 11 22 aa FF 0a 0d 1a
-        [0, 17, 34, 170, 255, 10, 13, 26]
-
-        my kingdom for a horse
-        ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
-
-        now is the winter of our discontent made glorious summer by this sun of york
-        ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
-    """
-    def pa(s, l, t):
-        return [func(tokn, *args) for tokn in t]
-
-    try:
-        func_name = getattr(func, '__name__',
-                            getattr(func, '__class__').__name__)
-    except Exception:
-        func_name = str(func)
-    pa.__name__ = func_name
-
-    return pa
-
-upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
-"""(Deprecated) Helper parse action to convert tokens to upper case.
-Deprecated in favor of :class:`pyparsing_common.upcaseTokens`"""
-
-downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
-"""(Deprecated) Helper parse action to convert tokens to lower case.
-Deprecated in favor of :class:`pyparsing_common.downcaseTokens`"""
-
-def _makeTags(tagStr, xml,
-              suppress_LT=Suppress("<"),
-              suppress_GT=Suppress(">")):
-    """Internal helper to construct opening and closing tag expressions, given a tag name"""
-    if isinstance(tagStr, basestring):
-        resname = tagStr
-        tagStr = Keyword(tagStr, caseless=not xml)
-    else:
-        resname = tagStr.name
-
-    tagAttrName = Word(alphas, alphanums + "_-:")
-    if xml:
-        tagAttrValue = dblQuotedString.copy().setParseAction(removeQuotes)
-        openTag = (suppress_LT
-                   + tagStr("tag")
-                   + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue)))
-                   + Optional("/", default=[False])("empty").setParseAction(lambda s, l, t: t[0] == '/')
-                   + suppress_GT)
-    else:
-        tagAttrValue = quotedString.copy().setParseAction(removeQuotes) | Word(printables, excludeChars=">")
-        openTag = (suppress_LT
-                   + tagStr("tag")
-                   + Dict(ZeroOrMore(Group(tagAttrName.setParseAction(downcaseTokens)
-                                           + Optional(Suppress("=") + tagAttrValue))))
-                   + Optional("/", default=[False])("empty").setParseAction(lambda s, l, t: t[0] == '/')
-                   + suppress_GT)
-    closeTag = Combine(_L("</") + tagStr + ">", adjacent=False)
-
-    openTag.setName("<%s>" % resname)
-    # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
-    openTag.addParseAction(lambda t: t.__setitem__("start" + "".join(resname.replace(":", " ").title().split()), t.copy()))
-    closeTag = closeTag("end" + "".join(resname.replace(":", " ").title().split())).setName("</%s>" % resname)
-    openTag.tag = resname
-    closeTag.tag = resname
-    openTag.tag_body = SkipTo(closeTag())
-    return openTag, closeTag
-
-def makeHTMLTags(tagStr):
-    """Helper to construct opening and closing tag expressions for HTML,
-    given a tag name. Matches tags in either upper or lower case,
-    attributes with namespaces and with quoted or unquoted values.
-
-    Example::
-
-        text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
-        # makeHTMLTags returns pyparsing expressions for the opening and
-        # closing tags as a 2-tuple
-        a, a_end = makeHTMLTags("A")
-        link_expr = a + SkipTo(a_end)("link_text") + a_end
-
-        for link in link_expr.searchString(text):
-            # attributes in the <A> tag (like "href" shown here) are
-            # also accessible as named results
-            print(link.link_text, '->', link.href)
-
-    prints::
-
-        pyparsing -> https://github.com/pyparsing/pyparsing/wiki
-    """
-    return _makeTags(tagStr, False)
-
-def makeXMLTags(tagStr):
-    """Helper to construct opening and closing tag expressions for XML,
-    given a tag name. Matches tags only in the given upper/lower case.
-
-    Example: similar to :class:`makeHTMLTags`
-    """
-    return _makeTags(tagStr, True)
-
-def withAttribute(*args, **attrDict):
-    """Helper to create a validating parse action to be used with start
-    tags created with :class:`makeXMLTags` or
-    :class:`makeHTMLTags`. Use ``withAttribute`` to qualify
-    a starting tag with a required attribute value, to avoid false
-    matches on common tags such as ``<TD>`` or ``<DIV>``.
-
-    Call ``withAttribute`` with a series of attribute names and
-    values. Specify the list of filter attribute names and values as:
-
-     - keyword arguments, as in ``(align="right")``, or
-     - as an explicit dict with ``**`` operator, when an attribute
-       name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``
-     - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))``
-
-    For attribute names with a namespace prefix, you must use the second
-    form.  Attribute names are matched insensitive to upper/lower case.
-
-    If just testing for ``class`` (with or without a namespace), use
-    :class:`withClass`.
-
-    To verify that the attribute exists, but without specifying a value,
-    pass ``withAttribute.ANY_VALUE`` as the value.
-
-    Example::
-
-        html = '''
-            <div>
-            Some text
-            <div type="grid">1 4 0 1 0</div>
-            <div type="graph">1,3 2,3 1,1</div>
-            <div>this has no type</div>
-            </div>
- - ''' - div,div_end = makeHTMLTags("div") - - # only match div tag having a type attribute with value "grid" - div_grid = div().setParseAction(withAttribute(type="grid")) - grid_expr = div_grid + SkipTo(div | div_end)("body") - for grid_header in grid_expr.searchString(html): - print(grid_header.body) - - # construct a match with any div tag having a type attribute, regardless of the value - div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE)) - div_expr = div_any_type + SkipTo(div | div_end)("body") - for div_header in div_expr.searchString(html): - print(div_header.body) - - prints:: - - 1 4 0 1 0 - - 1 4 0 1 0 - 1,3 2,3 1,1 - """ - if args: - attrs = args[:] - else: - attrs = attrDict.items() - attrs = [(k, v) for k, v in attrs] - def pa(s, l, tokens): - for attrName, attrValue in attrs: - if attrName not in tokens: - raise ParseException(s, l, "no matching attribute " + attrName) - if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue: - raise ParseException(s, l, "attribute '%s' has value '%s', must be '%s'" % - (attrName, tokens[attrName], attrValue)) - return pa -withAttribute.ANY_VALUE = object() - -def withClass(classname, namespace=''): - """Simplified version of :class:`withAttribute` when - matching on a div class - made difficult because ``class`` is - a reserved word in Python. - - Example:: - - html = ''' -
-            <div>
-            Some text
-            <div class="grid">1 4 0 1 0</div>
-            <div class="graph">1,3 2,3 1,1</div>
-            <div>this &lt;div&gt; has no class</div>
-            </div>
-
-        '''
-        div,div_end = makeHTMLTags("div")
-        div_grid = div().setParseAction(withClass("grid"))
-
-        grid_expr = div_grid + SkipTo(div | div_end)("body")
-        for grid_header in grid_expr.searchString(html):
-            print(grid_header.body)
-
-        div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
-        div_expr = div_any_type + SkipTo(div | div_end)("body")
-        for div_header in div_expr.searchString(html):
-            print(div_header.body)
-
-    prints::
-
-        1 4 0 1 0
-
-        1 4 0 1 0
-        1,3 2,3 1,1
-    """
-    classattr = "%s:class" % namespace if namespace else "class"
-    return withAttribute(**{classattr: classname})
-
-opAssoc = SimpleNamespace()
-opAssoc.LEFT = object()
-opAssoc.RIGHT = object()
-
-def infixNotation(baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')')):
-    """Helper method for constructing grammars of expressions made up of
-    operators working in a precedence hierarchy.  Operators may be unary
-    or binary, left- or right-associative.  Parse actions can also be
-    attached to operator expressions. The generated parser will also
-    recognize the use of parentheses to override operator precedences
-    (see example below).
-
-    Note: if you define a deep operator list, you may see performance
-    issues when using infixNotation. See
-    :class:`ParserElement.enablePackrat` for a mechanism to potentially
-    improve your parser performance.
-
-    Parameters:
-     - baseExpr - expression representing the most basic element for the
-       nested expression
-     - opList - list of tuples, one for each operator precedence level
-       in the expression grammar; each tuple is of the form ``(opExpr,
-       numTerms, rightLeftAssoc, parseAction)``, where:
-
-       - opExpr is the pyparsing expression for the operator; may also
-         be a string, which will be converted to a Literal; if numTerms
-         is 3, opExpr is a tuple of two expressions, for the two
-         operators separating the 3 terms
-       - numTerms is the number of terms for this operator (must be 1,
-         2, or 3)
-       - rightLeftAssoc is the indicator whether the operator is right
-         or left associative, using the pyparsing-defined constants
-         ``opAssoc.RIGHT`` and ``opAssoc.LEFT``.
- - parseAction is the parse action to be associated with - expressions matching this operator expression (the parse action - tuple member may be omitted); if the parse action is passed - a tuple or list of functions, this is equivalent to calling - ``setParseAction(*fn)`` - (:class:`ParserElement.setParseAction`) - - lpar - expression for matching left-parentheses - (default= ``Suppress('(')``) - - rpar - expression for matching right-parentheses - (default= ``Suppress(')')``) - - Example:: - - # simple example of four-function arithmetic with ints and - # variable names - integer = pyparsing_common.signed_integer - varname = pyparsing_common.identifier - - arith_expr = infixNotation(integer | varname, - [ - ('-', 1, opAssoc.RIGHT), - (oneOf('* /'), 2, opAssoc.LEFT), - (oneOf('+ -'), 2, opAssoc.LEFT), - ]) - - arith_expr.runTests(''' - 5+3*6 - (5+3)*6 - -2--11 - ''', fullDump=False) - - prints:: - - 5+3*6 - [[5, '+', [3, '*', 6]]] - - (5+3)*6 - [[[5, '+', 3], '*', 6]] - - -2--11 - [[['-', 2], '-', ['-', 11]]] - """ - # captive version of FollowedBy that does not do parse actions or capture results names - class _FB(FollowedBy): - def parseImpl(self, instring, loc, doActions=True): - self.expr.tryParse(instring, loc) - return loc, [] - - ret = Forward() - lastExpr = baseExpr | (lpar + ret + rpar) - for i, operDef in enumerate(opList): - opExpr, arity, rightLeftAssoc, pa = (operDef + (None, ))[:4] - termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr - if arity == 3: - if opExpr is None or len(opExpr) != 2: - raise ValueError( - "if numterms=3, opExpr must be a tuple or list of two expressions") - opExpr1, opExpr2 = opExpr - thisExpr = Forward().setName(termName) - if rightLeftAssoc == opAssoc.LEFT: - if arity == 1: - matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + OneOrMore(opExpr)) - elif arity == 2: - if opExpr is not None: - matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group(lastExpr + OneOrMore(opExpr + lastExpr)) - else: - matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr + OneOrMore(lastExpr)) - elif arity == 3: - matchExpr = (_FB(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) - + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr))) - else: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - elif rightLeftAssoc == opAssoc.RIGHT: - if arity == 1: - # try to avoid LR with this extra test - if not isinstance(opExpr, Optional): - opExpr = Optional(opExpr) - matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr) - elif arity == 2: - if opExpr is not None: - matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group(lastExpr + OneOrMore(opExpr + thisExpr)) - else: - matchExpr = _FB(lastExpr + thisExpr) + Group(lastExpr + OneOrMore(thisExpr)) - elif arity == 3: - matchExpr = (_FB(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) - + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)) - else: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - else: - raise ValueError("operator must indicate right or left associativity") - if pa: - if isinstance(pa, (tuple, list)): - matchExpr.setParseAction(*pa) - else: - matchExpr.setParseAction(pa) - thisExpr <<= (matchExpr.setName(termName) | lastExpr) - lastExpr = thisExpr - ret <<= lastExpr - return ret - -operatorPrecedence = infixNotation -"""(Deprecated) Former name of :class:`infixNotation`, will be -dropped in a future release.""" - -dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + 
'"').setName("string enclosed in double quotes") -sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").setName("string enclosed in single quotes") -quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' - | Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").setName("quotedString using single or double quotes") -unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal") - -def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()): - """Helper method for defining nested lists enclosed in opening and - closing delimiters ("(" and ")" are the default). - - Parameters: - - opener - opening character for a nested list - (default= ``"("``); can also be a pyparsing expression - - closer - closing character for a nested list - (default= ``")"``); can also be a pyparsing expression - - content - expression for items within the nested lists - (default= ``None``) - - ignoreExpr - expression for ignoring opening and closing - delimiters (default= :class:`quotedString`) - - If an expression is not provided for the content argument, the - nested expression will capture all whitespace-delimited content - between delimiters as a list of separate values. - - Use the ``ignoreExpr`` argument to define expressions that may - contain opening or closing characters that should not be treated as - opening or closing characters for nesting, such as quotedString or - a comment expression. Specify multiple expressions using an - :class:`Or` or :class:`MatchFirst`. The default is - :class:`quotedString`, but if no expressions are to be ignored, then - pass ``None`` for this argument. - - Example:: - - data_type = oneOf("void int short long char float double") - decl_data_type = Combine(data_type + Optional(Word('*'))) - ident = Word(alphas+'_', alphanums+'_') - number = pyparsing_common.number - arg = Group(decl_data_type + ident) - LPAR, RPAR = map(Suppress, "()") - - code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment)) - - c_function = (decl_data_type("type") - + ident("name") - + LPAR + Optional(delimitedList(arg), [])("args") + RPAR - + code_body("body")) - c_function.ignore(cStyleComment) - - source_code = ''' - int is_odd(int x) { - return (x%2); - } - - int dec_to_hex(char hchar) { - if (hchar >= '0' && hchar <= '9') { - return (ord(hchar)-ord('0')); - } else { - return (10+ord(hchar)-ord('A')); - } - } - ''' - for func in c_function.searchString(source_code): - print("%(name)s (%(type)s) args: %(args)s" % func) - - - prints:: - - is_odd (int) args: [['int', 'x']] - dec_to_hex (int) args: [['char', 'hchar']] - """ - if opener == closer: - raise ValueError("opening and closing strings cannot be the same") - if content is None: - if isinstance(opener, basestring) and isinstance(closer, basestring): - if len(opener) == 1 and len(closer) == 1: - if ignoreExpr is not None: - content = (Combine(OneOrMore(~ignoreExpr - + CharsNotIn(opener - + closer - + ParserElement.DEFAULT_WHITE_CHARS, exact=1) - ) - ).setParseAction(lambda t: t[0].strip())) - else: - content = (empty.copy() + CharsNotIn(opener - + closer - + ParserElement.DEFAULT_WHITE_CHARS - ).setParseAction(lambda t: t[0].strip())) - else: - if ignoreExpr is not None: - content = (Combine(OneOrMore(~ignoreExpr - + ~Literal(opener) - + ~Literal(closer) - + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)) - ).setParseAction(lambda t: t[0].strip())) - else: - content = 
(Combine(OneOrMore(~Literal(opener) - + ~Literal(closer) - + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)) - ).setParseAction(lambda t: t[0].strip())) - else: - raise ValueError("opening and closing arguments must be strings if no content expression is given") - ret = Forward() - if ignoreExpr is not None: - ret <<= Group(Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)) - else: - ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer)) - ret.setName('nested %s%s expression' % (opener, closer)) - return ret - -def indentedBlock(blockStatementExpr, indentStack, indent=True): - """Helper method for defining space-delimited indentation blocks, - such as those used to define block statements in Python source code. - - Parameters: - - - blockStatementExpr - expression defining syntax of statement that - is repeated within the indented block - - indentStack - list created by caller to manage indentation stack - (multiple statementWithIndentedBlock expressions within a single - grammar should share a common indentStack) - - indent - boolean indicating whether block must be indented beyond - the current level; set to False for block of left-most - statements (default= ``True``) - - A valid block must contain at least one ``blockStatement``. - - Example:: - - data = ''' - def A(z): - A1 - B = 100 - G = A2 - A2 - A3 - B - def BB(a,b,c): - BB1 - def BBA(): - bba1 - bba2 - bba3 - C - D - def spam(x,y): - def eggs(z): - pass - ''' - - - indentStack = [1] - stmt = Forward() - - identifier = Word(alphas, alphanums) - funcDecl = ("def" + identifier + Group("(" + Optional(delimitedList(identifier)) + ")") + ":") - func_body = indentedBlock(stmt, indentStack) - funcDef = Group(funcDecl + func_body) - - rvalue = Forward() - funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") - rvalue << (funcCall | identifier | Word(nums)) - assignment = Group(identifier + "=" + rvalue) - stmt << (funcDef | assignment | identifier) - - module_body = OneOrMore(stmt) - - parseTree = module_body.parseString(data) - parseTree.pprint() - - prints:: - - [['def', - 'A', - ['(', 'z', ')'], - ':', - [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], - 'B', - ['def', - 'BB', - ['(', 'a', 'b', 'c', ')'], - ':', - [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], - 'C', - 'D', - ['def', - 'spam', - ['(', 'x', 'y', ')'], - ':', - [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] - """ - backup_stack = indentStack[:] - - def reset_stack(): - indentStack[:] = backup_stack - - def checkPeerIndent(s, l, t): - if l >= len(s): return - curCol = col(l, s) - if curCol != indentStack[-1]: - if curCol > indentStack[-1]: - raise ParseException(s, l, "illegal nesting") - raise ParseException(s, l, "not a peer entry") - - def checkSubIndent(s, l, t): - curCol = col(l, s) - if curCol > indentStack[-1]: - indentStack.append(curCol) - else: - raise ParseException(s, l, "not a subentry") - - def checkUnindent(s, l, t): - if l >= len(s): return - curCol = col(l, s) - if not(indentStack and curCol in indentStack): - raise ParseException(s, l, "not an unindent") - if curCol < indentStack[-1]: - indentStack.pop() - - NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress(), stopOn=StringEnd()) - INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT') - PEER = Empty().setParseAction(checkPeerIndent).setName('') - UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT') - if indent: - smExpr 
= Group(Optional(NL)
-                       + INDENT
-                       + OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL), stopOn=StringEnd())
-                       + UNDENT)
-    else:
-        smExpr = Group(Optional(NL)
-                       + OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL), stopOn=StringEnd())
-                       + UNDENT)
-    smExpr.setFailAction(lambda a, b, c, d: reset_stack())
-    blockStatementExpr.ignore(_bslash + LineEnd())
-    return smExpr.setName('indented block')
-
-alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
-punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
-
-anyOpenTag, anyCloseTag = makeHTMLTags(Word(alphas, alphanums + "_:").setName('any tag'))
-_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(), '><& "\''))
-commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
-def replaceHTMLEntity(t):
-    """Helper parser action to replace common HTML entities with their special characters"""
-    return _htmlEntityMap.get(t.entity)
-
-# it's easy to get these comment structures wrong - they're very common, so may as well make them available
-cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
-"Comment of the form ``/* ... */``"
-
-htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
-"Comment of the form ``<!-- ... -->``"
-
-restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
-dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
-"Comment of the form ``// ... (to end of line)``"
-
-cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/' | dblSlashComment).setName("C++ style comment")
-"Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`"
-
-javaStyleComment = cppStyleComment
-"Same as :class:`cppStyleComment`"
-
-pythonStyleComment = Regex(r"#.*").setName("Python style comment")
-"Comment of the form ``# ... (to end of line)``"
-
-_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',')
-                                  + Optional(Word(" \t")
-                                             + ~Literal(",") + ~LineEnd()))).streamline().setName("commaItem")
-commaSeparatedList = delimitedList(Optional(quotedString.copy() | _commasepitem, default="")).setName("commaSeparatedList")
-"""(Deprecated) Predefined expression of 1 or more printable words or
-quoted strings, separated by commas.
-
-This expression is deprecated in favor of :class:`pyparsing_common.comma_separated_list`.
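-
-A quick sketch of the replacement in use (illustrative only; the expected
-result is shown as a comment)::
-
-    pyparsing_common.comma_separated_list.parseString("this, that, 'sister, and brother'")
-    # -> ['this', 'that', "'sister, and brother'"] (commas inside quotes stay together)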
-""" - -# some other useful expressions - using lower-case class name since we are really using this as a namespace -class pyparsing_common: - """Here are some common low-level expressions that may be useful in - jump-starting parser development: - - - numeric forms (:class:`integers`, :class:`reals`, - :class:`scientific notation`) - - common :class:`programming identifiers` - - network addresses (:class:`MAC`, - :class:`IPv4`, :class:`IPv6`) - - ISO8601 :class:`dates` and - :class:`datetime` - - :class:`UUID` - - :class:`comma-separated list` - - Parse actions: - - - :class:`convertToInteger` - - :class:`convertToFloat` - - :class:`convertToDate` - - :class:`convertToDatetime` - - :class:`stripHTMLTags` - - :class:`upcaseTokens` - - :class:`downcaseTokens` - - Example:: - - pyparsing_common.number.runTests(''' - # any int or real number, returned as the appropriate type - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.fnumber.runTests(''' - # any int or real number, returned as float - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.hex_integer.runTests(''' - # hex numbers - 100 - FF - ''') - - pyparsing_common.fraction.runTests(''' - # fractions - 1/2 - -3/4 - ''') - - pyparsing_common.mixed_integer.runTests(''' - # mixed fractions - 1 - 1/2 - -3/4 - 1-3/4 - ''') - - import uuid - pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) - pyparsing_common.uuid.runTests(''' - # uuid - 12345678-1234-5678-1234-567812345678 - ''') - - prints:: - - # any int or real number, returned as the appropriate type - 100 - [100] - - -100 - [-100] - - +100 - [100] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # any int or real number, returned as float - 100 - [100.0] - - -100 - [-100.0] - - +100 - [100.0] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # hex numbers - 100 - [256] - - FF - [255] - - # fractions - 1/2 - [0.5] - - -3/4 - [-0.75] - - # mixed fractions - 1 - [1] - - 1/2 - [0.5] - - -3/4 - [-0.75] - - 1-3/4 - [1.75] - - # uuid - 12345678-1234-5678-1234-567812345678 - [UUID('12345678-1234-5678-1234-567812345678')] - """ - - convertToInteger = tokenMap(int) - """ - Parse action for converting parsed integers to Python int - """ - - convertToFloat = tokenMap(float) - """ - Parse action for converting parsed numbers to Python float - """ - - integer = Word(nums).setName("integer").setParseAction(convertToInteger) - """expression that parses an unsigned integer, returns an int""" - - hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int, 16)) - """expression that parses a hexadecimal integer, returns an int""" - - signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger) - """expression that parses an integer with optional leading sign, returns an int""" - - fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction") - """fractional expression of an integer divided by an integer, returns a float""" - fraction.addParseAction(lambda t: t[0]/t[-1]) - - mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction") - """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" - mixed_integer.addParseAction(sum) - - real = Regex(r'[+-]?(?:\d+\.\d*|\.\d+)').setName("real number").setParseAction(convertToFloat) - """expression that parses a floating 
point number and returns a float""" - - sci_real = Regex(r'[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat) - """expression that parses a floating point number with optional - scientific notation and returns a float""" - - # streamlining this expression makes the docs nicer-looking - number = (sci_real | real | signed_integer).streamline() - """any numeric expression, returns the corresponding Python type""" - - fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat) - """any int or real number, returned as float""" - - identifier = Word(alphas + '_', alphanums + '_').setName("identifier") - """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" - - ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address") - "IPv4 address (``0.0.0.0 - 255.255.255.255``)" - - _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer") - _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part) * 7).setName("full IPv6 address") - _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part) * (0, 6)) - + "::" - + Optional(_ipv6_part + (':' + _ipv6_part) * (0, 6)) - ).setName("short IPv6 address") - _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8) - _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address") - ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address") - "IPv6 address (long, short, or mixed form)" - - mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address") - "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" - - @staticmethod - def convertToDate(fmt="%Y-%m-%d"): - """ - Helper to create a parse action for converting parsed date string to Python datetime.date - - Params - - - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``) - - Example:: - - date_expr = pyparsing_common.iso8601_date.copy() - date_expr.setParseAction(pyparsing_common.convertToDate()) - print(date_expr.parseString("1999-12-31")) - - prints:: - - [datetime.date(1999, 12, 31)] - """ - def cvt_fn(s, l, t): - try: - return datetime.strptime(t[0], fmt).date() - except ValueError as ve: - raise ParseException(s, l, str(ve)) - return cvt_fn - - @staticmethod - def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"): - """Helper to create a parse action for converting parsed - datetime string to Python datetime.datetime - - Params - - - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``) - - Example:: - - dt_expr = pyparsing_common.iso8601_datetime.copy() - dt_expr.setParseAction(pyparsing_common.convertToDatetime()) - print(dt_expr.parseString("1999-12-31T23:59:59.999")) - - prints:: - - [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] - """ - def cvt_fn(s, l, t): - try: - return datetime.strptime(t[0], fmt) - except ValueError as ve: - raise ParseException(s, l, str(ve)) - return cvt_fn - - iso8601_date = Regex(r'(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?').setName("ISO8601 date") - "ISO8601 date (``yyyy-mm-dd``)" - - iso8601_datetime = Regex(r'(?P\d{4})-(?P\d\d)-(?P\d\d)[T ](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime") - "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``" - - uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID") - "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)" - - _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress() - @staticmethod - def stripHTMLTags(s, l, tokens): - """Parse action to remove HTML tags from web page HTML source - - Example:: - - # strip HTML links from normal text - text = 'More info at the
pyparsing wiki page' - td, td_end = makeHTMLTags("TD") - table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end - print(table_text.parseString(text).body) - - Prints:: - - More info at the pyparsing wiki page - """ - return pyparsing_common._html_stripper.transformString(tokens[0]) - - _commasepitem = Combine(OneOrMore(~Literal(",") - + ~LineEnd() - + Word(printables, excludeChars=',') - + Optional(White(" \t")))).streamline().setName("commaItem") - comma_separated_list = delimitedList(Optional(quotedString.copy() - | _commasepitem, default='') - ).setName("comma separated list") - """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" - - upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper())) - """Parse action to convert tokens to upper case.""" - - downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower())) - """Parse action to convert tokens to lower case.""" - - -class _lazyclassproperty(object): - def __init__(self, fn): - self.fn = fn - self.__doc__ = fn.__doc__ - self.__name__ = fn.__name__ - - def __get__(self, obj, cls): - if cls is None: - cls = type(obj) - if not hasattr(cls, '_intern') or any(cls._intern is getattr(superclass, '_intern', []) - for superclass in cls.__mro__[1:]): - cls._intern = {} - attrname = self.fn.__name__ - if attrname not in cls._intern: - cls._intern[attrname] = self.fn(cls) - return cls._intern[attrname] - - -class unicode_set(object): - """ - A set of Unicode characters, for language-specific strings for - ``alphas``, ``nums``, ``alphanums``, and ``printables``. - A unicode_set is defined by a list of ranges in the Unicode character - set, in a class attribute ``_ranges``, such as:: - - _ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),] - - A unicode set can also be defined using multiple inheritance of other unicode sets:: - - class CJK(Chinese, Japanese, Korean): - pass - """ - _ranges = [] - - @classmethod - def _get_chars_for_ranges(cls): - ret = [] - for cc in cls.__mro__: - if cc is unicode_set: - break - for rr in cc._ranges: - ret.extend(range(rr[0], rr[-1] + 1)) - return [unichr(c) for c in sorted(set(ret))] - - @_lazyclassproperty - def printables(cls): - "all non-whitespace characters in this range" - return u''.join(filterfalse(unicode.isspace, cls._get_chars_for_ranges())) - - @_lazyclassproperty - def alphas(cls): - "all alphabetic characters in this range" - return u''.join(filter(unicode.isalpha, cls._get_chars_for_ranges())) - - @_lazyclassproperty - def nums(cls): - "all numeric digit characters in this range" - return u''.join(filter(unicode.isdigit, cls._get_chars_for_ranges())) - - @_lazyclassproperty - def alphanums(cls): - "all alphanumeric characters in this range" - return cls.alphas + cls.nums - - -class pyparsing_unicode(unicode_set): - """ - A namespace class for defining common language unicode_sets. 
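-
-    A minimal sketch of typical use (illustrative only; builds a ``Word``
-    over one of the language sets defined below)::
-
-        greek_word = Word(pyparsing_unicode.Greek.alphas)
-        greek_word.searchString(u"Μεγάλη Ελλάδα")  # matches each Greek word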
- """ - _ranges = [(32, sys.maxunicode)] - - class Latin1(unicode_set): - "Unicode set for Latin-1 Unicode Character Range" - _ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),] - - class LatinA(unicode_set): - "Unicode set for Latin-A Unicode Character Range" - _ranges = [(0x0100, 0x017f),] - - class LatinB(unicode_set): - "Unicode set for Latin-B Unicode Character Range" - _ranges = [(0x0180, 0x024f),] - - class Greek(unicode_set): - "Unicode set for Greek Unicode Character Ranges" - _ranges = [ - (0x0370, 0x03ff), (0x1f00, 0x1f15), (0x1f18, 0x1f1d), (0x1f20, 0x1f45), (0x1f48, 0x1f4d), - (0x1f50, 0x1f57), (0x1f59,), (0x1f5b,), (0x1f5d,), (0x1f5f, 0x1f7d), (0x1f80, 0x1fb4), (0x1fb6, 0x1fc4), - (0x1fc6, 0x1fd3), (0x1fd6, 0x1fdb), (0x1fdd, 0x1fef), (0x1ff2, 0x1ff4), (0x1ff6, 0x1ffe), - ] - - class Cyrillic(unicode_set): - "Unicode set for Cyrillic Unicode Character Range" - _ranges = [(0x0400, 0x04ff)] - - class Chinese(unicode_set): - "Unicode set for Chinese Unicode Character Range" - _ranges = [(0x4e00, 0x9fff), (0x3000, 0x303f),] - - class Japanese(unicode_set): - "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges" - _ranges = [] - - class Kanji(unicode_set): - "Unicode set for Kanji Unicode Character Range" - _ranges = [(0x4E00, 0x9Fbf), (0x3000, 0x303f),] - - class Hiragana(unicode_set): - "Unicode set for Hiragana Unicode Character Range" - _ranges = [(0x3040, 0x309f),] - - class Katakana(unicode_set): - "Unicode set for Katakana Unicode Character Range" - _ranges = [(0x30a0, 0x30ff),] - - class Korean(unicode_set): - "Unicode set for Korean Unicode Character Range" - _ranges = [(0xac00, 0xd7af), (0x1100, 0x11ff), (0x3130, 0x318f), (0xa960, 0xa97f), (0xd7b0, 0xd7ff), (0x3000, 0x303f),] - - class CJK(Chinese, Japanese, Korean): - "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range" - pass - - class Thai(unicode_set): - "Unicode set for Thai Unicode Character Range" - _ranges = [(0x0e01, 0x0e3a), (0x0e3f, 0x0e5b),] - - class Arabic(unicode_set): - "Unicode set for Arabic Unicode Character Range" - _ranges = [(0x0600, 0x061b), (0x061e, 0x06ff), (0x0700, 0x077f),] - - class Hebrew(unicode_set): - "Unicode set for Hebrew Unicode Character Range" - _ranges = [(0x0590, 0x05ff),] - - class Devanagari(unicode_set): - "Unicode set for Devanagari Unicode Character Range" - _ranges = [(0x0900, 0x097f), (0xa8e0, 0xa8ff)] - -pyparsing_unicode.Japanese._ranges = (pyparsing_unicode.Japanese.Kanji._ranges - + pyparsing_unicode.Japanese.Hiragana._ranges - + pyparsing_unicode.Japanese.Katakana._ranges) - -# define ranges in language character sets -if PY_3: - setattr(pyparsing_unicode, u"العربية", pyparsing_unicode.Arabic) - setattr(pyparsing_unicode, u"中文", pyparsing_unicode.Chinese) - setattr(pyparsing_unicode, u"кириллица", pyparsing_unicode.Cyrillic) - setattr(pyparsing_unicode, u"Ελληνικά", pyparsing_unicode.Greek) - setattr(pyparsing_unicode, u"עִברִית", pyparsing_unicode.Hebrew) - setattr(pyparsing_unicode, u"日本語", pyparsing_unicode.Japanese) - setattr(pyparsing_unicode.Japanese, u"漢字", pyparsing_unicode.Japanese.Kanji) - setattr(pyparsing_unicode.Japanese, u"カタカナ", pyparsing_unicode.Japanese.Katakana) - setattr(pyparsing_unicode.Japanese, u"ひらがな", pyparsing_unicode.Japanese.Hiragana) - setattr(pyparsing_unicode, u"한국어", pyparsing_unicode.Korean) - setattr(pyparsing_unicode, u"ไทย", pyparsing_unicode.Thai) - setattr(pyparsing_unicode, u"देवनागरी", pyparsing_unicode.Devanagari) - - -class pyparsing_test: - """ - 
namespace class for classes useful in writing unit tests
-    """
-
-    class reset_pyparsing_context:
-        """
-        Context manager to be used when writing unit tests that modify pyparsing config values:
-         - packrat parsing
-         - default whitespace characters
-         - default keyword characters
-         - literal string auto-conversion class
-         - __diag__ settings
-
-        Example:
-            with reset_pyparsing_context():
-                # test that literals used to construct a grammar are automatically suppressed
-                ParserElement.inlineLiteralsUsing(Suppress)
-
-                term = Word(alphas) | Word(nums)
-                group = Group('(' + term[...] + ')')
-
-                # assert that the '()' characters are not included in the parsed tokens
-                self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def'])
-
-            # after exiting context manager, literals are converted to Literal expressions again
-        """
-
-        def __init__(self):
-            self._save_context = {}
-
-        def save(self):
-            self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS
-            self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS
-            self._save_context[
-                "literal_string_class"
-            ] = ParserElement._literalStringClass
-            self._save_context["packrat_enabled"] = ParserElement._packratEnabled
-            self._save_context["packrat_parse"] = ParserElement._parse
-            self._save_context["__diag__"] = {
-                name: getattr(__diag__, name) for name in __diag__._all_names
-            }
-            self._save_context["__compat__"] = {
-                "collect_all_And_tokens": __compat__.collect_all_And_tokens
-            }
-            return self
-
-        def restore(self):
-            # reset pyparsing global state
-            if (
-                ParserElement.DEFAULT_WHITE_CHARS
-                != self._save_context["default_whitespace"]
-            ):
-                ParserElement.setDefaultWhitespaceChars(
-                    self._save_context["default_whitespace"]
-                )
-            Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"]
-            ParserElement.inlineLiteralsUsing(
-                self._save_context["literal_string_class"]
-            )
-            for name, value in self._save_context["__diag__"].items():
-                setattr(__diag__, name, value)
-            ParserElement._packratEnabled = self._save_context["packrat_enabled"]
-            ParserElement._parse = self._save_context["packrat_parse"]
-            # restore the saved bool, not the whole saved dict
-            __compat__.collect_all_And_tokens = self._save_context["__compat__"][
-                "collect_all_And_tokens"
-            ]
-
-        def __enter__(self):
-            return self.save()
-
-        def __exit__(self, *args):
-            return self.restore()
-
-    class TestParseResultsAsserts:
-        """
-        A mixin class to add parse results assertion methods to normal unittest.TestCase classes.
-        """
-        def assertParseResultsEquals(
-            self, result, expected_list=None, expected_dict=None, msg=None
-        ):
-            """
-            Unit test assertion to compare a ParseResults object with an optional expected_list,
-            and compare any defined results names with an optional expected_dict.
-            """
-            if expected_list is not None:
-                self.assertEqual(expected_list, result.asList(), msg=msg)
-            if expected_dict is not None:
-                self.assertEqual(expected_dict, result.asDict(), msg=msg)
-
-        def assertParseAndCheckList(
-            self, expr, test_string, expected_list, msg=None, verbose=True
-        ):
-            """
-            Convenience wrapper assert to test a parser element and input string, and assert that
-            the resulting ParseResults.asList() is equal to the expected_list.
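-
-            Example (an illustrative sketch)::
-
-                self.assertParseAndCheckList(Word(nums), "123", ['123'])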
- """ - result = expr.parseString(test_string, parseAll=True) - if verbose: - print(result.dump()) - self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg) - - def assertParseAndCheckDict( - self, expr, test_string, expected_dict, msg=None, verbose=True - ): - """ - Convenience wrapper assert to test a parser element and input string, and assert that - the resulting ParseResults.asDict() is equal to the expected_dict. - """ - result = expr.parseString(test_string, parseAll=True) - if verbose: - print(result.dump()) - self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg) - - def assertRunTestResults( - self, run_tests_report, expected_parse_results=None, msg=None - ): - """ - Unit test assertion to evaluate output of ParserElement.runTests(). If a list of - list-dict tuples is given as the expected_parse_results argument, then these are zipped - with the report tuples returned by runTests and evaluated using assertParseResultsEquals. - Finally, asserts that the overall runTests() success value is True. - - :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests - :param expected_parse_results (optional): [tuple(str, list, dict, Exception)] - """ - run_test_success, run_test_results = run_tests_report - - if expected_parse_results is not None: - merged = [ - (rpt[0], rpt[1], expected) - for rpt, expected in zip(run_test_results, expected_parse_results) - ] - for test_string, result, expected in merged: - # expected should be a tuple containing a list and/or a dict or an exception, - # and optional failure message string - # an empty tuple will skip any result validation - fail_msg = next( - (exp for exp in expected if isinstance(exp, str)), None - ) - expected_exception = next( - ( - exp - for exp in expected - if isinstance(exp, type) and issubclass(exp, Exception) - ), - None, - ) - if expected_exception is not None: - with self.assertRaises( - expected_exception=expected_exception, msg=fail_msg or msg - ): - if isinstance(result, Exception): - raise result - else: - expected_list = next( - (exp for exp in expected if isinstance(exp, list)), None - ) - expected_dict = next( - (exp for exp in expected if isinstance(exp, dict)), None - ) - if (expected_list, expected_dict) != (None, None): - self.assertParseResultsEquals( - result, - expected_list=expected_list, - expected_dict=expected_dict, - msg=fail_msg or msg, - ) - else: - # warning here maybe? 
- print("no validation for {!r}".format(test_string)) - - # do this last, in case some specific test results can be reported instead - self.assertTrue( - run_test_success, msg=msg if msg is not None else "failed runTests" - ) - - @contextmanager - def assertRaisesParseException(self, exc_type=ParseException, msg=None): - with self.assertRaises(exc_type, msg=msg): - yield - - -if __name__ == "__main__": - - selectToken = CaselessLiteral("select") - fromToken = CaselessLiteral("from") - - ident = Word(alphas, alphanums + "_$") - - columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) - columnNameList = Group(delimitedList(columnName)).setName("columns") - columnSpec = ('*' | columnNameList) - - tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) - tableNameList = Group(delimitedList(tableName)).setName("tables") - - simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables") - - # demo runTests method, including embedded comments in test string - simpleSQL.runTests(""" - # '*' as column list and dotted table name - select * from SYS.XYZZY - - # caseless match on "SELECT", and casts back to "select" - SELECT * from XYZZY, ABC - - # list of column names, and mixed case SELECT keyword - Select AA,BB,CC from Sys.dual - - # multiple tables - Select A, B, C from Sys.dual, Table2 - - # invalid SELECT keyword - should fail - Xelect A, B, C from Sys.dual - - # incomplete command - should fail - Select - - # invalid column name - should fail - Select ^^^ frox Sys.dual - - """) - - pyparsing_common.number.runTests(""" - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - """) - - # any int or real number, returned as float - pyparsing_common.fnumber.runTests(""" - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - """) - - pyparsing_common.hex_integer.runTests(""" - 100 - FF - """) - - import uuid - pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) - pyparsing_common.uuid.runTests(""" - 12345678-1234-5678-1234-567812345678 - """) diff --git a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/LICENSE.mit b/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/LICENSE.mit deleted file mode 100644 index 6609e4c0..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/LICENSE.mit +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2019 Tobias Gustafsson - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/__init__.py b/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/__init__.py deleted file mode 100644 index be299658..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -# -*- coding: utf-8 -*- - -from pyrsistent._pmap import pmap, m, PMap - -from pyrsistent._pvector import pvector, v, PVector - -from pyrsistent._pset import pset, s, PSet - -from pyrsistent._pbag import pbag, b, PBag - -from pyrsistent._plist import plist, l, PList - -from pyrsistent._pdeque import pdeque, dq, PDeque - -from pyrsistent._checked_types import ( - CheckedPMap, CheckedPVector, CheckedPSet, InvariantException, CheckedKeyTypeError, - CheckedValueTypeError, CheckedType, optional) - -from pyrsistent._field_common import ( - field, PTypeError, pset_field, pmap_field, pvector_field) - -from pyrsistent._precord import PRecord - -from pyrsistent._pclass import PClass, PClassMeta - -from pyrsistent._immutable import immutable - -from pyrsistent._helpers import freeze, thaw, mutant - -from pyrsistent._transformations import inc, discard, rex, ny - -from pyrsistent._toolz import get_in - - -__all__ = ('pmap', 'm', 'PMap', - 'pvector', 'v', 'PVector', - 'pset', 's', 'PSet', - 'pbag', 'b', 'PBag', - 'plist', 'l', 'PList', - 'pdeque', 'dq', 'PDeque', - 'CheckedPMap', 'CheckedPVector', 'CheckedPSet', 'InvariantException', 'CheckedKeyTypeError', 'CheckedValueTypeError', 'CheckedType', 'optional', - 'PRecord', 'field', 'pset_field', 'pmap_field', 'pvector_field', - 'PClass', 'PClassMeta', - 'immutable', - 'freeze', 'thaw', 'mutant', - 'get_in', - 'inc', 'discard', 'rex', 'ny') diff --git a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_checked_types.py b/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_checked_types.py deleted file mode 100644 index 293d989f..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_checked_types.py +++ /dev/null @@ -1,542 +0,0 @@ -from ._compat import Iterable -import six - -from pyrsistent._compat import Enum, string_types -from pyrsistent._pmap import PMap, pmap -from pyrsistent._pset import PSet, pset -from pyrsistent._pvector import PythonPVector, python_pvector - - -class CheckedType(object): - """ - Marker class to enable creation and serialization of checked object graphs. - """ - __slots__ = () - - @classmethod - def create(cls, source_data, _factory_fields=None): - raise NotImplementedError() - - def serialize(self, format=None): - raise NotImplementedError() - - -def _restore_pickle(cls, data): - return cls.create(data, _factory_fields=set()) - - -class InvariantException(Exception): - """ - Exception raised from a :py:class:`CheckedType` when invariant tests fail or when a mandatory - field is missing. 
- - Contains two fields of interest: - invariant_errors, a tuple of error data for the failing invariants - missing_fields, a tuple of strings specifying the missing names - """ - - def __init__(self, error_codes=(), missing_fields=(), *args, **kwargs): - self.invariant_errors = tuple(e() if callable(e) else e for e in error_codes) - self.missing_fields = missing_fields - super(InvariantException, self).__init__(*args, **kwargs) - - def __str__(self): - return super(InvariantException, self).__str__() + \ - ", invariant_errors=[{invariant_errors}], missing_fields=[{missing_fields}]".format( - invariant_errors=', '.join(str(e) for e in self.invariant_errors), - missing_fields=', '.join(self.missing_fields)) - - -_preserved_iterable_types = ( - Enum, -) -"""Some types are themselves iterable, but we want to use the type itself and -not its members for the type specification. This defines a set of such types -that we explicitly preserve. - -Note that strings are not such types because the string inputs we pass in are -values, not types. -""" - - -def maybe_parse_user_type(t): - """Try to coerce a user-supplied type directive into a list of types. - - This function should be used in all places where a user specifies a type, - for consistency. - - The policy for what defines valid user input should be clear from the implementation. - """ - is_type = isinstance(t, type) - is_preserved = isinstance(t, type) and issubclass(t, _preserved_iterable_types) - is_string = isinstance(t, string_types) - is_iterable = isinstance(t, Iterable) - - if is_preserved: - return [t] - elif is_string: - return [t] - elif is_type and not is_iterable: - return [t] - elif is_iterable: - # Recur to validate contained types as well. - ts = t - return tuple(e for t in ts for e in maybe_parse_user_type(t)) - else: - # If this raises because `t` cannot be formatted, so be it. - raise TypeError( - 'Type specifications must be types or strings. Input: {}'.format(t) - ) - - -def maybe_parse_many_user_types(ts): - # Just a different name to communicate that you're parsing multiple user - # inputs. `maybe_parse_user_type` handles the iterable case anyway. - return maybe_parse_user_type(ts) - - -def _store_types(dct, bases, destination_name, source_name): - maybe_types = maybe_parse_many_user_types([ - d[source_name] - for d in ([dct] + [b.__dict__ for b in bases]) if source_name in d - ]) - - dct[destination_name] = maybe_types - - -def _merge_invariant_results(result): - verdict = True - data = [] - for verd, dat in result: - if not verd: - verdict = False - data.append(dat) - - return verdict, tuple(data) - - -def wrap_invariant(invariant): - # Invariant functions may return the outcome of several tests - # In those cases the results have to be merged before being passed - # back to the client. - def f(*args, **kwargs): - result = invariant(*args, **kwargs) - if isinstance(result[0], bool): - return result - - return _merge_invariant_results(result) - - return f - - -def _all_dicts(bases, seen=None): - """ - Yield each class in ``bases`` and each of their base classes. 
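-
-    For example (an illustrative sketch)::
-
-        class A(object): pass
-        class B(A): pass
-
-        # yields B.__dict__, A.__dict__, then object.__dict__, each at most once
-        dicts = list(_all_dicts([B]))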
- """ - if seen is None: - seen = set() - for cls in bases: - if cls in seen: - continue - seen.add(cls) - yield cls.__dict__ - for b in _all_dicts(cls.__bases__, seen): - yield b - - -def store_invariants(dct, bases, destination_name, source_name): - # Invariants are inherited - invariants = [] - for ns in [dct] + list(_all_dicts(bases)): - try: - invariant = ns[source_name] - except KeyError: - continue - invariants.append(invariant) - - if not all(callable(invariant) for invariant in invariants): - raise TypeError('Invariants must be callable') - dct[destination_name] = tuple(wrap_invariant(inv) for inv in invariants) - - -class _CheckedTypeMeta(type): - def __new__(mcs, name, bases, dct): - _store_types(dct, bases, '_checked_types', '__type__') - store_invariants(dct, bases, '_checked_invariants', '__invariant__') - - def default_serializer(self, _, value): - if isinstance(value, CheckedType): - return value.serialize() - return value - - dct.setdefault('__serializer__', default_serializer) - - dct['__slots__'] = () - - return super(_CheckedTypeMeta, mcs).__new__(mcs, name, bases, dct) - - -class CheckedTypeError(TypeError): - def __init__(self, source_class, expected_types, actual_type, actual_value, *args, **kwargs): - super(CheckedTypeError, self).__init__(*args, **kwargs) - self.source_class = source_class - self.expected_types = expected_types - self.actual_type = actual_type - self.actual_value = actual_value - - -class CheckedKeyTypeError(CheckedTypeError): - """ - Raised when trying to set a value using a key with a type that doesn't match the declared type. - - Attributes: - source_class -- The class of the collection - expected_types -- Allowed types - actual_type -- The non matching type - actual_value -- Value of the variable with the non matching type - """ - pass - - -class CheckedValueTypeError(CheckedTypeError): - """ - Raised when trying to set a value using a key with a type that doesn't match the declared type. 
- - Attributes: - source_class -- The class of the collection - expected_types -- Allowed types - actual_type -- The non matching type - actual_value -- Value of the variable with the non matching type - """ - pass - - -def _get_class(type_name): - module_name, class_name = type_name.rsplit('.', 1) - module = __import__(module_name, fromlist=[class_name]) - return getattr(module, class_name) - - -def get_type(typ): - if isinstance(typ, type): - return typ - - return _get_class(typ) - - -def get_types(typs): - return [get_type(typ) for typ in typs] - - -def _check_types(it, expected_types, source_class, exception_type=CheckedValueTypeError): - if expected_types: - for e in it: - if not any(isinstance(e, get_type(t)) for t in expected_types): - actual_type = type(e) - msg = "Type {source_class} can only be used with {expected_types}, not {actual_type}".format( - source_class=source_class.__name__, - expected_types=tuple(get_type(et).__name__ for et in expected_types), - actual_type=actual_type.__name__) - raise exception_type(source_class, expected_types, actual_type, e, msg) - - -def _invariant_errors(elem, invariants): - return [data for valid, data in (invariant(elem) for invariant in invariants) if not valid] - - -def _invariant_errors_iterable(it, invariants): - return sum([_invariant_errors(elem, invariants) for elem in it], []) - - -def optional(*typs): - """ Convenience function to specify that a value may be of any of the types in type 'typs' or None """ - return tuple(typs) + (type(None),) - - -def _checked_type_create(cls, source_data, _factory_fields=None, ignore_extra=False): - if isinstance(source_data, cls): - return source_data - - # Recursively apply create methods of checked types if the types of the supplied data - # does not match any of the valid types. - types = get_types(cls._checked_types) - checked_type = next((t for t in types if issubclass(t, CheckedType)), None) - if checked_type: - return cls([checked_type.create(data, ignore_extra=ignore_extra) - if not any(isinstance(data, t) for t in types) else data - for data in source_data]) - - return cls(source_data) - -@six.add_metaclass(_CheckedTypeMeta) -class CheckedPVector(PythonPVector, CheckedType): - """ - A CheckedPVector is a PVector which allows specifying type and invariant checks. - - >>> class Positives(CheckedPVector): - ... __type__ = (int, float) - ... __invariant__ = lambda n: (n >= 0, 'Negative') - ... 
- >>> Positives([1, 2, 3]) - Positives([1, 2, 3]) - """ - - __slots__ = () - - def __new__(cls, initial=()): - if type(initial) == PythonPVector: - return super(CheckedPVector, cls).__new__(cls, initial._count, initial._shift, initial._root, initial._tail) - - return CheckedPVector.Evolver(cls, python_pvector()).extend(initial).persistent() - - def set(self, key, value): - return self.evolver().set(key, value).persistent() - - def append(self, val): - return self.evolver().append(val).persistent() - - def extend(self, it): - return self.evolver().extend(it).persistent() - - create = classmethod(_checked_type_create) - - def serialize(self, format=None): - serializer = self.__serializer__ - return list(serializer(format, v) for v in self) - - def __reduce__(self): - # Pickling support - return _restore_pickle, (self.__class__, list(self),) - - class Evolver(PythonPVector.Evolver): - __slots__ = ('_destination_class', '_invariant_errors') - - def __init__(self, destination_class, vector): - super(CheckedPVector.Evolver, self).__init__(vector) - self._destination_class = destination_class - self._invariant_errors = [] - - def _check(self, it): - _check_types(it, self._destination_class._checked_types, self._destination_class) - error_data = _invariant_errors_iterable(it, self._destination_class._checked_invariants) - self._invariant_errors.extend(error_data) - - def __setitem__(self, key, value): - self._check([value]) - return super(CheckedPVector.Evolver, self).__setitem__(key, value) - - def append(self, elem): - self._check([elem]) - return super(CheckedPVector.Evolver, self).append(elem) - - def extend(self, it): - it = list(it) - self._check(it) - return super(CheckedPVector.Evolver, self).extend(it) - - def persistent(self): - if self._invariant_errors: - raise InvariantException(error_codes=self._invariant_errors) - - result = self._orig_pvector - if self.is_dirty() or (self._destination_class != type(self._orig_pvector)): - pv = super(CheckedPVector.Evolver, self).persistent().extend(self._extra_tail) - result = self._destination_class(pv) - self._reset(result) - - return result - - def __repr__(self): - return self.__class__.__name__ + "({0})".format(self.tolist()) - - __str__ = __repr__ - - def evolver(self): - return CheckedPVector.Evolver(self.__class__, self) - - -@six.add_metaclass(_CheckedTypeMeta) -class CheckedPSet(PSet, CheckedType): - """ - A CheckedPSet is a PSet which allows specifying type and invariant checks. - - >>> class Positives(CheckedPSet): - ... __type__ = (int, float) - ... __invariant__ = lambda n: (n >= 0, 'Negative') - ... 
- >>> Positives([1, 2, 3]) - Positives([1, 2, 3]) - """ - - __slots__ = () - - def __new__(cls, initial=()): - if type(initial) is PMap: - return super(CheckedPSet, cls).__new__(cls, initial) - - evolver = CheckedPSet.Evolver(cls, pset()) - for e in initial: - evolver.add(e) - - return evolver.persistent() - - def __repr__(self): - return self.__class__.__name__ + super(CheckedPSet, self).__repr__()[4:] - - def __str__(self): - return self.__repr__() - - def serialize(self, format=None): - serializer = self.__serializer__ - return set(serializer(format, v) for v in self) - - create = classmethod(_checked_type_create) - - def __reduce__(self): - # Pickling support - return _restore_pickle, (self.__class__, list(self),) - - def evolver(self): - return CheckedPSet.Evolver(self.__class__, self) - - class Evolver(PSet._Evolver): - __slots__ = ('_destination_class', '_invariant_errors') - - def __init__(self, destination_class, original_set): - super(CheckedPSet.Evolver, self).__init__(original_set) - self._destination_class = destination_class - self._invariant_errors = [] - - def _check(self, it): - _check_types(it, self._destination_class._checked_types, self._destination_class) - error_data = _invariant_errors_iterable(it, self._destination_class._checked_invariants) - self._invariant_errors.extend(error_data) - - def add(self, element): - self._check([element]) - self._pmap_evolver[element] = True - return self - - def persistent(self): - if self._invariant_errors: - raise InvariantException(error_codes=self._invariant_errors) - - if self.is_dirty() or self._destination_class != type(self._original_pset): - return self._destination_class(self._pmap_evolver.persistent()) - - return self._original_pset - - -class _CheckedMapTypeMeta(type): - def __new__(mcs, name, bases, dct): - _store_types(dct, bases, '_checked_key_types', '__key_type__') - _store_types(dct, bases, '_checked_value_types', '__value_type__') - store_invariants(dct, bases, '_checked_invariants', '__invariant__') - - def default_serializer(self, _, key, value): - sk = key - if isinstance(key, CheckedType): - sk = key.serialize() - - sv = value - if isinstance(value, CheckedType): - sv = value.serialize() - - return sk, sv - - dct.setdefault('__serializer__', default_serializer) - - dct['__slots__'] = () - - return super(_CheckedMapTypeMeta, mcs).__new__(mcs, name, bases, dct) - -# Marker object -_UNDEFINED_CHECKED_PMAP_SIZE = object() - - -@six.add_metaclass(_CheckedMapTypeMeta) -class CheckedPMap(PMap, CheckedType): - """ - A CheckedPMap is a PMap which allows specifying type and invariant checks. - - >>> class IntToFloatMap(CheckedPMap): - ... __key_type__ = int - ... __value_type__ = float - ... __invariant__ = lambda k, v: (int(v) == k, 'Invalid mapping') - ... 
- >>> IntToFloatMap({1: 1.5, 2: 2.25}) - IntToFloatMap({1: 1.5, 2: 2.25}) - """ - - __slots__ = () - - def __new__(cls, initial={}, size=_UNDEFINED_CHECKED_PMAP_SIZE): - if size is not _UNDEFINED_CHECKED_PMAP_SIZE: - return super(CheckedPMap, cls).__new__(cls, size, initial) - - evolver = CheckedPMap.Evolver(cls, pmap()) - for k, v in initial.items(): - evolver.set(k, v) - - return evolver.persistent() - - def evolver(self): - return CheckedPMap.Evolver(self.__class__, self) - - def __repr__(self): - return self.__class__.__name__ + "({0})".format(str(dict(self))) - - __str__ = __repr__ - - def serialize(self, format=None): - serializer = self.__serializer__ - return dict(serializer(format, k, v) for k, v in self.items()) - - @classmethod - def create(cls, source_data, _factory_fields=None): - if isinstance(source_data, cls): - return source_data - - # Recursively apply create methods of checked types if the types of the supplied data - # does not match any of the valid types. - key_types = get_types(cls._checked_key_types) - checked_key_type = next((t for t in key_types if issubclass(t, CheckedType)), None) - value_types = get_types(cls._checked_value_types) - checked_value_type = next((t for t in value_types if issubclass(t, CheckedType)), None) - - if checked_key_type or checked_value_type: - return cls(dict((checked_key_type.create(key) if checked_key_type and not any(isinstance(key, t) for t in key_types) else key, - checked_value_type.create(value) if checked_value_type and not any(isinstance(value, t) for t in value_types) else value) - for key, value in source_data.items())) - - return cls(source_data) - - def __reduce__(self): - # Pickling support - return _restore_pickle, (self.__class__, dict(self),) - - class Evolver(PMap._Evolver): - __slots__ = ('_destination_class', '_invariant_errors') - - def __init__(self, destination_class, original_map): - super(CheckedPMap.Evolver, self).__init__(original_map) - self._destination_class = destination_class - self._invariant_errors = [] - - def set(self, key, value): - _check_types([key], self._destination_class._checked_key_types, self._destination_class, CheckedKeyTypeError) - _check_types([value], self._destination_class._checked_value_types, self._destination_class) - self._invariant_errors.extend(data for valid, data in (invariant(key, value) - for invariant in self._destination_class._checked_invariants) - if not valid) - - return super(CheckedPMap.Evolver, self).set(key, value) - - def persistent(self): - if self._invariant_errors: - raise InvariantException(error_codes=self._invariant_errors) - - if self.is_dirty() or type(self._original_pmap) != self._destination_class: - return self._destination_class(self._buckets_evolver.persistent(), self._size) - - return self._original_pmap diff --git a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_compat.py b/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_compat.py deleted file mode 100644 index e728586a..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_compat.py +++ /dev/null @@ -1,31 +0,0 @@ -from six import string_types - - -# enum compat -try: - from enum import Enum -except: - class Enum(object): pass - # no objects will be instances of this class - -# collections compat -try: - from collections.abc import ( - Container, - Hashable, - Iterable, - Mapping, - Sequence, - Set, - Sized, - ) -except ImportError: - from collections import ( - Container, - Hashable, - Iterable, - Mapping, - Sequence, - Set, - Sized, - ) diff --git 
a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_field_common.py b/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_field_common.py deleted file mode 100644 index ca1cccd4..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_field_common.py +++ /dev/null @@ -1,330 +0,0 @@ -import six -import sys - -from pyrsistent._checked_types import ( - CheckedPMap, - CheckedPSet, - CheckedPVector, - CheckedType, - InvariantException, - _restore_pickle, - get_type, - maybe_parse_user_type, - maybe_parse_many_user_types, -) -from pyrsistent._checked_types import optional as optional_type -from pyrsistent._checked_types import wrap_invariant -import inspect - -PY2 = sys.version_info[0] < 3 - - -def set_fields(dct, bases, name): - dct[name] = dict(sum([list(b.__dict__.get(name, {}).items()) for b in bases], [])) - - for k, v in list(dct.items()): - if isinstance(v, _PField): - dct[name][k] = v - del dct[k] - - -def check_global_invariants(subject, invariants): - error_codes = tuple(error_code for is_ok, error_code in - (invariant(subject) for invariant in invariants) if not is_ok) - if error_codes: - raise InvariantException(error_codes, (), 'Global invariant failed') - - -def serialize(serializer, format, value): - if isinstance(value, CheckedType) and serializer is PFIELD_NO_SERIALIZER: - return value.serialize(format) - - return serializer(format, value) - - -def check_type(destination_cls, field, name, value): - if field.type and not any(isinstance(value, get_type(t)) for t in field.type): - actual_type = type(value) - message = "Invalid type for field {0}.{1}, was {2}".format(destination_cls.__name__, name, actual_type.__name__) - raise PTypeError(destination_cls, name, field.type, actual_type, message) - - -def is_type_cls(type_cls, field_type): - if type(field_type) is set: - return True - types = tuple(field_type) - if len(types) == 0: - return False - return issubclass(get_type(types[0]), type_cls) - - -def is_field_ignore_extra_complaint(type_cls, field, ignore_extra): - # ignore_extra param has default False value, for speed purpose no need to propagate False - if not ignore_extra: - return False - - if not is_type_cls(type_cls, field.type): - return False - - if PY2: - return 'ignore_extra' in inspect.getargspec(field.factory).args - else: - return 'ignore_extra' in inspect.signature(field.factory).parameters - - - -class _PField(object): - __slots__ = ('type', 'invariant', 'initial', 'mandatory', '_factory', 'serializer') - - def __init__(self, type, invariant, initial, mandatory, factory, serializer): - self.type = type - self.invariant = invariant - self.initial = initial - self.mandatory = mandatory - self._factory = factory - self.serializer = serializer - - @property - def factory(self): - # If no factory is specified and the type is another CheckedType use the factory method of that CheckedType - if self._factory is PFIELD_NO_FACTORY and len(self.type) == 1: - typ = get_type(tuple(self.type)[0]) - if issubclass(typ, CheckedType): - return typ.create - - return self._factory - -PFIELD_NO_TYPE = () -PFIELD_NO_INVARIANT = lambda _: (True, None) -PFIELD_NO_FACTORY = lambda x: x -PFIELD_NO_INITIAL = object() -PFIELD_NO_SERIALIZER = lambda _, value: value - - -def field(type=PFIELD_NO_TYPE, invariant=PFIELD_NO_INVARIANT, initial=PFIELD_NO_INITIAL, - mandatory=False, factory=PFIELD_NO_FACTORY, serializer=PFIELD_NO_SERIALIZER): - """ - Field specification factory for :py:class:`PRecord`. 
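A minimal sketch of field() in use, assuming pyrsistent's PRecord (defined elsewhere in the package); the Point record here is purely illustrative:

>>> from pyrsistent import PRecord, field
>>> class Point(PRecord):
...     x = field(type=int, mandatory=True)
...     y = field(type=int, initial=0)
...
>>> p = Point(x=1)
>>> (p.x, p.y)
(1, 0)
>>> p.set(x=5).x
5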
- - :param type: a type or iterable with types that are allowed for this field - :param invariant: a function specifying an invariant that must hold for the field - :param initial: value of field if not specified when instantiating the record - :param mandatory: boolean specifying if the field is mandatory or not - :param factory: function called when field is set. - :param serializer: function that returns a serialized version of the field - """ - - # NB: We have to check this predicate separately from the predicates in - # `maybe_parse_user_type` et al. because this one is related to supporting - # the argspec for `field`, while those are related to supporting the valid - # ways to specify types. - - # Multiple types must be passed in one of the following containers. Note - # that a type that is a subclass of one of these containers, like a - # `collections.namedtuple`, will work as expected, since we check - # `isinstance` and not `issubclass`. - if isinstance(type, (list, set, tuple)): - types = set(maybe_parse_many_user_types(type)) - else: - types = set(maybe_parse_user_type(type)) - - invariant_function = wrap_invariant(invariant) if invariant != PFIELD_NO_INVARIANT and callable(invariant) else invariant - field = _PField(type=types, invariant=invariant_function, initial=initial, - mandatory=mandatory, factory=factory, serializer=serializer) - - _check_field_parameters(field) - - return field - - -def _check_field_parameters(field): - for t in field.type: - if not isinstance(t, type) and not isinstance(t, six.string_types): - raise TypeError('Type parameter expected, not {0}'.format(type(t))) - - if field.initial is not PFIELD_NO_INITIAL and \ - not callable(field.initial) and \ - field.type and not any(isinstance(field.initial, t) for t in field.type): - raise TypeError('Initial has invalid type {0}'.format(type(field.initial))) - - if not callable(field.invariant): - raise TypeError('Invariant must be callable') - - if not callable(field.factory): - raise TypeError('Factory must be callable') - - if not callable(field.serializer): - raise TypeError('Serializer must be callable') - - -class PTypeError(TypeError): - """ - Raised when trying to assign a value with a type that doesn't match the declared type. 
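Continuing the hypothetical Point record sketched earlier, a mismatched assignment raises PTypeError with the message built by check_type() above:

>>> from pyrsistent import PTypeError
>>> try:
...     Point(x='oops')
... except PTypeError as err:
...     print(err)
Invalid type for field Point.x, was str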
- - Attributes: - source_class -- The class of the record - field -- Field name - expected_types -- Types allowed for the field - actual_type -- The non matching type - """ - def __init__(self, source_class, field, expected_types, actual_type, *args, **kwargs): - super(PTypeError, self).__init__(*args, **kwargs) - self.source_class = source_class - self.field = field - self.expected_types = expected_types - self.actual_type = actual_type - - -SEQ_FIELD_TYPE_SUFFIXES = { - CheckedPVector: "PVector", - CheckedPSet: "PSet", -} - -# Global dictionary to hold auto-generated field types: used for unpickling -_seq_field_types = {} - -def _restore_seq_field_pickle(checked_class, item_type, data): - """Unpickling function for auto-generated PVec/PSet field types.""" - type_ = _seq_field_types[checked_class, item_type] - return _restore_pickle(type_, data) - -def _types_to_names(types): - """Convert a tuple of types to a human-readable string.""" - return "".join(get_type(typ).__name__.capitalize() for typ in types) - -def _make_seq_field_type(checked_class, item_type): - """Create a subclass of the given checked class with the given item type.""" - type_ = _seq_field_types.get((checked_class, item_type)) - if type_ is not None: - return type_ - - class TheType(checked_class): - __type__ = item_type - - def __reduce__(self): - return (_restore_seq_field_pickle, - (checked_class, item_type, list(self))) - - suffix = SEQ_FIELD_TYPE_SUFFIXES[checked_class] - TheType.__name__ = _types_to_names(TheType._checked_types) + suffix - _seq_field_types[checked_class, item_type] = TheType - return TheType - -def _sequence_field(checked_class, item_type, optional, initial): - """ - Create checked field for either ``PSet`` or ``PVector``. - - :param checked_class: ``CheckedPSet`` or ``CheckedPVector``. - :param item_type: The required type for the items in the set. - :param optional: If true, ``None`` can be used as a value for - this field. - :param initial: Initial value to pass to factory. - - :return: A ``field`` containing a checked class. - """ - TheType = _make_seq_field_type(checked_class, item_type) - - if optional: - def factory(argument, _factory_fields=None, ignore_extra=False): - if argument is None: - return None - else: - return TheType.create(argument, _factory_fields=_factory_fields, ignore_extra=ignore_extra) - else: - factory = TheType.create - - return field(type=optional_type(TheType) if optional else TheType, - factory=factory, mandatory=True, - initial=factory(initial)) - - -def pset_field(item_type, optional=False, initial=()): - """ - Create checked ``PSet`` field. - - :param item_type: The required type for the items in the set. - :param optional: If true, ``None`` can be used as a value for - this field. - :param initial: Initial value to pass to factory if no value is given - for the field. - - :return: A ``field`` containing a ``CheckedPSet`` of the given type. - """ - return _sequence_field(CheckedPSet, item_type, optional, - initial) - - -def pvector_field(item_type, optional=False, initial=()): - """ - Create checked ``PVector`` field. - - :param item_type: The required type for the items in the vector. - :param optional: If true, ``None`` can be used as a value for - this field. - :param initial: Initial value to pass to factory if no value is given - for the field. - - :return: A ``field`` containing a ``CheckedPVector`` of the given type. 
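A sketch of the auto-generated sequence field types, again assuming PRecord; the StrPVector name follows the _types_to_names() plus suffix scheme above:

>>> from pyrsistent import PRecord, pvector_field
>>> class Squad(PRecord):
...     members = pvector_field(str)
...
>>> Squad(members=['alice', 'bob']).members
StrPVector(['alice', 'bob'])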
- """ - return _sequence_field(CheckedPVector, item_type, optional, - initial) - - -_valid = lambda item: (True, "") - - -# Global dictionary to hold auto-generated field types: used for unpickling -_pmap_field_types = {} - -def _restore_pmap_field_pickle(key_type, value_type, data): - """Unpickling function for auto-generated PMap field types.""" - type_ = _pmap_field_types[key_type, value_type] - return _restore_pickle(type_, data) - -def _make_pmap_field_type(key_type, value_type): - """Create a subclass of CheckedPMap with the given key and value types.""" - type_ = _pmap_field_types.get((key_type, value_type)) - if type_ is not None: - return type_ - - class TheMap(CheckedPMap): - __key_type__ = key_type - __value_type__ = value_type - - def __reduce__(self): - return (_restore_pmap_field_pickle, - (self.__key_type__, self.__value_type__, dict(self))) - - TheMap.__name__ = "{0}To{1}PMap".format( - _types_to_names(TheMap._checked_key_types), - _types_to_names(TheMap._checked_value_types)) - _pmap_field_types[key_type, value_type] = TheMap - return TheMap - - -def pmap_field(key_type, value_type, optional=False, invariant=PFIELD_NO_INVARIANT): - """ - Create a checked ``PMap`` field. - - :param key: The required type for the keys of the map. - :param value: The required type for the values of the map. - :param optional: If true, ``None`` can be used as a value for - this field. - :param invariant: Pass-through to ``field``. - - :return: A ``field`` containing a ``CheckedPMap``. - """ - TheMap = _make_pmap_field_type(key_type, value_type) - - if optional: - def factory(argument): - if argument is None: - return None - else: - return TheMap.create(argument) - else: - factory = TheMap.create - - return field(mandatory=True, initial=TheMap(), - type=optional_type(TheMap) if optional else TheMap, - factory=factory, invariant=invariant) diff --git a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_helpers.py b/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_helpers.py deleted file mode 100644 index c9c58fea..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_helpers.py +++ /dev/null @@ -1,82 +0,0 @@ -from functools import wraps -import six -from pyrsistent._pmap import PMap, pmap -from pyrsistent._pset import PSet, pset -from pyrsistent._pvector import PVector, pvector - - -def freeze(o): - """ - Recursively convert simple Python containers into pyrsistent versions - of those containers. - - - list is converted to pvector, recursively - - dict is converted to pmap, recursively on values (but not keys) - - set is converted to pset, but not recursively - - tuple is converted to tuple, recursively. - - Sets and dict keys are not recursively frozen because they do not contain - mutable data by convention. The main exception to this rule is that - dict keys and set elements are often instances of mutable objects that - support hash-by-id, which this function can't convert anyway. - - >>> freeze(set([1, 2])) - pset([1, 2]) - >>> freeze([1, {'a': 3}]) - pvector([1, pmap({'a': 3})]) - >>> freeze((1, [])) - (1, pvector([])) - """ - typ = type(o) - if typ is dict: - return pmap(dict((k, freeze(v)) for k, v in six.iteritems(o))) - if typ is list: - return pvector(map(freeze, o)) - if typ is tuple: - return tuple(map(freeze, o)) - if typ is set: - return pset(o) - return o - - -def thaw(o): - """ - Recursively convert pyrsistent containers into simple Python containers. 
- - - pvector is converted to list, recursively - - pmap is converted to dict, recursively on values (but not keys) - - pset is converted to set, but not recursively - - tuple is converted to tuple, recursively. - - >>> from pyrsistent import s, m, v - >>> thaw(s(1, 2)) - {1, 2} - >>> thaw(v(1, m(a=3))) - [1, {'a': 3}] - >>> thaw((1, v())) - (1, []) - """ - if isinstance(o, PVector): - return list(map(thaw, o)) - if isinstance(o, PMap): - return dict((k, thaw(v)) for k, v in o.iteritems()) - if isinstance(o, PSet): - return set(o) - if type(o) is tuple: - return tuple(map(thaw, o)) - return o - - -def mutant(fn): - """ - Convenience decorator to isolate mutation to within the decorated function (with respect - to the input arguments). - - All arguments to the decorated function will be frozen so that they are guaranteed not to change. - The return value is also frozen. - """ - @wraps(fn) - def inner_f(*args, **kwargs): - return freeze(fn(*[freeze(e) for e in args], **dict(freeze(item) for item in kwargs.items()))) - - return inner_f diff --git a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_immutable.py b/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_immutable.py deleted file mode 100644 index a89bd755..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_immutable.py +++ /dev/null @@ -1,105 +0,0 @@ -import sys - -import six - - -def immutable(members='', name='Immutable', verbose=False): - """ - Produces a class that either can be used standalone or as a base class for persistent classes. - - This is a thin wrapper around a named tuple. - - Constructing a type and using it to instantiate objects: - - >>> Point = immutable('x, y', name='Point') - >>> p = Point(1, 2) - >>> p2 = p.set(x=3) - >>> p - Point(x=1, y=2) - >>> p2 - Point(x=3, y=2) - - Inheriting from a constructed type. In this case no type name needs to be supplied: - - >>> class PositivePoint(immutable('x, y')): - ... __slots__ = tuple() - ... def __new__(cls, x, y): - ... if x > 0 and y > 0: - ... return super(PositivePoint, cls).__new__(cls, x, y) - ... raise Exception('Coordinates must be positive!') - ... - >>> p = PositivePoint(1, 2) - >>> p.set(x=3) - PositivePoint(x=3, y=2) - >>> p.set(y=-3) - Traceback (most recent call last): - Exception: Coordinates must be positive! - - The persistent class also supports the notion of frozen members. The value of a frozen member - cannot be updated. For example it could be used to implement an ID that should remain the same - over time. A frozen member is denoted by a trailing underscore. 
- - >>> Point = immutable('x, y, id_', name='Point') - >>> p = Point(1, 2, id_=17) - >>> p.set(x=3) - Point(x=3, y=2, id_=17) - >>> p.set(id_=18) - Traceback (most recent call last): - AttributeError: Cannot set frozen members id_ - """ - - if isinstance(members, six.string_types): - members = members.replace(',', ' ').split() - - def frozen_member_test(): - frozen_members = ["'%s'" % f for f in members if f.endswith('_')] - if frozen_members: - return """ - frozen_fields = fields_to_modify & set([{frozen_members}]) - if frozen_fields: - raise AttributeError('Cannot set frozen members %s' % ', '.join(frozen_fields)) - """.format(frozen_members=', '.join(frozen_members)) - - return '' - - verbose_string = "" - if sys.version_info < (3, 7): - # Verbose is no longer supported in Python 3.7 - verbose_string = ", verbose={verbose}".format(verbose=verbose) - - quoted_members = ', '.join("'%s'" % m for m in members) - template = """ -class {class_name}(namedtuple('ImmutableBase', [{quoted_members}]{verbose_string})): - __slots__ = tuple() - - def __repr__(self): - return super({class_name}, self).__repr__().replace('ImmutableBase', self.__class__.__name__) - - def set(self, **kwargs): - if not kwargs: - return self - - fields_to_modify = set(kwargs.keys()) - if not fields_to_modify <= {member_set}: - raise AttributeError("'%s' is not a member" % ', '.join(fields_to_modify - {member_set})) - - {frozen_member_test} - - return self.__class__.__new__(self.__class__, *map(kwargs.pop, [{quoted_members}], self)) -""".format(quoted_members=quoted_members, - member_set="set([%s])" % quoted_members if quoted_members else 'set()', - frozen_member_test=frozen_member_test(), - verbose_string=verbose_string, - class_name=name) - - if verbose: - print(template) - - from collections import namedtuple - namespace = dict(namedtuple=namedtuple, __name__='pyrsistent_immutable') - try: - six.exec_(template, namespace) - except SyntaxError as e: - raise SyntaxError(e.message + ':\n' + template) - - return namespace[name] \ No newline at end of file diff --git a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_pbag.py b/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_pbag.py deleted file mode 100644 index 9905e9a6..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_pbag.py +++ /dev/null @@ -1,267 +0,0 @@ -from ._compat import Container, Iterable, Sized, Hashable -from functools import reduce -from pyrsistent._pmap import pmap - - -def _add_to_counters(counters, element): - return counters.set(element, counters.get(element, 0) + 1) - - -class PBag(object): - """ - A persistent bag/multiset type. - - Requires elements to be hashable, and allows duplicates, but has no - ordering. Bags are hashable. - - Do not instantiate directly, instead use the factory functions :py:func:`b` - or :py:func:`pbag` to create an instance. - - Some examples: - - >>> s = pbag([1, 2, 3, 1]) - >>> s2 = s.add(4) - >>> s3 = s2.remove(1) - >>> s - pbag([1, 1, 2, 3]) - >>> s2 - pbag([1, 1, 2, 3, 4]) - >>> s3 - pbag([1, 2, 3, 4]) - """ - - __slots__ = ('_counts', '__weakref__') - - def __init__(self, counts): - self._counts = counts - - def add(self, element): - """ - Add an element to the bag. - - >>> s = pbag([1]) - >>> s2 = s.add(1) - >>> s3 = s.add(2) - >>> s2 - pbag([1, 1]) - >>> s3 - pbag([1, 2]) - """ - return PBag(_add_to_counters(self._counts, element)) - - def update(self, iterable): - """ - Update bag with all elements in iterable. 
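A short sketch of the counting behaviour this class describes (count() and __len__ are defined a little further down):

>>> from pyrsistent import pbag
>>> bag = pbag([1, 1, 2])
>>> bag.add(1).count(1)
3
>>> len(bag.add(1))
4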
- - >>> s = pbag([1]) - >>> s.update([1, 2]) - pbag([1, 1, 2]) - """ - if iterable: - return PBag(reduce(_add_to_counters, iterable, self._counts)) - - return self - - def remove(self, element): - """ - Remove an element from the bag. - - >>> s = pbag([1, 1, 2]) - >>> s2 = s.remove(1) - >>> s3 = s.remove(2) - >>> s2 - pbag([1, 2]) - >>> s3 - pbag([1, 1]) - """ - if element not in self._counts: - raise KeyError(element) - elif self._counts[element] == 1: - newc = self._counts.remove(element) - else: - newc = self._counts.set(element, self._counts[element] - 1) - return PBag(newc) - - def count(self, element): - """ - Return the number of times an element appears. - - - >>> pbag([]).count('non-existent') - 0 - >>> pbag([1, 1, 2]).count(1) - 2 - """ - return self._counts.get(element, 0) - - def __len__(self): - """ - Return the length including duplicates. - - >>> len(pbag([1, 1, 2])) - 3 - """ - return sum(self._counts.itervalues()) - - def __iter__(self): - """ - Return an iterator of all elements, including duplicates. - - >>> list(pbag([1, 1, 2])) - [1, 1, 2] - >>> list(pbag([1, 2])) - [1, 2] - """ - for elt, count in self._counts.iteritems(): - for i in range(count): - yield elt - - def __contains__(self, elt): - """ - Check if an element is in the bag. - - >>> 1 in pbag([1, 1, 2]) - True - >>> 0 in pbag([1, 2]) - False - """ - return elt in self._counts - - def __repr__(self): - return "pbag({0})".format(list(self)) - - def __eq__(self, other): - """ - Check if two bags are equivalent, honoring the number of duplicates, - and ignoring insertion order. - - >>> pbag([1, 1, 2]) == pbag([1, 2]) - False - >>> pbag([2, 1, 0]) == pbag([0, 1, 2]) - True - """ - if type(other) is not PBag: - raise TypeError("Can only compare PBag with PBags") - return self._counts == other._counts - - def __lt__(self, other): - raise TypeError('PBags are not orderable') - - __le__ = __lt__ - __gt__ = __lt__ - __ge__ = __lt__ - - # Multiset-style operations similar to collections.Counter - - def __add__(self, other): - """ - Combine elements from two PBags. - - >>> pbag([1, 2, 2]) + pbag([2, 3, 3]) - pbag([1, 2, 2, 2, 3, 3]) - """ - if not isinstance(other, PBag): - return NotImplemented - result = self._counts.evolver() - for elem, other_count in other._counts.iteritems(): - result[elem] = self.count(elem) + other_count - return PBag(result.persistent()) - - def __sub__(self, other): - """ - Remove elements from one PBag that are present in another. - - >>> pbag([1, 2, 2, 2, 3]) - pbag([2, 3, 3, 4]) - pbag([1, 2, 2]) - """ - if not isinstance(other, PBag): - return NotImplemented - result = self._counts.evolver() - for elem, other_count in other._counts.iteritems(): - newcount = self.count(elem) - other_count - if newcount > 0: - result[elem] = newcount - elif elem in self: - result.remove(elem) - return PBag(result.persistent()) - - def __or__(self, other): - """ - Union: Keep elements that are present in either of two PBags. - - >>> pbag([1, 2, 2, 2]) | pbag([2, 3, 3]) - pbag([1, 2, 2, 2, 3, 3]) - """ - if not isinstance(other, PBag): - return NotImplemented - result = self._counts.evolver() - for elem, other_count in other._counts.iteritems(): - count = self.count(elem) - newcount = max(count, other_count) - result[elem] = newcount - return PBag(result.persistent()) - - def __and__(self, other): - """ - Intersection: Only keep elements that are present in both PBags. 
- - >>> pbag([1, 2, 2, 2]) & pbag([2, 3, 3]) - pbag([2]) - """ - if not isinstance(other, PBag): - return NotImplemented - result = pmap().evolver() - for elem, count in self._counts.iteritems(): - newcount = min(count, other.count(elem)) - if newcount > 0: - result[elem] = newcount - return PBag(result.persistent()) - - def __hash__(self): - """ - Hash based on value of elements. - - >>> m = pmap({pbag([1, 2]): "it's here!"}) - >>> m[pbag([2, 1])] - "it's here!" - >>> pbag([1, 1, 2]) in m - False - """ - return hash(self._counts) - - -Container.register(PBag) -Iterable.register(PBag) -Sized.register(PBag) -Hashable.register(PBag) - - -def b(*elements): - """ - Construct a persistent bag. - - Takes an arbitrary number of arguments to insert into the new persistent - bag. - - >>> b(1, 2, 3, 2) - pbag([1, 2, 2, 3]) - """ - return pbag(elements) - - -def pbag(elements): - """ - Convert an iterable to a persistent bag. - - Takes an iterable with elements to insert. - - >>> pbag([1, 2, 3, 2]) - pbag([1, 2, 2, 3]) - """ - if not elements: - return _EMPTY_PBAG - return PBag(reduce(_add_to_counters, elements, pmap())) - - -_EMPTY_PBAG = PBag(pmap()) - diff --git a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_pclass.py b/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_pclass.py deleted file mode 100644 index a437f716..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_pclass.py +++ /dev/null @@ -1,264 +0,0 @@ -import six -from pyrsistent._checked_types import (InvariantException, CheckedType, _restore_pickle, store_invariants) -from pyrsistent._field_common import ( - set_fields, check_type, is_field_ignore_extra_complaint, PFIELD_NO_INITIAL, serialize, check_global_invariants -) -from pyrsistent._transformations import transform - - -def _is_pclass(bases): - return len(bases) == 1 and bases[0] == CheckedType - - -class PClassMeta(type): - def __new__(mcs, name, bases, dct): - set_fields(dct, bases, name='_pclass_fields') - store_invariants(dct, bases, '_pclass_invariants', '__invariant__') - dct['__slots__'] = ('_pclass_frozen',) + tuple(key for key in dct['_pclass_fields']) - - # There must only be one __weakref__ entry in the inheritance hierarchy, - # lets put it on the top level class. - if _is_pclass(bases): - dct['__slots__'] += ('__weakref__',) - - return super(PClassMeta, mcs).__new__(mcs, name, bases, dct) - -_MISSING_VALUE = object() - - -def _check_and_set_attr(cls, field, name, value, result, invariant_errors): - check_type(cls, field, name, value) - is_ok, error_code = field.invariant(value) - if not is_ok: - invariant_errors.append(error_code) - else: - setattr(result, name, value) - - -@six.add_metaclass(PClassMeta) -class PClass(CheckedType): - """ - A PClass is a python class with a fixed set of specified fields. PClasses are declared as python classes inheriting - from PClass. It is defined the same way that PRecords are and behaves like a PRecord in all aspects except that it - is not a PMap and hence not a collection but rather a plain Python object. - - - More documentation and examples of PClass usage is available at https://github.com/tobgu/pyrsistent - """ - def __new__(cls, **kwargs): # Support *args? 
- result = super(PClass, cls).__new__(cls) - factory_fields = kwargs.pop('_factory_fields', None) - ignore_extra = kwargs.pop('ignore_extra', None) - missing_fields = [] - invariant_errors = [] - for name, field in cls._pclass_fields.items(): - if name in kwargs: - if factory_fields is None or name in factory_fields: - if is_field_ignore_extra_complaint(PClass, field, ignore_extra): - value = field.factory(kwargs[name], ignore_extra=ignore_extra) - else: - value = field.factory(kwargs[name]) - else: - value = kwargs[name] - _check_and_set_attr(cls, field, name, value, result, invariant_errors) - del kwargs[name] - elif field.initial is not PFIELD_NO_INITIAL: - initial = field.initial() if callable(field.initial) else field.initial - _check_and_set_attr( - cls, field, name, initial, result, invariant_errors) - elif field.mandatory: - missing_fields.append('{0}.{1}'.format(cls.__name__, name)) - - if invariant_errors or missing_fields: - raise InvariantException(tuple(invariant_errors), tuple(missing_fields), 'Field invariant failed') - - if kwargs: - raise AttributeError("'{0}' are not among the specified fields for {1}".format( - ', '.join(kwargs), cls.__name__)) - - check_global_invariants(result, cls._pclass_invariants) - - result._pclass_frozen = True - return result - - def set(self, *args, **kwargs): - """ - Set a field in the instance. Returns a new instance with the updated value. The original instance remains - unmodified. Accepts key-value pairs or a single string representing the field name and a value. - - >>> from pyrsistent import PClass, field - >>> class AClass(PClass): - ... x = field() - ... - >>> a = AClass(x=1) - >>> a2 = a.set(x=2) - >>> a3 = a.set('x', 3) - >>> a - AClass(x=1) - >>> a2 - AClass(x=2) - >>> a3 - AClass(x=3) - """ - if args: - kwargs[args[0]] = args[1] - - factory_fields = set(kwargs) - - for key in self._pclass_fields: - if key not in kwargs: - value = getattr(self, key, _MISSING_VALUE) - if value is not _MISSING_VALUE: - kwargs[key] = value - - return self.__class__(_factory_fields=factory_fields, **kwargs) - - @classmethod - def create(cls, kwargs, _factory_fields=None, ignore_extra=False): - """ - Factory method. Will create a new PClass of the current type and assign the values - specified in kwargs. - - :param ignore_extra: A boolean which when set to True will ignore any keys which appear in kwargs that are not - in the set of fields on the PClass. - """ - if isinstance(kwargs, cls): - return kwargs - - if ignore_extra: - kwargs = {k: kwargs[k] for k in cls._pclass_fields if k in kwargs} - - return cls(_factory_fields=_factory_fields, ignore_extra=ignore_extra, **kwargs) - - def serialize(self, format=None): - """ - Serialize the current PClass using custom serializer functions for fields where - such have been supplied. - """ - result = {} - for name in self._pclass_fields: - value = getattr(self, name, _MISSING_VALUE) - if value is not _MISSING_VALUE: - result[name] = serialize(self._pclass_fields[name].serializer, format, value) - - return result - - def transform(self, *transformations): - """ - Apply transformations to the current PClass. For more details on transformations see - the documentation for PMap. Transformations on PClasses do not support key matching - since the PClass is not a collection. Apart from that the transformations available - for other persistent types work as expected.
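A hedged sketch of what that might look like, assuming a hypothetical Unit class and pyrsistent's convention that a non-callable transformation is used as the new value:

>>> from pyrsistent import PClass, field
>>> class Unit(PClass):
...     name = field(type=str, mandatory=True)
...     hp = field(type=int, initial=100)
...
>>> u = Unit(name='alpha')
>>> u.transform(['hp'], 50).hp
50
>>> u.hp
100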
- """ - return transform(self, transformations) - - def __eq__(self, other): - if isinstance(other, self.__class__): - for name in self._pclass_fields: - if getattr(self, name, _MISSING_VALUE) != getattr(other, name, _MISSING_VALUE): - return False - - return True - - return NotImplemented - - def __ne__(self, other): - return not self == other - - def __hash__(self): - # May want to optimize this by caching the hash somehow - return hash(tuple((key, getattr(self, key, _MISSING_VALUE)) for key in self._pclass_fields)) - - def __setattr__(self, key, value): - if getattr(self, '_pclass_frozen', False): - raise AttributeError("Can't set attribute, key={0}, value={1}".format(key, value)) - - super(PClass, self).__setattr__(key, value) - - def __delattr__(self, key): - raise AttributeError("Can't delete attribute, key={0}, use remove()".format(key)) - - def _to_dict(self): - result = {} - for key in self._pclass_fields: - value = getattr(self, key, _MISSING_VALUE) - if value is not _MISSING_VALUE: - result[key] = value - - return result - - def __repr__(self): - return "{0}({1})".format(self.__class__.__name__, - ', '.join('{0}={1}'.format(k, repr(v)) for k, v in self._to_dict().items())) - - def __reduce__(self): - # Pickling support - data = dict((key, getattr(self, key)) for key in self._pclass_fields if hasattr(self, key)) - return _restore_pickle, (self.__class__, data,) - - def evolver(self): - """ - Returns an evolver for this object. - """ - return _PClassEvolver(self, self._to_dict()) - - def remove(self, name): - """ - Remove attribute given by name from the current instance. Raises AttributeError if the - attribute doesn't exist. - """ - evolver = self.evolver() - del evolver[name] - return evolver.persistent() - - -class _PClassEvolver(object): - __slots__ = ('_pclass_evolver_original', '_pclass_evolver_data', '_pclass_evolver_data_is_dirty', '_factory_fields') - - def __init__(self, original, initial_dict): - self._pclass_evolver_original = original - self._pclass_evolver_data = initial_dict - self._pclass_evolver_data_is_dirty = False - self._factory_fields = set() - - def __getitem__(self, item): - return self._pclass_evolver_data[item] - - def set(self, key, value): - if self._pclass_evolver_data.get(key, _MISSING_VALUE) is not value: - self._pclass_evolver_data[key] = value - self._factory_fields.add(key) - self._pclass_evolver_data_is_dirty = True - - return self - - def __setitem__(self, key, value): - self.set(key, value) - - def remove(self, item): - if item in self._pclass_evolver_data: - del self._pclass_evolver_data[item] - self._factory_fields.discard(item) - self._pclass_evolver_data_is_dirty = True - return self - - raise AttributeError(item) - - def __delitem__(self, item): - self.remove(item) - - def persistent(self): - if self._pclass_evolver_data_is_dirty: - return self._pclass_evolver_original.__class__(_factory_fields=self._factory_fields, - **self._pclass_evolver_data) - - return self._pclass_evolver_original - - def __setattr__(self, key, value): - if key not in self.__slots__: - self.set(key, value) - else: - super(_PClassEvolver, self).__setattr__(key, value) - - def __getattr__(self, item): - return self[item] diff --git a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_pdeque.py b/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_pdeque.py deleted file mode 100644 index 5147b3fa..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_pdeque.py +++ /dev/null @@ -1,376 +0,0 @@ -from ._compat import Sequence, Hashable -from itertools import 
islice, chain -from numbers import Integral -from pyrsistent._plist import plist - - -class PDeque(object): - """ - Persistent double ended queue (deque). Allows quick appends and pops at both ends. Implemented - using two persistent lists. - - A maximum length can be specified to create a bounded queue. - - Fully supports the Sequence and Hashable protocols including indexing and slicing but - if you need fast random access go for the PVector instead. - - Do not instantiate directly, instead use the factory functions :py:func:`dq` or :py:func:`pdeque` to - create an instance. - - Some examples: - - >>> x = pdeque([1, 2, 3]) - >>> x.left - 1 - >>> x.right - 3 - >>> x[0] == x.left - True - >>> x[-1] == x.right - True - >>> x.pop() - pdeque([1, 2]) - >>> x.pop() == x[:-1] - True - >>> x.popleft() - pdeque([2, 3]) - >>> x.append(4) - pdeque([1, 2, 3, 4]) - >>> x.appendleft(4) - pdeque([4, 1, 2, 3]) - - >>> y = pdeque([1, 2, 3], maxlen=3) - >>> y.append(4) - pdeque([2, 3, 4], maxlen=3) - >>> y.appendleft(4) - pdeque([4, 1, 2], maxlen=3) - """ - __slots__ = ('_left_list', '_right_list', '_length', '_maxlen', '__weakref__') - - def __new__(cls, left_list, right_list, length, maxlen=None): - instance = super(PDeque, cls).__new__(cls) - instance._left_list = left_list - instance._right_list = right_list - instance._length = length - - if maxlen is not None: - if not isinstance(maxlen, Integral): - raise TypeError('An integer is required as maxlen') - - if maxlen < 0: - raise ValueError("maxlen must be non-negative") - - instance._maxlen = maxlen - return instance - - @property - def right(self): - """ - Rightmost element in deque. - """ - return PDeque._tip_from_lists(self._right_list, self._left_list) - - @property - def left(self): - """ - Leftmost element in deque. - """ - return PDeque._tip_from_lists(self._left_list, self._right_list) - - @staticmethod - def _tip_from_lists(primary_list, secondary_list): - if primary_list: - return primary_list.first - - if secondary_list: - return secondary_list[-1] - - raise IndexError('No elements in empty deque') - - def __iter__(self): - return chain(self._left_list, self._right_list.reverse()) - - def __repr__(self): - return "pdeque({0}{1})".format(list(self), - ', maxlen={0}'.format(self._maxlen) if self._maxlen is not None else '') - __str__ = __repr__ - - @property - def maxlen(self): - """ - Maximum length of the queue. - """ - return self._maxlen - - def pop(self, count=1): - """ - Return new deque with rightmost element removed. Popping the empty queue - will return the empty queue. An optional count can be given to indicate the - number of elements to pop. Popping with a negative index is the same as - popleft. Executes in amortized O(k) where k is the number of elements to pop. - - >>> pdeque([1, 2]).pop() - pdeque([1]) - >>> pdeque([1, 2]).pop(2) - pdeque([]) - >>> pdeque([1, 2]).pop(-1) - pdeque([2]) - """ - if count < 0: - return self.popleft(-count) - - new_right_list, new_left_list = PDeque._pop_lists(self._right_list, self._left_list, count) - return PDeque(new_left_list, new_right_list, max(self._length - count, 0), self._maxlen) - - def popleft(self, count=1): - """ - Return new deque with leftmost element removed. Otherwise functionally - equivalent to pop().
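A sketch of the symmetric pop()/popleft() behaviour just described, including the negative-count case:

>>> from pyrsistent import pdeque
>>> d = pdeque([1, 2, 3])
>>> d.pop(2)
pdeque([1])
>>> d.popleft(2)
pdeque([3])
>>> d.pop(-1)
pdeque([2, 3])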
- - >>> pdeque([1, 2]).popleft() - pdeque([2]) - """ - if count < 0: - return self.pop(-count) - - new_left_list, new_right_list = PDeque._pop_lists(self._left_list, self._right_list, count) - return PDeque(new_left_list, new_right_list, max(self._length - count, 0), self._maxlen) - - @staticmethod - def _pop_lists(primary_list, secondary_list, count): - new_primary_list = primary_list - new_secondary_list = secondary_list - - while count > 0 and (new_primary_list or new_secondary_list): - count -= 1 - if new_primary_list.rest: - new_primary_list = new_primary_list.rest - elif new_primary_list: - new_primary_list = new_secondary_list.reverse() - new_secondary_list = plist() - else: - new_primary_list = new_secondary_list.reverse().rest - new_secondary_list = plist() - - return new_primary_list, new_secondary_list - - def _is_empty(self): - return not self._left_list and not self._right_list - - def __lt__(self, other): - if not isinstance(other, PDeque): - return NotImplemented - - return tuple(self) < tuple(other) - - def __eq__(self, other): - if not isinstance(other, PDeque): - return NotImplemented - - if tuple(self) == tuple(other): - # Sanity check of the length value since it is redundant (there for performance) - assert len(self) == len(other) - return True - - return False - - def __hash__(self): - return hash(tuple(self)) - - def __len__(self): - return self._length - - def append(self, elem): - """ - Return new deque with elem as the rightmost element. - - >>> pdeque([1, 2]).append(3) - pdeque([1, 2, 3]) - """ - new_left_list, new_right_list, new_length = self._append(self._left_list, self._right_list, elem) - return PDeque(new_left_list, new_right_list, new_length, self._maxlen) - - def appendleft(self, elem): - """ - Return new deque with elem as the leftmost element. - - >>> pdeque([1, 2]).appendleft(3) - pdeque([3, 1, 2]) - """ - new_right_list, new_left_list, new_length = self._append(self._right_list, self._left_list, elem) - return PDeque(new_left_list, new_right_list, new_length, self._maxlen) - - def _append(self, primary_list, secondary_list, elem): - if self._maxlen is not None and self._length == self._maxlen: - if self._maxlen == 0: - return primary_list, secondary_list, 0 - new_primary_list, new_secondary_list = PDeque._pop_lists(primary_list, secondary_list, 1) - return new_primary_list, new_secondary_list.cons(elem), self._length - - return primary_list, secondary_list.cons(elem), self._length + 1 - - @staticmethod - def _extend_list(the_list, iterable): - count = 0 - for elem in iterable: - the_list = the_list.cons(elem) - count += 1 - - return the_list, count - - def _extend(self, primary_list, secondary_list, iterable): - new_primary_list, extend_count = PDeque._extend_list(primary_list, iterable) - new_secondary_list = secondary_list - current_len = self._length + extend_count - if self._maxlen is not None and current_len > self._maxlen: - pop_len = current_len - self._maxlen - new_secondary_list, new_primary_list = PDeque._pop_lists(new_secondary_list, new_primary_list, pop_len) - extend_count -= pop_len - - return new_primary_list, new_secondary_list, extend_count - - def extend(self, iterable): - """ - Return new deque with all elements of iterable appended to the right. 
- - >>> pdeque([1, 2]).extend([3, 4]) - pdeque([1, 2, 3, 4]) - """ - new_right_list, new_left_list, extend_count = self._extend(self._right_list, self._left_list, iterable) - return PDeque(new_left_list, new_right_list, self._length + extend_count, self._maxlen) - - def extendleft(self, iterable): - """ - Return new deque with all elements of iterable appended to the left. - - NB! The elements will be inserted in reverse order compared to the order in the iterable. - - >>> pdeque([1, 2]).extendleft([3, 4]) - pdeque([4, 3, 1, 2]) - """ - new_left_list, new_right_list, extend_count = self._extend(self._left_list, self._right_list, iterable) - return PDeque(new_left_list, new_right_list, self._length + extend_count, self._maxlen) - - def count(self, elem): - """ - Return the number of elements equal to elem present in the queue. - - >>> pdeque([1, 2, 1]).count(1) - 2 - """ - return self._left_list.count(elem) + self._right_list.count(elem) - - def remove(self, elem): - """ - Return new deque with first element from left equal to elem removed. If no such element is found - a ValueError is raised. - - >>> pdeque([2, 1, 2]).remove(2) - pdeque([1, 2]) - """ - try: - return PDeque(self._left_list.remove(elem), self._right_list, self._length - 1) - except ValueError: - # Value not found in left list, try the right list - try: - # This is severely inefficient with a double reverse, should perhaps implement a remove_last()? - return PDeque(self._left_list, - self._right_list.reverse().remove(elem).reverse(), self._length - 1) - except ValueError: - raise ValueError('{0} not found in PDeque'.format(elem)) - - def reverse(self): - """ - Return reversed deque. - - >>> pdeque([1, 2, 3]).reverse() - pdeque([3, 2, 1]) - - Also supports the standard python reverse function. - - >>> reversed(pdeque([1, 2, 3])) - pdeque([3, 2, 1]) - """ - return PDeque(self._right_list, self._left_list, self._length) - __reversed__ = reverse - - def rotate(self, steps): - """ - Return deque with elements rotated steps steps. - - >>> x = pdeque([1, 2, 3]) - >>> x.rotate(1) - pdeque([3, 1, 2]) - >>> x.rotate(-2) - pdeque([3, 1, 2]) - """ - popped_deque = self.pop(steps) - if steps >= 0: - return popped_deque.extendleft(islice(self.reverse(), steps)) - - return popped_deque.extend(islice(self, -steps)) - - def __reduce__(self): - # Pickling support - return pdeque, (list(self), self._maxlen) - - def __getitem__(self, index): - if isinstance(index, slice): - if index.step is not None and index.step != 1: - # Too difficult, no structural sharing possible - return pdeque(tuple(self)[index], maxlen=self._maxlen) - - result = self - if index.start is not None: - result = result.popleft(index.start % self._length) - if index.stop is not None: - result = result.pop(self._length - (index.stop % self._length)) - - return result - - if not isinstance(index, Integral): - raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__) - - if index >= 0: - return self.popleft(index).left - - shifted = len(self) + index - if shifted < 0: - raise IndexError( - "pdeque index {0} out of range {1}".format(index, len(self)), - ) - return self.popleft(shifted).left - - index = Sequence.index - -Sequence.register(PDeque) -Hashable.register(PDeque) - - -def pdeque(iterable=(), maxlen=None): - """ - Return deque containing the elements of iterable. If maxlen is specified then - len(iterable) - maxlen elements are discarded from the left if len(iterable) > maxlen.
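A sketch of the bounded-deque trimming described here, together with rotate() from above:

>>> from pyrsistent import pdeque
>>> pdeque([1, 2, 3, 4, 5], maxlen=3)
pdeque([3, 4, 5], maxlen=3)
>>> pdeque([1, 2, 3]).rotate(1)
pdeque([3, 1, 2])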
- - >>> pdeque([1, 2, 3]) - pdeque([1, 2, 3]) - >>> pdeque([1, 2, 3, 4], maxlen=2) - pdeque([3, 4], maxlen=2) - """ - t = tuple(iterable) - if maxlen is not None: - t = t[-maxlen:] - length = len(t) - pivot = int(length / 2) - left = plist(t[:pivot]) - right = plist(t[pivot:], reverse=True) - return PDeque(left, right, length, maxlen) - -def dq(*elements): - """ - Return deque containing all arguments. - - >>> dq(1, 2, 3) - pdeque([1, 2, 3]) - """ - return pdeque(elements) diff --git a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_plist.py b/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_plist.py deleted file mode 100644 index 8b4267f5..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_plist.py +++ /dev/null @@ -1,313 +0,0 @@ -from ._compat import Sequence, Hashable -from numbers import Integral -from functools import reduce - - -class _PListBuilder(object): - """ - Helper class to allow construction of a list without - having to reverse it in the end. - """ - __slots__ = ('_head', '_tail') - - def __init__(self): - self._head = _EMPTY_PLIST - self._tail = _EMPTY_PLIST - - def _append(self, elem, constructor): - if not self._tail: - self._head = constructor(elem) - self._tail = self._head - else: - self._tail.rest = constructor(elem) - self._tail = self._tail.rest - - return self._head - - def append_elem(self, elem): - return self._append(elem, lambda e: PList(e, _EMPTY_PLIST)) - - def append_plist(self, pl): - return self._append(pl, lambda l: l) - - def build(self): - return self._head - - -class _PListBase(object): - __slots__ = ('__weakref__',) - - # Selected implementations can be taken straight from the Sequence - # class, others are less suitable. Especially those that work with - # index lookups. - count = Sequence.count - index = Sequence.index - - def __reduce__(self): - # Pickling support - return plist, (list(self),) - - def __len__(self): - """ - Return the length of the list, computed by traversing it. - - This is obviously O(n) but with the current implementation - where a list is also a node the overhead of storing the length - in every node would be quite significant. - """ - return sum(1 for _ in self) - - def __repr__(self): - return "plist({0})".format(list(self)) - __str__ = __repr__ - - def cons(self, elem): - """ - Return a new list with elem inserted as new head. - - >>> plist([1, 2]).cons(3) - plist([3, 1, 2]) - """ - return PList(elem, self) - - def mcons(self, iterable): - """ - Return a new list with all elements of iterable repeatedly cons:ed to the current list. - NB! The elements will be inserted in the reverse order of the iterable. - Runs in O(len(iterable)). - - >>> plist([1, 2]).mcons([3, 4]) - plist([4, 3, 1, 2]) - """ - head = self - for elem in iterable: - head = head.cons(elem) - - return head - - def reverse(self): - """ - Return a reversed version of list. Runs in O(n) where n is the length of the list. - - >>> plist([1, 2, 3]).reverse() - plist([3, 2, 1]) - - Also supports the standard reversed function. - - >>> reversed(plist([1, 2, 3])) - plist([3, 2, 1]) - """ - result = plist() - head = self - while head: - result = result.cons(head.first) - head = head.rest - - return result - __reversed__ = reverse - - def split(self, index): - """ - Split the list at position specified by index. Returns a tuple containing the - list up until index and the list after the index. Runs in O(index).
- - >>> plist([1, 2, 3, 4]).split(2) - (plist([1, 2]), plist([3, 4])) - """ - lb = _PListBuilder() - right_list = self - i = 0 - while right_list and i < index: - lb.append_elem(right_list.first) - right_list = right_list.rest - i += 1 - - if not right_list: - # Just a small optimization in the cases where no split occurred - return self, _EMPTY_PLIST - - return lb.build(), right_list - - def __iter__(self): - li = self - while li: - yield li.first - li = li.rest - - def __lt__(self, other): - if not isinstance(other, _PListBase): - return NotImplemented - - return tuple(self) < tuple(other) - - def __eq__(self, other): - """ - Traverses the lists, checking equality of elements. - - This is an O(n) operation, but preserves the standard semantics of list equality. - """ - if not isinstance(other, _PListBase): - return NotImplemented - - self_head = self - other_head = other - while self_head and other_head: - if not self_head.first == other_head.first: - return False - self_head = self_head.rest - other_head = other_head.rest - - return not self_head and not other_head - - def __getitem__(self, index): - # Don't use this data structure if you plan to do a lot of indexing, it is - # very inefficient! Use a PVector instead! - - if isinstance(index, slice): - if index.start is not None and index.stop is None and (index.step is None or index.step == 1): - return self._drop(index.start) - - # Take the easy way out for all other slicing cases, not much structural reuse possible anyway - return plist(tuple(self)[index]) - - if not isinstance(index, Integral): - raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__) - - if index < 0: - # NB: O(n)! - index += len(self) - - try: - return self._drop(index).first - except AttributeError: - raise IndexError("PList index out of range") - - def _drop(self, count): - if count < 0: - raise IndexError("PList index out of range") - - head = self - while count > 0: - head = head.rest - count -= 1 - - return head - - def __hash__(self): - return hash(tuple(self)) - - def remove(self, elem): - """ - Return new list with first element equal to elem removed. O(k) where k is the position - of the element that is removed. - - Raises ValueError if no matching element is found. - - >>> plist([1, 2, 1]).remove(1) - plist([2, 1]) - """ - - builder = _PListBuilder() - head = self - while head: - if head.first == elem: - return builder.append_plist(head.rest) - - builder.append_elem(head.first) - head = head.rest - - raise ValueError('{0} not found in PList'.format(elem)) - - -class PList(_PListBase): - """ - Classical Lisp style singly linked list. Adding elements to the head using cons is O(1). - Element access is O(k) where k is the position of the element in the list. Taking the - length of the list is O(n). - - Fully supports the Sequence and Hashable protocols including indexing and slicing but - if you need fast random access go for the PVector instead. - - Do not instantiate directly, instead use the factory functions :py:func:`l` or :py:func:`plist` to - create an instance.
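A sketch of the O(1) head insertion and the structural sharing it relies on; note the original list is untouched:

>>> from pyrsistent import l
>>> xs = l(2, 3)
>>> xs.cons(1)
plist([1, 2, 3])
>>> xs.mcons([1, 0])
plist([0, 1, 2, 3])
>>> xs
plist([2, 3])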
- - Some examples: - - >>> x = plist([1, 2]) - >>> y = x.cons(3) - >>> x - plist([1, 2]) - >>> y - plist([3, 1, 2]) - >>> y.first - 3 - >>> y.rest == x - True - >>> y[:2] - plist([3, 1]) - """ - __slots__ = ('first', 'rest') - - def __new__(cls, first, rest): - instance = super(PList, cls).__new__(cls) - instance.first = first - instance.rest = rest - return instance - - def __bool__(self): - return True - __nonzero__ = __bool__ - - -Sequence.register(PList) -Hashable.register(PList) - - -class _EmptyPList(_PListBase): - __slots__ = () - - def __bool__(self): - return False - __nonzero__ = __bool__ - - @property - def first(self): - raise AttributeError("Empty PList has no first") - - @property - def rest(self): - return self - - -Sequence.register(_EmptyPList) -Hashable.register(_EmptyPList) - -_EMPTY_PLIST = _EmptyPList() - - -def plist(iterable=(), reverse=False): - """ - Creates a new persistent list containing all elements of iterable. - Optional parameter reverse specifies if the elements should be inserted in - reverse order or not. - - >>> plist([1, 2, 3]) - plist([1, 2, 3]) - >>> plist([1, 2, 3], reverse=True) - plist([3, 2, 1]) - """ - if not reverse: - iterable = list(iterable) - iterable.reverse() - - return reduce(lambda pl, elem: pl.cons(elem), iterable, _EMPTY_PLIST) - - -def l(*elements): - """ - Creates a new persistent list containing all arguments. - - >>> l(1, 2, 3) - plist([1, 2, 3]) - """ - return plist(elements) diff --git a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_pmap.py b/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_pmap.py deleted file mode 100644 index e8a0ec53..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_pmap.py +++ /dev/null @@ -1,460 +0,0 @@ -from ._compat import Mapping, Hashable -from itertools import chain -import six -from pyrsistent._pvector import pvector -from pyrsistent._transformations import transform - - -class PMap(object): - """ - Persistent map/dict. Tries to follow the same naming conventions as the built in dict where feasible. - - Do not instantiate directly, instead use the factory functions :py:func:`m` or :py:func:`pmap` to - create an instance. - - Was originally written as a very close copy of the Clojure equivalent but was later rewritten to more closely - resemble the Python dict. This means that a sparse vector (a PVector) of buckets is used. The keys are - hashed and the elements inserted at position hash % len(bucket_vector). Whenever the map size exceeds 2/3 of - the containing vector's size the map is reallocated to a vector of double the size. This is done to avoid - excessive hash collisions. - - This structure corresponds most closely to the built in dict type and is intended as a replacement. Where the - semantics are the same (more or less) the same function names have been used but for some cases it is not possible, - for example assignments and deletion of values. - - PMap implements the Mapping protocol and is Hashable. It also supports dot-notation for - element access. - - Random access and insert is log32(n) where n is the size of the map.
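To make the bucket scheme described in the docstring above concrete: keys are placed at hash(key) % len(bucket_vector), collisions chain inside a bucket, and the bucket vector doubles once the load passes 2/3. Below is a minimal standalone sketch of that arithmetic (toy names, plain lists standing in for the persistent vector; it illustrates the scheme, it is not the vendored implementation):

TOY_LOAD_FACTOR = 2 / 3

def toy_insert(buckets, size, key, val):
    # Double the bucket vector once size exceeds 2/3 of its length,
    # rehashing every entry into the wider vector.
    if size >= TOY_LOAD_FACTOR * len(buckets):
        wider = [[] for _ in range(2 * len(buckets))]
        for bucket in buckets:
            for k, v in bucket:
                wider[hash(k) % len(wider)].append((k, v))
        buckets = wider
    bucket = buckets[hash(key) % len(buckets)]  # placement: hash % len(bucket_vector)
    for i, (k, _) in enumerate(bucket):
        if k == key:                            # existing key: overwrite in place
            bucket[i] = (key, val)
            return buckets, size
    bucket.append((key, val))                   # collision: chain within the bucket
    return buckets, size + 1

buckets, size = [[] for _ in range(8)], 0
for pair in [('a', 1), ('b', 2), ('a', 3)]:
    buckets, size = toy_insert(buckets, size, *pair)
assert size == 2 and ('a', 3) in buckets[hash('a') % len(buckets)]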
- - The following are examples of some common operations on persistent maps - - >>> m1 = m(a=1, b=3) - >>> m2 = m1.set('c', 3) - >>> m3 = m2.remove('a') - >>> m1 - pmap({'b': 3, 'a': 1}) - >>> m2 - pmap({'c': 3, 'b': 3, 'a': 1}) - >>> m3 - pmap({'c': 3, 'b': 3}) - >>> m3['c'] - 3 - >>> m3.c - 3 - """ - __slots__ = ('_size', '_buckets', '__weakref__', '_cached_hash') - - def __new__(cls, size, buckets): - self = super(PMap, cls).__new__(cls) - self._size = size - self._buckets = buckets - return self - - @staticmethod - def _get_bucket(buckets, key): - index = hash(key) % len(buckets) - bucket = buckets[index] - return index, bucket - - @staticmethod - def _getitem(buckets, key): - _, bucket = PMap._get_bucket(buckets, key) - if bucket: - for k, v in bucket: - if k == key: - return v - - raise KeyError(key) - - def __getitem__(self, key): - return PMap._getitem(self._buckets, key) - - @staticmethod - def _contains(buckets, key): - _, bucket = PMap._get_bucket(buckets, key) - if bucket: - for k, _ in bucket: - if k == key: - return True - - return False - - return False - - def __contains__(self, key): - return self._contains(self._buckets, key) - - get = Mapping.get - - def __iter__(self): - return self.iterkeys() - - def __getattr__(self, key): - try: - return self[key] - except KeyError: - raise AttributeError( - "{0} has no attribute '{1}'".format(type(self).__name__, key) - ) - - def iterkeys(self): - for k, _ in self.iteritems(): - yield k - - # These are more efficient implementations compared to the original - # methods that are based on the keys iterator and then calls the - # accessor functions to access the value for the corresponding key - def itervalues(self): - for _, v in self.iteritems(): - yield v - - def iteritems(self): - for bucket in self._buckets: - if bucket: - for k, v in bucket: - yield k, v - - def values(self): - return pvector(self.itervalues()) - - def keys(self): - return pvector(self.iterkeys()) - - def items(self): - return pvector(self.iteritems()) - - def __len__(self): - return self._size - - def __repr__(self): - return 'pmap({0})'.format(str(dict(self))) - - def __eq__(self, other): - if self is other: - return True - if not isinstance(other, Mapping): - return NotImplemented - if len(self) != len(other): - return False - if isinstance(other, PMap): - if (hasattr(self, '_cached_hash') and hasattr(other, '_cached_hash') - and self._cached_hash != other._cached_hash): - return False - if self._buckets == other._buckets: - return True - return dict(self.iteritems()) == dict(other.iteritems()) - elif isinstance(other, dict): - return dict(self.iteritems()) == other - return dict(self.iteritems()) == dict(six.iteritems(other)) - - __ne__ = Mapping.__ne__ - - def __lt__(self, other): - raise TypeError('PMaps are not orderable') - - __le__ = __lt__ - __gt__ = __lt__ - __ge__ = __lt__ - - def __str__(self): - return self.__repr__() - - def __hash__(self): - if not hasattr(self, '_cached_hash'): - self._cached_hash = hash(frozenset(self.iteritems())) - return self._cached_hash - - def set(self, key, val): - """ - Return a new PMap with key and val inserted. - - >>> m1 = m(a=1, b=2) - >>> m2 = m1.set('a', 3) - >>> m3 = m1.set('c' ,4) - >>> m1 - pmap({'b': 2, 'a': 1}) - >>> m2 - pmap({'b': 2, 'a': 3}) - >>> m3 - pmap({'c': 4, 'b': 2, 'a': 1}) - """ - return self.evolver().set(key, val).persistent() - - def remove(self, key): - """ - Return a new PMap without the element specified by key. Raises KeyError if the element - is not present. 
- - >>> m1 = m(a=1, b=2) - >>> m1.remove('a') - pmap({'b': 2}) - """ - return self.evolver().remove(key).persistent() - - def discard(self, key): - """ - Return a new PMap without the element specified by key. Returns reference to itself - if element is not present. - - >>> m1 = m(a=1, b=2) - >>> m1.discard('a') - pmap({'b': 2}) - >>> m1 is m1.discard('c') - True - """ - try: - return self.remove(key) - except KeyError: - return self - - def update(self, *maps): - """ - Return a new PMap with the items in Mappings inserted. If the same key is present in multiple - maps the rightmost (last) value is inserted. - - >>> m1 = m(a=1, b=2) - >>> m1.update(m(a=2, c=3), {'a': 17, 'd': 35}) - pmap({'c': 3, 'b': 2, 'a': 17, 'd': 35}) - """ - return self.update_with(lambda l, r: r, *maps) - - def update_with(self, update_fn, *maps): - """ - Return a new PMap with the items in Mappings maps inserted. If the same key is present in multiple - maps the values will be merged using update_fn going from left to right. - - >>> from operator import add - >>> m1 = m(a=1, b=2) - >>> m1.update_with(add, m(a=2)) - pmap({'b': 2, 'a': 3}) - - The reverse behaviour of the regular merge. Keep the leftmost element instead of the rightmost. - - >>> m1 = m(a=1) - >>> m1.update_with(lambda l, r: l, m(a=2), {'a':3}) - pmap({'a': 1}) - """ - evolver = self.evolver() - for map in maps: - for key, value in map.items(): - evolver.set(key, update_fn(evolver[key], value) if key in evolver else value) - - return evolver.persistent() - - def __add__(self, other): - return self.update(other) - - def __reduce__(self): - # Pickling support - return pmap, (dict(self),) - - def transform(self, *transformations): - """ - Transform arbitrarily complex combinations of PVectors and PMaps. A transformation - consists of two parts. One match expression that specifies which elements to transform - and one transformation function that performs the actual transformation. - - >>> from pyrsistent import freeze, ny - >>> news_paper = freeze({'articles': [{'author': 'Sara', 'content': 'A short article'}, - ... {'author': 'Steve', 'content': 'A slightly longer article'}], - ... 'weather': {'temperature': '11C', 'wind': '5m/s'}}) - >>> short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:25] + '...' if len(c) > 25 else c) - >>> very_short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:15] + '...' if len(c) > 15 else c) - >>> very_short_news.articles[0].content - 'A short article' - >>> very_short_news.articles[1].content - 'A slightly long...'
- - When nothing has been transformed the original data structure is kept - - >>> short_news is news_paper - True - >>> very_short_news is news_paper - False - >>> very_short_news.articles[0] is news_paper.articles[0] - True - """ - return transform(self, transformations) - - def copy(self): - return self - - class _Evolver(object): - __slots__ = ('_buckets_evolver', '_size', '_original_pmap') - - def __init__(self, original_pmap): - self._original_pmap = original_pmap - self._buckets_evolver = original_pmap._buckets.evolver() - self._size = original_pmap._size - - def __getitem__(self, key): - return PMap._getitem(self._buckets_evolver, key) - - def __setitem__(self, key, val): - self.set(key, val) - - def set(self, key, val): - if len(self._buckets_evolver) < 0.67 * self._size: - self._reallocate(2 * len(self._buckets_evolver)) - - kv = (key, val) - index, bucket = PMap._get_bucket(self._buckets_evolver, key) - if bucket: - for k, v in bucket: - if k == key: - if v is not val: - new_bucket = [(k2, v2) if k2 != k else (k2, val) for k2, v2 in bucket] - self._buckets_evolver[index] = new_bucket - - return self - - new_bucket = [kv] - new_bucket.extend(bucket) - self._buckets_evolver[index] = new_bucket - self._size += 1 - else: - self._buckets_evolver[index] = [kv] - self._size += 1 - - return self - - def _reallocate(self, new_size): - new_list = new_size * [None] - buckets = self._buckets_evolver.persistent() - for k, v in chain.from_iterable(x for x in buckets if x): - index = hash(k) % new_size - if new_list[index]: - new_list[index].append((k, v)) - else: - new_list[index] = [(k, v)] - - # A reallocation should always result in a dirty buckets evolver to avoid - # possible loss of elements when doing the reallocation. - self._buckets_evolver = pvector().evolver() - self._buckets_evolver.extend(new_list) - - def is_dirty(self): - return self._buckets_evolver.is_dirty() - - def persistent(self): - if self.is_dirty(): - self._original_pmap = PMap(self._size, self._buckets_evolver.persistent()) - - return self._original_pmap - - def __len__(self): - return self._size - - def __contains__(self, key): - return PMap._contains(self._buckets_evolver, key) - - def __delitem__(self, key): - self.remove(key) - - def remove(self, key): - index, bucket = PMap._get_bucket(self._buckets_evolver, key) - - if bucket: - new_bucket = [(k, v) for (k, v) in bucket if k != key] - if len(bucket) > len(new_bucket): - self._buckets_evolver[index] = new_bucket if new_bucket else None - self._size -= 1 - return self - - raise KeyError('{0}'.format(key)) - - def evolver(self): - """ - Create a new evolver for this pmap. For a discussion on evolvers in general see the - documentation for the pvector evolver. - - Create the evolver and perform various mutating updates to it: - - >>> m1 = m(a=1, b=2) - >>> e = m1.evolver() - >>> e['c'] = 3 - >>> len(e) - 3 - >>> del e['a'] - - The underlying pmap remains the same: - - >>> m1 - pmap({'b': 2, 'a': 1}) - - The changes are kept in the evolver. An updated pmap can be created using the - persistent() function on the evolver. - - >>> m2 = e.persistent() - >>> m2 - pmap({'c': 3, 'b': 2}) - - The new pmap will share data with the original pmap in the same way that would have - been done if only using operations on the pmap. 
- """ - return self._Evolver(self) - -Mapping.register(PMap) -Hashable.register(PMap) - - -def _turbo_mapping(initial, pre_size): - if pre_size: - size = pre_size - else: - try: - size = 2 * len(initial) or 8 - except Exception: - # Guess we can't figure out the length. Give up on length hinting, - # we can always reallocate later. - size = 8 - - buckets = size * [None] - - if not isinstance(initial, Mapping): - # Make a dictionary of the initial data if it isn't already, - # that will save us some job further down since we can assume no - # key collisions - initial = dict(initial) - - for k, v in six.iteritems(initial): - h = hash(k) - index = h % size - bucket = buckets[index] - - if bucket: - bucket.append((k, v)) - else: - buckets[index] = [(k, v)] - - return PMap(len(initial), pvector().extend(buckets)) - - -_EMPTY_PMAP = _turbo_mapping({}, 0) - - -def pmap(initial={}, pre_size=0): - """ - Create new persistent map, inserts all elements in initial into the newly created map. - The optional argument pre_size may be used to specify an initial size of the underlying bucket vector. This - may have a positive performance impact in the cases where you know beforehand that a large number of elements - will be inserted into the map eventually since it will reduce the number of reallocations required. - - >>> pmap({'a': 13, 'b': 14}) - pmap({'b': 14, 'a': 13}) - """ - if not initial: - return _EMPTY_PMAP - - return _turbo_mapping(initial, pre_size) - - -def m(**kwargs): - """ - Creates a new persitent map. Inserts all key value arguments into the newly created map. - - >>> m(a=13, b=14) - pmap({'b': 14, 'a': 13}) - """ - return pmap(kwargs) diff --git a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_precord.py b/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_precord.py deleted file mode 100644 index ec8d32c3..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_precord.py +++ /dev/null @@ -1,169 +0,0 @@ -import six -from pyrsistent._checked_types import CheckedType, _restore_pickle, InvariantException, store_invariants -from pyrsistent._field_common import ( - set_fields, check_type, is_field_ignore_extra_complaint, PFIELD_NO_INITIAL, serialize, check_global_invariants -) -from pyrsistent._pmap import PMap, pmap - - -class _PRecordMeta(type): - def __new__(mcs, name, bases, dct): - set_fields(dct, bases, name='_precord_fields') - store_invariants(dct, bases, '_precord_invariants', '__invariant__') - - dct['_precord_mandatory_fields'] = \ - set(name for name, field in dct['_precord_fields'].items() if field.mandatory) - - dct['_precord_initial_values'] = \ - dict((k, field.initial) for k, field in dct['_precord_fields'].items() if field.initial is not PFIELD_NO_INITIAL) - - - dct['__slots__'] = () - - return super(_PRecordMeta, mcs).__new__(mcs, name, bases, dct) - - -@six.add_metaclass(_PRecordMeta) -class PRecord(PMap, CheckedType): - """ - A PRecord is a PMap with a fixed set of specified fields. Records are declared as python classes inheriting - from PRecord. Because it is a PMap it has full support for all Mapping methods such as iteration and element - access using subscript notation. - - More documentation and examples of PRecord usage is available at https://github.com/tobgu/pyrsistent - """ - def __new__(cls, **kwargs): - # Hack total! If these two special attributes exist that means we can create - # ourselves. Otherwise we need to go through the Evolver to create the structures - # for us. 
- if '_precord_size' in kwargs and '_precord_buckets' in kwargs: - return super(PRecord, cls).__new__(cls, kwargs['_precord_size'], kwargs['_precord_buckets']) - - factory_fields = kwargs.pop('_factory_fields', None) - ignore_extra = kwargs.pop('_ignore_extra', False) - - initial_values = kwargs - if cls._precord_initial_values: - initial_values = dict((k, v() if callable(v) else v) - for k, v in cls._precord_initial_values.items()) - initial_values.update(kwargs) - - e = _PRecordEvolver(cls, pmap(), _factory_fields=factory_fields, _ignore_extra=ignore_extra) - for k, v in initial_values.items(): - e[k] = v - - return e.persistent() - - def set(self, *args, **kwargs): - """ - Set a field in the record. This set function differs slightly from that in the PMap - class. First of all it accepts key-value pairs. Second it accepts multiple key-value - pairs to perform one, atomic, update of multiple fields. - """ - - # The PRecord set() can accept kwargs since all fields that have been declared are - # valid python identifiers. Also allow multiple fields to be set in one operation. - if args: - return super(PRecord, self).set(args[0], args[1]) - - return self.update(kwargs) - - def evolver(self): - """ - Returns an evolver of this object. - """ - return _PRecordEvolver(self.__class__, self) - - def __repr__(self): - return "{0}({1})".format(self.__class__.__name__, - ', '.join('{0}={1}'.format(k, repr(v)) for k, v in self.items())) - - @classmethod - def create(cls, kwargs, _factory_fields=None, ignore_extra=False): - """ - Factory method. Will create a new PRecord of the current type and assign the values - specified in kwargs. - - :param ignore_extra: A boolean which when set to True will ignore any keys which appear in kwargs that are not - in the set of fields on the PRecord. - """ - if isinstance(kwargs, cls): - return kwargs - - if ignore_extra: - kwargs = {k: kwargs[k] for k in cls._precord_fields if k in kwargs} - - return cls(_factory_fields=_factory_fields, _ignore_extra=ignore_extra, **kwargs) - - def __reduce__(self): - # Pickling support - return _restore_pickle, (self.__class__, dict(self),) - - def serialize(self, format=None): - """ - Serialize the current PRecord using custom serializer functions for fields where - such have been supplied. 
- """ - return dict((k, serialize(self._precord_fields[k].serializer, format, v)) for k, v in self.items()) - - -class _PRecordEvolver(PMap._Evolver): - __slots__ = ('_destination_cls', '_invariant_error_codes', '_missing_fields', '_factory_fields', '_ignore_extra') - - def __init__(self, cls, original_pmap, _factory_fields=None, _ignore_extra=False): - super(_PRecordEvolver, self).__init__(original_pmap) - self._destination_cls = cls - self._invariant_error_codes = [] - self._missing_fields = [] - self._factory_fields = _factory_fields - self._ignore_extra = _ignore_extra - - def __setitem__(self, key, original_value): - self.set(key, original_value) - - def set(self, key, original_value): - field = self._destination_cls._precord_fields.get(key) - if field: - if self._factory_fields is None or field in self._factory_fields: - try: - if is_field_ignore_extra_complaint(PRecord, field, self._ignore_extra): - value = field.factory(original_value, ignore_extra=self._ignore_extra) - else: - value = field.factory(original_value) - except InvariantException as e: - self._invariant_error_codes += e.invariant_errors - self._missing_fields += e.missing_fields - return self - else: - value = original_value - - check_type(self._destination_cls, field, key, value) - - is_ok, error_code = field.invariant(value) - if not is_ok: - self._invariant_error_codes.append(error_code) - - return super(_PRecordEvolver, self).set(key, value) - else: - raise AttributeError("'{0}' is not among the specified fields for {1}".format(key, self._destination_cls.__name__)) - - def persistent(self): - cls = self._destination_cls - is_dirty = self.is_dirty() - pm = super(_PRecordEvolver, self).persistent() - if is_dirty or not isinstance(pm, cls): - result = cls(_precord_buckets=pm._buckets, _precord_size=pm._size) - else: - result = pm - - if cls._precord_mandatory_fields: - self._missing_fields += tuple('{0}.{1}'.format(cls.__name__, f) for f - in (cls._precord_mandatory_fields - set(result.keys()))) - - if self._invariant_error_codes or self._missing_fields: - raise InvariantException(tuple(self._invariant_error_codes), tuple(self._missing_fields), - 'Field invariant failed') - - check_global_invariants(result, cls._precord_invariants) - - return result diff --git a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_pset.py b/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_pset.py deleted file mode 100644 index a972ec53..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_pset.py +++ /dev/null @@ -1,229 +0,0 @@ -from ._compat import Set, Hashable -import sys -from pyrsistent._pmap import pmap - -PY2 = sys.version_info[0] < 3 - - -class PSet(object): - """ - Persistent set implementation. Built on top of the persistent map. The set supports all operations - in the Set protocol and is Hashable. - - Do not instantiate directly, instead use the factory functions :py:func:`s` or :py:func:`pset` - to create an instance. - - Random access and insert is log32(n) where n is the size of the set. 
- - Some examples: - - >>> s = pset([1, 2, 3, 1]) - >>> s2 = s.add(4) - >>> s3 = s2.remove(2) - >>> s - pset([1, 2, 3]) - >>> s2 - pset([1, 2, 3, 4]) - >>> s3 - pset([1, 3, 4]) - """ - __slots__ = ('_map', '__weakref__') - - def __new__(cls, m): - self = super(PSet, cls).__new__(cls) - self._map = m - return self - - def __contains__(self, element): - return element in self._map - - def __iter__(self): - return iter(self._map) - - def __len__(self): - return len(self._map) - - def __repr__(self): - if PY2 or not self: - return 'p' + str(set(self)) - - return 'pset([{0}])'.format(str(set(self))[1:-1]) - - def __str__(self): - return self.__repr__() - - def __hash__(self): - return hash(self._map) - - def __reduce__(self): - # Pickling support - return pset, (list(self),) - - @classmethod - def _from_iterable(cls, it, pre_size=8): - return PSet(pmap(dict((k, True) for k in it), pre_size=pre_size)) - - def add(self, element): - """ - Return a new PSet with element added - - >>> s1 = s(1, 2) - >>> s1.add(3) - pset([1, 2, 3]) - """ - return self.evolver().add(element).persistent() - - def update(self, iterable): - """ - Return a new PSet with elements in iterable added - - >>> s1 = s(1, 2) - >>> s1.update([3, 4, 4]) - pset([1, 2, 3, 4]) - """ - e = self.evolver() - for element in iterable: - e.add(element) - - return e.persistent() - - def remove(self, element): - """ - Return a new PSet with element removed. Raises KeyError if element is not present. - - >>> s1 = s(1, 2) - >>> s1.remove(2) - pset([1]) - """ - if element in self._map: - return self.evolver().remove(element).persistent() - - raise KeyError("Element '%s' not present in PSet" % element) - - def discard(self, element): - """ - Return a new PSet with element removed. Returns itself if element is not present. - """ - if element in self._map: - return self.evolver().remove(element).persistent() - - return self - - class _Evolver(object): - __slots__ = ('_original_pset', '_pmap_evolver') - - def __init__(self, original_pset): - self._original_pset = original_pset - self._pmap_evolver = original_pset._map.evolver() - - def add(self, element): - self._pmap_evolver[element] = True - return self - - def remove(self, element): - del self._pmap_evolver[element] - return self - - def is_dirty(self): - return self._pmap_evolver.is_dirty() - - def persistent(self): - if not self.is_dirty(): - return self._original_pset - - return PSet(self._pmap_evolver.persistent()) - - def __len__(self): - return len(self._pmap_evolver) - - def copy(self): - return self - - def evolver(self): - """ - Create a new evolver for this pset. For a discussion on evolvers in general see the - documentation for the pvector evolver. - - Create the evolver and perform various mutating updates to it: - - >>> s1 = s(1, 2, 3) - >>> e = s1.evolver() - >>> _ = e.add(4) - >>> len(e) - 4 - >>> _ = e.remove(1) - - The underlying pset remains the same: - - >>> s1 - pset([1, 2, 3]) - - The changes are kept in the evolver. An updated pmap can be created using the - persistent() function on the evolver. - - >>> s2 = e.persistent() - >>> s2 - pset([2, 3, 4]) - - The new pset will share data with the original pset in the same way that would have - been done if only using operations on the pset. - """ - return PSet._Evolver(self) - - # All the operations and comparisons you would expect on a set. - # - # This is not very beautiful. If we avoid inheriting from PSet we can use the - # __slots__ concepts (which requires a new style class) and hopefully save some memory. 
- __le__ = Set.__le__ - __lt__ = Set.__lt__ - __gt__ = Set.__gt__ - __ge__ = Set.__ge__ - __eq__ = Set.__eq__ - __ne__ = Set.__ne__ - - __and__ = Set.__and__ - __or__ = Set.__or__ - __sub__ = Set.__sub__ - __xor__ = Set.__xor__ - - issubset = __le__ - issuperset = __ge__ - union = __or__ - intersection = __and__ - difference = __sub__ - symmetric_difference = __xor__ - - isdisjoint = Set.isdisjoint - -Set.register(PSet) -Hashable.register(PSet) - -_EMPTY_PSET = PSet(pmap()) - - -def pset(iterable=(), pre_size=8): - """ - Creates a persistent set from iterable. Optionally takes a sizing parameter equivalent to that - used for :py:func:`pmap`. - - >>> s1 = pset([1, 2, 3, 2]) - >>> s1 - pset([1, 2, 3]) - """ - if not iterable: - return _EMPTY_PSET - - return PSet._from_iterable(iterable, pre_size=pre_size) - - -def s(*elements): - """ - Create a persistent set. - - Takes an arbitrary number of arguments to insert into the new set. - - >>> s1 = s(1, 2, 3, 2) - >>> s1 - pset([1, 2, 3]) - """ - return pset(elements) diff --git a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_pvector.py b/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_pvector.py deleted file mode 100644 index 82232782..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_pvector.py +++ /dev/null @@ -1,713 +0,0 @@ -from abc import abstractmethod, ABCMeta -from ._compat import Sequence, Hashable -from numbers import Integral -import operator -import six -from pyrsistent._transformations import transform - - -def _bitcount(val): - return bin(val).count("1") - -BRANCH_FACTOR = 32 -BIT_MASK = BRANCH_FACTOR - 1 -SHIFT = _bitcount(BIT_MASK) - - -def compare_pvector(v, other, operator): - return operator(v.tolist(), other.tolist() if isinstance(other, PVector) else other) - - -def _index_or_slice(index, stop): - if stop is None: - return index - - return slice(index, stop) - - -class PythonPVector(object): - """ - Support structure for PVector that implements structural sharing for vectors using a trie. - """ - __slots__ = ('_count', '_shift', '_root', '_tail', '_tail_offset', '__weakref__') - - def __new__(cls, count, shift, root, tail): - self = super(PythonPVector, cls).__new__(cls) - self._count = count - self._shift = shift - self._root = root - self._tail = tail - - # Derived attribute stored for performance - self._tail_offset = self._count - len(self._tail) - return self - - def __len__(self): - return self._count - - def __getitem__(self, index): - if isinstance(index, slice): - # There are more conditions than the below where it would be OK to - # return ourselves, implement those... - if index.start is None and index.stop is None and index.step is None: - return self - - # This is a bit nasty realizing the whole structure as a list before - # slicing it but it is the fastest way I've found to date, and it's easy :-) - return _EMPTY_PVECTOR.extend(self.tolist()[index]) - - if index < 0: - index += self._count - - return PythonPVector._node_for(self, index)[index & BIT_MASK] - - def __add__(self, other): - return self.extend(other) - - def __repr__(self): - return 'pvector({0})'.format(str(self.tolist())) - - def __str__(self): - return self.__repr__() - - def __iter__(self): - # This is kind of lazy and will produce some memory overhead but it is the fastest method - # by far of those tried since it uses the speed of the built in python list directly.
- return iter(self.tolist()) - - def __ne__(self, other): - return not self.__eq__(other) - - def __eq__(self, other): - return self is other or (hasattr(other, '__len__') and self._count == len(other)) and compare_pvector(self, other, operator.eq) - - def __gt__(self, other): - return compare_pvector(self, other, operator.gt) - - def __lt__(self, other): - return compare_pvector(self, other, operator.lt) - - def __ge__(self, other): - return compare_pvector(self, other, operator.ge) - - def __le__(self, other): - return compare_pvector(self, other, operator.le) - - def __mul__(self, times): - if times <= 0 or self is _EMPTY_PVECTOR: - return _EMPTY_PVECTOR - - if times == 1: - return self - - return _EMPTY_PVECTOR.extend(times * self.tolist()) - - __rmul__ = __mul__ - - def _fill_list(self, node, shift, the_list): - if shift: - shift -= SHIFT - for n in node: - self._fill_list(n, shift, the_list) - else: - the_list.extend(node) - - def tolist(self): - """ - The fastest way to convert the vector into a python list. - """ - the_list = [] - self._fill_list(self._root, self._shift, the_list) - the_list.extend(self._tail) - return the_list - - def _totuple(self): - """ - Returns the content as a python tuple. - """ - return tuple(self.tolist()) - - def __hash__(self): - # Taking the easy way out again... - return hash(self._totuple()) - - def transform(self, *transformations): - return transform(self, transformations) - - def __reduce__(self): - # Pickling support - return pvector, (self.tolist(),) - - def mset(self, *args): - if len(args) % 2: - raise TypeError("mset expected an even number of arguments") - - evolver = self.evolver() - for i in range(0, len(args), 2): - evolver[args[i]] = args[i+1] - - return evolver.persistent() - - class Evolver(object): - __slots__ = ('_count', '_shift', '_root', '_tail', '_tail_offset', '_dirty_nodes', - '_extra_tail', '_cached_leafs', '_orig_pvector') - - def __init__(self, v): - self._reset(v) - - def __getitem__(self, index): - if not isinstance(index, Integral): - raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__) - - if index < 0: - index += self._count + len(self._extra_tail) - - if self._count <= index < self._count + len(self._extra_tail): - return self._extra_tail[index - self._count] - - return PythonPVector._node_for(self, index)[index & BIT_MASK] - - def _reset(self, v): - self._count = v._count - self._shift = v._shift - self._root = v._root - self._tail = v._tail - self._tail_offset = v._tail_offset - self._dirty_nodes = {} - self._cached_leafs = {} - self._extra_tail = [] - self._orig_pvector = v - - def append(self, element): - self._extra_tail.append(element) - return self - - def extend(self, iterable): - self._extra_tail.extend(iterable) - return self - - def set(self, index, val): - self[index] = val - return self - - def __setitem__(self, index, val): - if not isinstance(index, Integral): - raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__) - - if index < 0: - index += self._count + len(self._extra_tail) - - if 0 <= index < self._count: - node = self._cached_leafs.get(index >> SHIFT) - if node: - node[index & BIT_MASK] = val - elif index >= self._tail_offset: - if id(self._tail) not in self._dirty_nodes: - self._tail = list(self._tail) - self._dirty_nodes[id(self._tail)] = True - self._cached_leafs[index >> SHIFT] = self._tail - self._tail[index & BIT_MASK] = val - else: - self._root = self._do_set(self._shift, self._root, index, val) - elif self._count <= index 
< self._count + len(self._extra_tail): - self._extra_tail[index - self._count] = val - elif index == self._count + len(self._extra_tail): - self._extra_tail.append(val) - else: - raise IndexError("Index out of range: %s" % (index,)) - - def _do_set(self, level, node, i, val): - if id(node) in self._dirty_nodes: - ret = node - else: - ret = list(node) - self._dirty_nodes[id(ret)] = True - - if level == 0: - ret[i & BIT_MASK] = val - self._cached_leafs[i >> SHIFT] = ret - else: - sub_index = (i >> level) & BIT_MASK # >>> - ret[sub_index] = self._do_set(level - SHIFT, node[sub_index], i, val) - - return ret - - def delete(self, index): - del self[index] - return self - - def __delitem__(self, key): - if self._orig_pvector: - # All structural sharing bets are off, base evolver on _extra_tail only - l = PythonPVector(self._count, self._shift, self._root, self._tail).tolist() - l.extend(self._extra_tail) - self._reset(_EMPTY_PVECTOR) - self._extra_tail = l - - del self._extra_tail[key] - - def persistent(self): - result = self._orig_pvector - if self.is_dirty(): - result = PythonPVector(self._count, self._shift, self._root, self._tail).extend(self._extra_tail) - self._reset(result) - - return result - - def __len__(self): - return self._count + len(self._extra_tail) - - def is_dirty(self): - return bool(self._dirty_nodes or self._extra_tail) - - def evolver(self): - return PythonPVector.Evolver(self) - - def set(self, i, val): - # This method could be implemented by a call to mset() but doing so would cause - # a ~5 X performance penalty on PyPy (considered the primary platform for this implementation - # of PVector) so we're keeping this implementation for now. - - if not isinstance(i, Integral): - raise TypeError("'%s' object cannot be interpreted as an index" % type(i).__name__) - - if i < 0: - i += self._count - - if 0 <= i < self._count: - if i >= self._tail_offset: - new_tail = list(self._tail) - new_tail[i & BIT_MASK] = val - return PythonPVector(self._count, self._shift, self._root, new_tail) - - return PythonPVector(self._count, self._shift, self._do_set(self._shift, self._root, i, val), self._tail) - - if i == self._count: - return self.append(val) - - raise IndexError("Index out of range: %s" % (i,)) - - def _do_set(self, level, node, i, val): - ret = list(node) - if level == 0: - ret[i & BIT_MASK] = val - else: - sub_index = (i >> level) & BIT_MASK # >>> - ret[sub_index] = self._do_set(level - SHIFT, node[sub_index], i, val) - - return ret - - @staticmethod - def _node_for(pvector_like, i): - if 0 <= i < pvector_like._count: - if i >= pvector_like._tail_offset: - return pvector_like._tail - - node = pvector_like._root - for level in range(pvector_like._shift, 0, -SHIFT): - node = node[(i >> level) & BIT_MASK] # >>> - - return node - - raise IndexError("Index out of range: %s" % (i,)) - - def _create_new_root(self): - new_shift = self._shift - - # Overflow root? 
- - if (self._count >> SHIFT) > (1 << self._shift): # >>> - new_root = [self._root, self._new_path(self._shift, self._tail)] - new_shift += SHIFT - else: - new_root = self._push_tail(self._shift, self._root, self._tail) - - return new_root, new_shift - - def append(self, val): - if len(self._tail) < BRANCH_FACTOR: - new_tail = list(self._tail) - new_tail.append(val) - return PythonPVector(self._count + 1, self._shift, self._root, new_tail) - - # Full tail, push into tree - new_root, new_shift = self._create_new_root() - return PythonPVector(self._count + 1, new_shift, new_root, [val]) - - def _new_path(self, level, node): - if level == 0: - return node - - return [self._new_path(level - SHIFT, node)] - - def _mutating_insert_tail(self): - self._root, self._shift = self._create_new_root() - self._tail = [] - - def _mutating_fill_tail(self, offset, sequence): - max_delta_len = BRANCH_FACTOR - len(self._tail) - delta = sequence[offset:offset + max_delta_len] - self._tail.extend(delta) - delta_len = len(delta) - self._count += delta_len - return offset + delta_len - - def _mutating_extend(self, sequence): - offset = 0 - sequence_len = len(sequence) - while offset < sequence_len: - offset = self._mutating_fill_tail(offset, sequence) - if len(self._tail) == BRANCH_FACTOR: - self._mutating_insert_tail() - - self._tail_offset = self._count - len(self._tail) - - def extend(self, obj): - # Mutates the new vector directly for efficiency but that's only an - # implementation detail, once it is returned it should be considered immutable - l = obj.tolist() if isinstance(obj, PythonPVector) else list(obj) - if l: - new_vector = self.append(l[0]) - new_vector._mutating_extend(l[1:]) - return new_vector - - return self - - def _push_tail(self, level, parent, tail_node): - """ - if parent is leaf, insert node, - else does it map to an existing child? -> - node_to_insert = push node one more level - else alloc new path - - return node_to_insert placed in copy of parent - """ - ret = list(parent) - - if level == SHIFT: - ret.append(tail_node) - return ret - - sub_index = ((self._count - 1) >> level) & BIT_MASK # >>> - if len(parent) > sub_index: - ret[sub_index] = self._push_tail(level - SHIFT, parent[sub_index], tail_node) - return ret - - ret.append(self._new_path(level - SHIFT, tail_node)) - return ret - - def index(self, value, *args, **kwargs): - return self.tolist().index(value, *args, **kwargs) - - def count(self, value): - return self.tolist().count(value) - - def delete(self, index, stop=None): - l = self.tolist() - del l[_index_or_slice(index, stop)] - return _EMPTY_PVECTOR.extend(l) - - def remove(self, value): - l = self.tolist() - l.remove(value) - return _EMPTY_PVECTOR.extend(l) - -@six.add_metaclass(ABCMeta) -class PVector(object): - """ - Persistent vector implementation. Meant as a replacement for the cases where you would normally - use a Python list. - - Do not instantiate directly, instead use the factory functions :py:func:`v` and :py:func:`pvector` to - create an instance. - - Heavily influenced by the persistent vector available in Clojure. Initially this was more or - less just a port of the Java code for the Clojure vector. It has since been modified and to - some extent optimized for usage in Python. - - The vector is organized as a trie; any mutating method will return a new vector that contains the changes. No - updates are done to the original vector. Structural sharing between vectors is applied where possible to save - space and to avoid making complete copies.
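A note on the index arithmetic used throughout this file: with BRANCH_FACTOR = 32, SHIFT is 5 and BIT_MASK is 0b11111, so each trie level consumes five bits of the index, which is where the log32(n) access cost quoted below comes from. A standalone illustration of the scheme (node_path is a hypothetical helper, not part of the vendored code):

BRANCH_FACTOR = 32
BIT_MASK = BRANCH_FACTOR - 1        # 0b11111
SHIFT = bin(BIT_MASK).count('1')    # 5 bits consumed per trie level

def node_path(index, shift):
    # Child slot chosen at each level, most significant bits first;
    # this mirrors `(i >> level) & BIT_MASK` in _node_for/_do_set above.
    return [(index >> level) & BIT_MASK for level in range(shift, -1, -SHIFT)]

# With shift == 10 the trie is two levels deep below the root:
assert node_path(1037, 10) == [1, 0, 13]  # 1037 == 1*1024 + 0*32 + 13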
- - This structure corresponds most closely to the built in list type and is intended as a replacement. Where the - semantics are the same (more or less) the same function names have been used but for some cases it is not possible, - for example assignments. - - The PVector implements the Sequence protocol and is Hashable. - - Inserts are amortized O(1). Random access is log32(n) where n is the size of the vector. - - The following are examples of some common operations on persistent vectors: - - >>> p = v(1, 2, 3) - >>> p2 = p.append(4) - >>> p3 = p2.extend([5, 6, 7]) - >>> p - pvector([1, 2, 3]) - >>> p2 - pvector([1, 2, 3, 4]) - >>> p3 - pvector([1, 2, 3, 4, 5, 6, 7]) - >>> p3[5] - 6 - >>> p.set(1, 99) - pvector([1, 99, 3]) - >>> - """ - - @abstractmethod - def __len__(self): - """ - >>> len(v(1, 2, 3)) - 3 - """ - - @abstractmethod - def __getitem__(self, index): - """ - Get value at index. Full slicing support. - - >>> v1 = v(5, 6, 7, 8) - >>> v1[2] - 7 - >>> v1[1:3] - pvector([6, 7]) - """ - - @abstractmethod - def __add__(self, other): - """ - >>> v1 = v(1, 2) - >>> v2 = v(3, 4) - >>> v1 + v2 - pvector([1, 2, 3, 4]) - """ - - @abstractmethod - def __mul__(self, times): - """ - >>> v1 = v(1, 2) - >>> 3 * v1 - pvector([1, 2, 1, 2, 1, 2]) - """ - - @abstractmethod - def __hash__(self): - """ - >>> v1 = v(1, 2, 3) - >>> v2 = v(1, 2, 3) - >>> hash(v1) == hash(v2) - True - """ - - @abstractmethod - def evolver(self): - """ - Create a new evolver for this pvector. The evolver acts as a mutable view of the vector - with "transaction like" semantics. No part of the underlying vector is updated; it is still - fully immutable. Furthermore multiple evolvers created from the same pvector do not - interfere with each other. - - You may want to use an evolver instead of working directly with the pvector in the - following cases: - - * Multiple updates are done to the same vector and the intermediate results are of no - interest. In this case using an evolver may be more efficient and easier to work with. - * You need to pass a vector into a legacy function or a function that you have no control - over which performs in place mutations of lists. In this case pass an evolver instance - instead and then create a new pvector from the evolver once the function returns. - - The following example illustrates a typical workflow when working with evolvers. It also - displays most of the API (which is kept small by design, you should not be tempted to - use evolvers in excess ;-)). - - Create the evolver and perform various mutating updates to it: - - >>> v1 = v(1, 2, 3, 4, 5) - >>> e = v1.evolver() - >>> e[1] = 22 - >>> _ = e.append(6) - >>> _ = e.extend([7, 8, 9]) - >>> e[8] += 1 - >>> len(e) - 9 - - The underlying pvector remains the same: - - >>> v1 - pvector([1, 2, 3, 4, 5]) - - The changes are kept in the evolver. An updated pvector can be created using the - persistent() function on the evolver. - - >>> v2 = e.persistent() - >>> v2 - pvector([1, 22, 3, 4, 5, 6, 7, 8, 10]) - - The new pvector will share data with the original pvector in the same way that would have - been done if only using operations on the pvector. - """ - - @abstractmethod - def mset(self, *args): - """ - Return a new vector with elements in specified positions replaced by values (multi set). - - Elements on even positions in the argument list are interpreted as indexes while - elements on odd positions are considered values.
- - >>> v1 = v(1, 2, 3) - >>> v1.mset(0, 11, 2, 33) - pvector([11, 2, 33]) - """ - - @abstractmethod - def set(self, i, val): - """ - Return a new vector with element at position i replaced with val. The original vector remains unchanged. - - Setting a value one step beyond the end of the vector is equal to appending. Setting beyond that will - result in an IndexError. - - >>> v1 = v(1, 2, 3) - >>> v1.set(1, 4) - pvector([1, 4, 3]) - >>> v1.set(3, 4) - pvector([1, 2, 3, 4]) - >>> v1.set(-1, 4) - pvector([1, 2, 4]) - """ - - @abstractmethod - def append(self, val): - """ - Return a new vector with val appended. - - >>> v1 = v(1, 2) - >>> v1.append(3) - pvector([1, 2, 3]) - """ - - @abstractmethod - def extend(self, obj): - """ - Return a new vector with all values in obj appended to it. Obj may be another - PVector or any other Iterable. - - >>> v1 = v(1, 2, 3) - >>> v1.extend([4, 5]) - pvector([1, 2, 3, 4, 5]) - """ - - @abstractmethod - def index(self, value, *args, **kwargs): - """ - Return first index of value. Additional indexes may be supplied to limit the search to a - sub range of the vector. - - >>> v1 = v(1, 2, 3, 4, 3) - >>> v1.index(3) - 2 - >>> v1.index(3, 3, 5) - 4 - """ - - @abstractmethod - def count(self, value): - """ - Return the number of times that value appears in the vector. - - >>> v1 = v(1, 4, 3, 4) - >>> v1.count(4) - 2 - """ - - @abstractmethod - def transform(self, *transformations): - """ - Transform arbitrarily complex combinations of PVectors and PMaps. A transformation - consists of two parts. One match expression that specifies which elements to transform - and one transformation function that performs the actual transformation. - - >>> from pyrsistent import freeze, ny - >>> news_paper = freeze({'articles': [{'author': 'Sara', 'content': 'A short article'}, - ... {'author': 'Steve', 'content': 'A slightly longer article'}], - ... 'weather': {'temperature': '11C', 'wind': '5m/s'}}) - >>> short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:25] + '...' if len(c) > 25 else c) - >>> very_short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:15] + '...' if len(c) > 15 else c) - >>> very_short_news.articles[0].content - 'A short article' - >>> very_short_news.articles[1].content - 'A slightly long...' - - When nothing has been transformed the original data structure is kept - - >>> short_news is news_paper - True - >>> very_short_news is news_paper - False - >>> very_short_news.articles[0] is news_paper.articles[0] - True - """ - - @abstractmethod - def delete(self, index, stop=None): - """ - Delete a portion of the vector by index or range. - - >>> v1 = v(1, 2, 3, 4, 5) - >>> v1.delete(1) - pvector([1, 3, 4, 5]) - >>> v1.delete(1, 3) - pvector([1, 4, 5]) - """ - - @abstractmethod - def remove(self, value): - """ - Remove the first occurrence of a value from the vector. - - >>> v1 = v(1, 2, 3, 2, 1) - >>> v2 = v1.remove(1) - >>> v2 - pvector([2, 3, 2, 1]) - >>> v2.remove(1) - pvector([2, 3, 2]) - """ - - -_EMPTY_PVECTOR = PythonPVector(0, SHIFT, [], []) -PVector.register(PythonPVector) -Sequence.register(PVector) -Hashable.register(PVector) - -def python_pvector(iterable=()): - """ - Create a new persistent vector containing the elements in iterable. 
- - >>> v1 = pvector([1, 2, 3]) - >>> v1 - pvector([1, 2, 3]) - """ - return _EMPTY_PVECTOR.extend(iterable) - -try: - # Use the C extension as underlying trie implementation if it is available - import os - if os.environ.get('PYRSISTENT_NO_C_EXTENSION'): - pvector = python_pvector - else: - from pvectorc import pvector - PVector.register(type(pvector())) -except ImportError: - pvector = python_pvector - - -def v(*elements): - """ - Create a new persistent vector containing all parameters to this function. - - >>> v1 = v(1, 2, 3) - >>> v1 - pvector([1, 2, 3]) - """ - return pvector(elements) diff --git a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_toolz.py b/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_toolz.py deleted file mode 100644 index 6643ee86..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_toolz.py +++ /dev/null @@ -1,83 +0,0 @@ -""" -Functionality copied from the toolz package to avoid having -to add toolz as a dependency. - -See https://github.com/pytoolz/toolz/. - -toolz is released under the BSD licence. Below is the licence text -from toolz as it appeared when copying the code. - --------------------------------------------------------------- - -Copyright (c) 2013 Matthew Rocklin - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - a. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - b. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - c. Neither the name of toolz nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -DAMAGE. -""" -import operator -from six.moves import reduce - - -def get_in(keys, coll, default=None, no_default=False): - """ - NB: This is a straight copy of the get_in implementation found in - the toolz library (https://github.com/pytoolz/toolz/). It works - with persistent data structures as well as the corresponding - data structures from the stdlib. - - Returns coll[i0][i1]...[iX] where [i0, i1, ..., iX]==keys. - - If coll[i0][i1]...[iX] cannot be found, returns ``default``, unless - ``no_default`` is specified, then it raises KeyError or IndexError. - - ``get_in`` is a generalization of ``operator.getitem`` for nested data - structures such as dictionaries and lists. - >>> from pyrsistent import freeze - >>> transaction = freeze({'name': 'Alice', - ... 'purchase': {'items': ['Apple', 'Orange'], - ... 'costs': [0.50, 1.25]}, - ...
'credit card': '5555-1234-1234-1234'}) - >>> get_in(['purchase', 'items', 0], transaction) - 'Apple' - >>> get_in(['name'], transaction) - 'Alice' - >>> get_in(['purchase', 'total'], transaction) - >>> get_in(['purchase', 'items', 'apple'], transaction) - >>> get_in(['purchase', 'items', 10], transaction) - >>> get_in(['purchase', 'total'], transaction, 0) - 0 - >>> get_in(['y'], {}, no_default=True) - Traceback (most recent call last): - ... - KeyError: 'y' - """ - try: - return reduce(operator.getitem, keys, coll) - except (KeyError, IndexError, TypeError): - if no_default: - raise - return default \ No newline at end of file diff --git a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_transformations.py b/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_transformations.py deleted file mode 100644 index 61209896..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/_transformations.py +++ /dev/null @@ -1,143 +0,0 @@ -import re -import six -try: - from inspect import Parameter, signature -except ImportError: - signature = None - try: - from inspect import getfullargspec as getargspec - except ImportError: - from inspect import getargspec - - -_EMPTY_SENTINEL = object() - - -def inc(x): - """ Add one to the current value """ - return x + 1 - - -def dec(x): - """ Subtract one from the current value """ - return x - 1 - - -def discard(evolver, key): - """ Discard the element and returns a structure without the discarded elements """ - try: - del evolver[key] - except KeyError: - pass - - -# Matchers -def rex(expr): - """ Regular expression matcher to use together with transform functions """ - r = re.compile(expr) - return lambda key: isinstance(key, six.string_types) and r.match(key) - - -def ny(_): - """ Matcher that matches any value """ - return True - - -# Support functions -def _chunks(l, n): - for i in range(0, len(l), n): - yield l[i:i + n] - - -def transform(structure, transformations): - r = structure - for path, command in _chunks(transformations, 2): - r = _do_to_path(r, path, command) - return r - - -def _do_to_path(structure, path, command): - if not path: - return command(structure) if callable(command) else command - - kvs = _get_keys_and_values(structure, path[0]) - return _update_structure(structure, kvs, path[1:], command) - - -def _items(structure): - try: - return structure.items() - except AttributeError: - # Support wider range of structures by adding a transform_items() or similar? - return list(enumerate(structure)) - - -def _get(structure, key, default): - try: - if hasattr(structure, '__getitem__'): - return structure[key] - - return getattr(structure, key) - - except (IndexError, KeyError): - return default - - -def _get_keys_and_values(structure, key_spec): - if callable(key_spec): - # Support predicates as callable objects in the path - arity = _get_arity(key_spec) - if arity == 1: - # Unary predicates are called with the "key" of the path - # - eg a key in a mapping, an index in a sequence. - return [(k, v) for k, v in _items(structure) if key_spec(k)] - elif arity == 2: - # Binary predicates are called with the key and the corresponding - # value. - return [(k, v) for k, v in _items(structure) if key_spec(k, v)] - else: - # Other arities are an error. - raise ValueError( - "callable in transform path must take 1 or 2 arguments" - ) - - # Non-callables are used as-is as a key. 
- return [(key_spec, _get(structure, key_spec, _EMPTY_SENTINEL))] - - -if signature is None: - def _get_arity(f): - argspec = getargspec(f) - return len(argspec.args) - len(argspec.defaults or ()) -else: - def _get_arity(f): - return sum( - 1 - for p - in signature(f).parameters.values() - if p.default is Parameter.empty - and p.kind in (Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD) - ) - - -def _update_structure(structure, kvs, path, command): - from pyrsistent._pmap import pmap - e = structure.evolver() - if not path and command is discard: - # Do this in reverse to avoid index problems with vectors. See #92. - for k, v in reversed(kvs): - discard(e, k) - else: - for k, v in kvs: - is_empty = False - if v is _EMPTY_SENTINEL: - # Allow expansion of structure but make sure to cover the case - # when an empty pmap is added as leaf node. See #154. - is_empty = True - v = pmap() - - result = _do_to_path(v, path, command) - if result is not v or is_empty: - e[k] = result - - return e.persistent() diff --git a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/typing.py b/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/typing.py deleted file mode 100644 index 6a86c831..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/pyrsistent/typing.py +++ /dev/null @@ -1,80 +0,0 @@ -"""Helpers for use with type annotation. - -Use the empty classes in this module when annotating the types of Pyrsistent -objects, instead of using the actual collection class. - -For example, - - from pyrsistent import pvector - from pyrsistent.typing import PVector - - myvector: PVector[str] = pvector(['a', 'b', 'c']) - -""" -from __future__ import absolute_import - -try: - from typing import Container - from typing import Hashable - from typing import Generic - from typing import Iterable - from typing import Mapping - from typing import Sequence - from typing import Sized - from typing import TypeVar - - __all__ = [ - 'CheckedPMap', - 'CheckedPSet', - 'CheckedPVector', - 'PBag', - 'PDeque', - 'PList', - 'PMap', - 'PSet', - 'PVector', - ] - - T = TypeVar('T') - KT = TypeVar('KT') - VT = TypeVar('VT') - - class CheckedPMap(Mapping[KT, VT], Hashable): - pass - - # PSet.add and PSet.discard have different type signatures than that of Set. - class CheckedPSet(Generic[T], Hashable): - pass - - class CheckedPVector(Sequence[T], Hashable): - pass - - class PBag(Container[T], Iterable[T], Sized, Hashable): - pass - - class PDeque(Sequence[T], Hashable): - pass - - class PList(Sequence[T], Hashable): - pass - - class PMap(Mapping[KT, VT], Hashable): - pass - - # PSet.add and PSet.discard have different type signatures than that of Set. 
- class PSet(Generic[T], Hashable): - pass - - class PVector(Sequence[T], Hashable): - pass - - class PVectorEvolver(Generic[T]): - pass - - class PMapEvolver(Generic[KT, VT]): - pass - - class PSetEvolver(Generic[T]): - pass -except ImportError: - pass diff --git a/conda_lock/_vendor/poetry/core/_vendor/attrs.LICENSE b/conda_lock/_vendor/poetry/core/_vendor/tomli/LICENSE similarity index 94% rename from conda_lock/_vendor/poetry/core/_vendor/attrs.LICENSE rename to conda_lock/_vendor/poetry/core/_vendor/tomli/LICENSE index 7ae3df93..e859590f 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/attrs.LICENSE +++ b/conda_lock/_vendor/poetry/core/_vendor/tomli/LICENSE @@ -1,6 +1,6 @@ -The MIT License (MIT) +MIT License -Copyright (c) 2015 Hynek Schlawack +Copyright (c) 2021 Taneli Hukkinen Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/conda_lock/_vendor/poetry/core/_vendor/tomli/__init__.py b/conda_lock/_vendor/poetry/core/_vendor/tomli/__init__.py new file mode 100644 index 00000000..4c6ec97e --- /dev/null +++ b/conda_lock/_vendor/poetry/core/_vendor/tomli/__init__.py @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: MIT +# SPDX-FileCopyrightText: 2021 Taneli Hukkinen +# Licensed to PSF under a Contributor Agreement. + +__all__ = ("loads", "load", "TOMLDecodeError") +__version__ = "2.0.1" # DO NOT EDIT THIS LINE MANUALLY. LET bump2version UTILITY DO IT + +from ._parser import TOMLDecodeError, load, loads + +# Pretend this exception was created here. +TOMLDecodeError.__module__ = __name__ diff --git a/conda_lock/_vendor/poetry/core/_vendor/tomli/_parser.py b/conda_lock/_vendor/poetry/core/_vendor/tomli/_parser.py new file mode 100644 index 00000000..f1bb0aa1 --- /dev/null +++ b/conda_lock/_vendor/poetry/core/_vendor/tomli/_parser.py @@ -0,0 +1,691 @@ +# SPDX-License-Identifier: MIT +# SPDX-FileCopyrightText: 2021 Taneli Hukkinen +# Licensed to PSF under a Contributor Agreement. + +from __future__ import annotations + +from collections.abc import Iterable +import string +from types import MappingProxyType +from typing import Any, BinaryIO, NamedTuple + +from ._re import ( + RE_DATETIME, + RE_LOCALTIME, + RE_NUMBER, + match_to_datetime, + match_to_localtime, + match_to_number, +) +from ._types import Key, ParseFloat, Pos + +ASCII_CTRL = frozenset(chr(i) for i in range(32)) | frozenset(chr(127)) + +# Neither of these sets include quotation mark or backslash. They are +# currently handled as separate cases in the parser functions. 
+ILLEGAL_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t") +ILLEGAL_MULTILINE_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t\n") + +ILLEGAL_LITERAL_STR_CHARS = ILLEGAL_BASIC_STR_CHARS +ILLEGAL_MULTILINE_LITERAL_STR_CHARS = ILLEGAL_MULTILINE_BASIC_STR_CHARS + +ILLEGAL_COMMENT_CHARS = ILLEGAL_BASIC_STR_CHARS + +TOML_WS = frozenset(" \t") +TOML_WS_AND_NEWLINE = TOML_WS | frozenset("\n") +BARE_KEY_CHARS = frozenset(string.ascii_letters + string.digits + "-_") +KEY_INITIAL_CHARS = BARE_KEY_CHARS | frozenset("\"'") +HEXDIGIT_CHARS = frozenset(string.hexdigits) + +BASIC_STR_ESCAPE_REPLACEMENTS = MappingProxyType( + { + "\\b": "\u0008", # backspace + "\\t": "\u0009", # tab + "\\n": "\u000A", # linefeed + "\\f": "\u000C", # form feed + "\\r": "\u000D", # carriage return + '\\"': "\u0022", # quote + "\\\\": "\u005C", # backslash + } +) + + +class TOMLDecodeError(ValueError): + """An error raised if a document is not valid TOML.""" + + +def load(__fp: BinaryIO, *, parse_float: ParseFloat = float) -> dict[str, Any]: + """Parse TOML from a binary file object.""" + b = __fp.read() + try: + s = b.decode() + except AttributeError: + raise TypeError( + "File must be opened in binary mode, e.g. use `open('foo.toml', 'rb')`" + ) from None + return loads(s, parse_float=parse_float) + + +def loads(__s: str, *, parse_float: ParseFloat = float) -> dict[str, Any]: # noqa: C901 + """Parse TOML from a string.""" + + # The spec allows converting "\r\n" to "\n", even in string + # literals. Let's do so to simplify parsing. + src = __s.replace("\r\n", "\n") + pos = 0 + out = Output(NestedDict(), Flags()) + header: Key = () + parse_float = make_safe_parse_float(parse_float) + + # Parse one statement at a time + # (typically means one line in TOML source) + while True: + # 1. Skip line leading whitespace + pos = skip_chars(src, pos, TOML_WS) + + # 2. Parse rules. Expect one of the following: + # - end of file + # - end of line + # - comment + # - key/value pair + # - append dict to list (and move to its namespace) + # - create dict (and move to its namespace) + # Skip trailing whitespace when applicable. + try: + char = src[pos] + except IndexError: + break + if char == "\n": + pos += 1 + continue + if char in KEY_INITIAL_CHARS: + pos = key_value_rule(src, pos, out, header, parse_float) + pos = skip_chars(src, pos, TOML_WS) + elif char == "[": + try: + second_char: str | None = src[pos + 1] + except IndexError: + second_char = None + out.flags.finalize_pending() + if second_char == "[": + pos, header = create_list_rule(src, pos, out) + else: + pos, header = create_dict_rule(src, pos, out) + pos = skip_chars(src, pos, TOML_WS) + elif char != "#": + raise suffixed_err(src, pos, "Invalid statement") + + # 3. Skip comment + pos = skip_comment(src, pos) + + # 4. Expect end of line or end of file + try: + char = src[pos] + except IndexError: + break + if char != "\n": + raise suffixed_err( + src, pos, "Expected newline or end of document after a statement" + ) + pos += 1 + + return out.data.dict + + +class Flags: + """Flags that map to parsed keys/namespaces.""" + + # Marks an immutable namespace (inline array or inline table). + FROZEN = 0 + # Marks a nest that has been explicitly created and can no longer + # be opened using the "[table]" syntax. 
+ EXPLICIT_NEST = 1 + + def __init__(self) -> None: + self._flags: dict[str, dict] = {} + self._pending_flags: set[tuple[Key, int]] = set() + + def add_pending(self, key: Key, flag: int) -> None: + self._pending_flags.add((key, flag)) + + def finalize_pending(self) -> None: + for key, flag in self._pending_flags: + self.set(key, flag, recursive=False) + self._pending_flags.clear() + + def unset_all(self, key: Key) -> None: + cont = self._flags + for k in key[:-1]: + if k not in cont: + return + cont = cont[k]["nested"] + cont.pop(key[-1], None) + + def set(self, key: Key, flag: int, *, recursive: bool) -> None: # noqa: A003 + cont = self._flags + key_parent, key_stem = key[:-1], key[-1] + for k in key_parent: + if k not in cont: + cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}} + cont = cont[k]["nested"] + if key_stem not in cont: + cont[key_stem] = {"flags": set(), "recursive_flags": set(), "nested": {}} + cont[key_stem]["recursive_flags" if recursive else "flags"].add(flag) + + def is_(self, key: Key, flag: int) -> bool: + if not key: + return False # document root has no flags + cont = self._flags + for k in key[:-1]: + if k not in cont: + return False + inner_cont = cont[k] + if flag in inner_cont["recursive_flags"]: + return True + cont = inner_cont["nested"] + key_stem = key[-1] + if key_stem in cont: + cont = cont[key_stem] + return flag in cont["flags"] or flag in cont["recursive_flags"] + return False + + +class NestedDict: + def __init__(self) -> None: + # The parsed content of the TOML document + self.dict: dict[str, Any] = {} + + def get_or_create_nest( + self, + key: Key, + *, + access_lists: bool = True, + ) -> dict: + cont: Any = self.dict + for k in key: + if k not in cont: + cont[k] = {} + cont = cont[k] + if access_lists and isinstance(cont, list): + cont = cont[-1] + if not isinstance(cont, dict): + raise KeyError("There is no nest behind this key") + return cont + + def append_nest_to_list(self, key: Key) -> None: + cont = self.get_or_create_nest(key[:-1]) + last_key = key[-1] + if last_key in cont: + list_ = cont[last_key] + if not isinstance(list_, list): + raise KeyError("An object other than list found behind this key") + list_.append({}) + else: + cont[last_key] = [{}] + + +class Output(NamedTuple): + data: NestedDict + flags: Flags + + +def skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos: + try: + while src[pos] in chars: + pos += 1 + except IndexError: + pass + return pos + + +def skip_until( + src: str, + pos: Pos, + expect: str, + *, + error_on: frozenset[str], + error_on_eof: bool, +) -> Pos: + try: + new_pos = src.index(expect, pos) + except ValueError: + new_pos = len(src) + if error_on_eof: + raise suffixed_err(src, new_pos, f"Expected {expect!r}") from None + + if not error_on.isdisjoint(src[pos:new_pos]): + while src[pos] not in error_on: + pos += 1 + raise suffixed_err(src, pos, f"Found invalid character {src[pos]!r}") + return new_pos + + +def skip_comment(src: str, pos: Pos) -> Pos: + try: + char: str | None = src[pos] + except IndexError: + char = None + if char == "#": + return skip_until( + src, pos + 1, "\n", error_on=ILLEGAL_COMMENT_CHARS, error_on_eof=False + ) + return pos + + +def skip_comments_and_array_ws(src: str, pos: Pos) -> Pos: + while True: + pos_before_skip = pos + pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE) + pos = skip_comment(src, pos) + if pos == pos_before_skip: + return pos + + +def create_dict_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]: + pos += 1 # Skip "[" + pos = 
skip_chars(src, pos, TOML_WS) + pos, key = parse_key(src, pos) + + if out.flags.is_(key, Flags.EXPLICIT_NEST) or out.flags.is_(key, Flags.FROZEN): + raise suffixed_err(src, pos, f"Cannot declare {key} twice") + out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False) + try: + out.data.get_or_create_nest(key) + except KeyError: + raise suffixed_err(src, pos, "Cannot overwrite a value") from None + + if not src.startswith("]", pos): + raise suffixed_err(src, pos, "Expected ']' at the end of a table declaration") + return pos + 1, key + + +def create_list_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]: + pos += 2 # Skip "[[" + pos = skip_chars(src, pos, TOML_WS) + pos, key = parse_key(src, pos) + + if out.flags.is_(key, Flags.FROZEN): + raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}") + # Free the namespace now that it points to another empty list item... + out.flags.unset_all(key) + # ...but this key precisely is still prohibited from table declaration + out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False) + try: + out.data.append_nest_to_list(key) + except KeyError: + raise suffixed_err(src, pos, "Cannot overwrite a value") from None + + if not src.startswith("]]", pos): + raise suffixed_err(src, pos, "Expected ']]' at the end of an array declaration") + return pos + 2, key + + +def key_value_rule( + src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat +) -> Pos: + pos, key, value = parse_key_value_pair(src, pos, parse_float) + key_parent, key_stem = key[:-1], key[-1] + abs_key_parent = header + key_parent + + relative_path_cont_keys = (header + key[:i] for i in range(1, len(key))) + for cont_key in relative_path_cont_keys: + # Check that dotted key syntax does not redefine an existing table + if out.flags.is_(cont_key, Flags.EXPLICIT_NEST): + raise suffixed_err(src, pos, f"Cannot redefine namespace {cont_key}") + # Containers in the relative path can't be opened with the table syntax or + # dotted key/value syntax in following table sections. 
+ out.flags.add_pending(cont_key, Flags.EXPLICIT_NEST) + + if out.flags.is_(abs_key_parent, Flags.FROZEN): + raise suffixed_err( + src, pos, f"Cannot mutate immutable namespace {abs_key_parent}" + ) + + try: + nest = out.data.get_or_create_nest(abs_key_parent) + except KeyError: + raise suffixed_err(src, pos, "Cannot overwrite a value") from None + if key_stem in nest: + raise suffixed_err(src, pos, "Cannot overwrite a value") + # Mark inline table and array namespaces recursively immutable + if isinstance(value, (dict, list)): + out.flags.set(header + key, Flags.FROZEN, recursive=True) + nest[key_stem] = value + return pos + + +def parse_key_value_pair( + src: str, pos: Pos, parse_float: ParseFloat +) -> tuple[Pos, Key, Any]: + pos, key = parse_key(src, pos) + try: + char: str | None = src[pos] + except IndexError: + char = None + if char != "=": + raise suffixed_err(src, pos, "Expected '=' after a key in a key/value pair") + pos += 1 + pos = skip_chars(src, pos, TOML_WS) + pos, value = parse_value(src, pos, parse_float) + return pos, key, value + + +def parse_key(src: str, pos: Pos) -> tuple[Pos, Key]: + pos, key_part = parse_key_part(src, pos) + key: Key = (key_part,) + pos = skip_chars(src, pos, TOML_WS) + while True: + try: + char: str | None = src[pos] + except IndexError: + char = None + if char != ".": + return pos, key + pos += 1 + pos = skip_chars(src, pos, TOML_WS) + pos, key_part = parse_key_part(src, pos) + key += (key_part,) + pos = skip_chars(src, pos, TOML_WS) + + +def parse_key_part(src: str, pos: Pos) -> tuple[Pos, str]: + try: + char: str | None = src[pos] + except IndexError: + char = None + if char in BARE_KEY_CHARS: + start_pos = pos + pos = skip_chars(src, pos, BARE_KEY_CHARS) + return pos, src[start_pos:pos] + if char == "'": + return parse_literal_str(src, pos) + if char == '"': + return parse_one_line_basic_str(src, pos) + raise suffixed_err(src, pos, "Invalid initial character for a key part") + + +def parse_one_line_basic_str(src: str, pos: Pos) -> tuple[Pos, str]: + pos += 1 + return parse_basic_str(src, pos, multiline=False) + + +def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, list]: + pos += 1 + array: list = [] + + pos = skip_comments_and_array_ws(src, pos) + if src.startswith("]", pos): + return pos + 1, array + while True: + pos, val = parse_value(src, pos, parse_float) + array.append(val) + pos = skip_comments_and_array_ws(src, pos) + + c = src[pos : pos + 1] + if c == "]": + return pos + 1, array + if c != ",": + raise suffixed_err(src, pos, "Unclosed array") + pos += 1 + + pos = skip_comments_and_array_ws(src, pos) + if src.startswith("]", pos): + return pos + 1, array + + +def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, dict]: + pos += 1 + nested_dict = NestedDict() + flags = Flags() + + pos = skip_chars(src, pos, TOML_WS) + if src.startswith("}", pos): + return pos + 1, nested_dict.dict + while True: + pos, key, value = parse_key_value_pair(src, pos, parse_float) + key_parent, key_stem = key[:-1], key[-1] + if flags.is_(key, Flags.FROZEN): + raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}") + try: + nest = nested_dict.get_or_create_nest(key_parent, access_lists=False) + except KeyError: + raise suffixed_err(src, pos, "Cannot overwrite a value") from None + if key_stem in nest: + raise suffixed_err(src, pos, f"Duplicate inline table key {key_stem!r}") + nest[key_stem] = value + pos = skip_chars(src, pos, TOML_WS) + c = src[pos : pos + 1] + if c == "}": + return pos + 1, 
nested_dict.dict + if c != ",": + raise suffixed_err(src, pos, "Unclosed inline table") + if isinstance(value, (dict, list)): + flags.set(key, Flags.FROZEN, recursive=True) + pos += 1 + pos = skip_chars(src, pos, TOML_WS) + + +def parse_basic_str_escape( + src: str, pos: Pos, *, multiline: bool = False +) -> tuple[Pos, str]: + escape_id = src[pos : pos + 2] + pos += 2 + if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}: + # Skip whitespace until next non-whitespace character or end of + # the doc. Error if non-whitespace is found before newline. + if escape_id != "\\\n": + pos = skip_chars(src, pos, TOML_WS) + try: + char = src[pos] + except IndexError: + return pos, "" + if char != "\n": + raise suffixed_err(src, pos, "Unescaped '\\' in a string") + pos += 1 + pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE) + return pos, "" + if escape_id == "\\u": + return parse_hex_char(src, pos, 4) + if escape_id == "\\U": + return parse_hex_char(src, pos, 8) + try: + return pos, BASIC_STR_ESCAPE_REPLACEMENTS[escape_id] + except KeyError: + raise suffixed_err(src, pos, "Unescaped '\\' in a string") from None + + +def parse_basic_str_escape_multiline(src: str, pos: Pos) -> tuple[Pos, str]: + return parse_basic_str_escape(src, pos, multiline=True) + + +def parse_hex_char(src: str, pos: Pos, hex_len: int) -> tuple[Pos, str]: + hex_str = src[pos : pos + hex_len] + if len(hex_str) != hex_len or not HEXDIGIT_CHARS.issuperset(hex_str): + raise suffixed_err(src, pos, "Invalid hex value") + pos += hex_len + hex_int = int(hex_str, 16) + if not is_unicode_scalar_value(hex_int): + raise suffixed_err(src, pos, "Escaped character is not a Unicode scalar value") + return pos, chr(hex_int) + + +def parse_literal_str(src: str, pos: Pos) -> tuple[Pos, str]: + pos += 1 # Skip starting apostrophe + start_pos = pos + pos = skip_until( + src, pos, "'", error_on=ILLEGAL_LITERAL_STR_CHARS, error_on_eof=True + ) + return pos + 1, src[start_pos:pos] # Skip ending apostrophe + + +def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> tuple[Pos, str]: + pos += 3 + if src.startswith("\n", pos): + pos += 1 + + if literal: + delim = "'" + end_pos = skip_until( + src, + pos, + "'''", + error_on=ILLEGAL_MULTILINE_LITERAL_STR_CHARS, + error_on_eof=True, + ) + result = src[pos:end_pos] + pos = end_pos + 3 + else: + delim = '"' + pos, result = parse_basic_str(src, pos, multiline=True) + + # Add at maximum two extra apostrophes/quotes if the end sequence + # is 4 or 5 chars long instead of just 3. 
+ if not src.startswith(delim, pos): + return pos, result + pos += 1 + if not src.startswith(delim, pos): + return pos, result + delim + pos += 1 + return pos, result + (delim * 2) + + +def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> tuple[Pos, str]: + if multiline: + error_on = ILLEGAL_MULTILINE_BASIC_STR_CHARS + parse_escapes = parse_basic_str_escape_multiline + else: + error_on = ILLEGAL_BASIC_STR_CHARS + parse_escapes = parse_basic_str_escape + result = "" + start_pos = pos + while True: + try: + char = src[pos] + except IndexError: + raise suffixed_err(src, pos, "Unterminated string") from None + if char == '"': + if not multiline: + return pos + 1, result + src[start_pos:pos] + if src.startswith('"""', pos): + return pos + 3, result + src[start_pos:pos] + pos += 1 + continue + if char == "\\": + result += src[start_pos:pos] + pos, parsed_escape = parse_escapes(src, pos) + result += parsed_escape + start_pos = pos + continue + if char in error_on: + raise suffixed_err(src, pos, f"Illegal character {char!r}") + pos += 1 + + +def parse_value( # noqa: C901 + src: str, pos: Pos, parse_float: ParseFloat +) -> tuple[Pos, Any]: + try: + char: str | None = src[pos] + except IndexError: + char = None + + # IMPORTANT: order conditions based on speed of checking and likelihood + + # Basic strings + if char == '"': + if src.startswith('"""', pos): + return parse_multiline_str(src, pos, literal=False) + return parse_one_line_basic_str(src, pos) + + # Literal strings + if char == "'": + if src.startswith("'''", pos): + return parse_multiline_str(src, pos, literal=True) + return parse_literal_str(src, pos) + + # Booleans + if char == "t": + if src.startswith("true", pos): + return pos + 4, True + if char == "f": + if src.startswith("false", pos): + return pos + 5, False + + # Arrays + if char == "[": + return parse_array(src, pos, parse_float) + + # Inline tables + if char == "{": + return parse_inline_table(src, pos, parse_float) + + # Dates and times + datetime_match = RE_DATETIME.match(src, pos) + if datetime_match: + try: + datetime_obj = match_to_datetime(datetime_match) + except ValueError as e: + raise suffixed_err(src, pos, "Invalid date or datetime") from e + return datetime_match.end(), datetime_obj + localtime_match = RE_LOCALTIME.match(src, pos) + if localtime_match: + return localtime_match.end(), match_to_localtime(localtime_match) + + # Integers and "normal" floats. + # The regex will greedily match any type starting with a decimal + # char, so needs to be located after handling of dates and times. 
+ number_match = RE_NUMBER.match(src, pos) + if number_match: + return number_match.end(), match_to_number(number_match, parse_float) + + # Special floats + first_three = src[pos : pos + 3] + if first_three in {"inf", "nan"}: + return pos + 3, parse_float(first_three) + first_four = src[pos : pos + 4] + if first_four in {"-inf", "+inf", "-nan", "+nan"}: + return pos + 4, parse_float(first_four) + + raise suffixed_err(src, pos, "Invalid value") + + +def suffixed_err(src: str, pos: Pos, msg: str) -> TOMLDecodeError: + """Return a `TOMLDecodeError` where error message is suffixed with + coordinates in source.""" + + def coord_repr(src: str, pos: Pos) -> str: + if pos >= len(src): + return "end of document" + line = src.count("\n", 0, pos) + 1 + if line == 1: + column = pos + 1 + else: + column = pos - src.rindex("\n", 0, pos) + return f"line {line}, column {column}" + + return TOMLDecodeError(f"{msg} (at {coord_repr(src, pos)})") + + +def is_unicode_scalar_value(codepoint: int) -> bool: + return (0 <= codepoint <= 55295) or (57344 <= codepoint <= 1114111) + + +def make_safe_parse_float(parse_float: ParseFloat) -> ParseFloat: + """A decorator to make `parse_float` safe. + + `parse_float` must not return dicts or lists, because these types + would be mixed with parsed TOML tables and arrays, thus confusing + the parser. The returned decorated callable raises `ValueError` + instead of returning illegal types. + """ + # The default `float` callable never returns illegal types. Optimize it. + if parse_float is float: # type: ignore[comparison-overlap] + return float + + def safe_parse_float(float_str: str) -> Any: + float_value = parse_float(float_str) + if isinstance(float_value, (dict, list)): + raise ValueError("parse_float must not return dicts or lists") + return float_value + + return safe_parse_float diff --git a/conda_lock/_vendor/poetry/core/_vendor/tomli/_re.py b/conda_lock/_vendor/poetry/core/_vendor/tomli/_re.py new file mode 100644 index 00000000..994bb749 --- /dev/null +++ b/conda_lock/_vendor/poetry/core/_vendor/tomli/_re.py @@ -0,0 +1,107 @@ +# SPDX-License-Identifier: MIT +# SPDX-FileCopyrightText: 2021 Taneli Hukkinen +# Licensed to PSF under a Contributor Agreement. + +from __future__ import annotations + +from datetime import date, datetime, time, timedelta, timezone, tzinfo +from functools import lru_cache +import re +from typing import Any + +from ._types import ParseFloat + +# E.g. +# - 00:32:00.999999 +# - 00:32:00 +_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?" + +RE_NUMBER = re.compile( + r""" +0 +(?: + x[0-9A-Fa-f](?:_?[0-9A-Fa-f])* # hex + | + b[01](?:_?[01])* # bin + | + o[0-7](?:_?[0-7])* # oct +) +| +[+-]?(?:0|[1-9](?:_?[0-9])*) # dec, integer part +(?P + (?:\.[0-9](?:_?[0-9])*)? # optional fractional part + (?:[eE][+-]?[0-9](?:_?[0-9])*)? # optional exponent part +) +""", + flags=re.VERBOSE, +) +RE_LOCALTIME = re.compile(_TIME_RE_STR) +RE_DATETIME = re.compile( + rf""" +([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01]) # date, e.g. 1988-10-27 +(?: + [Tt ] + {_TIME_RE_STR} + (?:([Zz])|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))? # optional time offset +)? +""", + flags=re.VERBOSE, +) + + +def match_to_datetime(match: re.Match) -> datetime | date: + """Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`. + + Raises ValueError if the match does not correspond to a valid date + or datetime. 
+ """ + ( + year_str, + month_str, + day_str, + hour_str, + minute_str, + sec_str, + micros_str, + zulu_time, + offset_sign_str, + offset_hour_str, + offset_minute_str, + ) = match.groups() + year, month, day = int(year_str), int(month_str), int(day_str) + if hour_str is None: + return date(year, month, day) + hour, minute, sec = int(hour_str), int(minute_str), int(sec_str) + micros = int(micros_str.ljust(6, "0")) if micros_str else 0 + if offset_sign_str: + tz: tzinfo | None = cached_tz( + offset_hour_str, offset_minute_str, offset_sign_str + ) + elif zulu_time: + tz = timezone.utc + else: # local date-time + tz = None + return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz) + + +@lru_cache(maxsize=None) +def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone: + sign = 1 if sign_str == "+" else -1 + return timezone( + timedelta( + hours=sign * int(hour_str), + minutes=sign * int(minute_str), + ) + ) + + +def match_to_localtime(match: re.Match) -> time: + hour_str, minute_str, sec_str, micros_str = match.groups() + micros = int(micros_str.ljust(6, "0")) if micros_str else 0 + return time(int(hour_str), int(minute_str), int(sec_str), micros) + + +def match_to_number(match: re.Match, parse_float: ParseFloat) -> Any: + if match.group("floatpart"): + return parse_float(match.group()) + return int(match.group(), 0) diff --git a/conda_lock/_vendor/poetry/core/_vendor/tomli/_types.py b/conda_lock/_vendor/poetry/core/_vendor/tomli/_types.py new file mode 100644 index 00000000..d949412e --- /dev/null +++ b/conda_lock/_vendor/poetry/core/_vendor/tomli/_types.py @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: MIT +# SPDX-FileCopyrightText: 2021 Taneli Hukkinen +# Licensed to PSF under a Contributor Agreement. + +from typing import Any, Callable, Tuple + +# Type annotations +ParseFloat = Callable[[str], Any] +Key = Tuple[str, ...] +Pos = int diff --git a/conda_lock/_vendor/poetry/core/_vendor/tomli/py.typed b/conda_lock/_vendor/poetry/core/_vendor/tomli/py.typed new file mode 100644 index 00000000..7632ecf7 --- /dev/null +++ b/conda_lock/_vendor/poetry/core/_vendor/tomli/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561 diff --git a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/LICENSE b/conda_lock/_vendor/poetry/core/_vendor/tomlkit/LICENSE deleted file mode 100644 index 44cf2b30..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2018 Sébastien Eustace - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/__init__.py b/conda_lock/_vendor/poetry/core/_vendor/tomlkit/__init__.py deleted file mode 100644 index e0a7a542..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -from .api import aot -from .api import array -from .api import boolean -from .api import comment -from .api import date -from .api import datetime -from .api import document -from .api import dumps -from .api import float_ -from .api import inline_table -from .api import integer -from .api import item -from .api import key -from .api import key_value -from .api import loads -from .api import nl -from .api import parse -from .api import string -from .api import table -from .api import time -from .api import value -from .api import ws - - -__version__ = "0.7.0" diff --git a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/_compat.py b/conda_lock/_vendor/poetry/core/_vendor/tomlkit/_compat.py deleted file mode 100644 index 8d3b0ae3..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/_compat.py +++ /dev/null @@ -1,174 +0,0 @@ -import re -import sys - - -try: - from datetime import timezone -except ImportError: - from datetime import datetime - from datetime import timedelta - from datetime import tzinfo - - class timezone(tzinfo): - __slots__ = "_offset", "_name" - - # Sentinel value to disallow None - _Omitted = object() - - def __new__(cls, offset, name=_Omitted): - if not isinstance(offset, timedelta): - raise TypeError("offset must be a timedelta") - if name is cls._Omitted: - if not offset: - return cls.utc - name = None - elif not isinstance(name, str): - raise TypeError("name must be a string") - if not cls._minoffset <= offset <= cls._maxoffset: - raise ValueError( - "offset must be a timedelta " - "strictly between -timedelta(hours=24) and " - "timedelta(hours=24)." - ) - return cls._create(offset, name) - - @classmethod - def _create(cls, offset, name=None): - self = tzinfo.__new__(cls) - self._offset = offset - self._name = name - return self - - def __getinitargs__(self): - """pickle support""" - if self._name is None: - return (self._offset,) - return (self._offset, self._name) - - def __eq__(self, other): - if type(other) != timezone: - return False - return self._offset == other._offset - - def __hash__(self): - return hash(self._offset) - - def __repr__(self): - """Convert to formal string, for repr(). 
- - >>> tz = timezone.utc - >>> repr(tz) - 'datetime.timezone.utc' - >>> tz = timezone(timedelta(hours=-5), 'EST') - >>> repr(tz) - "datetime.timezone(datetime.timedelta(-1, 68400), 'EST')" - """ - if self is self.utc: - return "datetime.timezone.utc" - if self._name is None: - return "%s.%s(%r)" % ( - self.__class__.__module__, - self.__class__.__name__, - self._offset, - ) - return "%s.%s(%r, %r)" % ( - self.__class__.__module__, - self.__class__.__name__, - self._offset, - self._name, - ) - - def __str__(self): - return self.tzname(None) - - def utcoffset(self, dt): - if isinstance(dt, datetime) or dt is None: - return self._offset - raise TypeError( - "utcoffset() argument must be a datetime instance" " or None" - ) - - def tzname(self, dt): - if isinstance(dt, datetime) or dt is None: - if self._name is None: - return self._name_from_offset(self._offset) - return self._name - raise TypeError("tzname() argument must be a datetime instance" " or None") - - def dst(self, dt): - if isinstance(dt, datetime) or dt is None: - return None - raise TypeError("dst() argument must be a datetime instance" " or None") - - def fromutc(self, dt): - if isinstance(dt, datetime): - if dt.tzinfo is not self: - raise ValueError("fromutc: dt.tzinfo " "is not self") - return dt + self._offset - raise TypeError("fromutc() argument must be a datetime instance" " or None") - - _maxoffset = timedelta(hours=23, minutes=59) - _minoffset = -_maxoffset - - @staticmethod - def _name_from_offset(delta): - if not delta: - return "UTC" - if delta < timedelta(0): - sign = "-" - delta = -delta - else: - sign = "+" - hours, rest = divmod(delta, timedelta(hours=1)) - minutes, rest = divmod(rest, timedelta(minutes=1)) - seconds = rest.seconds - microseconds = rest.microseconds - if microseconds: - return ("UTC{}{:02d}:{:02d}:{:02d}.{:06d}").format( - sign, hours, minutes, seconds, microseconds - ) - if seconds: - return "UTC{}{:02d}:{:02d}:{:02d}".format(sign, hours, minutes, seconds) - return "UTC{}{:02d}:{:02d}".format(sign, hours, minutes) - - timezone.utc = timezone._create(timedelta(0)) - timezone.min = timezone._create(timezone._minoffset) - timezone.max = timezone._create(timezone._maxoffset) - - -PY2 = sys.version_info[0] == 2 -PY36 = sys.version_info >= (3, 6) -PY38 = sys.version_info >= (3, 8) - -if PY2: - unicode = unicode - chr = unichr - long = long -else: - unicode = str - chr = chr - long = int - - -if PY36: - OrderedDict = dict -else: - from collections import OrderedDict - - -def decode(string, encodings=None): - if not PY2 and not isinstance(string, bytes): - return string - - if PY2 and isinstance(string, unicode): - return string - - encodings = encodings or ["utf-8", "latin1", "ascii"] - - for encoding in encodings: - try: - return string.decode(encoding) - except (UnicodeEncodeError, UnicodeDecodeError): - pass - - return string.decode(encodings[0], errors="ignore") diff --git a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/_utils.py b/conda_lock/_vendor/poetry/core/_vendor/tomlkit/_utils.py deleted file mode 100644 index 2ae3e424..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/_utils.py +++ /dev/null @@ -1,144 +0,0 @@ -import re - -from datetime import date -from datetime import datetime -from datetime import time -from datetime import timedelta -from typing import Union - -from ._compat import decode -from ._compat import timezone - - -try: - from collections.abc import Mapping -except ImportError: - from collections import Mapping - - -RFC_3339_LOOSE = re.compile( - "^" - 
r"(([0-9]+)-(\d{2})-(\d{2}))?" # Date - "(" - "([T ])?" # Separator - r"(\d{2}):(\d{2}):(\d{2})(\.([0-9]+))?" # Time - r"((Z)|([\+|\-]([01][0-9]|2[0-3]):([0-5][0-9])))?" # Timezone - ")?" - "$" -) - -RFC_3339_DATETIME = re.compile( - "^" - "([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])" # Date - "[T ]" # Separator - r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.([0-9]+))?" # Time - r"((Z)|([\+|\-]([01][0-9]|2[0-3]):([0-5][0-9])))?" # Timezone - "$" -) - -RFC_3339_DATE = re.compile("^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])$") - -RFC_3339_TIME = re.compile( - r"^([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.([0-9]+))?$" -) - -_utc = timezone(timedelta(), "UTC") - - -def parse_rfc3339(string): # type: (str) -> Union[datetime, date, time] - m = RFC_3339_DATETIME.match(string) - if m: - year = int(m.group(1)) - month = int(m.group(2)) - day = int(m.group(3)) - hour = int(m.group(4)) - minute = int(m.group(5)) - second = int(m.group(6)) - microsecond = 0 - - if m.group(7): - microsecond = int(("{:<06s}".format(m.group(8)))[:6]) - - if m.group(9): - # Timezone - tz = m.group(9) - if tz == "Z": - tzinfo = _utc - else: - sign = m.group(11)[0] - hour_offset, minute_offset = int(m.group(12)), int(m.group(13)) - offset = timedelta(seconds=hour_offset * 3600 + minute_offset * 60) - if sign == "-": - offset = -offset - - tzinfo = timezone( - offset, "{}{}:{}".format(sign, m.group(12), m.group(13)) - ) - - return datetime( - year, month, day, hour, minute, second, microsecond, tzinfo=tzinfo - ) - else: - return datetime(year, month, day, hour, minute, second, microsecond) - - m = RFC_3339_DATE.match(string) - if m: - year = int(m.group(1)) - month = int(m.group(2)) - day = int(m.group(3)) - - return date(year, month, day) - - m = RFC_3339_TIME.match(string) - if m: - hour = int(m.group(1)) - minute = int(m.group(2)) - second = int(m.group(3)) - microsecond = 0 - - if m.group(4): - microsecond = int(("{:<06s}".format(m.group(5)))[:6]) - - return time(hour, minute, second, microsecond) - - raise ValueError("Invalid RFC 339 string") - - -_escaped = {"b": "\b", "t": "\t", "n": "\n", "f": "\f", "r": "\r", '"': '"', "\\": "\\"} -_escapes = {v: k for k, v in _escaped.items()} - - -def escape_string(s): - s = decode(s) - - res = [] - start = 0 - - def flush(): - if start != i: - res.append(s[start:i]) - - return i + 1 - - i = 0 - while i < len(s): - c = s[i] - if c in '"\\\n\r\t\b\f': - start = flush() - res.append("\\" + _escapes[c]) - elif ord(c) < 0x20: - start = flush() - res.append("\\u%04x" % ord(c)) - i += 1 - - flush() - - return "".join(res) - - -def merge_dicts(d1, d2): - for k, v in d2.items(): - if k in d1 and isinstance(d1[k], dict) and isinstance(d2[k], Mapping): - merge_dicts(d1[k], d2[k]) - else: - d1[k] = d2[k] diff --git a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/api.py b/conda_lock/_vendor/poetry/core/_vendor/tomlkit/api.py deleted file mode 100644 index 3de41219..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/api.py +++ /dev/null @@ -1,142 +0,0 @@ -import datetime as _datetime - -from typing import Tuple - -from ._utils import parse_rfc3339 -from .container import Container -from .items import AoT -from .items import Array -from .items import Bool -from .items import Comment -from .items import Date -from .items import DateTime -from .items import Float -from .items import InlineTable -from .items import Integer -from .items import Item as _Item -from .items import Key -from .items import String -from .items import Table -from .items import Time 
-from .items import Trivia -from .items import Whitespace -from .items import item -from .parser import Parser -from .toml_document import TOMLDocument as _TOMLDocument - - -def loads(string): # type: (str) -> _TOMLDocument - """ - Parses a string into a TOMLDocument. - - Alias for parse(). - """ - return parse(string) - - -def dumps(data, sort_keys=False): # type: (_TOMLDocument, bool) -> str - """ - Dumps a TOMLDocument into a string. - """ - if not isinstance(data, _TOMLDocument) and isinstance(data, dict): - data = item(data, _sort_keys=sort_keys) - - return data.as_string() - - -def parse(string): # type: (str) -> _TOMLDocument - """ - Parses a string into a TOMLDocument. - """ - return Parser(string).parse() - - -def document(): # type: () -> _TOMLDocument - """ - Returns a new TOMLDocument instance. - """ - return _TOMLDocument() - - -# Items -def integer(raw): # type: (str) -> Integer - return item(int(raw)) - - -def float_(raw): # type: (str) -> Float - return item(float(raw)) - - -def boolean(raw): # type: (str) -> Bool - return item(raw == "true") - - -def string(raw): # type: (str) -> String - return item(raw) - - -def date(raw): # type: (str) -> Date - value = parse_rfc3339(raw) - if not isinstance(value, _datetime.date): - raise ValueError("date() only accepts date strings.") - - return item(value) - - -def time(raw): # type: (str) -> Time - value = parse_rfc3339(raw) - if not isinstance(value, _datetime.time): - raise ValueError("time() only accepts time strings.") - - return item(value) - - -def datetime(raw): # type: (str) -> DateTime - value = parse_rfc3339(raw) - if not isinstance(value, _datetime.datetime): - raise ValueError("datetime() only accepts datetime strings.") - - return item(value) - - -def array(raw=None): # type: (str) -> Array - if raw is None: - raw = "[]" - - return value(raw) - - -def table(): # type: () -> Table - return Table(Container(), Trivia(), False) - - -def inline_table(): # type: () -> InlineTable - return InlineTable(Container(), Trivia(), new=True) - - -def aot(): # type: () -> AoT - return AoT([]) - - -def key(k): # type: (str) -> Key - return Key(k) - - -def value(raw): # type: (str) -> _Item - return Parser(raw)._parse_value() - - -def key_value(src): # type: (str) -> Tuple[Key, _Item] - return Parser(src)._parse_key_value() - - -def ws(src): # type: (str) -> Whitespace - return Whitespace(src, fixed=True) - - -def nl(): # type: () -> Whitespace - return ws("\n") - - -def comment(string): # type: (str) -> Comment - return Comment(Trivia(comment_ws=" ", comment="# " + string)) diff --git a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/container.py b/conda_lock/_vendor/poetry/core/_vendor/tomlkit/container.py deleted file mode 100644 index 6386e738..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/container.py +++ /dev/null @@ -1,800 +0,0 @@ -from __future__ import unicode_literals - -import copy - -from typing import Any -from typing import Dict -from typing import Generator -from typing import List -from typing import Optional -from typing import Tuple -from typing import Union - -from ._compat import decode -from ._utils import merge_dicts -from .exceptions import KeyAlreadyPresent -from .exceptions import NonExistentKey -from .exceptions import ParseError -from .exceptions import TOMLKitError -from .items import AoT -from .items import Comment -from .items import Item -from .items import Key -from .items import Null -from .items import Table -from .items import Whitespace -from .items import item as _item - - -_NOT_SET = 
object() - - -class Container(dict): - """ - A container for items within a TOMLDocument. - """ - - def __init__(self, parsed=False): # type: (bool) -> None - self._map = {} # type: Dict[Key, int] - self._body = [] # type: List[Tuple[Optional[Key], Item]] - self._parsed = parsed - self._table_keys = [] - - @property - def body(self): # type: () -> List[Tuple[Optional[Key], Item]] - return self._body - - @property - def value(self): # type: () -> Dict[Any, Any] - d = {} - for k, v in self._body: - if k is None: - continue - - k = k.key - v = v.value - - if isinstance(v, Container): - v = v.value - - if k in d: - merge_dicts(d[k], v) - else: - d[k] = v - - return d - - def parsing(self, parsing): # type: (bool) -> None - self._parsed = parsing - - for k, v in self._body: - if isinstance(v, Table): - v.value.parsing(parsing) - elif isinstance(v, AoT): - for t in v.body: - t.value.parsing(parsing) - - def add( - self, key, item=None - ): # type: (Union[Key, Item, str], Optional[Item]) -> Container - """ - Adds an item to the current Container. - """ - if item is None: - if not isinstance(key, (Comment, Whitespace)): - raise ValueError( - "Non comment/whitespace items must have an associated key" - ) - - key, item = None, key - - return self.append(key, item) - - def append(self, key, item): # type: (Union[Key, str, None], Item) -> Container - if not isinstance(key, Key) and key is not None: - key = Key(key) - - if not isinstance(item, Item): - item = _item(item) - - if isinstance(item, (AoT, Table)) and item.name is None: - item.name = key.key - - if ( - isinstance(item, Table) - and self._body - and not self._parsed - and not item.trivia.indent - ): - item.trivia.indent = "\n" - - if isinstance(item, AoT) and self._body and not self._parsed: - if item and "\n" not in item[0].trivia.indent: - item[0].trivia.indent = "\n" + item[0].trivia.indent - else: - self.append(None, Whitespace("\n")) - - if key is not None and key in self: - current_idx = self._map[key] - if isinstance(current_idx, tuple): - current_body_element = self._body[current_idx[-1]] - else: - current_body_element = self._body[current_idx] - - current = current_body_element[1] - - if isinstance(item, Table): - if not isinstance(current, (Table, AoT)): - raise KeyAlreadyPresent(key) - - if item.is_aot_element(): - # New AoT element found later on - # Adding it to the current AoT - if not isinstance(current, AoT): - current = AoT([current, item], parsed=self._parsed) - - self._replace(key, key, current) - else: - current.append(item) - - return self - elif current.is_aot(): - if not item.is_aot_element(): - # Tried to define a table after an AoT with the same name. 
- raise KeyAlreadyPresent(key) - - current.append(item) - - return self - elif current.is_super_table(): - if item.is_super_table(): - # We need to merge both super tables - if ( - self._table_keys[-1] != current_body_element[0] - or key.is_dotted() - or current_body_element[0].is_dotted() - ): - if not isinstance(current_idx, tuple): - current_idx = (current_idx,) - - self._map[key] = current_idx + (len(self._body),) - self._body.append((key, item)) - self._table_keys.append(key) - - # Building a temporary proxy to check for errors - OutOfOrderTableProxy(self, self._map[key]) - - return self - - for k, v in item.value.body: - current.append(k, v) - - return self - elif current_body_element[0].is_dotted(): - raise TOMLKitError("Redefinition of an existing table") - elif not item.is_super_table(): - raise KeyAlreadyPresent(key) - elif isinstance(item, AoT): - if not isinstance(current, AoT): - # Tried to define an AoT after a table with the same name. - raise KeyAlreadyPresent(key) - - for table in item.body: - current.append(table) - - return self - else: - raise KeyAlreadyPresent(key) - - is_table = isinstance(item, (Table, AoT)) - if key is not None and self._body and not self._parsed: - # If there is already at least one table in the current container - # and the given item is not a table, we need to find the last - # item that is not a table and insert after it - # If no such item exists, insert at the top of the table - key_after = None - idx = 0 - for k, v in self._body: - if isinstance(v, Null): - # This happens only after deletion - continue - - if isinstance(v, Whitespace) and not v.is_fixed(): - continue - - if not is_table and isinstance(v, (Table, AoT)): - break - - key_after = k or idx - idx += 1 - - if key_after is not None: - if isinstance(key_after, int): - if key_after + 1 < len(self._body) - 1: - return self._insert_at(key_after + 1, key, item) - else: - previous_item = self._body[-1][1] - if ( - not isinstance(previous_item, Whitespace) - and not is_table - and "\n" not in previous_item.trivia.trail - ): - previous_item.trivia.trail += "\n" - else: - return self._insert_after(key_after, key, item) - else: - return self._insert_at(0, key, item) - - if key in self._map: - current_idx = self._map[key] - if isinstance(current_idx, tuple): - current_idx = current_idx[-1] - - current = self._body[current_idx][1] - if key is not None and not isinstance(current, Table): - raise KeyAlreadyPresent(key) - - # Adding sub tables to a currently existing table - if not isinstance(current_idx, tuple): - current_idx = (current_idx,) - - self._map[key] = current_idx + (len(self._body),) - else: - self._map[key] = len(self._body) - - self._body.append((key, item)) - if item.is_table(): - self._table_keys.append(key) - - if key is not None: - super(Container, self).__setitem__(key.key, item.value) - - return self - - def remove(self, key): # type: (Union[Key, str]) -> Container - if not isinstance(key, Key): - key = Key(key) - - idx = self._map.pop(key, None) - if idx is None: - raise NonExistentKey(key) - - if isinstance(idx, tuple): - for i in idx: - self._body[i] = (None, Null()) - else: - self._body[idx] = (None, Null()) - - super(Container, self).__delitem__(key.key) - - return self - - def _insert_after( - self, key, other_key, item - ): # type: (Union[str, Key], Union[str, Key], Union[Item, Any]) -> Container - if key is None: - raise ValueError("Key cannot be null in insert_after()") - - if key not in self: - raise NonExistentKey(key) - - if not isinstance(key, Key): - key = 
Key(key) - - if not isinstance(other_key, Key): - other_key = Key(other_key) - - item = _item(item) - - idx = self._map[key] - # Insert after the max index if there are many. - if isinstance(idx, tuple): - idx = max(idx) - current_item = self._body[idx][1] - if "\n" not in current_item.trivia.trail: - current_item.trivia.trail += "\n" - - # Increment indices after the current index - for k, v in self._map.items(): - if isinstance(v, tuple): - new_indices = [] - for v_ in v: - if v_ > idx: - v_ = v_ + 1 - - new_indices.append(v_) - - self._map[k] = tuple(new_indices) - elif v > idx: - self._map[k] = v + 1 - - self._map[other_key] = idx + 1 - self._body.insert(idx + 1, (other_key, item)) - - if key is not None: - super(Container, self).__setitem__(other_key.key, item.value) - - return self - - def _insert_at( - self, idx, key, item - ): # type: (int, Union[str, Key], Union[Item, Any]) -> Container - if idx > len(self._body) - 1: - raise ValueError("Unable to insert at position {}".format(idx)) - - if not isinstance(key, Key): - key = Key(key) - - item = _item(item) - - if idx > 0: - previous_item = self._body[idx - 1][1] - if ( - not isinstance(previous_item, Whitespace) - and not isinstance(item, (AoT, Table)) - and "\n" not in previous_item.trivia.trail - ): - previous_item.trivia.trail += "\n" - - # Increment indices after the current index - for k, v in self._map.items(): - if isinstance(v, tuple): - new_indices = [] - for v_ in v: - if v_ >= idx: - v_ = v_ + 1 - - new_indices.append(v_) - - self._map[k] = tuple(new_indices) - elif v >= idx: - self._map[k] = v + 1 - - self._map[key] = idx - self._body.insert(idx, (key, item)) - - if key is not None: - super(Container, self).__setitem__(key.key, item.value) - - return self - - def item(self, key): # type: (Union[Key, str]) -> Item - if not isinstance(key, Key): - key = Key(key) - - idx = self._map.get(key, None) - if idx is None: - raise NonExistentKey(key) - - if isinstance(idx, tuple): - # The item we are getting is an out of order table - # so we need a proxy to retrieve the proper objects - # from the parent container - return OutOfOrderTableProxy(self, idx) - - return self._body[idx][1] - - def last_item(self): # type: () -> Optional[Item] - if self._body: - return self._body[-1][1] - - def as_string(self): # type: () -> str - s = "" - for k, v in self._body: - if k is not None: - if isinstance(v, Table): - s += self._render_table(k, v) - elif isinstance(v, AoT): - s += self._render_aot(k, v) - else: - s += self._render_simple_item(k, v) - else: - s += self._render_simple_item(k, v) - - return s - - def _render_table( - self, key, table, prefix=None - ): # (Key, Table, Optional[str]) -> str - cur = "" - - if table.display_name is not None: - _key = table.display_name - else: - _key = key.as_string() - - if prefix is not None: - _key = prefix + "." 
+ _key - - if not table.is_super_table() or ( - any( - not isinstance(v, (Table, AoT, Whitespace)) for _, v in table.value.body - ) - and not key.is_dotted() - ): - open_, close = "[", "]" - if table.is_aot_element(): - open_, close = "[[", "]]" - - cur += "{}{}{}{}{}{}{}{}".format( - table.trivia.indent, - open_, - decode(_key), - close, - table.trivia.comment_ws, - decode(table.trivia.comment), - table.trivia.trail, - "\n" if "\n" not in table.trivia.trail and len(table.value) > 0 else "", - ) - - for k, v in table.value.body: - if isinstance(v, Table): - if v.is_super_table(): - if k.is_dotted() and not key.is_dotted(): - # Dotted key inside table - cur += self._render_table(k, v) - else: - cur += self._render_table(k, v, prefix=_key) - else: - cur += self._render_table(k, v, prefix=_key) - elif isinstance(v, AoT): - cur += self._render_aot(k, v, prefix=_key) - else: - cur += self._render_simple_item( - k, v, prefix=_key if key.is_dotted() else None - ) - - return cur - - def _render_aot(self, key, aot, prefix=None): - _key = key.as_string() - if prefix is not None: - _key = prefix + "." + _key - - cur = "" - _key = decode(_key) - for table in aot.body: - cur += self._render_aot_table(table, prefix=_key) - - return cur - - def _render_aot_table(self, table, prefix=None): # (Table, Optional[str]) -> str - cur = "" - - _key = prefix or "" - - if not table.is_super_table(): - open_, close = "[[", "]]" - - cur += "{}{}{}{}{}{}{}".format( - table.trivia.indent, - open_, - decode(_key), - close, - table.trivia.comment_ws, - decode(table.trivia.comment), - table.trivia.trail, - ) - - for k, v in table.value.body: - if isinstance(v, Table): - if v.is_super_table(): - if k.is_dotted(): - # Dotted key inside table - cur += self._render_table(k, v) - else: - cur += self._render_table(k, v, prefix=_key) - else: - cur += self._render_table(k, v, prefix=_key) - elif isinstance(v, AoT): - cur += self._render_aot(k, v, prefix=_key) - else: - cur += self._render_simple_item(k, v) - - return cur - - def _render_simple_item(self, key, item, prefix=None): - if key is None: - return item.as_string() - - _key = key.as_string() - if prefix is not None: - _key = prefix + "." 
+ _key - - return "{}{}{}{}{}{}{}".format( - item.trivia.indent, - decode(_key), - key.sep, - decode(item.as_string()), - item.trivia.comment_ws, - decode(item.trivia.comment), - item.trivia.trail, - ) - - # Dictionary methods - - def keys(self): # type: () -> Generator[str] - return super(Container, self).keys() - - def values(self): # type: () -> Generator[Item] - for k in self.keys(): - yield self[k] - - def items(self): # type: () -> Generator[Item] - for k, v in self.value.items(): - if k is None: - continue - - yield k, v - - def update(self, other): # type: (Dict) -> None - for k, v in other.items(): - self[k] = v - - def get(self, key, default=None): # type: (Any, Optional[Any]) -> Any - if not isinstance(key, Key): - key = Key(key) - - if key not in self: - return default - - return self[key] - - def pop(self, key, default=_NOT_SET): - try: - value = self[key] - except KeyError: - if default is _NOT_SET: - raise - - return default - - del self[key] - - return value - - def setdefault( - self, key, default=None - ): # type: (Union[Key, str], Any) -> Union[Item, Container] - if key not in self: - self[key] = default - - return self[key] - - def __contains__(self, key): # type: (Union[Key, str]) -> bool - if not isinstance(key, Key): - key = Key(key) - - return key in self._map - - def __getitem__(self, key): # type: (Union[Key, str]) -> Union[Item, Container] - if not isinstance(key, Key): - key = Key(key) - - idx = self._map.get(key, None) - if idx is None: - raise NonExistentKey(key) - - if isinstance(idx, tuple): - # The item we are getting is an out of order table - # so we need a proxy to retrieve the proper objects - # from the parent container - return OutOfOrderTableProxy(self, idx) - - item = self._body[idx][1] - if item.is_boolean(): - return item.value - - return item - - def __setitem__(self, key, value): # type: (Union[Key, str], Any) -> None - if key is not None and key in self: - self._replace(key, key, value) - else: - self.append(key, value) - - def __delitem__(self, key): # type: (Union[Key, str]) -> None - self.remove(key) - - def _replace( - self, key, new_key, value - ): # type: (Union[Key, str], Union[Key, str], Item) -> None - if not isinstance(key, Key): - key = Key(key) - - if not isinstance(new_key, Key): - new_key = Key(new_key) - - idx = self._map.get(key, None) - if idx is None: - raise NonExistentKey(key) - - self._replace_at(idx, new_key, value) - - def _replace_at( - self, idx, new_key, value - ): # type: (Union[int, Tuple[int]], Union[Key, str], Item) -> None - if not isinstance(new_key, Key): - new_key = Key(new_key) - - if isinstance(idx, tuple): - for i in idx[1:]: - self._body[i] = (None, Null()) - - idx = idx[0] - - k, v = self._body[idx] - - self._map[new_key] = self._map.pop(k) - if new_key != k: - super(Container, self).__delitem__(k) - - if isinstance(self._map[new_key], tuple): - self._map[new_key] = self._map[new_key][0] - - value = _item(value) - - # Copying trivia - if not isinstance(value, (Whitespace, AoT)): - value.trivia.indent = v.trivia.indent - value.trivia.comment_ws = v.trivia.comment_ws - value.trivia.comment = v.trivia.comment - value.trivia.trail = v.trivia.trail - - if isinstance(value, Table): - # Insert a cosmetic new line for tables - value.append(None, Whitespace("\n")) - - self._body[idx] = (new_key, value) - - super(Container, self).__setitem__(new_key.key, value.value) - - def __str__(self): # type: () -> str - return str(self.value) - - def __repr__(self): # type: () -> str - return super(Container, self).__repr__() 
- - def __eq__(self, other): # type: (Dict) -> bool - if not isinstance(other, dict): - return NotImplemented - - return self.value == other - - def _getstate(self, protocol): - return (self._parsed,) - - def __reduce__(self): - return self.__reduce_ex__(2) - - def __reduce_ex__(self, protocol): - return ( - self.__class__, - self._getstate(protocol), - (self._map, self._body, self._parsed), - ) - - def __setstate__(self, state): - self._map = state[0] - self._body = state[1] - self._parsed = state[2] - - def copy(self): # type: () -> Container - return copy.copy(self) - - def __copy__(self): # type: () -> Container - c = self.__class__(self._parsed) - for k, v in super(Container, self).copy().items(): - super(Container, c).__setitem__(k, v) - - c._body += self.body - c._map.update(self._map) - - return c - - -class OutOfOrderTableProxy(dict): - def __init__(self, container, indices): # type: (Container, Tuple) -> None - self._container = container - self._internal_container = Container(self._container.parsing) - self._tables = [] - self._tables_map = {} - self._map = {} - - for i in indices: - key, item = self._container._body[i] - - if isinstance(item, Table): - self._tables.append(item) - table_idx = len(self._tables) - 1 - for k, v in item.value.body: - self._internal_container.append(k, v) - self._tables_map[k] = table_idx - if k is not None: - super(OutOfOrderTableProxy, self).__setitem__(k.key, v) - else: - self._internal_container.append(key, item) - self._map[key] = i - if key is not None: - super(OutOfOrderTableProxy, self).__setitem__(key.key, item) - - @property - def value(self): - return self._internal_container.value - - def __getitem__(self, key): # type: (Union[Key, str]) -> Any - if key not in self._internal_container: - raise NonExistentKey(key) - - return self._internal_container[key] - - def __setitem__(self, key, item): # type: (Union[Key, str], Any) -> None - if key in self._map: - idx = self._map[key] - self._container._replace_at(idx, key, item) - elif key in self._tables_map: - table = self._tables[self._tables_map[key]] - table[key] = item - elif self._tables: - table = self._tables[0] - table[key] = item - else: - self._container[key] = item - - if key is not None: - super(OutOfOrderTableProxy, self).__setitem__(key, item) - - def __delitem__(self, key): # type: (Union[Key, str]) -> None - if key in self._map: - idx = self._map[key] - del self._container[key] - del self._map[key] - elif key in self._tables_map: - table = self._tables[self._tables_map[key]] - del table[key] - del self._tables_map[key] - else: - raise NonExistentKey(key) - - del self._internal_container[key] - - def keys(self): - return self._internal_container.keys() - - def values(self): - return self._internal_container.values() - - def items(self): # type: () -> Generator[Item] - return self._internal_container.items() - - def update(self, other): # type: (Dict) -> None - self._internal_container.update(other) - - def get(self, key, default=None): # type: (Any, Optional[Any]) -> Any - return self._internal_container.get(key, default=default) - - def pop(self, key, default=_NOT_SET): - return self._internal_container.pop(key, default=default) - - def setdefault( - self, key, default=None - ): # type: (Union[Key, str], Any) -> Union[Item, Container] - return self._internal_container.setdefault(key, default=default) - - def __contains__(self, key): - return key in self._internal_container - - def __str__(self): - return str(self._internal_container) - - def __repr__(self): - return 
repr(self._internal_container) - - def __eq__(self, other): # type: (Dict) -> bool - if not isinstance(other, dict): - return NotImplemented - - return self._internal_container == other - - def __getattr__(self, attribute): - return getattr(self._internal_container, attribute) diff --git a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/exceptions.py b/conda_lock/_vendor/poetry/core/_vendor/tomlkit/exceptions.py deleted file mode 100644 index 44836363..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/exceptions.py +++ /dev/null @@ -1,221 +0,0 @@ -from typing import Optional - - -class TOMLKitError(Exception): - - pass - - -class ParseError(ValueError, TOMLKitError): - """ - This error occurs when the parser encounters a syntax error - in the TOML being parsed. The error references the line and - location within the line where the error was encountered. - """ - - def __init__( - self, line, col, message=None - ): # type: (int, int, Optional[str]) -> None - self._line = line - self._col = col - - if message is None: - message = "TOML parse error" - - super(ParseError, self).__init__( - "{} at line {} col {}".format(message, self._line, self._col) - ) - - @property - def line(self): - return self._line - - @property - def col(self): - return self._col - - -class MixedArrayTypesError(ParseError): - """ - An array was found that had two or more element types. - """ - - def __init__(self, line, col): # type: (int, int) -> None - message = "Mixed types found in array" - - super(MixedArrayTypesError, self).__init__(line, col, message=message) - - -class InvalidNumberError(ParseError): - """ - A numeric field was improperly specified. - """ - - def __init__(self, line, col): # type: (int, int) -> None - message = "Invalid number" - - super(InvalidNumberError, self).__init__(line, col, message=message) - - -class InvalidDateTimeError(ParseError): - """ - A datetime field was improperly specified. - """ - - def __init__(self, line, col): # type: (int, int) -> None - message = "Invalid datetime" - - super(InvalidDateTimeError, self).__init__(line, col, message=message) - - -class InvalidDateError(ParseError): - """ - A date field was improperly specified. - """ - - def __init__(self, line, col): # type: (int, int) -> None - message = "Invalid date" - - super(InvalidDateError, self).__init__(line, col, message=message) - - -class InvalidTimeError(ParseError): - """ - A date field was improperly specified. - """ - - def __init__(self, line, col): # type: (int, int) -> None - message = "Invalid time" - - super(InvalidTimeError, self).__init__(line, col, message=message) - - -class InvalidNumberOrDateError(ParseError): - """ - A numeric or date field was improperly specified. - """ - - def __init__(self, line, col): # type: (int, int) -> None - message = "Invalid number or date format" - - super(InvalidNumberOrDateError, self).__init__(line, col, message=message) - - -class InvalidUnicodeValueError(ParseError): - """ - A unicode code was improperly specified. - """ - - def __init__(self, line, col): # type: (int, int) -> None - message = "Invalid unicode value" - - super(InvalidUnicodeValueError, self).__init__(line, col, message=message) - - -class UnexpectedCharError(ParseError): - """ - An unexpected character was found during parsing. 
- """ - - def __init__(self, line, col, char): # type: (int, int, str) -> None - message = "Unexpected character: {}".format(repr(char)) - - super(UnexpectedCharError, self).__init__(line, col, message=message) - - -class EmptyKeyError(ParseError): - """ - An empty key was found during parsing. - """ - - def __init__(self, line, col): # type: (int, int) -> None - message = "Empty key" - - super(EmptyKeyError, self).__init__(line, col, message=message) - - -class EmptyTableNameError(ParseError): - """ - An empty table name was found during parsing. - """ - - def __init__(self, line, col): # type: (int, int) -> None - message = "Empty table name" - - super(EmptyTableNameError, self).__init__(line, col, message=message) - - -class InvalidCharInStringError(ParseError): - """ - The string being parsed contains an invalid character. - """ - - def __init__(self, line, col, char): # type: (int, int, str) -> None - message = "Invalid character {} in string".format(repr(char)) - - super(InvalidCharInStringError, self).__init__(line, col, message=message) - - -class UnexpectedEofError(ParseError): - """ - The TOML being parsed ended before the end of a statement. - """ - - def __init__(self, line, col): # type: (int, int) -> None - message = "Unexpected end of file" - - super(UnexpectedEofError, self).__init__(line, col, message=message) - - -class InternalParserError(ParseError): - """ - An error that indicates a bug in the parser. - """ - - def __init__( - self, line, col, message=None - ): # type: (int, int, Optional[str]) -> None - msg = "Internal parser error" - if message: - msg += " ({})".format(message) - - super(InternalParserError, self).__init__(line, col, message=msg) - - -class NonExistentKey(KeyError, TOMLKitError): - """ - A non-existent key was used. - """ - - def __init__(self, key): - message = 'Key "{}" does not exist.'.format(key) - - super(NonExistentKey, self).__init__(message) - - -class KeyAlreadyPresent(TOMLKitError): - """ - An already present key was used. 
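Everything in this removed module funnels through ParseError, which records a line/column pair and folds both into the exception text, so each subclass only has to supply a fixed message. A self-contained sketch of the same pattern (class names mirror the deleted code; the try/except at the end is hypothetical demo usage, not part of the diff):

class TOMLKitError(Exception):
    pass

class ParseError(ValueError, TOMLKitError):
    def __init__(self, line, col, message=None):
        self._line = line
        self._col = col
        super().__init__(
            "{} at line {} col {}".format(message or "TOML parse error", line, col)
        )

class EmptyKeyError(ParseError):
    def __init__(self, line, col):
        super().__init__(line, col, message="Empty key")

try:
    raise EmptyKeyError(3, 1)
except ParseError as e:
    print(e)  # Empty key at line 3 col 1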
- """ - - def __init__(self, key): - message = 'Key "{}" already exists.'.format(key) - - super(KeyAlreadyPresent, self).__init__(message) - - -class InvalidControlChar(ParseError): - def __init__(self, line, col, char, type): # type: (int, int, int, str) -> None - display_code = "\\u00" - - if char < 16: - display_code += "0" - - display_code += str(char) - - message = ( - "Control characters (codes less than 0x1f and 0x7f) are not allowed in {}, " - "use {} instead".format(type, display_code) - ) - - super(InvalidControlChar, self).__init__(line, col, message=message) diff --git a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/items.py b/conda_lock/_vendor/poetry/core/_vendor/tomlkit/items.py deleted file mode 100644 index 184ffe7d..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/items.py +++ /dev/null @@ -1,1339 +0,0 @@ -from __future__ import unicode_literals - -import re -import string - -from datetime import date -from datetime import datetime -from datetime import time -from enum import Enum -from typing import Any -from typing import Dict -from typing import Generator -from typing import List -from typing import Optional -from typing import Union - -from ._compat import PY2 -from ._compat import PY38 -from ._compat import decode -from ._compat import long -from ._compat import unicode -from ._utils import escape_string - - -if PY2: - from functools32 import lru_cache -else: - from functools import lru_cache - - -def item(value, _parent=None, _sort_keys=False): - from .container import Container - - if isinstance(value, Item): - return value - - if isinstance(value, bool): - return Bool(value, Trivia()) - elif isinstance(value, int): - return Integer(value, Trivia(), str(value)) - elif isinstance(value, float): - return Float(value, Trivia(), str(value)) - elif isinstance(value, dict): - val = Table(Container(), Trivia(), False) - for k, v in sorted( - value.items(), - key=lambda i: (isinstance(i[1], dict), i[0] if _sort_keys else 1), - ): - val[k] = item(v, _parent=val, _sort_keys=_sort_keys) - - return val - elif isinstance(value, list): - if value and isinstance(value[0], dict): - a = AoT([]) - else: - a = Array([], Trivia()) - - for v in value: - if isinstance(v, dict): - table = Table(Container(), Trivia(), True) - - for k, _v in sorted( - v.items(), - key=lambda i: (isinstance(i[1], dict), i[0] if _sort_keys else 1), - ): - i = item(_v, _sort_keys=_sort_keys) - if isinstance(table, InlineTable): - i.trivia.trail = "" - - table[k] = item(i, _sort_keys=_sort_keys) - - v = table - - a.append(v) - - return a - elif isinstance(value, (str, unicode)): - escaped = escape_string(value) - - return String(StringType.SLB, decode(value), escaped, Trivia()) - elif isinstance(value, datetime): - return DateTime( - value.year, - value.month, - value.day, - value.hour, - value.minute, - value.second, - value.microsecond, - value.tzinfo, - Trivia(), - value.isoformat().replace("+00:00", "Z"), - ) - elif isinstance(value, date): - return Date(value.year, value.month, value.day, Trivia(), value.isoformat()) - elif isinstance(value, time): - return Time( - value.hour, - value.minute, - value.second, - value.microsecond, - value.tzinfo, - Trivia(), - value.isoformat(), - ) - - raise ValueError("Invalid type {}".format(type(value))) - - -class StringType(Enum): - # Single Line Basic - SLB = '"' - # Multi Line Basic - MLB = '"""' - # Single Line Literal - SLL = "'" - # Multi Line Literal - MLL = "'''" - - @property - @lru_cache(maxsize=None) - def unit(self): # type: () -> str - return 
self.value[0]
-
-    @lru_cache(maxsize=None)
-    def is_basic(self):  # type: () -> bool
-        return self in {StringType.SLB, StringType.MLB}
-
-    @lru_cache(maxsize=None)
-    def is_literal(self):  # type: () -> bool
-        return self in {StringType.SLL, StringType.MLL}
-
-    @lru_cache(maxsize=None)
-    def is_singleline(self):  # type: () -> bool
-        return self in {StringType.SLB, StringType.SLL}
-
-    @lru_cache(maxsize=None)
-    def is_multiline(self):  # type: () -> bool
-        return self in {StringType.MLB, StringType.MLL}
-
-    @lru_cache(maxsize=None)
-    def toggle(self):  # type: () -> StringType
-        return {
-            StringType.SLB: StringType.MLB,
-            StringType.MLB: StringType.SLB,
-            StringType.SLL: StringType.MLL,
-            StringType.MLL: StringType.SLL,
-        }[self]
-
-
-class BoolType(Enum):
-    TRUE = "true"
-    FALSE = "false"
-
-    @lru_cache(maxsize=None)
-    def __bool__(self):
-        return {BoolType.TRUE: True, BoolType.FALSE: False}[self]
-
-    if PY2:
-        __nonzero__ = __bool__  # for PY2
-
-    def __iter__(self):
-        return iter(self.value)
-
-    def __len__(self):
-        return len(self.value)
-
-
-class Trivia:
-    """
-    Trivia information (aka metadata).
-    """
-
-    def __init__(
-        self, indent=None, comment_ws=None, comment=None, trail=None
-    ):  # type: (str, str, str, str) -> None
-        # Whitespace before a value.
-        self.indent = indent or ""
-        # Whitespace after a value, but before a comment.
-        self.comment_ws = comment_ws or ""
-        # Comment, starting with # character, or empty string if no comment.
-        self.comment = comment or ""
-        # Trailing newline.
-        if trail is None:
-            trail = "\n"
-
-        self.trail = trail
-
-
-class KeyType(Enum):
-    """
-    The type of a Key.
-
-    Keys can be bare (unquoted), or quoted using basic ("), or literal (')
-    quotes following the same escaping rules as single-line StringType.
-    """
-
-    Bare = ""
-    Basic = '"'
-    Literal = "'"
-
-
-class Key:
-    """
-    A key value.
-    """
-
-    def __init__(
-        self, k, t=None, sep=None, dotted=False, original=None
-    ):  # type: (str, Optional[KeyType], Optional[str], bool, Optional[str]) -> None
-        if t is None:
-            if any(
-                [c not in string.ascii_letters + string.digits + "-" + "_" for c in k]
-            ):
-                t = KeyType.Basic
-            else:
-                t = KeyType.Bare
-
-        self.t = t
-        if sep is None:
-            sep = " = "
-
-        self.sep = sep
-        self.key = k
-        if original is None:
-            original = k
-
-        self._original = original
-
-        self._dotted = dotted
-
-    @property
-    def delimiter(self):  # type: () -> str
-        return self.t.value
-
-    def is_dotted(self):  # type: () -> bool
-        return self._dotted
-
-    def is_bare(self):  # type: () -> bool
-        return self.t == KeyType.Bare
-
-    def as_string(self):  # type: () -> str
-        return "{}{}{}".format(self.delimiter, self._original, self.delimiter)
-
-    def __hash__(self):  # type: () -> int
-        return hash(self.key)
-
-    def __eq__(self, other):  # type: (Key) -> bool
-        if isinstance(other, Key):
-            return self.key == other.key
-
-        return self.key == other
-
-    def __str__(self):  # type: () -> str
-        return self.as_string()
-
-    def __repr__(self):  # type: () -> str
-        return "<Key {}>".format(self.as_string())
-
-
-class Item(object):
-    """
-    An item within a TOML document.
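The Trivia record defined above is the whole round-tripping trick: every item remembers the whitespace and comment text that surrounded it in the source, so a document can be re-serialized byte-for-byte. A minimal standalone illustration of the idea, using invented IntItem/Trivia stand-ins rather than the vendored classes:

class Trivia:
    def __init__(self, indent="", comment_ws="", comment="", trail="\n"):
        self.indent = indent          # whitespace before the value
        self.comment_ws = comment_ws  # whitespace between value and comment
        self.comment = comment        # "# ..." or empty
        self.trail = trail            # trailing newline

class IntItem:
    def __init__(self, value, raw, trivia):
        self.value, self.raw, self.trivia = value, raw, trivia

    def as_string(self):
        t = self.trivia
        return t.indent + self.raw + t.comment_ws + t.comment + t.trail

node = IntItem(255, "0xff", Trivia(indent="  ", comment_ws="  ", comment="# mask"))
assert node.as_string() == "  0xff  # mask\n"  # the hex spelling survives re-emission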
- """ - - def __init__(self, trivia): # type: (Trivia) -> None - self._trivia = trivia - - @property - def trivia(self): # type: () -> Trivia - return self._trivia - - @property - def discriminant(self): # type: () -> int - raise NotImplementedError() - - def as_string(self): # type: () -> str - raise NotImplementedError() - - # Helpers - - def comment(self, comment): # type: (str) -> Item - if not comment.strip().startswith("#"): - comment = "# " + comment - - self._trivia.comment_ws = " " - self._trivia.comment = comment - - return self - - def indent(self, indent): # type: (int) -> Item - if self._trivia.indent.startswith("\n"): - self._trivia.indent = "\n" + " " * indent - else: - self._trivia.indent = " " * indent - - return self - - def is_boolean(self): # type: () -> bool - return isinstance(self, Bool) - - def is_table(self): # type: () -> bool - return isinstance(self, Table) - - def is_inline_table(self): # type: () -> bool - return isinstance(self, InlineTable) - - def is_aot(self): # type: () -> bool - return isinstance(self, AoT) - - def _getstate(self, protocol=3): - return (self._trivia,) - - def __reduce__(self): - return self.__reduce_ex__(2) - - def __reduce_ex__(self, protocol): - return self.__class__, self._getstate(protocol) - - -class Whitespace(Item): - """ - A whitespace literal. - """ - - def __init__(self, s, fixed=False): # type: (str, bool) -> None - self._s = s - self._fixed = fixed - - @property - def s(self): # type: () -> str - return self._s - - @property - def value(self): # type: () -> str - return self._s - - @property - def trivia(self): # type: () -> Trivia - raise RuntimeError("Called trivia on a Whitespace variant.") - - @property - def discriminant(self): # type: () -> int - return 0 - - def is_fixed(self): # type: () -> bool - return self._fixed - - def as_string(self): # type: () -> str - return self._s - - def __repr__(self): # type: () -> str - return "<{} {}>".format(self.__class__.__name__, repr(self._s)) - - def _getstate(self, protocol=3): - return self._s, self._fixed - - -class Comment(Item): - """ - A comment literal. - """ - - @property - def discriminant(self): # type: () -> int - return 1 - - def as_string(self): # type: () -> str - return "{}{}{}".format( - self._trivia.indent, decode(self._trivia.comment), self._trivia.trail - ) - - def __str__(self): # type: () -> str - return "{}{}".format(self._trivia.indent, decode(self._trivia.comment)) - - -class Integer(long, Item): - """ - An integer literal. 
- """ - - def __new__(cls, value, trivia, raw): # type: (int, Trivia, str) -> Integer - return super(Integer, cls).__new__(cls, value) - - def __init__(self, _, trivia, raw): # type: (int, Trivia, str) -> None - super(Integer, self).__init__(trivia) - - self._raw = raw - self._sign = False - - if re.match(r"^[+\-]\d+$", raw): - self._sign = True - - @property - def discriminant(self): # type: () -> int - return 2 - - @property - def value(self): # type: () -> int - return self - - def as_string(self): # type: () -> str - return self._raw - - def __add__(self, other): - result = super(Integer, self).__add__(other) - - return self._new(result) - - def __radd__(self, other): - result = super(Integer, self).__radd__(other) - - if isinstance(other, Integer): - return self._new(result) - - return result - - def __sub__(self, other): - result = super(Integer, self).__sub__(other) - - return self._new(result) - - def __rsub__(self, other): - result = super(Integer, self).__rsub__(other) - - if isinstance(other, Integer): - return self._new(result) - - return result - - def _new(self, result): - raw = str(result) - - if self._sign: - sign = "+" if result >= 0 else "-" - raw = sign + raw - - return Integer(result, self._trivia, raw) - - def _getstate(self, protocol=3): - return int(self), self._trivia, self._raw - - -class Float(float, Item): - """ - A float literal. - """ - - def __new__(cls, value, trivia, raw): # type: (float, Trivia, str) -> Integer - return super(Float, cls).__new__(cls, value) - - def __init__(self, _, trivia, raw): # type: (float, Trivia, str) -> None - super(Float, self).__init__(trivia) - - self._raw = raw - self._sign = False - - if re.match(r"^[+\-].+$", raw): - self._sign = True - - @property - def discriminant(self): # type: () -> int - return 3 - - @property - def value(self): # type: () -> float - return self - - def as_string(self): # type: () -> str - return self._raw - - def __add__(self, other): - result = super(Float, self).__add__(other) - - return self._new(result) - - def __radd__(self, other): - result = super(Float, self).__radd__(other) - - if isinstance(other, Float): - return self._new(result) - - return result - - def __sub__(self, other): - result = super(Float, self).__sub__(other) - - return self._new(result) - - def __rsub__(self, other): - result = super(Float, self).__rsub__(other) - - if isinstance(other, Float): - return self._new(result) - - return result - - def _new(self, result): - raw = str(result) - - if self._sign: - sign = "+" if result >= 0 else "-" - raw = sign + raw - - return Float(result, self._trivia, raw) - - def _getstate(self, protocol=3): - return float(self), self._trivia, self._raw - - -class Bool(Item): - """ - A boolean literal. - """ - - def __init__(self, t, trivia): # type: (int, Trivia) -> None - super(Bool, self).__init__(trivia) - - self._value = bool(t) - - @property - def discriminant(self): # type: () -> int - return 4 - - @property - def value(self): # type: () -> bool - return self._value - - def as_string(self): # type: () -> str - return str(self._value).lower() - - def _getstate(self, protocol=3): - return self._value, self._trivia - - def __bool__(self): - return self._value - - __nonzero__ = __bool__ - - def __eq__(self, other): - if not isinstance(other, bool): - return NotImplemented - - return other == self._value - - def __hash__(self): - return hash(self._value) - - def __repr__(self): - return repr(self._value) - - -class DateTime(Item, datetime): - """ - A datetime literal. 
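Integer and Float above subclass the built-in numeric types, so they compare and compute like plain numbers while also remembering their raw source text; _new() re-attaches an explicit sign when the original literal carried one. The same pattern in isolation (RawInt is an invented name, not the vendored API):

class RawInt(int):
    def __new__(cls, value, raw):
        obj = super().__new__(cls, value)
        obj.raw = raw  # the exact spelling from the source document
        return obj

    def __add__(self, other):
        result = int(self) + int(other)
        # Preserve an explicit "+" if the source literal carried one.
        sign = "+" if self.raw.startswith("+") and result >= 0 else ""
        return RawInt(result, sign + str(result))

n = RawInt(7, "+7")
assert n == 7 and n.raw == "+7"
assert (n + 1).raw == "+8"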
- """ - - def __new__( - cls, - year, - month, - day, - hour, - minute, - second, - microsecond, - tzinfo, - trivia, - raw, - **kwargs - ): # type: (int, int, int, int, int, int, int, Optional[datetime.tzinfo], Trivia, str, Any) -> datetime - return datetime.__new__( - cls, - year, - month, - day, - hour, - minute, - second, - microsecond, - tzinfo=tzinfo, - **kwargs - ) - - def __init__( - self, year, month, day, hour, minute, second, microsecond, tzinfo, trivia, raw - ): # type: (int, int, int, int, int, int, int, Optional[datetime.tzinfo], Trivia, str) -> None - super(DateTime, self).__init__(trivia) - - self._raw = raw - - @property - def discriminant(self): # type: () -> int - return 5 - - @property - def value(self): # type: () -> datetime - return self - - def as_string(self): # type: () -> str - return self._raw - - def __add__(self, other): - if PY38: - result = datetime( - self.year, - self.month, - self.day, - self.hour, - self.minute, - self.second, - self.microsecond, - self.tzinfo, - ).__add__(other) - else: - result = super(DateTime, self).__add__(other) - - return self._new(result) - - def __sub__(self, other): - if PY38: - result = datetime( - self.year, - self.month, - self.day, - self.hour, - self.minute, - self.second, - self.microsecond, - self.tzinfo, - ).__sub__(other) - else: - result = super(DateTime, self).__sub__(other) - - if isinstance(result, datetime): - result = self._new(result) - - return result - - def _new(self, result): - raw = result.isoformat() - - return DateTime( - result.year, - result.month, - result.day, - result.hour, - result.minute, - result.second, - result.microsecond, - result.tzinfo, - self._trivia, - raw, - ) - - def _getstate(self, protocol=3): - return ( - self.year, - self.month, - self.day, - self.hour, - self.minute, - self.second, - self.microsecond, - self.tzinfo, - self._trivia, - self._raw, - ) - - -class Date(Item, date): - """ - A date literal. - """ - - def __new__(cls, year, month, day, *_): # type: (int, int, int, Any) -> date - return date.__new__(cls, year, month, day) - - def __init__( - self, year, month, day, trivia, raw - ): # type: (int, int, int, Trivia, str) -> None - super(Date, self).__init__(trivia) - - self._raw = raw - - @property - def discriminant(self): # type: () -> int - return 6 - - @property - def value(self): # type: () -> date - return self - - def as_string(self): # type: () -> str - return self._raw - - def __add__(self, other): - if PY38: - result = date(self.year, self.month, self.day).__add__(other) - else: - result = super(Date, self).__add__(other) - - return self._new(result) - - def __sub__(self, other): - if PY38: - result = date(self.year, self.month, self.day).__sub__(other) - else: - result = super(Date, self).__sub__(other) - - if isinstance(result, date): - result = self._new(result) - - return result - - def _new(self, result): - raw = result.isoformat() - - return Date(result.year, result.month, result.day, self._trivia, raw) - - def _getstate(self, protocol=3): - return (self.year, self.month, self.day, self._trivia, self._raw) - - -class Time(Item, time): - """ - A time literal. 
- """ - - def __new__( - cls, hour, minute, second, microsecond, tzinfo, *_ - ): # type: (int, int, int, int, Optional[datetime.tzinfo], Any) -> time - return time.__new__(cls, hour, minute, second, microsecond, tzinfo) - - def __init__( - self, hour, minute, second, microsecond, tzinfo, trivia, raw - ): # type: (int, int, int, int, Optional[datetime.tzinfo], Trivia, str) -> None - super(Time, self).__init__(trivia) - - self._raw = raw - - @property - def discriminant(self): # type: () -> int - return 7 - - @property - def value(self): # type: () -> time - return self - - def as_string(self): # type: () -> str - return self._raw - - def _getstate(self, protocol=3): - return ( - self.hour, - self.minute, - self.second, - self.microsecond, - self.tzinfo, - self._trivia, - self._raw, - ) - - -class Array(Item, list): - """ - An array literal - """ - - def __init__( - self, value, trivia, multiline=False - ): # type: (list, Trivia, bool) -> None - super(Array, self).__init__(trivia) - - list.__init__( - self, [v.value for v in value if not isinstance(v, (Whitespace, Comment))] - ) - - self._value = value - self._multiline = multiline - - @property - def discriminant(self): # type: () -> int - return 8 - - @property - def value(self): # type: () -> list - return self - - def multiline(self, multiline): # type: (bool) -> self - self._multiline = multiline - - return self - - def as_string(self): # type: () -> str - if not self._multiline: - return "[{}]".format("".join(v.as_string() for v in self._value)) - - s = "[\n" + self.trivia.indent + " " * 4 - s += (",\n" + self.trivia.indent + " " * 4).join( - v.as_string() for v in self._value if not isinstance(v, Whitespace) - ) - s += ",\n" - s += "]" - - return s - - def append(self, _item): # type: (Any) -> None - if self._value: - self._value.append(Whitespace(", ")) - - it = item(_item) - super(Array, self).append(it.value) - - self._value.append(it) - - if not PY2: - - def clear(self): - super(Array, self).clear() - - self._value.clear() - - def __iadd__(self, other): # type: (list) -> Array - if not isinstance(other, list): - return NotImplemented - - for v in other: - self.append(v) - - return self - - def __delitem__(self, key): - super(Array, self).__delitem__(key) - - j = 0 if key >= 0 else -1 - for i, v in enumerate(self._value if key >= 0 else reversed(self._value)): - if key < 0: - i = -i - 1 - - if isinstance(v, (Comment, Whitespace)): - continue - - if j == key: - del self._value[i] - - if i < 0 and abs(i) > len(self._value): - i += 1 - - if i < len(self._value) - 1 and isinstance(self._value[i], Whitespace): - del self._value[i] - - break - - j += 1 if key >= 0 else -1 - - def __str__(self): - return str( - [v.value for v in self._value if not isinstance(v, (Whitespace, Comment))] - ) - - def __repr__(self): - return str(self) - - def _getstate(self, protocol=3): - return self._value, self._trivia - - -class Table(Item, dict): - """ - A table literal. 
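Array above keeps two parallel representations: the list base class holds only the Python values, while _value retains every token, including the Whitespace(", ") separators that append() inserts, which is what lets as_string() reproduce the exact source layout. A simplified sketch of that dual-view design (RoundTripArray is illustrative only):

class RoundTripArray(list):
    def __init__(self):
        super().__init__()
        self._tokens = []  # values and separators, kept in source order

    def append(self, value):
        if self._tokens:
            self._tokens.append(", ")  # the separator travels with the layout
        super().append(value)
        self._tokens.append(str(value))

    def as_string(self):
        return "[" + "".join(self._tokens) + "]"

a = RoundTripArray()
a.append(1)
a.append(2)
assert list(a) == [1, 2]          # the value view never sees the whitespace
assert a.as_string() == "[1, 2]"  # the token view reproduces the layout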
- """ - - def __init__( - self, - value, - trivia, - is_aot_element, - is_super_table=False, - name=None, - display_name=None, - ): # type: (tomlkit.container.Container, Trivia, bool, bool, Optional[str], Optional[str]) -> None - super(Table, self).__init__(trivia) - - self.name = name - self.display_name = display_name - self._value = value - self._is_aot_element = is_aot_element - self._is_super_table = is_super_table - - for k, v in self._value.body: - if k is not None: - super(Table, self).__setitem__(k.key, v) - - @property - def value(self): # type: () -> tomlkit.container.Container - return self._value - - @property - def discriminant(self): # type: () -> int - return 9 - - def add(self, key, item=None): # type: (Union[Key, Item, str], Any) -> Item - if item is None: - if not isinstance(key, (Comment, Whitespace)): - raise ValueError( - "Non comment/whitespace items must have an associated key" - ) - - key, item = None, key - - return self.append(key, item) - - def append(self, key, _item): # type: (Union[Key, str], Any) -> Table - """ - Appends a (key, item) to the table. - """ - if not isinstance(_item, Item): - _item = item(_item) - - self._value.append(key, _item) - - if isinstance(key, Key): - key = key.key - - if key is not None: - super(Table, self).__setitem__(key, _item) - - m = re.match("(?s)^[^ ]*([ ]+).*$", self._trivia.indent) - if not m: - return self - - indent = m.group(1) - - if not isinstance(_item, Whitespace): - m = re.match("(?s)^([^ ]*)(.*)$", _item.trivia.indent) - if not m: - _item.trivia.indent = indent - else: - _item.trivia.indent = m.group(1) + indent + m.group(2) - - return self - - def raw_append(self, key, _item): # type: (Union[Key, str], Any) -> Table - if not isinstance(_item, Item): - _item = item(_item) - - self._value.append(key, _item) - - if isinstance(key, Key): - key = key.key - - if key is not None: - super(Table, self).__setitem__(key, _item) - - return self - - def remove(self, key): # type: (Union[Key, str]) -> Table - self._value.remove(key) - - if isinstance(key, Key): - key = key.key - - if key is not None: - super(Table, self).__delitem__(key) - - return self - - def is_aot_element(self): # type: () -> bool - return self._is_aot_element - - def is_super_table(self): # type: () -> bool - return self._is_super_table - - def as_string(self): # type: () -> str - return self._value.as_string() - - # Helpers - - def indent(self, indent): # type: (int) -> Table - super(Table, self).indent(indent) - - m = re.match("(?s)^[^ ]*([ ]+).*$", self._trivia.indent) - if not m: - indent = "" - else: - indent = m.group(1) - - for k, item in self._value.body: - if not isinstance(item, Whitespace): - item.trivia.indent = indent + item.trivia.indent - - return self - - def keys(self): # type: () -> Generator[str] - for k in self._value.keys(): - yield k - - def values(self): # type: () -> Generator[Item] - for v in self._value.values(): - yield v - - def items(self): # type: () -> Generator[Item] - for k, v in self._value.items(): - yield k, v - - def update(self, other): # type: (Dict) -> None - for k, v in other.items(): - self[k] = v - - def get(self, key, default=None): # type: (Any, Optional[Any]) -> Any - return self._value.get(key, default) - - def __contains__(self, key): # type: (Union[Key, str]) -> bool - return key in self._value - - def __getitem__(self, key): # type: (Union[Key, str]) -> Item - return self._value[key] - - def __setitem__(self, key, value): # type: (Union[Key, str], Any) -> None - if not isinstance(value, Item): - value = 
item(value) - - self._value[key] = value - - if key is not None: - super(Table, self).__setitem__(key, value) - - m = re.match("(?s)^[^ ]*([ ]+).*$", self._trivia.indent) - if not m: - return - - indent = m.group(1) - - if not isinstance(value, Whitespace): - m = re.match("(?s)^([^ ]*)(.*)$", value.trivia.indent) - if not m: - value.trivia.indent = indent - else: - value.trivia.indent = m.group(1) + indent + m.group(2) - - def __delitem__(self, key): # type: (Union[Key, str]) -> None - self.remove(key) - - def __repr__(self): - return super(Table, self).__repr__() - - def __str__(self): - return str(self.value) - - def _getstate(self, protocol=3): - return ( - self._value, - self._trivia, - self._is_aot_element, - self._is_super_table, - self.name, - self.display_name, - ) - - -class InlineTable(Item, dict): - """ - An inline table literal. - """ - - def __init__( - self, value, trivia, new=False - ): # type: (tomlkit.container.Container, Trivia, bool) -> None - super(InlineTable, self).__init__(trivia) - - self._value = value - self._new = new - - for k, v in self._value.body: - if k is not None: - super(InlineTable, self).__setitem__(k.key, v) - - @property - def discriminant(self): # type: () -> int - return 10 - - @property - def value(self): # type: () -> Dict - return self._value - - def append(self, key, _item): # type: (Union[Key, str], Any) -> InlineTable - """ - Appends a (key, item) to the table. - """ - if not isinstance(_item, Item): - _item = item(_item) - - if not isinstance(_item, (Whitespace, Comment)): - if not _item.trivia.indent and len(self._value) > 0 and not self._new: - _item.trivia.indent = " " - if _item.trivia.comment: - _item.trivia.comment = "" - - self._value.append(key, _item) - - if isinstance(key, Key): - key = key.key - - if key is not None: - super(InlineTable, self).__setitem__(key, _item) - - return self - - def remove(self, key): # type: (Union[Key, str]) -> InlineTable - self._value.remove(key) - - if isinstance(key, Key): - key = key.key - - if key is not None: - super(InlineTable, self).__delitem__(key) - - return self - - def as_string(self): # type: () -> str - buf = "{" - for i, (k, v) in enumerate(self._value.body): - if k is None: - if i == len(self._value.body) - 1: - if self._new: - buf = buf.rstrip(", ") - else: - buf = buf.rstrip(",") - - buf += v.as_string() - - continue - - buf += "{}{}{}{}{}{}".format( - v.trivia.indent, - k.as_string() + ("." 
if k.is_dotted() else ""), - k.sep, - v.as_string(), - v.trivia.comment, - v.trivia.trail.replace("\n", ""), - ) - - if i != len(self._value.body) - 1: - buf += "," - if self._new: - buf += " " - - buf += "}" - - return buf - - def keys(self): # type: () -> Generator[str] - for k in self._value.keys(): - yield k - - def values(self): # type: () -> Generator[Item] - for v in self._value.values(): - yield v - - def items(self): # type: () -> Generator[Item] - for k, v in self._value.items(): - yield k, v - - def update(self, other): # type: (Dict) -> None - for k, v in other.items(): - self[k] = v - - def get(self, key, default=None): # type: (Any, Optional[Any]) -> Any - return self._value.get(key, default) - - def __contains__(self, key): # type: (Union[Key, str]) -> bool - return key in self._value - - def __getitem__(self, key): # type: (Union[Key, str]) -> Item - return self._value[key] - - def __setitem__(self, key, value): # type: (Union[Key, str], Any) -> None - if not isinstance(value, Item): - value = item(value) - - self._value[key] = value - - if key is not None: - super(InlineTable, self).__setitem__(key, value) - if value.trivia.comment: - value.trivia.comment = "" - - m = re.match("(?s)^[^ ]*([ ]+).*$", self._trivia.indent) - if not m: - return - - indent = m.group(1) - - if not isinstance(value, Whitespace): - m = re.match("(?s)^([^ ]*)(.*)$", value.trivia.indent) - if not m: - value.trivia.indent = indent - else: - value.trivia.indent = m.group(1) + indent + m.group(2) - - def __delitem__(self, key): # type: (Union[Key, str]) -> None - self.remove(key) - - def __repr__(self): - return super(InlineTable, self).__repr__() - - def _getstate(self, protocol=3): - return (self._value, self._trivia) - - -class String(unicode, Item): - """ - A string literal. 
- """
-
-    def __new__(cls, t, value, original, trivia):
-        return super(String, cls).__new__(cls, value)
-
-    def __init__(
-        self, t, _, original, trivia
-    ):  # type: (StringType, str, original, Trivia) -> None
-        super(String, self).__init__(trivia)
-
-        self._t = t
-        self._original = original
-
-    @property
-    def discriminant(self):  # type: () -> int
-        return 11
-
-    @property
-    def value(self):  # type: () -> str
-        return self
-
-    def as_string(self):  # type: () -> str
-        return "{}{}{}".format(self._t.value, decode(self._original), self._t.value)
-
-    def __add__(self, other):
-        result = super(String, self).__add__(other)
-
-        return self._new(result)
-
-    def __sub__(self, other):
-        result = super(String, self).__sub__(other)
-
-        return self._new(result)
-
-    def _new(self, result):
-        return String(self._t, result, result, self._trivia)
-
-    def _getstate(self, protocol=3):
-        return self._t, unicode(self), self._original, self._trivia
-
-
-class AoT(Item, list):
-    """
-    An array of table literal
-    """
-
-    def __init__(
-        self, body, name=None, parsed=False
-    ):  # type: (List[Table], Optional[str], bool) -> None
-        self.name = name
-        self._body = []
-        self._parsed = parsed
-
-        super(AoT, self).__init__(Trivia(trail=""))
-
-        for table in body:
-            self.append(table)
-
-    @property
-    def body(self):  # type: () -> List[Table]
-        return self._body
-
-    @property
-    def discriminant(self):  # type: () -> int
-        return 12
-
-    @property
-    def value(self):  # type: () -> List[Dict[Any, Any]]
-        return [v.value for v in self._body]
-
-    def append(self, table):  # type: (Table) -> Table
-        m = re.match("(?s)^[^ ]*([ ]+).*$", self._trivia.indent)
-        if m:
-            indent = m.group(1)
-
-            m = re.match("(?s)^([^ ]*)(.*)$", table.trivia.indent)
-            if not m:
-                table.trivia.indent = indent
-            else:
-                table.trivia.indent = m.group(1) + indent + m.group(2)
-
-        if not self._parsed and "\n" not in table.trivia.indent and self._body:
-            table.trivia.indent = "\n" + table.trivia.indent
-
-        self._body.append(table)
-
-        super(AoT, self).append(table)
-
-        return table
-
-    def as_string(self):  # type: () -> str
-        b = ""
-        for table in self._body:
-            b += table.as_string()
-
-        return b
-
-    def __repr__(self):  # type: () -> str
-        return "<AoT {}>".format(self.value)
-
-    def _getstate(self, protocol=3):
-        return self._body, self.name, self._parsed
-
-
-class Null(Item):
-    """
-    A null item.
- """ - - def __init__(self): # type: () -> None - pass - - @property - def discriminant(self): # type: () -> int - return -1 - - @property - def value(self): # type: () -> None - return None - - def as_string(self): # type: () -> str - return "" - - def _getstate(self, protocol=3): - return tuple() diff --git a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/parser.py b/conda_lock/_vendor/poetry/core/_vendor/tomlkit/parser.py deleted file mode 100644 index 49929954..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/parser.py +++ /dev/null @@ -1,1299 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import unicode_literals - -import re -import string - -from typing import Any -from typing import Generator -from typing import List -from typing import Optional -from typing import Tuple -from typing import Union - -from ._compat import chr -from ._compat import decode -from ._utils import RFC_3339_LOOSE -from ._utils import _escaped -from ._utils import parse_rfc3339 -from .container import Container -from .exceptions import EmptyKeyError -from .exceptions import EmptyTableNameError -from .exceptions import InternalParserError -from .exceptions import InvalidCharInStringError -from .exceptions import InvalidControlChar -from .exceptions import InvalidDateError -from .exceptions import InvalidDateTimeError -from .exceptions import InvalidNumberError -from .exceptions import InvalidTimeError -from .exceptions import InvalidUnicodeValueError -from .exceptions import ParseError -from .exceptions import UnexpectedCharError -from .exceptions import UnexpectedEofError -from .items import AoT -from .items import Array -from .items import Bool -from .items import BoolType -from .items import Comment -from .items import Date -from .items import DateTime -from .items import Float -from .items import InlineTable -from .items import Integer -from .items import Item -from .items import Key -from .items import KeyType -from .items import Null -from .items import String -from .items import StringType -from .items import Table -from .items import Time -from .items import Trivia -from .items import Whitespace -from .source import Source -from .toml_char import TOMLChar -from .toml_document import TOMLDocument - - -CTRL_I = 0x09 # Tab -CTRL_J = 0x0A # Line feed -CTRL_M = 0x0D # Carriage return -CTRL_CHAR_LIMIT = 0x1F -CHR_DEL = 0x7F - - -class Parser: - """ - Parser for TOML documents. - """ - - def __init__(self, string): # type: (str) -> None - # Input to parse - self._src = Source(decode(string)) - - self._aot_stack = [] - - @property - def _state(self): - return self._src.state - - @property - def _idx(self): - return self._src.idx - - @property - def _current(self): - return self._src.current - - @property - def _marker(self): - return self._src.marker - - def extract(self): # type: () -> str - """ - Extracts the value between marker and index - """ - return self._src.extract() - - def inc(self, exception=None): # type: (Optional[ParseError.__class__]) -> bool - """ - Increments the parser if the end of the input has not been reached. - Returns whether or not it was able to advance. - """ - return self._src.inc(exception=exception) - - def inc_n(self, n, exception=None): # type: (int, Optional[ParseError]) -> bool - """ - Increments the parser by n characters - if the end of the input has not been reached. - """ - return self._src.inc_n(n=n, exception=exception) - - def consume(self, chars, min=0, max=-1): - """ - Consume chars until min/max is satisfied is valid. 
- """ - return self._src.consume(chars=chars, min=min, max=max) - - def end(self): # type: () -> bool - """ - Returns True if the parser has reached the end of the input. - """ - return self._src.end() - - def mark(self): # type: () -> None - """ - Sets the marker to the index's current position - """ - self._src.mark() - - def parse_error(self, exception=ParseError, *args): - """ - Creates a generic "parse error" at the current position. - """ - return self._src.parse_error(exception, *args) - - def parse(self): # type: () -> TOMLDocument - body = TOMLDocument(True) - - # Take all keyvals outside of tables/AoT's. - while not self.end(): - # Break out if a table is found - if self._current == "[": - break - - # Otherwise, take and append one KV - item = self._parse_item() - if not item: - break - - key, value = item - if key is not None and key.is_dotted(): - # We actually have a table - self._handle_dotted_key(body, key, value) - elif not self._merge_ws(value, body): - body.append(key, value) - - self.mark() - - while not self.end(): - key, value = self._parse_table() - if isinstance(value, Table) and value.is_aot_element(): - # This is just the first table in an AoT. Parse the rest of the array - # along with it. - value = self._parse_aot(value, key.key) - - body.append(key, value) - - body.parsing(False) - - return body - - def _merge_ws(self, item, container): # type: (Item, Container) -> bool - """ - Merges the given Item with the last one currently in the given Container if - both are whitespace items. - - Returns True if the items were merged. - """ - last = container.last_item() - if not last: - return False - - if not isinstance(item, Whitespace) or not isinstance(last, Whitespace): - return False - - start = self._idx - (len(last.s) + len(item.s)) - container.body[-1] = ( - container.body[-1][0], - Whitespace(self._src[start : self._idx]), - ) - - return True - - def _is_child(self, parent, child): # type: (str, str) -> bool - """ - Returns whether a key is strictly a child of another key. - AoT siblings are not considered children of one another. - """ - parent_parts = tuple(self._split_table_name(parent)) - child_parts = tuple(self._split_table_name(child)) - - if parent_parts == child_parts: - return False - - return parent_parts == child_parts[: len(parent_parts)] - - def _split_table_name(self, name): # type: (str) -> Generator[Key] - in_name = False - current = "" - t = KeyType.Bare - parts = 0 - for c in name: - c = TOMLChar(c) - - if c == ".": - if in_name: - current += c - continue - - if not current: - raise self.parse_error() - - yield Key(current.strip(), t=t, sep="", original=current) - - parts += 1 - - current = "" - t = KeyType.Bare - continue - elif c in {"'", '"'}: - if in_name: - if ( - t == KeyType.Literal - and c == '"' - or t == KeyType.Basic - and c == "'" - ): - current += c - continue - - if c != t.value: - raise self.parse_error() - - in_name = False - else: - if ( - current.strip() - and TOMLChar(current[-1]).is_spaces() - and not parts - ): - raise self.parse_error() - - in_name = True - t = KeyType.Literal if c == "'" else KeyType.Basic - - continue - elif in_name or c.is_bare_key_char(): - current += c - elif c.is_spaces(): - # A space is only valid at this point - # if it's in between parts. 
- # We store it for now and will check - # later if it's valid - current += c - continue - else: - raise self.parse_error() - - if current.strip(): - yield Key(current.strip(), t=t, sep="", original=current) - - def _parse_item(self): # type: () -> Optional[Tuple[Optional[Key], Item]] - """ - Attempts to parse the next item and returns it, along with its key - if the item is value-like. - """ - self.mark() - with self._state as state: - while True: - c = self._current - if c == "\n": - # Found a newline; Return all whitespace found up to this point. - self.inc() - - return None, Whitespace(self.extract()) - elif c in " \t\r": - # Skip whitespace. - if not self.inc(): - return None, Whitespace(self.extract()) - elif c == "#": - # Found a comment, parse it - indent = self.extract() - cws, comment, trail = self._parse_comment_trail() - - return None, Comment(Trivia(indent, cws, comment, trail)) - elif c == "[": - # Found a table, delegate to the calling function. - return - else: - # Begining of a KV pair. - # Return to beginning of whitespace so it gets included - # as indentation for the KV about to be parsed. - state.restore = True - break - - return self._parse_key_value(True) - - def _parse_comment_trail(self): # type: () -> Tuple[str, str, str] - """ - Returns (comment_ws, comment, trail) - If there is no comment, comment_ws and comment will - simply be empty. - """ - if self.end(): - return "", "", "" - - comment = "" - comment_ws = "" - self.mark() - - while True: - c = self._current - - if c == "\n": - break - elif c == "#": - comment_ws = self.extract() - - self.mark() - self.inc() # Skip # - - # The comment itself - while not self.end() and not self._current.is_nl(): - code = ord(self._current) - if code == CHR_DEL or code <= CTRL_CHAR_LIMIT and code != CTRL_I: - raise self.parse_error(InvalidControlChar, code, "comments") - - if not self.inc(): - break - - comment = self.extract() - self.mark() - - break - elif c in " \t\r": - self.inc() - else: - raise self.parse_error(UnexpectedCharError, c) - - if self.end(): - break - - while self._current.is_spaces() and self.inc(): - pass - - if self._current == "\r": - self.inc() - - if self._current == "\n": - self.inc() - - trail = "" - if self._idx != self._marker or self._current.is_ws(): - trail = self.extract() - - return comment_ws, comment, trail - - def _parse_key_value(self, parse_comment=False): # type: (bool) -> (Key, Item) - # Leading indent - self.mark() - - while self._current.is_spaces() and self.inc(): - pass - - indent = self.extract() - - # Key - key = self._parse_key() - - self.mark() - - found_equals = self._current == "=" - while self._current.is_kv_sep() and self.inc(): - if self._current == "=": - if found_equals: - raise self.parse_error(UnexpectedCharError, "=") - else: - found_equals = True - pass - - if not key.sep: - key.sep = self.extract() - else: - key.sep += self.extract() - - # Value - val = self._parse_value() - # Comment - if parse_comment: - cws, comment, trail = self._parse_comment_trail() - meta = val.trivia - if not meta.comment_ws: - meta.comment_ws = cws - - meta.comment = comment - meta.trail = trail - else: - val.trivia.trail = "" - - val.trivia.indent = indent - - return key, val - - def _parse_key(self): # type: () -> Key - """ - Parses a Key at the current position; - WS before the key must be exhausted first at the callsite. 
- """ - if self._current in "\"'": - return self._parse_quoted_key() - else: - return self._parse_bare_key() - - def _parse_quoted_key(self): # type: () -> Key - """ - Parses a key enclosed in either single or double quotes. - """ - quote_style = self._current - key_type = None - dotted = False - for t in KeyType: - if t.value == quote_style: - key_type = t - break - - if key_type is None: - raise RuntimeError("Should not have entered _parse_quoted_key()") - - self.inc() - self.mark() - - while self._current != quote_style and self.inc(): - pass - - key = self.extract() - - if self._current == ".": - self.inc() - dotted = True - key += "." + self._parse_key().as_string() - key_type = KeyType.Bare - else: - self.inc() - - return Key(key, key_type, "", dotted) - - def _parse_bare_key(self): # type: () -> Key - """ - Parses a bare key. - """ - key_type = None - dotted = False - - self.mark() - while ( - self._current.is_bare_key_char() or self._current.is_spaces() - ) and self.inc(): - pass - - original = self.extract() - key = original.strip() - if not key: - # Empty key - raise self.parse_error(ParseError, "Empty key found") - - if " " in key: - # Bare key with spaces in it - raise self.parse_error(ParseError, 'Invalid key "{}"'.format(key)) - - if self._current == ".": - self.inc() - dotted = True - original += "." + self._parse_key().as_string() - key = original.strip() - key_type = KeyType.Bare - - return Key(key, key_type, "", dotted, original=original) - - def _handle_dotted_key( - self, container, key, value - ): # type: (Union[Container, Table], Key, Any) -> None - names = tuple(self._split_table_name(key.as_string())) - name = names[0] - name._dotted = True - if name in container: - if not isinstance(value, Table): - table = Table(Container(True), Trivia(), False, is_super_table=True) - _table = table - for i, _name in enumerate(names[1:]): - if i == len(names) - 2: - _name.sep = key.sep - - _table.append(_name, value) - else: - _name._dotted = True - _table.append( - _name, - Table( - Container(True), - Trivia(), - False, - is_super_table=i < len(names) - 2, - ), - ) - - _table = _table[_name] - - value = table - - container.append(name, value) - - return - else: - table = Table(Container(True), Trivia(), False, is_super_table=True) - if isinstance(container, Table): - container.raw_append(name, table) - else: - container.append(name, table) - - for i, _name in enumerate(names[1:]): - if i == len(names) - 2: - _name.sep = key.sep - - table.append(_name, value) - else: - _name._dotted = True - if _name in table.value: - table = table.value[_name] - else: - table.append( - _name, - Table( - Container(True), - Trivia(), - False, - is_super_table=i < len(names) - 2, - ), - ) - - table = table[_name] - - def _parse_value(self): # type: () -> Item - """ - Attempts to parse a value at the current position. 
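_handle_dotted_key() above rewrites a dotted key such as a.b.c = 1 into nested tables before insertion, with only the last segment keeping the original key separator. Stripped of the formatting bookkeeping, the nesting step amounts to this (plain dicts; insert_dotted is a hypothetical helper name):

def insert_dotted(doc, dotted_key, value):
    parts = dotted_key.split(".")
    for part in parts[:-1]:
        doc = doc.setdefault(part, {})  # create the intermediate tables
    doc[parts[-1]] = value

doc = {}
insert_dotted(doc, "tool.conda-lock.channels", ["conda-forge"])
assert doc == {"tool": {"conda-lock": {"channels": ["conda-forge"]}}}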
- """ - self.mark() - c = self._current - trivia = Trivia() - - if c == StringType.SLB.value: - return self._parse_basic_string() - elif c == StringType.SLL.value: - return self._parse_literal_string() - elif c == BoolType.TRUE.value[0]: - return self._parse_true() - elif c == BoolType.FALSE.value[0]: - return self._parse_false() - elif c == "[": - return self._parse_array() - elif c == "{": - return self._parse_inline_table() - elif c in "+-" or self._peek(4) in { - "+inf", - "-inf", - "inf", - "+nan", - "-nan", - "nan", - }: - # Number - while self._current not in " \t\n\r#,]}" and self.inc(): - pass - - raw = self.extract() - - item = self._parse_number(raw, trivia) - if item is not None: - return item - - raise self.parse_error(InvalidNumberError) - elif c in string.digits: - # Integer, Float, Date, Time or DateTime - while self._current not in " \t\n\r#,]}" and self.inc(): - pass - - raw = self.extract() - - m = RFC_3339_LOOSE.match(raw) - if m: - if m.group(1) and m.group(5): - # datetime - try: - dt = parse_rfc3339(raw) - return DateTime( - dt.year, - dt.month, - dt.day, - dt.hour, - dt.minute, - dt.second, - dt.microsecond, - dt.tzinfo, - trivia, - raw, - ) - except ValueError: - raise self.parse_error(InvalidDateTimeError) - - if m.group(1): - try: - dt = parse_rfc3339(raw) - date = Date(dt.year, dt.month, dt.day, trivia, raw) - self.mark() - while self._current not in "\t\n\r#,]}" and self.inc(): - pass - - time_raw = self.extract() - if not time_raw.strip(): - trivia.comment_ws = time_raw - return date - - dt = parse_rfc3339(raw + time_raw) - return DateTime( - dt.year, - dt.month, - dt.day, - dt.hour, - dt.minute, - dt.second, - dt.microsecond, - dt.tzinfo, - trivia, - raw + time_raw, - ) - except ValueError: - raise self.parse_error(InvalidDateError) - - if m.group(5): - try: - t = parse_rfc3339(raw) - return Time( - t.hour, - t.minute, - t.second, - t.microsecond, - t.tzinfo, - trivia, - raw, - ) - except ValueError: - raise self.parse_error(InvalidTimeError) - - item = self._parse_number(raw, trivia) - if item is not None: - return item - - raise self.parse_error(InvalidNumberError) - else: - raise self.parse_error(UnexpectedCharError, c) - - def _parse_true(self): - return self._parse_bool(BoolType.TRUE) - - def _parse_false(self): - return self._parse_bool(BoolType.FALSE) - - def _parse_bool(self, style): # type: (BoolType) -> Bool - with self._state: - style = BoolType(style) - - # only keep parsing for bool if the characters match the style - # try consuming rest of chars in style - for c in style: - self.consume(c, min=1, max=1) - - return Bool(style, Trivia()) - - def _parse_array(self): # type: () -> Array - # Consume opening bracket, EOF here is an issue (middle of array) - self.inc(exception=UnexpectedEofError) - - elems = [] # type: List[Item] - prev_value = None - while True: - # consume whitespace - mark = self._idx - self.consume(TOMLChar.SPACES) - newline = self.consume(TOMLChar.NL) - indent = self._src[mark : self._idx] - if newline: - elems.append(Whitespace(indent)) - continue - - # consume comment - if self._current == "#": - cws, comment, trail = self._parse_comment_trail() - elems.append(Comment(Trivia(indent, cws, comment, trail))) - continue - - # consume indent - if indent: - elems.append(Whitespace(indent)) - continue - - # consume value - if not prev_value: - try: - elems.append(self._parse_value()) - prev_value = True - continue - except UnexpectedCharError: - pass - - # consume comma - if prev_value and self._current == ",": - 
self.inc(exception=UnexpectedEofError) - elems.append(Whitespace(",")) - prev_value = False - continue - - # consume closing bracket - if self._current == "]": - # consume closing bracket, EOF here doesn't matter - self.inc() - break - - raise self.parse_error(UnexpectedCharError, self._current) - - try: - res = Array(elems, Trivia()) - except ValueError: - pass - else: - return res - - def _parse_inline_table(self): # type: () -> InlineTable - # consume opening bracket, EOF here is an issue (middle of array) - self.inc(exception=UnexpectedEofError) - - elems = Container(True) - trailing_comma = None - while True: - # consume leading whitespace - mark = self._idx - self.consume(TOMLChar.SPACES) - raw = self._src[mark : self._idx] - if raw: - elems.add(Whitespace(raw)) - - if not trailing_comma: - # None: empty inline table - # False: previous key-value pair was not followed by a comma - if self._current == "}": - # consume closing bracket, EOF here doesn't matter - self.inc() - break - - if ( - trailing_comma is False - or trailing_comma is None - and self._current == "," - ): - # Either the previous key-value pair was not followed by a comma - # or the table has an unexpected leading comma. - raise self.parse_error(UnexpectedCharError, self._current) - else: - # True: previous key-value pair was followed by a comma - if self._current == "}" or self._current == ",": - raise self.parse_error(UnexpectedCharError, self._current) - - key, val = self._parse_key_value(False) - if key.is_dotted(): - self._handle_dotted_key(elems, key, val) - else: - elems.add(key, val) - - # consume trailing whitespace - mark = self._idx - self.consume(TOMLChar.SPACES) - raw = self._src[mark : self._idx] - if raw: - elems.add(Whitespace(raw)) - - # consume trailing comma - trailing_comma = self._current == "," - if trailing_comma: - # consume closing bracket, EOF here is an issue (middle of inline table) - self.inc(exception=UnexpectedEofError) - - return InlineTable(elems, Trivia()) - - def _parse_number(self, raw, trivia): # type: (str, Trivia) -> Optional[Item] - # Leading zeros are not allowed - sign = "" - if raw.startswith(("+", "-")): - sign = raw[0] - raw = raw[1:] - - if ( - len(raw) > 1 - and raw.startswith("0") - and not raw.startswith(("0.", "0o", "0x", "0b", "0e")) - ): - return - - if raw.startswith(("0o", "0x", "0b")) and sign: - return - - digits = "[0-9]" - base = 10 - if raw.startswith("0b"): - digits = "[01]" - base = 2 - elif raw.startswith("0o"): - digits = "[0-7]" - base = 8 - elif raw.startswith("0x"): - digits = "[0-9a-f]" - base = 16 - - # Underscores should be surrounded by digits - clean = re.sub("(?i)(?<={})_(?={})".format(digits, digits), "", raw) - - if "_" in clean: - return - - if clean.endswith("."): - return - - try: - return Integer(int(sign + clean, base), trivia, sign + raw) - except ValueError: - try: - return Float(float(sign + clean), trivia, sign + raw) - except ValueError: - return - - def _parse_literal_string(self): # type: () -> String - with self._state: - return self._parse_string(StringType.SLL) - - def _parse_basic_string(self): # type: () -> String - with self._state: - return self._parse_string(StringType.SLB) - - def _parse_escaped_char(self, multiline): - if multiline and self._current.is_ws(): - # When the last non-whitespace character on a line is - # a \, it will be trimmed along with all whitespace - # (including newlines) up to the next non-whitespace - # character or closing delimiter. 
- # """\ - # hello \ - # world""" - tmp = "" - while self._current.is_ws(): - tmp += self._current - # consume the whitespace, EOF here is an issue - # (middle of string) - self.inc(exception=UnexpectedEofError) - continue - - # the escape followed by whitespace must have a newline - # before any other chars - if "\n" not in tmp: - raise self.parse_error(InvalidCharInStringError, self._current) - - return "" - - if self._current in _escaped: - c = _escaped[self._current] - - # consume this char, EOF here is an issue (middle of string) - self.inc(exception=UnexpectedEofError) - - return c - - if self._current in {"u", "U"}: - # this needs to be a unicode - u, ue = self._peek_unicode(self._current == "U") - if u is not None: - # consume the U char and the unicode value - self.inc_n(len(ue) + 1) - - return u - - raise self.parse_error(InvalidUnicodeValueError) - - raise self.parse_error(InvalidCharInStringError, self._current) - - def _parse_string(self, delim): # type: (StringType) -> String - # only keep parsing for string if the current character matches the delim - if self._current != delim.unit: - raise self.parse_error( - InternalParserError, - "Invalid character for string type {}".format(delim), - ) - - # consume the opening/first delim, EOF here is an issue - # (middle of string or middle of delim) - self.inc(exception=UnexpectedEofError) - - if self._current == delim.unit: - # consume the closing/second delim, we do not care if EOF occurs as - # that would simply imply an empty single line string - if not self.inc() or self._current != delim.unit: - # Empty string - return String(delim, "", "", Trivia()) - - # consume the third delim, EOF here is an issue (middle of string) - self.inc(exception=UnexpectedEofError) - - delim = delim.toggle() # convert delim to multi delim - - self.mark() # to extract the original string with whitespace and all - value = "" - - # A newline immediately following the opening delimiter will be trimmed. - if delim.is_multiline() and self._current == "\n": - # consume the newline, EOF here is an issue (middle of string) - self.inc(exception=UnexpectedEofError) - - escaped = False # whether the previous key was ESCAPE - while True: - code = ord(self._current) - if ( - delim.is_singleline() - and not escaped - and (code == CHR_DEL or code <= CTRL_CHAR_LIMIT and code != CTRL_I) - ): - raise self.parse_error(InvalidControlChar, code, "strings") - elif ( - delim.is_multiline() - and not escaped - and ( - code == CHR_DEL - or code <= CTRL_CHAR_LIMIT - and code not in [CTRL_I, CTRL_J, CTRL_M] - ) - ): - raise self.parse_error(InvalidControlChar, code, "strings") - elif not escaped and self._current == delim.unit: - # try to process current as a closing delim - original = self.extract() - - close = "" - if delim.is_multiline(): - # Consume the delimiters to see if we are at the end of the string - close = "" - while self._current == delim.unit: - close += self._current - self.inc() - - if len(close) < 3: - # Not a triple quote, leave in result as-is. 
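_parse_escaped_char() above implements TOML's line-ending backslash: inside a multi-line basic string, a backslash at the end of a line swallows the newline and all whitespace up to the next visible character. Against the upstream tomlkit package that this vendored copy tracks, the behavior looks like this:

import tomlkit

doc = tomlkit.parse('s = """\\\nhello \\\n    world"""')
assert doc["s"] == "hello world"  # both whitespace runs were trimmed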
- # Adding back the characters we already consumed - value += close - continue - - if len(close) == 3: - # We are at the end of the string - return String(delim, value, original, Trivia()) - - if len(close) >= 6: - raise self.parse_error(InvalidCharInStringError, self._current) - - value += close[:-3] - original += close[:-3] - - return String(delim, value, original, Trivia()) - else: - # consume the closing delim, we do not care if EOF occurs as - # that would simply imply the end of self._src - self.inc() - - return String(delim, value, original, Trivia()) - elif delim.is_basic() and escaped: - # attempt to parse the current char as an escaped value, an exception - # is raised if this fails - value += self._parse_escaped_char(delim.is_multiline()) - - # no longer escaped - escaped = False - elif delim.is_basic() and self._current == "\\": - # the next char is being escaped - escaped = True - - # consume this char, EOF here is an issue (middle of string) - self.inc(exception=UnexpectedEofError) - else: - # this is either a literal string where we keep everything as is, - # or this is not a special escaped char in a basic string - value += self._current - - # consume this char, EOF here is an issue (middle of string) - self.inc(exception=UnexpectedEofError) - - def _parse_table( - self, parent_name=None, parent=None - ): # type: (Optional[str], Optional[Table]) -> Tuple[Key, Union[Table, AoT]] - """ - Parses a table element. - """ - if self._current != "[": - raise self.parse_error( - InternalParserError, "_parse_table() called on non-bracket character." - ) - - indent = self.extract() - self.inc() # Skip opening bracket - - if self.end(): - raise self.parse_error(UnexpectedEofError) - - is_aot = False - if self._current == "[": - if not self.inc(): - raise self.parse_error(UnexpectedEofError) - - is_aot = True - - # Consume any whitespace - self.mark() - while self._current.is_spaces() and self.inc(): - pass - - ws_prefix = self.extract() - - # Key - if self._current in [StringType.SLL.value, StringType.SLB.value]: - delimiter = ( - StringType.SLL - if self._current == StringType.SLL.value - else StringType.SLB - ) - name = self._parse_string(delimiter) - name = "{delimiter}{name}{delimiter}".format( - delimiter=delimiter.value, name=name - ) - - self.mark() - while self._current != "]" and self.inc(): - if self.end(): - raise self.parse_error(UnexpectedEofError) - - pass - - ws_suffix = self.extract() - name += ws_suffix - else: - self.mark() - while self._current != "]" and self.inc(): - if self.end(): - raise self.parse_error(UnexpectedEofError) - - pass - - name = self.extract() - - name = ws_prefix + name - - if not name.strip(): - raise self.parse_error(EmptyTableNameError) - - key = Key(name, sep="") - name_parts = tuple(self._split_table_name(name)) - if any(" " in part.key.strip() and part.is_bare() for part in name_parts): - raise self.parse_error(ParseError, 'Invalid table name "{}"'.format(name)) - - missing_table = False - if parent_name: - parent_name_parts = tuple(self._split_table_name(parent_name)) - else: - parent_name_parts = tuple() - - if len(name_parts) > len(parent_name_parts) + 1: - missing_table = True - - name_parts = name_parts[len(parent_name_parts) :] - - values = Container(True) - - self.inc() # Skip closing bracket - if is_aot: - # TODO: Verify close bracket - self.inc() - - cws, comment, trail = self._parse_comment_trail() - - result = Null() - table = Table( - values, - Trivia(indent, cws, comment, trail), - is_aot, - name=name, - display_name=name, - ) - 
- if len(name_parts) > 1: - if missing_table: - # Missing super table - # i.e. a table initialized like this: [foo.bar] - # without initializing [foo] - # - # So we have to create the parent tables - table = Table( - Container(True), - Trivia(indent, cws, comment, trail), - is_aot and name_parts[0].key in self._aot_stack, - is_super_table=True, - name=name_parts[0].key, - ) - - result = table - key = name_parts[0] - - for i, _name in enumerate(name_parts[1:]): - if _name in table: - child = table[_name] - else: - child = Table( - Container(True), - Trivia(indent, cws, comment, trail), - is_aot and i == len(name_parts[1:]) - 1, - is_super_table=i < len(name_parts[1:]) - 1, - name=_name.key, - display_name=name if i == len(name_parts[1:]) - 1 else None, - ) - - if is_aot and i == len(name_parts[1:]) - 1: - table.append(_name, AoT([child], name=table.name, parsed=True)) - else: - table.append(_name, child) - - table = child - values = table.value - else: - if name_parts: - key = name_parts[0] - - while not self.end(): - item = self._parse_item() - if item: - _key, item = item - if not self._merge_ws(item, values): - if _key is not None and _key.is_dotted(): - self._handle_dotted_key(table, _key, item) - else: - table.raw_append(_key, item) - else: - if self._current == "[": - is_aot_next, name_next = self._peek_table() - - if self._is_child(name, name_next): - key_next, table_next = self._parse_table(name, table) - - table.raw_append(key_next, table_next) - - # Picking up any sibling - while not self.end(): - _, name_next = self._peek_table() - - if not self._is_child(name, name_next): - break - - key_next, table_next = self._parse_table(name, table) - - table.raw_append(key_next, table_next) - - break - else: - raise self.parse_error( - InternalParserError, - "_parse_item() returned None on a non-bracket character.", - ) - - if isinstance(result, Null): - result = table - - if is_aot and (not self._aot_stack or name != self._aot_stack[-1]): - result = self._parse_aot(result, name) - - return key, result - - def _peek_table(self): # type: () -> Tuple[bool, str] - """ - Peeks ahead non-intrusively by cloning then restoring the - initial state of the parser. - - Returns the name of the table about to be parsed, - as well as whether it is part of an AoT. - """ - # we always want to restore after exiting this scope - with self._state(save_marker=True, restore=True): - if self._current != "[": - raise self.parse_error( - InternalParserError, - "_peek_table() entered on non-bracket character", - ) - - # AoT - self.inc() - is_aot = False - if self._current == "[": - self.inc() - is_aot = True - - self.mark() - - while self._current != "]" and self.inc(): - table_name = self.extract() - - return is_aot, table_name - - def _parse_aot(self, first, name_first): # type: (Table, str) -> AoT - """ - Parses all siblings of the provided table first and bundles them into - an AoT. - """ - payload = [first] - self._aot_stack.append(name_first) - while not self.end(): - is_aot_next, name_next = self._peek_table() - if is_aot_next and name_next == name_first: - _, table = self._parse_table(name_first) - payload.append(table) - else: - break - - self._aot_stack.pop() - - return AoT(payload, parsed=True) - - def _peek(self, n): # type: (int) -> str - """ - Peeks ahead n characters. - - n is the max number of characters that will be peeked. 
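The `missing_table` branch above is what lets `[foo.bar]` appear before (or without) `[foo]`: the parser synthesizes an invisible super table for the missing parent. The behavior is easy to observe through the upstream `tomlkit` package (assumed installed from PyPI):

    import tomlkit

    doc = tomlkit.parse("[foo.bar]\nbaz = 1\n")

    # "foo" exists even though it was never declared explicitly
    assert doc["foo"]["bar"]["baz"] == 1

    # the synthesized super table leaves no trace in the round-tripped output
    assert doc.as_string() == "[foo.bar]\nbaz = 1\n"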
- """ - # we always want to restore after exiting this scope - with self._state(restore=True): - buf = "" - for _ in range(n): - if self._current not in " \t\n\r#,]}": - buf += self._current - self.inc() - continue - - break - return buf - - def _peek_unicode( - self, is_long - ): # type: (bool) -> Tuple[Optional[str], Optional[str]] - """ - Peeks ahead non-intrusively by cloning then restoring the - initial state of the parser. - - Returns the unicode value is it's a valid one else None. - """ - # we always want to restore after exiting this scope - with self._state(save_marker=True, restore=True): - if self._current not in {"u", "U"}: - raise self.parse_error( - InternalParserError, "_peek_unicode() entered on non-unicode value" - ) - - self.inc() # Dropping prefix - self.mark() - - if is_long: - chars = 8 - else: - chars = 4 - - if not self.inc_n(chars): - value, extracted = None, None - else: - extracted = self.extract() - - if extracted[0].lower() == "d" and extracted[1].strip("01234567"): - return None, None - - try: - value = chr(int(extracted, 16)) - except (ValueError, OverflowError): - value = None - - return value, extracted diff --git a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/source.py b/conda_lock/_vendor/poetry/core/_vendor/tomlkit/source.py deleted file mode 100644 index 6a6a2391..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/source.py +++ /dev/null @@ -1,193 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import unicode_literals - -import itertools - -from copy import copy -from typing import Any -from typing import Optional -from typing import Tuple -from typing import Type - -from ._compat import PY2 -from ._compat import unicode -from .exceptions import ParseError -from .exceptions import UnexpectedCharError -from .exceptions import UnexpectedEofError -from .toml_char import TOMLChar - - -class _State: - def __init__( - self, source, save_marker=False, restore=False - ): # type: (_Source, Optional[bool], Optional[bool]) -> None - self._source = source - self._save_marker = save_marker - self.restore = restore - - def __enter__(self): # type: () -> None - # Entering this context manager - save the state - if PY2: - # Python 2.7 does not allow to directly copy - # an iterator, so we have to make tees of the original - # chars iterator. - self._source._chars, self._chars = itertools.tee(self._source._chars) - else: - self._chars = copy(self._source._chars) - self._idx = self._source._idx - self._current = self._source._current - self._marker = self._source._marker - - return self - - def __exit__(self, exception_type, exception_val, trace): - # Exiting this context manager - restore the prior state - if self.restore or exception_type: - self._source._chars = self._chars - self._source._idx = self._idx - self._source._current = self._current - if self._save_marker: - self._source._marker = self._marker - - -class _StateHandler: - """ - State preserver for the Parser. 
- """ - - def __init__(self, source): # type: (Source) -> None - self._source = source - self._states = [] - - def __call__(self, *args, **kwargs): - return _State(self._source, *args, **kwargs) - - def __enter__(self): # type: () -> None - state = self() - self._states.append(state) - return state.__enter__() - - def __exit__(self, exception_type, exception_val, trace): - state = self._states.pop() - return state.__exit__(exception_type, exception_val, trace) - - -class Source(unicode): - EOF = TOMLChar("\0") - - def __init__(self, _): # type: (unicode) -> None - super(Source, self).__init__() - - # Collection of TOMLChars - self._chars = iter([(i, TOMLChar(c)) for i, c in enumerate(self)]) - - self._idx = 0 - self._marker = 0 - self._current = TOMLChar("") - - self._state = _StateHandler(self) - - self.inc() - - def reset(self): - # initialize both idx and current - self.inc() - - # reset marker - self.mark() - - @property - def state(self): # type: () -> _StateHandler - return self._state - - @property - def idx(self): # type: () -> int - return self._idx - - @property - def current(self): # type: () -> TOMLChar - return self._current - - @property - def marker(self): # type: () -> int - return self._marker - - def extract(self): # type: () -> unicode - """ - Extracts the value between marker and index - """ - return self[self._marker : self._idx] - - def inc(self, exception=None): # type: (Optional[Type[ParseError]]) -> bool - """ - Increments the parser if the end of the input has not been reached. - Returns whether or not it was able to advance. - """ - try: - self._idx, self._current = next(self._chars) - - return True - except StopIteration: - self._idx = len(self) - self._current = self.EOF - if exception: - raise self.parse_error(exception) - - return False - - def inc_n(self, n, exception=None): # type: (int, Exception) -> bool - """ - Increments the parser by n characters - if the end of the input has not been reached. - """ - for _ in range(n): - if not self.inc(exception=exception): - return False - - return True - - def consume(self, chars, min=0, max=-1): - """ - Consume chars until min/max is satisfied is valid. - """ - while self.current in chars and max != 0: - min -= 1 - max -= 1 - if not self.inc(): - break - - # failed to consume minimum number of characters - if min > 0: - self.parse_error(UnexpectedCharError) - - def end(self): # type: () -> bool - """ - Returns True if the parser has reached the end of the input. - """ - return self._current is self.EOF - - def mark(self): # type: () -> None - """ - Sets the marker to the index's current position - """ - self._marker = self._idx - - def parse_error( - self, exception=ParseError, *args - ): # type: (Type[ParseError], Any) -> ParseError - """ - Creates a generic "parse error" at the current position. 
- """ - line, col = self._to_linecol() - - return exception(line, col, *args) - - def _to_linecol(self): # type: () -> Tuple[int, int] - cur = 0 - for i, line in enumerate(self.splitlines()): - if cur + len(line) + 1 > self.idx: - return (i + 1, self.idx - cur) - - cur += len(line) + 1 - - return len(self.splitlines()), 0 diff --git a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/toml_char.py b/conda_lock/_vendor/poetry/core/_vendor/tomlkit/toml_char.py deleted file mode 100644 index 079b16cc..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/toml_char.py +++ /dev/null @@ -1,67 +0,0 @@ -import string - -from ._compat import PY2 -from ._compat import unicode - - -if PY2: - from functools32 import lru_cache -else: - from functools import lru_cache - - -class TOMLChar(unicode): - def __init__(self, c): - super(TOMLChar, self).__init__() - - if len(self) > 1: - raise ValueError("A TOML character must be of length 1") - - BARE = string.ascii_letters + string.digits + "-_" - KV = "= \t" - NUMBER = string.digits + "+-_.e" - SPACES = " \t" - NL = "\n\r" - WS = SPACES + NL - - @lru_cache(maxsize=None) - def is_bare_key_char(self): # type: () -> bool - """ - Whether the character is a valid bare key name or not. - """ - return self in self.BARE - - @lru_cache(maxsize=None) - def is_kv_sep(self): # type: () -> bool - """ - Whether the character is a valid key/value separator ot not. - """ - return self in self.KV - - @lru_cache(maxsize=None) - def is_int_float_char(self): # type: () -> bool - """ - Whether the character if a valid integer or float value character or not. - """ - return self in self.NUMBER - - @lru_cache(maxsize=None) - def is_ws(self): # type: () -> bool - """ - Whether the character is a whitespace character or not. - """ - return self in self.WS - - @lru_cache(maxsize=None) - def is_nl(self): # type: () -> bool - """ - Whether the character is a new line character or not. - """ - return self in self.NL - - @lru_cache(maxsize=None) - def is_spaces(self): # type: () -> bool - """ - Whether the character is a space or not - """ - return self in self.SPACES diff --git a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/toml_document.py b/conda_lock/_vendor/poetry/core/_vendor/tomlkit/toml_document.py deleted file mode 100644 index b485e302..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/toml_document.py +++ /dev/null @@ -1,7 +0,0 @@ -from .container import Container - - -class TOMLDocument(Container): - """ - A TOML document. - """ diff --git a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/toml_file.py b/conda_lock/_vendor/poetry/core/_vendor/tomlkit/toml_file.py deleted file mode 100644 index 3b416664..00000000 --- a/conda_lock/_vendor/poetry/core/_vendor/tomlkit/toml_file.py +++ /dev/null @@ -1,24 +0,0 @@ -import io - -from typing import Any -from typing import Dict - -from .api import loads -from .toml_document import TOMLDocument - - -class TOMLFile(object): - """ - Represents a TOML file. 
- """ - - def __init__(self, path): # type: (str) -> None - self._path = path - - def read(self): # type: () -> TOMLDocument - with io.open(self._path, encoding="utf-8") as f: - return loads(f.read()) - - def write(self, data): # type: (TOMLDocument) -> None - with io.open(self._path, "w", encoding="utf-8") as f: - f.write(data.as_string()) diff --git a/conda_lock/_vendor/poetry/core/_vendor/vendor.txt b/conda_lock/_vendor/poetry/core/_vendor/vendor.txt index 13de1ee1..abe97251 100644 --- a/conda_lock/_vendor/poetry/core/_vendor/vendor.txt +++ b/conda_lock/_vendor/poetry/core/_vendor/vendor.txt @@ -1,9 +1,4 @@ -attrs==20.3.0; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" -jsonschema==3.2.0 -lark-parser==0.9.0 -packaging==20.9; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.4.0") -pyparsing==2.4.7; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" -pyrsistent==0.16.1; python_version >= "2.7" -six==1.15.0; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.3.0" and python_version >= "2.7" -tomlkit==0.7.0; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0") -typing-extensions==3.7.4.3; python_version >= "3.6" and python_version < "3.8" +fastjsonschema==2.19.1 ; python_version >= "3.8" and python_version < "4.0" +lark==1.1.9 ; python_version >= "3.8" and python_version < "4.0" +packaging==23.2 ; python_version >= "3.8" and python_version < "4.0" +tomli==2.0.1 ; python_version >= "3.8" and python_version < "4.0" diff --git a/conda_lock/_vendor/poetry/core/constraints/__init__.py b/conda_lock/_vendor/poetry/core/constraints/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/conda_lock/_vendor/poetry/core/constraints/generic/__init__.py b/conda_lock/_vendor/poetry/core/constraints/generic/__init__.py new file mode 100644 index 00000000..6767de05 --- /dev/null +++ b/conda_lock/_vendor/poetry/core/constraints/generic/__init__.py @@ -0,0 +1,20 @@ +from __future__ import annotations + +from conda_lock._vendor.poetry.core.constraints.generic.any_constraint import AnyConstraint +from conda_lock._vendor.poetry.core.constraints.generic.base_constraint import BaseConstraint +from conda_lock._vendor.poetry.core.constraints.generic.constraint import Constraint +from conda_lock._vendor.poetry.core.constraints.generic.empty_constraint import EmptyConstraint +from conda_lock._vendor.poetry.core.constraints.generic.multi_constraint import MultiConstraint +from conda_lock._vendor.poetry.core.constraints.generic.parser import parse_constraint +from conda_lock._vendor.poetry.core.constraints.generic.union_constraint import UnionConstraint + + +__all__ = ( + "AnyConstraint", + "BaseConstraint", + "Constraint", + "EmptyConstraint", + "MultiConstraint", + "UnionConstraint", + "parse_constraint", +) diff --git a/conda_lock/_vendor/poetry/core/constraints/generic/any_constraint.py b/conda_lock/_vendor/poetry/core/constraints/generic/any_constraint.py new file mode 100644 index 00000000..eb95d5b6 --- /dev/null +++ b/conda_lock/_vendor/poetry/core/constraints/generic/any_constraint.py @@ -0,0 +1,45 @@ +from __future__ import annotations + +from conda_lock._vendor.poetry.core.constraints.generic.base_constraint import BaseConstraint +from conda_lock._vendor.poetry.core.constraints.generic.empty_constraint import EmptyConstraint + + +class AnyConstraint(BaseConstraint): + def allows(self, other: 
BaseConstraint) -> bool: + return True + + def allows_all(self, other: BaseConstraint) -> bool: + return True + + def allows_any(self, other: BaseConstraint) -> bool: + return True + + def invert(self) -> BaseConstraint: + return EmptyConstraint() + + def difference(self, other: BaseConstraint) -> BaseConstraint: + if other.is_any(): + return EmptyConstraint() + + raise ValueError("Unimplemented constraint difference") + + def intersect(self, other: BaseConstraint) -> BaseConstraint: + return other + + def union(self, other: BaseConstraint) -> AnyConstraint: + return AnyConstraint() + + def is_any(self) -> bool: + return True + + def is_empty(self) -> bool: + return False + + def __str__(self) -> str: + return "*" + + def __eq__(self, other: object) -> bool: + return isinstance(other, BaseConstraint) and other.is_any() + + def __hash__(self) -> int: + return hash("any") diff --git a/conda_lock/_vendor/poetry/core/constraints/generic/base_constraint.py b/conda_lock/_vendor/poetry/core/constraints/generic/base_constraint.py new file mode 100644 index 00000000..f2fb53b6 --- /dev/null +++ b/conda_lock/_vendor/poetry/core/constraints/generic/base_constraint.py @@ -0,0 +1,42 @@ +from __future__ import annotations + + +class BaseConstraint: + def allows(self, other: BaseConstraint) -> bool: + raise NotImplementedError + + def allows_all(self, other: BaseConstraint) -> bool: + raise NotImplementedError + + def allows_any(self, other: BaseConstraint) -> bool: + raise NotImplementedError + + def invert(self) -> BaseConstraint: + raise NotImplementedError() + + def difference(self, other: BaseConstraint) -> BaseConstraint: + raise NotImplementedError + + def intersect(self, other: BaseConstraint) -> BaseConstraint: + raise NotImplementedError + + def union(self, other: BaseConstraint) -> BaseConstraint: + raise NotImplementedError + + def is_any(self) -> bool: + return False + + def is_empty(self) -> bool: + return False + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self}>" + + def __str__(self) -> str: + raise NotImplementedError + + def __hash__(self) -> int: + raise NotImplementedError + + def __eq__(self, other: object) -> bool: + raise NotImplementedError diff --git a/conda_lock/_vendor/poetry/core/constraints/generic/constraint.py b/conda_lock/_vendor/poetry/core/constraints/generic/constraint.py new file mode 100644 index 00000000..3f08c116 --- /dev/null +++ b/conda_lock/_vendor/poetry/core/constraints/generic/constraint.py @@ -0,0 +1,166 @@ +from __future__ import annotations + +import operator +import warnings + +from typing import Any +from typing import Callable +from typing import ClassVar + +from conda_lock._vendor.poetry.core.constraints.generic.any_constraint import AnyConstraint +from conda_lock._vendor.poetry.core.constraints.generic.base_constraint import BaseConstraint +from conda_lock._vendor.poetry.core.constraints.generic.empty_constraint import EmptyConstraint + + +OperatorType = Callable[[object, object], Any] + + +class Constraint(BaseConstraint): + OP_EQ = operator.eq + OP_NE = operator.ne + + _trans_op_str: ClassVar[dict[str, OperatorType]] = { + "=": OP_EQ, + "==": OP_EQ, + "!=": OP_NE, + } + + _trans_op_int: ClassVar[dict[OperatorType, str]] = {OP_EQ: "==", OP_NE: "!="} + + def __init__(self, value: str, operator: str = "==") -> None: + if operator == "=": + operator = "==" + + self._value = value + self._operator = operator + self._op = self._trans_op_str[operator] + + @property + def value(self) -> str: + return self._value + + @property + def 
version(self) -> str: + warnings.warn( + "The property 'version' is deprecated and will be removed. " + "Please use the property 'value' instead.", + DeprecationWarning, + stacklevel=2, + ) + return self.value + + @property + def operator(self) -> str: + return self._operator + + def allows(self, other: BaseConstraint) -> bool: + if not isinstance(other, Constraint): + raise ValueError("Unimplemented comparison of constraints") + + is_equal_op = self._operator == "==" + is_non_equal_op = self._operator == "!=" + is_other_equal_op = other.operator == "==" + is_other_non_equal_op = other.operator == "!=" + + if is_equal_op and is_other_equal_op: + return self._value == other.value + + if ( + is_equal_op + and is_other_non_equal_op + or is_non_equal_op + and is_other_equal_op + or is_non_equal_op + and is_other_non_equal_op + ): + return self._value != other.value + + return False + + def allows_all(self, other: BaseConstraint) -> bool: + if not isinstance(other, Constraint): + return other.is_empty() + + return other == self + + def allows_any(self, other: BaseConstraint) -> bool: + if isinstance(other, Constraint): + is_non_equal_op = self._operator == "!=" + is_other_non_equal_op = other.operator == "!=" + + if is_non_equal_op and is_other_non_equal_op: + return self._value != other.value + + return other.allows(self) + + def invert(self) -> Constraint: + return Constraint(self._value, "!=" if self._operator == "==" else "==") + + def difference(self, other: BaseConstraint) -> Constraint | EmptyConstraint: + if other.allows(self): + return EmptyConstraint() + + return self + + def intersect(self, other: BaseConstraint) -> BaseConstraint: + from conda_lock._vendor.poetry.core.constraints.generic.multi_constraint import MultiConstraint + + if isinstance(other, Constraint): + if other == self: + return self + + if self.operator == "!=" and other.operator == "==" and self.allows(other): + return other + + if other.operator == "!=" and self.operator == "==" and other.allows(self): + return self + + if other.operator == "!=" and self.operator == "!=": + return MultiConstraint(self, other) + + return EmptyConstraint() + + return other.intersect(self) + + def union(self, other: BaseConstraint) -> BaseConstraint: + from conda_lock._vendor.poetry.core.constraints.generic.union_constraint import UnionConstraint + + if isinstance(other, Constraint): + if other == self: + return self + + if self.operator == "!=" and other.operator == "==" and self.allows(other): + return self + + if other.operator == "!=" and self.operator == "==" and other.allows(self): + return other + + if other.operator == "==" and self.operator == "==": + return UnionConstraint(self, other) + + return AnyConstraint() + + # to preserve order (functionally not necessary) + if isinstance(other, UnionConstraint): + return UnionConstraint(self).union(other) + + return other.union(self) + + def is_any(self) -> bool: + return False + + def is_empty(self) -> bool: + return False + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Constraint): + return NotImplemented + + return (self.value, self.operator) == (other.value, other.operator) + + def __hash__(self) -> int: + return hash((self._operator, self._value)) + + def __str__(self) -> str: + op = self._operator if self._operator != "==" else "" + return f"{op}{self._value}" diff --git a/conda_lock/_vendor/poetry/core/constraints/generic/empty_constraint.py b/conda_lock/_vendor/poetry/core/constraints/generic/empty_constraint.py new file mode 100644 index 
00000000..b7cb590c --- /dev/null +++ b/conda_lock/_vendor/poetry/core/constraints/generic/empty_constraint.py @@ -0,0 +1,45 @@ +from __future__ import annotations + +from conda_lock._vendor.poetry.core.constraints.generic.base_constraint import BaseConstraint + + +class EmptyConstraint(BaseConstraint): + pretty_string = None + + def is_empty(self) -> bool: + return True + + def allows(self, other: BaseConstraint) -> bool: + return False + + def allows_all(self, other: BaseConstraint) -> bool: + return other.is_empty() + + def allows_any(self, other: BaseConstraint) -> bool: + return False + + def invert(self) -> BaseConstraint: + from conda_lock._vendor.poetry.core.constraints.generic.any_constraint import AnyConstraint + + return AnyConstraint() + + def intersect(self, other: BaseConstraint) -> BaseConstraint: + return self + + def union(self, other: BaseConstraint) -> BaseConstraint: + return other + + def difference(self, other: BaseConstraint) -> BaseConstraint: + return self + + def __eq__(self, other: object) -> bool: + if not isinstance(other, BaseConstraint): + return False + + return other.is_empty() + + def __hash__(self) -> int: + return hash("empty") + + def __str__(self) -> str: + return "" diff --git a/conda_lock/_vendor/poetry/core/constraints/generic/multi_constraint.py b/conda_lock/_vendor/poetry/core/constraints/generic/multi_constraint.py new file mode 100644 index 00000000..217f6d3a --- /dev/null +++ b/conda_lock/_vendor/poetry/core/constraints/generic/multi_constraint.py @@ -0,0 +1,135 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from conda_lock._vendor.poetry.core.constraints.generic import AnyConstraint +from conda_lock._vendor.poetry.core.constraints.generic import EmptyConstraint +from conda_lock._vendor.poetry.core.constraints.generic.base_constraint import BaseConstraint +from conda_lock._vendor.poetry.core.constraints.generic.constraint import Constraint + + +if TYPE_CHECKING: + from conda_lock._vendor.poetry.core.constraints.generic import UnionConstraint + + +class MultiConstraint(BaseConstraint): + def __init__(self, *constraints: Constraint) -> None: + if any(c.operator == "==" for c in constraints): + raise ValueError( + "A multi-constraint can only be comprised of negative constraints" + ) + + self._constraints = constraints + + @property + def constraints(self) -> tuple[Constraint, ...]: + return self._constraints + + def allows(self, other: BaseConstraint) -> bool: + return all(constraint.allows(other) for constraint in self._constraints) + + def allows_all(self, other: BaseConstraint) -> bool: + if other.is_any(): + return False + + if other.is_empty(): + return True + + if not isinstance(other, MultiConstraint): + return self.allows(other) + + our_constraints = iter(self._constraints) + their_constraints = iter(other.constraints) + our_constraint = next(our_constraints, None) + their_constraint = next(their_constraints, None) + + while our_constraint and their_constraint: + if our_constraint.allows_all(their_constraint): + their_constraint = next(their_constraints, None) + else: + our_constraint = next(our_constraints, None) + + return their_constraint is None + + def allows_any(self, other: BaseConstraint) -> bool: + if other.is_any(): + return True + + if other.is_empty(): + return True + + if isinstance(other, Constraint): + return self.allows(other) + + if isinstance(other, MultiConstraint): + return any( + c1.allows(c2) for c1 in self.constraints for c2 in other.constraints + ) + + return False + + def invert(self) 
-> UnionConstraint: + from conda_lock._vendor.poetry.core.constraints.generic import UnionConstraint + + return UnionConstraint(*(c.invert() for c in self._constraints)) + + def intersect(self, other: BaseConstraint) -> BaseConstraint: + if isinstance(other, MultiConstraint): + ours = set(self.constraints) + union = list(self.constraints) + [ + c for c in other.constraints if c not in ours + ] + return MultiConstraint(*union) + + if not isinstance(other, Constraint): + return other.intersect(self) + + if other in self._constraints: + return self + + if other.value in (c.value for c in self._constraints): + # same value but different operator, e.g. '== "linux"' and '!= "linux"' + return EmptyConstraint() + + if other.operator == "==": + return other + + return MultiConstraint(*self._constraints, other) + + def union(self, other: BaseConstraint) -> BaseConstraint: + if isinstance(other, MultiConstraint): + theirs = set(other.constraints) + common = [c for c in self.constraints if c in theirs] + return MultiConstraint(*common) + + if not isinstance(other, Constraint): + return other.union(self) + + if other in self._constraints: + return other + + if other.value not in (c.value for c in self._constraints): + if other.operator == "!=": + return AnyConstraint() + + return self + + constraints = [c for c in self._constraints if c.value != other.value] + + if len(constraints) == 1: + return constraints[0] + + return MultiConstraint(*constraints) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, MultiConstraint): + return False + + return self._constraints == other._constraints + + def __hash__(self) -> int: + return hash(("multi", *self._constraints)) + + def __str__(self) -> str: + constraints = [str(constraint) for constraint in self._constraints] + return ", ".join(constraints) diff --git a/conda_lock/_vendor/poetry/core/packages/constraints/__init__.py b/conda_lock/_vendor/poetry/core/constraints/generic/parser.py similarity index 62% rename from conda_lock/_vendor/poetry/core/packages/constraints/__init__.py rename to conda_lock/_vendor/poetry/core/constraints/generic/parser.py index 33acb85a..62d39d95 100644 --- a/conda_lock/_vendor/poetry/core/packages/constraints/__init__.py +++ b/conda_lock/_vendor/poetry/core/constraints/generic/parser.py @@ -1,24 +1,25 @@ +from __future__ import annotations + +import functools import re -from typing import Union +from typing import TYPE_CHECKING + +from conda_lock._vendor.poetry.core.constraints.generic.any_constraint import AnyConstraint +from conda_lock._vendor.poetry.core.constraints.generic.constraint import Constraint +from conda_lock._vendor.poetry.core.constraints.generic.union_constraint import UnionConstraint +from conda_lock._vendor.poetry.core.constraints.version.exceptions import ParseConstraintError + -from .any_constraint import AnyConstraint -from .base_constraint import BaseConstraint -from .constraint import Constraint -from .empty_constraint import EmptyConstraint -from .multi_constraint import MultiConstraint -from .union_constraint import UnionConstraint +if TYPE_CHECKING: + from conda_lock._vendor.poetry.core.constraints.generic.base_constraint import BaseConstraint BASIC_CONSTRAINT = re.compile(r"^(!?==?)?\s*([^\s]+?)\s*$") -ConstraintTypes = Union[ - AnyConstraint, Constraint, UnionConstraint, EmptyConstraint, MultiConstraint -] -def parse_constraint( - constraints, -): # type: (str) -> Union[AnyConstraint, UnionConstraint, Constraint] +@functools.lru_cache(maxsize=None) +def parse_constraint(constraints: 
str) -> BaseConstraint: if constraints == "*": return AnyConstraint() @@ -51,7 +52,7 @@ def parse_constraint( return UnionConstraint(*or_groups) -def parse_single_constraint(constraint): # type: (str) -> Constraint +def parse_single_constraint(constraint: str) -> Constraint: # Basic comparator m = BASIC_CONSTRAINT.match(constraint) if m: @@ -63,4 +64,4 @@ def parse_single_constraint(constraint): # type: (str) -> Constraint return Constraint(version, op) - raise ValueError("Could not parse version constraint: {}".format(constraint)) + raise ParseConstraintError(f"Could not parse version constraint: {constraint}") diff --git a/conda_lock/_vendor/poetry/core/constraints/generic/union_constraint.py b/conda_lock/_vendor/poetry/core/constraints/generic/union_constraint.py new file mode 100644 index 00000000..5705ce98 --- /dev/null +++ b/conda_lock/_vendor/poetry/core/constraints/generic/union_constraint.py @@ -0,0 +1,185 @@ +from __future__ import annotations + +import itertools + +from conda_lock._vendor.poetry.core.constraints.generic import AnyConstraint +from conda_lock._vendor.poetry.core.constraints.generic.base_constraint import BaseConstraint +from conda_lock._vendor.poetry.core.constraints.generic.constraint import Constraint +from conda_lock._vendor.poetry.core.constraints.generic.empty_constraint import EmptyConstraint +from conda_lock._vendor.poetry.core.constraints.generic.multi_constraint import MultiConstraint + + +class UnionConstraint(BaseConstraint): + def __init__(self, *constraints: BaseConstraint) -> None: + self._constraints = constraints + + @property + def constraints(self) -> tuple[BaseConstraint, ...]: + return self._constraints + + def allows( + self, + other: BaseConstraint, + ) -> bool: + return any(constraint.allows(other) for constraint in self._constraints) + + def allows_any(self, other: BaseConstraint) -> bool: + if other.is_empty(): + return False + + if other.is_any(): + return True + + if isinstance(other, (UnionConstraint, MultiConstraint)): + constraints = other.constraints + else: + constraints = (other,) + + return any( + our_constraint.allows_any(their_constraint) + for our_constraint in self._constraints + for their_constraint in constraints + ) + + def allows_all(self, other: BaseConstraint) -> bool: + if other.is_any(): + return False + + if other.is_empty(): + return True + + if isinstance(other, (UnionConstraint, MultiConstraint)): + constraints = other.constraints + else: + constraints = (other,) + + our_constraints = iter(self._constraints) + their_constraints = iter(constraints) + our_constraint = next(our_constraints, None) + their_constraint = next(their_constraints, None) + + while our_constraint and their_constraint: + if our_constraint.allows_all(their_constraint): + their_constraint = next(their_constraints, None) + else: + our_constraint = next(our_constraints, None) + + return their_constraint is None + + def invert(self) -> MultiConstraint: + inverted_constraints = [c.invert() for c in self._constraints] + if any(not isinstance(c, Constraint) for c in inverted_constraints): + raise NotImplementedError( + "Inversion of complex union constraints not implemented" + ) + return MultiConstraint(*inverted_constraints) # type: ignore[arg-type] + + def intersect(self, other: BaseConstraint) -> BaseConstraint: + if other.is_any(): + return self + + if other.is_empty(): + return other + + if isinstance(other, Constraint): + # (A or B) and C => (A and C) or (B and C) + # just a special case of UnionConstraint + other = UnionConstraint(other) + + 
new_constraints = [] + if isinstance(other, UnionConstraint): + # (A or B) and (C or D) => (A and C) or (A and D) or (B and C) or (B and D) + for our_constraint in self._constraints: + for their_constraint in other.constraints: + intersection = our_constraint.intersect(their_constraint) + + if not (intersection.is_empty() or intersection in new_constraints): + new_constraints.append(intersection) + + else: + assert isinstance(other, MultiConstraint) + # (A or B) and (C and D) => (A and C and D) or (B and C and D) + + for our_constraint in self._constraints: + intersection = our_constraint + for their_constraint in other.constraints: + intersection = intersection.intersect(their_constraint) + + if not (intersection.is_empty() or intersection in new_constraints): + new_constraints.append(intersection) + + if not new_constraints: + return EmptyConstraint() + + if len(new_constraints) == 1: + return new_constraints[0] + + return UnionConstraint(*new_constraints) + + def union(self, other: BaseConstraint) -> BaseConstraint: + if other.is_any(): + return other + + if other.is_empty(): + return self + + if isinstance(other, Constraint): + # (A or B) or C => A or B or C + # just a special case of UnionConstraint + other = UnionConstraint(other) + + new_constraints: list[BaseConstraint] = [] + if isinstance(other, UnionConstraint): + # (A or B) or (C or D) => A or B or C or D + our_new_constraints: list[BaseConstraint] = [] + their_new_constraints: list[BaseConstraint] = [] + merged_new_constraints: list[BaseConstraint] = [] + for their_constraint in other.constraints: + for our_constraint in self._constraints: + union = our_constraint.union(their_constraint) + if union.is_any(): + return AnyConstraint() + if isinstance(union, Constraint): + if union == our_constraint: + if union not in our_new_constraints: + our_new_constraints.append(union) + elif union == their_constraint: + if union not in their_new_constraints: + their_new_constraints.append(their_constraint) + elif union not in merged_new_constraints: + merged_new_constraints.append(union) + else: + if our_constraint not in our_new_constraints: + our_new_constraints.append(our_constraint) + if their_constraint not in their_new_constraints: + their_new_constraints.append(their_constraint) + new_constraints = our_new_constraints + for constraint in itertools.chain( + their_new_constraints, merged_new_constraints + ): + if constraint not in new_constraints: + new_constraints.append(constraint) + + else: + assert isinstance(other, MultiConstraint) + # (A or B) or (C and D) => nothing to do + + new_constraints = [*self._constraints, other] + + if len(new_constraints) == 1: + return new_constraints[0] + + return UnionConstraint(*new_constraints) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, UnionConstraint): + return False + + return self._constraints == other._constraints + + def __hash__(self) -> int: + return hash(("union", *self._constraints)) + + def __str__(self) -> str: + constraints = [str(constraint) for constraint in self._constraints] + return " || ".join(constraints) diff --git a/conda_lock/_vendor/poetry/core/constraints/version/__init__.py b/conda_lock/_vendor/poetry/core/constraints/version/__init__.py new file mode 100644 index 00000000..e19cad83 --- /dev/null +++ b/conda_lock/_vendor/poetry/core/constraints/version/__init__.py @@ -0,0 +1,26 @@ +from __future__ import annotations + +from conda_lock._vendor.poetry.core.constraints.version.empty_constraint import EmptyConstraint +from 
conda_lock._vendor.poetry.core.constraints.version.parser import parse_constraint +from conda_lock._vendor.poetry.core.constraints.version.parser import parse_marker_version_constraint +from conda_lock._vendor.poetry.core.constraints.version.util import constraint_regions +from conda_lock._vendor.poetry.core.constraints.version.version import Version +from conda_lock._vendor.poetry.core.constraints.version.version_constraint import VersionConstraint +from conda_lock._vendor.poetry.core.constraints.version.version_range import VersionRange +from conda_lock._vendor.poetry.core.constraints.version.version_range_constraint import ( + VersionRangeConstraint, +) +from conda_lock._vendor.poetry.core.constraints.version.version_union import VersionUnion + + +__all__ = ( + "EmptyConstraint", + "Version", + "VersionConstraint", + "VersionRange", + "VersionRangeConstraint", + "VersionUnion", + "constraint_regions", + "parse_constraint", + "parse_marker_version_constraint", +) diff --git a/conda_lock/_vendor/poetry/core/constraints/version/empty_constraint.py b/conda_lock/_vendor/poetry/core/constraints/version/empty_constraint.py new file mode 100644 index 00000000..d5fb7fbf --- /dev/null +++ b/conda_lock/_vendor/poetry/core/constraints/version/empty_constraint.py @@ -0,0 +1,56 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from conda_lock._vendor.poetry.core.constraints.version.version_constraint import VersionConstraint + + +if TYPE_CHECKING: + from conda_lock._vendor.poetry.core.constraints.version.version import Version + from conda_lock._vendor.poetry.core.constraints.version.version_range_constraint import ( + VersionRangeConstraint, + ) + + +class EmptyConstraint(VersionConstraint): + def is_empty(self) -> bool: + return True + + def is_any(self) -> bool: + return False + + def is_simple(self) -> bool: + return True + + def allows(self, version: Version) -> bool: + return False + + def allows_all(self, other: VersionConstraint) -> bool: + return other.is_empty() + + def allows_any(self, other: VersionConstraint) -> bool: + return False + + def intersect(self, other: VersionConstraint) -> EmptyConstraint: + return self + + def union(self, other: VersionConstraint) -> VersionConstraint: + return other + + def difference(self, other: VersionConstraint) -> EmptyConstraint: + return self + + def flatten(self) -> list[VersionRangeConstraint]: + return [] + + def __str__(self) -> str: + return "" + + def __eq__(self, other: object) -> bool: + if not isinstance(other, VersionConstraint): + return False + + return other.is_empty() + + def __hash__(self) -> int: + return hash("empty") diff --git a/conda_lock/_vendor/poetry/core/semver/exceptions.py b/conda_lock/_vendor/poetry/core/constraints/version/exceptions.py similarity index 52% rename from conda_lock/_vendor/poetry/core/semver/exceptions.py rename to conda_lock/_vendor/poetry/core/constraints/version/exceptions.py index b2432399..d06e56f7 100644 --- a/conda_lock/_vendor/poetry/core/semver/exceptions.py +++ b/conda_lock/_vendor/poetry/core/constraints/version/exceptions.py @@ -1,5 +1,4 @@ -class ParseVersionError(ValueError): - pass +from __future__ import annotations class ParseConstraintError(ValueError): diff --git a/conda_lock/_vendor/poetry/core/constraints/version/parser.py b/conda_lock/_vendor/poetry/core/constraints/version/parser.py new file mode 100644 index 00000000..90e49c7f --- /dev/null +++ b/conda_lock/_vendor/poetry/core/constraints/version/parser.py @@ -0,0 +1,220 @@ +from __future__ import 
annotations + +import functools +import re + +from typing import TYPE_CHECKING + +from conda_lock._vendor.poetry.core.constraints.version.exceptions import ParseConstraintError +from conda_lock._vendor.poetry.core.version.exceptions import InvalidVersion + + +if TYPE_CHECKING: + from conda_lock._vendor.poetry.core.constraints.version.version import Version + from conda_lock._vendor.poetry.core.constraints.version.version_constraint import VersionConstraint + + +@functools.lru_cache(maxsize=None) +def parse_constraint(constraints: str) -> VersionConstraint: + return _parse_constraint(constraints=constraints) + + +def parse_marker_version_constraint(constraints: str) -> VersionConstraint: + return _parse_constraint(constraints=constraints, is_marker_constraint=True) + + +def _parse_constraint( + constraints: str, *, is_marker_constraint: bool = False +) -> VersionConstraint: + if constraints == "*": + from conda_lock._vendor.poetry.core.constraints.version.version_range import VersionRange + + return VersionRange() + + or_constraints = re.split(r"\s*\|\|?\s*", constraints.strip()) + or_groups = [] + for constraints in or_constraints: + # allow trailing commas for robustness (even though it may not be + # standard-compliant it seems to occur in some packages) + constraints = constraints.rstrip(",").rstrip() + and_constraints = re.split( + r"(?<!^)(?<![=>< ,]) *(?<!-)[, ](?!-) *(?!,|$)", constraints + ) + + constraint_objects = [] + + if len(and_constraints) > 1: + for constraint in and_constraints: + constraint_objects.append( + parse_single_constraint( + constraint, is_marker_constraint=is_marker_constraint + ) + ) + else: + constraint_objects.append( + parse_single_constraint( + and_constraints[0], is_marker_constraint=is_marker_constraint + ) + ) + + if len(constraint_objects) == 1: + constraint = constraint_objects[0] + else: + constraint = constraint_objects[0] + for next_constraint in constraint_objects[1:]: + constraint = constraint.intersect(next_constraint) + + or_groups.append(constraint) + + if len(or_groups) == 1: + return or_groups[0] + else: + from conda_lock._vendor.poetry.core.constraints.version.version_union import VersionUnion + + return VersionUnion.of(*or_groups) + + +def parse_single_constraint( + constraint: str, *, is_marker_constraint: bool = False +) -> VersionConstraint: + from conda_lock._vendor.poetry.core.constraints.version.patterns import BASIC_CONSTRAINT + from conda_lock._vendor.poetry.core.constraints.version.patterns import CARET_CONSTRAINT + from conda_lock._vendor.poetry.core.constraints.version.patterns import TILDE_CONSTRAINT + from conda_lock._vendor.poetry.core.constraints.version.patterns import TILDE_PEP440_CONSTRAINT + from conda_lock._vendor.poetry.core.constraints.version.patterns import X_CONSTRAINT + from conda_lock._vendor.poetry.core.constraints.version.version import Version + from conda_lock._vendor.poetry.core.constraints.version.version_range import VersionRange + from conda_lock._vendor.poetry.core.constraints.version.version_union import VersionUnion + + m = re.match(r"(?i)^v?[xX*](\.[xX*])*$", constraint) + if m: + return VersionRange() + + # Tilde range + m = TILDE_CONSTRAINT.match(constraint) + if m: + try: + version = Version.parse(m.group("version")) + except InvalidVersion as e: + raise ParseConstraintError( + f"Could not parse version constraint: {constraint}" + ) from e + + high = version.stable.next_minor() + if version.release.precision == 1: + high = version.stable.next_major() + + return VersionRange(version, high, include_min=True) + + # PEP 440 Tilde range (~=) + m = TILDE_PEP440_CONSTRAINT.match(constraint) + if m: + try: + 
version = Version.parse(m.group("version")) + except InvalidVersion as e: + raise ParseConstraintError( + f"Could not parse version constraint: {constraint}" + ) from e + + if version.release.precision == 2: + high = version.stable.next_major() + else: + high = version.stable.next_minor() + + return VersionRange(version, high, include_min=True) + + # Caret range + m = CARET_CONSTRAINT.match(constraint) + if m: + try: + version = Version.parse(m.group("version")) + except InvalidVersion as e: + raise ParseConstraintError( + f"Could not parse version constraint: {constraint}" + ) from e + + return VersionRange(version, version.next_breaking(), include_min=True) + + # X Range + m = X_CONSTRAINT.match(constraint) + if m: + op = m.group("op") + + try: + return _make_x_constraint_range( + version=Version.parse(m.group("version")), + invert=op == "!=", + is_marker_constraint=is_marker_constraint, + ) + except ValueError: + raise ValueError(f"Could not parse version constraint: {constraint}") + + # Basic comparator + m = BASIC_CONSTRAINT.match(constraint) + if m: + op = m.group("op") + version_string = m.group("version") + + if version_string == "dev": + version_string = "0.0-dev" + + try: + version = Version.parse(version_string) + except InvalidVersion as e: + raise ParseConstraintError( + f"Could not parse version constraint: {constraint}" + ) from e + + if op == "<": + return VersionRange(max=version) + if op == "<=": + return VersionRange(max=version, include_max=True) + if op == ">": + return VersionRange(min=version) + if op == ">=": + return VersionRange(min=version, include_min=True) + + if m.group("wildcard") is not None: + return _make_x_constraint_range( + version=version, + invert=op == "!=", + is_marker_constraint=is_marker_constraint, + ) + + if op == "!=": + return VersionUnion(VersionRange(max=version), VersionRange(min=version)) + + return version + + raise ParseConstraintError(f"Could not parse version constraint: {constraint}") + + +def _make_x_constraint_range( + version: Version, *, invert: bool = False, is_marker_constraint: bool = False +) -> VersionConstraint: + from conda_lock._vendor.poetry.core.constraints.version.version_range import VersionRange + + if version.is_postrelease(): + _next = version.next_postrelease() + elif version.is_stable(): + _next = version.next_stable() + elif version.is_prerelease(): + _next = version.next_prerelease() + elif version.is_devrelease(): + _next = version.next_devrelease() + else: + raise RuntimeError("version is neither stable, nor pre-release nor dev-release") + + _min = version + _max = _next + + if not is_marker_constraint: + _min = _min.first_devrelease() + if not _max.is_devrelease(): + _max = _max.first_devrelease() + + result = VersionRange(_min, _max, include_min=True) + + if invert: + return VersionRange().difference(result) + + return result diff --git a/conda_lock/_vendor/poetry/core/constraints/version/patterns.py b/conda_lock/_vendor/poetry/core/constraints/version/patterns.py new file mode 100644 index 00000000..6dfdf8c0 --- /dev/null +++ b/conda_lock/_vendor/poetry/core/constraints/version/patterns.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +import re + +from packaging.version import VERSION_PATTERN + + +COMPLETE_VERSION = re.compile(VERSION_PATTERN, re.VERBOSE | re.IGNORECASE) + +CARET_CONSTRAINT = re.compile( + rf"^\^\s*(?P<version>{VERSION_PATTERN})$", re.VERBOSE | re.IGNORECASE +) +TILDE_CONSTRAINT = re.compile( + rf"^~(?!=)\s*(?P<version>{VERSION_PATTERN})$", re.VERBOSE | re.IGNORECASE +) +TILDE_PEP440_CONSTRAINT = 
re.compile( + rf"^~=\s*(?P<version>{VERSION_PATTERN})$", re.VERBOSE | re.IGNORECASE +) +X_CONSTRAINT = re.compile( + r"^(?P<op>!=|==)?\s*v?(?P<version>(\d+)(?:\.(\d+))?(?:\.(\d+))?)(?:\.[xX*])+$" +) + +# note that we also allow technically incorrect version patterns with asterisk (eg: 3.5.*) +# as this is supported by pip and appears in metadata within python packages +BASIC_CONSTRAINT = re.compile( + rf"^(?P<op><>|!=|>=?|<=?|==?)?\s*(?P<version>{VERSION_PATTERN}|dev)(?P<wildcard>\.\*)?$", + re.VERBOSE | re.IGNORECASE, +) diff --git a/conda_lock/_vendor/poetry/core/constraints/version/util.py b/conda_lock/_vendor/poetry/core/constraints/version/util.py new file mode 100644 index 00000000..9a16cf1c --- /dev/null +++ b/conda_lock/_vendor/poetry/core/constraints/version/util.py @@ -0,0 +1,58 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from conda_lock._vendor.poetry.core.constraints.version.version_range import VersionRange + + +if TYPE_CHECKING: + from conda_lock._vendor.poetry.core.constraints.version.version_constraint import VersionConstraint + + +def constraint_regions(constraints: list[VersionConstraint]) -> list[VersionRange]: + """ + Transform a list of VersionConstraints into a list of VersionRanges that mark out + the distinct regions of version-space. + + eg input >=3.6 and >=2.7,<3.0.0 || >=3.4.0 + output <2.7, >=2.7,<3.0.0, >=3.0.0,<3.4.0, >=3.4.0,<3.6, >=3.6. + """ + flattened = [] + for constraint in constraints: + flattened += constraint.flatten() + + mins = { + (constraint.min, not constraint.include_min) + for constraint in flattened + if constraint.min is not None + } + maxs = { + (constraint.max, constraint.include_max) + for constraint in flattened + if constraint.max is not None + } + + edges = sorted(mins | maxs) + if not edges: + return [VersionRange(None, None)] + + start = edges[0] + regions = [ + VersionRange(None, start[0], include_max=start[1]), + ] + + for low, high in zip(edges, edges[1:]): + version_range = VersionRange( + low[0], + high[0], + include_min=not low[1], + include_max=high[1], + ) + regions.append(version_range) + + end = edges[-1] + regions.append( + VersionRange(end[0], None, include_min=not end[1]), + ) + + return regions diff --git a/conda_lock/_vendor/poetry/core/constraints/version/version.py b/conda_lock/_vendor/poetry/core/constraints/version/version.py new file mode 100644 index 00000000..8a75afa4 --- /dev/null +++ b/conda_lock/_vendor/poetry/core/constraints/version/version.py @@ -0,0 +1,182 @@ +from __future__ import annotations + +import dataclasses + +from typing import TYPE_CHECKING + +from conda_lock._vendor.poetry.core.constraints.version.empty_constraint import EmptyConstraint +from conda_lock._vendor.poetry.core.constraints.version.version_range_constraint import ( + VersionRangeConstraint, +) +from conda_lock._vendor.poetry.core.constraints.version.version_union import VersionUnion +from conda_lock._vendor.poetry.core.version.pep440 import Release +from conda_lock._vendor.poetry.core.version.pep440.version import PEP440Version + + +if TYPE_CHECKING: + from conda_lock._vendor.poetry.core.constraints.version.version_constraint import VersionConstraint + from conda_lock._vendor.poetry.core.version.pep440 import LocalSegmentType + from conda_lock._vendor.poetry.core.version.pep440 import ReleaseTag + + +@dataclasses.dataclass(frozen=True) +class Version(PEP440Version, VersionRangeConstraint): + """ + A version constraint representing a single version. 
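With the named groups restored, the tilde and caret rules implemented by `parse_single_constraint` above come out as follows (a sketch assuming the vendored import path used throughout this diff; the equivalent upstream import is `poetry.core.constraints.version`):

    from conda_lock._vendor.poetry.core.constraints.version import parse_constraint

    assert str(parse_constraint("~1.2")) == ">=1.2,<1.3"   # tilde: bump the minor
    assert str(parse_constraint("~1")) == ">=1,<2"         # precision 1: bump the major
    assert str(parse_constraint("^1.2")) == ">=1.2,<2.0"   # caret: next breaking version
    assert str(parse_constraint("^0.3")) == ">=0.3,<0.4"   # below 1.0, minor is breaking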
+ """ + + @property + def precision(self) -> int: + return self.release.precision + + @property + def stable(self) -> Version: + if self.is_stable(): + return self + + post = self.post if self.pre is None else None + return Version(release=self.release, post=post, epoch=self.epoch) + + def next_breaking(self) -> Version: + if self.major > 0 or self.minor is None: + return self.stable.next_major() + + if self.minor > 0 or self.patch is None: + return self.stable.next_minor() + + return self.stable.next_patch() + + @property + def min(self) -> Version: + return self + + @property + def max(self) -> Version: + return self + + @property + def full_max(self) -> Version: + return self + + @property + def include_min(self) -> bool: + return True + + @property + def include_max(self) -> bool: + return True + + def is_any(self) -> bool: + return False + + def is_empty(self) -> bool: + return False + + def is_simple(self) -> bool: + return True + + def allows(self, version: Version | None) -> bool: + if version is None: + return False + + _this, _other = self, version + + # allow weak equality to allow `3.0.0+local.1` for `3.0.0` + if not _this.is_local() and _other.is_local(): + _other = _other.without_local() + + return _this == _other + + def allows_all(self, other: VersionConstraint) -> bool: + return other.is_empty() or ( + self.allows(other) if isinstance(other, self.__class__) else other == self + ) + + def allows_any(self, other: VersionConstraint) -> bool: + intersection = self.intersect(other) + return not intersection.is_empty() + + def intersect(self, other: VersionConstraint) -> VersionConstraint: + if isinstance(other, Version): + if self.allows(other): + return other + + if other.allows(self): + return self + + return EmptyConstraint() + + return other.intersect(self) + + def union(self, other: VersionConstraint) -> VersionConstraint: + from conda_lock._vendor.poetry.core.constraints.version.version_range import VersionRange + + if other.allows(self): + return other + + if isinstance(other, VersionRangeConstraint): + if self.allows(other.min): + return VersionRange( + other.min, + other.max, + include_min=True, + include_max=other.include_max, + ) + + if self.allows(other.max): + return VersionRange( + other.min, + other.max, + include_min=other.include_min, + include_max=True, + ) + + return VersionUnion.of(self, other) + + def difference(self, other: VersionConstraint) -> Version | EmptyConstraint: + if other.allows(self): + return EmptyConstraint() + + return self + + def flatten(self) -> list[VersionRangeConstraint]: + return [self] + + def __str__(self) -> str: + return self.text + + def __eq__(self, other: object) -> bool: + from conda_lock._vendor.poetry.core.constraints.version.version_range import VersionRange + + if isinstance(other, VersionRange): + return ( + self == other.min + and self == other.max + and (other.include_min or other.include_max) + ) + return super().__eq__(other) + + @classmethod + def from_parts( + cls, + major: int, + minor: int | None = None, + patch: int | None = None, + extra: int | tuple[int, ...] 
= (), + pre: ReleaseTag | None = None, + post: ReleaseTag | None = None, + dev: ReleaseTag | None = None, + local: LocalSegmentType = None, + *, + epoch: int = 0, + ) -> Version: + if isinstance(extra, int): + extra = (extra,) + return cls( + release=Release(major=major, minor=minor, patch=patch, extra=extra), + pre=pre, + post=post, + dev=dev, + local=local, + epoch=epoch, + ) diff --git a/conda_lock/_vendor/poetry/core/constraints/version/version_constraint.py b/conda_lock/_vendor/poetry/core/constraints/version/version_constraint.py new file mode 100644 index 00000000..ae167b6c --- /dev/null +++ b/conda_lock/_vendor/poetry/core/constraints/version/version_constraint.py @@ -0,0 +1,126 @@ +from __future__ import annotations + +from abc import abstractmethod +from typing import TYPE_CHECKING + + +if TYPE_CHECKING: + from conda_lock._vendor.poetry.core.constraints.version.version import Version + from conda_lock._vendor.poetry.core.constraints.version.version_range_constraint import ( + VersionRangeConstraint, + ) + + +class VersionConstraint: + @abstractmethod + def is_empty(self) -> bool: + raise NotImplementedError + + @abstractmethod + def is_any(self) -> bool: + raise NotImplementedError + + @abstractmethod + def is_simple(self) -> bool: + raise NotImplementedError + + @abstractmethod + def allows(self, version: Version) -> bool: + raise NotImplementedError + + @abstractmethod + def allows_all(self, other: VersionConstraint) -> bool: + raise NotImplementedError + + @abstractmethod + def allows_any(self, other: VersionConstraint) -> bool: + raise NotImplementedError + + @abstractmethod + def intersect(self, other: VersionConstraint) -> VersionConstraint: + raise NotImplementedError + + @abstractmethod + def union(self, other: VersionConstraint) -> VersionConstraint: + raise NotImplementedError + + @abstractmethod + def difference(self, other: VersionConstraint) -> VersionConstraint: + raise NotImplementedError + + @abstractmethod + def flatten(self) -> list[VersionRangeConstraint]: + raise NotImplementedError + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self}>" + + def __str__(self) -> str: + raise NotImplementedError + + def __hash__(self) -> int: + raise NotImplementedError + + def __eq__(self, other: object) -> bool: + raise NotImplementedError + + +def _is_wildcard_candidate( + min_: Version, max_: Version, *, inverted: bool = False +) -> bool: + if ( + min_.is_local() + or max_.is_local() + or min_.is_prerelease() + or max_.is_prerelease() + or min_.is_postrelease() is not max_.is_postrelease() + or min_.first_devrelease() != min_ + or (max_.is_devrelease() and max_.first_devrelease() != max_) + ): + return False + + first = max_ if inverted else min_ + second = min_ if inverted else max_ + + parts_first = list(first.parts) + parts_second = list(second.parts) + + # remove trailing zeros from second + while parts_second and parts_second[-1] == 0: + del parts_second[-1] + + # fill up first with zeros + parts_first += [0] * (len(parts_second) - len(parts_first)) + + # all exceeding parts of first must be zero + if set(parts_first[len(parts_second) :]) not in [set(), {0}]: + return False + + parts_first = parts_first[: len(parts_second)] + + if first.is_postrelease(): + assert first.post is not None + return parts_first == parts_second and first.post.next() == second.post + + return ( + parts_first[:-1] == parts_second[:-1] + and parts_first[-1] + 1 == parts_second[-1] + ) + + +def _single_wildcard_range_string(first: Version, second: Version) -> str: + if 
first.is_postrelease(): + base_version = str(first.without_devrelease()) + + else: + parts = list(second.parts) + + # remove trailing zeros from max + while parts and parts[-1] == 0: + del parts[-1] + + parts[-1] = parts[-1] - 1 + + base_version = ".".join(str(part) for part in parts) + + return f"{base_version}.*" diff --git a/conda_lock/_vendor/poetry/core/semver/version_range.py b/conda_lock/_vendor/poetry/core/constraints/version/version_range.py similarity index 53% rename from conda_lock/_vendor/poetry/core/semver/version_range.py rename to conda_lock/_vendor/poetry/core/constraints/version/version_range.py index ead2a45c..a09aa7f6 100644 --- a/conda_lock/_vendor/poetry/core/semver/version_range.py +++ b/conda_lock/_vendor/poetry/core/constraints/version/version_range.py @@ -1,94 +1,108 @@ +from __future__ import annotations + +from contextlib import suppress +from functools import cached_property from typing import TYPE_CHECKING -from typing import Any -from typing import List -from typing import Optional -from .empty_constraint import EmptyConstraint -from .version_constraint import VersionConstraint -from .version_union import VersionUnion +from conda_lock._vendor.poetry.core.constraints.version.empty_constraint import EmptyConstraint +from conda_lock._vendor.poetry.core.constraints.version.version_constraint import _is_wildcard_candidate +from conda_lock._vendor.poetry.core.constraints.version.version_constraint import ( + _single_wildcard_range_string, +) +from conda_lock._vendor.poetry.core.constraints.version.version_range_constraint import ( + VersionRangeConstraint, +) +from conda_lock._vendor.poetry.core.constraints.version.version_union import VersionUnion if TYPE_CHECKING: - from conda_lock._vendor.poetry.core.semver.version import Version - - from . 
import VersionTypes # noqa + from conda_lock._vendor.poetry.core.constraints.version.version import Version + from conda_lock._vendor.poetry.core.constraints.version.version_constraint import VersionConstraint -class VersionRange(VersionConstraint): +class VersionRange(VersionRangeConstraint): def __init__( self, - min=None, # type: Optional["Version"] - max=None, # type: Optional["Version"] - include_min=False, # type: bool - include_max=False, # type: bool - always_include_max_prerelease=False, # type: bool - ): - full_max = max - if ( - not always_include_max_prerelease - and not include_max - and full_max is not None - and not full_max.is_prerelease() - and not full_max.build - and ( - min is None - or not min.is_prerelease() - or not min.equals_without_prerelease(full_max) - ) - ): - full_max = full_max.first_prerelease - - self._min = min + min: Version | None = None, + max: Version | None = None, + include_min: bool = False, + include_max: bool = False, + ) -> None: self._max = max - self._full_max = full_max + self._min = min self._include_min = include_min self._include_max = include_max @property - def min(self): # type: () -> "Version" + def min(self) -> Version | None: return self._min @property - def max(self): # type: () -> "Version" + def max(self) -> Version | None: return self._max @property - def full_max(self): # type: () -> "Version" - return self._full_max - - @property - def include_min(self): # type: () -> bool + def include_min(self) -> bool: return self._include_min @property - def include_max(self): # type: () -> bool + def include_max(self) -> bool: return self._include_max - def is_empty(self): # type: () -> bool + def is_empty(self) -> bool: return False - def is_any(self): # type: () -> bool + def is_any(self) -> bool: return self._min is None and self._max is None - def allows(self, other): # type: ("Version") -> bool + def is_simple(self) -> bool: + return self._min is None or self._max is None + + def allows(self, other: Version) -> bool: if self._min is not None: - if other < self._min: + _this, _other = self.allowed_min, other + + assert _this is not None + + if not _this.is_postrelease() and _other.is_postrelease(): + # The exclusive ordered comparison >V MUST NOT allow a post-release + # of the given version unless V itself is a post release. + # https://peps.python.org/pep-0440/#exclusive-ordered-comparison + # e.g. "2.0.post1" does not match ">2" + _other = _other.without_postrelease() + + if not _this.is_local() and _other.is_local(): + # The exclusive ordered comparison >V MUST NOT match + # a local version of the specified version. + # https://peps.python.org/pep-0440/#exclusive-ordered-comparison + # e.g. 
"2.0+local.version" does not match ">2" + _other = other.without_local() + + if _other < _this: return False - if not self._include_min and other == self._min: + if not self._include_min and (_other == self._min or _other == _this): return False - if self.full_max is not None: - if other > self.full_max: + if self.max is not None: + _this, _other = self.allowed_max, other + + assert _this is not None + + if not _this.is_local() and _other.is_local(): + # allow weak equality to allow `3.0.0+local.1` for `<=3.0.0` + _other = _other.without_local() + + if _other > _this: return False - if not self._include_max and other == self.full_max: + if not self._include_max and (_other == self._max or _other == _this): return False return True - def allows_all(self, other): # type: ("VersionTypes") -> bool - from .version import Version + def allows_all(self, other: VersionConstraint) -> bool: + from conda_lock._vendor.poetry.core.constraints.version.version import Version if other.is_empty(): return True @@ -97,34 +111,39 @@ def allows_all(self, other): # type: ("VersionTypes") -> bool return self.allows(other) if isinstance(other, VersionUnion): - return all([self.allows_all(constraint) for constraint in other.ranges]) + return all(self.allows_all(constraint) for constraint in other.ranges) - if isinstance(other, VersionRange): + if isinstance(other, VersionRangeConstraint): return not other.allows_lower(self) and not other.allows_higher(self) - raise ValueError("Unknown VersionConstraint type {}.".format(other)) + raise ValueError(f"Unknown VersionConstraint type {other}.") - def allows_any(self, other): # type: ("VersionTypes") -> bool - from .version import Version + def allows_any(self, other: VersionConstraint) -> bool: + from conda_lock._vendor.poetry.core.constraints.version.version import Version if other.is_empty(): return False if isinstance(other, Version): - return self.allows(other) + if self.allows(other): + return True + + # Although `>=1.2.3+local` does not allow the exact version `1.2.3`, both of + # those versions do allow `1.2.3+local`. + return ( + self.min is not None and self.min.is_local() and other.allows(self.min) + ) if isinstance(other, VersionUnion): - return any([self.allows_any(constraint) for constraint in other.ranges]) + return any(self.allows_any(constraint) for constraint in other.ranges) - if isinstance(other, VersionRange): - return not other.is_strictly_lower(self) and not other.is_strictly_higher( - self - ) + if isinstance(other, VersionRangeConstraint): + return not (other.is_strictly_lower(self) or other.is_strictly_higher(self)) - raise ValueError("Unknown VersionConstraint type {}.".format(other)) + raise ValueError(f"Unknown VersionConstraint type {other}.") - def intersect(self, other): # type: ("VersionTypes") -> "VersionTypes" - from .version import Version + def intersect(self, other: VersionConstraint) -> VersionConstraint: + from conda_lock._vendor.poetry.core.constraints.version.version import Version if other.is_empty(): return other @@ -132,15 +151,25 @@ def intersect(self, other): # type: ("VersionTypes") -> "VersionTypes" if isinstance(other, VersionUnion): return other.intersect(self) - # A range and a Version just yields the version if it's in the range. if isinstance(other, Version): + # A range and a Version just yields the version if it's in the range. if self.allows(other): return other + # `>=1.2.3+local` intersects `1.2.3` to return `>=1.2.3+local,<1.2.4`. 
+ if self.min is not None and self.min.is_local() and other.allows(self.min): + upper = other.stable.next_patch() + return VersionRange( + min=self.min, + max=upper, + include_min=self.include_min, + include_max=False, + ) + return EmptyConstraint() - if not isinstance(other, VersionRange): - raise ValueError("Unknown VersionConstraint type {}.".format(other)) + if not isinstance(other, VersionRangeConstraint): + raise ValueError(f"Unknown VersionConstraint type {other}.") if self.allows_lower(other): if self.is_strictly_lower(other): @@ -170,6 +199,7 @@ def intersect(self, other): # type: ("VersionTypes") -> "VersionTypes" # Because we already verified that the lower range isn't strictly # lower, there must be some overlap. assert intersect_include_min and intersect_include_max + assert intersect_min is not None return intersect_min @@ -178,8 +208,8 @@ def intersect(self, other): # type: ("VersionTypes") -> "VersionTypes" intersect_min, intersect_max, intersect_include_min, intersect_include_max ) - def union(self, other): # type: ("VersionTypes") -> "VersionTypes" - from .version import Version + def union(self, other: VersionConstraint) -> VersionConstraint: + from conda_lock._vendor.poetry.core.constraints.version.version import Version if isinstance(other, Version): if self.allows(other): @@ -197,7 +227,7 @@ def union(self, other): # type: ("VersionTypes") -> "VersionTypes" return VersionUnion.of(self, other) - if isinstance(other, VersionRange): + if isinstance(other, VersionRangeConstraint): # If the two ranges don't overlap, we won't be able to create a single # VersionRange for both of them. edges_touch = ( @@ -230,8 +260,8 @@ def union(self, other): # type: ("VersionTypes") -> "VersionTypes" return VersionUnion.of(self, other) - def difference(self, other): # type: ("VersionTypes") -> "VersionTypes" - from .version import Version + def difference(self, other: VersionConstraint) -> VersionConstraint: + from conda_lock._vendor.poetry.core.constraints.version.version import Version if other.is_empty(): return self @@ -256,10 +286,11 @@ def difference(self, other): # type: ("VersionTypes") -> "VersionTypes" VersionRange(self.min, other, self.include_min, False), VersionRange(other, self.max, False, self.include_max), ) - elif isinstance(other, VersionRange): + elif isinstance(other, VersionRangeConstraint): if not self.allows_any(other): return self + before: VersionConstraint | None if not self.allows_lower(other): before = None elif self.min == other.min: @@ -269,6 +300,7 @@ def difference(self, other): # type: ("VersionTypes") -> "VersionTypes" self.min, other.min, self.include_min, not other.include_min ) + after: VersionConstraint | None if not self.allows_higher(other): after = None elif self.max == other.max: @@ -282,6 +314,7 @@ def difference(self, other): # type: ("VersionTypes") -> "VersionTypes" return EmptyConstraint() if before is None: + assert after is not None return after if after is None: @@ -289,8 +322,8 @@ def difference(self, other): # type: ("VersionTypes") -> "VersionTypes" return VersionUnion.of(before, after) elif isinstance(other, VersionUnion): - ranges = [] # type: List[VersionRange] - current = self + ranges: list[VersionRangeConstraint] = [] + current: VersionRangeConstraint = self for range in other.ranges: # Skip any ranges that are strictly lower than [current]. 
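For reference, a minimal sketch (not part of the patch) of the PEP 440 comparison behavior that the allows() hunk above implements, assuming Version and parse_constraint are re-exported from the vendored package as the diff's own imports suggest:

    from conda_lock._vendor.poetry.core.constraints.version import Version, parse_constraint

    gt2 = parse_constraint(">2")
    # The exclusive ordered comparison ">V" matches neither post-releases nor
    # local versions of V, but it does match later releases.
    assert not gt2.allows(Version.parse("2.0.post1"))
    assert not gt2.allows(Version.parse("2.0+local.1"))
    assert gt2.allows(Version.parse("2.1"))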
@@ -311,73 +344,46 @@ def difference(self, other): # type: ("VersionTypes") -> "VersionTypes" ranges.append(difference.ranges[0]) current = difference.ranges[-1] else: + assert isinstance(difference, VersionRangeConstraint) current = difference if not ranges: return current - return VersionUnion.of(*(ranges + [current])) + return VersionUnion.of(*([*ranges, current])) - raise ValueError("Unknown VersionConstraint type {}.".format(other)) - - def allows_lower(self, other): # type: (VersionRange) -> bool - if self.min is None: - return other.min is not None + raise ValueError(f"Unknown VersionConstraint type {other}.") - if other.min is None: - return False + def flatten(self) -> list[VersionRangeConstraint]: + return [self] - if self.min < other.min: - return True + @cached_property + def _single_wildcard_range_string(self) -> str: + if not self.is_single_wildcard_range: + raise ValueError("Not a valid wildcard range") - if self.min > other.min: - return False + assert self.min is not None + assert self.max is not None + return f"=={_single_wildcard_range_string(self.min, self.max)}" - return self.include_min and not other.include_min - - def allows_higher(self, other): # type: (VersionRange) -> bool - if self.full_max is None: - return other.max is not None - - if other.full_max is None: - return False - - if self.full_max < other.full_max: - return False - - if self.full_max > other.full_max: - return True - - return self.include_max and not other.include_max - - def is_strictly_lower(self, other): # type: (VersionRange) -> bool - if self.full_max is None or other.min is None: - return False - - if self.full_max < other.min: - return True - - if self.full_max > other.min: - return False - - return not self.include_max or not other.include_min - - def is_strictly_higher(self, other): # type: (VersionRange) -> bool - return other.is_strictly_lower(self) - - def is_adjacent_to(self, other): # type: (VersionRange) -> bool - if self.max != other.min: + @cached_property + def is_single_wildcard_range(self) -> bool: + # e.g. 
+ # - "1.*" equals ">=1.0.dev0, <2" (equivalent to ">=1.0.dev0, <2.0.dev0") + # - "1.0.*" equals ">=1.0.dev0, <1.1" + # - "1.2.*" equals ">=1.2.dev0, <1.3" + if ( + self.min is None + or self.max is None + or not self.include_min + or self.include_max + ): return False - return ( - self.include_max - and not other.include_min - or not self.include_max - and other.include_min - ) + return _is_wildcard_candidate(self.min, self.max) - def __eq__(self, other): # type: (Any) -> int - if not isinstance(other, VersionRange): + def __eq__(self, other: object) -> bool: + if not isinstance(other, VersionRangeConstraint): return False return ( @@ -387,55 +393,54 @@ def __eq__(self, other): # type: (Any) -> int and self._include_max == other.include_max ) - def __lt__(self, other): # type: (VersionRange) -> int + def __lt__(self, other: VersionRangeConstraint) -> bool: return self._cmp(other) < 0 - def __le__(self, other): # type: (VersionRange) -> int + def __le__(self, other: VersionRangeConstraint) -> bool: return self._cmp(other) <= 0 - def __gt__(self, other): # type: (VersionRange) -> int + def __gt__(self, other: VersionRangeConstraint) -> bool: return self._cmp(other) > 0 - def __ge__(self, other): # type: (VersionRange) -> int + def __ge__(self, other: VersionRangeConstraint) -> bool: return self._cmp(other) >= 0 - def _cmp(self, other): # type: (VersionRange) -> int + def _cmp(self, other: VersionRangeConstraint) -> int: if self.min is None: - if other.min is None: - return self._compare_max(other) - - return -1 + return self._compare_max(other) if other.min is None else -1 elif other.min is None: return 1 - result = self.min._cmp(other.min) - if result != 0: - return result + if self.min > other.min: + return 1 + elif self.min < other.min: + return -1 if self.include_min != other.include_min: return -1 if self.include_min else 1 return self._compare_max(other) - def _compare_max(self, other): # type: (VersionRange) -> int + def _compare_max(self, other: VersionRangeConstraint) -> int: if self.max is None: - if other.max is None: - return 0 - - return 1 + return 0 if other.max is None else 1 elif other.max is None: return -1 - result = self.max._cmp(other.max) - if result != 0: - return result + if self.max > other.max: + return 1 + elif self.max < other.max: + return -1 if self.include_max != other.include_max: return 1 if self.include_max else -1 return 0 - def __str__(self): # type: () -> str + def __str__(self) -> str: + with suppress(ValueError): + return self._single_wildcard_range_string + text = "" if self.min is not None: @@ -446,17 +451,15 @@ def __str__(self): # type: () -> str if self.min is not None: text += "," - text += "{}{}".format("<=" if self.include_max else "<", self.max.text) + op = "<=" if self.include_max else "<" + text += f"{op}{self.max.text}" if self.min is None and self.max is None: return "*" return text - def __repr__(self): # type: () -> str - return "".format(str(self)) - - def __hash__(self): # type: () -> int + def __hash__(self) -> int: return ( hash(self.min) ^ hash(self.max) diff --git a/conda_lock/_vendor/poetry/core/constraints/version/version_range_constraint.py b/conda_lock/_vendor/poetry/core/constraints/version/version_range_constraint.py new file mode 100644 index 00000000..c8c11915 --- /dev/null +++ b/conda_lock/_vendor/poetry/core/constraints/version/version_range_constraint.py @@ -0,0 +1,125 @@ +from __future__ import annotations + +from abc import abstractmethod +from functools import cached_property +from typing import TYPE_CHECKING + +from 
conda_lock._vendor.poetry.core.constraints.version.version_constraint import VersionConstraint + + + if TYPE_CHECKING: + from conda_lock._vendor.poetry.core.constraints.version.version import Version + + + class VersionRangeConstraint(VersionConstraint): + @property + @abstractmethod + def min(self) -> Version | None: + raise NotImplementedError + + @property + @abstractmethod + def max(self) -> Version | None: + raise NotImplementedError + + @property + @abstractmethod + def include_min(self) -> bool: + raise NotImplementedError + + @property + @abstractmethod + def include_max(self) -> bool: + raise NotImplementedError + + @property + def allowed_min(self) -> Version | None: + # That is a bit inaccurate because + # 1) The exclusive ordered comparison >V MUST NOT allow a post-release + # of the given version unless V itself is a post release. + # 2) The exclusive ordered comparison >V MUST NOT match + # a local version of the specified version. + # https://peps.python.org/pep-0440/#exclusive-ordered-comparison + # However, there is no specific min greater than the greatest post release + # or greatest local version identifier. These cases have to be handled by + # the callers of allowed_min. + return self.min + + @cached_property + def allowed_max(self) -> Version | None: + if self.max is None: + return None + + if self.include_max or self.max.is_unstable(): + return self.max + + if self.min == self.max and (self.include_min or self.include_max): + # this is an equality range + return self.max + + # The exclusive ordered comparison <V MUST NOT allow a pre-release + # of the specified version unless the specified version is itself a + # pre-release. + # https://peps.python.org/pep-0440/#exclusive-ordered-comparison + return self.max.first_devrelease() + + def allows_lower(self, other: VersionRangeConstraint) -> bool: + _this, _other = self.allowed_min, other.allowed_min + + if _this is None: + return _other is not None + + if _other is None: + return False + + if _this < _other: + return True + + if _this > _other: + return False + + return self.include_min and not other.include_min + + def allows_higher(self, other: VersionRangeConstraint) -> bool: + _this, _other = self.allowed_max, other.allowed_max + + if _this is None: + return _other is not None + + if _other is None: + return False + + if _this < _other: + return False + + if _this > _other: + return True + + return self.include_max and not other.include_max + + def is_strictly_lower(self, other: VersionRangeConstraint) -> bool: + _this, _other = self.allowed_max, other.allowed_min + + if _this is None or _other is None: + return False + + if _this < _other: + return True + + if _this > _other: + return False + + return not (self.include_max and other.include_min) + + def is_strictly_higher(self, other: VersionRangeConstraint) -> bool: + return other.is_strictly_lower(self) + + def is_adjacent_to(self, other: VersionRangeConstraint) -> bool: + if self.max != other.min: + return False + + return ( + self.include_max + and not other.include_min + or not self.include_max + and other.include_min + ) diff --git a/conda_lock/_vendor/poetry/core/constraints/version/version_union.py b/conda_lock/_vendor/poetry/core/constraints/version/version_union.py new file mode 100644 index 00000000..8b9aa517 --- /dev/null +++ b/conda_lock/_vendor/poetry/core/constraints/version/version_union.py @@ -0,0 +1,328 @@ +from __future__ import annotations + +import operator as op + +from functools import cached_property +from functools import reduce +from typing import TYPE_CHECKING + +from conda_lock._vendor.poetry.core.constraints.version.empty_constraint import EmptyConstraint +from conda_lock._vendor.poetry.core.constraints.version.version_constraint import VersionConstraint +from 
conda_lock._vendor.poetry.core.constraints.version.version_constraint import _is_wildcard_candidate +from conda_lock._vendor.poetry.core.constraints.version.version_constraint import ( + _single_wildcard_range_string, +) +from conda_lock._vendor.poetry.core.constraints.version.version_range_constraint import ( + VersionRangeConstraint, +) + + +if TYPE_CHECKING: + from conda_lock._vendor.poetry.core.constraints.version.version import Version + + +class VersionUnion(VersionConstraint): + """ + A version constraint representing a union of multiple disjoint version + ranges. + + An instance of this will only be created if the version can't be represented + as a non-compound value. + """ + + def __init__(self, *ranges: VersionRangeConstraint) -> None: + self._ranges = list(ranges) + + @property + def ranges(self) -> list[VersionRangeConstraint]: + return self._ranges + + @classmethod + def of(cls, *ranges: VersionConstraint) -> VersionConstraint: + from conda_lock._vendor.poetry.core.constraints.version.version_range import VersionRange + + flattened: list[VersionRangeConstraint] = [] + for constraint in ranges: + if constraint.is_empty(): + continue + + if isinstance(constraint, VersionUnion): + flattened += constraint.ranges + continue + + assert isinstance(constraint, VersionRangeConstraint) + flattened.append(constraint) + + if not flattened: + return EmptyConstraint() + + if any(constraint.is_any() for constraint in flattened): + return VersionRange() + + # Only allow Versions and VersionRanges here so we can more easily reason + # about everything in flattened. _EmptyVersions and VersionUnions are + # filtered out above. + for constraint in flattened: + if not isinstance(constraint, VersionRangeConstraint): + raise ValueError(f"Unknown VersionConstraint type {constraint}.") + + flattened.sort() + + merged: list[VersionRangeConstraint] = [] + for constraint in flattened: + # Merge this constraint with the previous one, but only if they touch. 
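+ # (Editorial illustration, not upstream code: ">=1,<2" and ">=2,<3" are
+ # adjacent, so of() merges them into ">=1,<3"; ">=1,<2" and ">=2.5,<3"
+ # neither touch nor overlap, so both are kept as separate ranges of the union.)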
+ if not merged or ( + not merged[-1].allows_any(constraint) + and not merged[-1].is_adjacent_to(constraint) + ): + merged.append(constraint) + else: + new_constraint = merged[-1].union(constraint) + assert isinstance(new_constraint, VersionRangeConstraint) + merged[-1] = new_constraint + + if len(merged) == 1: + return merged[0] + + return VersionUnion(*merged) + + def is_empty(self) -> bool: + return False + + def is_any(self) -> bool: + return False + + def is_simple(self) -> bool: + return self.excludes_single_version + + def allows(self, version: Version) -> bool: + if self.excludes_single_version: + # when excluded version is local, special handling is required + # to ensure that a constraint (!=2.0+deadbeef) will allow the + # provided version (2.0) + + excluded = self._excluded_single_version + + if excluded.is_local(): + return excluded != version + + return any(constraint.allows(version) for constraint in self._ranges) + + def allows_all(self, other: VersionConstraint) -> bool: + our_ranges = iter(self._ranges) + their_ranges = iter(other.flatten()) + + our_current_range = next(our_ranges, None) + their_current_range = next(their_ranges, None) + + while our_current_range and their_current_range: + if our_current_range.allows_all(their_current_range): + their_current_range = next(their_ranges, None) + else: + our_current_range = next(our_ranges, None) + + return their_current_range is None + + def allows_any(self, other: VersionConstraint) -> bool: + our_ranges = iter(self._ranges) + their_ranges = iter(other.flatten()) + + our_current_range = next(our_ranges, None) + their_current_range = next(their_ranges, None) + + while our_current_range and their_current_range: + if our_current_range.allows_any(their_current_range): + return True + + if their_current_range.allows_higher(our_current_range): + our_current_range = next(our_ranges, None) + else: + their_current_range = next(their_ranges, None) + + return False + + def intersect(self, other: VersionConstraint) -> VersionConstraint: + our_ranges = iter(self._ranges) + their_ranges = iter(other.flatten()) + new_ranges = [] + + our_current_range = next(our_ranges, None) + their_current_range = next(their_ranges, None) + + while our_current_range and their_current_range: + intersection = our_current_range.intersect(their_current_range) + + if not intersection.is_empty(): + new_ranges.append(intersection) + + if their_current_range.allows_higher(our_current_range): + our_current_range = next(our_ranges, None) + else: + their_current_range = next(their_ranges, None) + + return VersionUnion.of(*new_ranges) + + def union(self, other: VersionConstraint) -> VersionConstraint: + return VersionUnion.of(self, other) + + def difference(self, other: VersionConstraint) -> VersionConstraint: + our_ranges = iter(self._ranges) + their_ranges = iter(other.flatten()) + new_ranges: list[VersionConstraint] = [] + + state = { + "current": next(our_ranges, None), + "their_range": next(their_ranges, None), + } + + def their_next_range() -> bool: + state["their_range"] = next(their_ranges, None) + if state["their_range"]: + return True + + assert state["current"] is not None + new_ranges.append(state["current"]) + our_current = next(our_ranges, None) + while our_current: + new_ranges.append(our_current) + our_current = next(our_ranges, None) + + return False + + def our_next_range(include_current: bool = True) -> bool: + if include_current: + assert state["current"] is not None + new_ranges.append(state["current"]) + + our_current = next(our_ranges, None) + 
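+ # (Editorial note, not upstream code: their_next_range and our_next_range
+ # advance a merge-style sweep over the two sorted range lists; returning
+ # False here ends the outer loop once our ranges are exhausted.)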
if not our_current: + return False + + state["current"] = our_current + + return True + + while True: + if state["their_range"] is None: + break + + assert state["current"] is not None + if state["their_range"].is_strictly_lower(state["current"]): + if not their_next_range(): + break + + continue + + if state["their_range"].is_strictly_higher(state["current"]): + if not our_next_range(): + break + + continue + + difference = state["current"].difference(state["their_range"]) + if isinstance(difference, VersionUnion): + assert len(difference.ranges) == 2 + new_ranges.append(difference.ranges[0]) + state["current"] = difference.ranges[-1] + + if not their_next_range(): + break + elif difference.is_empty(): + if not our_next_range(False): + break + else: + assert isinstance(difference, VersionRangeConstraint) + state["current"] = difference + + if state["current"].allows_higher(state["their_range"]): + if not their_next_range(): + break + elif not our_next_range(): + break + + if not new_ranges: + return EmptyConstraint() + + if len(new_ranges) == 1: + return new_ranges[0] + + return VersionUnion.of(*new_ranges) + + def flatten(self) -> list[VersionRangeConstraint]: + return self.ranges + + @cached_property + def _exclude_single_wildcard_range_string(self) -> str: + """ + Helper method to convert this instance into a wild card range + string. + """ + if not self.excludes_single_wildcard_range: + raise ValueError("Not a valid wildcard range") + + idx_order = (0, 1) if self._ranges[0].max else (1, 0) + one = self._ranges[idx_order[0]] + two = self._ranges[idx_order[1]] + + assert one.max is not None + assert two.min is not None + return f"!={_single_wildcard_range_string(one.max, two.min)}" + + @cached_property + def excludes_single_wildcard_range(self) -> bool: + if len(self._ranges) != 2: + return False + + idx_order = (0, 1) if self._ranges[0].max else (1, 0) + one = self._ranges[idx_order[0]] + two = self._ranges[idx_order[1]] + + if ( + one.max is None + or one.include_max + or one.min is not None + or two.min is None + or not two.include_min + or two.max is not None + ): + return False + + return _is_wildcard_candidate(two.min, one.max, inverted=True) + + @cached_property + def excludes_single_version(self) -> bool: + from conda_lock._vendor.poetry.core.constraints.version.version import Version + + return isinstance(self._inverted, Version) + + @cached_property + def _excluded_single_version(self) -> Version: + from conda_lock._vendor.poetry.core.constraints.version.version import Version + + excluded = self._inverted + assert isinstance(excluded, Version) + return excluded + + @cached_property + def _inverted(self) -> VersionConstraint: + from conda_lock._vendor.poetry.core.constraints.version.version_range import VersionRange + + return VersionRange().difference(self) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, VersionUnion): + return False + + return self._ranges == other.ranges + + def __hash__(self) -> int: + return reduce(op.xor, map(hash, self._ranges)) + + def __str__(self) -> str: + if self.excludes_single_version: + return f"!={self._excluded_single_version}" + + try: + return self._exclude_single_wildcard_range_string + except ValueError: + return " || ".join([str(r) for r in self._ranges]) diff --git a/conda_lock/_vendor/poetry/core/exceptions/__init__.py b/conda_lock/_vendor/poetry/core/exceptions/__init__.py index d5ff9062..f2e8d9f8 100644 --- a/conda_lock/_vendor/poetry/core/exceptions/__init__.py +++ 
b/conda_lock/_vendor/poetry/core/exceptions/__init__.py @@ -1,4 +1,6 @@ +from __future__ import annotations + from conda_lock._vendor.poetry.core.exceptions.base import PoetryCoreException -__all__ = [clazz.__name__ for clazz in {PoetryCoreException}] +__all__ = ("PoetryCoreException",) diff --git a/conda_lock/_vendor/poetry/core/exceptions/base.py b/conda_lock/_vendor/poetry/core/exceptions/base.py index 41b1c3e8..43727628 100644 --- a/conda_lock/_vendor/poetry/core/exceptions/base.py +++ b/conda_lock/_vendor/poetry/core/exceptions/base.py @@ -1,2 +1,5 @@ +from __future__ import annotations + + class PoetryCoreException(Exception): pass diff --git a/conda_lock/_vendor/poetry/core/factory.py b/conda_lock/_vendor/poetry/core/factory.py index be157f00..7bce6439 100644 --- a/conda_lock/_vendor/poetry/core/factory.py +++ b/conda_lock/_vendor/poetry/core/factory.py @@ -1,35 +1,52 @@ -from __future__ import absolute_import -from __future__ import unicode_literals +from __future__ import annotations import logging +from pathlib import Path +from typing import TYPE_CHECKING from typing import Any from typing import Dict from typing import List -from typing import Optional from typing import Union from warnings import warn -from .json import validate_object -from .packages.dependency import Dependency -from .packages.project_package import ProjectPackage -from .poetry import Poetry -from .pyproject import PyProjectTOML -from .spdx import license_by_id -from .utils._compat import Path +from packaging.utils import canonicalize_name + +from conda_lock._vendor.poetry.core.utils.helpers import combine_unicode +from conda_lock._vendor.poetry.core.utils.helpers import readme_content_type + + +if TYPE_CHECKING: + from collections.abc import Mapping + + from packaging.utils import NormalizedName + + from conda_lock._vendor.poetry.core.packages.dependency import Dependency + from conda_lock._vendor.poetry.core.packages.dependency_group import DependencyGroup + from conda_lock._vendor.poetry.core.packages.project_package import ProjectPackage + from conda_lock._vendor.poetry.core.poetry import Poetry + from conda_lock._vendor.poetry.core.spdx.license import License + + DependencyConstraint = Union[str, Dict[str, Any]] + DependencyConfig = Mapping[ + str, Union[List[DependencyConstraint], DependencyConstraint] + ] logger = logging.getLogger(__name__) -class Factory(object): +class Factory: """ Factory class to create various elements needed by Poetry. 
""" def create_poetry( - self, cwd=None, with_dev=True - ): # type: (Optional[Path], bool) -> Poetry + self, cwd: Path | None = None, with_groups: bool = True + ) -> Poetry: + from conda_lock._vendor.poetry.core.poetry import Poetry + from conda_lock._vendor.poetry.core.pyproject.toml import PyProjectTOML + poetry_file = self.locate(cwd) local_config = PyProjectTOML(path=poetry_file).poetry_config @@ -38,85 +55,137 @@ def create_poetry( if check_result["errors"]: message = "" for error in check_result["errors"]: - message += " - {}\n".format(error) + message += f" - {error}\n" raise RuntimeError("The Poetry configuration is invalid:\n" + message) # Load package - name = local_config["name"] - version = local_config["version"] - package = ProjectPackage(name, version, version) - package.root_dir = poetry_file.parent - - for author in local_config["authors"]: - package.authors.append(author) + # If name or version were missing in package mode, we would have already + # raised an error, so we can safely assume they might only be missing + # in non-package mode and use some dummy values in this case. + name = local_config.get("name", "non-package-mode") + assert isinstance(name, str) + version = local_config.get("version", "0") + assert isinstance(version, str) + package = self.get_package(name, version) + package = self.configure_package( + package, local_config, poetry_file.parent, with_groups=with_groups + ) - for maintainer in local_config.get("maintainers", []): - package.maintainers.append(maintainer) + return Poetry(poetry_file, local_config, package) - package.description = local_config.get("description", "") - package.homepage = local_config.get("homepage") - package.repository_url = local_config.get("repository") - package.documentation_url = local_config.get("documentation") - try: - license_ = license_by_id(local_config.get("license", "")) - except ValueError: - license_ = None + @classmethod + def get_package(cls, name: str, version: str) -> ProjectPackage: + from conda_lock._vendor.poetry.core.packages.project_package import ProjectPackage - package.license = license_ - package.keywords = local_config.get("keywords", []) - package.classifiers = local_config.get("classifiers", []) + return ProjectPackage(name, version) - if "readme" in local_config: - package.readme = Path(poetry_file.parent) / local_config["readme"] + @classmethod + def _add_package_group_dependencies( + cls, + package: ProjectPackage, + group: str | DependencyGroup, + dependencies: DependencyConfig, + ) -> None: + from conda_lock._vendor.poetry.core.packages.dependency_group import MAIN_GROUP + + if isinstance(group, str): + if package.has_dependency_group(group): + group = package.dependency_group(group) + else: + from conda_lock._vendor.poetry.core.packages.dependency_group import DependencyGroup - if "platform" in local_config: - package.platform = local_config["platform"] + group = DependencyGroup(group) - if "dependencies" in local_config: - for name, constraint in local_config["dependencies"].items(): + for name, constraints in dependencies.items(): + _constraints = ( + constraints if isinstance(constraints, list) else [constraints] + ) + for _constraint in _constraints: if name.lower() == "python": - package.python_versions = constraint + if group.name == MAIN_GROUP and isinstance(_constraint, str): + package.python_versions = _constraint continue - if isinstance(constraint, list): - for _constraint in constraint: - package.add_dependency( - self.create_dependency( - name, _constraint, 
root_dir=package.root_dir - ) - ) + group.add_dependency( + cls.create_dependency( + name, + _constraint, + groups=[group.name], + root_dir=package.root_dir, + ) + ) - continue + package.add_dependency_group(group) - package.add_dependency( - self.create_dependency(name, constraint, root_dir=package.root_dir) - ) + @classmethod + def configure_package( + cls, + package: ProjectPackage, + config: dict[str, Any], + root: Path, + with_groups: bool = True, + ) -> ProjectPackage: + from conda_lock._vendor.poetry.core.packages.dependency import Dependency + from conda_lock._vendor.poetry.core.packages.dependency_group import MAIN_GROUP + from conda_lock._vendor.poetry.core.packages.dependency_group import DependencyGroup + from conda_lock._vendor.poetry.core.spdx.helpers import license_by_id + + package.root_dir = root + + package.authors = [ + combine_unicode(author) for author in config.get("authors", []) + ] + + package.maintainers = [ + combine_unicode(maintainer) for maintainer in config.get("maintainers", []) + ] + + package.description = config.get("description", "") + package.homepage = config.get("homepage") + package.repository_url = config.get("repository") + package.documentation_url = config.get("documentation") + try: + license_: License | None = license_by_id(config.get("license", "")) + except ValueError: + license_ = None - if with_dev and "dev-dependencies" in local_config: - for name, constraint in local_config["dev-dependencies"].items(): - if isinstance(constraint, list): - for _constraint in constraint: - package.add_dependency( - self.create_dependency( - name, - _constraint, - category="dev", - root_dir=package.root_dir, - ) - ) + package.license = license_ + package.keywords = config.get("keywords", []) + package.classifiers = config.get("classifiers", []) - continue + if "readme" in config: + if isinstance(config["readme"], str): + package.readmes = (root / config["readme"],) + else: + package.readmes = tuple(root / readme for readme in config["readme"]) - package.add_dependency( - self.create_dependency( - name, constraint, category="dev", root_dir=package.root_dir - ) + if "dependencies" in config: + cls._add_package_group_dependencies( + package=package, group=MAIN_GROUP, dependencies=config["dependencies"] + ) + + if with_groups and "group" in config: + for group_name, group_config in config["group"].items(): + group = DependencyGroup( + group_name, optional=group_config.get("optional", False) + ) + cls._add_package_group_dependencies( + package=package, + group=group, + dependencies=group_config["dependencies"], ) - extras = local_config.get("extras", {}) + if with_groups and "dev-dependencies" in config: + cls._add_package_group_dependencies( + package=package, group="dev", dependencies=config["dev-dependencies"] + ) + + package_extras: dict[NormalizedName, list[Dependency]] = {} + extras = config.get("extras", {}) for extra_name, requirements in extras.items(): - package.extras[extra_name] = [] + extra_name = canonicalize_name(extra_name) + package_extras[extra_name] = [] # Checking for dependency for req in requirements: @@ -124,21 +193,21 @@ def create_poetry( for dep in package.requires: if dep.name == req.name: - dep.in_extras.append(extra_name) - package.extras[extra_name].append(dep) + dep._in_extras = [*dep._in_extras, extra_name] + package_extras[extra_name].append(dep) - break + package.extras = package_extras - if "build" in local_config: - build = local_config["build"] + if "build" in config: + build = config["build"] if not isinstance(build, dict): 
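# (Clarifying note, not upstream code: "build" may be given either as a plain
# script string or as a table; a bare string is normalized to {"script": build}
# here before being stored on package.build_config.)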
build = {"script": build} package.build_config = build or {} - if "include" in local_config: + if "include" in config: package.include = [] - for include in local_config["include"]: + for include in config["include"]: if not isinstance(include, dict): include = {"path": include} @@ -149,34 +218,44 @@ def create_poetry( package.include.append(include) - if "exclude" in local_config: - package.exclude = local_config["exclude"] + if "exclude" in config: + package.exclude = config["exclude"] - if "packages" in local_config: - package.packages = local_config["packages"] + if "packages" in config: + package.packages = config["packages"] # Custom urls - if "urls" in local_config: - package.custom_urls = local_config["urls"] + if "urls" in config: + package.custom_urls = config["urls"] - return Poetry(poetry_file, local_config, package) + return package @classmethod def create_dependency( cls, - name, # type: str - constraint, # type: Union[str, Dict[str, Any]] - category="main", # type: str - root_dir=None, # type: Optional[Path] - ): # type: (...) -> Dependency - from .packages.constraints import parse_constraint as parse_generic_constraint - from .packages.directory_dependency import DirectoryDependency - from .packages.file_dependency import FileDependency - from .packages.url_dependency import URLDependency - from .packages.utils.utils import create_nested_marker - from .packages.vcs_dependency import VCSDependency - from .version.markers import AnyMarker - from .version.markers import parse_marker + name: str, + constraint: DependencyConstraint, + groups: list[str] | None = None, + root_dir: Path | None = None, + ) -> Dependency: + from conda_lock._vendor.poetry.core.constraints.generic import ( + parse_constraint as parse_generic_constraint, + ) + from conda_lock._vendor.poetry.core.constraints.version import ( + parse_constraint as parse_version_constraint, + ) + from conda_lock._vendor.poetry.core.packages.dependency import Dependency + from conda_lock._vendor.poetry.core.packages.dependency_group import MAIN_GROUP + from conda_lock._vendor.poetry.core.packages.directory_dependency import DirectoryDependency + from conda_lock._vendor.poetry.core.packages.file_dependency import FileDependency + from conda_lock._vendor.poetry.core.packages.url_dependency import URLDependency + from conda_lock._vendor.poetry.core.packages.utils.utils import create_nested_marker + from conda_lock._vendor.poetry.core.packages.vcs_dependency import VCSDependency + from conda_lock._vendor.poetry.core.version.markers import AnyMarker + from conda_lock._vendor.poetry.core.version.markers import parse_marker + + if groups is None: + groups = [MAIN_GROUP] if constraint is None: constraint = "*" @@ -188,17 +267,18 @@ def create_dependency( markers = constraint.get("markers") if "allows-prereleases" in constraint: message = ( - 'The "{}" dependency specifies ' + f'The "{name}" dependency specifies ' 'the "allows-prereleases" property, which is deprecated. ' - 'Use "allow-prereleases" instead.'.format(name) + 'Use "allow-prereleases" instead.' 
) - warn(message, DeprecationWarning) + warn(message, DeprecationWarning, stacklevel=2) logger.warning(message) allows_prereleases = constraint.get( "allow-prereleases", constraint.get("allows-prereleases", False) ) + dependency: Dependency if "git" in constraint: # VCS dependency dependency = VCSDependency( @@ -208,7 +288,8 @@ def create_dependency( branch=constraint.get("branch", None), tag=constraint.get("tag", None), rev=constraint.get("rev", None), - category=category, + directory=constraint.get("subdirectory", None), + groups=groups, optional=optional, develop=constraint.get("develop", False), extras=constraint.get("extras", []), @@ -219,7 +300,8 @@ def create_dependency( dependency = FileDependency( name, file_path, - category=category, + directory=constraint.get("subdirectory", None), + groups=groups, base=root_dir, extras=constraint.get("extras", []), ) @@ -235,16 +317,20 @@ def create_dependency( dependency = FileDependency( name, path, - category=category, + directory=constraint.get("subdirectory", None), + groups=groups, optional=optional, base=root_dir, extras=constraint.get("extras", []), ) else: + subdirectory = constraint.get("subdirectory", None) + if subdirectory: + path = path / subdirectory dependency = DirectoryDependency( name, path, - category=category, + groups=groups, optional=optional, base=root_dir, develop=constraint.get("develop", False), @@ -254,7 +340,8 @@ def create_dependency( dependency = URLDependency( name, constraint["url"], - category=category, + directory=constraint.get("subdirectory", None), + groups=groups, optional=optional, extras=constraint.get("extras", []), ) @@ -265,54 +352,69 @@ def create_dependency( name, version, optional=optional, - category=category, + groups=groups, allows_prereleases=allows_prereleases, extras=constraint.get("extras", []), ) - if not markers: - marker = AnyMarker() - if python_versions: - dependency.python_versions = python_versions - marker = marker.intersect( - parse_marker( - create_nested_marker( - "python_version", dependency.python_constraint - ) + marker = parse_marker(markers) if markers else AnyMarker() + + if python_versions: + marker = marker.intersect( + parse_marker( + create_nested_marker( + "python_version", parse_version_constraint(python_versions) ) ) + ) - if platform: - marker = marker.intersect( - parse_marker( - create_nested_marker( - "sys_platform", parse_generic_constraint(platform) - ) + if platform: + marker = marker.intersect( + parse_marker( + create_nested_marker( + "sys_platform", parse_generic_constraint(platform) ) ) - else: - marker = parse_marker(markers) + ) if not marker.is_any(): dependency.marker = marker dependency.source_name = constraint.get("source") else: - dependency = Dependency(name, constraint, category=category) + dependency = Dependency(name, constraint, groups=groups) return dependency @classmethod def validate( - cls, config, strict=False - ): # type: (dict, bool) -> Dict[str, List[str]] + cls, config: dict[str, Any], strict: bool = False + ) -> dict[str, list[str]]: """ Checks the validity of a configuration """ - result = {"errors": [], "warnings": []} + from conda_lock._vendor.poetry.core.json import validate_object + + result: dict[str, list[str]] = {"errors": [], "warnings": []} # Schema validation errors validation_errors = validate_object(config, "poetry-schema") + # json validation may only say "data cannot be validated by any definition", + # which is quite vague, so we try to give a more precise error message + generic_error = "data cannot be validated by any 
definition" + if generic_error in validation_errors: + package_mode = config.get("package-mode", True) + if not isinstance(package_mode, bool): + validation_errors[validation_errors.index(generic_error)] = ( + f"Invalid value for package-mode: {package_mode}" + ) + elif package_mode: + required = {"name", "version", "description", "authors"} + if missing := required.difference(config): + validation_errors[validation_errors.index(generic_error)] = ( + f"The fields {sorted(missing)} are required in package mode." + ) + result["errors"] += validation_errors if strict: @@ -331,33 +433,69 @@ def validate( if "allows-prereleases" in constraint: result["warnings"].append( - 'The "{}" dependency specifies ' + f'The "{name}" dependency specifies ' 'the "allows-prereleases" property, which is deprecated. ' - 'Use "allow-prereleases" instead.'.format(name) + 'Use "allow-prereleases" instead.' ) + if "extras" in config: + for extra_name, requirements in config["extras"].items(): + extra_name = canonicalize_name(extra_name) + + for req in requirements: + req_name = canonicalize_name(req) + for dependency in config.get("dependencies", {}): + dep_name = canonicalize_name(dependency) + if req_name == dep_name: + break + else: + result["errors"].append( + f'Cannot find dependency "{req}" for extra ' + f'"{extra_name}" in main dependencies.' + ) + # Checking for scripts with extras if "scripts" in config: scripts = config["scripts"] + config_extras = config.get("extras", {}) + for name, script in scripts.items(): if not isinstance(script, dict): continue - extras = script["extras"] + extras = script.get("extras", []) + if extras: + result["warnings"].append( + f'The script "{name}" depends on an extra. Scripts' + " depending on extras are deprecated and support for them" + " will be removed in a future version of" + " poetry/poetry-core. See" + " https://packaging.python.org/en/latest/specifications/entry-points/#data-model" + " for details." + ) for extra in extras: - if extra not in config["extras"]: + if extra not in config_extras: result["errors"].append( - 'Script "{}" requires extra "{}" which is not defined.'.format( - name, extra - ) + f'The script "{name}" requires extra "{extra}"' + " which is not defined." 
) + # Checking types of all readme files (must match) + if "readme" in config and not isinstance(config["readme"], str): + readme_types = {readme_content_type(r) for r in config["readme"]} + if len(readme_types) > 1: + result["errors"].append( + "Declared README files must be of same type: found" + f" {', '.join(sorted(readme_types))}" + ) + return result @classmethod - def locate(cls, cwd): # type: (Path) -> Path - candidates = [Path(cwd)] - candidates.extend(Path(cwd).parents) + def locate(cls, cwd: Path | None = None) -> Path: + cwd = Path(cwd or Path.cwd()) + candidates = [cwd] + candidates.extend(cwd.parents) for path in candidates: poetry_file = path / "pyproject.toml" @@ -367,7 +505,5 @@ def locate(cls, cwd): # type: (Path) -> Path else: raise RuntimeError( - "Poetry could not find a pyproject.toml file in {} or its parents".format( - cwd - ) + f"Poetry could not find a pyproject.toml file in {cwd} or its parents" ) diff --git a/conda_lock/_vendor/poetry/core/json/__init__.py b/conda_lock/_vendor/poetry/core/json/__init__.py index 83ecab77..035dd906 100644 --- a/conda_lock/_vendor/poetry/core/json/__init__.py +++ b/conda_lock/_vendor/poetry/core/json/__init__.py @@ -1,41 +1,54 @@ +from __future__ import annotations + import json -import os +import sys -from io import open -from typing import List +from pathlib import Path +from typing import TYPE_CHECKING +from typing import Any -from jsonschema import Draft7Validator +import fastjsonschema +from fastjsonschema.exceptions import JsonSchemaException -SCHEMA_DIR = os.path.join(os.path.dirname(__file__), "schemas") +SCHEMA_DIR = Path(__file__).parent / "schemas" -class ValidationError(ValueError): - pass +if sys.version_info < (3, 9): + def _get_schema_file(schema_name: str) -> Path: + return SCHEMA_DIR / f"{schema_name}.json" -def validate_object(obj, schema_name): # type: (dict, str) -> List[str] - schema = os.path.join(SCHEMA_DIR, "{}.json".format(schema_name)) +else: + from importlib.resources import files - if not os.path.exists(schema): - raise ValueError("Schema {} does not exist.".format(schema_name)) + if TYPE_CHECKING: + from importlib.abc import Traversable - with open(schema, encoding="utf-8") as f: - schema = json.loads(f.read()) + def _get_schema_file(schema_name: str) -> Traversable: + return files(__package__) / "schemas" / f"{schema_name}.json" - validator = Draft7Validator(schema) - validation_errors = sorted(validator.iter_errors(obj), key=lambda e: e.path) - errors = [] +class ValidationError(ValueError): + pass - for error in validation_errors: - message = error.message - if error.path: - message = "[{}] {}".format( - ".".join(str(x) for x in error.absolute_path), message - ) - errors.append(message) +def validate_object(obj: dict[str, Any], schema_name: str) -> list[str]: + schema_file = _get_schema_file(schema_name) + + if not schema_file.is_file(): + raise ValueError(f"Schema {schema_name} does not exist.") + + with schema_file.open(encoding="utf-8") as f: + schema = json.load(f) + + validate = fastjsonschema.compile(schema) + + errors = [] + try: + validate(obj) + except JsonSchemaException as e: + errors = [e.message] return errors diff --git a/conda_lock/_vendor/poetry/core/json/schemas/poetry-schema.json b/conda_lock/_vendor/poetry/core/json/schemas/poetry-schema.json index 81664910..b534e3b6 100644 --- a/conda_lock/_vendor/poetry/core/json/schemas/poetry-schema.json +++ b/conda_lock/_vendor/poetry/core/json/schemas/poetry-schema.json @@ -1,591 +1,688 @@ { - "$schema": 
"http://json-schema.org/draft-04/schema#", - "name": "Package", - "type": "object", - "additionalProperties": false, - "required": [ + "$schema": "http://json-schema.org/draft-04/schema#", + "name": "Package", + "type": "object", + "additionalProperties": true, + "anyOf": [ + { + "required": [ + "package-mode" + ], + "properties": { + "package-mode": { + "enum": [ + false + ] + } + } + }, + { + "required": [ "name", "version", - "description" - ], - "properties": { - "name": { - "type": "string", - "description": "Package name." - }, - "version": { - "type": "string", - "description": "Package version." - }, - "description": { - "type": "string", - "description": "Short package description." - }, - "keywords": { - "type": "array", - "items": { - "type": "string", - "description": "A tag/keyword that this package relates to." - } - }, - "homepage": { - "type": "string", - "description": "Homepage URL for the project.", - "format": "uri" - }, - "repository": { - "type": "string", - "description": "Repository URL for the project.", - "format": "uri" - }, - "documentation": { - "type": "string", - "description": "Documentation URL for the project.", - "format": "uri" - }, - "license": { + "description", + "authors" + ] + } + ], + "properties": { + "package-mode": { + "type": "boolean", + "default": true, + "description": "Whether Poetry is operated in package mode or non-package mode." + }, + "name": { + "type": "string", + "description": "Package name." + }, + "version": { + "type": "string", + "description": "Package version." + }, + "description": { + "type": "string", + "description": "Short package description.", + "pattern": "\\A[^\\n]*\\Z" + }, + "keywords": { + "type": "array", + "items": { + "type": "string", + "description": "A tag/keyword that this package relates to." + } + }, + "homepage": { + "type": "string", + "description": "Homepage URL for the project.", + "format": "uri" + }, + "repository": { + "type": "string", + "description": "Repository URL for the project.", + "format": "uri" + }, + "documentation": { + "type": "string", + "description": "Documentation URL for the project.", + "format": "uri" + }, + "license": { + "type": "string", + "description": "License name." + }, + "authors": { + "$ref": "#/definitions/authors" + }, + "maintainers": { + "$ref": "#/definitions/maintainers" + }, + "readme": { + "anyOf": [ + { + "type": "string", + "description": "The path to the README file." + }, + { + "type": "array", + "description": "A list of paths to the readme files.", + "items": { + "type": "string" + } + } + ] + }, + "classifiers": { + "type": "array", + "description": "A list of trove classifiers." + }, + "packages": { + "type": "array", + "description": "A list of packages to include in the final distribution.", + "items": { + "type": "object", + "description": "Information about where the package resides.", + "additionalProperties": false, + "required": [ + "include" + ], + "properties": { + "include": { + "$ref": "#/definitions/include-path" + }, + "from": { "type": "string", - "description": "License name." - }, - "authors": { - "$ref": "#/definitions/authors" - }, - "maintainers": { - "$ref": "#/definitions/maintainers" - }, - "readme": { + "description": "Where the source directory of the package resides." + }, + "format": { + "$ref": "#/definitions/package-formats" + }, + "to": { "type": "string", - "description": "The path to the README file" - }, - "classifiers": { - "type": "array", - "description": "A list of trove classifers." 
- }, - "packages": { - "type": "array", - "description": "A list of packages to include in the final distribution.", - "items": { - "type": "object", - "description": "Information about where the package resides.", - "additionalProperties": false, - "required": [ - "include" - ], - "properties": { - "include": { - "$ref": "#/definitions/include-path" - }, - "from": { - "type": "string", - "description": "Where the source directory of the package resides." - }, - "format": { - "$ref": "#/definitions/package-formats" - } - } - } - }, - "include": { - "type": "array", - "description": "A list of files and folders to include.", - "items": { - "anyOf": [ - { - "$ref": "#/definitions/include-path" - }, - { - "type": "object", - "additionalProperties": false, - "required": [ - "path" - ], - "properties": { - "path": { - "$ref": "#/definitions/include-path" - }, - "format": { - "$ref": "#/definitions/package-formats" - } - } - } - ] - } - }, - "exclude": { - "type": "array", - "description": "A list of files and folders to exclude." - }, - "dependencies": { + "description": "Where the package should be installed in the final distribution." + } + } + } + }, + "include": { + "type": "array", + "description": "A list of files and folders to include.", + "items": { + "anyOf": [ + { + "$ref": "#/definitions/include-path" + }, + { "type": "object", - "description": "This is a hash of package name (keys) and version constraints (values) that are required to run this package.", + "additionalProperties": false, "required": [ - "python" + "path" ], "properties": { - "python": { - "type": "string", - "description": "The Python versions the package is compatible with." - } - }, - "$ref": "#/definitions/dependencies", - "additionalProperties": false - }, - "dev-dependencies": { - "type": "object", - "description": "This is a hash of package name (keys) and version constraints (values) that this package requires for developing it (testing tools and such).", - "$ref": "#/definitions/dependencies", - "additionalProperties": false - }, - "extras": { - "type": "object", - "patternProperties": { - "^[a-zA-Z-_.0-9]+$": { - "type": "array", - "items": { - "type": "string" - } - } + "path": { + "$ref": "#/definitions/include-path" + }, + "format": { + "$ref": "#/definitions/package-formats" + } } - }, - "build": { - "$ref": "#/definitions/build-section" - }, - "source": { - "type": "array", - "description": "A set of additional repositories where packages can be found.", - "additionalProperties": { - "$ref": "#/definitions/repository" + } + ] + } + }, + "exclude": { + "type": "array", + "description": "A list of files and folders to exclude." + }, + "dependencies": { + "type": "object", + "description": "This is a hash of package name (keys) and version constraints (values) that are required to run this package.", + "required": [ + "python" + ], + "properties": { + "python": { + "type": "string", + "description": "The Python versions the package is compatible with." 
+ } + }, + "$ref": "#/definitions/dependencies", + "additionalProperties": false + }, + "dev-dependencies": { + "type": "object", + "description": "This is a hash of package name (keys) and version constraints (values) that this package requires for developing it (testing tools and such).", + "$ref": "#/definitions/dependencies", + "additionalProperties": false + }, + "extras": { + "type": "object", + "patternProperties": { + "^[a-zA-Z-_.0-9]+$": { + "type": "array", + "items": { + "type": "string", + "pattern": "^[a-zA-Z-_.0-9]+$" + } + } + } + }, + "group": { + "type": "object", + "description": "This represents groups of dependencies", + "patternProperties": { + "^[a-zA-Z-_.0-9]+$": { + "type": "object", + "description": "This represents a single dependency group", + "required": [ + "dependencies" + ], + "properties": { + "optional": { + "type": "boolean", + "description": "Whether the dependency group is optional or not" }, - "items": { - "$ref": "#/definitions/repository" - } - }, - "scripts": { - "type": "object", - "description": "A hash of scripts to be installed.", - "items": { - "type": "string" + "dependencies": { + "type": "object", + "description": "The dependencies of this dependency group", + "$ref": "#/definitions/dependencies", + "additionalProperties": false } - }, - "plugins": { - "type": "object", - "description": "A hash of hashes representing plugins", - "patternProperties": { - "^[a-zA-Z-_.0-9]+$": { - "type": "object", - "patternProperties": { - "^[a-zA-Z-_.0-9]+$": { - "type": "string" - } - } - } - } - }, - "urls": { - "type": "object", - "patternProperties": { - "^.+$": { - "type": "string", - "description": "The full url of the custom url." - } + }, + "additionalProperties": false + } + } + }, + "build": { + "$ref": "#/definitions/build-section" + }, + "scripts": { + "type": "object", + "description": "A hash of scripts to be installed.", + "patternProperties": { + "^[a-zA-Z-_.0-9]+$": { + "oneOf": [ + { + "$ref": "#/definitions/script-legacy" + }, + { + "$ref": "#/definitions/script-table" } + ] } + } }, - "definitions": { - "authors": { - "type": "array", - "description": "List of authors that contributed to the package. This is typically the main maintainers, not the full list.", - "items": { - "type": "string" + "plugins": { + "type": "object", + "description": "A hash of hashes representing plugins", + "patternProperties": { + "^[a-zA-Z-_.0-9]+$": { + "type": "object", + "patternProperties": { + "^[a-zA-Z-_.0-9]+$": { + "type": "string" } - }, - "maintainers": { - "type": "array", - "description": "List of maintainers, other than the original author(s), that upkeep the package.", - "items": { - "type": "string" + } + } + } + }, + "urls": { + "type": "object", + "patternProperties": { + "^.+$": { + "type": "string", + "description": "The full url of the custom url." + } + } + } + }, + "definitions": { + "authors": { + "type": "array", + "description": "List of authors that contributed to the package. This is typically the main maintainers, not the full list.", + "items": { + "type": "string" + } + }, + "maintainers": { + "type": "array", + "description": "List of maintainers, other than the original author(s), that upkeep the package.", + "items": { + "type": "string" + } + }, + "include-path": { + "type": "string", + "description": "Path to file or directory to include." + }, + "package-format": { + "type": "string", + "enum": [ + "sdist", + "wheel" + ], + "description": "A Python packaging format." 
+ }, + "package-formats": { + "oneOf": [ + { + "$ref": "#/definitions/package-format" + }, + { + "type": "array", + "items": { + "$ref": "#/definitions/package-format" + } + } + ], + "description": "The format(s) for which the package must be included." + }, + "dependencies": { + "type": "object", + "patternProperties": { + "^[a-zA-Z-_.0-9]+$": { + "oneOf": [ + { + "$ref": "#/definitions/dependency" + }, + { + "$ref": "#/definitions/long-dependency" + }, + { + "$ref": "#/definitions/git-dependency" + }, + { + "$ref": "#/definitions/file-dependency" + }, + { + "$ref": "#/definitions/path-dependency" + }, + { + "$ref": "#/definitions/url-dependency" + }, + { + "$ref": "#/definitions/multiple-constraints-dependency" } + ] + } + } + }, + "dependency": { + "type": "string", + "description": "The constraint of the dependency." + }, + "long-dependency": { + "type": "object", + "required": [ + "version" + ], + "additionalProperties": false, + "properties": { + "version": { + "type": "string", + "description": "The constraint of the dependency." }, - "include-path": { - "type": "string", - "description": "Path to file or directory to include." - }, - "package-format": { - "type": "string", - "enum": ["sdist", "wheel"], - "description": "A Python packaging format." + "python": { + "type": "string", + "description": "The python versions for which the dependency should be installed." }, - "package-formats": { - "oneOf": [ - {"$ref": "#/definitions/package-format"}, - {"type": "array", "items": {"$ref": "#/definitions/package-format"}} - ], - "description": "The format(s) for which the package must be included." + "platform": { + "type": "string", + "description": "The platform(s) for which the dependency should be installed." }, - "dependencies": { - "type": "object", - "patternProperties": { - "^[a-zA-Z-_.0-9]+$": { - "oneOf": [ - { - "$ref": "#/definitions/dependency" - }, - { - "$ref": "#/definitions/long-dependency" - }, - { - "$ref": "#/definitions/git-dependency" - }, - { - "$ref": "#/definitions/file-dependency" - }, - { - "$ref": "#/definitions/path-dependency" - }, - { - "$ref": "#/definitions/url-dependency" - }, - { - "$ref": "#/definitions/multiple-constraints-dependency" - } - ] - } - } + "markers": { + "type": "string", + "description": "The PEP 508 compliant environment markers for which the dependency should be installed." }, - "dependency": { - "type": "string", - "description": "The constraint of the dependency." + "allow-prereleases": { + "type": "boolean", + "description": "Whether the dependency allows prereleases or not." }, - "long-dependency": { - "type": "object", - "required": [ - "version" - ], - "additionalProperties": false, - "properties": { - "version": { - "type": "string", - "description": "The constraint of the dependency." - }, - "python": { - "type": "string", - "description": "The python versions for which the dependency should be installed." - }, - "platform": { - "type": "string", - "description": "The platform(s) for which the dependency should be installed." - }, - "markers": { - "type": "string", - "description": "The PEP 508 compliant environment markers for which the dependency should be installed." - }, - "allow-prereleases": { - "type": "boolean", - "description": "Whether the dependency allows prereleases or not." - }, - "allows-prereleases": { - "type": "boolean", - "description": "Whether the dependency allows prereleases or not." - }, - "optional": { - "type": "boolean", - "description": "Whether the dependency is optional or not." 
- }, - "extras": { - "type": "array", - "description": "The required extras for this dependency.", - "items": { - "type": "string" - } - }, - "source": { - "type": "string", - "description": "The exclusive source used to search for this dependency." - } - } + "allows-prereleases": { + "type": "boolean", + "description": "Whether the dependency allows prereleases or not." }, - "git-dependency": { - "type": "object", - "required": [ - "git" - ], - "additionalProperties": false, - "properties": { - "git": { - "type": "string", - "description": "The url of the git repository.", - "format": "uri" - }, - "branch": { - "type": "string", - "description": "The branch to checkout." - }, - "tag": { - "type": "string", - "description": "The tag to checkout." - }, - "rev": { - "type": "string", - "description": "The revision to checkout." - }, - "python": { - "type": "string", - "description": "The python versions for which the dependency should be installed." - }, - "platform": { - "type": "string", - "description": "The platform(s) for which the dependency should be installed." - }, - "markers": { - "type": "string", - "description": "The PEP 508 compliant environment markers for which the dependency should be installed." - }, - "allow-prereleases": { - "type": "boolean", - "description": "Whether the dependency allows prereleases or not." - }, - "allows-prereleases": { - "type": "boolean", - "description": "Whether the dependency allows prereleases or not." - }, - "optional": { - "type": "boolean", - "description": "Whether the dependency is optional or not." - }, - "extras": { - "type": "array", - "description": "The required extras for this dependency.", - "items": { - "type": "string" - } - }, - "develop": { - "type": "boolean", - "description": "Whether to install the dependency in development mode." - } - } + "optional": { + "type": "boolean", + "description": "Whether the dependency is optional or not." }, - "file-dependency": { - "type": "object", - "required": [ - "file" - ], - "additionalProperties": false, - "properties": { - "file": { - "type": "string", - "description": "The path to the file." - }, - "python": { - "type": "string", - "description": "The python versions for which the dependency should be installed." - }, - "platform": { - "type": "string", - "description": "The platform(s) for which the dependency should be installed." - }, - "markers": { - "type": "string", - "description": "The PEP 508 compliant environment markers for which the dependency should be installed." - }, - "optional": { - "type": "boolean", - "description": "Whether the dependency is optional or not." - }, - "extras": { - "type": "array", - "description": "The required extras for this dependency.", - "items": { - "type": "string" - } - } - } - }, - "path-dependency": { - "type": "object", - "required": [ - "path" - ], - "additionalProperties": false, - "properties": { - "path": { - "type": "string", - "description": "The path to the dependency." - }, - "python": { - "type": "string", - "description": "The python versions for which the dependency should be installed." - }, - "platform": { - "type": "string", - "description": "The platform(s) for which the dependency should be installed." - }, - "markers": { - "type": "string", - "description": "The PEP 508 compliant environment markers for which the dependency should be installed." - }, - "optional": { - "type": "boolean", - "description": "Whether the dependency is optional or not." 
- }, - "extras": { - "type": "array", - "description": "The required extras for this dependency.", - "items": { - "type": "string" - } - }, - "develop": { - "type": "boolean", - "description": "Whether to install the dependency in development mode." - } - } + "extras": { + "type": "array", + "description": "The required extras for this dependency.", + "items": { + "type": "string" + } }, - "url-dependency": { - "type": "object", - "required": [ - "url" - ], - "additionalProperties": false, - "properties": { - "url": { - "type": "string", - "description": "The url to the file." - }, - "python": { - "type": "string", - "description": "The python versions for which the dependency should be installed." - }, - "platform": { - "type": "string", - "description": "The platform(s) for which the dependency should be installed." - }, - "markers": { - "type": "string", - "description": "The PEP 508 compliant environment markers for which the dependency should be installed." - }, - "optional": { - "type": "boolean", - "description": "Whether the dependency is optional or not." - }, - "extras": { - "type": "array", - "description": "The required extras for this dependency.", - "items": { - "type": "string" - } - } - } + "source": { + "type": "string", + "description": "The exclusive source used to search for this dependency." + } + } + }, + "git-dependency": { + "type": "object", + "required": [ + "git" + ], + "additionalProperties": false, + "properties": { + "git": { + "type": "string", + "description": "The url of the git repository." + }, + "branch": { + "type": "string", + "description": "The branch to checkout." + }, + "tag": { + "type": "string", + "description": "The tag to checkout." + }, + "rev": { + "type": "string", + "description": "The revision to checkout." + }, + "subdirectory": { + "type": "string", + "description": "The relative path to the directory where the package is located." + }, + "python": { + "type": "string", + "description": "The python versions for which the dependency should be installed." + }, + "platform": { + "type": "string", + "description": "The platform(s) for which the dependency should be installed." + }, + "markers": { + "type": "string", + "description": "The PEP 508 compliant environment markers for which the dependency should be installed." + }, + "allow-prereleases": { + "type": "boolean", + "description": "Whether the dependency allows prereleases or not." + }, + "allows-prereleases": { + "type": "boolean", + "description": "Whether the dependency allows prereleases or not." + }, + "optional": { + "type": "boolean", + "description": "Whether the dependency is optional or not." }, - "multiple-constraints-dependency": { - "type": "array", - "minItems": 1, - "items": { - "oneOf": [ - { - "$ref": "#/definitions/dependency" - }, - { - "$ref": "#/definitions/long-dependency" - }, - { - "$ref": "#/definitions/git-dependency" - }, - { - "$ref": "#/definitions/file-dependency" - }, - { - "$ref": "#/definitions/path-dependency" - }, - { - "$ref": "#/definitions/url-dependency" - } - ] - } + "extras": { + "type": "array", + "description": "The required extras for this dependency.", + "items": { + "type": "string" + } + }, + "develop": { + "type": "boolean", + "description": "Whether to install the dependency in development mode." + } + } + }, + "file-dependency": { + "type": "object", + "required": [ + "file" + ], + "additionalProperties": false, + "properties": { + "file": { + "type": "string", + "description": "The path to the file." 
+ }, + "subdirectory": { + "type": "string", + "description": "The relative path to the directory where the package is located." + }, + "python": { + "type": "string", + "description": "The python versions for which the dependency should be installed." + }, + "platform": { + "type": "string", + "description": "The platform(s) for which the dependency should be installed." + }, + "markers": { + "type": "string", + "description": "The PEP 508 compliant environment markers for which the dependency should be installed." + }, + "optional": { + "type": "boolean", + "description": "Whether the dependency is optional or not." }, - "scripts": { - "type": "object", - "patternProperties": { - "^[a-zA-Z-_.0-9]+$": { - "oneOf": [ - { - "$ref": "#/definitions/script" - }, - { - "$ref": "#/definitions/extra-script" - } - ] - } - } + "extras": { + "type": "array", + "description": "The required extras for this dependency.", + "items": { + "type": "string" + } + } + } + }, + "path-dependency": { + "type": "object", + "required": [ + "path" + ], + "additionalProperties": false, + "properties": { + "path": { + "type": "string", + "description": "The path to the dependency." + }, + "subdirectory": { + "type": "string", + "description": "The relative path to the directory where the package is located." + }, + "python": { + "type": "string", + "description": "The python versions for which the dependency should be installed." + }, + "platform": { + "type": "string", + "description": "The platform(s) for which the dependency should be installed." + }, + "markers": { + "type": "string", + "description": "The PEP 508 compliant environment markers for which the dependency should be installed." + }, + "optional": { + "type": "boolean", + "description": "Whether the dependency is optional or not." }, - "script": { - "type": "string", - "description": "A simple script pointing to a callable object." + "extras": { + "type": "array", + "description": "The required extras for this dependency.", + "items": { + "type": "string" + } + }, + "develop": { + "type": "boolean", + "description": "Whether to install the dependency in development mode." + } + } + }, + "url-dependency": { + "type": "object", + "required": [ + "url" + ], + "additionalProperties": false, + "properties": { + "url": { + "type": "string", + "description": "The url to the file." + }, + "subdirectory": { + "type": "string", + "description": "The relative path to the directory where the package is located." + }, + "python": { + "type": "string", + "description": "The python versions for which the dependency should be installed." + }, + "platform": { + "type": "string", + "description": "The platform(s) for which the dependency should be installed." + }, + "markers": { + "type": "string", + "description": "The PEP 508 compliant environment markers for which the dependency should be installed." + }, + "optional": { + "type": "boolean", + "description": "Whether the dependency is optional or not." 
}, - "extra-script": { - "type": "object", - "description": "A script that should be installed only if extras are activated.", - "additionalProperties": false, - "properties": { - "callable": { - "$ref": "#/definitions/script" - }, - "extras": { - "type": "array", - "description": "The required extras for this script.", - "items": { - "type": "string" - } - } - } + "extras": { + "type": "array", + "description": "The required extras for this dependency.", + "items": { + "type": "string" + } + } + } + }, + "multiple-constraints-dependency": { + "type": "array", + "minItems": 1, + "items": { + "oneOf": [ + { + "$ref": "#/definitions/dependency" + }, + { + "$ref": "#/definitions/long-dependency" + }, + { + "$ref": "#/definitions/git-dependency" + }, + { + "$ref": "#/definitions/file-dependency" + }, + { + "$ref": "#/definitions/path-dependency" + }, + { + "$ref": "#/definitions/url-dependency" + } + ] + } + }, + "script-table": { + "type": "object", + "oneOf": [ + { + "$ref": "#/definitions/extra-script-legacy" + }, + { + "$ref": "#/definitions/extra-scripts" + } + ] + }, + "script-legacy": { + "type": "string", + "description": "A simple script pointing to a callable object." + }, + "extra-scripts": { + "type": "object", + "description": "Either a console entry point or a script file that'll be included in the distribution package.", + "additionalProperties": false, + "properties": { + "reference": { + "type": "string", + "description": "If type is file this is the relative path of the script file, if console it is the module name." + }, + "type": { + "description": "Value can be either file or console.", + "type": "string", + "enum": [ + "file", + "console" + ] }, - "repository": { - "type": "object", - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "description": "The name of the repository" - }, - "url": { - "type": "string", - "description": "The url of the repository", - "format": "uri" - }, - "default": { - "type": "boolean", - "description": "Make this repository the default (disable PyPI)" - }, - "secondary": { - "type": "boolean", - "description": "Declare this repository as secondary, i.e. it will only be looked up last for packages." - } - } + "extras": { + "type": "array", + "description": "The required extras for this script. Only applicable if type is console.", + "items": { + "type": "string" + } + } + }, + "required": [ + "reference", + "type" + ] + }, + "extra-script-legacy": { + "type": "object", + "description": "A script that should be installed only if extras are activated.", + "additionalProperties": false, + "properties": { + "callable": { + "$ref": "#/definitions/script-legacy", + "description": "The entry point of the script. Deprecated in favour of reference." }, - "build-script": { - "type": "string", - "description": "The python script file used to build extensions." + "extras": { + "type": "array", + "description": "The required extras for this script.", + "items": { + "type": "string" + } + } + } + }, + "build-script": { + "type": "string", + "description": "The python script file used to build extensions." 
+ }, + "build-config": { + "type": "object", + "description": "Build specific configurations.", + "additionalProperties": false, + "properties": { + "generate-setup-file": { + "type": "boolean", + "description": "Generate and include a setup.py file in sdist.", + "default": false }, - "build-config": { - "type": "object", - "description": "Build specific configurations.", - "additionalProperties": false, - "properties": { - "generate-setup-file": { - "type": "boolean", - "description": "Generate and include a setup.py file in sdist.", - "default": true - }, - "script": { - "$ref": "#/definitions/build-script" - } - } + "script": { + "$ref": "#/definitions/build-script" + } + } + }, + "build-section": { + "oneOf": [ + { + "$ref": "#/definitions/build-script" }, - "build-section": { - "oneOf": [ - {"$ref": "#/definitions/build-script"}, - {"$ref": "#/definitions/build-config"} - ] + { + "$ref": "#/definitions/build-config" } + ] } + } } diff --git a/conda_lock/_vendor/poetry/core/masonry/__init__.py b/conda_lock/_vendor/poetry/core/masonry/__init__.py index ddd3a14f..943204ad 100644 --- a/conda_lock/_vendor/poetry/core/masonry/__init__.py +++ b/conda_lock/_vendor/poetry/core/masonry/__init__.py @@ -6,5 +6,3 @@ `flit `__ and adapted to work with the poetry codebase, so kudos to them for showing the way. """ - -from .builder import Builder diff --git a/conda_lock/_vendor/poetry/core/masonry/api.py b/conda_lock/_vendor/poetry/core/masonry/api.py index 019f53bc..71e3c30b 100644 --- a/conda_lock/_vendor/poetry/core/masonry/api.py +++ b/conda_lock/_vendor/poetry/core/masonry/api.py @@ -1,33 +1,32 @@ """ PEP-517 compliant buildsystem API """ + +from __future__ import annotations + import logging +from pathlib import Path from typing import Any -from typing import Dict -from typing import List -from typing import Optional from conda_lock._vendor.poetry.core.factory import Factory -from conda_lock._vendor.poetry.core.utils._compat import Path -from conda_lock._vendor.poetry.core.utils._compat import unicode - -from .builders.sdist import SdistBuilder -from .builders.wheel import WheelBuilder +from conda_lock._vendor.poetry.core.masonry.builders.sdist import SdistBuilder +from conda_lock._vendor.poetry.core.masonry.builders.wheel import WheelBuilder log = logging.getLogger(__name__) def get_requires_for_build_wheel( - config_settings=None, -): # type: (Optional[Dict[str, Any]]) -> List[str] + config_settings: dict[str, Any] | None = None, +) -> list[str]: """ Returns an additional list of requirements for building, as PEP508 strings, above and beyond those specified in the pyproject.toml file. - This implementation is optional. At the moment it only returns an empty list, which would be the same as if - not define. So this is just for completeness for future implementation. + This implementation is optional. At the moment it only returns an empty list, + which would be the same as if it were not defined. So this is just for completeness, + for future implementation.
""" return [] @@ -38,53 +37,51 @@ def get_requires_for_build_wheel( def prepare_metadata_for_build_wheel( - metadata_directory, config_settings=None -): # type: (str, Optional[Dict[str, Any]]) -> str - poetry = Factory().create_poetry(Path(".").resolve(), with_dev=False) + metadata_directory: str, config_settings: dict[str, Any] | None = None +) -> str: + poetry = Factory().create_poetry(Path(".").resolve(), with_groups=False) builder = WheelBuilder(poetry) - - dist_info = Path(metadata_directory, builder.dist_info) - dist_info.mkdir(parents=True, exist_ok=True) - - if "scripts" in poetry.local_config or "plugins" in poetry.local_config: - with (dist_info / "entry_points.txt").open("w", encoding="utf-8") as f: - builder._write_entry_points(f) - - with (dist_info / "WHEEL").open("w", encoding="utf-8") as f: - builder._write_wheel_file(f) - - with (dist_info / "METADATA").open("w", encoding="utf-8") as f: - builder._write_metadata_file(f) - + metadata_path = Path(metadata_directory) + dist_info = builder.prepare_metadata(metadata_path) return dist_info.name def build_wheel( - wheel_directory, config_settings=None, metadata_directory=None -): # type: (str, Optional[Dict[str, Any]], Optional[str]) -> str + wheel_directory: str, + config_settings: dict[str, Any] | None = None, + metadata_directory: str | None = None, +) -> str: """Builds a wheel, places it in wheel_directory""" - poetry = Factory().create_poetry(Path(".").resolve(), with_dev=False) + poetry = Factory().create_poetry(Path(".").resolve(), with_groups=False) + metadata_path = None if metadata_directory is None else Path(metadata_directory) - return unicode(WheelBuilder.make_in(poetry, Path(wheel_directory))) + return WheelBuilder.make_in( + poetry, Path(wheel_directory), metadata_directory=metadata_path + ) def build_sdist( - sdist_directory, config_settings=None -): # type: (str, Optional[Dict[str, Any]]) -> str + sdist_directory: str, config_settings: dict[str, Any] | None = None +) -> str: """Builds an sdist, places it in sdist_directory""" - poetry = Factory().create_poetry(Path(".").resolve(), with_dev=False) + poetry = Factory().create_poetry(Path(".").resolve(), with_groups=False) path = SdistBuilder(poetry).build(Path(sdist_directory)) - return unicode(path.name) + return path.name def build_editable( - wheel_directory, config_settings=None, metadata_directory=None, -): # type: (str, Optional[Dict[str, Any]], Optional[str]) -> str - poetry = Factory().create_poetry(Path(".").resolve(), with_dev=False) - - return unicode(WheelBuilder.make_in(poetry, Path(wheel_directory), editable=True)) + wheel_directory: str, + config_settings: dict[str, Any] | None = None, + metadata_directory: str | None = None, +) -> str: + poetry = Factory().create_poetry(Path(".").resolve(), with_groups=False) + metadata_path = None if metadata_directory is None else Path(metadata_directory) + + return WheelBuilder.make_in( + poetry, Path(wheel_directory), metadata_directory=metadata_path, editable=True + ) get_requires_for_build_editable = get_requires_for_build_wheel diff --git a/conda_lock/_vendor/poetry/core/masonry/builder.py b/conda_lock/_vendor/poetry/core/masonry/builder.py index 85105cc4..d467eaad 100644 --- a/conda_lock/_vendor/poetry/core/masonry/builder.py +++ b/conda_lock/_vendor/poetry/core/masonry/builder.py @@ -1,35 +1,49 @@ -from typing import TYPE_CHECKING -from typing import Optional -from typing import Union +from __future__ import annotations -from conda_lock._vendor.poetry.core.utils._compat import Path +import warnings -from 
.builders.sdist import SdistBuilder -from .builders.wheel import WheelBuilder +from typing import TYPE_CHECKING if TYPE_CHECKING: - from conda_lock._vendor.poetry.core.poetry import Poetry # noqa + from pathlib import Path + + from conda_lock._vendor.poetry.core.poetry import Poetry + + +warnings.warn( + "poetry.core.masonry.builder is deprecated. Its functionality has been moved" + " from poetry-core to poetry (poetry.console.commands.build).", + DeprecationWarning, + stacklevel=2, +) class Builder: - _FORMATS = { - "sdist": SdistBuilder, - "wheel": WheelBuilder, - } + def __init__(self, poetry: Poetry) -> None: + from conda_lock._vendor.poetry.core.masonry.builders.sdist import SdistBuilder + from conda_lock._vendor.poetry.core.masonry.builders.wheel import WheelBuilder - def __init__(self, poetry): # type: ("Poetry") -> None self._poetry = poetry + self._formats = { + "sdist": SdistBuilder, + "wheel": WheelBuilder, + } + def build( - self, fmt, executable=None - ): # type: (str, Optional[Union[str, Path]]) -> None - if fmt in self._FORMATS: - builders = [self._FORMATS[fmt]] + self, + fmt: str, + executable: str | Path | None = None, + *, + target_dir: Path | None = None, + ) -> None: + if fmt in self._formats: + builders = [self._formats[fmt]] elif fmt == "all": - builders = self._FORMATS.values() + builders = list(self._formats.values()) else: - raise ValueError("Invalid format: {}".format(fmt)) + raise ValueError(f"Invalid format: {fmt}") for builder in builders: - builder(self._poetry, executable=executable).build() + builder(self._poetry, executable=executable).build(target_dir) diff --git a/conda_lock/_vendor/poetry/core/masonry/builders/__init__.py b/conda_lock/_vendor/poetry/core/masonry/builders/__init__.py index 20d725b7..e69de29b 100644 --- a/conda_lock/_vendor/poetry/core/masonry/builders/__init__.py +++ b/conda_lock/_vendor/poetry/core/masonry/builders/__init__.py @@ -1,2 +0,0 @@ -from .sdist import SdistBuilder -from .wheel import WheelBuilder diff --git a/conda_lock/_vendor/poetry/core/masonry/builders/builder.py b/conda_lock/_vendor/poetry/core/masonry/builders/builder.py index f95e5b03..013ff4f6 100644 --- a/conda_lock/_vendor/poetry/core/masonry/builders/builder.py +++ b/conda_lock/_vendor/poetry/core/masonry/builders/builder.py @@ -1,34 +1,24 @@ -# -*- coding: utf-8 -*- +from __future__ import annotations + import logging import re -import shutil import sys -import tempfile +import warnings from collections import defaultdict -from contextlib import contextmanager +from functools import cached_property +from pathlib import Path from typing import TYPE_CHECKING -from typing import Any -from typing import Dict -from typing import List -from typing import Optional -from typing import Set -from typing import Union - -from conda_lock._vendor.poetry.core.utils._compat import Path -from conda_lock._vendor.poetry.core.utils._compat import to_str -from conda_lock._vendor.poetry.core.vcs import get_vcs - -from ..metadata import Metadata -from ..utils.module import Module -from ..utils.package_include import PackageInclude if TYPE_CHECKING: - from conda_lock._vendor.poetry.core.poetry import Poetry # noqa + from conda_lock._vendor.poetry.core.masonry.utils.module import Module + from conda_lock._vendor.poetry.core.poetry import Poetry -AUTHOR_REGEX = re.compile(r"(?u)^(?P<name>[- .,\w\d'’\"()]+) <(?P<email>.+?)>$") +AUTHOR_REGEX = re.compile( + r"(?u)^(?P<name>[- .,\w\d'’\"()]+) <(?P<email>.+?)>$" # noqa: RUF001 +) METADATA_BASE = """\ Metadata-Version: 2.1 @@ -40,21 +30,43 @@ logger = 
logging.getLogger(__name__) -class Builder(object): - format = None # type: Optional[str] +class Builder: + format: str | None = None def __init__( - self, poetry, ignore_packages_formats=False, executable=None - ): # type: ("Poetry", bool, Optional[Union[Path, str]]) -> None + self, + poetry: Poetry, + ignore_packages_formats: bool = False, + executable: Path | None = None, + ) -> None: + from conda_lock._vendor.poetry.core.masonry.metadata import Metadata + + if not poetry.is_package_mode: + raise RuntimeError( + "Building a package is not possible in non-package mode." + ) + self._poetry = poetry self._package = poetry.package - self._path = poetry.file.parent - self._excluded_files = None # type: Optional[Set[str]] + self._path: Path = poetry.pyproject_path.parent + self._ignore_packages_formats = ignore_packages_formats + self._excluded_files: set[str] | None = None self._executable = Path(executable or sys.executable) + self._meta = Metadata.from_package(self._package) + + @cached_property + def _module(self) -> Module: + from conda_lock._vendor.poetry.core.masonry.utils.module import Module packages = [] for p in self._package.packages: - formats = p.get("format", []) + formats = p.get("format") or None + + # Default to including the package in both sdist & wheel + # if the `format` key is not provided in the inline include table. + if formats is None: + formats = ["sdist", "wheel"] + if not isinstance(formats, list): formats = [formats] @@ -62,7 +74,7 @@ def __init__( formats and self.format and self.format not in formats - and not ignore_packages_formats + and not self._ignore_packages_formats ): continue @@ -76,69 +88,69 @@ def __init__( formats and self.format and self.format not in formats - and not ignore_packages_formats + and not self._ignore_packages_formats ): continue includes.append(include) - self._module = Module( + return Module( self._package.name, self._path.as_posix(), packages=packages, includes=includes, ) - self._meta = Metadata.from_package(self._package) - @property - def executable(self): # type: () -> Path + def executable(self) -> Path: return self._executable - def build(self): # type: () -> None - raise NotImplementedError() + @property + def default_target_dir(self) -> Path: + return self._path / "dist" + + def build(self, target_dir: Path | None) -> Path: + raise NotImplementedError - def find_excluded_files(self): # type: () -> Set[str] + def find_excluded_files(self, fmt: str | None = None) -> set[str]: if self._excluded_files is None: + from conda_lock._vendor.poetry.core.vcs import get_vcs + # Checking VCS vcs = get_vcs(self._path) - if not vcs: - vcs_ignored_files = set() - else: - vcs_ignored_files = set(vcs.get_ignored_files()) + vcs_ignored_files = set(vcs.get_ignored_files()) if vcs else set() - explicitely_excluded = set() + explicitly_excluded = set() for excluded_glob in self._package.exclude: for excluded in self._path.glob(str(excluded_glob)): - explicitely_excluded.add( + explicitly_excluded.add( Path(excluded).relative_to(self._path).as_posix() ) - explicitely_included = set() + explicitly_included = set() for inc in self._package.include: + if fmt and inc["format"] and fmt not in inc["format"]: + continue + included_glob = inc["path"] for included in self._path.glob(str(included_glob)): - explicitely_included.add( + explicitly_included.add( Path(included).relative_to(self._path).as_posix() ) - ignored = (vcs_ignored_files | explicitely_excluded) - explicitely_included - result = set() - for file in ignored: - result.add(file) + ignored 
= (vcs_ignored_files | explicitly_excluded) - explicitly_included + for ignored_file in ignored: + logger.debug(f"Ignoring: {ignored_file}") - # The list of excluded files might be big and we will do a lot - # containment check (x in excluded). - # Returning a set make those tests much much faster. - self._excluded_files = result + self._excluded_files = ignored return self._excluded_files - def is_excluded(self, filepath): # type: (Union[str, Path]) -> bool + def is_excluded(self, filepath: str | Path) -> bool: exclude_path = Path(filepath) while True: - if exclude_path.as_posix() in self.find_excluded_files(): + if exclude_path.as_posix() in self.find_excluded_files(fmt=self.format): return True if len(exclude_path.parts) > 1: @@ -148,12 +160,12 @@ def is_excluded(self, filepath): # type: (Union[str, Path]) -> bool return False - def find_files_to_add( - self, exclude_build=True - ): # type: (bool) -> Set[BuildIncludeFile] + def find_files_to_add(self, exclude_build: bool = True) -> set[BuildIncludeFile]: """ Finds all files to add to the tarball """ + from conda_lock._vendor.poetry.core.masonry.utils.package_include import PackageInclude + to_add = set() for include in self._module.includes: @@ -164,32 +176,48 @@ def find_files_to_add( if "__pycache__" in str(file): continue + if ( + isinstance(include, PackageInclude) + and include.source + and self.format == "wheel" + ): + source_root = include.base + else: + source_root = self._path + + if ( + isinstance(include, PackageInclude) + and include.target + and self.format == "wheel" + ): + target_dir = include.target + else: + target_dir = None + if file.is_dir(): if self.format in formats: for current_file in file.glob("**/*"): include_file = BuildIncludeFile( path=current_file, project_root=self._path, - source_root=self._path, + source_root=source_root, + target_dir=target_dir, ) - if not current_file.is_dir() and not self.is_excluded( - include_file.relative_to_source_root() + if not ( + current_file.is_dir() + or self.is_excluded( + include_file.relative_to_source_root() + ) ): to_add.add(include_file) continue - if ( - isinstance(include, PackageInclude) - and include.source - and self.format == "wheel" - ): - source_root = include.base - else: - source_root = self._path - include_file = BuildIncludeFile( - path=file, project_root=self._path, source_root=source_root + path=file, + project_root=self._path, + source_root=source_root, + target_dir=target_dir, ) if self.is_excluded( @@ -200,11 +228,7 @@ def find_files_to_add( if file.suffix == ".pyc": continue - if file in to_add: - # Skip duplicates - continue - - logger.debug("Adding: {}".format(str(file))) + logger.debug(f"Adding: {file}") to_add.add(include_file) # add build script if it is specified and explicitly required @@ -219,156 +243,208 @@ def find_files_to_add( return to_add - def get_metadata_content(self): # type: () -> str + def get_metadata_content(self) -> str: content = METADATA_BASE.format( name=self._meta.name, version=self._meta.version, - summary=to_str(self._meta.summary), + summary=str(self._meta.summary), ) # Optional fields if self._meta.home_page: - content += "Home-page: {}\n".format(self._meta.home_page) + content += f"Home-page: {self._meta.home_page}\n" if self._meta.license: - content += "License: {}\n".format(self._meta.license) + content += f"License: {self._meta.license}\n" if self._meta.keywords: - content += "Keywords: {}\n".format(self._meta.keywords) + content += f"Keywords: {self._meta.keywords}\n" if self._meta.author: - content += "Author: 
{}\n".format(to_str(self._meta.author)) + content += f"Author: {self._meta.author}\n" if self._meta.author_email: - content += "Author-email: {}\n".format(to_str(self._meta.author_email)) + content += f"Author-email: {self._meta.author_email}\n" if self._meta.maintainer: - content += "Maintainer: {}\n".format(to_str(self._meta.maintainer)) + content += f"Maintainer: {self._meta.maintainer}\n" if self._meta.maintainer_email: - content += "Maintainer-email: {}\n".format( - to_str(self._meta.maintainer_email) - ) + content += f"Maintainer-email: {self._meta.maintainer_email}\n" if self._meta.requires_python: - content += "Requires-Python: {}\n".format(self._meta.requires_python) + content += f"Requires-Python: {self._meta.requires_python}\n" for classifier in self._meta.classifiers: - content += "Classifier: {}\n".format(classifier) + content += f"Classifier: {classifier}\n" for extra in sorted(self._meta.provides_extra): - content += "Provides-Extra: {}\n".format(extra) + content += f"Provides-Extra: {extra}\n" for dep in sorted(self._meta.requires_dist): - content += "Requires-Dist: {}\n".format(dep) + content += f"Requires-Dist: {dep}\n" for url in sorted(self._meta.project_urls, key=lambda u: u[0]): - content += "Project-URL: {}\n".format(to_str(url)) + content += f"Project-URL: {url}\n" if self._meta.description_content_type: - content += "Description-Content-Type: {}\n".format( - self._meta.description_content_type + content += ( + f"Description-Content-Type: {self._meta.description_content_type}\n" ) if self._meta.description is not None: - content += "\n" + to_str(self._meta.description) + "\n" + content += f"\n{self._meta.description}\n" return content - def convert_entry_points(self): # type: () -> Dict[str, List[str]] + def convert_entry_points(self) -> dict[str, list[str]]: result = defaultdict(list) # Scripts -> Entry points - for name, ep in self._poetry.local_config.get("scripts", {}).items(): - extras = "" - if isinstance(ep, dict): - extras = "[{}]".format(", ".join(ep["extras"])) - ep = ep["callable"] + for name, specification in self._poetry.local_config.get("scripts", {}).items(): + if isinstance(specification, str): + # TODO: deprecate this in favour or reference + specification = {"reference": specification, "type": "console"} + + if "callable" in specification: + warnings.warn( + f"Use of callable in script specification ({name}) is" + " deprecated. Use reference instead.", + DeprecationWarning, + stacklevel=1, + ) + specification = { + "reference": specification["callable"], + "type": "console", + } - result["console_scripts"].append("{} = {}{}".format(name, ep, extras)) + if specification.get("type") != "console": + continue + + extras = specification.get("extras", []) + if extras: + warnings.warn( + f'The script "{name}" depends on an extra. Scripts depending on' + " extras are deprecated and support for them will be removed in a" + " future version of poetry/poetry-core. 
See" + " https://packaging.python.org/en/latest/specifications/entry-points/#data-model" + " for details.", + DeprecationWarning, + stacklevel=1, + ) + extras = f"[{', '.join(extras)}]" if extras else "" + reference = specification.get("reference") + + if reference: + result["console_scripts"].append(f"{name} = {reference}{extras}") # Plugins -> entry points plugins = self._poetry.local_config.get("plugins", {}) for groupname, group in plugins.items(): - for name, ep in sorted(group.items()): - result[groupname].append("{} = {}".format(name, ep)) + for name, specification in sorted(group.items()): + result[groupname].append(f"{name} = {specification}") for groupname in result: result[groupname] = sorted(result[groupname]) return dict(result) + def convert_script_files(self) -> list[Path]: + script_files: list[Path] = [] + + for name, specification in self._poetry.local_config.get("scripts", {}).items(): + if isinstance(specification, dict) and specification.get("type") == "file": + source = specification["reference"] + + if Path(source).is_absolute(): + raise RuntimeError( + f"{source} in {name} is an absolute path. Expected relative" + " path." + ) + + abs_path = Path.joinpath(self._path, source) + + if not abs_path.exists(): + raise RuntimeError( + f"{abs_path} in script specification ({name}) is not found." + ) + + if not abs_path.is_file(): + raise RuntimeError( + f"{abs_path} in script specification ({name}) is not a file." + ) + + script_files.append(abs_path) + + return script_files + @classmethod - def convert_author(cls, author): # type: (str) -> Dict[str, str] + def convert_author(cls, author: str) -> dict[str, str]: m = AUTHOR_REGEX.match(author) + if m is None: + raise RuntimeError(f"{author} does not match regex") name = m.group("name") email = m.group("email") return {"name": name, "email": email} - @classmethod - @contextmanager - def temporary_directory(cls, *args, **kwargs): # type: (*Any, **Any) -> None - try: - from tempfile import TemporaryDirectory - - with TemporaryDirectory(*args, **kwargs) as name: - yield name - except ImportError: - name = tempfile.mkdtemp(*args, **kwargs) + def _get_legal_files(self) -> set[Path]: + include_files_patterns = {"COPYING*", "LICEN[SC]E*", "AUTHORS*", "NOTICE*"} + files: set[Path] = set() - yield name + for pattern in include_files_patterns: + files.update(self._path.glob(pattern)) - shutil.rmtree(name) + files.update(self._path.joinpath("LICENSES").glob("**/*")) + return files class BuildIncludeFile: def __init__( self, - path, # type: Union[Path, str] - project_root, # type: Union[Path, str] - source_root=None, # type: Optional[Union[Path, str]] - ): + path: Path | str, + project_root: Path | str, + source_root: Path | str, + target_dir: Path | str | None = None, + ) -> None: """ :param project_root: the full path of the project's root :param path: a full path to the file to be included - :param source_root: the root path to resolve to + :param source_root: the full root path to resolve to + :param target_dir: the relative target root to resolve to """ self.path = Path(path) self.project_root = Path(project_root).resolve() - self.source_root = None if not source_root else Path(source_root).resolve() - if not self.path.is_absolute() and self.source_root: + self.source_root = Path(source_root).resolve() + self.target_dir = None if not target_dir else Path(target_dir) + if not self.path.is_absolute(): self.path = self.source_root / self.path else: self.path = self.path - try: - self.path = self.path.resolve() - except 
FileNotFoundError: - # this is an issue in in python 3.5, since resolve uses strict=True by - # default, this workaround needs to be maintained till python 2.7 and - # python 3.5 are dropped, until we can use resolve(strict=False). - pass + self.path = self.path.resolve() - def __eq__(self, other): # type: (Union[BuildIncludeFile, Path]) -> bool - if hasattr(other, "path"): - return self.path == other.path - return self.path == other + def __eq__(self, other: object) -> bool: + if not isinstance(other, BuildIncludeFile): + return False - def __ne__(self, other): # type: (Union[BuildIncludeFile, Path]) -> bool - return not self.__eq__(other) + return self.path == other.path - def __hash__(self): # type: () -> int + def __hash__(self) -> int: return hash(self.path) - def __repr__(self): # type: () -> str + def __repr__(self) -> str: return str(self.path) - def relative_to_project_root(self): # type: () -> Path + def relative_to_project_root(self) -> Path: return self.path.relative_to(self.project_root) - def relative_to_source_root(self): # type: () -> Path - if self.source_root is not None: - return self.path.relative_to(self.source_root) - return self.path + def relative_to_source_root(self) -> Path: + return self.path.relative_to(self.source_root) + + def relative_to_target_root(self) -> Path: + path = self.relative_to_source_root() + if self.target_dir is not None: + return self.target_dir / path + return path diff --git a/conda_lock/_vendor/poetry/core/masonry/builders/sdist.py b/conda_lock/_vendor/poetry/core/masonry/builders/sdist.py index cf96f88d..1256a51a 100644 --- a/conda_lock/_vendor/poetry/core/masonry/builders/sdist.py +++ b/conda_lock/_vendor/poetry/core/masonry/builders/sdist.py @@ -1,40 +1,33 @@ -# -*- coding: utf-8 -*- +from __future__ import annotations + import logging import os import re import tarfile -import time from collections import defaultdict from contextlib import contextmanager from copy import copy from gzip import GzipFile from io import BytesIO +from pathlib import Path from posixpath import join as pjoin from pprint import pformat -from tarfile import TarInfo from typing import TYPE_CHECKING -from typing import Dict -from typing import Iterator -from typing import List -from typing import Optional -from typing import Set -from typing import Tuple - -from conda_lock._vendor.poetry.core.utils._compat import Path -from conda_lock._vendor.poetry.core.utils._compat import decode -from conda_lock._vendor.poetry.core.utils._compat import encode -from conda_lock._vendor.poetry.core.utils._compat import to_str -from ..utils.helpers import normalize_file_permissions -from ..utils.package_include import PackageInclude -from .builder import Builder -from .builder import BuildIncludeFile +from conda_lock._vendor.poetry.core.masonry.builders.builder import Builder +from conda_lock._vendor.poetry.core.masonry.builders.builder import BuildIncludeFile +from conda_lock._vendor.poetry.core.masonry.utils.helpers import distribution_name if TYPE_CHECKING: - from conda_lock._vendor.poetry.core.packages import Dependency # noqa - from conda_lock._vendor.poetry.core.packages import ProjectPackage # noqa + from collections.abc import Iterable + from collections.abc import Iterator + from tarfile import TarInfo + + from conda_lock._vendor.poetry.core.masonry.utils.package_include import PackageInclude + from conda_lock._vendor.poetry.core.packages.dependency import Dependency + from conda_lock._vendor.poetry.core.packages.project_package import ProjectPackage SETUP = """\ # -*- 
coding: utf-8 -*- @@ -62,27 +55,27 @@ class SdistBuilder(Builder): - format = "sdist" - def build(self, target_dir=None): # type: (Optional[Path]) -> Path + def build( + self, + target_dir: Path | None = None, + ) -> Path: logger.info("Building sdist") - if target_dir is None: - target_dir = self._path / "dist" + target_dir = target_dir or self.default_target_dir if not target_dir.exists(): target_dir.mkdir(parents=True) - target = target_dir / "{}-{}.tar.gz".format( - self._package.pretty_name, self._meta.version - ) + name = distribution_name(self._package.name) + target = target_dir / f"{name}-{self._meta.version}.tar.gz" gz = GzipFile(target.as_posix(), mode="wb", mtime=0) tar = tarfile.TarFile( target.as_posix(), mode="w", fileobj=gz, format=tarfile.PAX_FORMAT ) try: - tar_dir = "{}-{}".format(self._package.pretty_name, self._meta.version) + tar_dir = f"{name}-{self._meta.version}" files_to_add = self.find_files_to_add(exclude_build=False) @@ -103,32 +96,36 @@ def build(self, target_dir=None): # type: (Optional[Path]) -> Path setup = self.build_setup() tar_info = tarfile.TarInfo(pjoin(tar_dir, "setup.py")) tar_info.size = len(setup) - tar_info.mtime = time.time() + tar_info.mtime = 0 + tar_info = self.clean_tarinfo(tar_info) tar.addfile(tar_info, BytesIO(setup)) pkg_info = self.build_pkg_info() tar_info = tarfile.TarInfo(pjoin(tar_dir, "PKG-INFO")) tar_info.size = len(pkg_info) - tar_info.mtime = time.time() + tar_info.mtime = 0 + tar_info = self.clean_tarinfo(tar_info) tar.addfile(tar_info, BytesIO(pkg_info)) finally: tar.close() gz.close() - logger.info("Built {}".format(target.name)) + logger.info(f"Built {target.name}") return target - def build_setup(self): # type: () -> bytes + def build_setup(self) -> bytes: + from conda_lock._vendor.poetry.core.masonry.utils.package_include import PackageInclude + before, extra, after = [], [], [] - package_dir = {} + package_dir: dict[str, str] = {} # If we have a build script, use it if self._package.build_script: - after += [ - "from {} import *".format(self._package.build_script.split(".")[0]), - "build(setup_kwargs)", - ] + import_name = ".".join( + Path(self._package.build_script).with_suffix("").parts + ) + after += [f"from {import_name} import *", "build(setup_kwargs)"] modules = [] packages = [] @@ -142,7 +139,14 @@ def build_setup(self): # type: () -> bytes pkg_dir, _packages, _package_data = self.find_packages(include) if pkg_dir is not None: - package_dir[""] = os.path.relpath(pkg_dir, str(self._path)) + pkg_root = os.path.relpath(pkg_dir, str(self._path)) + if "" in package_dir: + package_dir.update( + (p, os.path.join(pkg_root, p.replace(".", "/"))) + for p in _packages + ) + else: + package_dir[""] = pkg_root packages += [p for p in _packages if p not in packages] package_data.update(_package_data) @@ -158,63 +162,65 @@ def build_setup(self): # type: () -> bytes pass if package_dir: - before.append("package_dir = \\\n{}\n".format(pformat(package_dir))) + before.append(f"package_dir = \\\n{pformat(package_dir)}\n") extra.append("'package_dir': package_dir,") if packages: - before.append("packages = \\\n{}\n".format(pformat(sorted(packages)))) + before.append(f"packages = \\\n{pformat(sorted(packages))}\n") extra.append("'packages': packages,") if package_data: - before.append("package_data = \\\n{}\n".format(pformat(package_data))) + before.append(f"package_data = \\\n{pformat(package_data)}\n") extra.append("'package_data': package_data,") if modules: - before.append("modules = \\\n{}".format(pformat(modules))) - 
extra.append("'py_modules': modules,".format()) + before.append(f"modules = \\\n{pformat(modules)}") + extra.append("'py_modules': modules,") dependencies, extras = self.convert_dependencies( self._package, self._package.requires ) if dependencies: - before.append( - "install_requires = \\\n{}\n".format(pformat(sorted(dependencies))) - ) + before.append(f"install_requires = \\\n{pformat(sorted(dependencies))}\n") extra.append("'install_requires': install_requires,") if extras: - before.append("extras_require = \\\n{}\n".format(pformat(extras))) + before.append(f"extras_require = \\\n{pformat(extras)}\n") extra.append("'extras_require': extras_require,") entry_points = self.convert_entry_points() if entry_points: - before.append("entry_points = \\\n{}\n".format(pformat(entry_points))) + before.append(f"entry_points = \\\n{pformat(entry_points)}\n") extra.append("'entry_points': entry_points,") + script_files = self.convert_script_files() + if script_files: + rel_paths = [str(p.relative_to(self._path)) for p in script_files] + before.append(f"scripts = \\\n{pformat(rel_paths)}\n") + extra.append("'scripts': scripts,") + if self._package.python_versions != "*": python_requires = self._meta.requires_python - extra.append("'python_requires': {!r},".format(python_requires)) - - return encode( - SETUP.format( - before="\n".join(before), - name=to_str(self._meta.name), - version=to_str(self._meta.version), - description=to_str(self._meta.summary), - long_description=to_str(self._meta.description), - author=to_str(self._meta.author), - author_email=to_str(self._meta.author_email), - maintainer=to_str(self._meta.maintainer), - maintainer_email=to_str(self._meta.maintainer_email), - url=to_str(self._meta.home_page), - extra="\n ".join(extra), - after="\n".join(after), - ) - ) + extra.append(f"'python_requires': {python_requires!r},") + + return SETUP.format( + before="\n".join(before), + name=str(self._meta.name), + version=self._meta.version, + description=str(self._meta.summary), + long_description=str(self._meta.description), + author=str(self._meta.author), + author_email=str(self._meta.author_email), + maintainer=str(self._meta.maintainer), + maintainer_email=str(self._meta.maintainer_email), + url=str(self._meta.home_page), + extra="\n ".join(extra), + after="\n".join(after), + ).encode() @contextmanager - def setup_py(self): # type: () -> Iterator[Path] + def setup_py(self) -> Iterator[Path]: setup = self._path / "setup.py" has_setup = setup.exists() @@ -222,19 +228,19 @@ def setup_py(self): # type: () -> Iterator[Path] logger.warning("A setup.py file already exists. Using it.") else: with setup.open("w", encoding="utf-8") as f: - f.write(decode(self.build_setup())) + f.write(self.build_setup().decode()) yield setup if not has_setup: setup.unlink() - def build_pkg_info(self): # type: () -> bytes - return encode(self.get_metadata_content()) + def build_pkg_info(self) -> bytes: + return self.get_metadata_content().encode() def find_packages( - self, include - ): # type: (PackageInclude) -> Tuple[str, List[str], dict] + self, include: PackageInclude + ) -> tuple[str | None, list[str], dict[str, list[str]]]: """ Discover subpackages and data. 
@@ -247,14 +253,14 @@ def find_packages( base = str(include.elements[0].parent) pkg_name = include.package - pkg_data = defaultdict(list) - # Undocumented distutils feature: + pkg_data: dict[str, list[str]] = defaultdict(list) + # Undocumented setup() feature: # the empty string matches all package names pkg_data[""].append("*") packages = [pkg_name] subpkg_paths = set() - def find_nearest_pkg(rel_path): # type: (str) -> Tuple[str, str] + def find_nearest_pkg(rel_path: str) -> tuple[str, str]: parts = rel_path.split(os.sep) for i in reversed(range(1, len(parts))): ancestor = "/".join(parts[:i]) @@ -265,7 +271,7 @@ def find_nearest_pkg(rel_path): # type: (str) -> Tuple[str, str] # Relative to the top-level package return pkg_name, Path(rel_path).as_posix() - for path, dirnames, filenames in os.walk(str(base), topdown=True): + for path, _dirnames, filenames in os.walk(str(base), topdown=True): if os.path.basename(path) == "__pycache__": continue @@ -274,18 +280,16 @@ def find_nearest_pkg(rel_path): # type: (str) -> Tuple[str, str] continue is_subpkg = any( - [filename.endswith(".py") for filename in filenames] + filename.endswith(".py") for filename in filenames ) and not all( - [ - self.is_excluded(Path(path, filename).relative_to(self._path)) - for filename in filenames - if filename.endswith(".py") - ] + self.is_excluded(Path(path, filename).relative_to(self._path)) + for filename in filenames + if filename.endswith(".py") ) if is_subpkg: subpkg_paths.add(from_top_level) parts = from_top_level.split(os.sep) - packages.append(".".join([pkg_name] + parts)) + packages.append(".".join([pkg_name, *parts])) else: pkg, from_nearest_pkg = find_nearest_pkg(from_top_level) @@ -313,37 +317,43 @@ def find_nearest_pkg(rel_path): # type: (str) -> Tuple[str, str] return pkgdir, sorted(packages), pkg_data - def find_files_to_add( - self, exclude_build=False - ): # type: (bool) -> Set[BuildIncludeFile] - to_add = super(SdistBuilder, self).find_files_to_add(exclude_build) + def find_files_to_add(self, exclude_build: bool = False) -> set[BuildIncludeFile]: + to_add = super().find_files_to_add(exclude_build) - # add any additional files, starting with all LICENSE files - additional_files = { - license_file for license_file in self._path.glob("LICENSE*") - } + # add any additional files + additional_files: set[Path] = set() + + # add legal files + additional_files.update(self._get_legal_files()) + + # add script files + additional_files.update(self.convert_script_files()) # Include project files - additional_files.add("pyproject.toml") + additional_files.add(Path("pyproject.toml")) - # add readme if it is specified + # add readme files if specified if "readme" in self._poetry.local_config: - additional_files.add(self._poetry.local_config["readme"]) + readme: str | Iterable[str] = self._poetry.local_config["readme"] + if isinstance(readme, str): + additional_files.add(Path(readme)) + else: + additional_files.update(Path(r) for r in readme) - for file in additional_files: + for additional_file in additional_files: file = BuildIncludeFile( - path=file, project_root=self._path, source_root=self._path + path=additional_file, project_root=self._path, source_root=self._path ) if file.path.exists(): - logger.debug("Adding: {}".format(file.relative_to_source_root())) + logger.debug(f"Adding: {file.relative_to_source_root()}") to_add.add(file) return to_add @classmethod def convert_dependencies( - cls, package, dependencies - ): # type: ("ProjectPackage", List["Dependency"]) -> Tuple[List[str], Dict[str, List[str]]] + 
cls, package: ProjectPackage, dependencies: list[Dependency] + ) -> tuple[list[str], dict[str, list[str]]]: main = [] extras = defaultdict(list) req_regex = re.compile(r"^(.+) \((.+)\)$") @@ -353,9 +363,7 @@ def convert_dependencies( for extra_name, reqs in package.extras.items(): for req in reqs: if req.name == dependency.name: - requirement = to_str( - dependency.to_pep_508(with_extras=False) - ) + requirement = dependency.to_pep_508(with_extras=False) if ";" in requirement: requirement, conditions = requirement.split(";") @@ -379,7 +387,7 @@ def convert_dependencies( extras[extra_name].append(requirement) continue - requirement = to_str(dependency.to_pep_508()) + requirement = dependency.to_pep_508() if ";" in requirement: requirement, conditions = requirement.split(";") @@ -400,7 +408,7 @@ def convert_dependencies( return main, dict(extras) @classmethod - def clean_tarinfo(cls, tar_info): # type: (TarInfo) -> TarInfo + def clean_tarinfo(cls, tar_info: TarInfo) -> TarInfo: """ Clean metadata from a TarInfo object to make it more reproducible. @@ -409,6 +417,8 @@ def clean_tarinfo(cls, tar_info): # type: (TarInfo) -> TarInfo - Normalise permissions to 644 or 755 - Set mtime if not None """ + from conda_lock._vendor.poetry.core.masonry.utils.helpers import normalize_file_permissions + ti = copy(tar_info) ti.uid = 0 ti.gid = 0 diff --git a/conda_lock/_vendor/poetry/core/masonry/builders/wheel.py b/conda_lock/_vendor/poetry/core/masonry/builders/wheel.py index a60c351e..3e1c4b0c 100644 --- a/conda_lock/_vendor/poetry/core/masonry/builders/wheel.py +++ b/conda_lock/_vendor/poetry/core/masonry/builders/wheel.py @@ -1,4 +1,4 @@ -from __future__ import unicode_literals +from __future__ import annotations import contextlib import csv @@ -8,40 +8,39 @@ import shutil import stat import subprocess +import sys +import sysconfig import tempfile import zipfile from base64 import urlsafe_b64encode -from io import BytesIO from io import StringIO +from pathlib import Path from typing import TYPE_CHECKING -from typing import Iterator -from typing import Optional from typing import TextIO -from typing import Union -from packaging.tags import sys_tags +import packaging.tags from conda_lock._vendor.poetry.core import __version__ -from conda_lock._vendor.poetry.core.semver import parse_constraint -from conda_lock._vendor.poetry.core.utils._compat import PY2 -from conda_lock._vendor.poetry.core.utils._compat import Path -from conda_lock._vendor.poetry.core.utils._compat import decode - -from ..utils.helpers import escape_name -from ..utils.helpers import escape_version -from ..utils.helpers import normalize_file_permissions -from ..utils.package_include import PackageInclude -from .builder import Builder -from .sdist import SdistBuilder +from conda_lock._vendor.poetry.core.constraints.version import parse_constraint +from conda_lock._vendor.poetry.core.masonry.builders.builder import Builder +from conda_lock._vendor.poetry.core.masonry.builders.sdist import SdistBuilder +from conda_lock._vendor.poetry.core.masonry.utils.helpers import distribution_name +from conda_lock._vendor.poetry.core.masonry.utils.helpers import normalize_file_permissions +from conda_lock._vendor.poetry.core.masonry.utils.package_include import PackageInclude +from conda_lock._vendor.poetry.core.utils.helpers import temporary_directory if TYPE_CHECKING: - from conda_lock._vendor.poetry.core.poetry import Poetry # noqa + from collections.abc import Iterator + + from packaging.utils import NormalizedName + + from 
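# Aside: a minimal sketch, assuming only the stdlib, of the clean_tarinfo
# contract documented above (zeroed owner fields, 644/755 permissions) so
# that rebuilding the same sources yields byte-identical tarballs.
# normalize_file_permissions here is a simplified stand-in for the vendored
# helper of the same name, whose full body is not shown in this hunk.
import copy
import tarfile

def normalize_file_permissions(st_mode: int) -> int:
    new_mode = (st_mode & ~0o777) | 0o644  # default to rw-r--r--
    if st_mode & 0o100:                    # owner-executable: 644 -> 755
        new_mode |= 0o111
    return new_mode

def clean_tarinfo(tar_info: tarfile.TarInfo) -> tarfile.TarInfo:
    ti = copy.copy(tar_info)
    ti.uid = ti.gid = 0                    # reset owner ids
    ti.uname = ti.gname = ""               # reset owner names
    ti.mode = normalize_file_permissions(ti.mode)
    return ti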
conda_lock._vendor.poetry.core.poetry import Poetry wheel_file_template = """\ Wheel-Version: 1.0 -Generator: poetry {version} +Generator: poetry-core {version} Root-Is-Purelib: {pure_lib} Tag: {tag} """ @@ -53,76 +52,97 @@ class WheelBuilder(Builder): format = "wheel" def __init__( - self, poetry, target_dir=None, original=None, executable=None, editable=False, - ): # type: ("Poetry", Optional[Path], Optional[Path], Optional[str], bool) -> None - super(WheelBuilder, self).__init__(poetry, executable=executable) - - self._records = [] + self, + poetry: Poetry, + original: Path | None = None, + executable: Path | None = None, + editable: bool = False, + metadata_directory: Path | None = None, + ) -> None: + super().__init__(poetry, executable=executable) + + self._records: list[tuple[str, str, int]] = [] self._original_path = self._path - self._target_dir = target_dir or (self._poetry.file.parent / "dist") if original: - self._original_path = original.file.parent + self._original_path = original.parent self._editable = editable + self._metadata_directory = metadata_directory @classmethod def make_in( - cls, poetry, directory=None, original=None, executable=None, editable=False, - ): # type: ("Poetry", Optional[Path], Optional[Path], Optional[str], bool) -> str + cls, + poetry: Poetry, + directory: Path | None = None, + original: Path | None = None, + executable: Path | None = None, + editable: bool = False, + metadata_directory: Path | None = None, + ) -> str: wb = WheelBuilder( poetry, - target_dir=directory, original=original, executable=executable, editable=editable, + metadata_directory=metadata_directory, ) - wb.build() + wb.build(target_dir=directory) return wb.wheel_filename @classmethod - def make(cls, poetry, executable=None): # type: ("Poetry", Optional[str]) -> None + def make(cls, poetry: Poetry, executable: Path | None = None) -> None: """Build a wheel in the dist/ directory, and optionally upload it.""" cls.make_in(poetry, executable=executable) - def build(self): # type: () -> None + def build( + self, + target_dir: Path | None = None, + ) -> Path: logger.info("Building wheel") - dist_dir = self._target_dir - if not dist_dir.exists(): - dist_dir.mkdir() + target_dir = target_dir or self.default_target_dir + if not target_dir.exists(): + target_dir.mkdir() - (fd, temp_path) = tempfile.mkstemp(suffix=".whl") + fd, temp_path = tempfile.mkstemp(suffix=".whl") st_mode = os.stat(temp_path).st_mode new_mode = normalize_file_permissions(st_mode) os.chmod(temp_path, new_mode) - with os.fdopen(fd, "w+b") as fd_file: - with zipfile.ZipFile( - fd_file, mode="w", compression=zipfile.ZIP_DEFLATED - ) as zip_file: - if not self._editable: - if not self._poetry.package.build_should_generate_setup(): - self._build(zip_file) - self._copy_module(zip_file) - else: - self._copy_module(zip_file) - self._build(zip_file) - else: - self._build(zip_file) - self._add_pth(zip_file) - - self._write_metadata(zip_file) - self._write_record(zip_file) - - wheel_path = dist_dir / self.wheel_filename + with os.fdopen(fd, "w+b") as fd_file, zipfile.ZipFile( + fd_file, mode="w", compression=zipfile.ZIP_DEFLATED + ) as zip_file: + if self._editable: + self._build(zip_file) + self._add_pth(zip_file) + elif self._poetry.package.build_should_generate_setup(): + self._copy_module(zip_file) + self._build(zip_file) + else: + self._build(zip_file) + self._copy_module(zip_file) + + self._copy_file_scripts(zip_file) + + if self._metadata_directory is None: + with temporary_directory() as temp_dir: + metadata_directory = 
self.prepare_metadata(Path(temp_dir)) + self._copy_dist_info(zip_file, metadata_directory) + else: + self._copy_dist_info(zip_file, self._metadata_directory) + + self._write_record(zip_file) + + wheel_path = target_dir / self.wheel_filename if wheel_path.exists(): wheel_path.unlink() shutil.move(temp_path, str(wheel_path)) - logger.info("Built {}".format(self.wheel_filename)) + logger.info(f"Built {self.wheel_filename}") + return wheel_path - def _add_pth(self, wheel): # type: (zipfile.ZipFile) -> None + def _add_pth(self, wheel: zipfile.ZipFile) -> None: paths = set() for include in self._module.includes: if isinstance(include, PackageInclude) and ( @@ -139,16 +159,16 @@ def _add_pth(self, wheel): # type: (zipfile.ZipFile) -> None with self._write_to_zip(wheel, str(pth_file)) as f: f.write(content) - def _build(self, wheel): # type: (zipfile.ZipFile) -> None + def _build(self, wheel: zipfile.ZipFile) -> None: if self._package.build_script: if not self._poetry.package.build_should_generate_setup(): # Since we have a build script but no setup.py generation is required, # we assume that the build script will build and copy the files # directly. # That way they will be picked up when adding files to the wheel. - current_path = os.getcwd() + current_path = Path.cwd() try: - os.chdir(str(self._path)) + os.chdir(self._path) self._run_build_script(self._package.build_script) finally: os.chdir(current_path) @@ -156,91 +176,133 @@ def _build(self, wheel): # type: (zipfile.ZipFile) -> None with SdistBuilder(poetry=self._poetry).setup_py() as setup: # We need to place ourselves in the temporary # directory in order to build the package - current_path = os.getcwd() + current_path = Path.cwd() try: - os.chdir(str(self._path)) + os.chdir(self._path) self._run_build_command(setup) finally: os.chdir(current_path) - build_dir = self._path / "build" - lib = list(build_dir.glob("lib.*")) - if not lib: + if self._editable: + # For an editable install, the extension modules will be built + # in-place - so there's no need to copy them into the zip + return + + lib = self._get_build_lib_dir() + if lib is None: # The result of building the extensions # does not exist, this may be due to conditional # builds, so we assume that it's okay return - lib = lib[0] - - for pkg in lib.glob("**/*"): + for pkg in sorted(lib.glob("**/*")): if pkg.is_dir() or self.is_excluded(pkg): continue - rel_path = str(pkg.relative_to(lib)) + rel_path = pkg.relative_to(lib) - if rel_path in wheel.namelist(): + if rel_path.as_posix() in wheel.namelist(): continue - logger.debug("Adding: {}".format(rel_path)) + logger.debug(f"Adding: {rel_path}") self._add_file(wheel, pkg, rel_path) - def _run_build_command(self, setup): # type: (Path) -> None - subprocess.check_call( - [ + def _get_build_purelib_dir(self) -> Path: + return self._path / "build/lib" + + def _get_build_platlib_dir(self) -> Path: + # Roughly equivalent to the naming convention used by distutils, see: + # distutils.command.build.build.finalize_options + plat_specifier = f"{sysconfig.get_platform()}-{sys.implementation.cache_tag}" + return self._path / f"build/lib.{plat_specifier}" + + def _get_build_lib_dir(self) -> Path | None: + # Either the purelib or platlib path will have been used when building + build_platlib = self._get_build_platlib_dir() + build_purelib = self._get_build_purelib_dir() + if build_platlib.exists(): + return build_platlib + elif build_purelib.exists(): + return build_purelib + return None + + def _copy_file_scripts(self, wheel: zipfile.ZipFile) -> None:
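# Aside: what the purelib/platlib directories above look like in practice.
# On CPython 3.11 on 64-bit Linux this prints "build/lib" and something like
# "build/lib.linux-x86_64-cpython-311"; the exact platlib value varies by
# platform and interpreter.
import sys
import sysconfig

print("build/lib")  # purelib: pure-Python build output
print(f"build/lib.{sysconfig.get_platform()}-{sys.implementation.cache_tag}")  # platlib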
+ file_scripts = self.convert_script_files() + + for abs_path in file_scripts: + self._add_file( + wheel, + abs_path, + Path(self.wheel_data_folder) / "scripts" / abs_path.name, + ) + + def _run_build_command(self, setup: Path) -> None: + if self._editable: + subprocess.check_call([ self.executable.as_posix(), str(setup), - "build", - "-b", - str(self._path / "build"), - ] - ) - - def _run_build_script(self, build_script): # type: (str) -> None - logger.debug("Executing build script: {}".format(build_script)) + "build_ext", + "--inplace", + ]) + subprocess.check_call([ + self.executable.as_posix(), + str(setup), + "build", + "-b", + str(self._path / "build"), + "--build-purelib", + str(self._get_build_purelib_dir()), + "--build-platlib", + str(self._get_build_platlib_dir()), + ]) + + def _run_build_script(self, build_script: str) -> None: + logger.debug(f"Executing build script: {build_script}") subprocess.check_call([self.executable.as_posix(), build_script]) - def _copy_module(self, wheel): # type: (zipfile.ZipFile) -> None + def _copy_module(self, wheel: zipfile.ZipFile) -> None: to_add = self.find_files_to_add() # Walk the files and compress them, # sorting everything so the order is stable. - for file in sorted(list(to_add), key=lambda x: x.path): - self._add_file(wheel, file.path, file.relative_to_source_root()) + for file in sorted(to_add, key=lambda x: x.path): + self._add_file(wheel, file.path, file.relative_to_target_root()) + + def prepare_metadata(self, metadata_directory: Path) -> Path: + dist_info = metadata_directory / self.dist_info + dist_info.mkdir(parents=True, exist_ok=True) - def _write_metadata(self, wheel): # type: (zipfile.ZipFile) -> None if ( "scripts" in self._poetry.local_config or "plugins" in self._poetry.local_config ): - with self._write_to_zip(wheel, self.dist_info + "/entry_points.txt") as f: + with (dist_info / "entry_points.txt").open( + "w", encoding="utf-8", newline="\n" + ) as f: self._write_entry_points(f) - license_files_to_add = [] - for base in ("COPYING", "LICENSE"): - license_files_to_add.append(self._path / base) - license_files_to_add.extend(self._path.glob(base + ".*")) + with (dist_info / "WHEEL").open("w", encoding="utf-8", newline="\n") as f: + self._write_wheel_file(f) - license_files_to_add.extend(self._path.joinpath("LICENSES").glob("**/*")) + with (dist_info / "METADATA").open("w", encoding="utf-8", newline="\n") as f: + self._write_metadata_file(f) - for path in set(license_files_to_add): - if path.is_file(): - relative_path = "%s/%s" % (self.dist_info, path.relative_to(self._path)) - self._add_file(wheel, path, relative_path) - else: - logger.debug("Skipping: {}".format(path.as_posix())) + for legal_file in self._get_legal_files(): + if not legal_file.is_file(): + logger.debug(f"Skipping: {legal_file.as_posix()}") + continue - with self._write_to_zip(wheel, self.dist_info + "/WHEEL") as f: - self._write_wheel_file(f) + dest = dist_info / legal_file.relative_to(self._path) + dest.parent.mkdir(parents=True, exist_ok=True) + shutil.copy(legal_file, dest) - with self._write_to_zip(wheel, self.dist_info + "/METADATA") as f: - self._write_metadata_file(f) + return dist_info - def _write_record(self, wheel): # type: (zipfile.ZipFile) -> None + def _write_record(self, wheel: zipfile.ZipFile) -> None: # Write a record of the files in the wheel with self._write_to_zip(wheel, self.dist_info + "/RECORD") as f: - record = StringIO() if not PY2 else BytesIO() + record = StringIO() csv_writer = csv.writer( record, @@ -249,65 +311,116 @@ def 
_write_record(self, wheel): # type: (zipfile.ZipFile) -> None lineterminator="\n", ) for path, hash, size in self._records: - csv_writer.writerow((path, "sha256={}".format(hash), size)) + csv_writer.writerow((path, f"sha256={hash}", size)) # RECORD itself is recorded with no hash or size csv_writer.writerow((self.dist_info + "/RECORD", "", "")) - f.write(decode(record.getvalue())) + f.write(record.getvalue()) + + def _copy_dist_info(self, wheel: zipfile.ZipFile, source: Path) -> None: + dist_info = Path(self.dist_info) + for file in sorted(source.glob("**/*")): + if not file.is_file(): + continue + + rel_path = file.relative_to(source) + target = dist_info / rel_path + self._add_file(wheel, file, target) @property - def dist_info(self): # type: () -> str + def dist_info(self) -> str: return self.dist_info_name(self._package.name, self._meta.version) @property - def wheel_filename(self): # type: () -> str - return "{}-{}-{}.whl".format( - escape_name(self._package.pretty_name), - escape_version(self._meta.version), - self.tag, - ) + def wheel_data_folder(self) -> str: + name = distribution_name(self._package.name) + return f"{name}-{self._meta.version}.data" - def supports_python2(self): # type: () -> bool + @property + def wheel_filename(self) -> str: + name = distribution_name(self._package.name) + version = self._meta.version + return f"{name}-{version}-{self.tag}.whl" + + def supports_python2(self) -> bool: return self._package.python_constraint.allows_any( parse_constraint(">=2.0.0 <3.0.0") ) - def dist_info_name(self, distribution, version): # type: (str, str) -> str - escaped_name = escape_name(distribution) - escaped_version = escape_version(version) + def dist_info_name(self, name: NormalizedName, version: str) -> str: + escaped_name = distribution_name(name) + return f"{escaped_name}-{version}.dist-info" - return "{}-{}.dist-info".format(escaped_name, escaped_version) + def _get_sys_tags(self) -> list[str]: + """Get sys_tags via subprocess. + Required if poetry-core is not run inside the build environment. 
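# Aside: a self-contained sketch of how one RECORD row is derived, mirroring
# the sha256 hashing in _add_file and the "path,sha256=<digest>,<size>" CSV
# layout written by _write_record above: the wheel spec uses urlsafe base64
# of the sha256 digest with '=' padding stripped. The file name and contents
# here are hypothetical.
import hashlib
from base64 import urlsafe_b64encode

data = b"print('hello')\n"
digest = urlsafe_b64encode(hashlib.sha256(data).digest()).decode("ascii").rstrip("=")
print(f"demo/__init__.py,sha256={digest},{len(data)}")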
+ """ + try: + output = subprocess.check_output( + [ + self.executable.as_posix(), + "-c", + f""" +import importlib.util +import sys + +from pathlib import Path + +spec = importlib.util.spec_from_file_location( + "packaging", Path(r"{packaging.__file__}") +) + +packaging = importlib.util.module_from_spec(spec) +sys.modules[spec.name] = packaging + +spec = importlib.util.spec_from_file_location( + "packaging.tags", Path(r"{packaging.tags.__file__}") +) +packaging_tags = importlib.util.module_from_spec(spec) +spec.loader.exec_module(packaging_tags) +for t in packaging_tags.sys_tags(): + print(t.interpreter, t.abi, t.platform, sep="-") +""", + ], + stderr=subprocess.STDOUT, + text=True, + encoding="utf-8", + ) + except subprocess.CalledProcessError as e: + raise RuntimeError( + "Failed to get sys_tags for python interpreter" + f" '{self.executable.as_posix()}':\n{e.output}" + ) + return output.strip().splitlines() @property - def tag(self): # type: () -> str + def tag(self) -> str: if self._package.build_script: - tag = next(sys_tags()) - tag = (tag.interpreter, tag.abi, tag.platform) + if self.executable != Path(sys.executable): + # poetry-core is not run in the build environment + # -> this is probably not a PEP 517 build but a poetry build + return self._get_sys_tags()[0] + sys_tag = next(packaging.tags.sys_tags()) + tag = (sys_tag.interpreter, sys_tag.abi, sys_tag.platform) else: platform = "any" - if self.supports_python2(): - impl = "py2.py3" - else: - impl = "py3" - + impl = "py2.py3" if self.supports_python2() else "py3" tag = (impl, "none", platform) - return "-".join(tag) def _add_file( - self, wheel, full_path, rel_path - ): # type: (zipfile.ZipFile, Union[Path, str], Union[Path, str]) -> None - full_path, rel_path = str(full_path), str(rel_path) - if os.sep != "/": - # We always want to have /-separated paths in the zip file and in - # RECORD - rel_path = rel_path.replace(os.sep, "/") - - zinfo = zipfile.ZipInfo(rel_path) + self, + wheel: zipfile.ZipFile, + full_path: Path, + rel_path: Path, + ) -> None: + # We always want to have /-separated paths in the zip file and in RECORD + rel_path_name = rel_path.as_posix() + zinfo = zipfile.ZipInfo(rel_path_name) # Normalize permission bits to either 755 (executable) or 644 - st_mode = os.stat(full_path).st_mode + st_mode = full_path.stat().st_mode new_mode = normalize_file_permissions(st_mode) zinfo.external_attr = (new_mode & 0xFFFF) << 16 # Unix attributes @@ -315,7 +428,7 @@ def _add_file( zinfo.external_attr |= 0x10 # MS-DOS directory flag hashsum = hashlib.sha256() - with open(full_path, "rb") as src: + with full_path.open("rb") as src: while True: buf = src.read(1024 * 8) if not buf: @@ -325,15 +438,15 @@ def _add_file( src.seek(0) wheel.writestr(zinfo, src.read(), compress_type=zipfile.ZIP_DEFLATED) - size = os.stat(full_path).st_size + size = full_path.stat().st_size hash_digest = urlsafe_b64encode(hashsum.digest()).decode("ascii").rstrip("=") - self._records.append((rel_path, hash_digest, size)) + self._records.append((rel_path_name, hash_digest, size)) @contextlib.contextmanager def _write_to_zip( - self, wheel, rel_path - ): # type: (zipfile.ZipFile, str) -> Iterator[StringIO] + self, wheel: zipfile.ZipFile, rel_path: str + ) -> Iterator[StringIO]: sio = StringIO() yield sio @@ -350,20 +463,20 @@ def _write_to_zip( wheel.writestr(zi, b, compress_type=zipfile.ZIP_DEFLATED) self._records.append((rel_path, hash_digest, len(b))) - def _write_entry_points(self, fp): # type: (TextIO) -> None + def _write_entry_points(self, fp: 
TextIO) -> None: """ Write entry_points.txt. """ entry_points = self.convert_entry_points() for group_name in sorted(entry_points): - fp.write("[{}]\n".format(group_name)) + fp.write(f"[{group_name}]\n") for ep in sorted(entry_points[group_name]): fp.write(ep.replace(" ", "") + "\n") fp.write("\n") - def _write_wheel_file(self, fp): # type: (TextIO) -> None + def _write_wheel_file(self, fp: TextIO) -> None: fp.write( wheel_file_template.format( version=__version__, @@ -372,8 +485,8 @@ def _write_wheel_file(self, fp): # type: (TextIO) -> None ) ) - def _write_metadata_file(self, fp): # type: (TextIO) -> None + def _write_metadata_file(self, fp: TextIO) -> None: """ Write out metadata in the 2.x format (email like) """ - fp.write(decode(self.get_metadata_content())) + fp.write(self.get_metadata_content()) diff --git a/conda_lock/_vendor/poetry/core/masonry/metadata.py b/conda_lock/_vendor/poetry/core/masonry/metadata.py index d8469351..9e17e64f 100644 --- a/conda_lock/_vendor/poetry/core/masonry/metadata.py +++ b/conda_lock/_vendor/poetry/core/masonry/metadata.py @@ -1,59 +1,63 @@ +from __future__ import annotations + from typing import TYPE_CHECKING -from conda_lock._vendor.poetry.core.utils.helpers import canonicalize_name -from conda_lock._vendor.poetry.core.utils.helpers import normalize_version -from conda_lock._vendor.poetry.core.version.helpers import format_python_constraint +from conda_lock._vendor.poetry.core.utils.helpers import readme_content_type if TYPE_CHECKING: - from conda_lock._vendor.poetry.core.packages import Package # noqa + from conda_lock._vendor.poetry.core.packages.package import Package class Metadata: - metadata_version = "2.1" # version 1.0 - name = None - version = None - platforms = () - supported_platforms = () - summary = None - description = None - keywords = None - home_page = None - download_url = None - author = None - author_email = None - license = None + name: str | None = None + version: str + platforms: tuple[str, ...] = () + supported_platforms: tuple[str, ...] = () + summary: str | None = None + description: str | None = None + keywords: str | None = None + home_page: str | None = None + download_url: str | None = None + author: str | None = None + author_email: str | None = None + license: str | None = None # version 1.1 - classifiers = () - requires = () - provides = () - obsoletes = () + classifiers: tuple[str, ...] = () + requires: tuple[str, ...] = () + provides: tuple[str, ...] = () + obsoletes: tuple[str, ...] = () # version 1.2 - maintainer = None - maintainer_email = None - requires_python = None - requires_external = () - requires_dist = [] - provides_dist = () - obsoletes_dist = () - project_urls = () + maintainer: str | None = None + maintainer_email: str | None = None + requires_python: str | None = None + requires_external: tuple[str, ...] = () + requires_dist: list[str] = [] # noqa: RUF012 + provides_dist: tuple[str, ...] = () + obsoletes_dist: tuple[str, ...] = () + project_urls: tuple[str, ...] 
= () # Version 2.1 - description_content_type = None - provides_extra = [] + description_content_type: str | None = None + provides_extra: list[str] = [] # noqa: RUF012 @classmethod - def from_package(cls, package): # type: ("Package") -> Metadata + def from_package(cls, package: Package) -> Metadata: + from conda_lock._vendor.poetry.core.version.helpers import format_python_constraint + meta = cls() - meta.name = canonicalize_name(package.name) - meta.version = normalize_version(package.version.text) + meta.name = package.pretty_name + meta.version = package.version.to_string() meta.summary = package.description - if package.readme: - with package.readme.open(encoding="utf-8") as f: - meta.description = f.read() + if package.readmes: + descriptions = [] + for readme in package.readmes: + with readme.open(encoding="utf-8") as f: + descriptions.append(f.read()) + meta.description = "\n".join(descriptions) meta.keywords = ",".join(package.keywords) meta.home_page = package.homepage or package.repository_url @@ -63,7 +67,7 @@ def from_package(cls, package): # type: ("Package") -> Metadata if package.license: meta.license = package.license.id - meta.classifiers = package.all_classifiers + meta.classifiers = tuple(package.all_classifiers) # Version 1.2 meta.maintainer = package.maintainer_name @@ -76,21 +80,16 @@ def from_package(cls, package): # type: ("Package") -> Metadata meta.requires_dist = [d.to_pep_508() for d in package.requires] # Version 2.1 - if package.readme: - if package.readme.suffix == ".rst": - meta.description_content_type = "text/x-rst" - elif package.readme.suffix in [".md", ".markdown"]: - meta.description_content_type = "text/markdown" - else: - meta.description_content_type = "text/plain" + if package.readmes: + meta.description_content_type = readme_content_type(package.readmes[0]) - meta.provides_extra = [e for e in package.extras] + meta.provides_extra = list(package.extras) if package.urls: for name, url in package.urls.items(): if name == "Homepage" and meta.home_page == url: continue - meta.project_urls += ("{}, {}".format(name, url),) + meta.project_urls += (f"{name}, {url}",) return meta diff --git a/conda_lock/_vendor/poetry/core/masonry/utils/helpers.py b/conda_lock/_vendor/poetry/core/masonry/utils/helpers.py index 3a515f42..ac291f27 100644 --- a/conda_lock/_vendor/poetry/core/masonry/utils/helpers.py +++ b/conda_lock/_vendor/poetry/core/masonry/utils/helpers.py @@ -1,7 +1,21 @@ +from __future__ import annotations + import re +import warnings + +from typing import TYPE_CHECKING +from typing import NewType +from typing import cast + + +if TYPE_CHECKING: + from packaging.utils import NormalizedName + +DistributionName = NewType("DistributionName", str) -def normalize_file_permissions(st_mode): # type: (int) -> int + +def normalize_file_permissions(st_mode: int) -> int: """ Normalizes the permission bits in the st_mode field from stat to 644/755 @@ -17,15 +31,54 @@ def normalize_file_permissions(st_mode): # type: (int) -> int return new_mode -def escape_version(version): # type: (str) -> str +def escape_version(version: str) -> str: """ Escaped version in wheel filename. Doesn't exactly follow the escaping specification in :pep:`427#escaping-and-unicode` because this conflicts with :pep:`440#local-version-identifiers`. """ + warnings.warn( + "escape_version() is deprecated. 
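# Aside: a hedged sketch of readme_content_type as used above; the real
# helper lives in poetry.core.utils.helpers and its body is not shown in
# this hunk. It maps the readme suffix to a Description-Content-Type value,
# matching the suffix checks the old inline code performed.
from pathlib import Path

def readme_content_type(path: Path) -> str:
    if path.suffix == ".rst":
        return "text/x-rst"
    if path.suffix in (".md", ".markdown"):
        return "text/markdown"
    return "text/plain"

print(readme_content_type(Path("README.md")))  # text/markdown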
Use Version.parse().to_string() instead.", + DeprecationWarning, + stacklevel=2, + ) return re.sub(r"[^\w\d.+]+", "_", version, flags=re.UNICODE) -def escape_name(name): # type: (str) -> str - """Escaped wheel name as specified in :pep:`427#escaping-and-unicode`.""" - return re.sub(r"[^\w\d.]+", "_", name, flags=re.UNICODE) +def escape_name(name: str) -> str: + """ + Escaped wheel name as specified in https://packaging.python.org/en/latest/specifications/binary-distribution-format/#escaping-and-unicode. + This function should only be used for the generation of artifact names, + and not to normalize or filter existing artifact names. + """ + warnings.warn( + "escape_name() is deprecated. Use packaging.utils.canonicalize_name() and" + " distribution_name() instead.", + DeprecationWarning, + stacklevel=2, + ) + return re.sub(r"[-_.]+", "_", name, flags=re.UNICODE).lower() + + +def distribution_name(name: NormalizedName) -> DistributionName: + """ + A normalized name, but with "-" replaced by "_". This is used in various places: + + https://packaging.python.org/en/latest/specifications/binary-distribution-format/#escaping-and-unicode + + In distribution names ... This is equivalent to PEP 503 normalisation followed by + replacing - with _. + + https://packaging.python.org/en/latest/specifications/source-distribution-format/#source-distribution-file-name + + ... {name} is normalised according to the same rules as for binary distributions + + https://packaging.python.org/en/latest/specifications/recording-installed-packages/#the-dist-info-directory + + This directory is named as {name}-{version}.dist-info, with name and version fields + corresponding to Core metadata specifications. Both fields must be normalized + (see PEP 503 and PEP 440 for the definition of normalization for each field + respectively), and replace dash (-) characters with underscore (_) characters ... + """ + distribution_name = name.replace("-", "_") + return cast("DistributionName", distribution_name) diff --git a/conda_lock/_vendor/poetry/core/masonry/utils/include.py b/conda_lock/_vendor/poetry/core/masonry/utils/include.py index af40c1b8..f183aa6c 100644 --- a/conda_lock/_vendor/poetry/core/masonry/utils/include.py +++ b/conda_lock/_vendor/poetry/core/masonry/utils/include.py @@ -1,10 +1,13 @@ -from typing import List -from typing import Optional +from __future__ import annotations -from conda_lock._vendor.poetry.core.utils._compat import Path +from typing import TYPE_CHECKING -class Include(object): +if TYPE_CHECKING: + from pathlib import Path + + +class Include: """ Represents an "include" entry. 
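# Aside: a worked example of the two-step naming rule documented above --
# PEP 503 normalisation via packaging, then dash-to-underscore for wheel and
# .dist-info names. "My.Fancy-Package" is a hypothetical project name.
from packaging.utils import canonicalize_name

normalized = canonicalize_name("My.Fancy-Package")  # 'my-fancy-package'
print(normalized.replace("-", "_"))                 # 'my_fancy_package'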
@@ -19,32 +22,30 @@ class Include(object): """ def __init__( - self, base, include, formats=None - ): # type: (Path, str, Optional[List[str]]) -> None + self, base: Path, include: str, formats: list[str] | None = None + ) -> None: self._base = base self._include = str(include) self._formats = formats - self._elements = sorted( - list(self._base.glob(str(self._include))) - ) # type: List[Path] + self._elements: list[Path] = sorted(self._base.glob(str(self._include))) @property - def base(self): # type: () -> Path + def base(self) -> Path: return self._base @property - def elements(self): # type: () -> List[Path] + def elements(self) -> list[Path]: return self._elements @property - def formats(self): # type: () -> Optional[List[str]] + def formats(self) -> list[str] | None: return self._formats - def is_empty(self): # type: () -> bool + def is_empty(self) -> bool: return len(self._elements) == 0 - def refresh(self): # type: () -> Include - self._elements = sorted(list(self._base.glob(self._include))) + def refresh(self) -> Include: + self._elements = sorted(self._base.glob(self._include)) return self diff --git a/conda_lock/_vendor/poetry/core/masonry/utils/module.py b/conda_lock/_vendor/poetry/core/masonry/utils/module.py index c8575f6b..c951ae2d 100644 --- a/conda_lock/_vendor/poetry/core/masonry/utils/module.py +++ b/conda_lock/_vendor/poetry/core/masonry/utils/module.py @@ -1,38 +1,44 @@ +from __future__ import annotations + +from pathlib import Path +from typing import TYPE_CHECKING from typing import Any -from typing import Dict -from typing import List -from typing import Optional +from typing import Mapping +from typing import Sequence -from conda_lock._vendor.poetry.core.utils._compat import Path -from conda_lock._vendor.poetry.core.utils.helpers import module_name -from .include import Include -from .package_include import PackageInclude +if TYPE_CHECKING: + from conda_lock._vendor.poetry.core.masonry.utils.include import Include class ModuleOrPackageNotFound(ValueError): - pass class Module: def __init__( - self, name, directory=".", packages=None, includes=None - ): # type: (str, str, Optional[List[Dict[str, Any]]], Optional[List[Dict[str, Any]]]) -> None + self, + name: str, + directory: str = ".", + packages: Sequence[Mapping[str, Any]] = (), + includes: Sequence[Mapping[str, Any]] = (), + ) -> None: + from conda_lock._vendor.poetry.core.masonry.utils.include import Include + from conda_lock._vendor.poetry.core.masonry.utils.package_include import PackageInclude + from conda_lock._vendor.poetry.core.utils.helpers import module_name + self._name = module_name(name) self._in_src = False self._is_package = False self._path = Path(directory) - self._includes = [] - packages = packages or [] - includes = includes or [] + self._includes: list[Include] = [] if not packages: # It must exist either as a .py file or a directory, but not both pkg_dir = Path(directory, self._name) py_file = Path(directory, self._name + ".py") if pkg_dir.is_dir() and py_file.is_file(): - raise ValueError("Both {} and {} exist".format(pkg_dir, py_file)) + raise ValueError(f"Both {pkg_dir} and {py_file} exist") elif pkg_dir.is_dir(): packages = [{"include": str(pkg_dir.relative_to(self._path))}] elif py_file.is_file(): @@ -44,24 +50,20 @@ def __init__( src_py_file = src / (self._name + ".py") if src_pkg_dir.is_dir() and src_py_file.is_file(): - raise ValueError("Both {} and {} exist".format(pkg_dir, py_file)) + raise ValueError(f"Both {pkg_dir} and {py_file} exist") elif src_pkg_dir.is_dir(): - packages = [ - 
{ - "include": str(src_pkg_dir.relative_to(src)), - "from": str(src.relative_to(self._path)), - } - ] + packages = [{ + "include": str(src_pkg_dir.relative_to(src)), + "from": str(src.relative_to(self._path)), + }] elif src_py_file.is_file(): - packages = [ - { - "include": str(src_py_file.relative_to(src)), - "from": str(src.relative_to(self._path)), - } - ] + packages = [{ + "include": str(src_py_file.relative_to(src)), + "from": str(src.relative_to(self._path)), + }] else: raise ModuleOrPackageNotFound( - "No file/folder found for package {}".format(name) + f"No file/folder found for package {name}" ) for package in packages: @@ -75,6 +77,7 @@ def __init__( package["include"], formats=formats, source=package.get("from"), + target=package.get("to"), ) ) @@ -84,26 +87,26 @@ def __init__( ) @property - def name(self): # type: () -> str + def name(self) -> str: return self._name @property - def path(self): # type: () -> Path + def path(self) -> Path: return self._path @property - def file(self): # type: () -> Path + def file(self) -> Path: if self._is_package: return self._path / "__init__.py" else: return self._path @property - def includes(self): # type: () -> List + def includes(self) -> list[Include]: return self._includes - def is_package(self): # type: () -> bool + def is_package(self) -> bool: return self._is_package - def is_in_src(self): # type: () -> bool + def is_in_src(self) -> bool: return self._in_src diff --git a/conda_lock/_vendor/poetry/core/masonry/utils/package_include.py b/conda_lock/_vendor/poetry/core/masonry/utils/package_include.py index 4758694b..0d0415b1 100644 --- a/conda_lock/_vendor/poetry/core/masonry/utils/package_include.py +++ b/conda_lock/_vendor/poetry/core/masonry/utils/package_include.py @@ -1,64 +1,76 @@ -from typing import List -from typing import Optional +from __future__ import annotations -from conda_lock._vendor.poetry.core.utils._compat import Path +from typing import TYPE_CHECKING -from .include import Include +from conda_lock._vendor.poetry.core.masonry.utils.include import Include + + +if TYPE_CHECKING: + from pathlib import Path class PackageInclude(Include): def __init__( - self, base, include, formats=None, source=None - ): # type: (Path, str, Optional[List[str]], Optional[str]) -> None - self._package = None + self, + base: Path, + include: str, + formats: list[str] | None = None, + source: str | None = None, + target: str | None = None, + ) -> None: + self._package: str self._is_package = False self._is_module = False self._source = source + self._target = target if source is not None: base = base / source - super(PackageInclude, self).__init__(base, include, formats=formats) + super().__init__(base, include, formats=formats) self.check_elements() @property - def package(self): # type: () -> str + def package(self) -> str: return self._package @property - def source(self): # type: () -> Optional[str] + def source(self) -> str | None: return self._source - def is_package(self): # type: () -> bool + @property + def target(self) -> str | None: + return self._target + + def is_package(self) -> bool: return self._is_package - def is_module(self): # type: () -> bool + def is_module(self) -> bool: return self._is_module - def refresh(self): # type: () -> PackageInclude - super(PackageInclude, self).refresh() + def refresh(self) -> PackageInclude: + super().refresh() return self.check_elements() - def is_stub_only(self): # type: () -> bool + def is_stub_only(self) -> bool: # returns `True` if this a PEP 561 stub-only package, # see [PEP 
561](https://www.python.org/dev/peps/pep-0561/#stub-only-packages) - return self.package.endswith("-stubs") and all( - el.suffix == ".pyi" - or (el.parent.name == self.package and el.name == "py.typed") + return (self.package or "").endswith("-stubs") and all( + el.suffix == ".pyi" or el.name == "py.typed" for el in self.elements if el.is_file() ) - def has_modules(self): # type: () -> bool + def has_modules(self) -> bool: # Packages no longer need an __init__.py in python3, but there must # at least be one .py file for it to be considered a package return any(element.suffix == ".py" for element in self.elements) - def check_elements(self): # type: () -> PackageInclude + def check_elements(self) -> PackageInclude: if not self._elements: raise ValueError( - "{} does not contain any element".format(self._base / self._include) + f"{self._base / self._include} does not contain any element" ) root = self._elements[0] @@ -67,21 +79,20 @@ def check_elements(self): # type: () -> PackageInclude self._is_package = True self._package = root.parent.name - if not self.is_stub_only() and not self.has_modules(): - raise ValueError("{} is not a package.".format(root.name)) + if not (self.is_stub_only() or self.has_modules()): + raise ValueError(f"{root.name} is not a package.") + elif root.is_dir(): + # If it's a directory, we include everything inside it + self._package = root.name + self._elements: list[Path] = sorted(root.glob("**/*")) + + if not (self.is_stub_only() or self.has_modules()): + raise ValueError(f"{root.name} is not a package.") + + self._is_package = True else: - if root.is_dir(): - # If it's a directory, we include everything inside it - self._package = root.name - self._elements = sorted(list(root.glob("**/*"))) # type: List[Path] - - if not self.is_stub_only() and not self.has_modules(): - raise ValueError("{} is not a package.".format(root.name)) - - self._is_package = True - else: - self._package = root.stem - self._is_module = True + self._package = root.stem + self._is_module = True return self diff --git a/conda_lock/_vendor/poetry/core/packages/__init__.py b/conda_lock/_vendor/poetry/core/packages/__init__.py index bb19288c..e69de29b 100644 --- a/conda_lock/_vendor/poetry/core/packages/__init__.py +++ b/conda_lock/_vendor/poetry/core/packages/__init__.py @@ -1,207 +0,0 @@ -import os -import re - -from typing import List -from typing import Optional -from typing import Union - -from conda_lock._vendor.poetry.core.semver import parse_constraint -from conda_lock._vendor.poetry.core.utils._compat import Path -from conda_lock._vendor.poetry.core.utils.patterns import wheel_file_re -from conda_lock._vendor.poetry.core.version.requirements import Requirement - -from .dependency import Dependency -from .directory_dependency import DirectoryDependency -from .file_dependency import FileDependency -from .package import Package -from .project_package import ProjectPackage -from .url_dependency import URLDependency -from .utils.link import Link -from .utils.utils import convert_markers -from .utils.utils import group_markers -from .utils.utils import is_archive_file -from .utils.utils import is_installable_dir -from .utils.utils import is_url -from .utils.utils import path_to_url -from .utils.utils import strip_extras -from .utils.utils import url_to_path -from .vcs_dependency import VCSDependency - - -def _make_file_or_dir_dep( - name, # type: str - path, # type: Path - base=None, # type: Optional[Path] - extras=None, # type: Optional[List[str]] -): # type: (...) 
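# Aside: an illustration of the PEP 561 stub-only rule above: a "<pkg>-stubs"
# include may contain only .pyi files (plus py.typed) and still count as a
# package even with no .py modules at all. The element list is hypothetical,
# so the is_file() filter from the real check is omitted here.
from pathlib import Path

elements = [Path("demo-stubs/__init__.pyi"), Path("demo-stubs/py.typed")]
package = "demo-stubs"
is_stub_only = package.endswith("-stubs") and all(
    el.suffix == ".pyi" or el.name == "py.typed" for el in elements
)
print(is_stub_only)  # True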
-> Optional[Union[FileDependency, DirectoryDependency]] - """ - Helper function to create a file or directoru dependency with the given arguments. If - path is not a file or directory that exists, `None` is returned. - """ - _path = path - if not path.is_absolute() and base: - # a base path was specified, so we should respect that - _path = Path(base) / path - - if _path.is_file(): - return FileDependency(name, path, base=base, extras=extras) - elif _path.is_dir(): - return DirectoryDependency(name, path, base=base, extras=extras) - - return None - - -def dependency_from_pep_508( - name, relative_to=None -): # type: (str, Optional[Path]) -> Dependency - """ - Resolve a PEP-508 requirement string to a `Dependency` instance. If a `relative_to` - path is specified, this is used as the base directory if the identified dependency is - of file or directory type. - """ - from conda_lock._vendor.poetry.core.vcs.git import ParsedUrl - - # Removing comments - parts = name.split("#", 1) - name = parts[0].strip() - if len(parts) > 1: - rest = parts[1] - if " ;" in rest: - name += " ;" + rest.split(" ;", 1)[1] - - req = Requirement(name) - - if req.marker: - markers = convert_markers(req.marker) - else: - markers = {} - - name = req.name - path = os.path.normpath(os.path.abspath(name)) - link = None - - if is_url(name): - link = Link(name) - elif req.url: - link = Link(req.url) - else: - p, extras = strip_extras(path) - if os.path.isdir(p) and (os.path.sep in name or name.startswith(".")): - - if not is_installable_dir(p): - raise ValueError( - "Directory {!r} is not installable. File 'setup.py' " - "not found.".format(name) - ) - link = Link(path_to_url(p)) - elif is_archive_file(p): - link = Link(path_to_url(p)) - - # it's a local file, dir, or url - if link: - is_file_uri = link.scheme == "file" - is_relative_uri = is_file_uri and re.search(r"\.\./", link.url) - - # Handle relative file URLs - if is_file_uri and is_relative_uri: - path = Path(link.path) - if relative_to: - path = relative_to / path - link = Link(path_to_url(path)) - - # wheel file - version = None - if link.is_wheel: - m = wheel_file_re.match(link.filename) - if not m: - raise ValueError("Invalid wheel name: {}".format(link.filename)) - name = m.group("name") - version = m.group("ver") - - name = req.name or link.egg_fragment - dep = None - - if link.scheme.startswith("git+"): - url = ParsedUrl.parse(link.url) - dep = VCSDependency(name, "git", url.url, rev=url.rev, extras=req.extras) - elif link.scheme == "git": - dep = VCSDependency( - name, "git", link.url_without_fragment, extras=req.extras - ) - elif link.scheme in ["http", "https"]: - dep = URLDependency(name, link.url) - elif is_file_uri: - # handle RFC 8089 references - path = url_to_path(req.url) - dep = _make_file_or_dir_dep( - name=name, path=path, base=relative_to, extras=req.extras - ) - else: - try: - # this is a local path not using the file URI scheme - dep = _make_file_or_dir_dep( - name=name, path=Path(req.url), base=relative_to, extras=req.extras, - ) - except ValueError: - pass - - if dep is None: - dep = Dependency(name, version or "*", extras=req.extras) - - if version: - dep._constraint = parse_constraint(version) - else: - if req.pretty_constraint: - constraint = req.constraint - else: - constraint = "*" - - dep = Dependency(name, constraint, extras=req.extras) - - if "extra" in markers: - # If we have extras, the dependency is optional - dep.deactivate() - - for or_ in markers["extra"]: - for _, extra in or_: - dep.in_extras.append(extra) - - if 
"python_version" in markers: - ors = [] - for or_ in markers["python_version"]: - ands = [] - for op, version in or_: - # Expand python version - if op == "==" and "*" not in version: - version = "~" + version - op = "" - elif op == "!=": - version += ".*" - elif op in ("in", "not in"): - versions = [] - for v in re.split("[ ,]+", version): - split = v.split(".") - if len(split) in [1, 2]: - split.append("*") - op_ = "" if op == "in" else "!=" - else: - op_ = "==" if op == "in" else "!=" - - versions.append(op_ + ".".join(split)) - - glue = " || " if op == "in" else ", " - if versions: - ands.append(glue.join(versions)) - - continue - - ands.append("{}{}".format(op, version)) - - ors.append(" ".join(ands)) - - dep.python_versions = " || ".join(ors) - - if req.marker: - dep.marker = req.marker - - return dep diff --git a/conda_lock/_vendor/poetry/core/packages/constraints/any_constraint.py b/conda_lock/_vendor/poetry/core/packages/constraints/any_constraint.py deleted file mode 100644 index 88945a11..00000000 --- a/conda_lock/_vendor/poetry/core/packages/constraints/any_constraint.py +++ /dev/null @@ -1,43 +0,0 @@ -from typing import TYPE_CHECKING - -from .base_constraint import BaseConstraint -from .empty_constraint import EmptyConstraint - - -if TYPE_CHECKING: - from . import ConstraintTypes # noqa - - -class AnyConstraint(BaseConstraint): - def allows(self, other): # type: ("ConstraintTypes") -> bool - return True - - def allows_all(self, other): # type: ("ConstraintTypes") -> bool - return True - - def allows_any(self, other): # type: ("ConstraintTypes") -> bool - return True - - def difference(self, other): # type: ("ConstraintTypes") -> "ConstraintTypes" - if other.is_any(): - return EmptyConstraint() - - return other - - def intersect(self, other): # type: ("ConstraintTypes") -> "ConstraintTypes" - return other - - def union(self, other): # type: ("ConstraintTypes") -> AnyConstraint - return AnyConstraint() - - def is_any(self): # type: () -> bool - return True - - def is_empty(self): # type: () -> bool - return False - - def __str__(self): # type: () -> str - return "*" - - def __eq__(self, other): # type: ("ConstraintTypes") -> bool - return other.is_any() diff --git a/conda_lock/_vendor/poetry/core/packages/constraints/base_constraint.py b/conda_lock/_vendor/poetry/core/packages/constraints/base_constraint.py deleted file mode 100644 index 0db9aff4..00000000 --- a/conda_lock/_vendor/poetry/core/packages/constraints/base_constraint.py +++ /dev/null @@ -1,37 +0,0 @@ -from typing import TYPE_CHECKING - - -if TYPE_CHECKING: - from . 
import ConstraintTypes # noqa - - -class BaseConstraint(object): - def allows(self, other): # type: ("ConstraintTypes") -> bool - raise NotImplementedError - - def allows_all(self, other): # type: ("ConstraintTypes") -> bool - raise NotImplementedError() - - def allows_any(self, other): # type: ("ConstraintTypes") -> bool - raise NotImplementedError() - - def difference(self, other): # type: ("ConstraintTypes") -> "ConstraintTypes" - raise NotImplementedError() - - def intersect(self, other): # type: ("ConstraintTypes") -> "ConstraintTypes" - raise NotImplementedError() - - def union(self, other): # type: ("ConstraintTypes") -> "ConstraintTypes" - raise NotImplementedError() - - def is_any(self): # type: () -> bool - return False - - def is_empty(self): # type: () -> bool - return False - - def __repr__(self): # type: () -> str - return "<{} {}>".format(self.__class__.__name__, str(self)) - - def __eq__(self, other): # type: ("ConstraintTypes") -> bool - raise NotImplementedError() diff --git a/conda_lock/_vendor/poetry/core/packages/constraints/constraint.py b/conda_lock/_vendor/poetry/core/packages/constraints/constraint.py deleted file mode 100644 index 1ebe915f..00000000 --- a/conda_lock/_vendor/poetry/core/packages/constraints/constraint.py +++ /dev/null @@ -1,131 +0,0 @@ -import operator - -from typing import TYPE_CHECKING -from typing import Any -from typing import Union - -from .base_constraint import BaseConstraint -from .empty_constraint import EmptyConstraint - - -if TYPE_CHECKING: - from . import ConstraintTypes # noqa - - -class Constraint(BaseConstraint): - - OP_EQ = operator.eq - OP_NE = operator.ne - - _trans_op_str = {"=": OP_EQ, "==": OP_EQ, "!=": OP_NE} - - _trans_op_int = {OP_EQ: "==", OP_NE: "!="} - - def __init__(self, version, operator="=="): # type: (str, str) -> None - if operator == "=": - operator = "==" - - self._version = version - self._operator = operator - self._op = self._trans_op_str[operator] - - @property - def version(self): # type: () -> str - return self._version - - @property - def operator(self): # type: () -> str - return self._operator - - def allows(self, other): # type: ("ConstraintTypes") -> bool - is_equal_op = self._operator == "==" - is_non_equal_op = self._operator == "!=" - is_other_equal_op = other.operator == "==" - is_other_non_equal_op = other.operator == "!=" - - if is_equal_op and is_other_equal_op: - return self._version == other.version - - if ( - is_equal_op - and is_other_non_equal_op - or is_non_equal_op - and is_other_equal_op - or is_non_equal_op - and is_other_non_equal_op - ): - return self._version != other.version - - return False - - def allows_all(self, other): # type: ("ConstraintTypes") -> bool - if not isinstance(other, Constraint): - return other.is_empty() - - return other == self - - def allows_any(self, other): # type: ("ConstraintTypes") -> bool - if isinstance(other, Constraint): - is_non_equal_op = self._operator == "!=" - is_other_non_equal_op = other.operator == "!=" - - if is_non_equal_op and is_other_non_equal_op: - return self._version != other.version - - return other.allows(self) - - def difference( - self, other - ): # type: ("ConstraintTypes") -> Union[Constraint, "EmptyConstraint"] - if other.allows(self): - return EmptyConstraint() - - return self - - def intersect(self, other): # type: ("ConstraintTypes") -> "ConstraintTypes" - from .multi_constraint import MultiConstraint - - if isinstance(other, Constraint): - if other == self: - return self - - if self.operator == "!=" and other.operator == "==" 
and self.allows(other): - return other - - if other.operator == "!=" and self.operator == "==" and other.allows(self): - return self - - if other.operator == "!=" and self.operator == "!=": - return MultiConstraint(self, other) - - return EmptyConstraint() - - return other.intersect(self) - - def union(self, other): # type: ("ConstraintTypes") -> "ConstraintTypes" - if isinstance(other, Constraint): - from .union_constraint import UnionConstraint - - return UnionConstraint(self, other) - - return other.union(self) - - def is_any(self): # type: () -> bool - return False - - def is_empty(self): # type: () -> bool - return False - - def __eq__(self, other): # type: (Any) -> bool - if not isinstance(other, Constraint): - return NotImplemented - - return (self.version, self.operator) == (other.version, other.operator) - - def __hash__(self): # type: () -> int - return hash((self._operator, self._version)) - - def __str__(self): # type: () -> str - return "{}{}".format( - self._operator if self._operator != "==" else "", self._version - ) diff --git a/conda_lock/_vendor/poetry/core/packages/constraints/empty_constraint.py b/conda_lock/_vendor/poetry/core/packages/constraints/empty_constraint.py deleted file mode 100644 index 4db043de..00000000 --- a/conda_lock/_vendor/poetry/core/packages/constraints/empty_constraint.py +++ /dev/null @@ -1,39 +0,0 @@ -from typing import TYPE_CHECKING - -from .base_constraint import BaseConstraint - - -if TYPE_CHECKING: - from . import ConstraintTypes # noqa - - -class EmptyConstraint(BaseConstraint): - - pretty_string = None - - def matches(self, _): # type: ("ConstraintTypes") -> bool - return True - - def is_empty(self): # type: () -> bool - return True - - def allows(self, other): # type: ("ConstraintTypes") -> bool - return False - - def allows_all(self, other): # type: ("ConstraintTypes") -> bool - return True - - def allows_any(self, other): # type: ("ConstraintTypes") -> bool - return True - - def intersect(self, other): # type: ("ConstraintTypes") -> "ConstraintTypes" - return other - - def difference(self, other): # type: ("ConstraintTypes") -> None - return - - def __eq__(self, other): # type: ("ConstraintTypes") -> bool - return other.is_empty() - - def __str__(self): # type: () -> str - return "" diff --git a/conda_lock/_vendor/poetry/core/packages/constraints/multi_constraint.py b/conda_lock/_vendor/poetry/core/packages/constraints/multi_constraint.py deleted file mode 100644 index 33fc9e4a..00000000 --- a/conda_lock/_vendor/poetry/core/packages/constraints/multi_constraint.py +++ /dev/null @@ -1,100 +0,0 @@ -from typing import TYPE_CHECKING -from typing import Any -from typing import Tuple - -from .base_constraint import BaseConstraint -from .constraint import Constraint - - -if TYPE_CHECKING: - from . 
import ConstraintTypes # noqa - - -class MultiConstraint(BaseConstraint): - def __init__(self, *constraints): # type: (*Constraint) -> None - if any(c.operator == "==" for c in constraints): - raise ValueError( - "A multi-constraint can only be comprised of negative constraints" - ) - - self._constraints = constraints - - @property - def constraints(self): # type: () -> Tuple[Constraint] - return self._constraints - - def allows(self, other): # type: ("ConstraintTypes") -> bool - for constraint in self._constraints: - if not constraint.allows(other): - return False - - return True - - def allows_all(self, other): # type: ("ConstraintTypes") -> bool - if other.is_any(): - return False - - if other.is_empty(): - return True - - if isinstance(other, Constraint): - return self.allows(other) - - our_constraints = iter(self._constraints) - their_constraints = iter(other.constraints) - our_constraint = next(our_constraints, None) - their_constraint = next(their_constraints, None) - - while our_constraint and their_constraint: - if our_constraint.allows_all(their_constraint): - their_constraint = next(their_constraints, None) - else: - our_constraint = next(our_constraints, None) - - return their_constraint is None - - def allows_any(self, other): # type: ("ConstraintTypes") -> bool - if other.is_any(): - return True - - if other.is_empty(): - return True - - if isinstance(other, Constraint): - return self.allows(other) - - if isinstance(other, MultiConstraint): - for c1 in self.constraints: - for c2 in other.constraints: - if c1.allows(c2): - return True - - return False - - def intersect(self, other): # type: (Constraint) -> MultiConstraint - if isinstance(other, Constraint): - constraints = self._constraints - if other not in constraints: - constraints += (other,) - else: - constraints = (other,) - - if len(constraints) == 1: - return constraints[0] - - return MultiConstraint(*constraints) - - def __eq__(self, other): # type: (Any) -> bool - if not isinstance(other, MultiConstraint): - return False - - return sorted( - self._constraints, key=lambda c: (c.operator, c.version) - ) == sorted(other.constraints, key=lambda c: (c.operator, c.version)) - - def __str__(self): # type: () -> str - constraints = [] - for constraint in self._constraints: - constraints.append(str(constraint)) - - return "{}".format(", ").join(constraints) diff --git a/conda_lock/_vendor/poetry/core/packages/constraints/union_constraint.py b/conda_lock/_vendor/poetry/core/packages/constraints/union_constraint.py deleted file mode 100644 index ec0330c2..00000000 --- a/conda_lock/_vendor/poetry/core/packages/constraints/union_constraint.py +++ /dev/null @@ -1,124 +0,0 @@ -from typing import TYPE_CHECKING -from typing import Tuple -from typing import Union - -from .base_constraint import BaseConstraint -from .constraint import Constraint -from .empty_constraint import EmptyConstraint -from .multi_constraint import MultiConstraint - - -if TYPE_CHECKING: - from . 
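# Aside: the deleted allows_all above relies on a two-pointer sweep --
# advance through "their" constraints while some "our" constraint subsumes
# each one; coverage holds iff "theirs" is exhausted. A generic sketch with
# integer sets standing in for constraints (both lists must be in a
# compatible canonical order, as the vendored constraint tuples are):
def covers_all(ours: list, theirs: list) -> bool:
    ours_it, theirs_it = iter(ours), iter(theirs)
    our, their = next(ours_it, None), next(theirs_it, None)
    while our is not None and their is not None:
        if their <= our:  # "our" constraint allows everything in "their"
            their = next(theirs_it, None)
        else:
            our = next(ours_it, None)
    return their is None

print(covers_all([{1, 2, 3}], [{1, 2}]))  # True
print(covers_all([{1, 2}], [{1, 4}]))     # False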
import ConstraintTypes # noqa - - -class UnionConstraint(BaseConstraint): - def __init__(self, *constraints): # type: (*Constraint) -> None - self._constraints = constraints - - @property - def constraints(self): # type: () -> Tuple[Constraint] - return self._constraints - - def allows( - self, other - ): # type: (Union[Constraint, MultiConstraint, UnionConstraint]) -> bool - for constraint in self._constraints: - if constraint.allows(other): - return True - - return False - - def allows_any(self, other): # type: ("ConstraintTypes") -> bool - if other.is_empty(): - return False - - if other.is_any(): - return True - - if isinstance(other, Constraint): - constraints = [other] - else: - constraints = other.constraints - - for our_constraint in self._constraints: - for their_constraint in constraints: - if our_constraint.allows_any(their_constraint): - return True - - return False - - def allows_all(self, other): # type: ("ConstraintTypes") -> bool - if other.is_any(): - return False - - if other.is_empty(): - return True - - if isinstance(other, Constraint): - constraints = [other] - else: - constraints = other.constraints - - our_constraints = iter(self._constraints) - their_constraints = iter(constraints) - our_constraint = next(our_constraints, None) - their_constraint = next(their_constraints, None) - - while our_constraint and their_constraint: - if our_constraint.allows_all(their_constraint): - their_constraint = next(their_constraints, None) - else: - our_constraint = next(our_constraints, None) - - return their_constraint is None - - def intersect(self, other): # type: ("ConstraintTypes") -> "ConstraintTypes" - if other.is_any(): - return self - - if other.is_empty(): - return other - - if isinstance(other, Constraint): - if self.allows(other): - return other - - return EmptyConstraint() - - new_constraints = [] - for our_constraint in self._constraints: - for their_constraint in other.constraints: - intersection = our_constraint.intersect(their_constraint) - - if not intersection.is_empty() and intersection not in new_constraints: - new_constraints.append(intersection) - - if not new_constraints: - return EmptyConstraint() - - return UnionConstraint(*new_constraints) - - def union(self, other): # type: (Constraint) -> UnionConstraint - if isinstance(other, Constraint): - constraints = self._constraints - if other not in self._constraints: - constraints += (other,) - - return UnionConstraint(*constraints) - - def __eq__(self, other): # type: ("ConstraintTypes") -> bool - - if not isinstance(other, UnionConstraint): - return False - - return sorted( - self._constraints, key=lambda c: (c.operator, c.version) - ) == sorted(other.constraints, key=lambda c: (c.operator, c.version)) - - def __str__(self): # type: () -> str - constraints = [] - for constraint in self._constraints: - constraints.append(str(constraint)) - - return "{}".format(" || ").join(constraints) diff --git a/conda_lock/_vendor/poetry/core/packages/dependency.py b/conda_lock/_vendor/poetry/core/packages/dependency.py index 2e544c16..c72d79c2 100644 --- a/conda_lock/_vendor/poetry/core/packages/dependency.py +++ b/conda_lock/_vendor/poetry/core/packages/dependency.py @@ -1,229 +1,310 @@ +from __future__ import annotations + +import os +import re +import warnings + +from contextlib import suppress +from pathlib import Path from typing import TYPE_CHECKING -from typing import Any -from typing import FrozenSet -from typing import List -from typing import Optional -from typing import Union - -from 
conda_lock._vendor.poetry.core.semver import Version -from conda_lock._vendor.poetry.core.semver import VersionConstraint -from conda_lock._vendor.poetry.core.semver import VersionRange -from conda_lock._vendor.poetry.core.semver import VersionUnion -from conda_lock._vendor.poetry.core.semver import parse_constraint -from conda_lock._vendor.poetry.core.version.markers import AnyMarker +from typing import Sequence +from typing import TypeVar + +from packaging.utils import canonicalize_name + +from conda_lock._vendor.poetry.core.constraints.generic import parse_constraint as parse_generic_constraint +from conda_lock._vendor.poetry.core.constraints.version import parse_constraint +from conda_lock._vendor.poetry.core.packages.dependency_group import MAIN_GROUP +from conda_lock._vendor.poetry.core.packages.specification import PackageSpecification +from conda_lock._vendor.poetry.core.packages.utils.utils import contains_group_without_marker +from conda_lock._vendor.poetry.core.packages.utils.utils import create_nested_marker +from conda_lock._vendor.poetry.core.packages.utils.utils import normalize_python_version_markers from conda_lock._vendor.poetry.core.version.markers import parse_marker -from .constraints import parse_constraint as parse_generic_constraint -from .constraints.constraint import Constraint -from .constraints.multi_constraint import MultiConstraint -from .constraints.union_constraint import UnionConstraint -from .specification import PackageSpecification -from .utils.utils import convert_markers - if TYPE_CHECKING: - from conda_lock._vendor.poetry.core.version.markers import BaseMarker # noqa - from conda_lock._vendor.poetry.core.packages import Package # noqa - from conda_lock._vendor.poetry.core.version.markers import VersionTypes # noqa + from collections.abc import Iterable + + from packaging.utils import NormalizedName + + from conda_lock._vendor.poetry.core.constraints.version import VersionConstraint + from conda_lock._vendor.poetry.core.packages.directory_dependency import DirectoryDependency + from conda_lock._vendor.poetry.core.packages.file_dependency import FileDependency + from conda_lock._vendor.poetry.core.version.markers import BaseMarker - from .constraints import BaseConstraint # noqa + T = TypeVar("T", bound="Dependency") class Dependency(PackageSpecification): def __init__( self, - name, # type: str - constraint, # type: Union[str, VersionConstraint] - optional=False, # type: bool - category="main", # type: str - allows_prereleases=False, # type: bool - extras=None, # type: Union[List[str], FrozenSet[str]] - source_type=None, # type: Optional[str] - source_url=None, # type: Optional[str] - source_reference=None, # type: Optional[str] - source_resolved_reference=None, # type: Optional[str] - ): - super(Dependency, self).__init__( + name: str, + constraint: str | VersionConstraint, + optional: bool = False, + groups: Iterable[str] | None = None, + allows_prereleases: bool = False, + extras: Iterable[str] | None = None, + source_type: str | None = None, + source_url: str | None = None, + source_reference: str | None = None, + source_resolved_reference: str | None = None, + source_subdirectory: str | None = None, + ) -> None: + from conda_lock._vendor.poetry.core.version.markers import AnyMarker + + super().__init__( name, source_type=source_type, source_url=source_url, source_reference=source_reference, source_resolved_reference=source_resolved_reference, + source_subdirectory=source_subdirectory, features=extras, ) - self._constraint = None - 
self.set_constraint(constraint=constraint) + # Attributes must be immutable for clone() to be safe! + # (For performance reasons, clone only creates a copy instead of a deep copy). + + self._constraint: VersionConstraint + self._pretty_constraint: str + self.constraint = constraint # type: ignore[assignment] - self._pretty_constraint = str(constraint) self._optional = optional - self._category = category - if isinstance(self._constraint, VersionRange) and self._constraint.min: - allows_prereleases = ( - allows_prereleases or self._constraint.min.is_prerelease() - ) + if not groups: + groups = [MAIN_GROUP] + self._groups = frozenset(groups) self._allows_prereleases = allows_prereleases self._python_versions = "*" self._python_constraint = parse_constraint("*") - self._transitive_python_versions = None - self._transitive_python_constraint = None - self._transitive_marker = None - self._extras = frozenset(extras or []) + self._transitive_python_versions: str | None = None + self._transitive_python_constraint: VersionConstraint | None = None + self._transitive_marker: BaseMarker | None = None - self._in_extras = [] + self._in_extras: Sequence[NormalizedName] = [] self._activated = not self._optional self.is_root = False - self.marker = AnyMarker() - self.source_name = None + self._marker: BaseMarker = AnyMarker() + self.source_name: str | None = None @property - def name(self): # type: () -> str + def name(self) -> NormalizedName: return self._name @property - def constraint(self): # type: () -> "VersionTypes" + def constraint(self) -> VersionConstraint: return self._constraint - def set_constraint(self, constraint): # type: (Union[str, "VersionTypes"]) -> None - try: - if not isinstance(constraint, VersionConstraint): - self._constraint = parse_constraint(constraint) - else: - self._constraint = constraint - except ValueError: - self._constraint = parse_constraint("*") + @constraint.setter + def constraint(self, constraint: str | VersionConstraint) -> None: + if isinstance(constraint, str): + self._constraint = parse_constraint(constraint) + else: + self._constraint = constraint + + self._pretty_constraint = str(constraint) + + def set_constraint(self, constraint: str | VersionConstraint) -> None: + warnings.warn( + "Calling method 'set_constraint' is deprecated and will be removed. 
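A short sketch of the API change above, assuming the vendored module path: `constraint` is now a read/write property whose setter parses strings, and `set_constraint` survives only as a deprecated shim.

from conda_lock._vendor.poetry.core.packages.dependency import Dependency

dep = Dependency("requests", ">=2.28", groups=["main"])
dep.constraint = ">=2.28,<3"      # setter parses the string via parse_constraint()
print(dep.pretty_constraint)      # ">=2.28,<3"
dep.set_constraint("<3")          # still works, but emits a DeprecationWarning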
" + "It has been replaced by the property 'constraint' for consistency.", + DeprecationWarning, + stacklevel=2, + ) + self.constraint = constraint # type: ignore[assignment] @property - def pretty_constraint(self): # type: () -> str + def pretty_constraint(self) -> str: return self._pretty_constraint @property - def pretty_name(self): # type: () -> str + def pretty_name(self) -> str: return self._pretty_name @property - def category(self): # type: () -> str - return self._category + def groups(self) -> frozenset[str]: + return self._groups @property - def python_versions(self): # type: () -> str + def python_versions(self) -> str: return self._python_versions @python_versions.setter - def python_versions(self, value): # type: (str) -> None + def python_versions(self, value: str) -> None: self._python_versions = value self._python_constraint = parse_constraint(value) if not self._python_constraint.is_any(): - self.marker = self.marker.intersect( + self._marker = self._marker.intersect( parse_marker( - self._create_nested_marker( - "python_version", self._python_constraint - ) + create_nested_marker("python_version", self._python_constraint) ) ) @property - def transitive_python_versions(self): # type: () -> str + def transitive_python_versions(self) -> str: + warnings.warn( + "'transitive_python_versions' is deprecated and will be removed.", + DeprecationWarning, + stacklevel=2, + ) if self._transitive_python_versions is None: return self._python_versions return self._transitive_python_versions @transitive_python_versions.setter - def transitive_python_versions(self, value): # type: (str) -> None + def transitive_python_versions(self, value: str) -> None: + warnings.warn( + "'transitive_python_versions' is deprecated and will be removed.", + DeprecationWarning, + stacklevel=2, + ) self._transitive_python_versions = value self._transitive_python_constraint = parse_constraint(value) @property - def transitive_marker(self): # type: () -> "BaseMarker" + def marker(self) -> BaseMarker: + return self._marker + + @marker.setter + def marker(self, marker: str | BaseMarker) -> None: + from conda_lock._vendor.poetry.core.constraints.version import parse_constraint + from conda_lock._vendor.poetry.core.packages.utils.utils import convert_markers + from conda_lock._vendor.poetry.core.version.markers import BaseMarker + from conda_lock._vendor.poetry.core.version.markers import parse_marker + + if not isinstance(marker, BaseMarker): + marker = parse_marker(marker) + + self._marker = marker + + markers = convert_markers(marker) + + if "extra" in markers: + # If we have extras, the dependency is optional + self.deactivate() + + new_in_extras = [] + for or_ in markers["extra"]: + for op, extra in or_: + if op == "==": + new_in_extras.append(canonicalize_name(extra)) + elif op == "" and "||" in extra: + for _extra in extra.split(" || "): + new_in_extras.append(canonicalize_name(_extra)) + self._in_extras = [*self._in_extras, *new_in_extras] + + # Recalculate python versions. 
+ self._python_versions = "*" + if not contains_group_without_marker(markers, "python_version"): + python_version_markers = markers["python_version"] + self._python_versions = normalize_python_version_markers( + python_version_markers + ) + + self._python_constraint = parse_constraint(self._python_versions) + + @property + def transitive_marker(self) -> BaseMarker: if self._transitive_marker is None: return self.marker return self._transitive_marker @transitive_marker.setter - def transitive_marker(self, value): # type: ("BaseMarker") -> None + def transitive_marker(self, value: BaseMarker) -> None: self._transitive_marker = value @property - def python_constraint(self): # type: () -> "VersionTypes" + def python_constraint(self) -> VersionConstraint: return self._python_constraint @property - def transitive_python_constraint(self): # type: () -> "VersionTypes" + def transitive_python_constraint(self) -> VersionConstraint: + warnings.warn( + "'transitive_python_constraint' is deprecated and will be removed.", + DeprecationWarning, + stacklevel=2, + ) if self._transitive_python_constraint is None: return self._python_constraint return self._transitive_python_constraint @property - def extras(self): # type: () -> FrozenSet[str] - return self._extras + def extras(self) -> frozenset[NormalizedName]: + # extras activated in a dependency is the same as features + return self._features @property - def in_extras(self): # type: () -> list + def in_extras(self) -> Sequence[NormalizedName]: return self._in_extras @property - def base_pep_508_name(self): # type: () -> str - requirement = self.pretty_name - - if self.extras: - requirement += "[{}]".format(",".join(self.extras)) - - if isinstance(self.constraint, VersionUnion): - if self.constraint.excludes_single_version(): - requirement += " ({})".format(str(self.constraint)) + def base_pep_508_name(self) -> str: + from conda_lock._vendor.poetry.core.constraints.version import Version + from conda_lock._vendor.poetry.core.constraints.version import VersionUnion + + requirement = self.complete_pretty_name + + constraint = self.constraint + if isinstance(constraint, VersionUnion): + if ( + constraint.excludes_single_version + or constraint.excludes_single_wildcard_range + ): + # This branch is a short-circuit logic for special cases and + # avoids having to split and parse constraint again. This has + # no functional difference with the logic in the else branch. 
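Conversely, assigning a marker, as implemented above, drives state the other way: an `extra` clause deactivates the dependency and records the extras, and `python_version` clauses are folded back into `python_versions`. A rough sketch of the round trip:

dep = Dependency("rich", "*")
dep.marker = 'python_version >= "3.8" and extra == "docs"'
dep.is_activated()    # False, an "extra" marker makes the dependency optional
list(dep.in_extras)   # ["docs"] (canonicalized)
dep.python_versions   # ">=3.8", recalculated from the marker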
+ requirement += f" ({constraint})" else: - constraints = self.pretty_constraint.split(",") - constraints = [parse_constraint(c) for c in constraints] - constraints = [str(c) for c in constraints] - requirement += " ({})".format(",".join(constraints)) - elif isinstance(self.constraint, Version): - requirement += " (=={})".format(self.constraint.text) - elif not self.constraint.is_any(): - requirement += " ({})".format(str(self.constraint).replace(" ", "")) + constraints = ",".join( + str(parse_constraint(c)) for c in self.pretty_constraint.split(",") + ) + requirement += f" ({constraints})" + elif isinstance(constraint, Version): + requirement += f" (=={constraint.text})" + elif not constraint.is_any(): + requirement += f" ({str(constraint).replace(' ', '')})" return requirement - def allows_prereleases(self): # type: () -> bool + @property + def base_pep_508_name_resolved(self) -> str: + return self.base_pep_508_name + + def allows_prereleases(self) -> bool: return self._allows_prereleases - def is_optional(self): # type: () -> bool + def is_optional(self) -> bool: return self._optional - def is_activated(self): # type: () -> bool + def is_activated(self) -> bool: return self._activated - def is_vcs(self): # type: () -> bool + def is_vcs(self) -> bool: return False - def is_file(self): # type: () -> bool + def is_file(self) -> bool: return False - def is_directory(self): # type: () -> bool + def is_directory(self) -> bool: return False - def is_url(self): # type: () -> bool + def is_url(self) -> bool: return False - def accepts(self, package): # type: (Package) -> bool - """ - Determines if the given package matches this dependency. - """ - return ( - self._name == package.name - and self._constraint.allows(package.version) - and (not package.is_prerelease() or self.allows_prereleases()) - ) + def to_pep_508(self, with_extras: bool = True, *, resolved: bool = False) -> str: + from conda_lock._vendor.poetry.core.packages.utils.utils import convert_markers - def to_pep_508(self, with_extras=True): # type: (bool) -> str - requirement = self.base_pep_508_name + if resolved: + requirement = self.base_pep_508_name_resolved + else: + requirement = self.base_pep_508_name markers = [] has_extras = False @@ -234,7 +315,7 @@ def to_pep_508(self, with_extras=True): # type: (bool) -> str # we re-check for any marker here since the without extra marker might # return an any marker again - if not marker.is_empty() and not marker.is_any(): + if not (marker.is_empty() or marker.is_any()): markers.append(str(marker)) has_extras = "extra" in convert_markers(marker) @@ -244,118 +325,31 @@ def to_pep_508(self, with_extras=True): # type: (bool) -> str python_constraint = self.python_constraint markers.append( - self._create_nested_marker("python_version", python_constraint) + create_nested_marker("python_version", python_constraint) ) in_extras = " || ".join(self._in_extras) if in_extras and with_extras and not has_extras: markers.append( - self._create_nested_marker("extra", parse_generic_constraint(in_extras)) + create_nested_marker("extra", parse_generic_constraint(in_extras)) ) if markers: - if self.is_vcs() or self.is_url(): - requirement += " " - if len(markers) > 1: - markers = ["({})".format(m) for m in markers] - requirement += "; {}".format(" and ".join(markers)) + marker_str = " and ".join(f"({m})" for m in markers) else: - requirement += "; {}".format(markers[0]) + marker_str = markers[0] + requirement += f" ; {marker_str}" return requirement - def _create_nested_marker( - self, name, constraint - 
): # type: (str, Union["BaseConstraint", Version, VersionConstraint]) -> str - if isinstance(constraint, (MultiConstraint, UnionConstraint)): - parts = [] - for c in constraint.constraints: - multi = False - if isinstance(c, (MultiConstraint, UnionConstraint)): - multi = True - - parts.append((multi, self._create_nested_marker(name, c))) - - glue = " and " - if isinstance(constraint, UnionConstraint): - parts = [ - "({})".format(part[1]) if part[0] else part[1] for part in parts - ] - glue = " or " - else: - parts = [part[1] for part in parts] - - marker = glue.join(parts) - elif isinstance(constraint, Constraint): - marker = '{} {} "{}"'.format(name, constraint.operator, constraint.version) - elif isinstance(constraint, VersionUnion): - parts = [] - for c in constraint.ranges: - parts.append(self._create_nested_marker(name, c)) - - glue = " or " - parts = ["({})".format(part) for part in parts] - - marker = glue.join(parts) - elif isinstance(constraint, Version): - if constraint.precision >= 3 and name == "python_version": - name = "python_full_version" - - marker = '{} == "{}"'.format(name, constraint.text) - else: - if constraint.min is not None: - min_name = name - if constraint.min.precision >= 3 and name == "python_version": - min_name = "python_full_version" - - if constraint.max is None: - name = min_name - - op = ">=" - if not constraint.include_min: - op = ">" - - version = constraint.min.text - if constraint.max is not None: - max_name = name - if constraint.max.precision >= 3 and name == "python_version": - max_name = "python_full_version" - - text = '{} {} "{}"'.format(min_name, op, version) - - op = "<=" - if not constraint.include_max: - op = "<" - - version = constraint.max - - text += ' and {} {} "{}"'.format(max_name, op, version) - - return text - elif constraint.max is not None: - if constraint.max.precision >= 3 and name == "python_version": - name = "python_full_version" - - op = "<=" - if not constraint.include_max: - op = "<" - - version = constraint.max - else: - return "" - - marker = '{} {} "{}"'.format(name, op, version) - - return marker - - def activate(self): # type: () -> None + def activate(self) -> None: """ Set the dependency as mandatory. """ self._activated = True - def deactivate(self): # type: () -> None + def deactivate(self) -> None: """ Set the dependency as optional. """ @@ -364,56 +358,197 @@ def deactivate(self): # type: () -> None self._activated = False - def with_constraint( - self, constraint - ): # type: (Union[str, VersionConstraint]) -> Dependency - new = Dependency( - self.pretty_name, - constraint, - optional=self.is_optional(), - category=self.category, - allows_prereleases=self.allows_prereleases(), - extras=self._extras, - source_type=self._source_type, - source_url=self._source_url, - source_reference=self._source_reference, - ) + def with_constraint(self: T, constraint: str | VersionConstraint) -> T: + dependency = self.clone() + dependency.constraint = constraint # type: ignore[assignment] + return dependency + + @classmethod + def create_from_pep_508( + cls, name: str, relative_to: Path | None = None + ) -> Dependency: + """ + Resolve a PEP-508 requirement string to a `Dependency` instance. If a + `relative_to` path is specified, this is used as the base directory if the + identified dependency is of file or directory type. 
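Putting the rendering paths above together, a few illustrative (unverified) outputs of the PEP 508 serialization for a bare dependency with no marker:

Dependency("foo", "*").to_pep_508()             # "foo"
Dependency("foo", "1.2.3").to_pep_508()         # "foo (==1.2.3)", exact pins use Version.text
Dependency("foo", ">=1.2, <2.0").to_pep_508()   # "foo (>=1.2,<2.0)", spaces stripped
Dependency("foo", "!=1.0").to_pep_508()         # "foo (!=1.0)" via the VersionUnion short-circuit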
+ """ + from conda_lock._vendor.poetry.core.packages.url_dependency import URLDependency + from conda_lock._vendor.poetry.core.packages.utils.link import Link + from conda_lock._vendor.poetry.core.packages.utils.utils import is_archive_file + from conda_lock._vendor.poetry.core.packages.utils.utils import is_python_project + from conda_lock._vendor.poetry.core.packages.utils.utils import is_url + from conda_lock._vendor.poetry.core.packages.utils.utils import path_to_url + from conda_lock._vendor.poetry.core.packages.utils.utils import strip_extras + from conda_lock._vendor.poetry.core.packages.utils.utils import url_to_path + from conda_lock._vendor.poetry.core.packages.vcs_dependency import VCSDependency + from conda_lock._vendor.poetry.core.utils.patterns import wheel_file_re + from conda_lock._vendor.poetry.core.vcs.git import ParsedUrl + from conda_lock._vendor.poetry.core.version.requirements import Requirement + + # Removing comments + parts = name.split(" #", 1) + name = parts[0].strip() + if len(parts) > 1: + rest = parts[1] + if " ;" in rest: + name += " ;" + rest.split(" ;", 1)[1] + + req = Requirement(name) + + name = req.name + link = None + + if is_url(name): + link = Link(name) + elif req.url: + link = Link(req.url) + else: + path_str = os.path.normpath(os.path.abspath(name)) + p, extras = strip_extras(path_str) + if os.path.isdir(p) and (os.path.sep in name or name.startswith(".")): + if not is_python_project(Path(name)): + raise ValueError( + f"Directory {name!r} is not installable. Not a Python project." + ) + link = Link(path_to_url(p)) + elif is_archive_file(p): + link = Link(path_to_url(p)) + + # it's a local file, dir, or url + if link: + is_file_uri = link.scheme == "file" + is_relative_uri = is_file_uri and re.search(r"\.\./", link.url) + + # Handle relative file URLs + if is_file_uri and is_relative_uri: + path = Path(link.path) + if relative_to: + path = relative_to / path + link = Link(path_to_url(path)) + + # wheel file + version = None + if link.is_wheel: + m = wheel_file_re.match(link.filename) + if not m: + raise ValueError(f"Invalid wheel name: {link.filename}") + name = m.group("name") + version = m.group("ver") + + dep: Dependency | None = None + + if link.scheme.startswith("git+"): + url = ParsedUrl.parse(link.url) + dep = VCSDependency( + name, + "git", + url.url, + rev=url.rev, + directory=url.subdirectory, + extras=req.extras, + ) + elif link.scheme == "git": + dep = VCSDependency( + name, "git", link.url_without_fragment, extras=req.extras + ) + elif link.scheme in ("http", "https"): + dep = URLDependency( + name, + link.url_without_fragment, + directory=link.subdirectory_fragment, + extras=req.extras, + ) + elif is_file_uri: + # handle RFC 8089 references + path = url_to_path(req.url) + dep = _make_file_or_dir_dep( + name=name, + path=path, + base=relative_to, + subdirectory=link.subdirectory_fragment, + extras=req.extras, + ) + else: + with suppress(ValueError): + # this is a local path not using the file URI scheme + dep = _make_file_or_dir_dep( + name=name, + path=Path(req.url), + base=relative_to, + extras=req.extras, + ) - new.is_root = self.is_root - new.python_versions = self.python_versions - new.transitive_python_versions = self.transitive_python_versions - new.marker = self.marker - new.transitive_marker = self.transitive_marker + if dep is None: + dep = Dependency(name, version or "*", extras=req.extras) + + if version: + dep._constraint = parse_constraint(version) + else: + constraint: VersionConstraint | str + constraint = req.constraint 
if req.pretty_constraint else "*" + dep = Dependency(name, constraint, extras=req.extras) - for in_extra in self.in_extras: - new.in_extras.append(in_extra) + if req.marker: + dep.marker = req.marker - return new + return dep - def __eq__(self, other): # type: (Any) -> bool + def __eq__(self, other: object) -> bool: if not isinstance(other, Dependency): return NotImplemented - return ( - self.is_same_package_as(other) - and self._constraint == other.constraint - and self._extras == other.extras + # "constraint" is implicitly given for direct origin dependencies and might not + # be set yet ("*"). Thus, it shouldn't be used to determine if two direct origin + # dependencies are equal. + # Calling is_direct_origin() for one dependency is sufficient because + # super().__eq__() returns False for different origins. + return super().__eq__(other) and ( + self._constraint == other.constraint or self.is_direct_origin() ) - def __ne__(self, other): # type: (Any) -> bool - return not self == other + def __hash__(self) -> int: + # don't include _constraint in hash because it is mutable! + return super().__hash__() - def __hash__(self): # type: () -> int - return ( - super(Dependency, self).__hash__() - ^ hash(self._constraint) - ^ hash(self._extras) - ) - - def __str__(self): # type: () -> str + def __str__(self) -> str: if self.is_root: return self._pretty_name + if self.is_direct_origin(): + # adding version since this information is especially useful in debug output + parts = [p.strip() for p in self.base_pep_508_name.split("@", 1)] + return f"{parts[0]} ({self._pretty_constraint}) @ {parts[1]}" return self.base_pep_508_name - def __repr__(self): # type: () -> str - return "<{} {}>".format(self.__class__.__name__, str(self)) + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self}>" + + +def _make_file_or_dir_dep( + name: str, + path: Path, + base: Path | None = None, + subdirectory: str | None = None, + extras: list[str] | None = None, +) -> FileDependency | DirectoryDependency | None: + """ + Helper function to create a file or directory dependency with the given arguments. + If path is not a file or directory that exists, `None` is returned.
+ """ + from conda_lock._vendor.poetry.core.packages.directory_dependency import DirectoryDependency + from conda_lock._vendor.poetry.core.packages.file_dependency import FileDependency + + _path = path + if not path.is_absolute() and base: + # a base path was specified, so we should respect that + _path = Path(base) / path + + if _path.is_file(): + return FileDependency( + name, path, base=base, directory=subdirectory, extras=extras + ) + elif _path.is_dir(): + if subdirectory: + path = path / subdirectory + return DirectoryDependency(name, path, base=base, extras=extras) + + return None diff --git a/conda_lock/_vendor/poetry/core/packages/dependency_group.py b/conda_lock/_vendor/poetry/core/packages/dependency_group.py new file mode 100644 index 00000000..2bdc834d --- /dev/null +++ b/conda_lock/_vendor/poetry/core/packages/dependency_group.py @@ -0,0 +1,57 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + + +if TYPE_CHECKING: + from conda_lock._vendor.poetry.core.packages.dependency import Dependency + + +MAIN_GROUP = "main" + + +class DependencyGroup: + def __init__(self, name: str, optional: bool = False) -> None: + self._name: str = name + self._optional: bool = optional + self._dependencies: list[Dependency] = [] + + @property + def name(self) -> str: + return self._name + + @property + def dependencies(self) -> list[Dependency]: + return self._dependencies + + def is_optional(self) -> bool: + return self._optional + + def add_dependency(self, dependency: Dependency) -> None: + self._dependencies.append(dependency) + + def remove_dependency(self, name: str) -> None: + from packaging.utils import canonicalize_name + + name = canonicalize_name(name) + + dependencies = [] + for dependency in self.dependencies: + if dependency.name == name: + continue + + dependencies.append(dependency) + + self._dependencies = dependencies + + def __eq__(self, other: object) -> bool: + if not isinstance(other, DependencyGroup): + return NotImplemented + + return self._name == other.name and set(self._dependencies) == set( + other.dependencies + ) + + def __repr__(self) -> str: + cls = self.__class__.__name__ + return f"{cls}({self._name}, optional={self._optional})" diff --git a/conda_lock/_vendor/poetry/core/packages/directory_dependency.py b/conda_lock/_vendor/poetry/core/packages/directory_dependency.py index 88979c60..4851e014 100644 --- a/conda_lock/_vendor/poetry/core/packages/directory_dependency.py +++ b/conda_lock/_vendor/poetry/core/packages/directory_dependency.py @@ -1,138 +1,66 @@ +from __future__ import annotations + +import functools + from typing import TYPE_CHECKING -from typing import FrozenSet -from typing import List -from typing import Union -from conda_lock._vendor.poetry.core.pyproject import PyProjectTOML -from conda_lock._vendor.poetry.core.utils._compat import Path +from conda_lock._vendor.poetry.core.packages.path_dependency import PathDependency +from conda_lock._vendor.poetry.core.packages.utils.utils import is_python_project +from conda_lock._vendor.poetry.core.pyproject.toml import PyProjectTOML if TYPE_CHECKING: - from .constraints import BaseConstraint # noqa - -from .dependency import Dependency + from collections.abc import Iterable + from pathlib import Path -class DirectoryDependency(Dependency): +class DirectoryDependency(PathDependency): def __init__( self, - name, # type: str - path, # type: Path - category="main", # type: str - optional=False, # type: bool - base=None, # type: Path - develop=False, # type: bool - extras=None, # type: 
Union[List[str], FrozenSet[str]] - ): - self._path = path - self._base = base or Path.cwd() - self._full_path = path - - if not self._path.is_absolute(): - try: - self._full_path = self._base.joinpath(self._path).resolve() - except FileNotFoundError: - raise ValueError("Directory {} does not exist".format(self._path)) - - self._develop = develop - self._supports_poetry = False - - if not self._full_path.exists(): - raise ValueError("Directory {} does not exist".format(self._path)) - - if self._full_path.is_file(): - raise ValueError("{} is a file, expected a directory".format(self._path)) - - # Checking content to determine actions - setup = self._full_path / "setup.py" - self._supports_poetry = PyProjectTOML( - self._full_path / "pyproject.toml" - ).is_poetry_project() - - if not setup.exists() and not self._supports_poetry: - raise ValueError( - "Directory {} does not seem to be a Python package".format( - self._full_path - ) - ) - - super(DirectoryDependency, self).__init__( + name: str, + path: Path, + groups: Iterable[str] | None = None, + optional: bool = False, + base: Path | None = None, + develop: bool = False, + extras: Iterable[str] | None = None, + ) -> None: + super().__init__( name, - "*", - category=category, - optional=optional, - allows_prereleases=True, + path, source_type="directory", - source_url=self._full_path.as_posix(), + groups=groups, + optional=optional, + base=base, extras=extras, ) + # Attributes must be immutable for clone() to be safe! + # (For performance reasons, clone only creates a copy instead of a deep copy). + self._develop = develop - @property - def path(self): # type: () -> Path - return self._path - - @property - def full_path(self): # type: () -> Path - return self._full_path - - @property - def base(self): # type: () -> Path - return self._base + # cache this function to avoid multiple IO reads and parsing + self.supports_poetry = functools.lru_cache(maxsize=1)(self._supports_poetry) @property - def develop(self): # type: () -> bool + def develop(self) -> bool: return self._develop - def supports_poetry(self): # type: () -> bool - return self._supports_poetry - - def is_directory(self): # type: () -> bool - return True - - def with_constraint( - self, constraint - ): # type: ("BaseConstraint") -> DirectoryDependency - new = DirectoryDependency( - self.pretty_name, - path=self.path, - base=self.base, - optional=self.is_optional(), - category=self.category, - develop=self._develop, - extras=self._extras, - ) - - new._constraint = constraint - new._pretty_constraint = str(constraint) - - new.is_root = self.is_root - new.python_versions = self.python_versions - new.marker = self.marker - new.transitive_marker = self.transitive_marker - - for in_extra in self.in_extras: - new.in_extras.append(in_extra) + def _validate(self) -> str: + message = super()._validate() + if message: + return message - return new - - @property - def base_pep_508_name(self): # type: () -> str - requirement = self.pretty_name - - if self.extras: - requirement += "[{}]".format(",".join(self.extras)) - - requirement += " @ {}".format(self._path.as_posix()) - - return requirement - - def __str__(self): # type: () -> str - if self.is_root: - return self._pretty_name - - return "{} ({} {})".format( - self._pretty_name, self._pretty_constraint, self._path.as_posix() - ) + if self._full_path.is_file(): + return ( + f"{self._full_path} for {self.pretty_name} is a file," + " expected a directory" + ) + if not is_python_project(self._full_path): + return ( + f"Directory 
{self._full_path} for {self.pretty_name} does not seem" + " to be a Python package" + ) + return "" - def __hash__(self): # type: () -> int - return hash((self._name, self._full_path.as_posix())) + def _supports_poetry(self) -> bool: + return PyProjectTOML(self._full_path / "pyproject.toml").is_poetry_project() diff --git a/conda_lock/_vendor/poetry/core/packages/file_dependency.py b/conda_lock/_vendor/poetry/core/packages/file_dependency.py index f3b9593e..1b8ceaa4 100644 --- a/conda_lock/_vendor/poetry/core/packages/file_dependency.py +++ b/conda_lock/_vendor/poetry/core/packages/file_dependency.py @@ -1,123 +1,78 @@ +from __future__ import annotations + import hashlib import io +import warnings from typing import TYPE_CHECKING -from typing import FrozenSet -from typing import List -from typing import Union - -from conda_lock._vendor.poetry.core.packages.utils.utils import path_to_url -from conda_lock._vendor.poetry.core.utils._compat import Path -from .dependency import Dependency +from conda_lock._vendor.poetry.core.packages.path_dependency import PathDependency if TYPE_CHECKING: - from .constraints import BaseConstraint + from collections.abc import Iterable + from pathlib import Path -class FileDependency(Dependency): +class FileDependency(PathDependency): def __init__( self, - name, # type: str - path, # type: Path - category="main", # type: str - optional=False, # type: bool - base=None, # type: Path - extras=None, # type: Union[List[str], FrozenSet[str]] - ): - self._path = path - self._base = base or Path.cwd() - self._full_path = path - - if not self._path.is_absolute(): - try: - self._full_path = self._base.joinpath(self._path).resolve() - except FileNotFoundError: - raise ValueError("Directory {} does not exist".format(self._path)) - - if not self._full_path.exists(): - raise ValueError("File {} does not exist".format(self._path)) - - if self._full_path.is_dir(): - raise ValueError("{} is a directory, expected a file".format(self._path)) - - super(FileDependency, self).__init__( + name: str, + path: Path, + *, + directory: str | None = None, + groups: Iterable[str] | None = None, + optional: bool = False, + base: Path | None = None, + extras: Iterable[str] | None = None, + ) -> None: + super().__init__( name, - "*", - category=category, - optional=optional, - allows_prereleases=True, + path, source_type="file", - source_url=self._full_path.as_posix(), + groups=groups, + optional=optional, + base=base, + subdirectory=directory, extras=extras, ) + # Attributes must be immutable for clone() to be safe! + # (For performance reasons, clone only creates a copy instead of a deep copy). @property - def base(self): # type: () -> Path - return self._base + def directory(self) -> str | None: + return self.source_subdirectory @property - def path(self): # type: () -> Path - return self._path + def base_pep_508_name(self) -> str: + requirement = super().base_pep_508_name - @property - def full_path(self): # type: () -> Path - return self._full_path + if self.directory: + requirement += f"#subdirectory={self.directory}" + + return requirement - def is_file(self): # type: () -> bool - return True + def _validate(self) -> str: + message = super()._validate() + if message: + return message - def hash(self, hash_name="sha256"): # type: (str) -> str + if self._full_path.is_dir(): + return ( + f"{self._full_path} for {self.pretty_name} is a directory," + " expected a file" + ) + return "" + + def hash(self, hash_name: str = "sha256") -> str: + warnings.warn( + "hash() is deprecated. 
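One detail worth noting in DirectoryDependency above is that functools.lru_cache is bound to the instance rather than used as a method decorator. A standalone sketch of that pattern (Probe and its path attribute are illustrative, not part of the vendored code):

import functools
from pathlib import Path

class Probe:
    def __init__(self, path: Path) -> None:
        self._path = path
        # Per-instance cache: repeated calls skip the filesystem probe, and the
        # cache dies with the instance (unlike @lru_cache on the method itself,
        # which keeps every instance alive at class level).
        self.supports_poetry = functools.lru_cache(maxsize=1)(self._supports_poetry)

    def _supports_poetry(self) -> bool:
        return (self._path / "pyproject.toml").exists()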
Use poetry.utils.helpers.get_file_hash() instead.", + DeprecationWarning, + stacklevel=2, + ) h = hashlib.new(hash_name) with self._full_path.open("rb") as fp: for content in iter(lambda: fp.read(io.DEFAULT_BUFFER_SIZE), b""): h.update(content) return h.hexdigest() - - def with_constraint(self, constraint): # type: ("BaseConstraint") -> FileDependency - new = FileDependency( - self.pretty_name, - path=self.path, - base=self.base, - optional=self.is_optional(), - category=self.category, - extras=self._extras, - ) - - new._constraint = constraint - new._pretty_constraint = str(constraint) - - new.is_root = self.is_root - new.python_versions = self.python_versions - new.marker = self.marker - new.transitive_marker = self.transitive_marker - - for in_extra in self.in_extras: - new.in_extras.append(in_extra) - - return new - - @property - def base_pep_508_name(self): # type: () -> str - requirement = self.pretty_name - - if self.extras: - requirement += "[{}]".format(",".join(self.extras)) - - path = path_to_url(self.path) if self.path.is_absolute() else self.path - requirement += " @ {}".format(path) - - return requirement - - def __str__(self): # type: () -> str - if self.is_root: - return self._pretty_name - - return "{} ({} {})".format( - self._pretty_name, self._pretty_constraint, self._path - ) - - def __hash__(self): # type: () -> int - return hash((self._name, self._full_path)) diff --git a/conda_lock/_vendor/poetry/core/packages/package.py b/conda_lock/_vendor/poetry/core/packages/package.py index 4b172c0c..70221913 100644 --- a/conda_lock/_vendor/poetry/core/packages/package.py +++ b/conda_lock/_vendor/poetry/core/packages/package.py @@ -1,43 +1,48 @@ -# -*- coding: utf-8 -*- -import copy +from __future__ import annotations + import re +import warnings from contextlib import contextmanager from typing import TYPE_CHECKING -from typing import Dict -from typing import List -from typing import Optional -from typing import Union - -from conda_lock._vendor.poetry.core.semver import Version -from conda_lock._vendor.poetry.core.semver import parse_constraint -from conda_lock._vendor.poetry.core.spdx import License -from conda_lock._vendor.poetry.core.spdx import license_by_id -from conda_lock._vendor.poetry.core.version.markers import AnyMarker +from typing import ClassVar +from typing import Mapping +from typing import Sequence +from typing import TypeVar + +from conda_lock._vendor.poetry.core.constraints.version import parse_constraint +from conda_lock._vendor.poetry.core.constraints.version.exceptions import ParseConstraintError +from conda_lock._vendor.poetry.core.packages.dependency_group import MAIN_GROUP +from conda_lock._vendor.poetry.core.packages.specification import PackageSpecification +from conda_lock._vendor.poetry.core.packages.utils.utils import create_nested_marker +from conda_lock._vendor.poetry.core.version.exceptions import InvalidVersion from conda_lock._vendor.poetry.core.version.markers import parse_marker -# Do not move to the TYPE_CHECKING only section, because Dependency get's imported -# by poetry/packages/locker.py from here -from .dependency import Dependency -from .specification import PackageSpecification -from .utils.utils import create_nested_marker - if TYPE_CHECKING: - from conda_lock._vendor.poetry.core.semver import VersionTypes # noqa - from conda_lock._vendor.poetry.core.version.markers import BaseMarker # noqa + from collections.abc import Collection + from collections.abc import Iterable + from collections.abc import Iterator + from pathlib import 
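The deprecated hash() above uses the classic iter(callable, sentinel) idiom to digest a file in buffer-sized blocks without loading it all into memory; the same pattern in isolation:

import hashlib
import io

def file_hash(path: str, hash_name: str = "sha256") -> str:
    h = hashlib.new(hash_name)
    with open(path, "rb") as fp:
        # read() returns b"" at EOF, which terminates the iterator
        for block in iter(lambda: fp.read(io.DEFAULT_BUFFER_SIZE), b""):
            h.update(block)
    return h.hexdigest()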
Path - from .directory_dependency import DirectoryDependency - from .file_dependency import FileDependency - from .url_dependency import URLDependency - from .vcs_dependency import VCSDependency + from packaging.utils import NormalizedName -AUTHOR_REGEX = re.compile(r"(?u)^(?P<name>[- .,\w\d'’\"()&]+)(?: <(?P<email>.+?)>)?$") + from conda_lock._vendor.poetry.core.constraints.version import Version + from conda_lock._vendor.poetry.core.constraints.version import VersionConstraint + from conda_lock._vendor.poetry.core.packages.dependency import Dependency + from conda_lock._vendor.poetry.core.packages.dependency_group import DependencyGroup + from conda_lock._vendor.poetry.core.spdx.license import License + from conda_lock._vendor.poetry.core.version.markers import BaseMarker + T = TypeVar("T", bound="Package") -class Package(PackageSpecification): +AUTHOR_REGEX = re.compile( + r"(?u)^(?P<name>[- .,\w\d'’\"():&]+)(?: <(?P<email>.+?)>)?$" # noqa: RUF001 +) - AVAILABLE_PYTHONS = { + +class Package(PackageSpecification): + AVAILABLE_PYTHONS: ClassVar[set[str]] = { "2", "2.7", "3", @@ -48,157 +53,191 @@ class Package(PackageSpecification): "3.8", "3.9", "3.10", + "3.11", + "3.12", } def __init__( self, - name, # type: str - version, # type: Union[str, Version] - pretty_version=None, # type: Optional[str] - source_type=None, # type: Optional[str] - source_url=None, # type: Optional[str] - source_reference=None, # type: Optional[str] - source_resolved_reference=None, # type: Optional[str] - features=None, # type: Optional[List[str]] - ): + name: str, + version: str | Version, + pretty_version: str | None = None, + source_type: str | None = None, + source_url: str | None = None, + source_reference: str | None = None, + source_resolved_reference: str | None = None, + source_subdirectory: str | None = None, + features: Iterable[str] | None = None, + develop: bool = False, + yanked: str | bool = False, + ) -> None: """ Creates a new in memory package. """ - super(Package, self).__init__( + from conda_lock._vendor.poetry.core.version.markers import AnyMarker + + if pretty_version is not None: + warnings.warn( + "The `pretty_version` parameter is deprecated and will be removed" + " in a future release.", + DeprecationWarning, + stacklevel=2, + ) + + super().__init__( name, source_type=source_type, source_url=source_url, source_reference=source_reference, source_resolved_reference=source_resolved_reference, + source_subdirectory=source_subdirectory, features=features, ) - if not isinstance(version, Version): - self._version = Version.parse(version) - self._pretty_version = pretty_version or version - else: - self._version = version - self._pretty_version = pretty_version or self._version.text + # Attributes must be immutable for clone() to be safe! + # (For performance reasons, clone only creates a copy instead of a deep copy). + + self._set_version(version) self.description = "" - self._authors = [] - self._maintainers = [] + self.authors: Sequence[str] = [] + self.maintainers: Sequence[str] = [] + + self.homepage: str | None = None + self.repository_url: str | None = None + self.documentation_url: str | None = None + self.keywords: Sequence[str] = [] + self._license: License | None = None + self.readmes: tuple[Path, ...]
= () - self.homepage = None - self.repository_url = None - self.documentation_url = None - self.keywords = [] - self._license = None - self.readme = None + self.extras: Mapping[NormalizedName, Sequence[Dependency]] = {} - self.requires = [] - self.dev_requires = [] - self.extras = {} - self.requires_extras = [] + self._dependency_groups: Mapping[str, DependencyGroup] = {} - self.category = "main" - self.files = [] + # Category is heading towards deprecation. + self._category = "main" + self.files: Sequence[Mapping[str, str]] = [] self.optional = False - self.classifiers = [] + self.classifiers: Sequence[str] = [] self._python_versions = "*" self._python_constraint = parse_constraint("*") - self._python_marker = AnyMarker() + self._python_marker: BaseMarker = AnyMarker() + + self.marker: BaseMarker = AnyMarker() - self.platform = None - self.marker = AnyMarker() + self.root_dir: Path | None = None - self.root_dir = None + self.develop = develop - self.develop = True + self._yanked = yanked @property - def name(self): # type: () -> str + def name(self) -> NormalizedName: return self._name @property - def pretty_name(self): # type: () -> str + def pretty_name(self) -> str: return self._pretty_name @property - def version(self): # type: () -> "Version" + def version(self) -> Version: return self._version @property - def pretty_version(self): # type: () -> str - return self._pretty_version + def pretty_version(self) -> str: + return self._version.text @property - def unique_name(self): # type: () -> str + def unique_name(self) -> str: if self.is_root(): return self._name return self.complete_name + "-" + self._version.text @property - def pretty_string(self): # type: () -> str + def pretty_string(self) -> str: return self.pretty_name + " " + self.pretty_version @property - def full_pretty_version(self): # type: () -> str - if self.source_type in ["file", "directory", "url"]: - return "{} {}".format(self._pretty_version, self.source_url) + def full_pretty_version(self) -> str: + if self.source_type in ("file", "directory", "url"): + return f"{self.pretty_version} {self.source_url}" - if self.source_type not in ["hg", "git"]: - return self._pretty_version + if self.source_type not in ("hg", "git"): + return self.pretty_version - if self.source_resolved_reference: - if len(self.source_resolved_reference) == 40: - return "{} {}".format( - self._pretty_version, self.source_resolved_reference[0:7] - ) + ref: str | None + if self.source_resolved_reference and len(self.source_resolved_reference) == 40: + ref = self.source_resolved_reference[0:7] + return f"{self.pretty_version} {ref}" # if source reference is a sha1 hash -- truncate - if len(self.source_reference) == 40: - return "{} {}".format(self._pretty_version, self.source_reference[0:7]) - - return "{} {}".format( - self._pretty_version, - self._source_resolved_reference or self._source_reference, - ) + if self.source_reference and len(self.source_reference) == 40: + return f"{self.pretty_version} {self.source_reference[0:7]}" - @property - def authors(self): # type: () -> list - return self._authors + ref = self._source_resolved_reference or self._source_reference + return f"{self.pretty_version} {ref}" @property - def author_name(self): # type: () -> str + def author_name(self) -> str | None: return self._get_author()["name"] @property - def author_email(self): # type: () -> str + def author_email(self) -> str | None: return self._get_author()["email"] @property - def maintainers(self): # type: () -> list - return self._maintainers - - 
@property - def maintainer_name(self): # type: () -> str + def maintainer_name(self) -> str | None: return self._get_maintainer()["name"] @property - def maintainer_email(self): # type: () -> str + def maintainer_email(self) -> str | None: return self._get_maintainer()["email"] + @property + def requires(self) -> list[Dependency]: + """ + Returns the main dependencies + """ + if not self._dependency_groups or MAIN_GROUP not in self._dependency_groups: + return [] + + return self._dependency_groups[MAIN_GROUP].dependencies + @property def all_requires( self, - ): # type: () -> List[Union["DirectoryDependency", "FileDependency", "URLDependency", "VCSDependency", Dependency]] - return self.requires + self.dev_requires + ) -> list[Dependency]: + """ + Returns the main dependencies and group dependencies. + """ + return [ + dependency + for group in self._dependency_groups.values() + for dependency in group.dependencies + ] + + def _set_version(self, version: str | Version) -> None: + from conda_lock._vendor.poetry.core.constraints.version import Version + + if not isinstance(version, Version): + try: + version = Version.parse(version) + except InvalidVersion: + raise InvalidVersion( + f"Invalid version '{version}' on package {self.name}" + ) - def _get_author(self): # type: () -> dict - if not self._authors: + self._version = version + + def _get_author(self) -> dict[str, str | None]: + if not self.authors: return {"name": None, "email": None} - m = AUTHOR_REGEX.match(self._authors[0]) + m = AUTHOR_REGEX.match(self.authors[0]) if m is None: raise ValueError( @@ -211,11 +250,11 @@ def _get_author(self): # type: () -> dict return {"name": name, "email": email} - def _get_maintainer(self): # type: () -> dict - if not self._maintainers: + def _get_maintainer(self) -> dict[str, str | None]: + if not self.maintainers: return {"name": None, "email": None} - m = AUTHOR_REGEX.match(self._maintainers[0]) + m = AUTHOR_REGEX.match(self.maintainers[0]) if m is None: raise ValueError( @@ -229,41 +268,49 @@ def _get_maintainer(self): # type: () -> dict return {"name": name, "email": email} @property - def python_versions(self): # type: () -> str + def python_versions(self) -> str: return self._python_versions @python_versions.setter - def python_versions(self, value): # type: (str) -> None + def python_versions(self, value: str) -> None: + try: + constraint = parse_constraint(value) + except ParseConstraintError: + raise ParseConstraintError(f"Invalid python versions '{value}' on {self}") + self._python_versions = value - self._python_constraint = parse_constraint(value) + self._python_constraint = constraint self._python_marker = parse_marker( create_nested_marker("python_version", self._python_constraint) ) @property - def python_constraint(self): # type: () -> "VersionTypes" + def python_constraint(self) -> VersionConstraint: return self._python_constraint @property - def python_marker(self): # type: () -> "BaseMarker" + def python_marker(self) -> BaseMarker: return self._python_marker @property - def license(self): # type: () -> License + def license(self) -> License | None: return self._license @license.setter - def license(self, value): # type: (Optional[str, License]) -> None - if value is None: - self._license = value - elif isinstance(value, License): + def license(self, value: str | License | None) -> None: + from conda_lock._vendor.poetry.core.spdx.helpers import license_by_id + from conda_lock._vendor.poetry.core.spdx.license import License + + if value is None or isinstance(value, License): 
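The author/maintainer parsing above rests on AUTHOR_REGEX; a quick sketch of what it accepts (the email group is optional):

import re

AUTHOR_REGEX = re.compile(
    r"(?u)^(?P<name>[- .,\w\d'’\"():&]+)(?: <(?P<email>.+?)>)?$"
)

m = AUTHOR_REGEX.match("Jane Doe <jane@example.com>")
m.group("name"), m.group("email")   # ('Jane Doe', 'jane@example.com')

m = AUTHOR_REGEX.match("Jane Doe")
m.group("name"), m.group("email")   # ('Jane Doe', None)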
self._license = value else: self._license = license_by_id(value) @property - def all_classifiers(self): # type: () -> List[str] - classifiers = copy.copy(self.classifiers) + def all_classifiers(self) -> list[str]: + from conda_lock._vendor.poetry.core.constraints.version import Version + + classifiers = list(self.classifiers) # Automatically set python classifiers if self.python_versions == "*": @@ -271,27 +318,48 @@ def all_classifiers(self): # type: () -> List[str] else: python_constraint = self.python_constraint - for version in sorted(self.AVAILABLE_PYTHONS): + python_classifier_prefix = "Programming Language :: Python" + python_classifiers = [] + + # we sort python versions by sorting an int tuple of (major, minor) version + # to ensure we sort 3.10 after 3.9 + for version in sorted( + self.AVAILABLE_PYTHONS, key=lambda x: tuple(map(int, x.split("."))) + ): if len(version) == 1: constraint = parse_constraint(version + ".*") else: constraint = Version.parse(version) if python_constraint.allows_any(constraint): - classifiers.append( - "Programming Language :: Python :: {}".format(version) - ) + classifier = f"{python_classifier_prefix} :: {version}" + if classifier not in python_classifiers: + python_classifiers.append(classifier) # Automatically set license classifiers if self.license: classifiers.append(self.license.classifier) - classifiers = set(classifiers) + # Sort classifiers and insert python classifiers at the right location. We do + # it like this so that 3.10 is sorted after 3.9. + sorted_classifiers = [] + python_classifiers_inserted = False + for classifier in sorted(set(classifiers) - set(python_classifiers)): + if ( + not python_classifiers_inserted + and classifier > python_classifier_prefix + ): + sorted_classifiers.extend(python_classifiers) + python_classifiers_inserted = True + sorted_classifiers.append(classifier) + + if not python_classifiers_inserted: + sorted_classifiers.extend(python_classifiers) - return sorted(classifiers) + return sorted_classifiers @property - def urls(self): # type: () -> Dict[str, str] + def urls(self) -> dict[str, str]: urls = {} if self.homepage: @@ -305,68 +373,198 @@ def urls(self): # type: () -> Dict[str, str] return urls - def is_prerelease(self): # type: () -> bool - return self._version.is_prerelease() + @property + def category(self) -> str: + warnings.warn( + "`category` is deprecated and will be removed in a future release.", + DeprecationWarning, + stacklevel=2, + ) + return self._category + + @category.setter + def category(self, category: str) -> None: + warnings.warn( + "Setting `category` is deprecated and will be removed in a future release.", + DeprecationWarning, + stacklevel=2, + ) + self._category = category + + @property + def readme(self) -> Path | None: + warnings.warn( + "`readme` is deprecated: you are getting only the first readme file." + " Please use the plural form `readmes`.", + DeprecationWarning, + stacklevel=2, + ) + return next(iter(self.readmes), None) + + @readme.setter + def readme(self, path: Path) -> None: + warnings.warn( + "`readme` is deprecated. 
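The int-tuple sort key used for Python classifiers above is what keeps 3.10 after 3.9; a plain string sort would not:

versions = ["2.7", "3", "3.9", "3.10"]
sorted(versions)
# ['2.7', '3', '3.10', '3.9']   lexicographic: "3.1..." sorts before "3.9"
sorted(versions, key=lambda v: tuple(map(int, v.split("."))))
# ['2.7', '3', '3.9', '3.10']   numeric (major, minor) tuples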
Please assign a tuple to the plural form" + " `readmes`.", + DeprecationWarning, + stacklevel=2, + ) + self.readmes = (path,) + + @property + def yanked(self) -> bool: + return isinstance(self._yanked, str) or bool(self._yanked) + + @property + def yanked_reason(self) -> str: + if isinstance(self._yanked, str): + return self._yanked + return "" - def is_root(self): # type: () -> bool + def is_prerelease(self) -> bool: + return self._version.is_unstable() + + def is_root(self) -> bool: return False + def dependency_group_names(self, include_optional: bool = False) -> set[str]: + return { + name + for name, group in self._dependency_groups.items() + if not group.is_optional() or include_optional + } + + def add_dependency_group(self, group: DependencyGroup) -> None: + groups = dict(self._dependency_groups) + groups[group.name] = group + self._dependency_groups = groups + + def has_dependency_group(self, name: str) -> bool: + return name in self._dependency_groups + + def dependency_group(self, name: str) -> DependencyGroup: + if not self.has_dependency_group(name): + raise ValueError(f'The dependency group "{name}" does not exist.') + + return self._dependency_groups[name] + def add_dependency( - self, dependency, - ): # type: (Dependency) -> Dependency - if dependency.category == "dev": - self.dev_requires.append(dependency) - else: - self.requires.append(dependency) + self, + dependency: Dependency, + ) -> Dependency: + from conda_lock._vendor.poetry.core.packages.dependency_group import DependencyGroup + + for group_name in dependency.groups: + if group_name not in self._dependency_groups: + # Dynamically add the dependency group + self.add_dependency_group(DependencyGroup(group_name)) + + self._dependency_groups[group_name].add_dependency(dependency) return dependency - def to_dependency( - self, - ): # type: () -> Union[Dependency, "DirectoryDependency", "FileDependency", "URLDependency", "VCSDependency"] - from conda_lock._vendor.poetry.core.utils._compat import Path + def without_dependency_groups(self: T, groups: Collection[str]) -> T: + """ + Returns a clone of the package with the given dependency groups excluded. + """ + updated_groups = { + group_name: group + for group_name, group in self._dependency_groups.items() + if group_name not in groups + } + + package = self.clone() + package._dependency_groups = updated_groups - from .dependency import Dependency - from .directory_dependency import DirectoryDependency - from .file_dependency import FileDependency - from .url_dependency import URLDependency - from .vcs_dependency import VCSDependency + return package + + def without_optional_dependency_groups(self: T) -> T: + """ + Returns a clone of the package without optional dependency groups. + """ + updated_groups = { + group_name: group + for group_name, group in self._dependency_groups.items() + if not group.is_optional() + } + package = self.clone() + package._dependency_groups = updated_groups + return package + + def with_dependency_groups( + self: T, groups: Collection[str], only: bool = False + ) -> T: + """ + Returns a clone of the package with the given dependency groups opted in. + + Note that it will return all dependencies across all non-optional groups + plus the given optional groups. + + If `only` is set to True, then only the given groups will be selected.
+ """ + updated_groups = { + group_name: group + for group_name, group in self._dependency_groups.items() + if group_name in groups or not only and not group.is_optional() + } + package = self.clone() + package._dependency_groups = updated_groups + + return package + + def to_dependency(self) -> Dependency: + from pathlib import Path + + from conda_lock._vendor.poetry.core.packages.dependency import Dependency + from conda_lock._vendor.poetry.core.packages.directory_dependency import DirectoryDependency + from conda_lock._vendor.poetry.core.packages.file_dependency import FileDependency + from conda_lock._vendor.poetry.core.packages.url_dependency import URLDependency + from conda_lock._vendor.poetry.core.packages.vcs_dependency import VCSDependency + + dep: Dependency if self.source_type == "directory": + assert self._source_url is not None dep = DirectoryDependency( self._name, Path(self._source_url), - category=self.category, + groups=list(self._dependency_groups.keys()), optional=self.optional, base=self.root_dir, develop=self.develop, extras=self.features, ) elif self.source_type == "file": + assert self._source_url is not None dep = FileDependency( self._name, Path(self._source_url), - category=self.category, + directory=self.source_subdirectory, + groups=list(self._dependency_groups.keys()), optional=self.optional, base=self.root_dir, extras=self.features, ) elif self.source_type == "url": + assert self._source_url is not None dep = URLDependency( self._name, self._source_url, - category=self.category, + directory=self.source_subdirectory, + groups=list(self._dependency_groups.keys()), optional=self.optional, extras=self.features, ) elif self.source_type == "git": + assert self._source_url is not None dep = VCSDependency( self._name, self.source_type, - self.source_url, + self._source_url, rev=self.source_reference, resolved_rev=self.source_resolved_reference, - category=self.category, + directory=self.source_subdirectory, + groups=list(self._dependency_groups.keys()), optional=self.optional, develop=self.develop, extras=self.features, @@ -380,13 +578,13 @@ def to_dependency( if not self.python_constraint.is_any(): dep.python_versions = self.python_versions - if self._source_type not in ["directory", "file", "url", "git"]: + if not self.is_direct_origin(): return dep return dep.with_constraint(self._version) @contextmanager - def with_python_versions(self, python_versions): # type: (str) -> None + def with_python_versions(self, python_versions: str) -> Iterator[None]: original_python_versions = self.python_versions self.python_versions = python_versions @@ -395,51 +593,85 @@ def with_python_versions(self, python_versions): # type: (str) -> None self.python_versions = original_python_versions - def with_features(self, features): # type: (List[str]) -> "Package" - package = self.clone() - - package._features = frozenset(features) - - return package - - def without_features(self): # type: () -> "Package" - return self.with_features([]) + def satisfies( + self, dependency: Dependency, ignore_source_type: bool = False + ) -> bool: + """ + Helper method to check if this package satisfies a given dependency. - def clone(self): # type: () -> "Package" - clone = self.__class__(self.pretty_name, self.version) - clone.__dict__ = copy.deepcopy(self.__dict__) - return clone + This is determined by assessing if this instance provides the package specified + by the given dependency. Further, version and source types are checked. 
+ """ + if self.name != dependency.name: + return False + + if not dependency.constraint.allows(self.version): + return False + + if not (ignore_source_type or self.source_satisfies(dependency)): + return False + + return True + + def source_satisfies(self, dependency: Dependency) -> bool: + """Determine whether this package's source satisfies the given dependency.""" + if dependency.source_type is None: + if dependency.source_name is None: + # The dependency doesn't care about the source, so this package + # certainly satisfies it. + return True + + # The dependency specifies a source_name but not a type: it wants either + # pypi or a legacy repository. + # + # - If this package has no source type then it's from pypi, so it + # matches if and only if that's what the dependency wants + # - Else this package is a match if and only if it is from the desired + # repository + if self.source_type is None: + return dependency.source_name.lower() == "pypi" + + return ( + self.source_type == "legacy" + and self.source_reference is not None + and self.source_reference.lower() == dependency.source_name.lower() + ) - def __hash__(self): # type: () -> int - return super(Package, self).__hash__() ^ hash(self._version) + # The dependency specifies a source: this package matches if and only if it is + # from that source. + return dependency.is_same_source_as(self) - def __eq__(self, other): # type: (Package) -> bool + def __eq__(self, other: object) -> bool: if not isinstance(other, Package): return NotImplemented - return self.is_same_package_as(other) and self._version == other.version + return super().__eq__(other) and self._version == other.version + + def __hash__(self) -> int: + return super().__hash__() ^ hash(self._version) - def __str__(self): # type: () -> str - return "{} ({})".format(self.complete_name, self.full_pretty_version) + def __str__(self) -> str: + return f"{self.complete_name} ({self.full_pretty_version})" - def __repr__(self): # type: () -> str + def __repr__(self) -> str: args = [repr(self._name), repr(self._version.text)] if self._features: - args.append("features={}".format(repr(self._features))) + args.append(f"features={self._features!r}") if self._source_type: - args.append("source_type={}".format(repr(self._source_type))) - args.append("source_url={}".format(repr(self._source_url))) + args.append(f"source_type={self._source_type!r}") + args.append(f"source_url={self._source_url!r}") if self._source_reference: - args.append("source_reference={}".format(repr(self._source_reference))) + args.append(f"source_reference={self._source_reference!r}") if self._source_resolved_reference: args.append( - "source_resolved_reference={}".format( - repr(self._source_resolved_reference) - ) + f"source_resolved_reference={self._source_resolved_reference!r}" ) + if self._source_subdirectory: + args.append(f"source_subdirectory={self._source_subdirectory!r}") - return "Package({})".format(", ".join(args)) + args_str = ", ".join(args) + return f"Package({args_str})" diff --git a/conda_lock/_vendor/poetry/core/packages/path_dependency.py b/conda_lock/_vendor/poetry/core/packages/path_dependency.py new file mode 100644 index 00000000..3d1b08cc --- /dev/null +++ b/conda_lock/_vendor/poetry/core/packages/path_dependency.py @@ -0,0 +1,93 @@ +from __future__ import annotations + +import logging + +from abc import ABC +from abc import abstractmethod +from pathlib import Path +from typing import TYPE_CHECKING + +from conda_lock._vendor.poetry.core.packages.dependency import Dependency +from 
conda_lock._vendor.poetry.core.packages.utils.utils import path_to_url + + +if TYPE_CHECKING: + from collections.abc import Iterable + + +logger = logging.getLogger(__name__) + + +class PathDependency(Dependency, ABC): + @abstractmethod + def __init__( + self, + name: str, + path: Path, + *, + source_type: str, + groups: Iterable[str] | None = None, + optional: bool = False, + base: Path | None = None, + subdirectory: str | None = None, + extras: Iterable[str] | None = None, + ) -> None: + # Attributes must be immutable for clone() to be safe! + # (For performance reasons, clone only creates a copy instead of a deep copy). + assert source_type in ("file", "directory") + self._path = path + self._base = base or Path.cwd() + self._full_path = path + + if not self._path.is_absolute(): + self._full_path = self._base.joinpath(self._path).resolve() + + super().__init__( + name, + "*", + groups=groups, + optional=optional, + allows_prereleases=True, + source_type=source_type, + source_url=self._full_path.as_posix(), + source_subdirectory=subdirectory, + extras=extras, + ) + # cache validation result to avoid unnecessary file system access + self._validation_error = self._validate() + self.validate(raise_error=False) + + @property + def path(self) -> Path: + return self._path + + @property + def full_path(self) -> Path: + return self._full_path + + @property + def base(self) -> Path: + return self._base + + def is_file(self) -> bool: + return self._source_type == "file" + + def is_directory(self) -> bool: + return self._source_type == "directory" + + def validate(self, *, raise_error: bool) -> bool: + if not self._validation_error: + return True + if raise_error: + raise ValueError(self._validation_error) + logger.warning(self._validation_error) + return False + + @property + def base_pep_508_name(self) -> str: + return f"{self.complete_pretty_name} @ {path_to_url(self.full_path)}" + + def _validate(self) -> str: + if not self._full_path.exists(): + return f"Path {self._full_path} for {self.pretty_name} does not exist" + return "" diff --git a/conda_lock/_vendor/poetry/core/packages/project_package.py b/conda_lock/_vendor/poetry/core/packages/project_package.py index 5c3c7059..0b22afd5 100644 --- a/conda_lock/_vendor/poetry/core/packages/project_package.py +++ b/conda_lock/_vendor/poetry/core/packages/project_package.py @@ -1,67 +1,76 @@ +from __future__ import annotations + +import warnings + from typing import TYPE_CHECKING from typing import Any -from typing import Dict -from typing import Optional -from typing import Union +from typing import Mapping +from typing import Sequence -from conda_lock._vendor.poetry.core.semver import VersionRange -from conda_lock._vendor.poetry.core.semver import parse_constraint +from conda_lock._vendor.poetry.core.constraints.version import parse_constraint from conda_lock._vendor.poetry.core.version.markers import parse_marker if TYPE_CHECKING: - from . 
import ( - DirectoryDependency, - FileDependency, - URLDependency, - VCSDependency, - Dependency, - ) + from conda_lock._vendor.poetry.core.constraints.version import Version + from conda_lock._vendor.poetry.core.packages.dependency import Dependency -from .package import Package -from .utils.utils import create_nested_marker +from conda_lock._vendor.poetry.core.packages.package import Package +from conda_lock._vendor.poetry.core.packages.utils.utils import create_nested_marker class ProjectPackage(Package): def __init__( - self, name, version, pretty_version=None - ): # type: (str, Union[str, VersionRange], Optional[str]) -> None - super(ProjectPackage, self).__init__(name, version, pretty_version) - - self.build_config = dict() - self.packages = [] - self.include = [] - self.exclude = [] - self.custom_urls = {} + self, + name: str, + version: str | Version, + pretty_version: str | None = None, + ) -> None: + if pretty_version is not None: + warnings.warn( + "The `pretty_version` parameter is deprecated and will be removed" + " in a future release.", + DeprecationWarning, + stacklevel=2, + ) + + super().__init__(name, version) + + # Attributes must be immutable for clone() to be safe! + # (For performance reasons, clone only creates a copy instead of a deep copy). + + self.build_config: Mapping[str, Any] = {} + self.packages: Sequence[Mapping[str, Any]] = [] + self.include: Sequence[Mapping[str, Any]] = [] + self.exclude: Sequence[Mapping[str, Any]] = [] + self.custom_urls: Mapping[str, str] = {} if self._python_versions == "*": self._python_constraint = parse_constraint("~2.7 || >=3.4") @property - def build_script(self): # type: () -> Optional[str] + def build_script(self) -> str | None: return self.build_config.get("script") - def is_root(self): # type: () -> bool + def is_root(self) -> bool: return True - def to_dependency( - self, - ): # type: () -> Union["DirectoryDependency", "FileDependency", "URLDependency", "VCSDependency", "Dependency"] - dependency = super(ProjectPackage, self).to_dependency() + def to_dependency(self) -> Dependency: + dependency = super().to_dependency() dependency.is_root = True return dependency @property - def python_versions(self): # type: () -> Union[str, VersionRange] + def python_versions(self) -> str: return self._python_versions @python_versions.setter - def python_versions(self, value): # type: (Union[str, VersionRange]) -> None + def python_versions(self, value: str) -> None: self._python_versions = value - if value == "*" or value == VersionRange(): + if value == "*": value = "~2.7 || >=3.4" self._python_constraint = parse_constraint(value) @@ -70,12 +79,28 @@ def python_versions(self, value): # type: (Union[str, VersionRange]) -> None ) @property - def urls(self): # type: () -> Dict[str, Any] - urls = super(ProjectPackage, self).urls + def version(self) -> Version: + # override version to make it settable + return super().version + + @version.setter + def version(self, value: str | Version) -> None: + self._set_version(value) + + @property + def urls(self) -> dict[str, str]: + urls = super().urls urls.update(self.custom_urls) return urls - def build_should_generate_setup(self): # type: () -> bool - return self.build_config.get("generate-setup-file", True) + def __hash__(self) -> int: + # The parent Package class's __hash__ incorporates the version because + # a Package's version is immutable. But a ProjectPackage's version is + # mutable. So call Package's parent hash function. 
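+        # A minimal illustration, not from the vendored source: the hash must
+        # stay stable while the (mutable) version changes, e.g.
+        #
+        #     pkg = ProjectPackage("my-project", "1.0.0")
+        #     h = hash(pkg)
+        #     pkg.version = "2.0.0"   # allowed: ProjectPackage.version is settable
+        #     assert hash(pkg) == h   # would break if the version were hashed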
+ return super(Package, self).__hash__() + + def build_should_generate_setup(self) -> bool: + value: bool = self.build_config.get("generate-setup-file", False) + return value diff --git a/conda_lock/_vendor/poetry/core/packages/specification.py b/conda_lock/_vendor/poetry/core/packages/specification.py index 3ab4937f..85b9574e 100644 --- a/conda_lock/_vendor/poetry/core/packages/specification.py +++ b/conda_lock/_vendor/poetry/core/packages/specification.py @@ -1,118 +1,216 @@ -from typing import FrozenSet -from typing import List -from typing import Optional +from __future__ import annotations -from conda_lock._vendor.poetry.core.utils.helpers import canonicalize_name +import copy +from typing import TYPE_CHECKING +from typing import TypeVar -class PackageSpecification(object): +from packaging.utils import canonicalize_name + + +if TYPE_CHECKING: + from collections.abc import Iterable + + from packaging.utils import NormalizedName + + T = TypeVar("T", bound="PackageSpecification") + + +class PackageSpecification: def __init__( self, - name, # type: str - source_type=None, # type: Optional[str] - source_url=None, # type: Optional[str] - source_reference=None, # type: Optional[str] - source_resolved_reference=None, # type: Optional[str] - features=None, # type: Optional[List[str]] - ): + name: str, + source_type: str | None = None, + source_url: str | None = None, + source_reference: str | None = None, + source_resolved_reference: str | None = None, + source_subdirectory: str | None = None, + features: Iterable[str] | None = None, + ) -> None: + from packaging.utils import canonicalize_name + + # Attributes must be immutable for clone() to be safe! + # (For performance reasons, clone only creates a copy instead of a deep copy). + self._pretty_name = name self._name = canonicalize_name(name) self._source_type = source_type self._source_url = source_url self._source_reference = source_reference self._source_resolved_reference = source_resolved_reference + self._source_subdirectory = source_subdirectory if not features: features = [] - self._features = frozenset(features) + self._features = frozenset(canonicalize_name(feature) for feature in features) @property - def name(self): # type: () -> str + def name(self) -> NormalizedName: return self._name @property - def pretty_name(self): # type: () -> str + def pretty_name(self) -> str: return self._pretty_name @property - def complete_name(self): # type: () -> str - name = self._name + def complete_name(self) -> str: + name: str = self._name if self._features: - name = "{}[{}]".format(name, ",".join(sorted(self._features))) + features = ",".join(sorted(self._features)) + name = f"{name}[{features}]" return name @property - def source_type(self): # type: () -> Optional[str] + def complete_pretty_name(self) -> str: + name = self._pretty_name + + if self._features: + features = ",".join(sorted(self._features)) + name = f"{name}[{features}]" + + return name + + @property + def source_type(self) -> str | None: return self._source_type @property - def source_url(self): # type: () -> Optional[str] + def source_url(self) -> str | None: return self._source_url @property - def source_reference(self): # type: () -> Optional[str] + def source_reference(self) -> str | None: return self._source_reference @property - def source_resolved_reference(self): # type: () -> Optional[str] + def source_resolved_reference(self) -> str | None: return self._source_resolved_reference @property - def features(self): # type: () -> FrozenSet[str] + def 
source_subdirectory(self) -> str | None: + return self._source_subdirectory + + @property + def features(self) -> frozenset[NormalizedName]: return self._features - def is_same_package_as(self, other): # type: ("PackageSpecification") -> bool - if other.complete_name != self.complete_name: + def is_direct_origin(self) -> bool: + return self._source_type in [ + "directory", + "file", + "url", + "git", + ] + + def provides(self, other: PackageSpecification) -> bool: + """ + Helper method to determine if this package provides the given specification. + + This determination is made to be true, if the names are the same and this + package provides all features required by the other specification. + + Source type checks are explicitly ignored here as this is not of interest. + """ + return self.name == other.name and self.features.issuperset(other.features) + + def is_same_source_as(self, other: PackageSpecification) -> bool: + if self._source_type != other.source_type: return False - if self._source_type: - if self._source_type != other.source_type: + if not self._source_type: + # both packages are of source type None + # no need to check further + return True + + if ( + self._source_url or other.source_url + ) and self._source_url != other.source_url: + return False + + if ( + self._source_subdirectory or other.source_subdirectory + ) and self._source_subdirectory != other.source_subdirectory: + return False + + # We check the resolved reference first: + # if they match we assume equality regardless + # of their source reference. + # This is important when comparing a resolved branch VCS + # dependency to a direct commit reference VCS dependency + if ( + self._source_resolved_reference + and other.source_resolved_reference + and self._source_resolved_reference == other.source_resolved_reference + ): + return True + + if self._source_reference or other.source_reference: + # special handling for packages with references + if not (self._source_reference and other.source_reference): + # case: one reference is defined and is non-empty, but other is not return False - if self._source_url or other.source_url: - if self._source_url != other.source_url: - return False - - if self._source_reference or other.source_reference: - # special handling for packages with references - if not self._source_reference or not other.source_reference: - # case: one reference is defined and is non-empty, but other is not - return False - - if not ( - self._source_reference == other.source_reference - or self._source_reference.startswith(other.source_reference) - or other.source_reference.startswith(self._source_reference) - ): - # case: both references defined, but one is not equal to or a short - # representation of the other - return False - - if ( - self._source_resolved_reference - and other.source_resolved_reference - and self._source_resolved_reference - != other.source_resolved_reference - ): - return False + if not ( + self._source_reference == other.source_reference + or self._source_reference.startswith(other.source_reference) + or other.source_reference.startswith(self._source_reference) + ): + # case: both references defined, but one is not equal to or a short + # representation of the other + return False + + if ( + self._source_resolved_reference + and other.source_resolved_reference + and self._source_resolved_reference != other.source_resolved_reference + ): + return False return True - def __hash__(self): # type: () -> int - if not self._source_type: - return hash(self._name) - - return ( - 
hash(self._name) - ^ hash(self._source_type) - ^ hash(self._source_url) - ^ hash(self._source_reference) - ^ hash(self._source_resolved_reference) - ^ hash(self._features) + def is_same_package_as(self, other: PackageSpecification) -> bool: + if other.complete_name != self.complete_name: + return False + + return self.is_same_source_as(other) + + def clone(self: T) -> T: + return copy.copy(self) + + def with_features(self: T, features: Iterable[str]) -> T: + package = self.clone() + + package._features = frozenset( + canonicalize_name(feature) for feature in features ) - def __str__(self): # type: () -> str - raise NotImplementedError() + return package + + def without_features(self: T) -> T: + return self.with_features([]) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, PackageSpecification): + return NotImplemented + return self.is_same_package_as(other) + + def __hash__(self) -> int: + result = hash(self.complete_name) # complete_name includes features + + if self._source_type: + # Don't include _source_reference and _source_resolved_reference in hash + # because two specs can be equal even if these attributes are not equal. + # (They must still meet certain conditions. See is_same_source_as().) + result ^= ( + hash(self._source_type) + ^ hash(self._source_url) + ^ hash(self._source_subdirectory) + ) + + return result + + def __str__(self) -> str: + raise NotImplementedError diff --git a/conda_lock/_vendor/poetry/core/packages/url_dependency.py b/conda_lock/_vendor/poetry/core/packages/url_dependency.py index 344eb587..99367017 100644 --- a/conda_lock/_vendor/poetry/core/packages/url_dependency.py +++ b/conda_lock/_vendor/poetry/core/packages/url_dependency.py @@ -1,85 +1,63 @@ -from typing import TYPE_CHECKING -from typing import FrozenSet -from typing import List -from typing import Union +from __future__ import annotations -from conda_lock._vendor.poetry.core.utils._compat import urlparse +from typing import TYPE_CHECKING +from urllib.parse import urlparse -from .dependency import Dependency +from conda_lock._vendor.poetry.core.packages.dependency import Dependency if TYPE_CHECKING: - from .constraints import BaseConstraint + from collections.abc import Iterable class URLDependency(Dependency): def __init__( self, - name, # type: str - url, # type: str - category="main", # type: str - optional=False, # type: bool - extras=None, # type: Union[List[str], FrozenSet[str]] - ): + name: str, + url: str, + *, + directory: str | None = None, + groups: Iterable[str] | None = None, + optional: bool = False, + extras: Iterable[str] | None = None, + ) -> None: + # Attributes must be immutable for clone() to be safe! + # (For performance reasons, clone only creates a copy instead of a deep copy). 
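+        # Sketch of what this buys us (added for illustration, not in the
+        # original source): since clone() is only a shallow copy.copy(),
+        # methods such as with_features() must rebind a fresh frozenset on
+        # the copy rather than mutate the shared one in place.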
self._url = url + self._directory = directory - parsed = urlparse.urlparse(url) + parsed = urlparse(url) if not parsed.scheme or not parsed.netloc: - raise ValueError("{} does not seem like a valid url".format(url)) + raise ValueError(f"{url} does not seem like a valid url") - super(URLDependency, self).__init__( + super().__init__( name, "*", - category=category, + groups=groups, optional=optional, allows_prereleases=True, source_type="url", source_url=self._url, + source_subdirectory=directory, extras=extras, ) @property - def url(self): # type: () -> str + def url(self) -> str: return self._url @property - def base_pep_508_name(self): # type: () -> str - requirement = self.pretty_name + def directory(self) -> str | None: + return self._directory - if self.extras: - requirement += "[{}]".format(",".join(self.extras)) + @property + def base_pep_508_name(self) -> str: + requirement = f"{self.complete_pretty_name} @ {self._url}" - requirement += " @ {}".format(self._url) + if self.directory: + requirement += f"#subdirectory={self.directory}" return requirement - def is_url(self): # type: () -> bool + def is_url(self) -> bool: return True - - def with_constraint(self, constraint): # type: ("BaseConstraint") -> URLDependency - new = URLDependency( - self.pretty_name, - url=self._url, - optional=self.is_optional(), - category=self.category, - extras=self._extras, - ) - - new._constraint = constraint - new._pretty_constraint = str(constraint) - - new.is_root = self.is_root - new.python_versions = self.python_versions - new.marker = self.marker - new.transitive_marker = self.transitive_marker - - for in_extra in self.in_extras: - new.in_extras.append(in_extra) - - return new - - def __str__(self): # type: () -> str - return "{} ({} url)".format(self._pretty_name, self._pretty_constraint) - - def __hash__(self): # type: () -> int - return hash((self._name, self._url)) diff --git a/conda_lock/_vendor/poetry/core/packages/utils/link.py b/conda_lock/_vendor/poetry/core/packages/utils/link.py index 76f6c1c7..e493e22e 100644 --- a/conda_lock/_vendor/poetry/core/packages/utils/link.py +++ b/conda_lock/_vendor/poetry/core/packages/utils/link.py @@ -1,40 +1,55 @@ +from __future__ import annotations + import posixpath import re +import urllib.parse as urlparse +import warnings +from functools import cached_property from typing import TYPE_CHECKING -from typing import Any -from typing import Optional -from typing import Tuple - -if TYPE_CHECKING: - from pip._internal.index.collector import HTMLPage # noqa +from conda_lock._vendor.poetry.core.packages.utils.utils import path_to_url +from conda_lock._vendor.poetry.core.packages.utils.utils import splitext -from .utils import path_to_url -from .utils import splitext - -try: - import urllib.parse as urlparse -except ImportError: - import urlparse +if TYPE_CHECKING: + from collections.abc import Mapping class Link: def __init__( - self, url, comes_from=None, requires_python=None - ): # type: (str, Optional["HTMLPage"], Optional[str]) -> None + self, + url: str, + *, + requires_python: str | None = None, + hashes: Mapping[str, str] | None = None, + metadata: str | bool | dict[str, str] | None = None, + yanked: str | bool = False, + ) -> None: """ Object representing a parsed link from https://pypi.python.org/simple/* url: url of the resource pointed to (href of the link) - comes_from: - instance of HTMLPage where the link was found, or string. requires_python: String containing the `Requires-Python` metadata field, specified in PEP 345. 
This may be specified by a data-requires-python attribute in the HTML link tag, as described in PEP 503. + hashes: + A dictionary of hash names and associated hashes of the file. + Only relevant for JSON-API (PEP 691). + metadata: + One of: + - bool indicating that metadata is available + - string of the syntax `=` representing the hash + of the Core Metadata file according to PEP 658 (HTML). + - dict with hash names and associated hashes of the Core Metadata file + according to PEP 691 (JSON). + yanked: + False, if the data-yanked attribute is not present. + A string, if the data-yanked attribute has a string value. + True, if the data-yanked attribute is present but has no value. + According to PEP 592. """ # url can be a UNC windows share @@ -42,91 +57,97 @@ def __init__( url = path_to_url(url) self.url = url - self.comes_from = comes_from self.requires_python = requires_python if requires_python else None + self._hashes = hashes - def __str__(self): # type: () -> str + if isinstance(metadata, str): + metadata = {"true": True, "": False, "false": False}.get( + metadata.strip().lower(), metadata + ) + + self._metadata = metadata + self._yanked = yanked + + def __str__(self) -> str: if self.requires_python: - rp = " (requires-python:%s)" % self.requires_python + rp = f" (requires-python:{self.requires_python})" else: rp = "" - if self.comes_from: - return "%s (from %s)%s" % (self.url, self.comes_from, rp) - else: - return str(self.url) - def __repr__(self): # type: () -> str - return "" % self + return f"{self.url}{rp}" - def __eq__(self, other): # type: (Any) -> bool + def __repr__(self) -> str: + return f"" + + def __eq__(self, other: object) -> bool: if not isinstance(other, Link): return NotImplemented return self.url == other.url - def __ne__(self, other): # type: (Any) -> bool + def __ne__(self, other: object) -> bool: if not isinstance(other, Link): return NotImplemented return self.url != other.url - def __lt__(self, other): # type: (Any) -> bool + def __lt__(self, other: object) -> bool: if not isinstance(other, Link): return NotImplemented return self.url < other.url - def __le__(self, other): # type: (Any) -> bool + def __le__(self, other: object) -> bool: if not isinstance(other, Link): return NotImplemented return self.url <= other.url - def __gt__(self, other): # type: (Any) -> bool + def __gt__(self, other: object) -> bool: if not isinstance(other, Link): return NotImplemented return self.url > other.url - def __ge__(self, other): # type: (Any) -> bool + def __ge__(self, other: object) -> bool: if not isinstance(other, Link): return NotImplemented return self.url >= other.url - def __hash__(self): # type: () -> int + def __hash__(self) -> int: return hash(self.url) - @property - def filename(self): # type: () -> str + @cached_property + def filename(self) -> str: _, netloc, path, _, _ = urlparse.urlsplit(self.url) name = posixpath.basename(path.rstrip("/")) or netloc name = urlparse.unquote(name) - assert name, "URL %r produced no filename" % self.url + return name - @property - def scheme(self): # type: () -> str + @cached_property + def scheme(self) -> str: return urlparse.urlsplit(self.url)[0] - @property - def netloc(self): # type: () -> str + @cached_property + def netloc(self) -> str: return urlparse.urlsplit(self.url)[1] - @property - def path(self): # type: () -> str + @cached_property + def path(self) -> str: return urlparse.unquote(urlparse.urlsplit(self.url)[2]) - def splitext(self): # type: () -> Tuple[str, str] + def splitext(self) -> tuple[str, str]: return 
splitext(posixpath.basename(self.path.rstrip("/"))) - @property - def ext(self): # type: () -> str + @cached_property + def ext(self) -> str: return self.splitext()[1] - @property - def url_without_fragment(self): # type: () -> str + @cached_property + def url_without_fragment(self) -> str: scheme, netloc, path, query, fragment = urlparse.urlsplit(self.url) return urlparse.urlunsplit((scheme, netloc, path, query, None)) _egg_fragment_re = re.compile(r"[#&]egg=([^&]*)") - @property - def egg_fragment(self): # type: () -> Optional[str] + @cached_property + def egg_fragment(self) -> str | None: match = self._egg_fragment_re.search(self.url) if not match: return None @@ -134,8 +155,8 @@ def egg_fragment(self): # type: () -> Optional[str] _subdirectory_fragment_re = re.compile(r"[#&]subdirectory=([^&]*)") - @property - def subdirectory_fragment(self): # type: () -> Optional[str] + @cached_property + def subdirectory_fragment(self) -> str | None: match = self._subdirectory_fragment_re.search(self.url) if not match: return None @@ -143,47 +164,125 @@ def subdirectory_fragment(self): # type: () -> Optional[str] _hash_re = re.compile(r"(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)") + @cached_property + def has_metadata(self) -> bool: + if self._metadata is None: + return False + return bool(self._metadata) and (self.is_wheel or self.is_sdist) + + @cached_property + def metadata_url(self) -> str | None: + if self.has_metadata: + return f"{self.url_without_fragment.split('?', 1)[0]}.metadata" + return None + + @cached_property + def metadata_hashes(self) -> Mapping[str, str]: + if self.has_metadata: + if isinstance(self._metadata, dict): + return self._metadata + if isinstance(self._metadata, str): + match = self._hash_re.search(self._metadata) + if match: + return {match.group(1): match.group(2)} + return {} + + @property + def metadata_hash(self) -> str | None: + warnings.warn( + "metadata_hash is deprecated. Use metadata_hashes instead.", + DeprecationWarning, + stacklevel=2, + ) + if self.has_metadata and isinstance(self._metadata, str): + match = self._hash_re.search(self._metadata) + if match: + return match.group(2) + return None + + @property + def metadata_hash_name(self) -> str | None: + warnings.warn( + "metadata_hash_name is deprecated. Use metadata_hashes instead.", + DeprecationWarning, + stacklevel=2, + ) + if self.has_metadata and isinstance(self._metadata, str): + match = self._hash_re.search(self._metadata) + if match: + return match.group(1) + return None + + @cached_property + def hashes(self) -> Mapping[str, str]: + if self._hashes: + return self._hashes + match = self._hash_re.search(self.url) + if match: + return {match.group(1): match.group(2)} + return {} + @property - def hash(self): # type: () -> Optional[str] + def hash(self) -> str | None: + warnings.warn( + "hash is deprecated. Use hashes instead.", + DeprecationWarning, + stacklevel=2, + ) match = self._hash_re.search(self.url) if match: return match.group(2) return None @property - def hash_name(self): # type: () -> Optional[str] + def hash_name(self) -> str | None: + warnings.warn( + "hash_name is deprecated. 
Use hashes instead.", + DeprecationWarning, + stacklevel=2, + ) match = self._hash_re.search(self.url) if match: return match.group(1) return None - @property - def show_url(self): # type: () -> str + @cached_property + def show_url(self) -> str: return posixpath.basename(self.url.split("#", 1)[0].split("?", 1)[0]) - @property - def is_wheel(self): # type: () -> bool + @cached_property + def is_wheel(self) -> bool: return self.ext == ".whl" - @property - def is_wininst(self): # type: () -> bool + @cached_property + def is_wininst(self) -> bool: return self.ext == ".exe" - @property - def is_egg(self): # type: () -> bool + @cached_property + def is_egg(self) -> bool: return self.ext == ".egg" - @property - def is_sdist(self): # type: () -> bool + @cached_property + def is_sdist(self) -> bool: return self.ext in {".tar.bz2", ".tar.gz", ".zip"} - @property - def is_artifact(self): # type: () -> bool + @cached_property + def is_artifact(self) -> bool: """ Determines if this points to an actual artifact (e.g. a tarball) or if it points to an "abstract" thing like a path or a VCS location. """ - if self.scheme in ["ssh", "git", "hg", "bzr", "sftp", "svn"]: + if self.scheme in {"ssh", "git", "hg", "bzr", "sftp", "svn"}: return False return True + + @cached_property + def yanked(self) -> bool: + return isinstance(self._yanked, str) or bool(self._yanked) + + @cached_property + def yanked_reason(self) -> str: + if isinstance(self._yanked, str): + return self._yanked + return "" diff --git a/conda_lock/_vendor/poetry/core/packages/utils/utils.py b/conda_lock/_vendor/poetry/core/packages/utils/utils.py index e49782c6..88981cd1 100644 --- a/conda_lock/_vendor/poetry/core/packages/utils/utils.py +++ b/conda_lock/_vendor/poetry/core/packages/utils/utils.py @@ -1,62 +1,58 @@ -import os +from __future__ import annotations + +import functools import posixpath import re import sys +from contextlib import suppress +from pathlib import Path from typing import TYPE_CHECKING from typing import Dict from typing import List from typing import Tuple -from typing import Union - -from six.moves.urllib.parse import unquote # noqa -from six.moves.urllib.parse import urlsplit # noqa -from six.moves.urllib.request import url2pathname # noqa - -from conda_lock._vendor.poetry.core.packages.constraints.constraint import Constraint -from conda_lock._vendor.poetry.core.packages.constraints.multi_constraint import MultiConstraint -from conda_lock._vendor.poetry.core.packages.constraints.union_constraint import UnionConstraint -from conda_lock._vendor.poetry.core.semver import EmptyConstraint -from conda_lock._vendor.poetry.core.semver import Version -from conda_lock._vendor.poetry.core.semver import VersionConstraint -from conda_lock._vendor.poetry.core.semver import VersionRange -from conda_lock._vendor.poetry.core.semver import VersionUnion -from conda_lock._vendor.poetry.core.semver import parse_constraint -from conda_lock._vendor.poetry.core.utils._compat import Path -from conda_lock._vendor.poetry.core.version.markers import BaseMarker -from conda_lock._vendor.poetry.core.version.markers import MarkerUnion -from conda_lock._vendor.poetry.core.version.markers import MultiMarker +from urllib.parse import unquote +from urllib.parse import urlsplit +from urllib.request import url2pathname + +from conda_lock._vendor.poetry.core.constraints.version import Version +from conda_lock._vendor.poetry.core.constraints.version import VersionRange +from conda_lock._vendor.poetry.core.constraints.version import 
parse_marker_version_constraint from conda_lock._vendor.poetry.core.version.markers import SingleMarker +from conda_lock._vendor.poetry.core.version.markers import SingleMarkerLike +from conda_lock._vendor.poetry.core.version.markers import dnf if TYPE_CHECKING: - from conda_lock._vendor.poetry.core.packages.constraints import BaseConstraint # noqa - from conda_lock._vendor.poetry.core.semver import VersionTypes # noqa + from conda_lock._vendor.poetry.core.constraints.generic import BaseConstraint + from conda_lock._vendor.poetry.core.constraints.version import VersionConstraint + from conda_lock._vendor.poetry.core.version.markers import BaseMarker + + # Even though we've `from __future__ import annotations`, mypy doesn't seem to like + # this as `dict[str, ...]` + ConvertedMarkers = Dict[str, List[List[Tuple[str, str]]]] + BZ2_EXTENSIONS = (".tar.bz2", ".tbz") XZ_EXTENSIONS = (".tar.xz", ".txz", ".tlz", ".tar.lz", ".tar.lzma") ZIP_EXTENSIONS = (".zip", ".whl") TAR_EXTENSIONS = (".tar.gz", ".tgz", ".tar") ARCHIVE_EXTENSIONS = ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS -SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS +SUPPORTED_EXTENSIONS: tuple[str, ...] = ZIP_EXTENSIONS + TAR_EXTENSIONS -try: - import bz2 # noqa +with suppress(ImportError): + import bz2 # noqa: F401 SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS -except ImportError: - pass -try: +with suppress(ImportError): # Only for Python 3.3+ - import lzma # noqa + import lzma # noqa: F401 SUPPORTED_EXTENSIONS += XZ_EXTENSIONS -except ImportError: - pass -def path_to_url(path): # type: (Union[str, Path]) -> str +def path_to_url(path: str | Path) -> str: """ Convert a path to a file: URL. The path will be made absolute unless otherwise specified and have quoted path parts. @@ -64,7 +60,7 @@ def path_to_url(path): # type: (Union[str, Path]) -> str return Path(path).absolute().as_uri() -def url_to_path(url): # type: (str) -> Path +def url_to_path(url: str) -> Path: """ Convert an RFC8089 file URI to path. 
@@ -72,7 +68,7 @@ def url_to_path(url): # type: (str) -> Path https://github.com/pypa/pip/blob/4d1932fcdd1974c820ea60b3286984ebb0c3beaa/src/pip/_internal/utils/urls.py#L31 """ if not url.startswith("file:"): - raise ValueError("{} is not a valid file URI".format(url)) + raise ValueError(f"{url} is not a valid file URI") _, netloc, path, _, _ = urlsplit(url) @@ -84,13 +80,13 @@ def url_to_path(url): # type: (str) -> Path netloc = "\\\\" + netloc else: raise ValueError( - "non-local file URIs are not supported on this platform: {}".format(url) + f"non-local file URIs are not supported on this platform: {url}" ) return Path(url2pathname(netloc + unquote(path))) -def is_url(name): # type: (str) -> bool +def is_url(name: str) -> bool: if ":" not in name: return False scheme = name.split(":", 1)[0].lower() @@ -110,7 +106,7 @@ def is_url(name): # type: (str) -> bool ] -def strip_extras(path): # type: (str) -> Tuple[str, str] +def strip_extras(path: str) -> tuple[str, str | None]: m = re.match(r"^(.+)(\[[^\]]+\])$", path) extras = None if m: @@ -122,17 +118,22 @@ def strip_extras(path): # type: (str) -> Tuple[str, str] return path_no_extras, extras -def is_installable_dir(path): # type: (str) -> bool - """Return True if `path` is a directory containing a setup.py file.""" - if not os.path.isdir(path): +@functools.lru_cache(maxsize=None) +def is_python_project(path: Path) -> bool: + """Return true if the directory is a Python project""" + if not path.is_dir(): return False - setup_py = os.path.join(path, "setup.py") - if os.path.isfile(setup_py): - return True - return False + setup_py = path / "setup.py" + setup_cfg = path / "setup.cfg" + setuptools_project = setup_py.exists() or setup_cfg.exists() + + pyproject = (path / "pyproject.toml").exists() -def is_archive_file(name): # type: (str) -> bool + return pyproject or setuptools_project + + +def is_archive_file(name: str) -> bool: """Return True if `name` is a considered as an archive file.""" ext = splitext(name)[1].lower() if ext in ARCHIVE_EXTENSIONS: @@ -140,7 +141,7 @@ def is_archive_file(name): # type: (str) -> bool return False -def splitext(path): # type: (str) -> Tuple[str, str] +def splitext(path: str) -> tuple[str, str]: """Like os.path.splitext, but take off .tar too""" base, ext = posixpath.splitext(path) if base.lower().endswith(".tar"): @@ -149,149 +150,159 @@ def splitext(path): # type: (str) -> Tuple[str, str] return base, ext -def group_markers( - markers, or_=False -): # type: (List[BaseMarker], bool) -> List[Union[Tuple[str, str, str], List[Tuple[str, str, str]]]] - groups = [[]] - - for marker in markers: - if or_: - groups.append([]) - - if isinstance(marker, (MultiMarker, MarkerUnion)): - groups[-1].append( - group_markers(marker.markers, isinstance(marker, MarkerUnion)) - ) - elif isinstance(marker, SingleMarker): - lhs, op, rhs = marker.name, marker.operator, marker.value - - groups[-1].append((lhs, op, rhs)) - - return groups - - -def convert_markers(marker): # type: (BaseMarker) -> Dict[str, List[Tuple[str, str]]] - groups = group_markers([marker]) - - requirements = {} - - def _group( - _groups, or_=False - ): # type: (List[Union[Tuple[str, str, str], List[Tuple[str, str, str]]]], bool) -> None - ors = {} - for group in _groups: - if isinstance(group, list): - _group(group, or_=True) +def convert_markers(marker: BaseMarker) -> ConvertedMarkers: + from conda_lock._vendor.poetry.core.version.markers import MarkerUnion + from conda_lock._vendor.poetry.core.version.markers import MultiMarker + from 
conda_lock._vendor.poetry.core.version.markers import SingleMarker + + requirements: ConvertedMarkers = {} + marker = dnf(marker) + conjunctions = marker.markers if isinstance(marker, MarkerUnion) else [marker] + group_count = len(conjunctions) + + def add_constraint( + marker_name: str, constraint: tuple[str, str], group_index: int + ) -> None: + # python_full_version is equivalent to python_version + # for Poetry so we merge them + if marker_name == "python_full_version": + marker_name = "python_version" + if marker_name not in requirements: + requirements[marker_name] = [[] for _ in range(group_count)] + requirements[marker_name][group_index].append(constraint) + + for i, sub_marker in enumerate(conjunctions): + if isinstance(sub_marker, MultiMarker): + for m in sub_marker.markers: + assert isinstance(m, SingleMarkerLike) + if isinstance(m, SingleMarker): + add_constraint(m.name, (m.operator, m.value), i) + else: + add_constraint(m.name, ("", str(m.constraint)), i) + elif isinstance(sub_marker, SingleMarkerLike): + if isinstance(sub_marker, SingleMarker): + add_constraint( + sub_marker.name, (sub_marker.operator, sub_marker.value), i + ) else: - variable, op, value = group - group_name = str(variable) - - # python_full_version is equivalent to python_version - # for Poetry so we merge them - if group_name == "python_full_version": - group_name = "python_version" - - if group_name not in requirements: - requirements[group_name] = [] - - if group_name not in ors: - ors[group_name] = or_ - - if ors[group_name] or not requirements[group_name]: - requirements[group_name].append([]) + add_constraint(sub_marker.name, ("", str(sub_marker.constraint)), i) - requirements[group_name][-1].append((str(op), str(value))) + for group_name in requirements: + # remove duplicates + seen = [] + for r in requirements[group_name]: + if r not in seen: + seen.append(r) + requirements[group_name] = seen - ors[group_name] = False + return requirements - _group(groups, or_=True) - return requirements +def contains_group_without_marker(markers: ConvertedMarkers, marker_name: str) -> bool: + return marker_name not in markers or [] in markers[marker_name] def create_nested_marker( - name, constraint -): # type: (str, Union["BaseConstraint", VersionUnion, Version, VersionConstraint]) -> str + name: str, + constraint: BaseConstraint | VersionConstraint, +) -> str: + from conda_lock._vendor.poetry.core.constraints.generic import Constraint + from conda_lock._vendor.poetry.core.constraints.generic import MultiConstraint + from conda_lock._vendor.poetry.core.constraints.generic import UnionConstraint + from conda_lock._vendor.poetry.core.constraints.version import VersionUnion + if constraint.is_any(): return "" if isinstance(constraint, (MultiConstraint, UnionConstraint)): - parts = [] + multi_parts = [] for c in constraint.constraints: - multi = False - if isinstance(c, (MultiConstraint, UnionConstraint)): - multi = True - - parts.append((multi, create_nested_marker(name, c))) + multi = isinstance(c, (MultiConstraint, UnionConstraint)) + multi_parts.append((multi, create_nested_marker(name, c))) glue = " and " if isinstance(constraint, UnionConstraint): - parts = ["({})".format(part[1]) if part[0] else part[1] for part in parts] + parts = [f"({part[1]})" if part[0] else part[1] for part in multi_parts] glue = " or " else: - parts = [part[1] for part in parts] + parts = [part[1] for part in multi_parts] marker = glue.join(parts) elif isinstance(constraint, Constraint): - marker = '{} {} "{}"'.format(name, 
constraint.operator, constraint.version) + marker = f'{name} {constraint.operator} "{constraint.value}"' elif isinstance(constraint, VersionUnion): - parts = [] - for c in constraint.ranges: - parts.append(create_nested_marker(name, c)) - + parts = [create_nested_marker(name, c) for c in constraint.ranges] glue = " or " - parts = ["({})".format(part) for part in parts] - + parts = [f"({part})" for part in parts] marker = glue.join(parts) elif isinstance(constraint, Version): if name == "python_version" and constraint.precision >= 3: name = "python_full_version" - marker = '{} == "{}"'.format(name, constraint.text) + marker = f'{name} == "{constraint.text}"' else: - if constraint.min is not None: - op = ">=" - if not constraint.include_min: - op = ">" - - version = constraint.min - if constraint.max is not None: - min_name = max_name = name - if min_name == "python_version" and constraint.min.precision >= 3: - min_name = "python_full_version" - - if max_name == "python_version" and constraint.max.precision >= 3: - max_name = "python_full_version" - - text = '{} {} "{}"'.format(min_name, op, version) + assert isinstance(constraint, VersionRange) + min_name = max_name = name - op = "<=" - if not constraint.include_max: - op = "<" - - version = constraint.max + parts = [] - text += ' and {} {} "{}"'.format(max_name, op, version) + # `python_version` is a special case: to keep the constructed marker equivalent + # to the constraint we need to be careful with the precision. + # + # PEP 440 tells us that when we come to make the comparison the release + # segment will be zero padded: eg "<= 3.10" is equivalent to "<= 3.10.0". + # + # But "python_version <= 3.10" is _not_ equivalent to "python_version <= 3.10.0" + # - see normalize_python_version_markers. + # + # A similar issue arises for a constraint like "> 3.6". 
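+            # Worked example (added for illustration, not in the original):
+            # for the exclusive lower bound "> 3.6" (precision 2), the naive
+            # marker
+            #     python_version > "3.6"
+            # wrongly excludes 3.6.1, whose python_version is just "3.6";
+            # the padding below instead emits
+            #     python_full_version > "3.6.0"
+            # which 3.6.1 satisfies, matching the version constraint.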
+ if constraint.min is not None: + op = ">=" if constraint.include_min else ">" + version = constraint.min + if min_name == "python_version" and version.precision >= 3: + min_name = "python_full_version" + + if ( + min_name == "python_version" + and not constraint.include_min + and version.precision < 3 + ): + padding = ".0" * (3 - version.precision) + part = f'python_full_version > "{version}{padding}"' + else: + part = f'{min_name} {op} "{version}"' - return text - elif constraint.max is not None: - op = "<=" - if not constraint.include_max: - op = "<" + parts.append(part) + if constraint.max is not None: + op = "<=" if constraint.include_max else "<" version = constraint.max - else: - return "" + if max_name == "python_version" and version.precision >= 3: + max_name = "python_full_version" + + if ( + max_name == "python_version" + and constraint.include_max + and version.precision < 3 + ): + padding = ".0" * (3 - version.precision) + part = f'python_full_version <= "{version}{padding}"' + else: + part = f'{max_name} {op} "{version}"' - if name == "python_version" and version.precision >= 3: - name = "python_full_version" + parts.append(part) - marker = '{} {} "{}"'.format(name, op, version) + marker = " and ".join(parts) return marker -def get_python_constraint_from_marker(marker,): # type: (BaseMarker) -> "VersionTypes" +def get_python_constraint_from_marker( + marker: BaseMarker, +) -> VersionConstraint: + from conda_lock._vendor.poetry.core.constraints.version import EmptyConstraint + from conda_lock._vendor.poetry.core.constraints.version import VersionRange + python_marker = marker.only("python_version", "python_full_version") if python_marker.is_any(): return VersionRange() @@ -300,38 +311,69 @@ def get_python_constraint_from_marker(marker,): # type: (BaseMarker) -> "Versio return EmptyConstraint() markers = convert_markers(marker) + if contains_group_without_marker(markers, "python_version"): + # groups are in disjunctive normal form (DNF), + # an empty group means that python_version does not appear in this group, + # which means that python_version is arbitrary for this group + return VersionRange() + + python_version_markers = markers["python_version"] + normalized = normalize_python_version_markers(python_version_markers) + constraint = parse_marker_version_constraint(normalized) + return constraint + +def normalize_python_version_markers( # NOSONAR + disjunction: list[list[tuple[str, str]]], +) -> str: ors = [] - for or_ in markers["python_version"]: + for or_ in disjunction: ands = [] for op, version in or_: # Expand python version - if op == "==": + if op == "==" and "*" not in version and version.count(".") < 2: version = "~" + version op = "" - elif op == "!=": + + elif op == "!=" and "*" not in version and version.count(".") < 2: version += ".*" + elif op in ("<=", ">"): + # Make adjustments on encountering versions with less than full + # precision. + # + # Per PEP-508: + # python_version <-> '.'.join(platform.python_version_tuple()[:2]) + # + # So for two digits of precision we make the following adjustments: + # - `python_version > "x.y"` requires version >= x.(y+1).anything + # - `python_version <= "x.y"` requires version < x.(y+1).anything + # + # Treatment when we see a single digit of precision is less clear: is + # that even a legitimate marker? 
+ # + # Experiment suggests that pip behaviour is essentially to make a + # lexicographical comparison, for example `python_version > "3"` is + # satisfied by version 3.anything, whereas `python_version <= "3"` is + # satisfied only by version 2.anything. + # + # We achieve the above by fiddling with the operator and version in the + # marker. parsed_version = Version.parse(version) - if parsed_version.precision == 1: + if parsed_version.precision < 3: if op == "<=": op = "<" - version = parsed_version.next_major.text elif op == ">": op = ">=" - version = parsed_version.next_major.text - elif parsed_version.precision == 2: - if op == "<=": - op = "<" - version = parsed_version.next_minor.text - elif op == ">": - op = ">=" - version = parsed_version.next_minor.text + + if parsed_version.precision == 2: + version = parsed_version.next_minor().text + elif op in ("in", "not in"): versions = [] - for v in re.split("[ ,]+", version): + for v in SingleMarker.VALUE_SEPARATOR_RE.split(version): split = v.split(".") - if len(split) in [1, 2]: + if len(split) in (1, 2): split.append("*") op_ = "" if op == "in" else "!=" else: @@ -339,14 +381,14 @@ def get_python_constraint_from_marker(marker,): # type: (BaseMarker) -> "Versio versions.append(op_ + ".".join(split)) - glue = " || " if op == "in" else ", " if versions: + glue = " || " if op == "in" else ", " ands.append(glue.join(versions)) continue - ands.append("{}{}".format(op, version)) + ands.append(f"{op}{version}") ors.append(" ".join(ands)) - return parse_constraint(" || ".join(ors)) + return " || ".join(ors) diff --git a/conda_lock/_vendor/poetry/core/packages/vcs_dependency.py b/conda_lock/_vendor/poetry/core/packages/vcs_dependency.py index 6b3d7b53..13907e44 100644 --- a/conda_lock/_vendor/poetry/core/packages/vcs_dependency.py +++ b/conda_lock/_vendor/poetry/core/packages/vcs_dependency.py @@ -1,16 +1,12 @@ -from typing import TYPE_CHECKING -from typing import FrozenSet -from typing import List -from typing import Optional -from typing import Union +from __future__ import annotations -from conda_lock._vendor.poetry.core.vcs import git +from typing import TYPE_CHECKING -from .dependency import Dependency +from conda_lock._vendor.poetry.core.packages.dependency import Dependency if TYPE_CHECKING: - from .constraints import BaseConstraint + from collections.abc import Iterable class VCSDependency(Dependency): @@ -20,146 +16,123 @@ class VCSDependency(Dependency): def __init__( self, - name, # type: str - vcs, # type: str - source, # type: str - branch=None, # type: Optional[str] - tag=None, # type: Optional[str] - rev=None, # type: Optional[str] - resolved_rev=None, # type: Optional[str] - category="main", # type: str - optional=False, # type: bool - develop=False, # type: bool - extras=None, # type: Union[List[str], FrozenSet[str]] - ): + name: str, + vcs: str, + source: str, + branch: str | None = None, + tag: str | None = None, + rev: str | None = None, + resolved_rev: str | None = None, + directory: str | None = None, + groups: Iterable[str] | None = None, + optional: bool = False, + develop: bool = False, + extras: Iterable[str] | None = None, + ) -> None: + # Attributes must be immutable for clone() to be safe! + # (For performance reasons, clone only creates a copy instead of a deep copy). 
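+        # Note for illustration (not vendored text): the first of branch, tag
+        # or rev that is set becomes the source reference below; when none is
+        # given it now falls back to "HEAD" instead of assuming a "master"
+        # branch as the removed code did.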
self._vcs = vcs self._source = source - if not any([branch, tag, rev]): - # If nothing has been specified, we assume master - branch = "master" - self._branch = branch self._tag = tag self._rev = rev + self._directory = directory self._develop = develop - super(VCSDependency, self).__init__( + super().__init__( name, "*", - category=category, + groups=groups, optional=optional, allows_prereleases=True, source_type=self._vcs.lower(), source_url=self._source, - source_reference=branch or tag or rev, + source_reference=branch or tag or rev or "HEAD", source_resolved_reference=resolved_rev, + source_subdirectory=directory, extras=extras, ) @property - def vcs(self): # type: () -> str + def vcs(self) -> str: return self._vcs @property - def source(self): # type: () -> str + def source(self) -> str: return self._source @property - def branch(self): # type: () -> Optional[str] + def branch(self) -> str | None: return self._branch @property - def tag(self): # type: () -> Optional[str] + def tag(self) -> str | None: return self._tag @property - def rev(self): # type: () -> Optional[str] + def rev(self) -> str | None: return self._rev @property - def develop(self): # type: () -> bool + def directory(self) -> str | None: + return self._directory + + @property + def develop(self) -> bool: return self._develop @property - def reference(self): # type: () -> str - return self._branch or self._tag or self._rev + def reference(self) -> str: + reference = self._branch or self._tag or self._rev or "" + return reference @property - def pretty_constraint(self): # type: () -> str + def pretty_constraint(self) -> str: if self._branch: what = "branch" version = self._branch elif self._tag: what = "tag" version = self._tag - else: + elif self._rev: what = "rev" version = self._rev + else: + return "" - return "{} {}".format(what, version) + return f"{what} {version}" - @property - def base_pep_508_name(self): # type: () -> str - requirement = self.pretty_name - parsed_url = git.ParsedUrl.parse(self._source) + def _base_pep_508_name(self, *, resolved: bool = False) -> str: + from conda_lock._vendor.poetry.core.vcs import git - if self.extras: - requirement += "[{}]".format(",".join(self.extras)) + requirement = self.complete_pretty_name + parsed_url = git.ParsedUrl.parse(self._source) if parsed_url.protocol is not None: - requirement += " @ {}+{}@{}".format(self._vcs, self._source, self.reference) + requirement += f" @ {self._vcs}+{self._source}" else: - requirement += " @ {}+ssh://{}@{}".format( - self._vcs, parsed_url.format(), self.reference - ) + requirement += f" @ {self._vcs}+ssh://{parsed_url.format()}" - return requirement + if resolved and self.source_resolved_reference: + requirement += f"@{self.source_resolved_reference}" + elif self.reference: + requirement += f"@{self.reference}" - def is_vcs(self): # type: () -> bool - return True - - def accepts_prereleases(self): # type: () -> bool - return True + if self._directory: + requirement += f"#subdirectory={self._directory}" - def with_constraint(self, constraint): # type: ("BaseConstraint") -> VCSDependency - new = VCSDependency( - self.pretty_name, - self._vcs, - self._source, - branch=self._branch, - tag=self._tag, - rev=self._rev, - resolved_rev=self._source_resolved_reference, - optional=self.is_optional(), - category=self.category, - develop=self._develop, - extras=self._extras, - ) - - new._constraint = constraint - new._pretty_constraint = str(constraint) - - new.is_root = self.is_root - new.python_versions = self.python_versions - new.marker = 
self.marker - new.transitive_marker = self.transitive_marker - - for in_extra in self.in_extras: - new.in_extras.append(in_extra) - - return new + return requirement - def __str__(self): # type: () -> str - reference = self._vcs - if self._branch: - reference += " branch {}".format(self._branch) - elif self._tag: - reference += " tag {}".format(self._tag) - elif self._rev: - reference += " rev {}".format(self._rev) + @property + def base_pep_508_name(self) -> str: + requirement = self._base_pep_508_name() + return requirement - return "{} ({} {})".format(self._pretty_name, self._constraint, reference) + @property + def base_pep_508_name_resolved(self) -> str: + requirement = self._base_pep_508_name(resolved=True) + return requirement - def __hash__(self): # type: () -> int - return hash((self._name, self._vcs, self._branch, self._tag, self._rev)) + def is_vcs(self) -> bool: + return True diff --git a/conda_lock/_vendor/poetry/core/poetry.py b/conda_lock/_vendor/poetry/core/poetry.py index a096cbf6..c4f12533 100644 --- a/conda_lock/_vendor/poetry/core/poetry.py +++ b/conda_lock/_vendor/poetry/core/poetry.py @@ -1,41 +1,50 @@ -from __future__ import absolute_import -from __future__ import unicode_literals +from __future__ import annotations from typing import TYPE_CHECKING from typing import Any -from conda_lock._vendor.poetry.core.pyproject import PyProjectTOML -from conda_lock._vendor.poetry.core.utils._compat import Path # noqa +from conda_lock._vendor.poetry.core.pyproject.toml import PyProjectTOML if TYPE_CHECKING: - from conda_lock._vendor.poetry.core.packages import ProjectPackage # noqa - from conda_lock._vendor.poetry.core.pyproject.toml import PyProjectTOMLFile # noqa + from pathlib import Path + from conda_lock._vendor.poetry.core.packages.project_package import ProjectPackage -class Poetry(object): + +class Poetry: def __init__( - self, file, local_config, package, - ): # type: (Path, dict, "ProjectPackage") -> None - self._pyproject = PyProjectTOML(file) + self, + file: Path, + local_config: dict[str, Any], + package: ProjectPackage, + pyproject_type: type[PyProjectTOML] = PyProjectTOML, + ) -> None: + self._pyproject = pyproject_type(file) self._package = package self._local_config = local_config @property - def pyproject(self): # type: () -> PyProjectTOML + def pyproject(self) -> PyProjectTOML: return self._pyproject @property - def file(self): # type: () -> "PyProjectTOMLFile" - return self._pyproject.file + def pyproject_path(self) -> Path: + return self._pyproject.path @property - def package(self): # type: () -> "ProjectPackage" + def package(self) -> ProjectPackage: return self._package @property - def local_config(self): # type: () -> dict + def is_package_mode(self) -> bool: + package_mode = self._local_config["package-mode"] + assert isinstance(package_mode, bool) + return package_mode + + @property + def local_config(self) -> dict[str, Any]: return self._local_config - def get_project_config(self, config, default=None): # type: (str, Any) -> Any + def get_project_config(self, config: str, default: Any = None) -> Any: return self._local_config.get("config", {}).get(config, default) diff --git a/conda_lock/_vendor/poetry/core/py.typed b/conda_lock/_vendor/poetry/core/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/conda_lock/_vendor/poetry/core/pyproject/__init__.py b/conda_lock/_vendor/poetry/core/pyproject/__init__.py index b8b67752..e69de29b 100644 --- a/conda_lock/_vendor/poetry/core/pyproject/__init__.py +++ 
b/conda_lock/_vendor/poetry/core/pyproject/__init__.py @@ -1,6 +0,0 @@ -from conda_lock._vendor.poetry.core.pyproject.exceptions import PyProjectException -from conda_lock._vendor.poetry.core.pyproject.tables import BuildSystem -from conda_lock._vendor.poetry.core.pyproject.toml import PyProjectTOML - - -__all__ = [clazz.__name__ for clazz in {BuildSystem, PyProjectException, PyProjectTOML}] diff --git a/conda_lock/_vendor/poetry/core/pyproject/exceptions.py b/conda_lock/_vendor/poetry/core/pyproject/exceptions.py index 07eea82e..9cdbb65b 100644 --- a/conda_lock/_vendor/poetry/core/pyproject/exceptions.py +++ b/conda_lock/_vendor/poetry/core/pyproject/exceptions.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from conda_lock._vendor.poetry.core.exceptions import PoetryCoreException diff --git a/conda_lock/_vendor/poetry/core/pyproject/tables.py b/conda_lock/_vendor/poetry/core/pyproject/tables.py index 1225a6c0..25db6763 100644 --- a/conda_lock/_vendor/poetry/core/pyproject/tables.py +++ b/conda_lock/_vendor/poetry/core/pyproject/tables.py @@ -1,57 +1,50 @@ -from typing import TYPE_CHECKING -from typing import List -from typing import Optional +from __future__ import annotations -from conda_lock._vendor.poetry.core.utils._compat import Path -from conda_lock._vendor.poetry.core.utils.helpers import canonicalize_name +from contextlib import suppress +from pathlib import Path +from typing import TYPE_CHECKING if TYPE_CHECKING: - from conda_lock._vendor.poetry.core.packages import Dependency # noqa + from conda_lock._vendor.poetry.core.packages.dependency import Dependency # TODO: Convert to dataclass once python 2.7, 3.5 is dropped class BuildSystem: def __init__( - self, build_backend=None, requires=None - ): # type: (Optional[str], Optional[List[str]]) -> None + self, build_backend: str | None = None, requires: list[str] | None = None + ) -> None: self.build_backend = ( build_backend if build_backend is not None else "setuptools.build_meta:__legacy__" ) self.requires = requires if requires is not None else ["setuptools", "wheel"] - self._dependencies = None + self._dependencies: list[Dependency] | None = None @property - def dependencies(self): # type: () -> List["Dependency"] + def dependencies(self) -> list[Dependency]: if self._dependencies is None: # avoid circular dependency when loading DirectoryDependency - from conda_lock._vendor.poetry.core.packages import DirectoryDependency - from conda_lock._vendor.poetry.core.packages import FileDependency - from conda_lock._vendor.poetry.core.packages import dependency_from_pep_508 + from conda_lock._vendor.poetry.core.packages.dependency import Dependency + from conda_lock._vendor.poetry.core.packages.directory_dependency import DirectoryDependency + from conda_lock._vendor.poetry.core.packages.file_dependency import FileDependency self._dependencies = [] for requirement in self.requires: dependency = None try: - dependency = dependency_from_pep_508(requirement) + dependency = Dependency.create_from_pep_508(requirement) except ValueError: # PEP 517 requires can be path if not PEP 508 path = Path(requirement) - try: + # compatibility Python < 3.8 + # https://docs.python.org/3/library/pathlib.html#methods + with suppress(OSError): if path.is_file(): - dependency = FileDependency( - name=canonicalize_name(path.name), path=path - ) + dependency = FileDependency(name=path.name, path=path) elif path.is_dir(): - dependency = DirectoryDependency( - name=canonicalize_name(path.name), path=path - ) - except OSError: - # compatibility Python < 
3.8 - # https://docs.python.org/3/library/pathlib.html#methods - pass + dependency = DirectoryDependency(name=path.name, path=path) if dependency is None: # skip since we could not determine requirement diff --git a/conda_lock/_vendor/poetry/core/pyproject/toml.py b/conda_lock/_vendor/poetry/core/pyproject/toml.py index c9fa6e60..7fc3d821 100644 --- a/conda_lock/_vendor/poetry/core/pyproject/toml.py +++ b/conda_lock/_vendor/poetry/core/pyproject/toml.py @@ -1,43 +1,45 @@ -from typing import Any -from typing import Optional -from typing import Union +from __future__ import annotations -from tomlkit.container import Container -from tomlkit.toml_document import TOMLDocument +from contextlib import suppress +from typing import TYPE_CHECKING +from typing import Any -from conda_lock._vendor.poetry.core.pyproject.exceptions import PyProjectException from conda_lock._vendor.poetry.core.pyproject.tables import BuildSystem -from conda_lock._vendor.poetry.core.toml import TOMLFile -from conda_lock._vendor.poetry.core.utils._compat import Path +from conda_lock._vendor.poetry.core.utils._compat import tomllib + + +if TYPE_CHECKING: + from pathlib import Path class PyProjectTOML: - def __init__(self, path): # type: (Union[str, Path]) -> None - self._file = TOMLFile(path=path) - self._data = None # type: Optional[TOMLDocument] - self._build_system = None # type: Optional[BuildSystem] - self._poetry_config = None # type: Optional[TOMLDocument] + def __init__(self, path: Path) -> None: + self._path = path + self._data: dict[str, Any] | None = None + self._build_system: BuildSystem | None = None @property - def file(self): # type: () -> TOMLFile - return self._file + def path(self) -> Path: + return self._path @property - def data(self): # type: () -> TOMLDocument + def data(self) -> dict[str, Any]: if self._data is None: - if not self._file.exists(): - self._data = TOMLDocument() + if not self.path.exists(): + self._data = {} else: - self._data = self._file.read() + with self.path.open("rb") as f: + self._data = tomllib.load(f) + return self._data @property - def build_system(self): # type: () -> BuildSystem + def build_system(self) -> BuildSystem: if self._build_system is None: build_backend = None requires = None - if not self._file.exists(): + if not self.path.exists(): build_backend = "poetry.core.masonry.api" requires = ["poetry-core"] @@ -46,45 +48,30 @@ def build_system(self): # type: () -> BuildSystem build_backend=container.get("build-backend", build_backend), requires=container.get("requires", requires), ) + return self._build_system @property - def poetry_config(self): # type: () -> Optional[TOMLDocument] - if self._poetry_config is None: - self._poetry_config = self.data.get("tool", {}).get("poetry") - if self._poetry_config is None: - raise PyProjectException( - "[tool.poetry] section not found in {}".format(self._file) - ) - return self._poetry_config - - def is_poetry_project(self): # type: () -> bool - if self.file.exists(): - try: + def poetry_config(self) -> dict[str, Any]: + try: + tool = self.data["tool"] + assert isinstance(tool, dict) + config = tool["poetry"] + assert isinstance(config, dict) + return config + except KeyError as e: + from conda_lock._vendor.poetry.core.pyproject.exceptions import PyProjectException + + raise PyProjectException( + f"[tool.poetry] section not found in {self._path.as_posix()}" + ) from e + + def is_poetry_project(self) -> bool: + from conda_lock._vendor.poetry.core.pyproject.exceptions import PyProjectException + + if self.path.exists(): + with 
suppress(PyProjectException): _ = self.poetry_config return True - except PyProjectException: - pass - return False - def __getattr__(self, item): # type: (str) -> Any - return getattr(self.data, item) - - def save(self): # type: () -> None - data = self.data - - if self._poetry_config is not None: - data["tool"]["poetry"] = self._poetry_config - - if self._build_system is not None: - if "build-system" not in data: - data["build-system"] = Container() - data["build-system"]["requires"] = self._build_system.requires - data["build-system"]["build-backend"] = self._build_system.build_backend - - self.file.write(data=data) - - def reload(self): # type: () -> None - self._data = None - self._build_system = None - self._poetry_config = None + return False diff --git a/conda_lock/_vendor/poetry/core/semver/__init__.py b/conda_lock/_vendor/poetry/core/semver/__init__.py deleted file mode 100644 index 2cff22d6..00000000 --- a/conda_lock/_vendor/poetry/core/semver/__init__.py +++ /dev/null @@ -1,151 +0,0 @@ -import re - -from typing import Union - -from .empty_constraint import EmptyConstraint -from .exceptions import ParseConstraintError -from .patterns import BASIC_CONSTRAINT -from .patterns import CARET_CONSTRAINT -from .patterns import TILDE_CONSTRAINT -from .patterns import TILDE_PEP440_CONSTRAINT -from .patterns import X_CONSTRAINT -from .version import Version -from .version_constraint import VersionConstraint -from .version_range import VersionRange -from .version_union import VersionUnion - - -VersionTypes = Union[Version, VersionRange, VersionUnion, EmptyConstraint] - - -def parse_constraint(constraints): # type: (str) -> VersionTypes - if constraints == "*": - return VersionRange() - - or_constraints = re.split(r"\s*\|\|?\s*", constraints.strip()) - or_groups = [] - for constraints in or_constraints: - and_constraints = re.split( - "(?<!^)(?<![=>< ,]) *(?<!-)[, ](?!-) *(?!,|$)", constraints - ) - constraint_objects = [] - - if len(and_constraints) >
1: - for constraint in and_constraints: - constraint_objects.append(parse_single_constraint(constraint)) - else: - constraint_objects.append(parse_single_constraint(and_constraints[0])) - - if len(constraint_objects) == 1: - constraint = constraint_objects[0] - else: - constraint = constraint_objects[0] - for next_constraint in constraint_objects[1:]: - constraint = constraint.intersect(next_constraint) - - or_groups.append(constraint) - - if len(or_groups) == 1: - return or_groups[0] - else: - return VersionUnion.of(*or_groups) - - -def parse_single_constraint(constraint): # type: (str) -> VersionTypes - m = re.match(r"(?i)^v?[xX*](\.[xX*])*$", constraint) - if m: - return VersionRange() - - # Tilde range - m = TILDE_CONSTRAINT.match(constraint) - if m: - version = Version.parse(m.group(1)) - - high = version.stable.next_minor - if len(m.group(1).split(".")) == 1: - high = version.stable.next_major - - return VersionRange(version, high, include_min=True) - - # PEP 440 Tilde range (~=) - m = TILDE_PEP440_CONSTRAINT.match(constraint) - if m: - precision = 1 - if m.group(3): - precision += 1 - - if m.group(4): - precision += 1 - - version = Version.parse(m.group(1)) - - if precision == 2: - high = version.stable.next_major - else: - high = version.stable.next_minor - - return VersionRange(version, high, include_min=True) - - # Caret range - m = CARET_CONSTRAINT.match(constraint) - if m: - version = Version.parse(m.group(1)) - - return VersionRange(version, version.next_breaking, include_min=True) - - # X Range - m = X_CONSTRAINT.match(constraint) - if m: - op = m.group(1) - major = int(m.group(2)) - minor = m.group(3) - - if minor is not None: - version = Version(major, int(minor), 0) - - result = VersionRange(version, version.next_minor, include_min=True) - else: - if major == 0: - result = VersionRange(max=Version(1, 0, 0)) - else: - version = Version(major, 0, 0) - - result = VersionRange(version, version.next_major, include_min=True) - - if op == "!=": - result = VersionRange().difference(result) - - return result - - # Basic comparator - m = BASIC_CONSTRAINT.match(constraint) - if m: - op = m.group(1) - version = m.group(2) - - if version == "dev": - version = "0.0-dev" - - try: - version = Version.parse(version) - except ValueError: - raise ValueError( - "Could not parse version constraint: {}".format(constraint) - ) - - if op == "<": - return VersionRange(max=version) - elif op == "<=": - return VersionRange(max=version, include_max=True) - elif op == ">": - return VersionRange(min=version) - elif op == ">=": - return VersionRange(min=version, include_min=True) - elif op == "!=": - return VersionUnion(VersionRange(max=version), VersionRange(min=version)) - else: - return version - - raise ParseConstraintError( - "Could not parse version constraint: {}".format(constraint) - ) diff --git a/conda_lock/_vendor/poetry/core/semver/empty_constraint.py b/conda_lock/_vendor/poetry/core/semver/empty_constraint.py deleted file mode 100644 index c463fa58..00000000 --- a/conda_lock/_vendor/poetry/core/semver/empty_constraint.py +++ /dev/null @@ -1,37 +0,0 @@ -from typing import TYPE_CHECKING - -from .version_constraint import VersionConstraint - - -if TYPE_CHECKING: - from . 
import VersionTypes # noqa - from .version import Version # noqa - - class EmptyConstraint(VersionConstraint): - def is_empty(self): # type: () -> bool - return True - - def is_any(self): # type: () -> bool - return False - - def allows(self, version): # type: ("Version") -> bool - return False - - def allows_all(self, other): # type: ("VersionTypes") -> bool - return other.is_empty() - - def allows_any(self, other): # type: ("VersionTypes") -> bool - return False - - def intersect(self, other): # type: ("VersionTypes") -> EmptyConstraint - return self - - def union(self, other): # type: ("VersionTypes") -> "VersionTypes" - return other - - def difference(self, other): # type: ("VersionTypes") -> EmptyConstraint - return self - - def __str__(self): # type: () -> str - return "<empty>" diff --git a/conda_lock/_vendor/poetry/core/semver/patterns.py b/conda_lock/_vendor/poetry/core/semver/patterns.py deleted file mode 100644 index 6cda2a30..00000000 --- a/conda_lock/_vendor/poetry/core/semver/patterns.py +++ /dev/null @@ -1,22 +0,0 @@ -import re - - -MODIFIERS = ( - "[._-]?" - r"((?!post)(?:beta|b|c|pre|RC|alpha|a|patch|pl|p|dev)(?:(?:[.-]?\d+)*)?)?" - r"([+-]?([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?" -) - -_COMPLETE_VERSION = r"v?(\d+)(?:\.(\d+))?(?:\.(\d+))?(?:\.(\d+))?{}(?:\+[^\s]+)?".format( - MODIFIERS -) - -COMPLETE_VERSION = re.compile("(?i)" + _COMPLETE_VERSION) - -CARET_CONSTRAINT = re.compile(r"(?i)^\^({})$".format(_COMPLETE_VERSION)) -TILDE_CONSTRAINT = re.compile(r"(?i)^~(?!=)\s*({})$".format(_COMPLETE_VERSION)) -TILDE_PEP440_CONSTRAINT = re.compile(r"(?i)^~=\s*({})$".format(_COMPLETE_VERSION)) -X_CONSTRAINT = re.compile(r"^(!=|==)?\s*v?(\d+)(?:\.(\d+))?(?:\.(\d+))?(?:\.[xX*])+$") -BASIC_CONSTRAINT = re.compile( - r"(?i)^(<>|!=|>=?|<=?|==?)?\s*({}|dev)".format(_COMPLETE_VERSION) -) diff --git a/conda_lock/_vendor/poetry/core/semver/version.py b/conda_lock/_vendor/poetry/core/semver/version.py deleted file mode 100644 index acd5f3e8..00000000 --- a/conda_lock/_vendor/poetry/core/semver/version.py +++ /dev/null @@ -1,476 +0,0 @@ -import re - -from typing import TYPE_CHECKING -from typing import List -from typing import Optional -from typing import Union - -from .empty_constraint import EmptyConstraint -from .exceptions import ParseVersionError -from .patterns import COMPLETE_VERSION -from .version_constraint import VersionConstraint -from .version_range import VersionRange -from .version_union import VersionUnion - - -if TYPE_CHECKING: - from . import VersionTypes # noqa - - -class Version(VersionRange): - """ - A parsed semantic version number. - """ - - def __init__( - self, - major, # type: int - minor=None, # type: Optional[int] - patch=None, # type: Optional[int] - rest=None, # type: Optional[int] - pre=None, # type: Optional[str] - build=None, # type: Optional[str] - text=None, # type: Optional[str] - precision=None, # type: Optional[int] - ): # type: (...)
-> None - self._major = int(major) - self._precision = None - if precision is None: - self._precision = 1 - - if minor is None: - minor = 0 - else: - if self._precision is not None: - self._precision += 1 - - self._minor = int(minor) - - if patch is None: - patch = 0 - else: - if self._precision is not None: - self._precision += 1 - - if rest is None: - rest = 0 - else: - if self._precision is not None: - self._precision += 1 - - if precision is not None: - self._precision = precision - - self._patch = int(patch) - self._rest = int(rest) - - if text is None: - parts = [str(major)] - if self._precision >= 2 or minor != 0: - parts.append(str(minor)) - - if self._precision >= 3 or patch != 0: - parts.append(str(patch)) - - if self._precision >= 4 or rest != 0: - parts.append(str(rest)) - - text = ".".join(parts) - if pre: - text += "-{}".format(pre) - - if build: - text += "+{}".format(build) - - self._text = text - - pre = self._normalize_prerelease(pre) - - self._prerelease = [] - if pre is not None: - self._prerelease = self._split_parts(pre) - - build = self._normalize_build(build) - - self._build = [] - if build is not None: - if build.startswith(("-", "+")): - build = build[1:] - - self._build = self._split_parts(build) - - @property - def major(self): # type: () -> int - return self._major - - @property - def minor(self): # type: () -> int - return self._minor - - @property - def patch(self): # type: () -> int - return self._patch - - @property - def rest(self): # type: () -> int - return self._rest - - @property - def prerelease(self): # type: () -> List[str] - return self._prerelease - - @property - def build(self): # type: () -> List[str] - return self._build - - @property - def text(self): # type: () -> str - return self._text - - @property - def precision(self): # type: () -> int - return self._precision - - @property - def stable(self): # type: () -> Version - if not self.is_prerelease(): - return self - - return self.next_patch - - @property - def next_major(self): # type: () -> Version - if self.is_prerelease() and self.minor == 0 and self.patch == 0: - return Version(self.major, self.minor, self.patch) - - return self._increment_major() - - @property - def next_minor(self): # type: () -> Version - if self.is_prerelease() and self.patch == 0: - return Version(self.major, self.minor, self.patch) - - return self._increment_minor() - - @property - def next_patch(self): # type: () -> Version - if self.is_prerelease(): - return Version(self.major, self.minor, self.patch) - - return self._increment_patch() - - @property - def next_breaking(self): # type: () -> Version - if self.major == 0: - if self.minor != 0: - return self._increment_minor() - - if self._precision == 1: - return self._increment_major() - elif self._precision == 2: - return self._increment_minor() - - return self._increment_patch() - - return self._increment_major() - - @property - def first_prerelease(self): # type: () -> Version - return Version.parse( - "{}.{}.{}-alpha.0".format(self.major, self.minor, self.patch) - ) - - @property - def min(self): # type: () -> Version - return self - - @property - def max(self): # type: () -> Version - return self - - @property - def full_max(self): # type: () -> Version - return self - - @property - def include_min(self): # type: () -> bool - return True - - @property - def include_max(self): # type: () -> bool - return True - - @classmethod - def parse(cls, text): # type: (str) -> Version - try: - match = COMPLETE_VERSION.match(text) - except TypeError: - match = None - - if 
match is None: - raise ParseVersionError('Unable to parse "{}".'.format(text)) - - text = text.rstrip(".") - - major = int(match.group(1)) - minor = int(match.group(2)) if match.group(2) else None - patch = int(match.group(3)) if match.group(3) else None - rest = int(match.group(4)) if match.group(4) else None - - pre = match.group(5) - build = match.group(6) - - if build: - build = build.lstrip("+") - - return Version(major, minor, patch, rest, pre, build, text) - - def is_any(self): # type: () -> bool - return False - - def is_empty(self): # type: () -> bool - return False - - def is_prerelease(self): # type: () -> bool - return len(self._prerelease) > 0 - - def allows(self, version): # type: (Version) -> bool - return self == version - - def allows_all(self, other): # type: ("VersionTypes") -> bool - return other.is_empty() or other == self - - def allows_any(self, other): # type: ("VersionTypes") -> bool - return other.allows(self) - - def intersect( - self, other - ): # type: ("VersionTypes") -> Union[Version, EmptyConstraint] - if other.allows(self): - return self - - return EmptyConstraint() - - def union(self, other): # type: ("VersionTypes") -> "VersionTypes" - from .version_range import VersionRange - - if other.allows(self): - return other - - if isinstance(other, VersionRange): - if other.min == self: - return VersionRange( - other.min, - other.max, - include_min=True, - include_max=other.include_max, - ) - - if other.max == self: - return VersionRange( - other.min, - other.max, - include_min=other.include_min, - include_max=True, - ) - - return VersionUnion.of(self, other) - - def difference( - self, other - ): # type: ("VersionTypes") -> Union[Version, EmptyConstraint] - if other.allows(self): - return EmptyConstraint() - - return self - - def equals_without_prerelease(self, other): # type: (Version) -> bool - return ( - self.major == other.major - and self.minor == other.minor - and self.patch == other.patch - ) - - def _increment_major(self): # type: () -> Version - return Version(self.major + 1, 0, 0, precision=self._precision) - - def _increment_minor(self): # type: () -> Version - return Version(self.major, self.minor + 1, 0, precision=self._precision) - - def _increment_patch(self): # type: () -> Version - return Version( - self.major, self.minor, self.patch + 1, precision=self._precision - ) - - def _normalize_prerelease(self, pre): # type: (str) -> Optional[str] - if not pre: - return - - m = re.match(r"(?i)^(a|alpha|b|beta|c|pre|rc|dev)[-.]?(\d+)?$", pre) - if not m: - return - - modifier = m.group(1) - number = m.group(2) - - if number is None: - number = 0 - - if modifier == "a": - modifier = "alpha" - elif modifier == "b": - modifier = "beta" - elif modifier in {"c", "pre"}: - modifier = "rc" - elif modifier == "dev": - modifier = "alpha" - - return "{}.{}".format(modifier, number) - - def _normalize_build(self, build): # type: (str) -> Optional[str] - if not build: - return - - if build.startswith("post"): - build = build.lstrip("post") - - if not build: - return - - return build - - def _split_parts(self, text): # type: (str) -> List[Union[str, int]] - parts = text.split(".") - - for i, part in enumerate(parts): - try: - parts[i] = int(part) - except (TypeError, ValueError): - continue - - return parts - - def __lt__(self, other): # type: (Version) -> int - return self._cmp(other) < 0 - - def __le__(self, other): # type: (Version) -> int - return self._cmp(other) <= 0 - - def __gt__(self, other): # type: (Version) -> int - return self._cmp(other) > 0 - - def 
__ge__(self, other): # type: (Version) -> int - return self._cmp(other) >= 0 - - def _cmp(self, other): # type: (Version) -> int - if not isinstance(other, VersionConstraint): - return NotImplemented - - if not isinstance(other, Version): - return -other._cmp(self) - - if self.major != other.major: - return self._cmp_parts(self.major, other.major) - - if self.minor != other.minor: - return self._cmp_parts(self.minor, other.minor) - - if self.patch != other.patch: - return self._cmp_parts(self.patch, other.patch) - - if self.rest != other.rest: - return self._cmp_parts(self.rest, other.rest) - - # Pre-releases always come before no pre-release string. - if not self.is_prerelease() and other.is_prerelease(): - return 1 - - if not other.is_prerelease() and self.is_prerelease(): - return -1 - - comparison = self._cmp_lists(self.prerelease, other.prerelease) - if comparison != 0: - return comparison - - # Builds always come after no build string. - if not self.build and other.build: - return -1 - - if not other.build and self.build: - return 1 - - return self._cmp_lists(self.build, other.build) - - def _cmp_parts(self, a, b): # type: (Optional[int], Optional[int]) -> int - if a < b: - return -1 - elif a > b: - return 1 - - return 0 - - def _cmp_lists(self, a, b): # type: (List, List) -> int - for i in range(max(len(a), len(b))): - a_part = None - if i < len(a): - a_part = a[i] - - b_part = None - if i < len(b): - b_part = b[i] - - if a_part == b_part: - continue - - # Missing parts come after present ones. - if a_part is None: - return -1 - - if b_part is None: - return 1 - - if isinstance(a_part, int): - if isinstance(b_part, int): - return self._cmp_parts(a_part, b_part) - - return -1 - else: - if isinstance(b_part, int): - return 1 - - return self._cmp_parts(a_part, b_part) - - return 0 - - def __eq__(self, other): # type: (Version) -> bool - if not isinstance(other, Version): - return NotImplemented - - return ( - self._major == other.major - and self._minor == other.minor - and self._patch == other.patch - and self._rest == other.rest - and self._prerelease == other.prerelease - and self._build == other.build - ) - - def __ne__(self, other): # type: ("VersionTypes") -> bool - return not self == other - - def __str__(self): # type: () -> str - return self._text - - def __repr__(self): # type: () -> str - return "<Version {}>".format(str(self)) - - def __hash__(self): # type: () -> int - return hash( - ( - self.major, - self.minor, - self.patch, - ".".join(str(p) for p in self.prerelease), - ".".join(str(p) for p in self.build), - ) - ) diff --git a/conda_lock/_vendor/poetry/core/semver/version_constraint.py b/conda_lock/_vendor/poetry/core/semver/version_constraint.py deleted file mode 100644 index 4e5f73e4..00000000 --- a/conda_lock/_vendor/poetry/core/semver/version_constraint.py +++ /dev/null @@ -1,31 +0,0 @@ -from typing import TYPE_CHECKING - - -if TYPE_CHECKING: - from conda_lock._vendor.poetry.core.semver import Version # noqa - - -class VersionConstraint: - def is_empty(self): # type: () -> bool - raise NotImplementedError() - - def is_any(self): # type: () -> bool - raise NotImplementedError() - - def allows(self, version): # type: ("Version") -> bool - raise NotImplementedError() - - def allows_all(self, other): # type: (VersionConstraint) -> bool - raise NotImplementedError() - - def allows_any(self, other): # type: (VersionConstraint) -> bool - raise NotImplementedError() - - def intersect(self, other): # type: (VersionConstraint) -> VersionConstraint - raise NotImplementedError() - - def
union(self, other): # type: (VersionConstraint) -> VersionConstraint - raise NotImplementedError() - - def difference(self, other): # type: (VersionConstraint) -> VersionConstraint - raise NotImplementedError() diff --git a/conda_lock/_vendor/poetry/core/semver/version_union.py b/conda_lock/_vendor/poetry/core/semver/version_union.py deleted file mode 100644 index 50a597db..00000000 --- a/conda_lock/_vendor/poetry/core/semver/version_union.py +++ /dev/null @@ -1,268 +0,0 @@ -from typing import TYPE_CHECKING -from typing import Any -from typing import List - -from .empty_constraint import EmptyConstraint -from .version_constraint import VersionConstraint - - -if TYPE_CHECKING: - from . import VersionTypes # noqa - from .version import Version - from .version_range import VersionRange - - -class VersionUnion(VersionConstraint): - """ - A version constraint representing a union of multiple disjoint version - ranges. - - An instance of this will only be created if the version can't be represented - as a non-compound value. - """ - - def __init__(self, *ranges): # type: (*"VersionRange") -> None - self._ranges = list(ranges) - - @property - def ranges(self): # type: () -> List["VersionRange"] - return self._ranges - - @classmethod - def of(cls, *ranges): # type: (*"VersionTypes") -> "VersionTypes" - from .version_range import VersionRange - - flattened = [] - for constraint in ranges: - if constraint.is_empty(): - continue - - if isinstance(constraint, VersionUnion): - flattened += constraint.ranges - continue - - flattened.append(constraint) - - if not flattened: - return EmptyConstraint() - - if any([constraint.is_any() for constraint in flattened]): - return VersionRange() - - # Only allow Versions and VersionRanges here so we can more easily reason - # about everything in flattened. _EmptyVersions and VersionUnions are - # filtered out above. - for constraint in flattened: - if isinstance(constraint, VersionRange): - continue - - raise ValueError("Unknown VersionConstraint type {}.".format(constraint)) - - flattened.sort() - - merged = [] - for constraint in flattened: - # Merge this constraint with the previous one, but only if they touch. 
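The deleted `VersionUnion.of` folds each sorted range into the previous merged one whenever the two overlap or sit adjacent. A minimal standalone sketch of that folding step on integer intervals (the `Interval` alias, the `merge_touching` name, and the tuple representation are illustrative assumptions, not poetry-core's API):

```python
from typing import List, Tuple

Interval = Tuple[int, int]  # inclusive [lo, hi] endpoints


def merge_touching(intervals: List[Interval]) -> List[Interval]:
    """Union of intervals, merging any that overlap or are adjacent."""
    merged: List[Interval] = []
    for lo, hi in sorted(intervals):
        if merged and lo <= merged[-1][1] + 1:
            # Touches the previous interval: extend it in place, mirroring
            # the allows_any()/is_adjacent_to() test in the surrounding diff.
            merged[-1] = (merged[-1][0], max(merged[-1][1], hi))
        else:
            merged.append((lo, hi))
    return merged


assert merge_touching([(5, 7), (1, 3), (4, 4)]) == [(1, 7)]
assert merge_touching([(1, 2), (4, 9)]) == [(1, 2), (4, 9)]
```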
- if not merged or ( - not merged[-1].allows_any(constraint) - and not merged[-1].is_adjacent_to(constraint) - ): - merged.append(constraint) - else: - merged[-1] = merged[-1].union(constraint) - - if len(merged) == 1: - return merged[0] - - return VersionUnion(*merged) - - def is_empty(self): # type: () -> bool - return False - - def is_any(self): # type: () -> bool - return False - - def allows(self, version): # type: ("Version") -> bool - return any([constraint.allows(version) for constraint in self._ranges]) - - def allows_all(self, other): # type: ("VersionTypes") -> bool - our_ranges = iter(self._ranges) - their_ranges = iter(self._ranges_for(other)) - - our_current_range = next(our_ranges, None) - their_current_range = next(their_ranges, None) - - while our_current_range and their_current_range: - if our_current_range.allows_all(their_current_range): - their_current_range = next(their_ranges, None) - else: - our_current_range = next(our_ranges, None) - - return their_current_range is None - - def allows_any(self, other): # type: ("VersionTypes") -> bool - our_ranges = iter(self._ranges) - their_ranges = iter(self._ranges_for(other)) - - our_current_range = next(our_ranges, None) - their_current_range = next(their_ranges, None) - - while our_current_range and their_current_range: - if our_current_range.allows_any(their_current_range): - return True - - if their_current_range.allows_higher(our_current_range): - our_current_range = next(our_ranges, None) - else: - their_current_range = next(their_ranges, None) - - return False - - def intersect(self, other): # type: ("VersionTypes") -> "VersionTypes" - our_ranges = iter(self._ranges) - their_ranges = iter(self._ranges_for(other)) - new_ranges = [] - - our_current_range = next(our_ranges, None) - their_current_range = next(their_ranges, None) - - while our_current_range and their_current_range: - intersection = our_current_range.intersect(their_current_range) - - if not intersection.is_empty(): - new_ranges.append(intersection) - - if their_current_range.allows_higher(our_current_range): - our_current_range = next(our_ranges, None) - else: - their_current_range = next(their_ranges, None) - - return VersionUnion.of(*new_ranges) - - def union(self, other): # type: ("VersionTypes") -> "VersionTypes" - return VersionUnion.of(self, other) - - def difference(self, other): # type: ("VersionTypes") -> "VersionTypes" - our_ranges = iter(self._ranges) - their_ranges = iter(self._ranges_for(other)) - new_ranges = [] - - state = { - "current": next(our_ranges, None), - "their_range": next(their_ranges, None), - } - - def their_next_range(): # type: () -> bool - state["their_range"] = next(their_ranges, None) - if state["their_range"]: - return True - - new_ranges.append(state["current"]) - our_current = next(our_ranges, None) - while our_current: - new_ranges.append(our_current) - our_current = next(our_ranges, None) - - return False - - def our_next_range(include_current=True): # type: (bool) -> bool - if include_current: - new_ranges.append(state["current"]) - - our_current = next(our_ranges, None) - if not our_current: - return False - - state["current"] = our_current - - return True - - while True: - if state["their_range"] is None: - break - - if state["their_range"].is_strictly_lower(state["current"]): - if not their_next_range(): - break - - continue - - if state["their_range"].is_strictly_higher(state["current"]): - if not our_next_range(): - break - - continue - - difference = state["current"].difference(state["their_range"]) - if 
isinstance(difference, VersionUnion): - assert len(difference.ranges) == 2 - new_ranges.append(difference.ranges[0]) - state["current"] = difference.ranges[-1] - - if not their_next_range(): - break - elif difference.is_empty(): - if not our_next_range(False): - break - else: - state["current"] = difference - - if state["current"].allows_higher(state["their_range"]): - if not their_next_range(): - break - else: - if not our_next_range(): - break - - if not new_ranges: - return EmptyConstraint() - - if len(new_ranges) == 1: - return new_ranges[0] - - return VersionUnion.of(*new_ranges) - - def _ranges_for(self, constraint): # type: ("VersionTypes") -> List["VersionRange"] - from .version_range import VersionRange - - if constraint.is_empty(): - return [] - - if isinstance(constraint, VersionUnion): - return constraint.ranges - - if isinstance(constraint, VersionRange): - return [constraint] - - raise ValueError("Unknown VersionConstraint type {}".format(constraint)) - - def excludes_single_version(self): # type: () -> bool - from .version import Version - from .version_range import VersionRange - - return isinstance(VersionRange().difference(self), Version) - - def __eq__(self, other): # type: (Any) -> bool - if not isinstance(other, VersionUnion): - return False - - return self._ranges == other.ranges - - def __hash__(self): # type: () -> int - h = hash(self._ranges[0]) - - for range in self._ranges[1:]: - h ^= hash(range) - - return h - - def __str__(self): # type: () -> str - from .version_range import VersionRange - - if self.excludes_single_version(): - return "!={}".format(VersionRange().difference(self)) - - return " || ".join([str(r) for r in self._ranges]) - - def __repr__(self): # type: () -> str - return "<VersionUnion {}>".format(str(self)) diff --git a/conda_lock/_vendor/poetry/core/spdx/__init__.py b/conda_lock/_vendor/poetry/core/spdx/__init__.py index 713aa30d..e69de29b 100644 --- a/conda_lock/_vendor/poetry/core/spdx/__init__.py +++ b/conda_lock/_vendor/poetry/core/spdx/__init__.py @@ -1,57 +0,0 @@ -import json -import os - -from io import open -from typing import Dict -from typing import Optional - -from .license import License -from .updater import Updater - - -_licenses = None # type: Optional[Dict[str, License]] - - -def license_by_id(identifier): # type: (str) -> License - if _licenses is None: - load_licenses() - - id = identifier.lower() - - if id not in _licenses: - if not identifier: - raise ValueError("A license identifier is required") - return License(identifier, identifier, False, False) - - return _licenses[id] - - -def load_licenses(): # type: () -> None - global _licenses - - _licenses = {} - - licenses_file = os.path.join(os.path.dirname(__file__), "data", "licenses.json") - - with open(licenses_file, encoding="utf-8") as f: - data = json.loads(f.read()) - - for name, license_info in data.items(): - license = License(name, license_info[0], license_info[1], license_info[2]) - _licenses[name.lower()] = license - - full_name = license_info[0].lower() - if full_name in _licenses: - existing_license = _licenses[full_name] - if not existing_license.is_deprecated: - continue - - _licenses[full_name] = license - - # Add a Proprietary license for non-standard licenses - _licenses["proprietary"] = License("Proprietary", "Proprietary", False, False) - - -if __name__ == "__main__": - updater = Updater() - updater.dump() diff --git a/conda_lock/_vendor/poetry/core/spdx/data/licenses.json b/conda_lock/_vendor/poetry/core/spdx/data/licenses.json index b598305b..bc7885bc 100644 ---
a/conda_lock/_vendor/poetry/core/spdx/data/licenses.json +++ b/conda_lock/_vendor/poetry/core/spdx/data/licenses.json @@ -1,7 +1,7 @@ { "0BSD": [ "BSD Zero Clause License", - false, + true, false ], "AAL": [ @@ -42,6 +42,16 @@ "AGPL-1.0": [ "Affero General Public License v1.0", false, + true + ], + "AGPL-1.0-only": [ + "Affero General Public License v1.0 only", + false, + false + ], + "AGPL-1.0-or-later": [ + "Affero General Public License v1.0 or later", + false, false ], "AGPL-3.0": [ @@ -69,6 +79,11 @@ false, false ], + "AML-glslang": [ + "AML glslang variant License", + false, + false + ], "AMPAS": [ "Academy of Motion Picture Arts and Sciences BSD", false, @@ -79,6 +94,11 @@ false, false ], + "ANTLR-PD-fallback": [ + "ANTLR Software Rights Notice with license fallback", + false, + false + ], "APAFML": [ "Adobe Postscript AFM License", false, @@ -109,21 +129,46 @@ true, false ], + "ASWF-Digital-Assets-1.0": [ + "ASWF Digital Assets License version 1.0", + false, + false + ], + "ASWF-Digital-Assets-1.1": [ + "ASWF Digital Assets License 1.1", + false, + false + ], "Abstyles": [ "Abstyles License", false, false ], + "AdaCore-doc": [ + "AdaCore Doc License", + false, + false + ], "Adobe-2006": [ "Adobe Systems Incorporated Source Code License Agreement", false, false ], + "Adobe-Display-PostScript": [ + "Adobe Display PostScript License", + false, + false + ], "Adobe-Glyph": [ "Adobe Glyph List License", false, false ], + "Adobe-Utopia": [ + "Adobe Utopia Font License", + false, + false + ], "Afmparse": [ "Afmparse License", false, @@ -149,6 +194,16 @@ true, false ], + "App-s2p": [ + "App::s2p License", + false, + false + ], + "Arphic-1999": [ + "Arphic Public License", + false, + false + ], "Artistic-1.0": [ "Artistic License 1.0", true, @@ -171,7 +226,7 @@ ], "BSD-1-Clause": [ "BSD 1-Clause License", - false, + true, false ], "BSD-2-Clause": [ @@ -182,18 +237,23 @@ "BSD-2-Clause-FreeBSD": [ "BSD 2-Clause FreeBSD License", false, - false + true ], "BSD-2-Clause-NetBSD": [ "BSD 2-Clause NetBSD License", false, - false + true ], "BSD-2-Clause-Patent": [ "BSD-2-Clause Plus Patent License", true, false ], + "BSD-2-Clause-Views": [ + "BSD 2-Clause with views sentence", + false, + false + ], "BSD-3-Clause": [ "BSD 3-Clause \"New\" or \"Revised\" License", true, @@ -209,8 +269,23 @@ false, false ], + "BSD-3-Clause-HP": [ + "Hewlett-Packard BSD variant license", + false, + false + ], "BSD-3-Clause-LBNL": [ "Lawrence Berkeley National Labs BSD variant license", + true, + false + ], + "BSD-3-Clause-Modification": [ + "BSD 3-Clause Modification", + false, + false + ], + "BSD-3-Clause-No-Military-License": [ + "BSD 3-Clause No Military License", false, false ], @@ -229,16 +304,66 @@ false, false ], + "BSD-3-Clause-Open-MPI": [ + "BSD 3-Clause Open MPI variant", + false, + false + ], + "BSD-3-Clause-Sun": [ + "BSD 3-Clause Sun Microsystems", + false, + false + ], + "BSD-3-Clause-acpica": [ + "BSD 3-Clause acpica variant", + false, + false + ], + "BSD-3-Clause-flex": [ + "BSD 3-Clause Flex variant", + false, + false + ], "BSD-4-Clause": [ "BSD 4-Clause \"Original\" or \"Old\" License", false, false ], + "BSD-4-Clause-Shortened": [ + "BSD 4 Clause Shortened", + false, + false + ], "BSD-4-Clause-UC": [ "BSD-4-Clause (University of California-Specific)", false, false ], + "BSD-4.3RENO": [ + "BSD 4.3 RENO License", + false, + false + ], + "BSD-4.3TAHOE": [ + "BSD 4.3 TAHOE License", + false, + false + ], + "BSD-Advertising-Acknowledgement": [ + "BSD Advertising Acknowledgement License", + false, + false + 
], + "BSD-Attribution-HPND-disclaimer": [ + "BSD with Attribution and HPND disclaimer", + false, + false + ], + "BSD-Inferno-Nettverk": [ + "BSD-Inferno-Nettverk", + false, + false + ], "BSD-Protection": [ "BSD Protection License", false, @@ -249,11 +374,36 @@ false, false ], + "BSD-Source-beginning-file": [ + "BSD Source Code Attribution - beginning of file variant", + false, + false + ], + "BSD-Systemics": [ + "Systemics BSD variant license", + false, + false + ], + "BSD-Systemics-W3Works": [ + "Systemics W3Works BSD variant license", + false, + false + ], "BSL-1.0": [ "Boost Software License 1.0", true, false ], + "BUSL-1.1": [ + "Business Source License 1.1", + false, + false + ], + "Baekmuk": [ + "Baekmuk License", + false, + false + ], "Bahyph": [ "Bahyph License", false, @@ -279,163 +429,313 @@ false, false ], + "Bitstream-Charter": [ + "Bitstream Charter Font License", + false, + false + ], + "Bitstream-Vera": [ + "Bitstream Vera Font License", + false, + false + ], + "BlueOak-1.0.0": [ + "Blue Oak Model License 1.0.0", + true, + false + ], + "Boehm-GC": [ + "Boehm-Demers-Weiser GC License", + false, + false + ], "Borceux": [ "Borceux license", false, false ], + "Brian-Gladman-3-Clause": [ + "Brian Gladman 3-Clause License", + false, + false + ], + "C-UDA-1.0": [ + "Computational Use of Data Agreement v1.0", + false, + false + ], + "CAL-1.0": [ + "Cryptographic Autonomy License 1.0", + true, + false + ], + "CAL-1.0-Combined-Work-Exception": [ + "Cryptographic Autonomy License 1.0 (Combined Work Exception)", + true, + false + ], "CATOSL-1.1": [ "Computer Associates Trusted Open Source License 1.1", true, false ], "CC-BY-1.0": [ - "Creative Commons Attribution 1.0", + "Creative Commons Attribution 1.0 Generic", false, false ], "CC-BY-2.0": [ - "Creative Commons Attribution 2.0", + "Creative Commons Attribution 2.0 Generic", false, false ], "CC-BY-2.5": [ - "Creative Commons Attribution 2.5", + "Creative Commons Attribution 2.5 Generic", + false, + false + ], + "CC-BY-2.5-AU": [ + "Creative Commons Attribution 2.5 Australia", false, false ], "CC-BY-3.0": [ - "Creative Commons Attribution 3.0", + "Creative Commons Attribution 3.0 Unported", + false, + false + ], + "CC-BY-3.0-AT": [ + "Creative Commons Attribution 3.0 Austria", + false, + false + ], + "CC-BY-3.0-AU": [ + "Creative Commons Attribution 3.0 Australia", + false, + false + ], + "CC-BY-3.0-DE": [ + "Creative Commons Attribution 3.0 Germany", + false, + false + ], + "CC-BY-3.0-IGO": [ + "Creative Commons Attribution 3.0 IGO", + false, + false + ], + "CC-BY-3.0-NL": [ + "Creative Commons Attribution 3.0 Netherlands", + false, + false + ], + "CC-BY-3.0-US": [ + "Creative Commons Attribution 3.0 United States", false, false ], "CC-BY-4.0": [ - "Creative Commons Attribution 4.0", + "Creative Commons Attribution 4.0 International", false, false ], "CC-BY-NC-1.0": [ - "Creative Commons Attribution Non Commercial 1.0", + "Creative Commons Attribution Non Commercial 1.0 Generic", false, false ], "CC-BY-NC-2.0": [ - "Creative Commons Attribution Non Commercial 2.0", + "Creative Commons Attribution Non Commercial 2.0 Generic", false, false ], "CC-BY-NC-2.5": [ - "Creative Commons Attribution Non Commercial 2.5", + "Creative Commons Attribution Non Commercial 2.5 Generic", false, false ], "CC-BY-NC-3.0": [ - "Creative Commons Attribution Non Commercial 3.0", + "Creative Commons Attribution Non Commercial 3.0 Unported", + false, + false + ], + "CC-BY-NC-3.0-DE": [ + "Creative Commons Attribution Non Commercial 3.0 Germany", false, false 
], "CC-BY-NC-4.0": [ - "Creative Commons Attribution Non Commercial 4.0", + "Creative Commons Attribution Non Commercial 4.0 International", false, false ], "CC-BY-NC-ND-1.0": [ - "Creative Commons Attribution Non Commercial No Derivatives 1.0", + "Creative Commons Attribution Non Commercial No Derivatives 1.0 Generic", false, false ], "CC-BY-NC-ND-2.0": [ - "Creative Commons Attribution Non Commercial No Derivatives 2.0", + "Creative Commons Attribution Non Commercial No Derivatives 2.0 Generic", false, false ], "CC-BY-NC-ND-2.5": [ - "Creative Commons Attribution Non Commercial No Derivatives 2.5", + "Creative Commons Attribution Non Commercial No Derivatives 2.5 Generic", false, false ], "CC-BY-NC-ND-3.0": [ - "Creative Commons Attribution Non Commercial No Derivatives 3.0", + "Creative Commons Attribution Non Commercial No Derivatives 3.0 Unported", + false, + false + ], + "CC-BY-NC-ND-3.0-DE": [ + "Creative Commons Attribution Non Commercial No Derivatives 3.0 Germany", + false, + false + ], + "CC-BY-NC-ND-3.0-IGO": [ + "Creative Commons Attribution Non Commercial No Derivatives 3.0 IGO", false, false ], "CC-BY-NC-ND-4.0": [ - "Creative Commons Attribution Non Commercial No Derivatives 4.0", + "Creative Commons Attribution Non Commercial No Derivatives 4.0 International", false, false ], "CC-BY-NC-SA-1.0": [ - "Creative Commons Attribution Non Commercial Share Alike 1.0", + "Creative Commons Attribution Non Commercial Share Alike 1.0 Generic", false, false ], "CC-BY-NC-SA-2.0": [ - "Creative Commons Attribution Non Commercial Share Alike 2.0", + "Creative Commons Attribution Non Commercial Share Alike 2.0 Generic", + false, + false + ], + "CC-BY-NC-SA-2.0-DE": [ + "Creative Commons Attribution Non Commercial Share Alike 2.0 Germany", + false, + false + ], + "CC-BY-NC-SA-2.0-FR": [ + "Creative Commons Attribution-NonCommercial-ShareAlike 2.0 France", + false, + false + ], + "CC-BY-NC-SA-2.0-UK": [ + "Creative Commons Attribution Non Commercial Share Alike 2.0 England and Wales", false, false ], "CC-BY-NC-SA-2.5": [ - "Creative Commons Attribution Non Commercial Share Alike 2.5", + "Creative Commons Attribution Non Commercial Share Alike 2.5 Generic", false, false ], "CC-BY-NC-SA-3.0": [ - "Creative Commons Attribution Non Commercial Share Alike 3.0", + "Creative Commons Attribution Non Commercial Share Alike 3.0 Unported", + false, + false + ], + "CC-BY-NC-SA-3.0-DE": [ + "Creative Commons Attribution Non Commercial Share Alike 3.0 Germany", + false, + false + ], + "CC-BY-NC-SA-3.0-IGO": [ + "Creative Commons Attribution Non Commercial Share Alike 3.0 IGO", false, false ], "CC-BY-NC-SA-4.0": [ - "Creative Commons Attribution Non Commercial Share Alike 4.0", + "Creative Commons Attribution Non Commercial Share Alike 4.0 International", false, false ], "CC-BY-ND-1.0": [ - "Creative Commons Attribution No Derivatives 1.0", + "Creative Commons Attribution No Derivatives 1.0 Generic", false, false ], "CC-BY-ND-2.0": [ - "Creative Commons Attribution No Derivatives 2.0", + "Creative Commons Attribution No Derivatives 2.0 Generic", false, false ], "CC-BY-ND-2.5": [ - "Creative Commons Attribution No Derivatives 2.5", + "Creative Commons Attribution No Derivatives 2.5 Generic", false, false ], "CC-BY-ND-3.0": [ - "Creative Commons Attribution No Derivatives 3.0", + "Creative Commons Attribution No Derivatives 3.0 Unported", + false, + false + ], + "CC-BY-ND-3.0-DE": [ + "Creative Commons Attribution No Derivatives 3.0 Germany", false, false ], "CC-BY-ND-4.0": [ - "Creative Commons Attribution 
No Derivatives 4.0", + "Creative Commons Attribution No Derivatives 4.0 International", false, false ], "CC-BY-SA-1.0": [ - "Creative Commons Attribution Share Alike 1.0", + "Creative Commons Attribution Share Alike 1.0 Generic", false, false ], "CC-BY-SA-2.0": [ - "Creative Commons Attribution Share Alike 2.0", + "Creative Commons Attribution Share Alike 2.0 Generic", + false, + false + ], + "CC-BY-SA-2.0-UK": [ + "Creative Commons Attribution Share Alike 2.0 England and Wales", + false, + false + ], + "CC-BY-SA-2.1-JP": [ + "Creative Commons Attribution Share Alike 2.1 Japan", false, false ], "CC-BY-SA-2.5": [ - "Creative Commons Attribution Share Alike 2.5", + "Creative Commons Attribution Share Alike 2.5 Generic", false, false ], "CC-BY-SA-3.0": [ - "Creative Commons Attribution Share Alike 3.0", + "Creative Commons Attribution Share Alike 3.0 Unported", + false, + false + ], + "CC-BY-SA-3.0-AT": [ + "Creative Commons Attribution Share Alike 3.0 Austria", + false, + false + ], + "CC-BY-SA-3.0-DE": [ + "Creative Commons Attribution Share Alike 3.0 Germany", + false, + false + ], + "CC-BY-SA-3.0-IGO": [ + "Creative Commons Attribution-ShareAlike 3.0 IGO", false, false ], "CC-BY-SA-4.0": [ - "Creative Commons Attribution Share Alike 4.0", + "Creative Commons Attribution Share Alike 4.0 International", + false, + false + ], + "CC-PDDC": [ + "Creative Commons Public Domain Dedication and Certification", false, false ], @@ -454,11 +754,21 @@ false, false ], + "CDL-1.0": [ + "Common Documentation License 1.0", + false, + false + ], "CDLA-Permissive-1.0": [ "Community Data License Agreement Permissive 1.0", false, false ], + "CDLA-Permissive-2.0": [ + "Community Data License Agreement Permissive 2.0", + false, + false + ], "CDLA-Sharing-1.0": [ "Community Data License Agreement Sharing 1.0", false, @@ -494,6 +804,41 @@ false, false ], + "CERN-OHL-1.1": [ + "CERN Open Hardware Licence v1.1", + false, + false + ], + "CERN-OHL-1.2": [ + "CERN Open Hardware Licence v1.2", + false, + false + ], + "CERN-OHL-P-2.0": [ + "CERN Open Hardware Licence Version 2 - Permissive", + true, + false + ], + "CERN-OHL-S-2.0": [ + "CERN Open Hardware Licence Version 2 - Strongly Reciprocal", + true, + false + ], + "CERN-OHL-W-2.0": [ + "CERN Open Hardware Licence Version 2 - Weakly Reciprocal", + true, + false + ], + "CFITSIO": [ + "CFITSIO License", + false, + false + ], + "CMU-Mach": [ + "CMU Mach License", + false, + false + ], "CNRI-Jython": [ "CNRI Jython License", false, @@ -509,6 +854,11 @@ false, false ], + "COIL-1.0": [ + "Copyfree Open Innovation License", + false, + false + ], "CPAL-1.0": [ "Common Public Attribution License 1.0", true, @@ -534,16 +884,41 @@ false, false ], + "Caldera-no-preamble": [ + "Caldera License (without preamble)", + false, + false + ], "ClArtistic": [ "Clarified Artistic License", false, false ], + "Clips": [ + "Clips License", + false, + false + ], + "Community-Spec-1.0": [ + "Community Specification License 1.0", + false, + false + ], "Condor-1.1": [ "Condor Public License v1.1", false, false ], + "Cornell-Lossless-JPEG": [ + "Cornell Lossless JPEG License", + false, + false + ], + "Cronyx": [ + "Cronyx License", + false, + false + ], "Crossword": [ "Crossword License", false, @@ -564,11 +939,36 @@ false, false ], + "DEC-3-Clause": [ + "DEC 3-Clause License", + false, + false + ], + "DL-DE-BY-2.0": [ + "Data licence Germany – attribution – version 2.0", + false, + false + ], + "DL-DE-ZERO-2.0": [ + "Data licence Germany – zero – version 2.0", + false, + false + ], "DOC": [ 
"DOC License", false, false ], + "DRL-1.0": [ + "Detection Rule License 1.0", + false, + false + ], + "DRL-1.1": [ + "Detection Rule License 1.1", + false, + false + ], "DSDP": [ "DSDP License", false, @@ -599,6 +999,11 @@ true, false ], + "EPICS": [ + "EPICS Open License", + false, + false + ], "EPL-1.0": [ "Eclipse Public License 1.0", true, @@ -629,6 +1034,11 @@ true, false ], + "Elastic-2.0": [ + "Elastic License 2.0", + false, + false + ], "Entessa": [ "Entessa Public License v1.0", true, @@ -644,8 +1054,23 @@ false, false ], - "FSFAP": [ - "FSF All Permissive License", + "FBM": [ + "Fuzzy Bitmap License", + false, + false + ], + "FDK-AAC": [ + "Fraunhofer FDK AAC Codec Library", + false, + false + ], + "FSFAP": [ + "FSF All Permissive License", + false, + false + ], + "FSFAP-no-warranty-disclaimer": [ + "FSF All Permissive License (without Warranty)", false, false ], @@ -659,6 +1084,11 @@ false, false ], + "FSFULLRWD": [ + "FSF Unlimited License (With License Retention and Warranty Disclaimer)", + false, + false + ], "FTL": [ "Freetype Project License", false, @@ -669,21 +1099,66 @@ true, false ], + "Ferguson-Twofish": [ + "Ferguson Twofish License", + false, + false + ], "Frameworx-1.0": [ "Frameworx Open License 1.0", true, false ], + "FreeBSD-DOC": [ + "FreeBSD Documentation License", + false, + false + ], "FreeImage": [ "FreeImage Public License v1.0", false, false ], + "Furuseth": [ + "Furuseth License", + false, + false + ], + "GCR-docs": [ + "Gnome GCR Documentation License", + false, + false + ], + "GD": [ + "GD License", + false, + false + ], "GFDL-1.1": [ "GNU Free Documentation License v1.1", false, true ], + "GFDL-1.1-invariants-only": [ + "GNU Free Documentation License v1.1 only - invariants", + false, + false + ], + "GFDL-1.1-invariants-or-later": [ + "GNU Free Documentation License v1.1 or later - invariants", + false, + false + ], + "GFDL-1.1-no-invariants-only": [ + "GNU Free Documentation License v1.1 only - no invariants", + false, + false + ], + "GFDL-1.1-no-invariants-or-later": [ + "GNU Free Documentation License v1.1 or later - no invariants", + false, + false + ], "GFDL-1.1-only": [ "GNU Free Documentation License v1.1 only", false, @@ -699,6 +1174,26 @@ false, true ], + "GFDL-1.2-invariants-only": [ + "GNU Free Documentation License v1.2 only - invariants", + false, + false + ], + "GFDL-1.2-invariants-or-later": [ + "GNU Free Documentation License v1.2 or later - invariants", + false, + false + ], + "GFDL-1.2-no-invariants-only": [ + "GNU Free Documentation License v1.2 only - no invariants", + false, + false + ], + "GFDL-1.2-no-invariants-or-later": [ + "GNU Free Documentation License v1.2 or later - no invariants", + false, + false + ], "GFDL-1.2-only": [ "GNU Free Documentation License v1.2 only", false, @@ -714,6 +1209,26 @@ false, true ], + "GFDL-1.3-invariants-only": [ + "GNU Free Documentation License v1.3 only - invariants", + false, + false + ], + "GFDL-1.3-invariants-or-later": [ + "GNU Free Documentation License v1.3 or later - invariants", + false, + false + ], + "GFDL-1.3-no-invariants-only": [ + "GNU Free Documentation License v1.3 only - no invariants", + false, + false + ], + "GFDL-1.3-no-invariants-or-later": [ + "GNU Free Documentation License v1.3 or later - no invariants", + false, + false + ], "GFDL-1.3-only": [ "GNU Free Documentation License v1.3 only", false, @@ -729,6 +1244,11 @@ false, false ], + "GLWTPL": [ + "Good Luck With That Public License", + false, + false + ], "GPL-1.0": [ "GNU General Public License v1.0 only", false, @@ 
-839,16 +1359,111 @@ false, false ], + "Graphics-Gems": [ + "Graphics Gems License", + false, + false + ], + "HP-1986": [ + "Hewlett-Packard 1986 License", + false, + false + ], + "HP-1989": [ + "Hewlett-Packard 1989 License", + false, + false + ], "HPND": [ "Historical Permission Notice and Disclaimer", true, false ], + "HPND-DEC": [ + "Historical Permission Notice and Disclaimer - DEC variant", + false, + false + ], + "HPND-Kevlin-Henney": [ + "Historical Permission Notice and Disclaimer - Kevlin Henney variant", + false, + false + ], + "HPND-MIT-disclaimer": [ + "Historical Permission Notice and Disclaimer with MIT disclaimer", + false, + false + ], + "HPND-Markus-Kuhn": [ + "Historical Permission Notice and Disclaimer - Markus Kuhn variant", + false, + false + ], + "HPND-Pbmplus": [ + "Historical Permission Notice and Disclaimer - Pbmplus variant", + false, + false + ], + "HPND-UC": [ + "Historical Permission Notice and Disclaimer - University of California variant", + false, + false + ], + "HPND-doc": [ + "Historical Permission Notice and Disclaimer - documentation variant", + false, + false + ], + "HPND-doc-sell": [ + "Historical Permission Notice and Disclaimer - documentation sell variant", + false, + false + ], + "HPND-export-US": [ + "HPND with US Government export control warning", + false, + false + ], + "HPND-export-US-modify": [ + "HPND with US Government export control warning and modification rqmt", + false, + false + ], + "HPND-sell-MIT-disclaimer-xserver": [ + "Historical Permission Notice and Disclaimer - sell xserver variant with MIT disclaimer", + false, + false + ], + "HPND-sell-regexpr": [ + "Historical Permission Notice and Disclaimer - sell regexpr variant", + false, + false + ], + "HPND-sell-variant": [ + "Historical Permission Notice and Disclaimer - sell variant", + false, + false + ], + "HPND-sell-variant-MIT-disclaimer": [ + "HPND sell variant with MIT disclaimer", + false, + false + ], + "HTMLTIDY": [ + "HTML Tidy License", + false, + false + ], "HaskellReport": [ "Haskell Language Report License", false, false ], + "Hippocratic-2.1": [ + "Hippocratic License 2.1", + false, + false + ], "IBM-pibs": [ "IBM PowerPC Initialization and Boot Software", false, @@ -856,6 +1471,11 @@ ], "ICU": [ "ICU License", + true, + false + ], + "IEC-Code-Components-EULA": [ + "IEC Code Components End-user licence agreement", false, false ], @@ -864,6 +1484,11 @@ false, false ], + "IJG-short": [ + "Independent JPEG Group License - short", + false, + false + ], "IPA": [ "IPA Font License", true, @@ -879,6 +1504,11 @@ true, false ], + "ISC-Veillard": [ + "ISC Veillard variant", + false, + false + ], "ImageMagick": [ "ImageMagick License", false, @@ -894,6 +1524,11 @@ false, false ], + "Inner-Net-2.0": [ + "Inner Net License v2.0", + false, + false + ], "Intel": [ "Intel Open Source License", true, @@ -909,16 +1544,46 @@ false, false ], + "JPL-image": [ + "JPL Image Use Policy", + false, + false + ], + "JPNIC": [ + "Japan Network Information Center License", + false, + false + ], "JSON": [ "JSON License", false, false ], + "Jam": [ + "Jam License", + true, + false + ], "JasPer-2.0": [ "JasPer License", false, false ], + "Kastrup": [ + "Kastrup License", + false, + false + ], + "Kazlib": [ + "Kazlib License", + false, + false + ], + "Knuth-CTAN": [ + "Knuth CTAN License", + false, + false + ], "LAL-1.2": [ "Licence Art Libre 1.2", false, @@ -955,7 +1620,7 @@ true ], "LGPL-2.1+": [ - "GNU Library General Public License v2 or later", + "GNU Lesser General Public License v2.1 or later", 
true, true ], @@ -994,6 +1659,16 @@ false, false ], + "LOOP": [ + "Common Lisp LOOP License", + false, + false + ], + "LPD-document": [ + "LPD Documentation License", + false, + false + ], "LPL-1.0": [ "Lucent Public License Version 1.0", true, @@ -1029,28 +1704,43 @@ true, false ], + "LZMA-SDK-9.11-to-9.20": [ + "LZMA SDK License (versions 9.11 to 9.20)", + false, + false + ], + "LZMA-SDK-9.22": [ + "LZMA SDK License (versions 9.22 and beyond)", + false, + false + ], "Latex2e": [ "Latex2e License", false, false ], + "Latex2e-translated-notice": [ + "Latex2e with translated notice permission", + false, + false + ], "Leptonica": [ "Leptonica License", false, false ], "LiLiQ-P-1.1": [ - "Licence Libre du Qu\u00e9bec \u2013 Permissive version 1.1", + "Licence Libre du Québec – Permissive version 1.1", true, false ], "LiLiQ-R-1.1": [ - "Licence Libre du Qu\u00e9bec \u2013 R\u00e9ciprocit\u00e9 version 1.1", + "Licence Libre du Québec – Réciprocité version 1.1", true, false ], "LiLiQ-Rplus-1.1": [ - "Licence Libre du Qu\u00e9bec \u2013 R\u00e9ciprocit\u00e9 forte version 1.1", + "Licence Libre du Québec – Réciprocité forte version 1.1", true, false ], @@ -1059,16 +1749,66 @@ false, false ], + "Linux-OpenIB": [ + "Linux Kernel Variant of OpenIB.org license", + false, + false + ], + "Linux-man-pages-1-para": [ + "Linux man-pages - 1 paragraph", + false, + false + ], + "Linux-man-pages-copyleft": [ + "Linux man-pages Copyleft", + false, + false + ], + "Linux-man-pages-copyleft-2-para": [ + "Linux man-pages Copyleft - 2 paragraphs", + false, + false + ], + "Linux-man-pages-copyleft-var": [ + "Linux man-pages Copyleft Variant", + false, + false + ], + "Lucida-Bitmap-Fonts": [ + "Lucida Bitmap Fonts License", + false, + false + ], "MIT": [ "MIT License", true, false ], + "MIT-0": [ + "MIT No Attribution", + true, + false + ], "MIT-CMU": [ "CMU License", false, false ], + "MIT-Festival": [ + "MIT Festival Variant", + false, + false + ], + "MIT-Modern-Variant": [ + "MIT License Modern Variant", + true, + false + ], + "MIT-Wu": [ + "MIT Tom Wu Variant", + false, + false + ], "MIT-advertising": [ "Enlightenment License (e16)", false, @@ -1084,11 +1824,31 @@ false, false ], + "MIT-open-group": [ + "MIT Open Group variant", + false, + false + ], + "MIT-testregex": [ + "MIT testregex Variant", + false, + false + ], "MITNFA": [ "MIT +no-false-attribs license", false, false ], + "MMIXware": [ + "MMIXware License", + false, + false + ], + "MPEG-SSG": [ + "MPEG Software Simulation", + false, + false + ], "MPL-1.0": [ "Mozilla Public License 1.0", true, @@ -1109,6 +1869,11 @@ true, false ], + "MS-LPL": [ + "Microsoft Limited Public License", + false, + false + ], "MS-PL": [ "Microsoft Public License", true, @@ -1129,8 +1894,23 @@ false, false ], + "Martin-Birgmeier": [ + "Martin Birgmeier License", + false, + false + ], + "McPhee-slideshow": [ + "McPhee Slideshow License", + false, + false + ], + "Minpack": [ + "Minpack License", + false, + false + ], "MirOS": [ - "MirOS License", + "The MirOS Licence", true, false ], @@ -1139,6 +1919,16 @@ true, false ], + "MulanPSL-1.0": [ + "Mulan Permissive Software License, Version 1", + false, + false + ], + "MulanPSL-2.0": [ + "Mulan Permissive Software License, Version 2", + true, + false + ], "Multics": [ "Multics License", true, @@ -1149,6 +1939,11 @@ false, false ], + "NAIST-2003": [ + "Nara Institute of Science and Technology License (2003)", + false, + false + ], "NASA-1.3": [ "NASA Open Source Agreement 1.3", true, @@ -1159,6 +1954,11 @@ false, false ], + 
"NCGL-UK-2.0": [ + "Non-Commercial Government Licence", + false, + false + ], "NCSA": [ "University of Illinois/NCSA Open Source License", true, @@ -1169,8 +1969,33 @@ true, false ], + "NICTA-1.0": [ + "NICTA Public Software License, Version 1.0", + false, + false + ], + "NIST-PD": [ + "NIST Public Domain Notice", + false, + false + ], + "NIST-PD-fallback": [ + "NIST Public Domain Notice with license fallback", + false, + false + ], + "NIST-Software": [ + "NIST Software License", + false, + false + ], "NLOD-1.0": [ - "Norwegian Licence for Open Government Data", + "Norwegian Licence for Open Government Data (NLOD) 1.0", + false, + false + ], + "NLOD-2.0": [ + "Norwegian Licence for Open Government Data (NLOD) 2.0", false, false ], @@ -1209,6 +2034,11 @@ true, false ], + "NTP-0": [ + "NTP No Attribution", + false, + false + ], "Naumen": [ "Naumen Public License", true, @@ -1244,6 +2074,11 @@ false, true ], + "O-UDA-1.0": [ + "Open Use of Data Agreement v1.0", + false, + false + ], "OCCT-PL": [ "Open CASCADE Technology Public License", false, @@ -1254,8 +2089,18 @@ true, false ], + "ODC-By-1.0": [ + "Open Data Commons Attribution License v1.0", + false, + false + ], "ODbL-1.0": [ - "ODC Open Database License v1.0", + "Open Data Commons Open Database License v1.0", + false, + false + ], + "OFFIS": [ + "OFFIS License", false, false ], @@ -1264,11 +2109,61 @@ false, false ], + "OFL-1.0-RFN": [ + "SIL Open Font License 1.0 with Reserved Font Name", + false, + false + ], + "OFL-1.0-no-RFN": [ + "SIL Open Font License 1.0 with no Reserved Font Name", + false, + false + ], "OFL-1.1": [ "SIL Open Font License 1.1", true, false ], + "OFL-1.1-RFN": [ + "SIL Open Font License 1.1 with Reserved Font Name", + true, + false + ], + "OFL-1.1-no-RFN": [ + "SIL Open Font License 1.1 with no Reserved Font Name", + true, + false + ], + "OGC-1.0": [ + "OGC Software License, Version 1.0", + false, + false + ], + "OGDL-Taiwan-1.0": [ + "Taiwan Open Government Data License, version 1.0", + false, + false + ], + "OGL-Canada-2.0": [ + "Open Government Licence - Canada", + false, + false + ], + "OGL-UK-1.0": [ + "Open Government Licence v1.0", + false, + false + ], + "OGL-UK-2.0": [ + "Open Government Licence v2.0", + false, + false + ], + "OGL-UK-3.0": [ + "Open Government Licence v3.0", + false, + false + ], "OGTSL": [ "Open Group Test Suite License", true, @@ -1351,7 +2246,12 @@ ], "OLDAP-2.8": [ "Open LDAP Public License v2.8", - false, + true, + false + ], + "OLFL-1.3": [ + "Open Logistics Foundation License Version 1.3", + true, false ], "OML": [ @@ -1364,6 +2264,16 @@ false, false ], + "OPL-UK-3.0": [ + "United Kingdom Open Parliament Licence v3.0", + false, + false + ], + "OPUBL-1.0": [ + "Open Publication License v1.0", + false, + false + ], "OSET-PL-2.1": [ "OSET Public License version 2.1", true, @@ -1394,13 +2304,23 @@ true, false ], + "OpenPBS-2.3": [ + "OpenPBS v2.3 Software License", + false, + false + ], "OpenSSL": [ "OpenSSL License", false, false ], + "PADL": [ + "PADL License", + false, + false + ], "PDDL-1.0": [ - "ODC Public Domain Dedication & License 1.0", + "Open Data Commons Public Domain Dedication & License 1.0", false, false ], @@ -1411,6 +2331,26 @@ ], "PHP-3.01": [ "PHP License v3.01", + true, + false + ], + "PSF-2.0": [ + "Python Software Foundation License 2.0", + false, + false + ], + "Parity-6.0.0": [ + "The Parity Public License 6.0.0", + false, + false + ], + "Parity-7.0.0": [ + "The Parity Public License 7.0.0", + false, + false + ], + "Pixar": [ + "Pixar License", false, false ], 
@@ -1419,6 +2359,16 @@ false, false ], + "PolyForm-Noncommercial-1.0.0": [ + "PolyForm Noncommercial License 1.0.0", + false, + false + ], + "PolyForm-Small-Business-1.0.0": [ + "PolyForm Small Business License 1.0.0", + false, + false + ], "PostgreSQL": [ "PostgreSQL License", true, @@ -1429,11 +2379,21 @@ true, false ], + "Python-2.0.1": [ + "Python License 2.0.1", + false, + false + ], "QPL-1.0": [ "Q Public License 1.0", true, false ], + "QPL-1.0-INRIA-2004": [ + "Q Public License 1.0 - INRIA 2004 variant", + false, + false + ], "Qhull": [ "Qhull License", false, @@ -1460,7 +2420,7 @@ false ], "RSA-MD": [ - "RSA Message-Digest License ", + "RSA Message-Digest License", false, false ], @@ -1484,6 +2444,11 @@ false, false ], + "SAX-PD-2.0": [ + "Sax Public Domain Notice 2.0", + false, + false + ], "SCEA": [ "SCEA Shared Source License", false, @@ -1504,6 +2469,26 @@ false, false ], + "SGI-OpenGL": [ + "SGI OpenGL License", + false, + false + ], + "SGP4": [ + "SGP4 Permission Notice", + false, + false + ], + "SHL-0.5": [ + "Solderpad Hardware License v0.5", + false, + false + ], + "SHL-0.51": [ + "Solderpad Hardware License, Version 0.51", + false, + false + ], "SISSL": [ "Sun Industry Standards Source License v1.1", true, @@ -1514,6 +2499,11 @@ false, false ], + "SL": [ + "SL License", + false, + false + ], "SMLNJ": [ "Standard ML of New Jersey License", false, @@ -1534,6 +2524,21 @@ true, false ], + "SSH-OpenSSH": [ + "SSH OpenSSH license", + false, + false + ], + "SSH-short": [ + "SSH short notice", + false, + false + ], + "SSPL-1.0": [ + "Server Side Public License, v 1", + false, + false + ], "SWL": [ "Scheme Widget Library (SWL) Software License Agreement", false, @@ -1544,11 +2549,21 @@ false, false ], + "SchemeReport": [ + "Scheme Language Report License", + false, + false + ], "Sendmail": [ "Sendmail License", false, false ], + "Sendmail-8.23": [ + "Sendmail License 8.23", + false, + false + ], "SimPL-2.0": [ "Simple Public License 2.0", true, @@ -1559,6 +2574,11 @@ true, false ], + "Soundex": [ + "Soundex License", + false, + false + ], "Spencer-86": [ "Spencer License 86", false, @@ -1584,6 +2604,21 @@ false, false ], + "SunPro": [ + "SunPro License", + false, + false + ], + "Symlinks": [ + "Symlinks License", + false, + false + ], + "TAPR-OHL-1.0": [ + "TAPR Open Hardware License v1.0", + false, + false + ], "TCL": [ "TCL/TK License", false, @@ -1594,6 +2629,11 @@ false, false ], + "TGPPL-1.0": [ + "Transitive Grace Period Public Licence 1.0", + false, + false + ], "TMate": [ "TMate Open Source License", false, @@ -1609,11 +2649,66 @@ false, false ], + "TPDL": [ + "Time::ParseDate License", + false, + false + ], + "TPL-1.0": [ + "THOR Public License 1.0", + false, + false + ], + "TTWL": [ + "Text-Tabs+Wrap License", + false, + false + ], + "TTYP0": [ + "TTYP0 License", + false, + false + ], + "TU-Berlin-1.0": [ + "Technische Universitaet Berlin License 1.0", + false, + false + ], + "TU-Berlin-2.0": [ + "Technische Universitaet Berlin License 2.0", + false, + false + ], + "TermReadKey": [ + "TermReadKey License", + false, + false + ], + "UCAR": [ + "UCAR License", + false, + false + ], + "UCL-1.0": [ + "Upstream Compatibility License v1.0", + true, + false + ], "UPL-1.0": [ "Universal Permissive License v1.0", true, false ], + "URT-RLE": [ + "Utah Raster Toolkit Run Length Encoded License", + false, + false + ], + "Unicode-3.0": [ + "Unicode License v3", + true, + false + ], "Unicode-DFS-2015": [ "Unicode License Agreement - Data Files and Software (2015)", false, @@ -1621,7 
+2716,7 @@ ], "Unicode-DFS-2016": [ "Unicode License Agreement - Data Files and Software (2016)", - false, + true, false ], "Unicode-TOU": [ @@ -1629,9 +2724,14 @@ false, false ], + "UnixCrypt": [ + "UnixCrypt License", + false, + false + ], "Unlicense": [ "The Unlicense", - false, + true, false ], "VOSTROM": [ @@ -1674,6 +2774,11 @@ true, false ], + "Widget-Workshop": [ + "Widget Workshop License", + false, + false + ], "Wsuipa": [ "Wsuipa License", false, @@ -1684,6 +2789,11 @@ false, false ], + "X11-distribute-modifications-variant": [ + "X11 License Distribution Modification Variant", + false, + false + ], "XFree86-1.1": [ "XFree86 License 1.1", false, @@ -1694,11 +2804,21 @@ false, false ], + "Xdebug-1.03": [ + "Xdebug License v 1.03", + false, + false + ], "Xerox": [ "Xerox License", false, false ], + "Xfig": [ + "Xfig License", + false, + false + ], "Xnet": [ "X.Net License", true, @@ -1726,7 +2846,7 @@ ], "ZPL-2.1": [ "Zope Public License 2.1", - false, + true, false ], "Zed": [ @@ -1734,6 +2854,11 @@ false, false ], + "Zeeff": [ + "Zeeff License", + false, + false + ], "Zend-2.0": [ "Zend License v2.0", false, @@ -1754,16 +2879,41 @@ true, false ], + "blessing": [ + "SQLite Blessing", + false, + false + ], "bzip2-1.0.5": [ "bzip2 and libbzip2 License v1.0.5", false, - false + true ], "bzip2-1.0.6": [ "bzip2 and libbzip2 License v1.0.6", false, false ], + "check-cvs": [ + "check-cvs License", + false, + false + ], + "checkmk": [ + "Checkmk License", + false, + false + ], + "copyleft-next-0.3.0": [ + "copyleft-next 0.3.0", + false, + false + ], + "copyleft-next-0.3.1": [ + "copyleft-next 0.3.1", + false, + false + ], "curl": [ "curl License", false, @@ -1774,6 +2924,11 @@ false, false ], + "dtoa": [ + "David M. Gay dtoa License", + false, + false + ], "dvipdfm": [ "dvipdfm License", false, @@ -1789,6 +2944,16 @@ false, false ], + "etalab-2.0": [ + "Etalab Open License 2.0", + false, + false + ], + "fwlw": [ + "fwlw License", + false, + false + ], "gSOAP-1.3b": [ "gSOAP Public License v1.3b", false, @@ -1799,21 +2964,76 @@ false, false ], + "hdparm": [ + "hdparm License", + false, + false + ], "iMatix": [ "iMatix Standard Function Library Agreement", false, false ], + "libpng-2.0": [ + "PNG Reference Library version 2", + false, + false + ], + "libselinux-1.0": [ + "libselinux public domain notice", + false, + false + ], "libtiff": [ "libtiff License", false, false ], + "libutil-David-Nugent": [ + "libutil David Nugent License", + false, + false + ], + "lsof": [ + "lsof License", + false, + false + ], + "magaz": [ + "magaz License", + false, + false + ], + "mailprio": [ + "mailprio License", + false, + false + ], + "metamail": [ + "metamail License", + false, + false + ], + "mpi-permissive": [ + "mpi Permissive License", + false, + false + ], "mpich2": [ "mpich2 License", false, false ], + "mplus": [ + "mplus Font License", + false, + false + ], + "pnmstitch": [ + "pnmstitch License", + false, + false + ], "psfrag": [ "psfrag License", false, @@ -1824,9 +3044,44 @@ false, false ], + "python-ldap": [ + "Python ldap License", + false, + false + ], + "radvd": [ + "radvd License", + false, + false + ], + "snprintf": [ + "snprintf License", + false, + false + ], + "ssh-keyscan": [ + "ssh-keyscan License", + false, + false + ], + "swrule": [ + "swrule License", + false, + false + ], + "ulem": [ + "ulem License", + false, + false + ], + "w3m": [ + "w3m License", + false, + false + ], "wxWindows": [ "wxWindows Library License", - false, + true, true ], "xinetd": [ @@ -1834,6 +3089,16 @@ 
false, false ], + "xkeyboard-config-Zinoviev": [ + "xkeyboard-config Zinoviev License", + false, + false + ], + "xlock": [ + "xlock License", + false, + false + ], "xpp": [ "XPP License", false, diff --git a/conda_lock/_vendor/poetry/core/spdx/helpers.py b/conda_lock/_vendor/poetry/core/spdx/helpers.py new file mode 100644 index 00000000..815f7ed5 --- /dev/null +++ b/conda_lock/_vendor/poetry/core/spdx/helpers.py @@ -0,0 +1,68 @@ +from __future__ import annotations + +import functools +import json +import sys + +from typing import TYPE_CHECKING + +from conda_lock._vendor.poetry.core.spdx.license import License + + +if sys.version_info < (3, 9): + from pathlib import Path + + def _get_license_file() -> Path: + return Path(__file__).parent / "data" / "licenses.json" + +else: + from importlib.resources import files + + if TYPE_CHECKING: + from importlib.abc import Traversable + + def _get_license_file() -> Traversable: + return files(__package__) / "data" / "licenses.json" + + +def license_by_id(identifier: str) -> License: + if not identifier: + raise ValueError("A license identifier is required") + + licenses = _load_licenses() + return licenses.get( + identifier.lower(), License(identifier, identifier, False, False) + ) + + +@functools.lru_cache +def _load_licenses() -> dict[str, License]: + licenses = {} + licenses_file = _get_license_file() + + with licenses_file.open(encoding="utf-8") as f: + data = json.load(f) + + for name, license_info in data.items(): + license = License(name, license_info[0], license_info[1], license_info[2]) + licenses[name.lower()] = license + + full_name = license_info[0].lower() + if full_name in licenses: + existing_license = licenses[full_name] + if not existing_license.is_deprecated: + continue + + licenses[full_name] = license + + # Add a Proprietary license for non-standard licenses + licenses["proprietary"] = License("Proprietary", "Proprietary", False, False) + + return licenses + + +if __name__ == "__main__": + from conda_lock._vendor.poetry.core.spdx.updater import Updater + + updater = Updater() + updater.dump() diff --git a/conda_lock/_vendor/poetry/core/spdx/license.py b/conda_lock/_vendor/poetry/core/spdx/license.py index f5a9fb6d..e10ad6b8 100644 --- a/conda_lock/_vendor/poetry/core/spdx/license.py +++ b/conda_lock/_vendor/poetry/core/spdx/license.py @@ -1,10 +1,16 @@ +from __future__ import annotations + from collections import namedtuple -from typing import Optional +from typing import ClassVar class License(namedtuple("License", "id name is_osi_approved is_deprecated")): + id: str + name: str + is_osi_approved: bool + is_deprecated: bool - CLASSIFIER_SUPPORTED = { + CLASSIFIER_SUPPORTED: ClassVar[set[str]] = { # Not OSI Approved "Aladdin", "CC0-1.0", @@ -69,7 +75,7 @@ class License(namedtuple("License", "id name is_osi_approved is_deprecated")): "ZPL-2.1", } - CLASSIFIER_NAMES = { + CLASSIFIER_NAMES: ClassVar[dict[str, str]] = { # Not OSI Approved "AFPL": "Aladdin Free Public License (AFPL)", "CC0-1.0": "CC0 1.0 Universal (CC0 1.0) Public Domain Dedication", @@ -131,7 +137,7 @@ class License(namedtuple("License", "id name is_osi_approved is_deprecated")): } @property - def classifier(self): # type: () -> str + def classifier(self) -> str: parts = ["License"] if self.is_osi_approved: @@ -144,7 +150,7 @@ def classifier(self): # type: () -> str return " :: ".join(parts) @property - def classifier_name(self): # type: () -> Optional[str] + def classifier_name(self) -> str | None: if self.id not in self.CLASSIFIER_SUPPORTED: if self.is_osi_approved: 
return None diff --git a/conda_lock/_vendor/poetry/core/spdx/updater.py b/conda_lock/_vendor/poetry/core/spdx/updater.py index 30c3a519..713e46e0 100644 --- a/conda_lock/_vendor/poetry/core/spdx/updater.py +++ b/conda_lock/_vendor/poetry/core/spdx/updater.py @@ -1,37 +1,30 @@ +from __future__ import annotations + import json -import os -from io import open +from pathlib import Path from typing import Any -from typing import Dict -from typing import Optional - - -try: - from urllib.request import urlopen -except ImportError: - from urllib2 import urlopen +from urllib.request import urlopen class Updater: - BASE_URL = "https://raw.githubusercontent.com/spdx/license-list-data/master/json/" - def __init__(self, base_url=BASE_URL): # type: (str) -> None + def __init__(self, base_url: str = BASE_URL) -> None: self._base_url = base_url - def dump(self, file=None): # type: (Optional[str]) -> None + def dump(self, file: Path | None = None) -> None: if file is None: - file = os.path.join(os.path.dirname(__file__), "data", "licenses.json") + file = Path(__file__).parent / "data" / "licenses.json" licenses_url = self._base_url + "licenses.json" - with open(file, "w", encoding="utf-8") as f: + with file.open("w", encoding="utf-8") as f: f.write( json.dumps(self.get_licenses(licenses_url), indent=2, sort_keys=True) ) - def get_licenses(self, url): # type: (str) -> Dict[str, Any] + def get_licenses(self, url: str) -> dict[str, Any]: licenses = {} with urlopen(url) as r: data = json.loads(r.read().decode()) diff --git a/conda_lock/_vendor/poetry/core/toml/__init__.py b/conda_lock/_vendor/poetry/core/toml/__init__.py deleted file mode 100644 index bda2d245..00000000 --- a/conda_lock/_vendor/poetry/core/toml/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from conda_lock._vendor.poetry.core.toml.exceptions import TOMLError -from conda_lock._vendor.poetry.core.toml.file import TOMLFile - - -__all__ = [clazz.__name__ for clazz in {TOMLError, TOMLFile}] diff --git a/conda_lock/_vendor/poetry/core/toml/file.py b/conda_lock/_vendor/poetry/core/toml/file.py deleted file mode 100644 index c5de7a9a..00000000 --- a/conda_lock/_vendor/poetry/core/toml/file.py +++ /dev/null @@ -1,40 +0,0 @@ -from typing import TYPE_CHECKING -from typing import Any -from typing import Union - -from tomlkit.exceptions import TOMLKitError -from tomlkit.toml_file import TOMLFile as BaseTOMLFile - -from conda_lock._vendor.poetry.core.toml import TOMLError -from conda_lock._vendor.poetry.core.utils._compat import Path - - -if TYPE_CHECKING: - from tomlkit.toml_document import TOMLDocument # noqa - - -class TOMLFile(BaseTOMLFile): - def __init__(self, path): # type: (Union[str, Path]) -> None - if isinstance(path, str): - path = Path(path) - super(TOMLFile, self).__init__(path.as_posix()) - self.__path = path - - @property - def path(self): # type: () -> Path - return self.__path - - def exists(self): # type: () -> bool - return self.__path.exists() - - def read(self): # type: () -> "TOMLDocument" - try: - return super(TOMLFile, self).read() - except (ValueError, TOMLKitError) as e: - raise TOMLError("Invalid TOML file {}: {}".format(self.path.as_posix(), e)) - - def __getattr__(self, item): # type: (str) -> Any - return getattr(self.__path, item) - - def __str__(self): # type: () -> str - return self.__path.as_posix() diff --git a/conda_lock/_vendor/poetry/core/utils/_compat.py b/conda_lock/_vendor/poetry/core/utils/_compat.py index 7c5daa9f..c8f45dff 100644 --- a/conda_lock/_vendor/poetry/core/utils/_compat.py +++ 
b/conda_lock/_vendor/poetry/core/utils/_compat.py @@ -1,125 +1,15 @@ -import sys - -from typing import AnyStr -from typing import List -from typing import Optional -from typing import Union - -import six.moves.urllib.parse as urllib_parse - - -urlparse = urllib_parse - - -try: # Python 2 - long = long - unicode = unicode - basestring = basestring -except NameError: # Python 3 - long = int - unicode = str - basestring = str +from __future__ import annotations +import sys -PY2 = sys.version_info[0] == 2 -PY34 = sys.version_info >= (3, 4) -PY35 = sys.version_info >= (3, 5) -PY36 = sys.version_info >= (3, 6) -PY37 = sys.version_info >= (3, 7) WINDOWS = sys.platform == "win32" -if PY2: - import pipes - - shell_quote = pipes.quote -else: - import shlex - - shell_quote = shlex.quote -if PY35: - from pathlib import Path # noqa +if sys.version_info < (3, 11): + # compatibility for python <3.11 + import tomli as tomllib else: - from pathlib2 import Path # noqa - -if not PY36: - from collections import OrderedDict # noqa -else: - OrderedDict = dict - - -try: - FileNotFoundError -except NameError: - FileNotFoundError = IOError # noqa - - -def decode( - string, encodings=None -): # type: (Union[AnyStr, unicode], Optional[str]) -> Union[str, bytes] - if not PY2 and not isinstance(string, bytes): - return string - - if PY2 and isinstance(string, unicode): - return string - - encodings = encodings or ["utf-8", "latin1", "ascii"] - - for encoding in encodings: - try: - return string.decode(encoding) - except (UnicodeEncodeError, UnicodeDecodeError): - pass - - return string.decode(encodings[0], errors="ignore") - - -def encode( - string, encodings=None -): # type: (AnyStr, Optional[str]) -> Union[str, bytes] - if not PY2 and isinstance(string, bytes): - return string - - if PY2 and isinstance(string, str): - return string - - encodings = encodings or ["utf-8", "latin1", "ascii"] - - for encoding in encodings: - try: - return string.encode(encoding) - except (UnicodeEncodeError, UnicodeDecodeError): - pass - - return string.encode(encodings[0], errors="ignore") - - -def to_str(string): # type: (AnyStr) -> str - if isinstance(string, str) or not isinstance(string, (unicode, bytes)): - return string - - if PY2: - method = "encode" - else: - method = "decode" - - encodings = ["utf-8", "latin1", "ascii"] - - for encoding in encodings: - try: - return getattr(string, method)(encoding) - except (UnicodeEncodeError, UnicodeDecodeError): - pass - - return getattr(string, method)(encodings[0], errors="ignore") - - -def list_to_shell_command(cmd): # type: (List[str]) -> str - executable = cmd[0] - - if " " in executable: - executable = '"{}"'.format(executable) - cmd[0] = executable + import tomllib - return " ".join(cmd) +__all__ = ["tomllib"] diff --git a/conda_lock/_vendor/poetry/core/utils/helpers.py b/conda_lock/_vendor/poetry/core/utils/helpers.py index e17b36d4..a750054e 100644 --- a/conda_lock/_vendor/poetry/core/utils/helpers.py +++ b/conda_lock/_vendor/poetry/core/utils/helpers.py @@ -1,74 +1,82 @@ +from __future__ import annotations + import os -import re import shutil import stat +import sys import tempfile +import time +import unicodedata +import warnings from contextlib import contextmanager +from pathlib import Path +from typing import TYPE_CHECKING from typing import Any -from typing import Iterator -from typing import List -from typing import Union - -from conda_lock._vendor.poetry.core.utils._compat import Path -from conda_lock._vendor.poetry.core.version import Version +from packaging.utils import 
canonicalize_name -try: - from collections.abc import Mapping -except ImportError: - from collections import Mapping +from conda_lock._vendor.poetry.core.version.pep440 import PEP440Version -_canonicalize_regex = re.compile(r"[-_]+") +if TYPE_CHECKING: + from collections.abc import Iterator -def canonicalize_name(name): # type: (str) -> str - return _canonicalize_regex.sub("-", name).lower() +def combine_unicode(string: str) -> str: + return unicodedata.normalize("NFC", string) -def module_name(name): # type: (str) -> str - return canonicalize_name(name).replace(".", "_").replace("-", "_") +def module_name(name: str) -> str: + return canonicalize_name(name).replace("-", "_") -def normalize_version(version): # type: (str) -> str - return str(Version(version)) +def normalize_version(version: str) -> str: + warnings.warn( + "normalize_version() is deprecated. Use Version.parse().to_string() instead.", + DeprecationWarning, + stacklevel=2, + ) + return PEP440Version.parse(version).to_string() @contextmanager -def temporary_directory(*args, **kwargs): # type: (*Any, **Any) -> Iterator[str] - name = tempfile.mkdtemp(*args, **kwargs) - yield name - safe_rmtree(name) - - -def parse_requires(requires): # type: (str) -> List[str] +def temporary_directory(*args: Any, **kwargs: Any) -> Iterator[str]: + if sys.version_info >= (3, 10): + # mypy reports an error if ignore_cleanup_errors is + # specified literally in the call + kwargs["ignore_cleanup_errors"] = True + with tempfile.TemporaryDirectory(*args, **kwargs) as name: + yield name + else: + name = tempfile.mkdtemp(*args, **kwargs) + yield name + robust_rmtree(name) + + +def parse_requires(requires: str) -> list[str]: lines = requires.split("\n") requires_dist = [] - in_section = False current_marker = None for line in lines: line = line.strip() if not line: - if in_section: - in_section = False - continue if line.startswith("["): # extras or conditional dependencies marker = line.lstrip("[").rstrip("]") if ":" not in marker: - extra, marker = marker, None + extra, marker = marker, "" else: extra, marker = marker.split(":") if extra: if marker: - marker = '{} and extra == "{}"'.format(marker, extra) + marker = f'{marker} and extra == "{extra}"' else: - marker = 'extra == "{}"'.format(extra) + marker = f'extra == "{extra}"' if marker: current_marker = marker @@ -76,14 +84,14 @@ def parse_requires(requires): # type: (str) -> List[str] continue if current_marker: - line = "{} ; {}".format(line, current_marker) + line = f"{line} ; {current_marker}" requires_dist.append(line) return requires_dist -def _on_rm_error(func, path, exc_info): # type: (Any, Union[str, Path], Any) -> None +def _on_rm_error(func: Any, path: str | Path, exc_info: Any) -> None: if not os.path.exists(path): return @@ -91,16 +99,38 @@ def _on_rm_error(func, path, exc_info): # type: (Any, Union[str, Path], Any) -> func(path) -def safe_rmtree(path): # type: (Union[str, Path]) -> None - if Path(path).is_symlink(): - return os.unlink(str(path)) - +def robust_rmtree(path: str | Path, max_timeout: float = 1) -> None: + """ + Robustly tries to delete paths. + Retries several times if an OSError occurs. + If the final attempt fails, the Exception is propagated + to the caller. 
+ """ + path = Path(path) # make sure this is a Path object, not str + timeout = 0.001 + while timeout < max_timeout: + try: + # both os.unlink and shutil.rmtree can throw exceptions on Windows + # if the files are in use when called + if path.is_symlink(): + path.unlink() + else: + shutil.rmtree(path) + return # Only hits this on success + except OSError: + # Increase the timeout and try again + time.sleep(timeout) + timeout *= 2 + + # Final attempt, pass any Exceptions up to caller. shutil.rmtree(path, onerror=_on_rm_error) -def merge_dicts(d1, d2): # type: (dict, dict) -> None - for k, v in d2.items(): - if k in d1 and isinstance(d1[k], dict) and isinstance(d2[k], Mapping): - merge_dicts(d1[k], d2[k]) - else: - d1[k] = d2[k] +def readme_content_type(path: str | Path) -> str: + suffix = Path(path).suffix + if suffix == ".rst": + return "text/x-rst" + elif suffix in (".md", ".markdown"): + return "text/markdown" + else: + return "text/plain" diff --git a/conda_lock/_vendor/poetry/core/utils/patterns.py b/conda_lock/_vendor/poetry/core/utils/patterns.py index 1d6413c2..c2d9d9bf 100644 --- a/conda_lock/_vendor/poetry/core/utils/patterns.py +++ b/conda_lock/_vendor/poetry/core/utils/patterns.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import re diff --git a/conda_lock/_vendor/poetry/core/utils/toml_file.py b/conda_lock/_vendor/poetry/core/utils/toml_file.py deleted file mode 100644 index bd59105a..00000000 --- a/conda_lock/_vendor/poetry/core/utils/toml_file.py +++ /dev/null @@ -1,19 +0,0 @@ -# -*- coding: utf-8 -*- -from typing import Any - -from conda_lock._vendor.poetry.core.toml import TOMLFile - - -class TomlFile(TOMLFile): - @classmethod - def __new__(cls, *args, **kwargs): # type: (*Any, **Any) -> TOMLFile - import warnings - - warnings.warn( - "Use of {}.{} has been deprecated, use {}.{} instead.".format( - cls.__module__, cls.__name__, TOMLFile.__module__, TOMLFile.__name__, - ), - category=DeprecationWarning, - stacklevel=2, - ) - return super(TomlFile, cls).__new__(cls) diff --git a/conda_lock/_vendor/poetry/core/vcs/__init__.py b/conda_lock/_vendor/poetry/core/vcs/__init__.py index a8464873..e6d81199 100644 --- a/conda_lock/_vendor/poetry/core/vcs/__init__.py +++ b/conda_lock/_vendor/poetry/core/vcs/__init__.py @@ -1,26 +1,39 @@ +from __future__ import annotations + import os import subprocess -from conda_lock._vendor.poetry.core.utils._compat import Path -from conda_lock._vendor.poetry.core.utils._compat import decode +from pathlib import Path -from .git import Git +from conda_lock._vendor.poetry.core.vcs.git import Git -def get_vcs(directory): # type: (Path) -> Git +def get_vcs(directory: Path) -> Git | None: working_dir = Path.cwd() os.chdir(str(directory.resolve())) - try: - from .git import executable - - git_dir = decode( - subprocess.check_output( - [executable(), "rev-parse", "--show-toplevel"], stderr=subprocess.STDOUT - ) - ).strip() + vcs: Git | None - vcs = Git(Path(git_dir)) + try: + from conda_lock._vendor.poetry.core.vcs.git import executable + + check_ignore = subprocess.run( + [executable(), "check-ignore", "."], + stderr=subprocess.DEVNULL, + stdout=subprocess.DEVNULL, + ).returncode + + if check_ignore == 0: + vcs = None + else: + git_dir = subprocess.check_output( + [executable(), "rev-parse", "--show-toplevel"], + stderr=subprocess.STDOUT, + text=True, + encoding="utf-8", + ).strip() + + vcs = Git(Path(git_dir)) except (subprocess.CalledProcessError, OSError, RuntimeError): vcs = None diff --git a/conda_lock/_vendor/poetry/core/vcs/git.py 
b/conda_lock/_vendor/poetry/core/vcs/git.py index 529f872d..9a7c1744 100644 --- a/conda_lock/_vendor/poetry/core/vcs/git.py +++ b/conda_lock/_vendor/poetry/core/vcs/git.py @@ -1,115 +1,108 @@ -# -*- coding: utf-8 -*- +from __future__ import annotations + import re import subprocess from collections import namedtuple +from pathlib import Path from typing import Any -from typing import Optional -from conda_lock._vendor.poetry.core.utils._compat import PY36 from conda_lock._vendor.poetry.core.utils._compat import WINDOWS -from conda_lock._vendor.poetry.core.utils._compat import Path -from conda_lock._vendor.poetry.core.utils._compat import decode -pattern_formats = { - "protocol": r"\w+", - "user": r"[a-zA-Z0-9_.-]+", - "resource": r"[a-zA-Z0-9_.-]+", - "port": r"\d+", - "path": r"[\w~.\-/\\]+", - "name": r"[\w~.\-]+", - "rev": r"[^@#]+", -} +PROTOCOL = r"\w+" +USER = r"[a-zA-Z0-9_.-]+" +RESOURCE = r"[a-zA-Z0-9_.-]+" +PORT = r"\d+" +PATH = r"[%\w~.\-/\\\$]+" +NAME = r"[%\w~.\-]+" +REV = r"[^@#]+?" +SUBDIR = r"[\w\-/\\]+" PATTERNS = [ re.compile( r"^(git\+)?" r"(?Phttps?|git|ssh|rsync|file)://" - r"(?:(?P{user})@)?" - r"(?P{resource})?" - r"(:(?P{port}))?" - r"(?P[:/\\]({path}[/\\])?" - r"((?P{name}?)(\.git|[/\\])?)?)" - r"([@#](?P{rev}))?" - r"$".format( - user=pattern_formats["user"], - resource=pattern_formats["resource"], - port=pattern_formats["port"], - path=pattern_formats["path"], - name=pattern_formats["name"], - rev=pattern_formats["rev"], - ) + rf"(?:(?P{USER})@)?" + rf"(?P{RESOURCE})?" + rf"(:(?P{PORT}))?" + rf"(?P[:/\\]({PATH}[/\\])?" + rf"((?P{NAME}?)(\.git|[/\\])?)?)" + r"(?:" + rf"#(?:egg=.+?&subdirectory=|subdirectory=)(?P{SUBDIR})" + r"|" + r"#egg=?.+" + r"|" + rf"[@#](?P{REV})(?:[&#](?:(?:egg=.+?&subdirectory=|subdirectory=)(?P{SUBDIR})|egg=.+?))?" + r")?" + r"$" ), re.compile( r"(git\+)?" - r"((?P{protocol})://)" - r"(?:(?P{user})@)?" - r"(?P{resource}:?)" - r"(:(?P{port}))?" - r"(?P({path})" - r"(?P{name})(\.git|/)?)" - r"([@#](?P{rev}))?" - r"$".format( - protocol=pattern_formats["protocol"], - user=pattern_formats["user"], - resource=pattern_formats["resource"], - port=pattern_formats["port"], - path=pattern_formats["path"], - name=pattern_formats["name"], - rev=pattern_formats["rev"], - ) + rf"((?P{PROTOCOL})://)" + rf"(?:(?P{USER})@)?" + rf"(?P{RESOURCE}:?)" + rf"(:(?P{PORT}))?" + rf"(?P({PATH})" + rf"(?P{NAME})(\.git|/)?)" + r"(?:" + rf"#(?:egg=.+?&subdirectory=|subdirectory=)(?P{SUBDIR})" + r"|" + r"#egg=?.+" + r"|" + rf"[@#](?P{REV})(?:[&#](?:(?:egg=.+?&subdirectory=|subdirectory=)(?P{SUBDIR})|egg=.+?))?" + r")?" + r"$" ), re.compile( - r"^(?:(?P{user})@)?" - r"(?P{resource})" - r"(:(?P{port}))?" - r"(?P([:/]{path}/)" - r"(?P{name})(\.git|/)?)" - r"([@#](?P{rev}))?" - r"$".format( - user=pattern_formats["user"], - resource=pattern_formats["resource"], - port=pattern_formats["port"], - path=pattern_formats["path"], - name=pattern_formats["name"], - rev=pattern_formats["rev"], - ) + rf"^(?:(?P{USER})@)?" + rf"(?P{RESOURCE})" + rf"(:(?P{PORT}))?" + rf"(?P([:/]{PATH}/)" + rf"(?P{NAME})(\.git|/)?)" + r"(?:" + rf"#(?:egg=.+?&subdirectory=|subdirectory=)(?P{SUBDIR})" + r"|" + r"#egg=?.+" + r"|" + rf"[@#](?P{REV})(?:[&#](?:(?:egg=.+?&subdirectory=|subdirectory=)(?P{SUBDIR})|egg=.+?))?" + r")?" + r"$" ), re.compile( - r"((?P{user})@)?" - r"(?P{resource})" + rf"((?P{USER})@)?" + rf"(?P{RESOURCE})" r"[:/]{{1,2}}" - r"(?P({path})" - r"(?P{name})(\.git|/)?)" - r"([@#](?P{rev}))?" 
- r"$".format( - user=pattern_formats["user"], - resource=pattern_formats["resource"], - path=pattern_formats["path"], - name=pattern_formats["name"], - rev=pattern_formats["rev"], - ) + rf"(?P({PATH})" + rf"(?P{NAME})(\.git|/)?)" + r"(?:" + rf"#(?:egg=.+?&subdirectory=|subdirectory=)(?P{SUBDIR})" + r"|" + r"#egg=?.+" + r"|" + rf"[@#](?P{REV})(?:[&#](?:(?:egg=.+?&subdirectory=|subdirectory=)(?P{SUBDIR})|egg=.+?))?" + r")?" + r"$" ), ] class GitError(RuntimeError): - pass class ParsedUrl: def __init__( self, - protocol, # type: Optional[str] - resource, # type: Optional[str] - pathname, # type: Optional[str] - user, # type: Optional[str] - port, # type: Optional[str] - name, # type: Optional[str] - rev, # type: Optional[str] - ): + protocol: str | None, + resource: str | None, + pathname: str | None, + user: str | None, + port: str | None, + name: str | None, + rev: str | None, + subdirectory: str | None = None, + ) -> None: self.protocol = protocol self.resource = resource self.pathname = pathname @@ -117,9 +110,10 @@ def __init__( self.port = port self.name = name self.rev = rev + self.subdirectory = subdirectory @classmethod - def parse(cls, url): # type: (str) -> ParsedUrl + def parse(cls, url: str) -> ParsedUrl: for pattern in PATTERNS: m = pattern.match(url) if m: @@ -132,54 +126,53 @@ def parse(cls, url): # type: (str) -> ParsedUrl groups.get("port"), groups.get("name"), groups.get("rev"), + groups.get("rev_subdirectory") or groups.get("subdirectory"), ) - raise ValueError('Invalid git url "{}"'.format(url)) + raise ValueError(f'Invalid git url "{url}"') @property - def url(self): # type: () -> str - return "{}{}{}{}{}".format( - "{}://".format(self.protocol) if self.protocol else "", - "{}@".format(self.user) if self.user else "", - self.resource, - ":{}".format(self.port) if self.port else "", - "/" + self.pathname.lstrip(":/"), - ) - - def format(self): # type: () -> str + def url(self) -> str: + protocol = f"{self.protocol}://" if self.protocol else "" + user = f"{self.user}@" if self.user else "" + port = f":{self.port}" if self.port else "" + path = "/" + (self.pathname or "").lstrip(":/") + return f"{protocol}{user}{self.resource}{port}{path}" + + def format(self) -> str: return self.url - def __str__(self): # type: () -> str + def __str__(self) -> str: return self.format() -GitUrl = namedtuple("GitUrl", ["url", "revision"]) +GitUrl = namedtuple("GitUrl", ["url", "revision", "subdirectory"]) -_executable = None +_executable: str | None = None -def executable(): +def executable() -> str: global _executable if _executable is not None: return _executable - if WINDOWS and PY36: + if WINDOWS: # Finding git via where.exe where = "%WINDIR%\\System32\\where.exe" - paths = decode( - subprocess.check_output([where, "git"], shell=True, encoding="oem") + paths = subprocess.check_output( + [where, "git"], shell=True, encoding="oem" ).split("\n") for path in paths: if not path: continue - path = Path(path.strip()) + _path = Path(path.strip()) try: - path.relative_to(Path.cwd()) + _path.relative_to(Path.cwd()) except ValueError: - _executable = str(path) + _executable = str(_path) break else: @@ -191,22 +184,20 @@ def executable(): return _executable -def _reset_executable(): +def _reset_executable() -> None: global _executable _executable = None class GitConfig: - def __init__(self, requires_git_presence=False): # type: (bool) -> None + def __init__(self, requires_git_presence: bool = False) -> None: self._config = {} try: - config_list = decode( - subprocess.check_output( - [executable(), 
"config", "-l"], stderr=subprocess.STDOUT - ) - ) + config_list = subprocess.check_output( + [executable(), "config", "-l"], stderr=subprocess.STDOUT + ).decode() m = re.findall("(?ms)^([^=]+)=(.*?)$", config_list) if m: @@ -216,31 +207,36 @@ def __init__(self, requires_git_presence=False): # type: (bool) -> None if requires_git_presence: raise - def get(self, key, default=None): # type: (Any, Optional[Any]) -> Any + def get(self, key: Any, default: Any | None = None) -> Any: return self._config.get(key, default) - def __getitem__(self, item): # type: (Any) -> Any + def __getitem__(self, item: Any) -> Any: return self._config[item] class Git: - def __init__(self, work_dir=None): # type: (Optional[Path]) -> None + def __init__(self, work_dir: Path | None = None) -> None: self._config = GitConfig(requires_git_presence=True) self._work_dir = work_dir @classmethod - def normalize_url(cls, url): # type: (str) -> GitUrl + def normalize_url(cls, url: str) -> GitUrl: parsed = ParsedUrl.parse(url) formatted = re.sub(r"^git\+", "", url) if parsed.rev: - formatted = re.sub(r"[#@]{}$".format(parsed.rev), "", formatted) + formatted = re.sub(rf"[#@]{parsed.rev}(?=[#&]?)(?!\=)", "", formatted) + + if parsed.subdirectory: + formatted = re.sub( + rf"[#&]subdirectory={parsed.subdirectory}$", "", formatted + ) altered = parsed.format() != formatted if altered: if re.match(r"^git\+https?", url) and re.match( - r"^/?:[^0-9]", parsed.pathname + r"^/?:[^0-9]", parsed.pathname or "" ): normalized = re.sub(r"git\+(.*:[^:]+):(.*)", "\\1/\\2", url) elif re.match(r"^git\+file", url): @@ -250,18 +246,38 @@ def normalize_url(cls, url): # type: (str) -> GitUrl else: normalized = parsed.format() - return GitUrl(re.sub(r"#[^#]*$", "", normalized), parsed.rev) + return GitUrl( + re.sub(r"#[^#]*$", "", normalized), parsed.rev, parsed.subdirectory + ) @property - def config(self): # type: () -> GitConfig + def config(self) -> GitConfig: return self._config - def clone(self, repository, dest): # type: (str, Path) -> str + @property + def version(self) -> tuple[int, int, int]: + output = self.run("version") + version = re.search(r"(\d+)\.(\d+)\.(\d+)", output) + if not version: + return (0, 0, 0) + return int(version.group(1)), int(version.group(2)), int(version.group(3)) + + def clone(self, repository: str, dest: Path) -> str: self._check_parameter(repository) - - return self.run("clone", "--recurse-submodules", "--", repository, str(dest)) - - def checkout(self, rev, folder=None): # type: (str, Optional[Path]) -> str + cmd = [ + "clone", + "--filter=blob:none", + "--recurse-submodules", + "--", + repository, + str(dest), + ] + # Blobless clones introduced in Git 2.17 + if self.version < (2, 17): + cmd.remove("--filter=blob:none") + return self.run(*cmd) + + def checkout(self, rev: str, folder: Path | None = None) -> str: args = [] if folder is None and self._work_dir: folder = self._work_dir @@ -276,23 +292,15 @@ def checkout(self, rev, folder=None): # type: (str, Optional[Path]) -> str self._check_parameter(rev) - args += ["checkout", rev] + args += ["checkout", "--recurse-submodules", rev] return self.run(*args) - def rev_parse(self, rev, folder=None): # type: (str, Optional[Path]) -> str + def rev_parse(self, rev: str, folder: Path | None = None) -> str: args = [] if folder is None and self._work_dir: folder = self._work_dir - if folder: - args += [ - "--git-dir", - (folder / ".git").as_posix(), - "--work-tree", - folder.as_posix(), - ] - self._check_parameter(rev) # We need "^0" (an alternative to "^{commit}") to ensure 
that the @@ -305,9 +313,17 @@ def rev_parse(self, rev, folder=None): # type: (str, Optional[Path]) -> str # they should not be escaped. args += ["rev-parse", rev + "^0"] - return self.run(*args) + return self.run(*args, folder=folder) - def get_ignored_files(self, folder=None): # type: (Optional[Path]) -> list + def get_current_branch(self, folder: Path | None = None) -> str: + if folder is None and self._work_dir: + folder = self._work_dir + + output = self.run("symbolic-ref", "--short", "HEAD", folder=folder) + + return output.strip() + + def get_ignored_files(self, folder: Path | None = None) -> list[str]: args = [] if folder is None and self._work_dir: folder = self._work_dir @@ -325,7 +341,7 @@ def get_ignored_files(self, folder=None): # type: (Optional[Path]) -> list return output.strip().split("\n") - def remote_urls(self, folder=None): # type: (Optional[Path]) -> dict + def remote_urls(self, folder: Path | None = None) -> dict[str, str]: output = self.run( "config", "--get-regexp", r"remote\..*\.url", folder=folder ).strip() @@ -337,12 +353,12 @@ def remote_urls(self, folder=None): # type: (Optional[Path]) -> dict return urls - def remote_url(self, folder=None): # type: (Optional[Path]) -> str + def remote_url(self, folder: Path | None = None) -> str: urls = self.remote_urls(folder=folder) - return urls.get("remote.origin.url", urls[list(urls.keys())[0]]) + return urls.get("remote.origin.url", urls[next(iter(urls.keys()))]) - def run(self, *args, **kwargs): # type: (*Any, **Any) -> str + def run(self, *args: Any, **kwargs: Any) -> str: folder = kwargs.pop("folder", None) if folder: args = ( @@ -350,17 +366,20 @@ def run(self, *args, **kwargs): # type: (*Any, **Any) -> str (folder / ".git").as_posix(), "--work-tree", folder.as_posix(), - ) + args + *args, + ) - return decode( + return ( subprocess.check_output( - [executable()] + list(args), stderr=subprocess.STDOUT + [executable(), *list(args)], stderr=subprocess.STDOUT ) - ).strip() + .decode() + .strip() + ) - def _check_parameter(self, parameter): # type: (str) -> None + def _check_parameter(self, parameter: str) -> None: """ Checks a git parameter to avoid unwanted code execution. """ if parameter.strip().startswith("-"): - raise GitError("Invalid Git parameter: {}".format(parameter)) + raise GitError(f"Invalid Git parameter: {parameter}") diff --git a/conda_lock/_vendor/poetry/core/version/__init__.py b/conda_lock/_vendor/poetry/core/version/__init__.py index 62d0349f..e69de29b 100644 --- a/conda_lock/_vendor/poetry/core/version/__init__.py +++ b/conda_lock/_vendor/poetry/core/version/__init__.py @@ -1,45 +0,0 @@ -import operator - -from typing import Union - -from .exceptions import InvalidVersion -from .legacy_version import LegacyVersion -from .version import Version - - -OP_EQ = operator.eq -OP_LT = operator.lt -OP_LE = operator.le -OP_GT = operator.gt -OP_GE = operator.ge -OP_NE = operator.ne - -_trans_op = { - "=": OP_EQ, - "==": OP_EQ, - "<": OP_LT, - "<=": OP_LE, - ">": OP_GT, - ">=": OP_GE, - "!=": OP_NE, -} - - -def parse( - version, # type: str - strict=False, # type: bool -): # type:(...) -> Union[Version, LegacyVersion] - """ - Parse the given version string and return either a :class:`Version` object - or a LegacyVersion object depending on if the given version is - a valid PEP 440 version or a legacy version. - - If strict=True only PEP 440 versions will be accepted. 
- """ - try: - return Version(version) - except InvalidVersion: - if strict: - raise - - return LegacyVersion(version) diff --git a/conda_lock/_vendor/poetry/core/version/base.py b/conda_lock/_vendor/poetry/core/version/base.py deleted file mode 100644 index 826f8622..00000000 --- a/conda_lock/_vendor/poetry/core/version/base.py +++ /dev/null @@ -1,34 +0,0 @@ -from typing import Callable - - -class BaseVersion: - def __init__(self, version): # type: (str) -> None - self._version = str(version) - self._key = None - - def __hash__(self): # type: () -> int - return hash(self._key) - - def __lt__(self, other): # type: (BaseVersion) -> bool - return self._compare(other, lambda s, o: s < o) - - def __le__(self, other): # type: (BaseVersion) -> bool - return self._compare(other, lambda s, o: s <= o) - - def __eq__(self, other): # type: (BaseVersion) -> bool - return self._compare(other, lambda s, o: s == o) - - def __ge__(self, other): # type: (BaseVersion) -> bool - return self._compare(other, lambda s, o: s >= o) - - def __gt__(self, other): # type: (BaseVersion) -> bool - return self._compare(other, lambda s, o: s > o) - - def __ne__(self, other): # type: (BaseVersion) -> bool - return self._compare(other, lambda s, o: s != o) - - def _compare(self, other, method): # type: (BaseVersion, Callable) -> bool - if not isinstance(other, BaseVersion): - return NotImplemented - - return method(self._key, other._key) diff --git a/conda_lock/_vendor/poetry/core/version/exceptions.py b/conda_lock/_vendor/poetry/core/version/exceptions.py index 741b13ca..752fada6 100644 --- a/conda_lock/_vendor/poetry/core/version/exceptions.py +++ b/conda_lock/_vendor/poetry/core/version/exceptions.py @@ -1,3 +1,5 @@ -class InvalidVersion(ValueError): +from __future__ import annotations + +class InvalidVersion(ValueError): pass diff --git a/conda_lock/_vendor/poetry/core/version/grammars/__init__.py b/conda_lock/_vendor/poetry/core/version/grammars/__init__.py index e69de29b..caf504b4 100644 --- a/conda_lock/_vendor/poetry/core/version/grammars/__init__.py +++ b/conda_lock/_vendor/poetry/core/version/grammars/__init__.py @@ -0,0 +1,10 @@ +from __future__ import annotations + +from pathlib import Path + + +GRAMMAR_DIR = Path(__file__).parent + +GRAMMAR_PEP_508_CONSTRAINTS = GRAMMAR_DIR / "pep508.lark" + +GRAMMAR_PEP_508_MARKERS = GRAMMAR_DIR / "markers.lark" diff --git a/conda_lock/_vendor/poetry/core/version/grammars/markers.lark b/conda_lock/_vendor/poetry/core/version/grammars/markers.lark index 189ab02a..e0079c2a 100644 --- a/conda_lock/_vendor/poetry/core/version/grammars/markers.lark +++ b/conda_lock/_vendor/poetry/core/version/grammars/markers.lark @@ -15,7 +15,6 @@ MARKER_NAME: "implementation_version" | "platform_system" | "python_version" | "sys_platform" - | "sys_platform" | "os_name" | "os.name" | "sys.platform" diff --git a/conda_lock/_vendor/poetry/core/version/grammars/pep508.lark b/conda_lock/_vendor/poetry/core/version/grammars/pep508.lark index 0f32ff36..1b10d6ce 100644 --- a/conda_lock/_vendor/poetry/core/version/grammars/pep508.lark +++ b/conda_lock/_vendor/poetry/core/version/grammars/pep508.lark @@ -10,7 +10,7 @@ _single_version: LEGACY_VERSION_CONSTRAINT _url: _AT URI marker_spec: marker -NAME: /[a-zA-Z][a-zA-Z0-9-_.]*/ +NAME: /[a-zA-Z0-9][a-zA-Z0-9-_.]*/ FULL_NAME: NAME EXTRA: NAME VERSION_CONSTRAINT: 
/(~=|==|!=|<=|>=|<|>|===)((?:(?<====)\s*[^\s]*)|(?:(?<===|!=)\s*v?(?:[0-9]+!)?[0-9]+(?:\.[0-9]+)*(?:[-_.]?(a|b|c|rc|alpha|beta|pre|preview)[-_.]?[0-9]*)?(?:(?:-[0-9]+)|(?:[-_.]?(post|rev|r)[-_.]?[0-9]*))?(?:(?:[-_.]?dev[-_.]?[0-9]*)?(?:\+[a-z0-9]+(?:[-_.][a-z0-9]+)*)? # local|\.\*)?)|(?:(?<=~=)\s*v?(?:[0-9]+!)?[0-9]+(?:\.[0-9]+)+(?:[-_.]?(a|b|c|rc|alpha|beta|pre|preview)[-_.]?[0-9]*)?(?:(?:-[0-9]+)|(?:[-_.]?(post|rev|r)[-_.]?[0-9]*))?(?:[-_.]?dev[-_.]?[0-9]*)?)|(?:(? str +def format_python_constraint(constraint: VersionConstraint) -> str: """ This helper will help in transforming disjunctive constraint into proper constraint. """ if isinstance(constraint, Version): if constraint.precision >= 3: - return "=={}".format(str(constraint)) + return f"=={constraint}" # Transform 3.6 or 3 if constraint.precision == 2: # 3.6 - constraint = parse_constraint( - "~{}.{}".format(constraint.major, constraint.minor) - ) + constraint = parse_constraint(f"~{constraint.major}.{constraint.minor}") else: - constraint = parse_constraint("^{}.0".format(constraint.major)) + constraint = parse_constraint(f"^{constraint.major}.0") if not isinstance(constraint, VersionUnion): return str(constraint) diff --git a/conda_lock/_vendor/poetry/core/version/legacy_version.py b/conda_lock/_vendor/poetry/core/version/legacy_version.py deleted file mode 100644 index adaa53d7..00000000 --- a/conda_lock/_vendor/poetry/core/version/legacy_version.py +++ /dev/null @@ -1,92 +0,0 @@ -import re - -from typing import Tuple - -from .base import BaseVersion - - -class LegacyVersion(BaseVersion): - def __init__(self, version): # type: (str) -> None - self._version = str(version) - self._key = _legacy_cmpkey(self._version) - - def __str__(self): # type: () -> str - return self._version - - def __repr__(self): # type: () -> str - return "".format(repr(str(self))) - - @property - def public(self): # type: () -> str - return self._version - - @property - def base_version(self): # type: () -> str - return self._version - - @property - def local(self): # type: () -> None - return None - - @property - def is_prerelease(self): # type: () -> bool - return False - - @property - def is_postrelease(self): # type: () -> bool - return False - - -_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE) - -_legacy_version_replacement_map = { - "pre": "c", - "preview": "c", - "-": "final-", - "rc": "c", - "dev": "@", -} - - -def _parse_version_parts(s): # type: (str) -> str - for part in _legacy_version_component_re.split(s): - part = _legacy_version_replacement_map.get(part, part) - - if not part or part == ".": - continue - - if part[:1] in "0123456789": - # pad for numeric comparison - yield part.zfill(8) - else: - yield "*" + part - - # ensure that alpha/beta/candidate are before final - yield "*final" - - -def _legacy_cmpkey(version): # type: (str) -> Tuple[int, Tuple[str]] - # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch - # greater than or equal to 0. This will effectively put the LegacyVersion, - # which uses the defacto standard originally implemented by setuptools, - # as before all PEP 440 versions. - epoch = -1 - - # This scheme is taken from pkg_resources.parse_version setuptools prior to - # it's adoption of the packaging library. 
- parts = [] - for part in _parse_version_parts(version.lower()): - if part.startswith("*"): - # remove "-" before a prerelease tag - if part < "*final": - while parts and parts[-1] == "*final-": - parts.pop() - - # remove trailing zeros from each series of numeric parts - while parts and parts[-1] == "00000000": - parts.pop() - - parts.append(part) - parts = tuple(parts) - - return epoch, parts diff --git a/conda_lock/_vendor/poetry/core/version/markers.py b/conda_lock/_vendor/poetry/core/version/markers.py index d432d9d4..d43b3620 100644 --- a/conda_lock/_vendor/poetry/core/version/markers.py +++ b/conda_lock/_vendor/poetry/core/version/markers.py @@ -1,24 +1,35 @@ -import os +from __future__ import annotations + +import functools +import itertools import re +from abc import ABC +from abc import abstractmethod from typing import TYPE_CHECKING from typing import Any -from typing import Dict -from typing import Iterator -from typing import List +from typing import ClassVar +from typing import Generic +from typing import TypeVar from typing import Union -from lark import Lark -from lark import Token -from lark import Tree +from packaging.utils import canonicalize_name + +from conda_lock._vendor.poetry.core.constraints.generic import BaseConstraint +from conda_lock._vendor.poetry.core.constraints.generic import Constraint +from conda_lock._vendor.poetry.core.constraints.generic import MultiConstraint +from conda_lock._vendor.poetry.core.constraints.generic import UnionConstraint +from conda_lock._vendor.poetry.core.constraints.version import VersionConstraint +from conda_lock._vendor.poetry.core.constraints.version.exceptions import ParseConstraintError +from conda_lock._vendor.poetry.core.version.grammars import GRAMMAR_PEP_508_MARKERS +from conda_lock._vendor.poetry.core.version.parser import Parser if TYPE_CHECKING: - from conda_lock._vendor.poetry.core.semver import VersionTypes # noqa + from collections.abc import Callable + from collections.abc import Iterable -MarkerTypes = Union[ - "AnyMarker", "EmptyMarker", "SingleMarker", "MultiMarker", "MarkerUnion" -] + from lark import Tree class InvalidMarker(ValueError): @@ -48,81 +59,104 @@ class UndefinedEnvironmentName(ValueError): "platform.python_implementation": "platform_python_implementation", "python_implementation": "platform_python_implementation", } -_parser = Lark.open( - os.path.join(os.path.dirname(__file__), "grammars", "markers.lark"), parser="lalr" -) +PYTHON_VERSION_MARKERS = {"python_version", "python_full_version"} -class BaseMarker(object): - def intersect(self, other): # type: (BaseMarker) -> BaseMarker - raise NotImplementedError() +# Parser: PEP 508 Environment Markers +_parser = Parser(GRAMMAR_PEP_508_MARKERS, "lalr") - def union(self, other): # type: (BaseMarker) -> BaseMarker - raise NotImplementedError() - def is_any(self): # type: () -> bool +class BaseMarker(ABC): + @property + def complexity(self) -> tuple[int, int]: + """ + first element: number of single markers, where SingleMarkerLike count as + actual number + second element: number of single markers, where SingleMarkerLike count as 1 + """ + return 1, 1 + + @abstractmethod + def intersect(self, other: BaseMarker) -> BaseMarker: + raise NotImplementedError + + @abstractmethod + def union(self, other: BaseMarker) -> BaseMarker: + raise NotImplementedError + + def is_any(self) -> bool: return False - def is_empty(self): # type: () -> bool + def is_empty(self) -> bool: return False - def validate(self, environment): # type: (Dict[str, Any]) -> bool - raise 
NotImplementedError() + @abstractmethod + def validate(self, environment: dict[str, Any] | None) -> bool: + raise NotImplementedError - def without_extras(self): # type: () -> BaseMarker - raise NotImplementedError() + @abstractmethod + def without_extras(self) -> BaseMarker: + raise NotImplementedError - def exclude(self, marker_name): # type: (str) -> BaseMarker - raise NotImplementedError() + @abstractmethod + def exclude(self, marker_name: str) -> BaseMarker: + raise NotImplementedError - def only(self, *marker_names): # type: (str) -> BaseMarker - raise NotImplementedError() + @abstractmethod + def only(self, *marker_names: str) -> BaseMarker: + raise NotImplementedError - def invert(self): # type: () -> BaseMarker - raise NotImplementedError() + @abstractmethod + def invert(self) -> BaseMarker: + raise NotImplementedError - def __repr__(self): # type: () -> str - return "<{} {}>".format(self.__class__.__name__, str(self)) + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self}>" + + @abstractmethod + def __hash__(self) -> int: + raise NotImplementedError + + @abstractmethod + def __eq__(self, other: object) -> bool: + raise NotImplementedError class AnyMarker(BaseMarker): - def intersect(self, other): # type: (MarkerTypes) -> MarkerTypes + def intersect(self, other: BaseMarker) -> BaseMarker: return other - def union(self, other): # type: (MarkerTypes) -> MarkerTypes + def union(self, other: BaseMarker) -> BaseMarker: return self - def is_any(self): # type: () -> bool + def is_any(self) -> bool: return True - def is_empty(self): # type: () -> bool - return False - - def validate(self, environment): # type: (Dict[str, Any]) -> bool + def validate(self, environment: dict[str, Any] | None) -> bool: return True - def without_extras(self): # type: () -> MarkerTypes + def without_extras(self) -> BaseMarker: return self - def exclude(self, marker_name): # type: (str) -> MarkerTypes + def exclude(self, marker_name: str) -> BaseMarker: return self - def only(self, *marker_names): # type: (*str) -> MarkerTypes + def only(self, *marker_names: str) -> BaseMarker: return self - def invert(self): # type: () -> EmptyMarker + def invert(self) -> EmptyMarker: return EmptyMarker() - def __str__(self): # type: () -> str + def __str__(self) -> str: return "" - def __repr__(self): # type: () -> str + def __repr__(self) -> str: return "" - def __hash__(self): # type: () -> int - return hash(("", "")) + def __hash__(self) -> int: + return hash("any") - def __eq__(self, other): # type: (MarkerTypes) -> bool + def __eq__(self, other: object) -> bool: if not isinstance(other, BaseMarker): return NotImplemented @@ -130,86 +164,195 @@ def __eq__(self, other): # type: (MarkerTypes) -> bool class EmptyMarker(BaseMarker): - def intersect(self, other): # type: (MarkerTypes) -> MarkerTypes + def intersect(self, other: BaseMarker) -> BaseMarker: return self - def union(self, other): # type: (MarkerTypes) -> MarkerTypes + def union(self, other: BaseMarker) -> BaseMarker: return other - def is_any(self): # type: () -> bool - return False - - def is_empty(self): # type: () -> bool + def is_empty(self) -> bool: return True - def validate(self, environment): # type: (Dict[str, Any]) -> bool + def validate(self, environment: dict[str, Any] | None) -> bool: return False - def without_extras(self): # type: () -> BaseMarker + def without_extras(self) -> BaseMarker: return self - def exclude(self, marker_name): # type: (str) -> EmptyMarker + def exclude(self, marker_name: str) -> EmptyMarker: return self - 
def only(self, *marker_names): # type: (*str) -> EmptyMarker + def only(self, *marker_names: str) -> BaseMarker: return self - def invert(self): # type: () -> AnyMarker + def invert(self) -> AnyMarker: return AnyMarker() - def __str__(self): # type: () -> str + def __str__(self) -> str: return "" - def __repr__(self): # type: () -> str + def __repr__(self) -> str: return "" - def __hash__(self): # type: () -> int - return hash(("", "")) + def __hash__(self) -> int: + return hash("empty") - def __eq__(self, other): # type: (MarkerTypes) -> bool + def __eq__(self, other: object) -> bool: if not isinstance(other, BaseMarker): return NotImplemented return isinstance(other, EmptyMarker) -class SingleMarker(BaseMarker): +SingleMarkerConstraint = TypeVar( + "SingleMarkerConstraint", bound=Union[BaseConstraint, VersionConstraint] +) + + +class SingleMarkerLike(BaseMarker, ABC, Generic[SingleMarkerConstraint]): + def __init__(self, name: str, constraint: SingleMarkerConstraint) -> None: + from conda_lock._vendor.poetry.core.constraints.generic import ( + parse_constraint as parse_generic_constraint, + ) + from conda_lock._vendor.poetry.core.constraints.version import ( + parse_constraint as parse_version_constraint, + ) + + self._name = ALIASES.get(name, name) + self._constraint = constraint + self._parser: Callable[[str], BaseConstraint | VersionConstraint] + if isinstance(constraint, VersionConstraint): + self._parser = parse_version_constraint + else: + self._parser = parse_generic_constraint + + @property + def name(self) -> str: + return self._name + + @property + def constraint(self) -> SingleMarkerConstraint: + return self._constraint + + @property + def _key(self) -> tuple[object, ...]: + return self._name, self._constraint + + def validate(self, environment: dict[str, Any] | None) -> bool: + if environment is None: + return True + + if self._name not in environment: + return True + + # "extra" is special because it can have multiple values at the same time. + # "extra == 'a'" will be true if "a" is one of the active extras. + # "extra != 'a'" will be true if "a" is not one of the active extras. + # Further, extra names are normalized for comparison. + if self._name == "extra": + extras = environment["extra"] + if isinstance(extras, str): + extras = {extras} + extras = {canonicalize_name(extra) for extra in extras} + assert isinstance(self._constraint, Constraint) + normalized_value = canonicalize_name(self._constraint.value) + if self._constraint.operator == "==": + return normalized_value in extras + assert self._constraint.operator == "!=" + return normalized_value not in extras + + # The type of constraint returned by the parser matches our constraint: either + # both are BaseConstraint or both are VersionConstraint. But it's hard for mypy + # to know that. 
+ constraint = self._parser(environment[self._name]) + return self._constraint.allows(constraint) # type: ignore[arg-type] + + def without_extras(self) -> BaseMarker: + return self.exclude("extra") + + def exclude(self, marker_name: str) -> BaseMarker: + if self.name == marker_name: + return AnyMarker() + + return self + + def only(self, *marker_names: str) -> BaseMarker: + if self.name not in marker_names: + return AnyMarker() + + return self + + def intersect(self, other: BaseMarker) -> BaseMarker: + if isinstance(other, SingleMarkerLike): + merged = _merge_single_markers(self, other, MultiMarker) + if merged is not None: + return merged + + return MultiMarker(self, other) + + return other.intersect(self) + + def union(self, other: BaseMarker) -> BaseMarker: + if isinstance(other, SingleMarkerLike): + merged = _merge_single_markers(self, other, MarkerUnion) + if merged is not None: + return merged + + return MarkerUnion(self, other) + + return other.union(self) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, SingleMarkerLike): + return NotImplemented + + return self._key == other._key + + def __hash__(self) -> int: + return hash(self._key) + +class SingleMarker(SingleMarkerLike[Union[BaseConstraint, VersionConstraint]]): _CONSTRAINT_RE = re.compile(r"(?i)^(~=|!=|>=?|<=?|==?=?|in|not in)?\s*(.+)$") - _VERSION_LIKE_MARKER_NAME = { + VALUE_SEPARATOR_RE = re.compile("[ ,|]+") + _VERSION_LIKE_MARKER_NAME: ClassVar[set[str]] = { "python_version", "python_full_version", "platform_release", } def __init__( - self, name, constraint - ): # type: (str, Union[str, "VersionTypes"]) -> None - from conda_lock._vendor.poetry.core.packages.constraints import ( + self, name: str, constraint: str | BaseConstraint | VersionConstraint + ) -> None: + from conda_lock._vendor.poetry.core.constraints.generic import ( parse_constraint as parse_generic_constraint, ) - from conda_lock._vendor.poetry.core.semver import parse_constraint + from conda_lock._vendor.poetry.core.constraints.version import parse_marker_version_constraint - self._name = ALIASES.get(name, name) - self._constraint_string = str(constraint) + parsed_constraint: BaseConstraint | VersionConstraint + parser: Callable[[str], BaseConstraint | VersionConstraint] + original_constraint_string = constraint_string = str(constraint) # Extract operator and value - m = self._CONSTRAINT_RE.match(self._constraint_string) + m = self._CONSTRAINT_RE.match(constraint_string) + if m is None: + raise InvalidMarker(f"Invalid marker for '{name}': {constraint_string}") + self._operator = m.group(1) if self._operator is None: self._operator = "==" self._value = m.group(2) - self._parser = parse_generic_constraint + parser = parse_generic_constraint if name in self._VERSION_LIKE_MARKER_NAME: - self._parser = parse_constraint + parser = parse_marker_version_constraint if self._operator in {"in", "not in"}: versions = [] - for v in re.split("[ ,]+", self._value): + for v in self.VALUE_SEPARATOR_RE.split(self._value): split = v.split(".") - if len(split) in [1, 2]: + if len(split) in (1, 2): split.append("*") op = "" if self._operator == "in" else "!=" else: @@ -221,101 +364,37 @@ def __init__( if self._operator == "in": glue = " || " - self._constraint = self._parser(glue.join(versions)) - else: - self._constraint = self._parser(self._constraint_string) + constraint_string = glue.join(versions) else: # if we have a in/not in operator we split the constraint # into a union/multi-constraint of single constraint - constraint_string = 
self._constraint_string if self._operator in {"in", "not in"}: op, glue = ("==", " || ") if self._operator == "in" else ("!=", ", ") - values = re.split("[ ,]+", self._value) - constraint_string = glue.join( - ("{} {}".format(op, value) for value in values) - ) + values = self.VALUE_SEPARATOR_RE.split(self._value) + constraint_string = glue.join(f"{op} {value}" for value in values) - self._constraint = self._parser(constraint_string) + try: + parsed_constraint = parser(constraint_string) + except ParseConstraintError as e: + raise InvalidMarker( + f"Invalid marker for '{name}': {original_constraint_string}" + ) from e - @property - def name(self): # type: () -> str - return self._name - - @property - def constraint_string(self): # type: () -> str - if self._operator in {"in", "not in"}: - return "{} {}".format(self._operator, self._value) - - return self._constraint_string - - @property - def constraint(self): # type: () -> "VersionTypes" - return self._constraint + super().__init__(name, parsed_constraint) @property - def operator(self): # type: () -> str + def operator(self) -> str: return self._operator @property - def value(self): # type: () -> str + def value(self) -> str: return self._value - def intersect(self, other): # type: (MarkerTypes) -> MarkerTypes - if isinstance(other, SingleMarker): - if other.name != self.name: - return MultiMarker(self, other) - - if self == other: - return self - - if self._operator in {"in", "not in"} or other.operator in {"in", "not in"}: - return MultiMarker.of(self, other) - - new_constraint = self._constraint.intersect(other.constraint) - if new_constraint.is_empty(): - return EmptyMarker() - - if new_constraint == self._constraint or new_constraint == other.constraint: - return SingleMarker(self._name, new_constraint) - - return MultiMarker.of(self, other) - - return other.intersect(self) - - def union(self, other): # type: (MarkerTypes) -> MarkerTypes - if isinstance(other, SingleMarker): - if self == other: - return self - - return MarkerUnion.of(self, other) - - return other.union(self) - - def validate(self, environment): # type: (Dict[str, Any]) -> bool - if environment is None: - return True - - if self._name not in environment: - return True - - return self._constraint.allows(self._parser(environment[self._name])) - - def without_extras(self): # type: () -> MarkerTypes - return self.exclude("extra") - - def exclude(self, marker_name): # type: (str) -> MarkerTypes - if self.name == marker_name: - return AnyMarker() - - return self - - def only(self, *marker_names): # type: (*str) -> Union[SingleMarker, EmptyMarker] - if self.name not in marker_names: - return EmptyMarker() - - return self + @property + def _key(self) -> tuple[object, ...]: + return self._name, self._operator, self._value - def invert(self): # type: () -> MarkerTypes + def invert(self) -> BaseMarker: if self._operator in ("===", "=="): operator = "!=" elif self._operator == "!=": @@ -336,9 +415,9 @@ def invert(self): # type: () -> MarkerTypes # This one is more tricky to handle # since it's technically a multi marker # so the inverse will be a union of inverse - from conda_lock._vendor.poetry.core.semver import VersionRange + from conda_lock._vendor.poetry.core.constraints.version import VersionRangeConstraint - if not isinstance(self._constraint, VersionRange): + if not isinstance(self._constraint, VersionRangeConstraint): # The constraint must be a version range, otherwise # it's an internal error raise RuntimeError( @@ -346,136 +425,237 @@ def invert(self): # type: () -> 
MarkerTypes ) min_ = self._constraint.min - min_operator = ">=" if self._constraint.include_min else "<" + min_operator = ">=" if self._constraint.include_min else ">" max_ = self._constraint.max max_operator = "<=" if self._constraint.include_max else "<" - return MultiMarker.of( - SingleMarker(self._name, "{} {}".format(min_operator, min_)), - SingleMarker(self._name, "{} {}".format(max_operator, max_)), + return MultiMarker( + SingleMarker(self._name, f"{min_operator} {min_}"), + SingleMarker(self._name, f"{max_operator} {max_}"), ).invert() else: # We should never go there - raise RuntimeError("Invalid marker operator '{}'".format(self._operator)) + raise RuntimeError(f"Invalid marker operator '{self._operator}'") - return parse_marker("{} {} '{}'".format(self._name, operator, self._value)) + return parse_marker(f"{self._name} {operator} '{self._value}'") - def __eq__(self, other): # type: (MarkerTypes) -> bool + def __eq__(self, other: object) -> bool: if not isinstance(other, SingleMarker): - return False + return NotImplemented + + return self._key == other._key + + def __hash__(self) -> int: + return hash(self._key) + + def __str__(self) -> str: + return f'{self._name} {self._operator} "{self._value}"' + + +class AtomicMultiMarker(SingleMarkerLike[MultiConstraint]): + def __init__(self, name: str, constraint: MultiConstraint) -> None: + assert all(c.operator == "!=" for c in constraint.constraints) + super().__init__(name, constraint) + self._values: list[str] = [] + + @property + def complexity(self) -> tuple[int, int]: + return len(self._constraint.constraints), 1 + + def invert(self) -> BaseMarker: + return AtomicMarkerUnion(self._name, self._constraint.invert()) + + def expand(self) -> MultiMarker: + return MultiMarker( + *(SingleMarker(self._name, c) for c in self._constraint.constraints) + ) + + def __str__(self) -> str: + return " and ".join( + f'{self._name} != "{c.value}"' for c in self._constraint.constraints + ) - return self._name == other.name and self._constraint == other.constraint - def __hash__(self): # type: () -> int - return hash((self._name, self._constraint_string)) +class AtomicMarkerUnion(SingleMarkerLike[UnionConstraint]): + def __init__(self, name: str, constraint: UnionConstraint) -> None: + assert all( + isinstance(c, Constraint) and c.operator == "==" + for c in constraint.constraints + ) + super().__init__(name, constraint) + + @property + def complexity(self) -> tuple[int, int]: + return len(self._constraint.constraints), 1 + + def invert(self) -> BaseMarker: + return AtomicMultiMarker(self._name, self._constraint.invert()) - def __str__(self): # type: () -> str - return '{} {} "{}"'.format(self._name, self._operator, self._value) + def expand(self) -> MarkerUnion: + return MarkerUnion( + *(SingleMarker(self._name, c) for c in self._constraint.constraints) + ) + + def __str__(self) -> str: + # In __init__ we've made sure that we have a UnionConstraint that + # contains only elements of type Constraint (instead of BaseConstraint) + # but mypy can't see that. 
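        # Illustrative sketch of how the two atomic classes above arise
        # (doctest-style; assumes the vendored module path used in this diff):
        #
        #     >>> from conda_lock._vendor.poetry.core.version.markers import parse_marker
        #     >>> str(parse_marker('sys_platform == "linux"').union(
        #     ...     parse_marker('sys_platform == "darwin"')))
        #     'sys_platform == "linux" or sys_platform == "darwin"'
        #     >>> str(parse_marker('sys_platform != "win32"').intersect(
        #     ...     parse_marker('sys_platform != "cygwin"')))
        #     'sys_platform != "win32" and sys_platform != "cygwin"'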
+ return " or ".join( + f'{self._name} == "{c.value}"' # type: ignore[attr-defined] + for c in self._constraint.constraints + ) def _flatten_markers( - markers, flatten_class -): # type: (Iterator[Union[MarkerUnion, MultiMarker]], Any) -> List[MarkerTypes] + markers: Iterable[BaseMarker], + flatten_class: type[MarkerUnion | MultiMarker], +) -> list[BaseMarker]: flattened = [] for marker in markers: if isinstance(marker, flatten_class): - flattened += _flatten_markers(marker.markers, flatten_class) - else: + for _marker in _flatten_markers( + marker.markers, # type: ignore[attr-defined] + flatten_class, + ): + if _marker not in flattened: + flattened.append(_marker) + + elif marker not in flattened: flattened.append(marker) return flattened class MultiMarker(BaseMarker): - def __init__(self, *markers): # type: (*MarkerTypes) -> None - self._markers = [] + def __init__(self, *markers: BaseMarker) -> None: + self._markers = tuple(_flatten_markers(markers, MultiMarker)) - markers = _flatten_markers(markers, MultiMarker) + @property + def markers(self) -> tuple[BaseMarker, ...]: + return self._markers - for m in markers: - self._markers.append(m) + @property + def complexity(self) -> tuple[int, int]: + return tuple(sum(c) for c in zip(*(m.complexity for m in self._markers))) @classmethod - def of(cls, *markers): # type: (*MarkerTypes) -> MarkerTypes - new_markers = [] - markers = _flatten_markers(markers, MultiMarker) - - for marker in markers: - if marker in new_markers: - continue + def of(cls, *markers: BaseMarker) -> BaseMarker: + new_markers = _flatten_markers(markers, MultiMarker) + old_markers: list[BaseMarker] = [] + + while old_markers != new_markers: + old_markers = new_markers + new_markers = [] + for marker in old_markers: + if marker in new_markers: + continue - if marker.is_any(): - continue + if marker.is_any(): + continue - if isinstance(marker, SingleMarker): intersected = False for i, mark in enumerate(new_markers): - if ( - not isinstance(mark, SingleMarker) - or isinstance(mark, SingleMarker) - and mark.name != marker.name - ): - continue - - intersection = mark.constraint.intersect(marker.constraint) - if intersection == mark.constraint: - intersected = True - elif intersection == marker.constraint: - new_markers[i] = marker - intersected = True - elif intersection.is_empty(): - return EmptyMarker() + # If we have a SingleMarker then with any luck after intersection + # it'll become another SingleMarker. + if isinstance(mark, SingleMarkerLike): + new_marker = mark.intersect(marker) + if new_marker.is_empty(): + return EmptyMarker() + + if isinstance(new_marker, SingleMarkerLike): + new_markers[i] = new_marker + intersected = True + break + + # If we have a MarkerUnion then we can look for the simplifications + # implemented in intersect_simplify(). 
+ elif isinstance(mark, MarkerUnion): + intersection = mark.intersect_simplify(marker) + if intersection is not None: + new_markers[i] = intersection + intersected = True + break if intersected: + # flatten again because intersect_simplify may return a multi + new_markers = _flatten_markers(new_markers, MultiMarker) continue - new_markers.append(marker) + new_markers.append(marker) - if any(m.is_empty() for m in new_markers) or not new_markers: + if any(m.is_empty() for m in new_markers): return EmptyMarker() - if len(new_markers) == 1 and new_markers[0].is_any(): + if not new_markers: return AnyMarker() + if len(new_markers) == 1: + return new_markers[0] + return MultiMarker(*new_markers) - @property - def markers(self): # type: () -> List[MarkerTypes] - return self._markers + def intersect(self, other: BaseMarker) -> BaseMarker: + return intersection(self, other) + + def union(self, other: BaseMarker) -> BaseMarker: + return union(self, other) + + def union_simplify(self, other: BaseMarker) -> BaseMarker | None: + """ + Finds a couple of easy simplifications for union on MultiMarkers: - def intersect(self, other): # type: (MarkerTypes) -> MarkerTypes - if other.is_any(): - return self + - union with any marker that appears as part of the multi is just that + marker - if other.is_empty(): + - union between two multimarkers where one is contained by the other is just + the larger of the two + + - union between two multimarkers where there are some common markers + and the union of unique markers is a single marker + """ + if other in self._markers: return other - new_markers = self._markers + [other] + if isinstance(other, MultiMarker): + our_markers = set(self.markers) + their_markers = set(other.markers) - return MultiMarker.of(*new_markers) + if our_markers.issubset(their_markers): + return self - def union(self, other): # type: (MarkerTypes) -> MarkerTypes - if isinstance(other, (SingleMarker, MultiMarker)): - return MarkerUnion.of(self, other) + if their_markers.issubset(our_markers): + return other - return other.union(self) + shared_markers = our_markers.intersection(their_markers) + if not shared_markers: + return None - def validate(self, environment): # type: (Dict[str, Any]) -> bool - for m in self._markers: - if not m.validate(environment): - return False + unique_markers = our_markers - their_markers + other_unique_markers = their_markers - our_markers + unique_union = MultiMarker(*unique_markers).union( + MultiMarker(*other_unique_markers) + ) + if isinstance(unique_union, (SingleMarkerLike, AnyMarker)): + # Use list instead of set for deterministic order. 
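            # Illustrative sketch of this factoring (doctest-style; assumes the
            # vendored parse_marker):
            #
            #     >>> str(parse_marker(
            #     ...     'python_version >= "3.8" and sys_platform == "linux"').union(
            #     ...     parse_marker('python_version < "3.8" and sys_platform == "linux"')))
            #     'sys_platform == "linux"'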
+ common_markers = [ + marker for marker in self.markers if marker in shared_markers + ] + return unique_union.intersect(MultiMarker(*common_markers)) - return True + return None + + def validate(self, environment: dict[str, Any] | None) -> bool: + return all(m.validate(environment) for m in self._markers) - def without_extras(self): # type: () -> MarkerTypes + def without_extras(self) -> BaseMarker: return self.exclude("extra") - def exclude(self, marker_name): # type: (str) -> MarkerTypes + def exclude(self, marker_name: str) -> BaseMarker: new_markers = [] for m in self._markers: - if isinstance(m, SingleMarker) and m.name == marker_name: + if isinstance(m, SingleMarkerLike) and m.name == marker_name: # The marker is not relevant since it must be excluded continue @@ -486,216 +666,197 @@ def exclude(self, marker_name): # type: (str) -> MarkerTypes return self.of(*new_markers) - def only(self, *marker_names): # type: (*str) -> MarkerTypes - new_markers = [] + def only(self, *marker_names: str) -> BaseMarker: + return self.of(*(m.only(*marker_names) for m in self._markers)) - for m in self._markers: - if isinstance(m, SingleMarker) and m.name not in marker_names: - # The marker is not relevant since it's not one we want - continue - - marker = m.only(*marker_names) - - if not marker.is_empty(): - new_markers.append(marker) - - return self.of(*new_markers) - - def invert(self): # type: () -> MarkerTypes + def invert(self) -> BaseMarker: markers = [marker.invert() for marker in self._markers] - return MarkerUnion.of(*markers) + return MarkerUnion(*markers) - def __eq__(self, other): # type: (MarkerTypes) -> bool + def __eq__(self, other: object) -> bool: if not isinstance(other, MultiMarker): return False - return set(self._markers) == set(other.markers) - - def __hash__(self): # type: () -> int - h = hash("multi") - for m in self._markers: - h |= hash(m) + return self._markers == other.markers - return h + def __hash__(self) -> int: + return hash(("multi", *self._markers)) - def __str__(self): # type: () -> str + def __str__(self) -> str: elements = [] for m in self._markers: - if isinstance(m, SingleMarker): - elements.append(str(m)) - elif isinstance(m, MultiMarker): + if isinstance(m, (SingleMarker, MultiMarker)): elements.append(str(m)) else: - elements.append("({})".format(str(m))) + elements.append(f"({m})") return " and ".join(elements) class MarkerUnion(BaseMarker): - def __init__(self, *markers): # type: (*MarkerTypes) -> None - self._markers = list(markers) + def __init__(self, *markers: BaseMarker) -> None: + self._markers = tuple(_flatten_markers(markers, MarkerUnion)) @property - def markers(self): # type: () -> List[MarkerTypes] + def markers(self) -> tuple[BaseMarker, ...]: return self._markers - @classmethod - def of(cls, *markers): # type: (*BaseMarker) -> MarkerTypes - flattened_markers = _flatten_markers(markers, MarkerUnion) - - markers = [] - for marker in flattened_markers: - if marker in markers: - continue + @property + def complexity(self) -> tuple[int, int]: + return tuple(sum(c) for c in zip(*(m.complexity for m in self._markers))) - if isinstance(marker, SingleMarker) and marker.name == "python_version": - intersected = False - for i, mark in enumerate(markers): - if ( - not isinstance(mark, SingleMarker) - or isinstance(mark, SingleMarker) - and mark.name != marker.name - ): - continue - - intersection = mark.constraint.union(marker.constraint) - if intersection == mark.constraint: - intersected = True - break - elif intersection == marker.constraint: - 
markers[i] = marker - intersected = True - break + @classmethod + def of(cls, *markers: BaseMarker) -> BaseMarker: + new_markers = _flatten_markers(markers, MarkerUnion) + old_markers: list[BaseMarker] = [] + + while old_markers != new_markers: + old_markers = new_markers + new_markers = [] + for marker in old_markers: + if marker in new_markers: + continue - if intersected: + if marker.is_empty(): continue - markers.append(marker) + included = False + for i, mark in enumerate(new_markers): + # If we have a SingleMarker then with any luck after union it'll + # become another SingleMarker. + if isinstance(mark, SingleMarkerLike): + new_marker = mark.union(marker) + if new_marker.is_any(): + return AnyMarker() + + if isinstance(new_marker, SingleMarkerLike): + new_markers[i] = new_marker + included = True + break + + # If we have a MultiMarker then we can look for the simplifications + # implemented in union_simplify(). + elif isinstance(mark, MultiMarker): + union = mark.union_simplify(marker) + if union is not None: + new_markers[i] = union + included = True + break + + if included: + # flatten again because union_simplify may return a union + new_markers = _flatten_markers(new_markers, MarkerUnion) + continue - if any(m.is_any() for m in markers): - return AnyMarker() + new_markers.append(marker) - if not markers: + if any(m.is_any() for m in new_markers): return AnyMarker() - if len(markers) == 1: - return markers[0] + if not new_markers: + return EmptyMarker() - return MarkerUnion(*markers) + if len(new_markers) == 1: + return new_markers[0] - def append(self, marker): # type: (MarkerTypes) -> None - if marker in self._markers: - return + return MarkerUnion(*new_markers) - self._markers.append(marker) + def intersect(self, other: BaseMarker) -> BaseMarker: + return intersection(self, other) - def intersect(self, other): # type: (MarkerTypes) -> MarkerTypes - if other.is_any(): - return self + def union(self, other: BaseMarker) -> BaseMarker: + return union(self, other) - if other.is_empty(): - return other + def intersect_simplify(self, other: BaseMarker) -> BaseMarker | None: + """ + Finds a couple of easy simplifications for intersection on MarkerUnions: - new_markers = [] - if isinstance(other, (SingleMarker, MultiMarker)): - for marker in self._markers: - intersection = marker.intersect(other) + - intersection with any marker that appears as part of the union is just + that marker - if not intersection.is_empty(): - new_markers.append(intersection) - elif isinstance(other, MarkerUnion): - for our_marker in self._markers: - for their_marker in other.markers: - intersection = our_marker.intersect(their_marker) + - intersection between two markerunions where one is contained by the other + is just the smaller of the two - if not intersection.is_empty(): - new_markers.append(intersection) + - intersection between two markerunions where there are some common markers + and the intersection of unique markers is not a single marker + """ + if other in self._markers: + return other - return MarkerUnion.of(*new_markers) + if isinstance(other, MarkerUnion): + our_markers = set(self.markers) + their_markers = set(other.markers) - def union(self, other): # type: (MarkerTypes) -> MarkerTypes - if other.is_any(): - return other + if our_markers.issubset(their_markers): + return self - if other.is_empty(): - return self + if their_markers.issubset(our_markers): + return other - new_markers = self._markers + [other] + shared_markers = our_markers.intersection(their_markers) + if not shared_markers: 
+ return None - return MarkerUnion.of(*new_markers) + unique_markers = our_markers - their_markers + other_unique_markers = their_markers - our_markers + unique_intersection = MarkerUnion(*unique_markers).intersect( + MarkerUnion(*other_unique_markers) + ) + if isinstance(unique_intersection, (SingleMarkerLike, EmptyMarker)): + # Use list instead of set for deterministic order. + common_markers = [ + marker for marker in self.markers if marker in shared_markers + ] + return unique_intersection.union(MarkerUnion(*common_markers)) - def validate(self, environment): # type: (Dict[str, Any]) -> bool - for m in self._markers: - if m.validate(environment): - return True + return None - return False + def validate(self, environment: dict[str, Any] | None) -> bool: + return any(m.validate(environment) for m in self._markers) - def without_extras(self): # type: () -> MarkerTypes + def without_extras(self) -> BaseMarker: return self.exclude("extra") - def exclude(self, marker_name): # type: (str) -> MarkerTypes + def exclude(self, marker_name: str) -> BaseMarker: new_markers = [] for m in self._markers: - if isinstance(m, SingleMarker) and m.name == marker_name: + if isinstance(m, SingleMarkerLike) and m.name == marker_name: # The marker is not relevant since it must be excluded continue marker = m.exclude(marker_name) + new_markers.append(marker) - if not marker.is_empty(): - new_markers.append(marker) + if not new_markers: + # All markers were the excluded marker. + return AnyMarker() return self.of(*new_markers) - def only(self, *marker_names): # type: (*str) -> MarkerTypes - new_markers = [] - - for m in self._markers: - if isinstance(m, SingleMarker) and m.name not in marker_names: - # The marker is not relevant since it's not one we want - continue + def only(self, *marker_names: str) -> BaseMarker: + return self.of(*(m.only(*marker_names) for m in self._markers)) - marker = m.only(*marker_names) - - if not marker.is_empty(): - new_markers.append(marker) - - return self.of(*new_markers) - - def invert(self): # type: () -> MarkerTypes + def invert(self) -> BaseMarker: markers = [marker.invert() for marker in self._markers] + return MultiMarker(*markers) - return MultiMarker.of(*markers) - - def __eq__(self, other): # type: (MarkerTypes) -> bool + def __eq__(self, other: object) -> bool: if not isinstance(other, MarkerUnion): return False - return set(self._markers) == set(other.markers) + return self._markers == other.markers - def __hash__(self): # type: () -> int - h = hash("union") - for m in self._markers: - h |= hash(m) - - return h + def __hash__(self) -> int: + return hash(("union", *self._markers)) - def __str__(self): # type: () -> str - return " or ".join( - str(m) for m in self._markers if not m.is_any() and not m.is_empty() - ) + def __str__(self) -> str: + return " or ".join(str(m) for m in self._markers) - def is_any(self): # type: () -> bool - return any(m.is_any() for m in self._markers) - def is_empty(self): # type: () -> bool - return all(m.is_empty() for m in self._markers) - - -def parse_marker(marker): # type: (str) -> MarkerTypes +@functools.lru_cache(maxsize=None) +def parse_marker(marker: str) -> BaseMarker: if marker == "": return EmptyMarker() @@ -709,44 +870,201 @@ def parse_marker(marker): # type: (str) -> MarkerTypes return markers -def _compact_markers(tree_elements, tree_prefix=""): # type: (Tree, str) -> MarkerTypes - groups = [MultiMarker()] +def _compact_markers( + tree_elements: Tree, tree_prefix: str = "", top_level: bool = True +) -> BaseMarker: + from lark 
import Token + + # groups is a disjunction of conjunctions + # eg [[A, B], [C, D]] represents "(A and B) or (C and D)" + groups: list[list[BaseMarker]] = [[]] + for token in tree_elements: if isinstance(token, Token): - if token.type == "{}BOOL_OP".format(tree_prefix) and token.value == "or": - groups.append(MultiMarker()) + if token.type == f"{tree_prefix}BOOL_OP" and token.value == "or": + groups.append([]) continue if token.data == "marker": - groups[-1] = MultiMarker.of( - groups[-1], _compact_markers(token.children, tree_prefix=tree_prefix) + sub_marker = _compact_markers( + token.children, tree_prefix=tree_prefix, top_level=False ) - elif token.data == "{}item".format(tree_prefix): + groups[-1].append(sub_marker) + + elif token.data == f"{tree_prefix}item": name, op, value = token.children - if value.type == "{}MARKER_NAME".format(tree_prefix): - name, value, = value, name + if value.type == f"{tree_prefix}MARKER_NAME": + name, value = value, name value = value[1:-1] - groups[-1] = MultiMarker.of( - groups[-1], SingleMarker(name, "{}{}".format(op, value)) - ) - elif token.data == "{}BOOL_OP".format(tree_prefix): - if token.children[0] == "or": - groups.append(MultiMarker()) + sub_marker = SingleMarker(str(name), f"{op}{value}") + groups[-1].append(sub_marker) - for i, group in enumerate(reversed(groups)): - if group.is_empty(): - del groups[len(groups) - 1 - i] - continue + elif token.data == f"{tree_prefix}BOOL_OP" and token.children[0] == "or": + groups.append([]) - if isinstance(group, MultiMarker) and len(group.markers) == 1: - groups[len(groups) - 1 - i] = group.markers[0] + # Combine the groups. + sub_markers = [MultiMarker(*group) for group in groups] + + # This function calls itself recursively. In the inner calls we don't perform any + # simplification, instead doing it all only when we have the complete marker. 
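    # Precedence sketch: "and" binds tighter than "or", so the groups built above
    # read 'A and B or C' as "(A and B) or C". Illustrative (doctest-style,
    # assuming the vendored parse_marker):
    #
    #     >>> m = parse_marker('python_version >= "3.8" and sys_platform == "linux"'
    #     ...                  ' or sys_platform == "darwin"')
    #     >>> m.validate({"python_version": "2.7", "sys_platform": "darwin"})
    #     True
    #     >>> m.validate({"python_version": "2.7", "sys_platform": "linux"})
    #     False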
+ if not top_level: + return MarkerUnion(*sub_markers) + + return union(*sub_markers) - if not groups: - return EmptyMarker() - if len(groups) == 1: - return groups[0] +@functools.lru_cache(maxsize=None) +def cnf(marker: BaseMarker) -> BaseMarker: + """Transforms the marker into CNF (conjunctive normal form).""" + if isinstance(marker, MarkerUnion): + cnf_markers = [cnf(m) for m in marker.markers] + sub_marker_lists = [ + m.markers if isinstance(m, MultiMarker) else [m] for m in cnf_markers + ] + return MultiMarker.of( + *[MarkerUnion.of(*c) for c in itertools.product(*sub_marker_lists)] + ) + + if isinstance(marker, MultiMarker): + return MultiMarker.of(*[cnf(m) for m in marker.markers]) + + return marker + + +@functools.lru_cache(maxsize=None) +def dnf(marker: BaseMarker) -> BaseMarker: + """Transforms the marker into DNF (disjunctive normal form).""" + if isinstance(marker, MultiMarker): + dnf_markers = [dnf(m) for m in marker.markers] + sub_marker_lists = [ + m.markers if isinstance(m, MarkerUnion) else [m] for m in dnf_markers + ] + return MarkerUnion.of( + *[MultiMarker.of(*c) for c in itertools.product(*sub_marker_lists)] + ) - return MarkerUnion.of(*groups) + if isinstance(marker, MarkerUnion): + return MarkerUnion.of(*[dnf(m) for m in marker.markers]) + + return marker + + +def intersection(*markers: BaseMarker) -> BaseMarker: + return dnf(MultiMarker(*markers)) + + +def union(*markers: BaseMarker) -> BaseMarker: + # Sometimes normalization makes it more complicate instead of simple + # -> choose candidate with the least complexity + unnormalized: BaseMarker = MarkerUnion(*markers) + while ( + isinstance(unnormalized, (MultiMarker, MarkerUnion)) + and len(unnormalized.markers) == 1 + ): + unnormalized = unnormalized.markers[0] + + conjunction = cnf(unnormalized) + if not isinstance(conjunction, MultiMarker): + return conjunction + + disjunction = dnf(conjunction) + if not isinstance(disjunction, MarkerUnion): + return disjunction + + return min(disjunction, conjunction, unnormalized, key=lambda x: x.complexity) + + +@functools.lru_cache(maxsize=None) +def _merge_single_markers( + marker1: SingleMarkerLike[SingleMarkerConstraint], + marker2: SingleMarkerLike[SingleMarkerConstraint], + merge_class: type[MultiMarker | MarkerUnion], +) -> BaseMarker | None: + if {marker1.name, marker2.name} == PYTHON_VERSION_MARKERS: + assert isinstance(marker1, SingleMarker) + assert isinstance(marker2, SingleMarker) + return _merge_python_version_single_markers(marker1, marker2, merge_class) + + if marker1.name != marker2.name: + return None + + # "extra" is special because it can have multiple values at the same time. + # That's why we can only merge two "extra" markers if they have the same value. + if marker1.name == "extra": + assert isinstance(marker1, SingleMarker) + assert isinstance(marker2, SingleMarker) + if marker1.value != marker2.value: # type: ignore[attr-defined] + return None + + if merge_class == MultiMarker: + merge_method = marker1.constraint.intersect + else: + merge_method = marker1.constraint.union + # Markers with the same name have the same constraint type, + # but mypy can't see that. 
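    # Illustrative sketch of the possible merge results (doctest-style; assumes
    # the vendored parse_marker):
    #
    #     >>> str(parse_marker('python_version >= "3.8"').union(
    #     ...     parse_marker('python_version >= "3.9"')))
    #     'python_version >= "3.8"'
    #     >>> str(parse_marker('extra == "foo"').intersect(
    #     ...     parse_marker('extra == "bar"')))  # distinct extras never merge
    #     'extra == "foo" and extra == "bar"'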
+ result_constraint = merge_method(marker2.constraint) # type: ignore[arg-type] + + result_marker: BaseMarker | None = None + if result_constraint.is_empty(): + result_marker = EmptyMarker() + elif result_constraint.is_any(): + result_marker = AnyMarker() + elif result_constraint == marker1.constraint: + result_marker = marker1 + elif result_constraint == marker2.constraint: + result_marker = marker2 + elif isinstance(result_constraint, Constraint) or ( + isinstance(result_constraint, VersionConstraint) + and result_constraint.is_simple() + ): + result_marker = SingleMarker(marker1.name, result_constraint) + elif isinstance(result_constraint, UnionConstraint) and all( + isinstance(c, Constraint) and c.operator == "==" + for c in result_constraint.constraints + ): + result_marker = AtomicMarkerUnion(marker1.name, result_constraint) + elif isinstance(result_constraint, MultiConstraint) and all( + c.operator == "!=" for c in result_constraint.constraints + ): + result_marker = AtomicMultiMarker(marker1.name, result_constraint) + return result_marker + + +def _merge_python_version_single_markers( + marker1: SingleMarker, + marker2: SingleMarker, + merge_class: type[MultiMarker | MarkerUnion], +) -> BaseMarker | None: + from conda_lock._vendor.poetry.core.packages.utils.utils import get_python_constraint_from_marker + + if marker1.name == "python_version": + version_marker = marker1 + full_version_marker = marker2 + else: + version_marker = marker2 + full_version_marker = marker1 + + normalized_constraint = get_python_constraint_from_marker(version_marker) + normalized_marker = SingleMarker("python_full_version", normalized_constraint) + merged_marker = _merge_single_markers( + normalized_marker, full_version_marker, merge_class + ) + if merged_marker == normalized_marker: + # prefer original marker to avoid unnecessary changes + return version_marker + if merged_marker and isinstance(merged_marker, SingleMarker): + # We have to fix markers like 'python_full_version == "3.6"' + # to receive 'python_full_version == "3.6.0"'. + # It seems a bit hacky to convert to string and back to marker, + # but it's probably much simpler than to consider the different constraint + # classes (mostly VersionRangeConstraint, but VersionUnion for "!=") and + # since this conversion is only required for python_full_version markers + # it may be sufficient to handle it here.
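    # Illustrative sketch (doctest-style; assumes the vendored parse_marker):
    # merging a python_version marker with an equivalent python_full_version
    # marker prefers the original python_version form:
    #
    #     >>> str(parse_marker('python_version >= "3.8"').intersect(
    #     ...     parse_marker('python_full_version >= "3.8.0"')))
    #     'python_version >= "3.8"'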
+ marker_string = str(merged_marker) + precision = marker_string.count(".") + 1 + if precision < 3: + marker_string = marker_string[:-1] + ".0" * (3 - precision) + '"' + merged_marker = parse_marker(marker_string) + return merged_marker diff --git a/conda_lock/_vendor/poetry/core/version/parser.py b/conda_lock/_vendor/poetry/core/version/parser.py new file mode 100644 index 00000000..085cfa38 --- /dev/null +++ b/conda_lock/_vendor/poetry/core/version/parser.py @@ -0,0 +1,31 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Any + + +if TYPE_CHECKING: + from pathlib import Path + + from lark import Lark + from lark import Tree + + +class Parser: + def __init__( + self, grammar: Path, parser: str = "lalr", debug: bool = False + ) -> None: + self._grammar = grammar + self._parser = parser + self._debug = debug + self._lark: Lark | None = None + + def parse(self, text: str, **kwargs: Any) -> Tree: + from lark import Lark + + if self._lark is None: + self._lark = Lark.open( + grammar_filename=self._grammar, parser=self._parser, debug=self._debug + ) + + return self._lark.parse(text=text, **kwargs) diff --git a/conda_lock/_vendor/poetry/core/version/pep440/__init__.py b/conda_lock/_vendor/poetry/core/version/pep440/__init__.py new file mode 100644 index 00000000..3d0691c1 --- /dev/null +++ b/conda_lock/_vendor/poetry/core/version/pep440/__init__.py @@ -0,0 +1,9 @@ +from __future__ import annotations + +from conda_lock._vendor.poetry.core.version.pep440.segments import LocalSegmentType +from conda_lock._vendor.poetry.core.version.pep440.segments import Release +from conda_lock._vendor.poetry.core.version.pep440.segments import ReleaseTag +from conda_lock._vendor.poetry.core.version.pep440.version import PEP440Version + + +__all__ = ("LocalSegmentType", "Release", "ReleaseTag", "PEP440Version") diff --git a/conda_lock/_vendor/poetry/core/version/pep440/parser.py b/conda_lock/_vendor/poetry/core/version/pep440/parser.py new file mode 100644 index 00000000..82f72322 --- /dev/null +++ b/conda_lock/_vendor/poetry/core/version/pep440/parser.py @@ -0,0 +1,86 @@ +from __future__ import annotations + +import functools +import re + +from typing import TYPE_CHECKING +from typing import TypeVar + +from packaging.version import VERSION_PATTERN + +from conda_lock._vendor.poetry.core.version.exceptions import InvalidVersion +from conda_lock._vendor.poetry.core.version.pep440 import Release +from conda_lock._vendor.poetry.core.version.pep440 import ReleaseTag + + +if TYPE_CHECKING: + from conda_lock._vendor.poetry.core.version.pep440 import LocalSegmentType + from conda_lock._vendor.poetry.core.version.pep440.version import PEP440Version + +T = TypeVar("T", bound="PEP440Version") + + +class PEP440Parser: + _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE) + _local_version_separators = re.compile(r"[._-]") + + @classmethod + def _get_release(cls, match: re.Match[str] | None) -> Release: + if not match or match.group("release") is None: + return Release(0) + return Release.from_parts(*(int(i) for i in match.group("release").split("."))) + + @classmethod + def _get_prerelease(cls, match: re.Match[str] | None) -> ReleaseTag | None: + if not match or match.group("pre") is None: + return None + return ReleaseTag(match.group("pre_l"), int(match.group("pre_n") or 0)) + + @classmethod + def _get_postrelease(cls, match: re.Match[str] | None) -> ReleaseTag | None: + if not match or match.group("post") is None: + return None + + return 
ReleaseTag( + match.group("post_l") or "post", + int(match.group("post_n1") or match.group("post_n2") or 0), + ) + + @classmethod + def _get_devrelease(cls, match: re.Match[str] | None) -> ReleaseTag | None: + if not match or match.group("dev") is None: + return None + return ReleaseTag(match.group("dev_l"), int(match.group("dev_n") or 0)) + + @classmethod + def _get_local(cls, match: re.Match[str] | None) -> LocalSegmentType | None: + if not match or match.group("local") is None: + return None + + return tuple( + part.lower() + for part in cls._local_version_separators.split(match.group("local")) + ) + + @classmethod + @functools.lru_cache(maxsize=None) + def parse(cls, value: str, version_class: type[T]) -> T: + match = cls._regex.search(value) if value else None + if not match: + raise InvalidVersion(f"Invalid PEP 440 version: '{value}'") + + return version_class( + epoch=int(match.group("epoch")) if match.group("epoch") else 0, + release=cls._get_release(match), + pre=cls._get_prerelease(match), + post=cls._get_postrelease(match), + dev=cls._get_devrelease(match), + local=cls._get_local(match), + text=value, + ) + + +def parse_pep440(value: str, version_class: type[T]) -> T: + return PEP440Parser.parse( # type: ignore[no-any-return] + value, version_class # type: ignore[arg-type] + ) diff --git a/conda_lock/_vendor/poetry/core/version/pep440/segments.py b/conda_lock/_vendor/poetry/core/version/pep440/segments.py new file mode 100644 index 00000000..79234882 --- /dev/null +++ b/conda_lock/_vendor/poetry/core/version/pep440/segments.py @@ -0,0 +1,172 @@ +from __future__ import annotations + +import dataclasses + +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import Union + + +# Release phase IDs according to PEP440 +RELEASE_PHASE_ID_ALPHA = "a" +RELEASE_PHASE_ID_BETA = "b" +RELEASE_PHASE_ID_RC = "rc" +RELEASE_PHASE_ID_POST = "post" +RELEASE_PHASE_ID_DEV = "dev" + +RELEASE_PHASE_SPELLINGS = { + RELEASE_PHASE_ID_ALPHA: {RELEASE_PHASE_ID_ALPHA, "alpha"}, + RELEASE_PHASE_ID_BETA: {RELEASE_PHASE_ID_BETA, "beta"}, + RELEASE_PHASE_ID_RC: {RELEASE_PHASE_ID_RC, "c", "pre", "preview"}, + RELEASE_PHASE_ID_POST: {RELEASE_PHASE_ID_POST, "r", "rev", "-"}, + RELEASE_PHASE_ID_DEV: {RELEASE_PHASE_ID_DEV}, +} +RELEASE_PHASE_NORMALIZATIONS = { + s: id_ for id_, spellings in RELEASE_PHASE_SPELLINGS.items() for s in spellings +} + + +@dataclasses.dataclass(frozen=True, eq=True, order=True) +class Release: + major: int = dataclasses.field(default=0, compare=False) + minor: int | None = dataclasses.field(default=None, compare=False) + patch: int | None = dataclasses.field(default=None, compare=False) + # some projects use non-semver versioning schemes, eg: 1.2.3.4 + extra: tuple[int, ...] = dataclasses.field(default=(), compare=False) + precision: int = dataclasses.field(init=False, compare=False) + text: str = dataclasses.field(init=False, compare=False) + _compare_key: tuple[int, ...] 
= dataclasses.field(init=False, compare=True) + + def __post_init__(self) -> None: + if self.extra: + if self.minor is None: + object.__setattr__(self, "minor", 0) + if self.patch is None: + object.__setattr__(self, "patch", 0) + parts = [ + str(part) + for part in (self.major, self.minor, self.patch, *self.extra) + if part is not None + ] + object.__setattr__(self, "text", ".".join(parts)) + object.__setattr__(self, "precision", len(parts)) + + compare_key = [self.major, self.minor or 0, self.patch or 0, *self.extra] + while compare_key and compare_key[-1] == 0: + del compare_key[-1] + object.__setattr__(self, "_compare_key", tuple(compare_key)) + + @classmethod + def from_parts(cls, *parts: int) -> Release: + if not parts: + return cls() + + return cls( + major=parts[0], + minor=parts[1] if len(parts) > 1 else None, + patch=parts[2] if len(parts) > 2 else None, + extra=parts[3:], + ) + + def to_parts(self) -> Sequence[int]: + return tuple( + part + for part in [self.major, self.minor, self.patch, *self.extra] + if part is not None + ) + + def to_string(self) -> str: + return self.text + + def next_major(self) -> Release: + return dataclasses.replace( + self, + major=self.major + 1, + minor=0 if self.minor is not None else None, + patch=0 if self.patch is not None else None, + extra=tuple(0 for _ in self.extra), + ) + + def next_minor(self) -> Release: + return dataclasses.replace( + self, + major=self.major, + minor=self.minor + 1 if self.minor is not None else 1, + patch=0 if self.patch is not None else None, + extra=tuple(0 for _ in self.extra), + ) + + def next_patch(self) -> Release: + return dataclasses.replace( + self, + major=self.major, + minor=self.minor if self.minor is not None else 0, + patch=self.patch + 1 if self.patch is not None else 1, + extra=tuple(0 for _ in self.extra), + ) + + def next(self) -> Release: + if self.precision == 1: + return self.next_major() + + if self.precision == 2: + return self.next_minor() + + if self.precision == 3: + return self.next_patch() + + return dataclasses.replace( + self, + major=self.major, + minor=self.minor, + patch=self.patch, + extra=(*self.extra[:-1], self.extra[-1] + 1), + ) + + +@dataclasses.dataclass(frozen=True, eq=True, order=True) +class ReleaseTag: + phase: str + number: int = dataclasses.field(default=0) + + def __post_init__(self) -> None: + object.__setattr__( + self, "phase", RELEASE_PHASE_NORMALIZATIONS.get(self.phase, self.phase) + ) + + def to_string(self, short: bool = False) -> str: + if short: + import warnings + + warnings.warn( + "Parameter 'short' has no effect and will be removed. 
" + "(Release tags are always normalized according to PEP 440 now.)", + DeprecationWarning, + stacklevel=2, + ) + + return f"{self.phase}{self.number}" + + def next(self) -> ReleaseTag: + return dataclasses.replace(self, phase=self.phase, number=self.number + 1) + + def next_phase(self) -> ReleaseTag | None: + if self.phase in [ + RELEASE_PHASE_ID_POST, + RELEASE_PHASE_ID_RC, + RELEASE_PHASE_ID_DEV, + ]: + return None + + if self.phase == RELEASE_PHASE_ID_ALPHA: + _phase = RELEASE_PHASE_ID_BETA + elif self.phase == RELEASE_PHASE_ID_BETA: + _phase = RELEASE_PHASE_ID_RC + else: + return None + + return self.__class__(phase=_phase, number=0) + + +LocalSegmentType = Optional[Union[str, int, Tuple[Union[str, int], ...]]] diff --git a/conda_lock/_vendor/poetry/core/version/pep440/version.py b/conda_lock/_vendor/poetry/core/version/pep440/version.py new file mode 100644 index 00000000..8d0386ba --- /dev/null +++ b/conda_lock/_vendor/poetry/core/version/pep440/version.py @@ -0,0 +1,328 @@ +from __future__ import annotations + +import dataclasses +import functools +import warnings + +from typing import TYPE_CHECKING +from typing import Any +from typing import Sequence +from typing import TypeVar + +from conda_lock._vendor.poetry.core.version.pep440.segments import RELEASE_PHASE_ID_ALPHA +from conda_lock._vendor.poetry.core.version.pep440.segments import RELEASE_PHASE_ID_DEV +from conda_lock._vendor.poetry.core.version.pep440.segments import RELEASE_PHASE_ID_POST +from conda_lock._vendor.poetry.core.version.pep440.segments import Release +from conda_lock._vendor.poetry.core.version.pep440.segments import ReleaseTag + + +if TYPE_CHECKING: + from conda_lock._vendor.poetry.core.version.pep440.segments import LocalSegmentType + + +@functools.total_ordering +class AlwaysSmaller: + def __lt__(self, other: object) -> bool: + return True + + +@functools.total_ordering +class AlwaysGreater: + def __gt__(self, other: object) -> bool: + return True + + +class Infinity(AlwaysGreater, int): + pass + + +class NegativeInfinity(AlwaysSmaller, int): + pass + + +T = TypeVar("T", bound="PEP440Version") + +# we use the phase "z" to ensure we always sort this after other phases +_INF_TAG = ReleaseTag("z", Infinity()) +# we use the phase "" to ensure we always sort this before other phases +_NEG_INF_TAG = ReleaseTag("", NegativeInfinity()) + + +@dataclasses.dataclass(frozen=True, eq=True, order=True) +class PEP440Version: + epoch: int = dataclasses.field(default=0, compare=False) + release: Release = dataclasses.field(default_factory=Release, compare=False) + pre: ReleaseTag | None = dataclasses.field(default=None, compare=False) + post: ReleaseTag | None = dataclasses.field(default=None, compare=False) + dev: ReleaseTag | None = dataclasses.field(default=None, compare=False) + local: LocalSegmentType = dataclasses.field(default=None, compare=False) + text: str = dataclasses.field(default="", compare=False) + _compare_key: tuple[ + int, Release, ReleaseTag, ReleaseTag, ReleaseTag, tuple[int | str, ...] 
+ ] = dataclasses.field(init=False, compare=True) + + def __post_init__(self) -> None: + if self.local is not None and not isinstance(self.local, tuple): + object.__setattr__(self, "local", (self.local,)) + + if isinstance(self.release, tuple): + object.__setattr__(self, "release", Release(*self.release)) + + # we do this here to handle both None and tomlkit string values + object.__setattr__( + self, "text", self.to_string() if not self.text else str(self.text) + ) + + object.__setattr__(self, "_compare_key", self._make_compare_key()) + + def _make_compare_key( + self, + ) -> tuple[ + int, + Release, + ReleaseTag, + ReleaseTag, + ReleaseTag, + tuple[tuple[int, int | str], ...], + ]: + """ + This code is based on the implementation of packaging.version._cmpkey(..) + """ + # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0. + # We'll do this by abusing the pre segment, but we _only_ want to do this + # if there is not a pre or a post segment. If we have one of those then + # the normal sorting rules will handle this case correctly. + if self.pre is None and self.post is None and self.dev is not None: + _pre = _NEG_INF_TAG + # Versions without a pre-release (except as noted above) should sort after + # those with one. + elif self.pre is None: + _pre = _INF_TAG + else: + _pre = self.pre + + # Versions without a post segment should sort before those with one. + _post = _NEG_INF_TAG if self.post is None else self.post + + # Versions without a development segment should sort after those with one. + _dev = _INF_TAG if self.dev is None else self.dev + + _local: tuple[tuple[int, int | str], ...] + if self.local is None: + # Versions without a local segment should sort before those with one. + _local = ((NegativeInfinity(), ""),) + else: + # Versions with a local segment need that segment parsed to implement + # the sorting rules in PEP440. + # - Alpha numeric segments sort before numeric segments + # - Alpha numeric segments sort lexicographically + # - Numeric segments sort numerically + # - Shorter versions sort before longer versions when the prefixes + # match exactly + assert isinstance(self.local, tuple) + # We convert strings that are integers so that they can be compared + _local = tuple( + (int(i), "") if str(i).isnumeric() else (NegativeInfinity(), i) + for i in self.local + ) + return self.epoch, self.release, _pre, _post, _dev, _local + + @property + def major(self) -> int: + return self.release.major + + @property + def minor(self) -> int | None: + return self.release.minor + + @property + def patch(self) -> int | None: + return self.release.patch + + @property + def non_semver_parts(self) -> Sequence[int]: + return self.release.extra + + @property + def parts(self) -> Sequence[int]: + return self.release.to_parts() + + def to_string(self, short: bool = False) -> str: + if short: + import warnings + + warnings.warn( + "Parameter 'short' has no effect and will be removed. 
" + "(Versions are always normalized according to PEP 440 now.)", + DeprecationWarning, + stacklevel=2, + ) + + version_string = self.release.to_string() + + if self.epoch: + # if epoch is non-zero we should include it + version_string = f"{self.epoch}!{version_string}" + + if self.pre: + version_string += self.pre.to_string() + + if self.post: + version_string = f"{version_string}.{self.post.to_string()}" + + if self.dev: + version_string = f"{version_string}.{self.dev.to_string()}" + + if self.local: + assert isinstance(self.local, tuple) + version_string += "+" + ".".join(map(str, self.local)) + + return version_string.lower() + + @classmethod + def parse(cls: type[T], value: str) -> T: + from conda_lock._vendor.poetry.core.version.pep440.parser import parse_pep440 + + return parse_pep440(value, cls) + + def is_prerelease(self) -> bool: + return self.pre is not None + + def is_postrelease(self) -> bool: + return self.post is not None + + def is_devrelease(self) -> bool: + return self.dev is not None + + def is_local(self) -> bool: + return self.local is not None + + def is_no_suffix_release(self) -> bool: + return not (self.pre or self.post or self.dev) + + def is_unstable(self) -> bool: + return self.is_prerelease() or self.is_devrelease() + + def is_stable(self) -> bool: + return not self.is_unstable() + + def _is_increment_required(self) -> bool: + return self.is_stable() or (not self.is_prerelease() and self.is_postrelease()) + + def next_major(self: T) -> T: + release = self.release + if self._is_increment_required() or Release(release.major, 0, 0) < release: + release = release.next_major() + return self.__class__(epoch=self.epoch, release=release) + + def next_minor(self: T) -> T: + release = self.release + if ( + self._is_increment_required() + or Release(release.major, release.minor, 0) < release + ): + release = release.next_minor() + return self.__class__(epoch=self.epoch, release=release) + + def next_patch(self: T) -> T: + release = self.release + if ( + self._is_increment_required() + or Release(release.major, release.minor, release.patch) < release + ): + release = release.next_patch() + return self.__class__(epoch=self.epoch, release=release) + + def next_stable(self: T) -> T: + release = self.release.next() if self.is_stable() else self.release + return self.__class__(epoch=self.epoch, release=release, local=self.local) + + def next_prerelease(self: T, next_phase: bool = False) -> T: + if self.is_stable(): + warnings.warn( + "Calling next_prerelease() on a stable release is deprecated for" + " its ambiguity. Use next_major(), next_minor(), etc. 
together with" + " first_prerelease()", + DeprecationWarning, + stacklevel=2, + ) + if self.is_prerelease(): + assert self.pre is not None + if not self.is_devrelease() or self.is_postrelease(): + pre = self.pre.next_phase() if next_phase else self.pre.next() + else: + pre = self.pre + else: + pre = ReleaseTag(RELEASE_PHASE_ID_ALPHA) + return self.__class__(epoch=self.epoch, release=self.release, pre=pre) + + def next_postrelease(self: T) -> T: + if self.is_postrelease(): + assert self.post is not None + post = self.post.next() if self.dev is None else self.post + else: + post = ReleaseTag(RELEASE_PHASE_ID_POST) + return self.__class__( + epoch=self.epoch, + release=self.release, + pre=self.pre, + post=post, + ) + + def next_devrelease(self: T) -> T: + if self.is_devrelease(): + assert self.dev is not None + dev = self.dev.next() + else: + warnings.warn( + "Calling next_devrelease() on a non dev release is deprecated for" + " its ambiguity. Use next_major(), next_minor(), etc. together with" + " first_devrelease()", + DeprecationWarning, + stacklevel=2, + ) + dev = ReleaseTag(RELEASE_PHASE_ID_DEV) + return self.__class__( + epoch=self.epoch, + release=self.release, + pre=self.pre, + post=self.post, + dev=dev, + ) + + def first_prerelease(self: T) -> T: + return self.__class__( + epoch=self.epoch, + release=self.release, + pre=ReleaseTag(RELEASE_PHASE_ID_ALPHA), + ) + + def first_devrelease(self: T) -> T: + return self.__class__( + epoch=self.epoch, + release=self.release, + pre=self.pre, + post=self.post, + dev=ReleaseTag(RELEASE_PHASE_ID_DEV), + ) + + def replace(self: T, **kwargs: Any) -> T: + return self.__class__(**{ + **{ + k: getattr(self, k) + for k in self.__dataclass_fields__ + if k not in ("_compare_key", "text") + }, # setup defaults with current values, excluding compare keys and text + **kwargs, # keys to replace + }) + + def without_local(self: T) -> T: + return self.replace(local=None) + + def without_postrelease(self: T) -> T: + if self.is_postrelease(): + return self.replace(post=None, dev=None) + return self + + def without_devrelease(self: T) -> T: + return self.replace(dev=None) diff --git a/conda_lock/_vendor/poetry/core/version/requirements.py b/conda_lock/_vendor/poetry/core/version/requirements.py index cdc7d016..2bf95dc8 100644 --- a/conda_lock/_vendor/poetry/core/version/requirements.py +++ b/conda_lock/_vendor/poetry/core/version/requirements.py @@ -1,26 +1,12 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. 
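The new pep440 package above gives the vendored poetry-core a self-contained PEP 440 implementation; a minimal usage sketch (illustrative, assuming the vendored import path from the diff headers above):

    from conda_lock._vendor.poetry.core.version.pep440 import PEP440Version

    # Parsing decomposes a version string into its PEP 440 segments.
    v = PEP440Version.parse("1.2.3rc1.post2.dev3+local.4")
    assert (v.major, v.minor, v.patch) == (1, 2, 3)
    assert v.is_prerelease() and v.is_postrelease() and v.is_devrelease() and v.is_local()
    assert v.to_string() == "1.2.3rc1.post2.dev3+local.4"

    # The comparison key mirrors packaging's rules: dev releases sort before
    # pre-releases, which sort before the final release.
    assert PEP440Version.parse("1.0.dev0") < PEP440Version.parse("1.0a1") < PEP440Version.parse("1.0")

    # The next_* helpers respect the precision of the release segment.
    assert PEP440Version.parse("1.2").next_minor().to_string() == "1.3"
    assert PEP440Version.parse("1.2.3").next_patch().to_string() == "1.2.4"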
-from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from __future__ import annotations -import os +import urllib.parse as urlparse -from lark import Lark -from lark import UnexpectedCharacters -from lark import UnexpectedToken - -from conda_lock._vendor.poetry.core.semver import parse_constraint -from conda_lock._vendor.poetry.core.semver.exceptions import ParseConstraintError - -from .markers import _compact_markers - - -try: - import urllib.parse as urlparse -except ImportError: - import urlparse +from conda_lock._vendor.poetry.core.constraints.version import parse_constraint +from conda_lock._vendor.poetry.core.constraints.version.exceptions import ParseConstraintError +from conda_lock._vendor.poetry.core.version.grammars import GRAMMAR_PEP_508_CONSTRAINTS +from conda_lock._vendor.poetry.core.version.markers import _compact_markers +from conda_lock._vendor.poetry.core.version.parser import Parser class InvalidRequirement(ValueError): @@ -29,12 +15,11 @@ class InvalidRequirement(ValueError): """ -_parser = Lark.open( - os.path.join(os.path.dirname(__file__), "grammars", "pep508.lark"), parser="lalr" -) +# Parser: PEP 508 Constraints +_parser = Parser(GRAMMAR_PEP_508_CONSTRAINTS, "lalr") -class Requirement(object): +class Requirement: """ Parse a requirement. @@ -43,17 +28,19 @@ class Requirement(object): string. """ - def __init__(self, requirement_string): # type: (str) -> None + def __init__(self, requirement_string: str) -> None: + from lark import UnexpectedCharacters + from lark import UnexpectedToken + try: parsed = _parser.parse(requirement_string) except (UnexpectedCharacters, UnexpectedToken) as e: raise InvalidRequirement( - "The requirement is invalid: Unexpected character at column {}\n\n{}".format( - e.column, e.get_context(requirement_string) - ) + "The requirement is invalid: Unexpected character at column" + f" {e.column}\n\n{e.get_context(requirement_string)}" ) - self.name = next(parsed.scan_values(lambda t: t.type == "NAME")).value + self.name: str = next(parsed.scan_values(lambda t: t.type == "NAME")).value url = next(parsed.scan_values(lambda t: t.type == "URI"), None) if url: @@ -62,14 +49,13 @@ def __init__(self, requirement_string): # type: (str) -> None if parsed_url.scheme == "file": if urlparse.urlunparse(parsed_url) != url: raise InvalidRequirement( - 'The requirement is invalid: invalid URL "{0}"'.format(url) + f'The requirement is invalid: invalid URL "{url}"' ) elif ( not (parsed_url.scheme and parsed_url.netloc) - or (not parsed_url.scheme and not parsed_url.netloc) ) and not parsed_url.path: raise InvalidRequirement( - 'The requirement is invalid: invalid URL "{0}"'.format(url) + f'The requirement is invalid: invalid URL "{url}"' ) self.url = url else: @@ -77,18 +63,13 @@ def __init__(self, requirement_string): # type: (str) -> None self.extras = [e.value for e in parsed.scan_values(lambda t: t.type == "EXTRA")] constraint = next(parsed.find_data("version_specification"), None) - if not constraint: - constraint = "*" - else: - constraint = ",".join(constraint.children) + constraint = ",".join(constraint.children) if constraint else "*" try: self.constraint = parse_constraint(constraint) except ParseConstraintError: raise InvalidRequirement( - 'The requirement is invalid: invalid version constraint "{}"'.format( - constraint - ) + f'The requirement is invalid: invalid version constraint "{constraint}"' ) self.pretty_constraint = constraint @@ -101,22 +82,23 @@ def __init__(self, 
requirement_string): # type: (str) -> None self.marker = marker - def __str__(self): # type: () -> str + def __str__(self) -> str: parts = [self.name] if self.extras: - parts.append("[{0}]".format(",".join(sorted(self.extras)))) + extras = ",".join(sorted(self.extras)) + parts.append(f"[{extras}]") if self.pretty_constraint: parts.append(self.pretty_constraint) if self.url: - parts.append("@ {0}".format(self.url)) + parts.append(f"@ {self.url}") if self.marker: - parts.append("; {0}".format(self.marker)) + parts.append(f"; {self.marker}") return "".join(parts) - def __repr__(self): # type: () -> str - return "".format(str(self)) + def __repr__(self) -> str: + return f"" diff --git a/conda_lock/_vendor/poetry/core/version/utils.py b/conda_lock/_vendor/poetry/core/version/utils.py deleted file mode 100644 index a81a9e7f..00000000 --- a/conda_lock/_vendor/poetry/core/version/utils.py +++ /dev/null @@ -1,65 +0,0 @@ -from typing import Any - - -class Infinity(object): - def __repr__(self): # type: () -> str - return "Infinity" - - def __hash__(self): # type: () -> int - return hash(repr(self)) - - def __lt__(self, other): # type: (Any) -> bool - return False - - def __le__(self, other): # type: (Any) -> bool - return False - - def __eq__(self, other): # type: (Any) -> bool - return isinstance(other, self.__class__) - - def __ne__(self, other): # type: (Any) -> bool - return not isinstance(other, self.__class__) - - def __gt__(self, other): # type: (Any) -> bool - return True - - def __ge__(self, other): # type: (Any) -> bool - return True - - def __neg__(self): # type: () -> NegativeInfinity - return NegativeInfinity - - -Infinity = Infinity() # type: ignore - - -class NegativeInfinity(object): - def __repr__(self): # type: () -> str - return "-Infinity" - - def __hash__(self): # type: () -> int - return hash(repr(self)) - - def __lt__(self, other): # type: (Any) -> bool - return True - - def __le__(self, other): # type: (Any) -> bool - return True - - def __eq__(self, other): # type: (Any) -> bool - return isinstance(other, self.__class__) - - def __ne__(self, other): # type: (Any) -> bool - return not isinstance(other, self.__class__) - - def __gt__(self, other): # type: (Any) -> bool - return False - - def __ge__(self, other): # type: (Any) -> bool - return False - - def __neg__(self): # type: () -> Infinity - return Infinity - - -NegativeInfinity = NegativeInfinity() # type: ignore diff --git a/conda_lock/_vendor/poetry/core/version/version.py b/conda_lock/_vendor/poetry/core/version/version.py deleted file mode 100644 index 0726d943..00000000 --- a/conda_lock/_vendor/poetry/core/version/version.py +++ /dev/null @@ -1,243 +0,0 @@ -import re - -from collections import namedtuple -from itertools import dropwhile -from typing import Any -from typing import Optional -from typing import Tuple -from typing import Union - -from .base import BaseVersion -from .exceptions import InvalidVersion -from .utils import Infinity -from .utils import NegativeInfinity - - -_Version = namedtuple("_Version", ["epoch", "release", "dev", "pre", "post", "local"]) - - -VERSION_PATTERN = re.compile( - r""" - ^ - v? - (?: - (?:(?P[0-9]+)!)? # epoch - (?P[0-9]+(?:\.[0-9]+)*) # release segment - (?P
                                          # pre-release
-            [-_.]?
-            (?P(a|b|c|rc|alpha|beta|pre|preview))
-            [-_.]?
-            (?P[0-9]+)?
-        )?
-        (?P                                         # post release
-            (?:-(?P[0-9]+))
-            |
-            (?:
-                [-_.]?
-                (?Ppost|rev|r)
-                [-_.]?
-                (?P[0-9]+)?
-            )
-        )?
-        (?P                                          # dev release
-            [-_.]?
-            (?Pdev)
-            [-_.]?
-            (?P[0-9]+)?
-        )?
-    )
-    (?:\+(?P[a-z0-9]+(?:[-_.][a-z0-9]+)*))?       # local version
-    $
-""",
-    re.IGNORECASE | re.VERBOSE,
-)
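
For reference, the VERSION_PATTERN deleted above is the PEP 440 grammar as implemented by `packaging`; a quick sanity check of what it accepts, using `packaging.version.Version` (already a dependency of this codebase) rather than the removed class:

    from packaging.version import Version

    # Each named group in the pattern above surfaces as an attribute here.
    v = Version("1!2.0rc1.post3.dev4+ubuntu.1")
    assert v.epoch == 1
    assert v.release == (2, 0)
    assert v.pre == ("rc", 1)
    assert v.post == 3 and v.dev == 4
    assert v.local == "ubuntu.1"
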
-
-
-class Version(BaseVersion):
-    def __init__(self, version):  # type: (str) -> None
-        # Validate the version and parse it into pieces
-        match = VERSION_PATTERN.match(version)
-        if not match:
-            raise InvalidVersion("Invalid version: '{0}'".format(version))
-
-        # Store the parsed out pieces of the version
-        self._version = _Version(
-            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
-            release=tuple(int(i) for i in match.group("release").split(".")),
-            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
-            post=_parse_letter_version(
-                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
-            ),
-            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
-            local=_parse_local_version(match.group("local")),
-        )
-
-        # Generate a key which will be used for sorting
-        self._key = _cmpkey(
-            self._version.epoch,
-            self._version.release,
-            self._version.pre,
-            self._version.post,
-            self._version.dev,
-            self._version.local,
-        )
-
-    def __repr__(self):  # type: () -> str
-        return "".format(repr(str(self)))
-
-    def __str__(self):  # type: () -> str
-        parts = []
-
-        # Epoch
-        if self._version.epoch != 0:
-            parts.append("{0}!".format(self._version.epoch))
-
-        # Release segment
-        parts.append(".".join(str(x) for x in self._version.release))
-
-        # Pre-release
-        if self._version.pre is not None:
-            parts.append("".join(str(x) for x in self._version.pre))
-
-        # Post-release
-        if self._version.post is not None:
-            parts.append(".post{0}".format(self._version.post[1]))
-
-        # Development release
-        if self._version.dev is not None:
-            parts.append(".dev{0}".format(self._version.dev[1]))
-
-        # Local version segment
-        if self._version.local is not None:
-            parts.append("+{0}".format(".".join(str(x) for x in self._version.local)))
-
-        return "".join(parts)
-
-    @property
-    def public(self):  # type: () -> str
-        return str(self).split("+", 1)[0]
-
-    @property
-    def base_version(self):  # type: () -> str
-        parts = []
-
-        # Epoch
-        if self._version.epoch != 0:
-            parts.append("{0}!".format(self._version.epoch))
-
-        # Release segment
-        parts.append(".".join(str(x) for x in self._version.release))
-
-        return "".join(parts)
-
-    @property
-    def local(self):  # type: () -> str
-        version_string = str(self)
-        if "+" in version_string:
-            return version_string.split("+", 1)[1]
-
-    @property
-    def is_prerelease(self):  # type: () -> bool
-        return bool(self._version.dev or self._version.pre)
-
-    @property
-    def is_postrelease(self):  # type: () -> bool
-        return bool(self._version.post)
-
-
-def _parse_letter_version(
-    letter, number
-):  # type: (str, Optional[str]) -> Tuple[str, int]
-    if letter:
-        # We consider there to be an implicit 0 in a pre-release if there is
-        # not a numeral associated with it.
-        if number is None:
-            number = 0
-
-        # We normalize any letters to their lower case form
-        letter = letter.lower()
-
-        # We consider some words to be alternate spellings of other words and
-        # in those cases we want to normalize the spellings to our preferred
-        # spelling.
-        if letter == "alpha":
-            letter = "a"
-        elif letter == "beta":
-            letter = "b"
-        elif letter in ["c", "pre", "preview"]:
-            letter = "rc"
-        elif letter in ["rev", "r"]:
-            letter = "post"
-
-        return letter, int(number)
-    if not letter and number:
-        # We assume if we are given a number, but we are not given a letter
-        # then this is using the implicit post release syntax (e.g. 1.0-1)
-        letter = "post"
-
-        return letter, int(number)
-
-
-_local_version_seperators = re.compile(r"[._-]")
-
-
-def _parse_local_version(local):  # type: (Optional[str]) -> Tuple[Union[str, int], ...]
-    """
-    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
-    """
-    if local is not None:
-        return tuple(
-            part.lower() if not part.isdigit() else int(part)
-            for part in _local_version_seperators.split(local)
-        )
-
-
-def _cmpkey(
-    epoch,  # type: int
-    release,  # type: Optional[Tuple[int, ...]]
-    pre,  # type: Optional[Tuple[str, int]]
-    post,  # type: Optional[Tuple[str, int]]
-    dev,  # type: Optional[Tuple[str, int]]
-    local,  # type: Optional[Tuple[Union[str, int], ...]]
-):  # type: (...) -> Tuple[int, Tuple[int, ...], Union[Union[Infinity, NegativeInfinity, Tuple[str, int]], Any], Union[NegativeInfinity, Tuple[str, int]], Union[Union[Infinity, Tuple[str, int]], Any], Union[NegativeInfinity, Tuple[Union[Tuple[int, str], Tuple[NegativeInfinity, Union[str, int]]], ...]]]
-    # When we compare a release version, we want to compare it with all of the
-    # trailing zeros removed. So we'll use a reverse the list, drop all the now
-    # leading zeros until we come to something non zero, then take the rest
-    # re-reverse it back into the correct order and make it a tuple and use
-    # that for our sorting key.
-    release = tuple(reversed(list(dropwhile(lambda x: x == 0, reversed(release)))))
-
-    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
-    # We'll do this by abusing the pre segment, but we _only_ want to do this
-    # if there is not a pre or a post segment. If we have one of those then
-    # the normal sorting rules will handle this case correctly.
-    if pre is None and post is None and dev is not None:
-        pre = -Infinity
-
-    # Versions without a pre-release (except as noted above) should sort after
-    # those with one.
-    elif pre is None:
-        pre = Infinity
-
-    # Versions without a post segment should sort before those with one.
-    if post is None:
-        post = -Infinity
-
-    # Versions without a development segment should sort after those with one.
-    if dev is None:
-        dev = Infinity
-
-    if local is None:
-        # Versions without a local segment should sort before those with one.
-        local = -Infinity
-    else:
-        # Versions with a local segment need that segment parsed to implement
-        # the sorting rules in PEP440.
-        # - Alpha numeric segments sort before numeric segments
-        # - Alpha numeric segments sort lexicographically
-        # - Numeric segments sort numerically
-        # - Shorter versions sort before longer versions when the prefixes
-        #   match exactly
-        local = tuple((i, "") if isinstance(i, int) else (-Infinity, i) for i in local)
-
-    return epoch, release, pre, post, dev, local
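
The `_cmpkey` helper removed above encodes two subtleties: trailing zeros are trimmed so that 1.0 and 1.0.0 compare equal, and the ±Infinity sentinels push dev, pre, post, and local segments to the correct side of the ordering. A minimal sketch of the trimming step:

    from itertools import dropwhile

    # Same transformation as the release handling in _cmpkey() above.
    def trimmed(release):
        return tuple(reversed(list(dropwhile(lambda x: x == 0, reversed(release)))))

    assert trimmed((1, 0, 0)) == trimmed((1, 0)) == (1,)
    assert trimmed((1, 0, 1)) == (1, 0, 1)  # interior zeros are preserved
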
diff --git a/conda_lock/_vendor/poetry/exceptions.py b/conda_lock/_vendor/poetry/exceptions.py
index 0bbaeb80..0d755667 100644
--- a/conda_lock/_vendor/poetry/exceptions.py
+++ b/conda_lock/_vendor/poetry/exceptions.py
@@ -1,8 +1,9 @@
-class PoetryException(Exception):
+from __future__ import annotations
+
 
+class PoetryException(Exception):
     pass
 
 
 class InvalidProjectFile(PoetryException):
-
     pass
diff --git a/conda_lock/_vendor/poetry/factory.py b/conda_lock/_vendor/poetry/factory.py
old mode 100755
new mode 100644
index 8c80d0ab..f958e7cd
--- a/conda_lock/_vendor/poetry/factory.py
+++ b/conda_lock/_vendor/poetry/factory.py
@@ -1,22 +1,42 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
+from __future__ import annotations
 
-from typing import Dict
-from typing import Optional
+import contextlib
+import logging
+import re
 
-from clikit.api.io.io import IO
+from typing import TYPE_CHECKING
+from typing import Any
+from typing import cast
 
+from conda_lock._vendor.cleo.io.null_io import NullIO
+from packaging.utils import canonicalize_name
 from conda_lock._vendor.poetry.core.factory import Factory as BaseFactory
-from conda_lock._vendor.poetry.core.toml.file import TOMLFile
+from conda_lock._vendor.poetry.core.packages.dependency_group import MAIN_GROUP
+from conda_lock._vendor.poetry.core.packages.project_package import ProjectPackage
 
-from .config.config import Config
-from .config.file_config_source import FileConfigSource
-from .io.null_io import NullIO
-from .locations import CONFIG_DIR
-from .packages.locker import Locker
-from .poetry import Poetry
-from .repositories.pypi_repository import PyPiRepository
-from .utils._compat import Path
+from conda_lock._vendor.poetry.config.config import Config
+from conda_lock._vendor.poetry.exceptions import PoetryException
+from conda_lock._vendor.poetry.json import validate_object
+from conda_lock._vendor.poetry.packages.locker import Locker
+from conda_lock._vendor.poetry.plugins.plugin import Plugin
+from conda_lock._vendor.poetry.plugins.plugin_manager import PluginManager
+from conda_lock._vendor.poetry.poetry import Poetry
+from conda_lock._vendor.poetry.toml.file import TOMLFile
+
+
+if TYPE_CHECKING:
+    from collections.abc import Iterable
+    from pathlib import Path
+
+    from conda_lock._vendor.cleo.io.io import IO
+    from conda_lock._vendor.poetry.core.packages.package import Package
+    from tomlkit.toml_document import TOMLDocument
+
+    from conda_lock._vendor.poetry.repositories import RepositoryPool
+    from conda_lock._vendor.poetry.repositories.http_repository import HTTPRepository
+    from conda_lock._vendor.poetry.utils.dependency_specification import DependencySpec
+
+logger = logging.getLogger(__name__)
 
 
 class Factory(BaseFactory):
@@ -25,27 +45,29 @@ class Factory(BaseFactory):
     """
 
     def create_poetry(
-        self, cwd=None, io=None
-    ):  # type: (Optional[Path], Optional[IO]) -> Poetry
+        self,
+        cwd: Path | None = None,
+        with_groups: bool = True,
+        io: IO | None = None,
+        disable_plugins: bool = False,
+        disable_cache: bool = False,
+    ) -> Poetry:
         if io is None:
             io = NullIO()
 
-        base_poetry = super(Factory, self).create_poetry(cwd)
+        base_poetry = super().create_poetry(cwd=cwd, with_groups=with_groups)
 
-        locker = Locker(
-            base_poetry.file.parent / "poetry.lock", base_poetry.local_config
-        )
+        poetry_file = base_poetry.pyproject_path
+        locker = Locker(poetry_file.parent / "poetry.lock", base_poetry.local_config)
 
         # Loading global configuration
-        config = self.create_config(io)
+        config = Config.create()
 
         # Loading local configuration
-        local_config_file = TOMLFile(base_poetry.file.parent / "poetry.toml")
+        local_config_file = TOMLFile(poetry_file.parent / "poetry.toml")
         if local_config_file.exists():
             if io.is_debug():
-                io.write_line(
-                    "Loading configuration file {}".format(local_config_file.path)
-                )
+                io.write_line(f"Loading configuration file {local_config_file.path}")
 
             config.merge(local_config_file.read())
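
The layering here is: global configuration first, then the project-local `poetry.toml` merged on top. A hypothetical sketch of that merge semantics with plain dicts (poetry's `Config.merge` is the real, recursive implementation; the setting shown is illustrative):

    # Global config first; the project-local poetry.toml wins on conflicts.
    global_config = {"virtualenvs": {"in-project": None}}
    local_config = {"virtualenvs": {"in-project": True}}  # <project>/poetry.toml

    def merge(base, extra):
        # Simplified stand-in for Config.merge().
        for key, value in extra.items():
            if isinstance(value, dict) and isinstance(base.get(key), dict):
                merge(base[key], value)
            else:
                base[key] = value

    merge(global_config, local_config)
    assert global_config["virtualenvs"]["in-project"] is True
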
 
@@ -55,108 +77,294 @@ def create_poetry(
         for source in base_poetry.pyproject.poetry_config.get("source", []):
             name = source.get("name")
             url = source.get("url")
-            if name and url:
-                if name not in existing_repositories:
-                    repositories[name] = {"url": url}
+            if name and url and name not in existing_repositories:
+                repositories[name] = {"url": url}
 
         config.merge({"repositories": repositories})
 
         poetry = Poetry(
-            base_poetry.file.path,
+            poetry_file,
             base_poetry.local_config,
             base_poetry.package,
             locker,
             config,
+            disable_cache,
         )
 
-        # Configuring sources
-        sources = poetry.local_config.get("source", [])
-        for source in sources:
-            repository = self.create_legacy_repository(source, config)
-            is_default = source.get("default", False)
-            is_secondary = source.get("secondary", False)
-            if io.is_debug():
-                message = "Adding repository {} ({})".format(
-                    repository.name, repository.url
-                )
-                if is_default:
-                    message += " and setting it as the default one"
-                elif is_secondary:
-                    message += " and setting it as secondary"
-
-                io.write_line(message)
-
-            poetry.pool.add_repository(repository, is_default, secondary=is_secondary)
+        poetry.set_pool(
+            self.create_pool(
+                config,
+                poetry.local_config.get("source", []),
+                io,
+                disable_cache=disable_cache,
+            )
+        )
 
-        # Put PyPI last to prefer private repositories
-        # unless we have no default source AND no primary sources
-        # (default = false, secondary = false)
-        if poetry.pool.has_default():
-            if io.is_debug():
-                io.write_line("Deactivating the PyPI repository")
-        else:
-            default = not poetry.pool.has_primary_repositories()
-            poetry.pool.add_repository(PyPiRepository(), default, not default)
+        plugin_manager = PluginManager(Plugin.group, disable_plugins=disable_plugins)
+        plugin_manager.load_plugins()
+        poetry.set_plugin_manager(plugin_manager)
+        plugin_manager.activate(poetry, io)
 
         return poetry
 
     @classmethod
-    def create_config(cls, io=None):  # type: (Optional[IO]) -> Config
+    def get_package(cls, name: str, version: str) -> ProjectPackage:
+        return ProjectPackage(name, version)
+
+    @classmethod
+    def create_pool(
+        cls,
+        config: Config,
+        sources: Iterable[dict[str, Any]] = (),
+        io: IO | None = None,
+        disable_cache: bool = False,
+    ) -> RepositoryPool:
+        from conda_lock._vendor.poetry.repositories import RepositoryPool
+        from conda_lock._vendor.poetry.repositories.repository_pool import Priority
+
         if io is None:
             io = NullIO()
 
-        config = Config()
-        # Load global config
-        config_file = TOMLFile(Path(CONFIG_DIR) / "config.toml")
-        if config_file.exists():
-            if io.is_debug():
-                io.write_line(
-                    "Loading configuration file {}".format(
-                        config_file.path
-                    )
-                )
+        if disable_cache:
+            logger.debug("Disabling source caches")
 
-            config.merge(config_file.read())
+        pool = RepositoryPool(config=config)
 
-        config.set_config_source(FileConfigSource(config_file))
+        explicit_pypi = False
+        for source in sources:
+            repository = cls.create_package_source(
+                source, config, disable_cache=disable_cache
+            )
+            priority = Priority[source.get("priority", Priority.PRIMARY.name).upper()]
+            if "default" in source or "secondary" in source:
+                warning = (
+                    "Found deprecated key 'default' or 'secondary' in"
+                    " pyproject.toml configuration for source"
+                    f" {source.get('name')}. Please provide the key 'priority'"
+                    " instead. Accepted values are:"
+                    f" {', '.join(repr(p.name.lower()) for p in Priority)}."
+                )
+                io.write_error_line(f"Warning: {warning}")
+                if source.get("default"):
+                    priority = Priority.DEFAULT
+                elif source.get("secondary"):
+                    priority = Priority.SECONDARY
+
+            if priority is Priority.SECONDARY:
+                allowed_prios = (p for p in Priority if p is not Priority.SECONDARY)
+                warning = (
+                    "Found deprecated priority 'secondary' for source"
+                    f" '{source.get('name')}' in pyproject.toml. Consider changing the"
+                    " priority to one of the non-deprecated values:"
+                    f" {', '.join(repr(p.name.lower()) for p in allowed_prios)}."
+                )
+                io.write_error_line(f"Warning: {warning}")
+            elif priority is Priority.DEFAULT:
+                warning = (
+                    "Found deprecated priority 'default' for source"
+                    f" '{source.get('name')}' in pyproject.toml. You can achieve"
+                    " the same effect by changing the priority to 'primary' and putting"
+                    " the source first."
+                )
+                io.write_error_line(f"Warning: {warning}")
 
-        # Load global auth config
-        auth_config_file = TOMLFile(Path(CONFIG_DIR) / "auth.toml")
-        if auth_config_file.exists():
             if io.is_debug():
-                io.write_line(
-                    "Loading configuration file {}".format(
-                        auth_config_file.path
-                    )
+                message = f"Adding repository {repository.name} ({repository.url})"
+                if priority is Priority.DEFAULT:
+                    message += " and setting it as the default one"
+                else:
+                    message += f" and setting it as {priority.name.lower()}"
+
+                io.write_line(message)
+
+            pool.add_repository(repository, priority=priority)
+            if repository.name.lower() == "pypi":
+                explicit_pypi = True
+
+        # Only add PyPI if no default repository is configured
+        if not explicit_pypi:
+            if pool.has_default() or pool.has_primary_repositories():
+                if io.is_debug():
+                    io.write_line("Deactivating the PyPI repository")
+            else:
+                from conda_lock._vendor.poetry.repositories.pypi_repository import PyPiRepository
+
+                pool.add_repository(
+                    PyPiRepository(disable_cache=disable_cache),
+                    priority=Priority.PRIMARY,
                 )
 
-            config.merge(auth_config_file.read())
+        if not pool.repositories:
+            raise PoetryException(
+                "At least one source must not be configured as 'explicit'."
+            )
 
-        config.set_auth_config_source(FileConfigSource(auth_config_file))
+        return pool
 
-        return config
+    @classmethod
+    def create_package_source(
+        cls, source: dict[str, str], config: Config, disable_cache: bool = False
+    ) -> HTTPRepository:
+        from conda_lock._vendor.poetry.repositories.exceptions import InvalidSourceError
+        from conda_lock._vendor.poetry.repositories.legacy_repository import LegacyRepository
+        from conda_lock._vendor.poetry.repositories.pypi_repository import PyPiRepository
+        from conda_lock._vendor.poetry.repositories.single_page_repository import SinglePageRepository
+
+        try:
+            name = source["name"]
+        except KeyError:
+            raise InvalidSourceError("Missing [name] in source.")
+
+        pool_size = config.installer_max_workers
+
+        if name.lower() == "pypi":
+            if "url" in source:
+                raise InvalidSourceError(
+                    "The PyPI repository cannot be configured with a custom url."
+                )
+            return PyPiRepository(disable_cache=disable_cache, pool_size=pool_size)
 
-    def create_legacy_repository(
-        self, source, auth_config
-    ):  # type: (Dict[str, str], Config) -> LegacyRepository
-        from .repositories.legacy_repository import LegacyRepository
-        from .utils.helpers import get_cert
-        from .utils.helpers import get_client_cert
+        try:
+            url = source["url"]
+        except KeyError:
+            raise InvalidSourceError(f"Missing [url] in source {name!r}.")
 
-        if "url" in source:
-            # PyPI-like repository
-            if "name" not in source:
-                raise RuntimeError("Missing [name] in source.")
-        else:
-            raise RuntimeError("Unsupported source specified")
+        repository_class = LegacyRepository
 
-        name = source["name"]
-        url = source["url"]
+        if re.match(r".*\.(htm|html)$", url):
+            repository_class = SinglePageRepository
 
-        return LegacyRepository(
+        return repository_class(
             name,
             url,
-            config=auth_config,
-            cert=get_cert(auth_config, name),
-            client_cert=get_client_cert(auth_config, name),
+            config=config,
+            disable_cache=disable_cache,
+            pool_size=pool_size,
         )
+
+    @classmethod
+    def create_pyproject_from_package(cls, package: Package) -> TOMLDocument:
+        import tomlkit
+
+        from conda_lock._vendor.poetry.utils.dependency_specification import dependency_to_specification
+
+        pyproject: dict[str, Any] = tomlkit.document()
+
+        pyproject["tool"] = tomlkit.table(is_super_table=True)
+
+        content: dict[str, Any] = tomlkit.table()
+        pyproject["tool"]["poetry"] = content
+
+        content["name"] = package.name
+        content["version"] = package.version.text
+        content["description"] = package.description
+        content["authors"] = package.authors
+        content["license"] = package.license.id if package.license else ""
+
+        if package.classifiers:
+            content["classifiers"] = package.classifiers
+
+        for key, attr in {
+            ("documentation", "documentation_url"),
+            ("repository", "repository_url"),
+            ("homepage", "homepage"),
+            ("maintainers", "maintainers"),
+            ("keywords", "keywords"),
+        }:
+            value = getattr(package, attr, None)
+            if value:
+                content[key] = value
+
+        readmes = []
+
+        for readme in package.readmes:
+            readme_posix_path = readme.as_posix()
+
+            with contextlib.suppress(ValueError):
+                if package.root_dir:
+                    readme_posix_path = readme.relative_to(package.root_dir).as_posix()
+
+            readmes.append(readme_posix_path)
+
+        if readmes:
+            content["readme"] = readmes
+
+        optional_dependencies = set()
+        extras_section = None
+
+        if package.extras:
+            extras_section = tomlkit.table()
+
+            for extra in package.extras:
+                _dependencies = []
+                for dependency in package.extras[extra]:
+                    _dependencies.append(dependency.name)
+                    optional_dependencies.add(dependency.name)
+
+                extras_section[extra] = _dependencies
+
+        optional_dependencies = set(optional_dependencies)
+        dependency_section = content["dependencies"] = tomlkit.table()
+        dependency_section["python"] = package.python_versions
+
+        for dep in package.all_requires:
+            constraint: DependencySpec | str = dependency_to_specification(
+                dep, tomlkit.inline_table()
+            )
+
+            if not isinstance(constraint, str):
+                if dep.name in optional_dependencies:
+                    constraint["optional"] = True
+
+                if len(constraint) == 1 and "version" in constraint:
+                    assert isinstance(constraint["version"], str)
+                    constraint = constraint["version"]
+                elif not constraint:
+                    constraint = "*"
+
+            for group in dep.groups:
+                if group == MAIN_GROUP:
+                    dependency_section[dep.name] = constraint
+                else:
+                    if "group" not in content:
+                        content["group"] = tomlkit.table(is_super_table=True)
+
+                    if group not in content["group"]:
+                        content["group"][group] = tomlkit.table(is_super_table=True)
+
+                    if "dependencies" not in content["group"][group]:
+                        content["group"][group]["dependencies"] = tomlkit.table()
+
+                    content["group"][group]["dependencies"][dep.name] = constraint
+
+        if extras_section:
+            content["extras"] = extras_section
+
+        pyproject = cast("TOMLDocument", pyproject)
+
+        return pyproject
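
The document assembled by `create_pyproject_from_package()` has the nested `[tool.poetry]` shape below; a minimal sketch with `tomlkit` (field values illustrative):

    import tomlkit

    doc = tomlkit.document()
    doc["tool"] = tomlkit.table(is_super_table=True)  # renders as [tool.poetry]
    content = tomlkit.table()
    doc["tool"]["poetry"] = content

    content["name"] = "demo"
    content["version"] = "1.2.3"
    deps = content["dependencies"] = tomlkit.table()
    deps["python"] = ">=3.8"
    deps["requests"] = "*"

    print(tomlkit.dumps(doc))  # [tool.poetry] ... [tool.poetry.dependencies] ...
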
+
+    @classmethod
+    def validate(
+        cls, config: dict[str, Any], strict: bool = False
+    ) -> dict[str, list[str]]:
+        results = super().validate(config, strict)
+
+        results["errors"].extend(validate_object(config))
+
+        # A project should not depend on itself.
+        dependencies = set(config.get("dependencies", {}).keys())
+        dependencies.update(config.get("dev-dependencies", {}).keys())
+        groups = config.get("group", {}).values()
+        for group in groups:
+            dependencies.update(group.get("dependencies", {}).keys())
+
+        dependencies = {canonicalize_name(d) for d in dependencies}
+
+        project_name = config.get("name")
+        if project_name is not None and canonicalize_name(project_name) in dependencies:
+            results["errors"].append(
+                f"Project name ({project_name}) is same as one of its dependencies"
+            )
+
+        return results
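
The self-dependency check normalises every name through `canonicalize_name`, so spelling variants cannot hide the collision. A small sketch (config illustrative):

    from packaging.utils import canonicalize_name

    config = {"name": "My.Package", "dependencies": {"requests": "*", "my-package": "*"}}
    dependencies = {canonicalize_name(d) for d in config.get("dependencies", {})}
    assert canonicalize_name(config["name"]) in dependencies  # reported as an error
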
diff --git a/conda_lock/_vendor/poetry/inspection/info.py b/conda_lock/_vendor/poetry/inspection/info.py
index 78e5f40d..77ebdaa2 100644
--- a/conda_lock/_vendor/poetry/inspection/info.py
+++ b/conda_lock/_vendor/poetry/inspection/info.py
@@ -1,99 +1,110 @@
+from __future__ import annotations
+
+import contextlib
+import functools
 import glob
 import logging
 import os
-import tarfile
-import zipfile
 
-from typing import Dict
-from typing import Iterator
-from typing import List
-from typing import Optional
-from typing import Union
+from pathlib import Path
+from typing import TYPE_CHECKING
+from typing import Any
+from typing import Mapping
+from typing import Sequence
 
 import pkginfo
 
 from conda_lock._vendor.poetry.core.factory import Factory
-from conda_lock._vendor.poetry.core.packages import Package
-from conda_lock._vendor.poetry.core.packages import ProjectPackage
-from conda_lock._vendor.poetry.core.packages import dependency_from_pep_508
+from conda_lock._vendor.poetry.core.packages.dependency import Dependency
+from conda_lock._vendor.poetry.core.packages.package import Package
 from conda_lock._vendor.poetry.core.pyproject.toml import PyProjectTOML
-from conda_lock._vendor.poetry.core.utils._compat import PY35
-from conda_lock._vendor.poetry.core.utils._compat import Path
 from conda_lock._vendor.poetry.core.utils.helpers import parse_requires
 from conda_lock._vendor.poetry.core.utils.helpers import temporary_directory
 from conda_lock._vendor.poetry.core.version.markers import InvalidMarker
+from conda_lock._vendor.poetry.core.version.requirements import InvalidRequirement
+
 from conda_lock._vendor.poetry.utils.env import EnvCommandError
-from conda_lock._vendor.poetry.utils.env import EnvManager
-from conda_lock._vendor.poetry.utils.env import VirtualEnv
+from conda_lock._vendor.poetry.utils.env import ephemeral_environment
+from conda_lock._vendor.poetry.utils.helpers import extractall
 from conda_lock._vendor.poetry.utils.setup_reader import SetupReader
 
 
+if TYPE_CHECKING:
+    from collections.abc import Iterator
+
+    from packaging.metadata import RawMetadata
+    from packaging.utils import NormalizedName
+    from conda_lock._vendor.poetry.core.packages.project_package import ProjectPackage
+
+
 logger = logging.getLogger(__name__)
 
 PEP517_META_BUILD = """\
-import pep517.build
-import pep517.meta
-
-path='{source}'
-system=pep517.build.compat_system(path)
-pep517.meta.build(source_dir=path, dest='{dest}', system=system)
+import build
+import build.env
+import pyproject_hooks
+
+source = '{source}'
+dest = '{dest}'
+
+with build.env.DefaultIsolatedEnv() as env:
+    builder = build.ProjectBuilder.from_isolated_env(
+        env, source, runner=pyproject_hooks.quiet_subprocess_runner
+    )
+    env.install(builder.build_system_requires)
+    env.install(builder.get_requires_for_build('wheel'))
+    builder.metadata_path(dest)
 """
 
-PEP517_META_BUILD_DEPS = ["pep517===0.8.2", "toml==0.10.1"]
+PEP517_META_BUILD_DEPS = ["build==1.0.3", "pyproject_hooks==1.0.0"]
 
 
 class PackageInfoError(ValueError):
-    def __init__(
-        self, path, *reasons
-    ):  # type: (Union[Path, str], *Union[BaseException, str]) -> None
-        reasons = (
-            "Unable to determine package info for path: {}".format(str(path)),
-        ) + reasons
-        super(PackageInfoError, self).__init__(
-            "\n\n".join(str(msg).strip() for msg in reasons if msg)
-        )
+    def __init__(self, path: Path, *reasons: BaseException | str) -> None:
+        reasons = (f"Unable to determine package info for path: {path!s}", *reasons)
+        super().__init__("\n\n".join(str(msg).strip() for msg in reasons if msg))
 
 
 class PackageInfo:
     def __init__(
         self,
-        name=None,  # type: Optional[str]
-        version=None,  # type: Optional[str]
-        summary=None,  # type: Optional[str]
-        platform=None,  # type: Optional[str]
-        requires_dist=None,  # type: Optional[List[str]]
-        requires_python=None,  # type: Optional[str]
-        files=None,  # type: Optional[List[str]]
-        cache_version=None,  # type: Optional[str]
-    ):
+        *,
+        name: str | None = None,
+        version: str | None = None,
+        summary: str | None = None,
+        requires_dist: list[str] | None = None,
+        requires_python: str | None = None,
+        files: Sequence[Mapping[str, str]] | None = None,
+        yanked: str | bool = False,
+        cache_version: str | None = None,
+    ) -> None:
         self.name = name
         self.version = version
         self.summary = summary
-        self.platform = platform
         self.requires_dist = requires_dist
         self.requires_python = requires_python
         self.files = files or []
+        self.yanked = yanked
         self._cache_version = cache_version
-        self._source_type = None
-        self._source_url = None
-        self._source_reference = None
+        self._source_type: str | None = None
+        self._source_url: str | None = None
+        self._source_reference: str | None = None
 
     @property
-    def cache_version(self):  # type: () -> Optional[str]
+    def cache_version(self) -> str | None:
         return self._cache_version
 
-    def update(self, other):  # type: (PackageInfo) -> PackageInfo
+    def update(self, other: PackageInfo) -> PackageInfo:
         self.name = other.name or self.name
         self.version = other.version or self.version
         self.summary = other.summary or self.summary
-        self.platform = other.platform or self.platform
         self.requires_dist = other.requires_dist or self.requires_dist
         self.requires_python = other.requires_python or self.requires_python
         self.files = other.files or self.files
         self._cache_version = other.cache_version or self._cache_version
         return self
 
-    def asdict(self):  # type: () -> Dict[str, Optional[Union[str, List[str]]]]
+    def asdict(self) -> dict[str, Any]:
         """
         Helper method to convert package info into a dictionary used for caching.
         """
@@ -101,48 +112,49 @@ def asdict(self):  # type: () -> Dict[str, Optional[Union[str, List[str]]]]
             "name": self.name,
             "version": self.version,
             "summary": self.summary,
-            "platform": self.platform,
             "requires_dist": self.requires_dist,
             "requires_python": self.requires_python,
             "files": self.files,
+            "yanked": self.yanked,
             "_cache_version": self._cache_version,
         }
 
     @classmethod
-    def load(
-        cls, data
-    ):  # type: (Dict[str, Optional[Union[str, List[str]]]]) -> PackageInfo
+    def load(cls, data: dict[str, Any]) -> PackageInfo:
         """
         Helper method to load data from a dictionary produced by `PackageInfo.asdict()`.
 
-        :param data: Data to load. This is expected to be a `dict` object output by `asdict()`.
+        :param data: Data to load. This is expected to be a `dict` object output by
+            `asdict()`.
         """
         cache_version = data.pop("_cache_version", None)
         return cls(cache_version=cache_version, **data)
 
-    @classmethod
-    def _log(cls, msg, level="info"):
-        """Internal helper method to log information."""
-        getattr(logger, level)("{}: {}".format(cls.__name__, msg))
-
     def to_package(
-        self, name=None, extras=None, root_dir=None
-    ):  # type: (Optional[str], Optional[List[str]], Optional[Path]) -> Package
+        self,
+        name: str | None = None,
+        extras: list[str] | None = None,
+        root_dir: Path | None = None,
+    ) -> Package:
         """
-        Create a new `poetry.core.packages.package.Package` instance using metadata from this instance.
+        Create a new `poetry.core.packages.package.Package` instance using metadata from
+        this instance.
 
-        :param name: Name to use for the package, if not specified name from this instance is used.
+        :param name: Name to use for the package, if not specified name from this
+            instance is used.
         :param extras: Extras to activate for this package.
-        :param root_dir:  Optional root directory to use for the package. If set, dependency strings
-            will be parsed relative to this directory.
+        :param root_dir:  Optional root directory to use for the package. If set,
+            dependency strings will be parsed relative to this directory.
         """
         name = name or self.name
 
+        if not name:
+            raise RuntimeError("Unable to create package with no name")
+
         if not self.version:
-            # The version could not be determined, so we raise an error since it is mandatory.
-            raise RuntimeError(
-                "Unable to retrieve the package version for {}".format(name)
-            )
+            # The version could not be determined, so we raise an error since it is
+            # mandatory.
+            raise RuntimeError(f"Unable to retrieve the package version for {name}")
 
         package = Package(
             name=name,
@@ -150,66 +162,94 @@ def to_package(
             source_type=self._source_type,
             source_url=self._source_url,
             source_reference=self._source_reference,
+            yanked=self.yanked,
         )
-        package.description = self.summary
+        if self.summary is not None:
+            package.description = self.summary
         package.root_dir = root_dir
         package.python_versions = self.requires_python or "*"
         package.files = self.files
 
-        if root_dir or (self._source_type in {"directory"} and self._source_url):
-            # this is a local poetry project, this means we can extract "richer" requirement information
-            # eg: development requirements etc.
-            poetry_package = self._get_poetry_package(path=root_dir or self._source_url)
+        # If this is a local poetry project, we can extract "richer" requirement
+        # information, eg: development requirements etc.
+        if root_dir is not None:
+            path = root_dir
+        elif self._source_type == "directory" and self._source_url is not None:
+            path = Path(self._source_url)
+        else:
+            path = None
+
+        if path is not None:
+            poetry_package = self._get_poetry_package(path=path)
             if poetry_package:
                 package.extras = poetry_package.extras
-                package.requires = poetry_package.requires
+                for dependency in poetry_package.requires:
+                    package.add_dependency(dependency)
+
                 return package
 
         seen_requirements = set()
 
+        package_extras: dict[NormalizedName, list[Dependency]] = {}
         for req in self.requires_dist or []:
             try:
                 # Attempt to parse the PEP-508 requirement string
-                dependency = dependency_from_pep_508(req, relative_to=root_dir)
+                dependency = Dependency.create_from_pep_508(req, relative_to=root_dir)
             except InvalidMarker:
                 # Invalid marker, We strip the markers hoping for the best
+                logger.warning(
+                    "Stripping invalid marker (%s) found in %s-%s dependencies",
+                    req,
+                    package.name,
+                    package.version,
+                )
                 req = req.split(";")[0]
-                dependency = dependency_from_pep_508(req, relative_to=root_dir)
-            except ValueError:
-                # Likely unable to parse constraint so we skip it
-                self._log(
-                    "Invalid constraint ({}) found in {}-{} dependencies, "
-                    "skipping".format(req, package.name, package.version),
-                    level="warning",
+                dependency = Dependency.create_from_pep_508(req, relative_to=root_dir)
+            except InvalidRequirement:
+                # Unable to parse requirement so we skip it
+                logger.warning(
+                    "Invalid requirement (%s) found in %s-%s dependencies, skipping",
+                    req,
+                    package.name,
+                    package.version,
                 )
                 continue
 
             if dependency.in_extras:
                 # this dependency is required by an extra package
                 for extra in dependency.in_extras:
-                    if extra not in package.extras:
-                        # this is the first time we encounter this extra for this package
-                        package.extras[extra] = []
+                    if extra not in package_extras:
+                        # this is the first time we encounter this extra for this
+                        # package
+                        package_extras[extra] = []
 
-                    package.extras[extra].append(dependency)
+                    package_extras[extra].append(dependency)
 
             req = dependency.to_pep_508(with_extras=True)
 
             if req not in seen_requirements:
-                package.requires.append(dependency)
+                package.add_dependency(dependency)
                 seen_requirements.add(req)
 
+        package.extras = package_extras
+
         return package
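
When a requirement's environment marker fails to parse, the code above logs a warning, drops everything after the `;`, and retries. A sketch of the same recovery using `packaging`'s parser as a stand-in for `Dependency.create_from_pep_508` (requirement string illustrative):

    from packaging.requirements import InvalidRequirement, Requirement

    raw = 'futures (>=3.0) ; python_version ~~ "2.7"'  # bogus marker operator
    try:
        dep = Requirement(raw)
    except InvalidRequirement:
        dep = Requirement(raw.split(";")[0])  # strip the marker and retry

    assert dep.name == "futures"
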
 
     @classmethod
     def _from_distribution(
-        cls, dist
-    ):  # type: (Union[pkginfo.BDist, pkginfo.SDist, pkginfo.Wheel]) -> PackageInfo
+        cls, dist: pkginfo.BDist | pkginfo.SDist | pkginfo.Wheel
+    ) -> PackageInfo:
         """
-        Helper method to parse package information from a `pkginfo.Distribution` instance.
+        Helper method to parse package information from a `pkginfo.Distribution`
+        instance.
 
         :param dist: The distribution instance to parse information from.
         """
+        if dist.metadata_version not in pkginfo.distribution.HEADER_ATTRS:
+            # This check can be replaced once upstream implements strict parsing
+            # https://bugs.launchpad.net/pkginfo/+bug/2058697
+            raise ValueError(f"Unknown metadata version: {dist.metadata_version}")
+
         requirements = None
 
         if dist.requires_dist:
@@ -217,14 +257,13 @@ def _from_distribution(
         else:
             requires = Path(dist.filename) / "requires.txt"
             if requires.exists():
-                with requires.open(encoding="utf-8") as f:
-                    requirements = parse_requires(f.read())
+                text = requires.read_text(encoding="utf-8")
+                requirements = parse_requires(text)
 
         info = cls(
             name=dist.name,
             version=dist.version,
             summary=dist.summary,
-            platform=dist.supported_platforms,
             requires_dist=requirements,
             requires_python=dist.requires_python,
         )
@@ -235,47 +274,39 @@ def _from_distribution(
         return info
 
     @classmethod
-    def _from_sdist_file(cls, path):  # type: (Path) -> PackageInfo
+    def _from_sdist_file(cls, path: Path) -> PackageInfo:
         """
-        Helper method to parse package information from an sdist file. We attempt to first inspect the
-        file using `pkginfo.SDist`. If this does not provide us with package requirements, we extract the
-        source and handle it as a directory.
+        Helper method to parse package information from an sdist file. We attempt to
+        first inspect the file using `pkginfo.SDist`. If this does not provide us with
+        package requirements, we extract the source and handle it as a directory.
 
         :param path: The sdist file to parse information from.
         """
         info = None
 
-        try:
-            info = cls._from_distribution(pkginfo.SDist(str(path)))
-        except ValueError:
-            # Unable to determine dependencies
-            # We pass and go deeper
-            pass
-        else:
-            if info.requires_dist is not None:
-                # we successfully retrieved dependencies from sdist metadata
-                return info
+        with contextlib.suppress(ValueError):
+            sdist = pkginfo.SDist(str(path))
+            info = cls._from_distribution(sdist)
+
+        if info is not None and info.requires_dist is not None:
+            # we successfully retrieved dependencies from sdist metadata
+            return info
 
         # Still no dependencies found
         # So, we unpack and introspect
         suffix = path.suffix
+        zip = suffix == ".zip"
 
-        if suffix == ".zip":
-            context = zipfile.ZipFile
-        else:
-            if suffix == ".bz2":
-                suffixes = path.suffixes
-                if len(suffixes) > 1 and suffixes[-2] == ".tar":
-                    suffix = ".tar.bz2"
-            else:
-                suffix = ".tar.gz"
-
-            context = tarfile.open
+        if suffix == ".bz2":
+            suffixes = path.suffixes
+            if len(suffixes) > 1 and suffixes[-2] == ".tar":
+                suffix = ".tar.bz2"
+        elif not zip:
+            suffix = ".tar.gz"
 
-        with temporary_directory() as tmp:
-            tmp = Path(tmp)
-            with context(path.as_posix()) as archive:
-                archive.extractall(tmp.as_posix())
+        with temporary_directory() as tmp_str:
+            tmp = Path(tmp_str)
+            extractall(source=path, dest=tmp, zip=zip)
 
             # a little bit of guess work to determine the directory we care about
             elements = list(tmp.glob("*"))
@@ -289,6 +320,8 @@ def _from_sdist_file(cls, path):  # type: (Path) -> PackageInfo
 
             # now this is an unpacked directory we know how to deal with
             new_info = cls.from_directory(path=sdist_dir)
+            new_info._source_type = "file"
+            new_info._source_url = path.resolve().as_posix()
 
         if not info:
             return new_info
@@ -296,15 +329,16 @@ def _from_sdist_file(cls, path):  # type: (Path) -> PackageInfo
         return info.update(new_info)
 
     @staticmethod
-    def has_setup_files(path):  # type: (Path) -> bool
+    def has_setup_files(path: Path) -> bool:
         return any((path / f).exists() for f in SetupReader.FILES)
 
     @classmethod
-    def from_setup_files(cls, path):  # type: (Path) -> PackageInfo
+    def from_setup_files(cls, path: Path) -> PackageInfo:
         """
-        Mechanism to parse package information from a `setup.[py|cfg]` file. This uses the implementation
-        at `poetry.utils.setup_reader.SetupReader` in order to parse the file. This is not reliable for
-        complex setup files and should only attempted as a fallback.
+        Mechanism to parse package information from a `setup.[py|cfg]` file. This uses
+        the implementation at `poetry.utils.setup_reader.SetupReader` in order to parse
+        the file. This is not reliable for complex setup files and should only be
+        attempted as a fallback.
 
         :param path: Path to `setup.py` file
         """
@@ -322,15 +356,12 @@ def from_setup_files(cls, path):  # type: (Path) -> PackageInfo
         if python_requires is None:
             python_requires = "*"
 
-        requires = ""
-        for dep in result["install_requires"]:
-            requires += dep + "\n"
-
+        requires = "".join(dep + "\n" for dep in result["install_requires"])
         if result["extras_require"]:
             requires += "\n"
 
         for extra_name, deps in result["extras_require"].items():
-            requires += "[{}]\n".format(extra_name)
+            requires += f"[{extra_name}]\n"
 
             for dep in deps:
                 requires += dep + "\n"
@@ -357,26 +388,38 @@ def from_setup_files(cls, path):  # type: (Path) -> PackageInfo
         return info
 
     @staticmethod
-    def _find_dist_info(path):  # type: (Path) -> Iterator[Path]
+    def _find_dist_info(path: Path) -> Iterator[Path]:
         """
         Discover all `*.*-info` directories in a given path.
 
         :param path: Path to search.
         """
         pattern = "**/*.*-info"
-        if PY35:
-            # Sometimes pathlib will fail on recursive symbolic links, so we need to workaround it
-            # and use the glob module instead. Note that this does not happen with pathlib2
-            # so it's safe to use it for Python < 3.4.
-            directories = glob.iglob(path.joinpath(pattern).as_posix(), recursive=True)
-        else:
-            directories = path.glob(pattern)
+        # Sometimes pathlib will fail on recursive symbolic links, so we need to work
+        # around it and use the glob module instead. Note that this does not happen with
+        # pathlib2 so it's safe to use it for Python < 3.4.
+        directories = glob.iglob(path.joinpath(pattern).as_posix(), recursive=True)
 
         for d in directories:
             yield Path(d)
 
     @classmethod
-    def from_metadata(cls, path):  # type: (Path) -> Optional[PackageInfo]
+    def from_metadata(cls, metadata: RawMetadata) -> PackageInfo:
+        """
+        Create package information from core metadata.
+
+        :param metadata: raw metadata
+        """
+        return cls(
+            name=metadata.get("name"),
+            version=metadata.get("version"),
+            summary=metadata.get("summary"),
+            requires_dist=metadata.get("requires_dist"),
+            requires_python=metadata.get("requires_python"),
+        )
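
`from_metadata()` consumes a `RawMetadata`, which `packaging` can produce from a raw METADATA/PKG-INFO payload (`parse_email` is available in packaging >= 23.1; payload illustrative):

    from packaging.metadata import parse_email

    raw, _unparsed = parse_email(
        "Metadata-Version: 2.1\n"
        "Name: demo\n"
        "Version: 1.0\n"
        "Summary: A demo package\n"
    )
    assert raw.get("name") == "demo"
    assert raw.get("version") == "1.0"
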
+
+    @classmethod
+    def from_metadata_directory(cls, path: Path) -> PackageInfo | None:
         """
         Helper method to parse package information from an unpacked metadata directory.
 
@@ -385,8 +428,9 @@ def from_metadata(cls, path):  # type: (Path) -> Optional[PackageInfo]
         if path.suffix in {".dist-info", ".egg-info"}:
             directories = [path]
         else:
-            directories = cls._find_dist_info(path=path)
+            directories = list(cls._find_dist_info(path=path))
 
+        dist: pkginfo.BDist | pkginfo.SDist | pkginfo.Wheel
         for directory in directories:
             try:
                 if directory.suffix == ".egg-info":
@@ -403,14 +447,12 @@ def from_metadata(cls, path):  # type: (Path) -> Optional[PackageInfo]
                 # handle PKG-INFO in unpacked sdist root
                 dist = pkginfo.UnpackedSDist(path.as_posix())
             except ValueError:
-                return
+                return None
 
-        info = cls._from_distribution(dist=dist)
-        if info:
-            return info
+        return cls._from_distribution(dist=dist)
 
     @classmethod
-    def from_package(cls, package):  # type: (Package) -> PackageInfo
+    def from_package(cls, package: Package) -> PackageInfo:
         """
         Helper method to inspect a `Package` object, in order to generate package info.
 
@@ -426,122 +468,47 @@ def from_package(cls, package):  # type: (Package) -> PackageInfo
             name=package.name,
             version=str(package.version),
             summary=package.description,
-            platform=package.platform,
             requires_dist=list(requires),
             requires_python=package.python_versions,
             files=package.files,
+            yanked=package.yanked_reason if package.yanked else False,
         )
 
     @staticmethod
-    def _get_poetry_package(path):  # type: (Path) -> Optional[ProjectPackage]
+    def _get_poetry_package(path: Path) -> ProjectPackage | None:
         # Note: we ignore any setup.py file at this step
         # TODO: add support for handling non-poetry PEP-517 builds
         if PyProjectTOML(path.joinpath("pyproject.toml")).is_poetry_project():
-            try:
+            with contextlib.suppress(RuntimeError):
                 return Factory().create_poetry(path).package
-            except RuntimeError:
-                return None
 
         return None
 
     @classmethod
-    def _pep517_metadata(cls, path):  # type (Path) -> PackageInfo
+    def from_directory(cls, path: Path, disable_build: bool = False) -> PackageInfo:
         """
-        Helper method to use PEP-517 library to build and read package metadata.
-
-        :param path: Path to package source to build and read metadata for.
-        """
-        info = None
-        try:
-            info = cls.from_setup_files(path)
-            if all([info.version, info.name, info.requires_dist]):
-                return info
-        except PackageInfoError:
-            pass
-
-        with temporary_directory() as tmp_dir:
-            # TODO: cache PEP 517 build environment corresponding to each project venv
-            venv_dir = Path(tmp_dir) / ".venv"
-            EnvManager.build_venv(venv_dir.as_posix())
-            venv = VirtualEnv(venv_dir, venv_dir)
-
-            dest_dir = Path(tmp_dir) / "dist"
-            dest_dir.mkdir()
-
-            try:
-                venv.run_python(
-                    "-m",
-                    "pip",
-                    "install",
-                    "--disable-pip-version-check",
-                    "--ignore-installed",
-                    *PEP517_META_BUILD_DEPS
-                )
-                venv.run_python(
-                    "-",
-                    input_=PEP517_META_BUILD.format(
-                        source=path.as_posix(), dest=dest_dir.as_posix()
-                    ),
-                )
-                return cls.from_metadata(dest_dir)
-            except EnvCommandError as e:
-                # something went wrong while attempting pep517 metadata build
-                # fallback to egg_info if setup.py available
-                cls._log("PEP517 build failed: {}".format(e), level="debug")
-                setup_py = path / "setup.py"
-                if not setup_py.exists():
-                    raise PackageInfoError(
-                        path,
-                        e,
-                        "No fallback setup.py file was found to generate egg_info.",
-                    )
-
-                cwd = Path.cwd()
-                os.chdir(path.as_posix())
-                try:
-                    venv.run_python("setup.py", "egg_info")
-                    return cls.from_metadata(path)
-                except EnvCommandError as fbe:
-                    raise PackageInfoError(
-                        path, "Fallback egg_info generation failed.", fbe
-                    )
-                finally:
-                    os.chdir(cwd.as_posix())
-
-        if info:
-            cls._log(
-                "Falling back to parsed setup.py file for {}".format(path), "debug"
-            )
-            return info
-
-        # if we reach here, everything has failed and all hope is lost
-        raise PackageInfoError(path, "Exhausted all core metadata sources.")
-
-    @classmethod
-    def from_directory(
-        cls, path, disable_build=False
-    ):  # type: (Path, bool) -> PackageInfo
-        """
-        Generate package information from a package source directory. If `disable_build` is not `True` and
-        introspection of all available metadata fails, the package is attempted to be build in an isolated
-        environment so as to generate required metadata.
+        Generate package information from a package source directory. If `disable_build`
+        is not `True` and introspection of all available metadata fails, an attempt is
+        made to build the package in an isolated environment in order to generate the
+        required metadata.
 
         :param path: Path to generate package information from.
-        :param disable_build: If not `True` and setup reader fails, PEP 517 isolated build is attempted in
-            order to gather metadata.
+        :param disable_build: If not `True` and the setup reader fails, a PEP 517
+            isolated build is attempted in order to gather metadata.
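+
+        A minimal usage sketch (illustrative; ``/tmp/pkg`` stands in for a real
+        source checkout)::
+
+            from pathlib import Path
+
+            info = PackageInfo.from_directory(Path("/tmp/pkg"), disable_build=True)
+            print(info.name, info.version, info.requires_dist)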
         """
         project_package = cls._get_poetry_package(path)
+        info: PackageInfo | None
         if project_package:
             info = cls.from_package(project_package)
         else:
-            info = cls.from_metadata(path)
+            info = cls.from_metadata_directory(path)
 
             if not info or info.requires_dist is None:
                 try:
                     if disable_build:
                         info = cls.from_setup_files(path)
                     else:
-                        info = cls._pep517_metadata(path)
+                        info = get_pep517_metadata(path)
                 except PackageInfoError:
                     if not info:
                         raise
@@ -554,7 +521,7 @@ def from_directory(
         return info
 
     @classmethod
-    def from_sdist(cls, path):  # type: (Path) -> PackageInfo
+    def from_sdist(cls, path: Path) -> PackageInfo:
         """
         Gather package information from an sdist file, packed or unpacked.
 
@@ -568,37 +535,36 @@ def from_sdist(cls, path):  # type: (Path) -> PackageInfo
         return cls.from_directory(path=path)
 
     @classmethod
-    def from_wheel(cls, path):  # type: (Path) -> PackageInfo
+    def from_wheel(cls, path: Path) -> PackageInfo:
         """
         Gather package information from a wheel.
 
         :param path: Path to wheel.
         """
         try:
-            return cls._from_distribution(pkginfo.Wheel(str(path)))
-        except ValueError:
-            return PackageInfo()
+            wheel = pkginfo.Wheel(str(path))
+            return cls._from_distribution(wheel)
+        except ValueError as e:
+            raise PackageInfoError(path, e)
 
     @classmethod
-    def from_bdist(cls, path):  # type: (Path) -> PackageInfo
+    def from_bdist(cls, path: Path) -> PackageInfo:
         """
         Gather package information from a bdist (wheel etc.).
 
         :param path: Path to bdist.
         """
-        if isinstance(path, (pkginfo.BDist, pkginfo.Wheel)):
-            cls._from_distribution(dist=path)
-
         if path.suffix == ".whl":
             return cls.from_wheel(path=path)
 
         try:
-            return cls._from_distribution(pkginfo.BDist(str(path)))
+            bdist = pkginfo.BDist(str(path))
+            return cls._from_distribution(bdist)
         except ValueError as e:
             raise PackageInfoError(path, e)
 
     @classmethod
-    def from_path(cls, path):  # type: (Path) -> PackageInfo
+    def from_path(cls, path: Path) -> PackageInfo:
         """
         Gather package information from a given path (bdist, sdist, directory).
 
@@ -608,3 +574,70 @@ def from_path(cls, path):  # type: (Path) -> PackageInfo
             return cls.from_bdist(path=path)
         except PackageInfoError:
             return cls.from_sdist(path=path)
+
+
+@functools.lru_cache(maxsize=None)
+def get_pep517_metadata(path: Path) -> PackageInfo:
+    """
+    Helper function that uses an isolated PEP 517 build to read package metadata.
+
+    :param path: Path to package source to build and read metadata for.
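+
+    Illustrative sketch (``/tmp/pkg`` is a hypothetical source tree; results are
+    memoized per path via ``functools.lru_cache``)::
+
+        from pathlib import Path
+
+        info = get_pep517_metadata(Path("/tmp/pkg"))
+        print(info.requires_dist)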
+    """
+    info = None
+
+    with contextlib.suppress(PackageInfoError):
+        info = PackageInfo.from_setup_files(path)
+        if all([info.version, info.name, info.requires_dist]):
+            return info
+
+    with ephemeral_environment(
+        flags={"no-pip": False, "setuptools": "bundle", "wheel": "bundle"}
+    ) as venv:
+        # TODO: cache PEP 517 build environment corresponding to each project venv
+        dest_dir = venv.path.parent / "dist"
+        dest_dir.mkdir()
+
+        pep517_meta_build_script = PEP517_META_BUILD.format(
+            source=path.as_posix(), dest=dest_dir.as_posix()
+        )
+
+        try:
+            venv.run_pip(
+                "install",
+                "--disable-pip-version-check",
+                "--ignore-installed",
+                "--no-input",
+                *PEP517_META_BUILD_DEPS,
+            )
+            venv.run_python_script(pep517_meta_build_script)
+            info = PackageInfo.from_metadata_directory(dest_dir)
+        except EnvCommandError as e:
+            # something went wrong while attempting pep517 metadata build
+            # fallback to egg_info if setup.py available
+            logger.debug("PEP517 build failed: %s", e)
+            setup_py = path / "setup.py"
+            if not setup_py.exists():
+                raise PackageInfoError(
+                    path,
+                    e,
+                    "No fallback setup.py file was found to generate egg_info.",
+                )
+
+            cwd = Path.cwd()
+            os.chdir(path)
+            try:
+                venv.run("python", "setup.py", "egg_info")
+                info = PackageInfo.from_metadata_directory(path)
+            except EnvCommandError as fbe:
+                raise PackageInfoError(
+                    path, e, "Fallback egg_info generation failed.", fbe
+                )
+            finally:
+                os.chdir(cwd)
+
+    if info:
+        logger.debug("Falling back to parsed setup.py file for %s", path)
+        return info
+
+    # if we reach here, everything has failed and all hope is lost
+    raise PackageInfoError(path, "Exhausted all core metadata sources.")
diff --git a/conda_lock/_vendor/poetry/inspection/lazy_wheel.py b/conda_lock/_vendor/poetry/inspection/lazy_wheel.py
new file mode 100644
index 00000000..28b06490
--- /dev/null
+++ b/conda_lock/_vendor/poetry/inspection/lazy_wheel.py
@@ -0,0 +1,736 @@
+"""Lazy ZIP over HTTP"""
+
+from __future__ import annotations
+
+import io
+import logging
+import re
+
+from bisect import bisect_left
+from bisect import bisect_right
+from contextlib import contextmanager
+from tempfile import NamedTemporaryFile
+from typing import TYPE_CHECKING
+from typing import Any
+from typing import BinaryIO
+from typing import ClassVar
+from typing import TypeVar
+from typing import cast
+from urllib.parse import urlparse
+from zipfile import BadZipFile
+from zipfile import ZipFile
+
+from packaging.metadata import parse_email
+from requests.models import CONTENT_CHUNK_SIZE
+from requests.models import HTTPError
+from requests.models import Response
+from requests.status_codes import codes
+
+
+if TYPE_CHECKING:
+    from collections.abc import Iterable
+    from collections.abc import Iterator
+    from types import TracebackType
+
+    from packaging.metadata import RawMetadata
+    from requests import Session
+
+    from conda_lock._vendor.poetry.utils.authenticator import Authenticator
+
+
+logger = logging.getLogger(__name__)
+
+
+class LazyWheelUnsupportedError(Exception):
+    """Raised when a lazy wheel is unsupported."""
+
+
+class HTTPRangeRequestUnsupported(LazyWheelUnsupportedError):
+    """Raised when the remote server appears unable to support byte ranges."""
+
+
+class HTTPRangeRequestNotRespected(LazyWheelUnsupportedError):
+    """Raised when the remote server tells us that it supports byte ranges
+    but does not respect a respective request."""
+
+
+class UnsupportedWheel(LazyWheelUnsupportedError):
+    """Unsupported wheel."""
+
+
+class InvalidWheel(LazyWheelUnsupportedError):
+    """Invalid (e.g. corrupt) wheel."""
+
+    def __init__(self, location: str, name: str) -> None:
+        self.location = location
+        self.name = name
+
+    def __str__(self) -> str:
+        return f"Wheel {self.name} located at {self.location} is invalid."
+
+
+def metadata_from_wheel_url(
+    name: str, url: str, session: Session | Authenticator
+) -> RawMetadata:
+    """Fetch metadata from the given wheel URL.
+
+    This uses HTTP range requests to fetch only the portion of the wheel
+    containing metadata, just enough for the metadata to be parsed.
+
+    :raises HTTPRangeRequestUnsupported: if range requests are unsupported for ``url``.
+    :raises InvalidWheel: if the zip file contents could not be parsed.
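+
+    Illustrative sketch (the URL is hypothetical)::
+
+        import requests
+
+        raw = metadata_from_wheel_url(
+            "demo",
+            "https://example.com/packages/demo-1.0-py3-none-any.whl",
+            requests.Session(),
+        )
+        print(raw["name"], raw["version"])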
+    """
+    try:
+        # After context manager exit, lazy_file.name will point to a deleted file
+        # path. Pass `delete_backing_file=False` to keep the file for debugging.
+        with LazyWheelOverHTTP(url, session) as lazy_file:
+            metadata_bytes = lazy_file.read_metadata(name)
+
+        metadata, _ = parse_email(metadata_bytes)
+        return metadata
+
+    except (BadZipFile, UnsupportedWheel):
+        # We assume that these errors have occurred because the wheel contents
+        # themselves are invalid, not because we've messed up our bookkeeping
+        # and produced an invalid file.
+        raise InvalidWheel(url, name)
+    except Exception as e:
+        if isinstance(e, LazyWheelUnsupportedError):
+            # Expected: LazyWheelUnsupportedError subclasses indicate a handled
+            # failure mode, so propagate them unchanged.
+            raise e
+
+        logger.debug(
+            "There was an unexpected %s when handling lazy wheel metadata retrieval for %s from %s: %s",
+            type(e).__name__,
+            name,
+            url,
+            e,
+        )
+
+        # Catch-all: wrap any other issue that occurred while attempting to use
+        # the lazy wheel.
+        raise LazyWheelUnsupportedError(
+            f"Attempts to use lazy wheel metadata retrieval for {name} from {url} failed"
+        ) from e
+
+
+class MergeIntervals:
+    """Stateful bookkeeping to merge interval graphs."""
+
+    def __init__(self, *, left: Iterable[int] = (), right: Iterable[int] = ()) -> None:
+        self._left = list(left)
+        self._right = list(right)
+
+    def __repr__(self) -> str:
+        return (
+            f"{type(self).__name__}"
+            f"(left={tuple(self._left)}, right={tuple(self._right)})"
+        )
+
+    def _merge(
+        self, start: int, end: int, left: int, right: int
+    ) -> Iterator[tuple[int, int]]:
+        """Return an iterator of intervals to be fetched.
+
+        Args:
+            start: Start of needed interval
+            end: End of needed interval
+            left: Index of first overlapping downloaded data
+            right: Index after last overlapping downloaded data
+        """
+        lslice, rslice = self._left[left:right], self._right[left:right]
+        i = start = min([start] + lslice[:1])
+        end = max([end] + rslice[-1:])
+        for j, k in zip(lslice, rslice):
+            if j > i:
+                yield i, j - 1
+            i = k + 1
+        if i <= end:
+            yield i, end
+        self._left[left:right], self._right[left:right] = [start], [end]
+
+    def minimal_intervals_covering(
+        self, start: int, end: int
+    ) -> Iterator[tuple[int, int]]:
+        """Provide the intervals needed to cover from ``start <= x <= end``.
+
+        This method mutates internal state so that later calls only return intervals not
+        covered by prior calls. The first call to this method will always return exactly
+        one interval, which was exactly the one requested. Later requests for
+        intervals overlapping that first requested interval will yield only the ranges
+        not previously covered (which may be empty, e.g. if the same interval is
+        requested twice).
+
+        This may be used e.g. to download substrings of remote files on demand.
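+
+        Illustrative example::
+
+            merge = MergeIntervals()
+            assert list(merge.minimal_intervals_covering(0, 9)) == [(0, 9)]
+            # An overlapping follow-up request yields only the uncovered tail.
+            assert list(merge.minimal_intervals_covering(5, 15)) == [(10, 15)]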
+        """
+        left = bisect_left(self._right, start)
+        right = bisect_right(self._left, end)
+        yield from self._merge(start, end, left, right)
+
+
+T = TypeVar("T", bound="ReadOnlyIOWrapper")
+
+
+class ReadOnlyIOWrapper(BinaryIO):
+    """Implement read-side ``BinaryIO`` methods wrapping an inner ``BinaryIO``.
+
+    This wrapper is useful because Python currently does not distinguish read-only
+    streams at the type level.
+    """
+
+    def __init__(self, inner: BinaryIO) -> None:
+        self._file = inner
+
+    def __enter__(self: T) -> T:
+        self._file.__enter__()
+        return self
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_value: BaseException | None,
+        traceback: TracebackType | None,
+    ) -> None:
+        self._file.__exit__(exc_type, exc_value, traceback)
+
+    def __iter__(self) -> Iterator[bytes]:
+        raise NotImplementedError
+
+    def __next__(self) -> bytes:
+        raise NotImplementedError
+
+    @property
+    def mode(self) -> str:
+        """Opening mode, which is always rb."""
+        return "rb"
+
+    @property
+    def name(self) -> str:
+        """Path to the underlying file."""
+        return self._file.name
+
+    def seekable(self) -> bool:
+        """Return whether random access is supported, which is True."""
+        return True
+
+    def close(self) -> None:
+        """Close the file."""
+        self._file.close()
+
+    @property
+    def closed(self) -> bool:
+        """Whether the file is closed."""
+        return self._file.closed
+
+    def fileno(self) -> int:
+        return self._file.fileno()
+
+    def flush(self) -> None:
+        self._file.flush()
+
+    def isatty(self) -> bool:
+        return False
+
+    def readable(self) -> bool:
+        """Return whether the file is readable, which is True."""
+        return True
+
+    def read(self, size: int = -1) -> bytes:
+        """Read up to size bytes from the object and return them.
+
+        As a convenience, if size is unspecified or -1,
+        all bytes until EOF are returned.  Fewer than
+        size bytes may be returned if EOF is reached.
+        """
+        return self._file.read(size)
+
+    def readline(self, limit: int = -1) -> bytes:
+        # Explicit impl needed to satisfy mypy.
+        raise NotImplementedError
+
+    def readlines(self, hint: int = -1) -> list[bytes]:
+        raise NotImplementedError
+
+    def seek(self, offset: int, whence: int = 0) -> int:
+        """Change stream position and return the new absolute position.
+
+        Seek to ``offset`` relative to the position indicated by ``whence``:
+        * 0: Start of stream (the default); ``offset`` should be >= 0.
+        * 1: Current position; ``offset`` may be negative.
+        * 2: End of stream; ``offset`` is usually negative.
+        """
+        return self._file.seek(offset, whence)
+
+    def tell(self) -> int:
+        """Return the current position."""
+        return self._file.tell()
+
+    def truncate(self, size: int | None = None) -> int:
+        """Resize the stream to the given size in bytes.
+
+        If size is unspecified resize to the current position.
+        The current stream position isn't changed.
+
+        Return the new file size.
+        """
+        return self._file.truncate(size)
+
+    def writable(self) -> bool:
+        """Return False."""
+        return False
+
+    def write(self, s: Any) -> int:
+        raise NotImplementedError
+
+    def writelines(self, lines: Iterable[Any]) -> None:
+        raise NotImplementedError
+
+
+U = TypeVar("U", bound="LazyFileOverHTTP")
+
+
+class LazyFileOverHTTP(ReadOnlyIOWrapper):
+    """File-like object representing a fixed-length file over HTTP.
+
+    This uses HTTP range requests to lazily fetch the file's content into a temporary
+    file. If such requests are not supported by the server, raises
+    ``HTTPRangeRequestUnsupported`` in the ``__enter__`` method."""
+
+    def __init__(
+        self,
+        url: str,
+        session: Session | Authenticator,
+        delete_backing_file: bool = True,
+    ) -> None:
+        super().__init__(cast(BinaryIO, NamedTemporaryFile(delete=delete_backing_file)))
+
+        self._merge_intervals: MergeIntervals | None = None
+        self._length: int | None = None
+
+        self._request_count = 0
+        self._session = session
+        self._url = url
+
+    def __enter__(self: U) -> U:
+        super().__enter__()
+        self._setup_content()
+        return self
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_value: BaseException | None,
+        traceback: TracebackType | None,
+    ) -> None:
+        self._reset_content()
+        super().__exit__(exc_type, exc_value, traceback)
+
+    def read(self, size: int = -1) -> bytes:
+        """Read up to size bytes from the object and return them.
+
+        As a convenience, if size is unspecified or -1,
+        all bytes until EOF are returned.  Fewer than
+        size bytes may be returned if EOF is reached.
+
+        :raises ValueError: if ``__enter__`` was not called beforehand.
+        """
+        if self._length is None:
+            raise ValueError(".__enter__() must be called to set up content length")
+        cur = self.tell()
+        logger.debug("read size %d at %d from lazy file %s", size, cur, self.name)
+        if size < 0:
+            assert cur <= self._length
+            download_size = self._length - cur
+        elif size == 0:
+            return b""
+        else:
+            download_size = size
+        stop = min(cur + download_size, self._length)
+        self._ensure_downloaded(cur, stop)
+        return super().read(download_size)
+
+    @classmethod
+    def _uncached_headers(cls) -> dict[str, str]:
+        """HTTP headers to bypass any HTTP caching.
+
+        The requests we perform in this file are intentionally small, and any caching
+        should be done at a higher level.
+
+        Further, caching partial requests might cause issues:
+        https://github.com/pypa/pip/pull/8716
+        """
+        # "no-cache" is the correct value for "up to date every time", so this will also
+        # ensure we get the most recent value from the server:
+        # https://developer.mozilla.org/en-US/docs/Web/HTTP/Caching#provide_up-to-date_content_every_time
+        return {"Accept-Encoding": "identity", "Cache-Control": "no-cache"}
+
+    def _setup_content(self) -> None:
+        """Initialize the internal length field and other bookkeeping.
+
+        Ensure ``self._merge_intervals`` is initialized.
+
+        After parsing the remote file length with ``self._fetch_content_length()``,
+        this method will truncate the underlying file from parent abstract class
+        ``ReadOnlyIOWrapper`` to that size in order to support seek operations against
+        ``io.SEEK_END`` in ``self.read()``.
+
+        Called in ``__enter__``, and should make recursive invocations into a no-op.
+        Subclasses may override this method."""
+        if self._merge_intervals is None:
+            self._merge_intervals = MergeIntervals()
+
+        if self._length is None:
+            logger.debug("begin fetching content length")
+            self._length = self._fetch_content_length()
+            logger.debug("done fetching content length (is: %d)", self._length)
+            # Enable us to seek and write anywhere in the backing file up to this
+            # known length.
+            self.truncate(self._length)
+        else:
+            logger.debug("content length already fetched (is: %d)", self._length)
+
+    def _reset_content(self) -> None:
+        """Unset the internal length field and merge intervals.
+
+        Called in ``__exit__``, and should make recursive invocations into a no-op.
+        Subclasses may override this method."""
+        if self._merge_intervals is not None:
+            logger.debug(
+                "unsetting merge intervals (were: %s)", repr(self._merge_intervals)
+            )
+            self._merge_intervals = None
+
+        if self._length is not None:
+            logger.debug("unsetting content length (was: %d)", self._length)
+            self._length = None
+
+    def _content_length_from_head(self) -> int:
+        """Performs a HEAD request to extract the Content-Length.
+
+        :raises HTTPRangeRequestUnsupported: if the response fails to indicate support
+                                             for "bytes" ranges."""
+        self._request_count += 1
+        head = self._session.head(
+            self._url, headers=self._uncached_headers(), allow_redirects=True
+        )
+        head.raise_for_status()
+        assert head.status_code == codes.ok
+        accepted_range = head.headers.get("Accept-Ranges", None)
+        if accepted_range != "bytes":
+            raise HTTPRangeRequestUnsupported(
+                f"server does not support byte ranges: header was '{accepted_range}'"
+            )
+        return int(head.headers["Content-Length"])
+
+    def _fetch_content_length(self) -> int:
+        """Get the remote file's length."""
+        # NB: This is currently dead code, as _fetch_content_length() is overridden
+        #     again in LazyWheelOverHTTP.
+        return self._content_length_from_head()
+
+    def _stream_response(self, start: int, end: int) -> Response:
+        """Return streaming HTTP response to a range request from start to end."""
+        headers = self._uncached_headers()
+        headers["Range"] = f"bytes={start}-{end}"
+        logger.debug("streamed bytes request: %s", headers["Range"])
+        self._request_count += 1
+        response = self._session.get(self._url, headers=headers, stream=True)
+        response.raise_for_status()
+        if int(response.headers["Content-Length"]) != (end - start + 1):
+            raise HTTPRangeRequestNotRespected(
+                f"server did not respect byte range request: "
+                f"requested {end - start + 1} bytes, got "
+                f"{response.headers['Content-Length']} bytes"
+            )
+        return response
+
+    def _fetch_content_range(self, start: int, end: int) -> Iterator[bytes]:
+        """Perform a series of HTTP range requests to cover the specified byte range.
+
+        NB: For compatibility with HTTP range requests, the range provided to this
+        method must *include* the byte indexed at argument ``end`` (so e.g. ``0-1`` is 2
+        bytes long, and the range can never be empty).
+        """
+        yield from self._stream_response(start, end).iter_content(CONTENT_CHUNK_SIZE)
+
+    @contextmanager
+    def _stay(self) -> Iterator[None]:
+        """Return a context manager keeping the position.
+
+        At the end of the block, seek back to original position.
+        """
+        pos = self.tell()
+        try:
+            yield
+        finally:
+            self.seek(pos)
+
+    def _ensure_downloaded(self, start: int, end: int) -> None:
+        """Ensures bytes start to end (inclusive) have been downloaded and written to
+        the backing file.
+
+        :raises ValueError: if ``__enter__`` was not called beforehand.
+        """
+        if self._merge_intervals is None:
+            raise ValueError(".__enter__() must be called to set up merge intervals")
+        # Reducing by 1 to get an inclusive end range.
+        end -= 1
+        with self._stay():
+            for (
+                range_start,
+                range_end,
+            ) in self._merge_intervals.minimal_intervals_covering(start, end):
+                self.seek(start)
+                for chunk in self._fetch_content_range(range_start, range_end):
+                    self._file.write(chunk)
+
+
+class LazyWheelOverHTTP(LazyFileOverHTTP):
+    """File-like object mapped to a ZIP file over HTTP.
+
+    This uses HTTP range requests to lazily fetch the file's content, which should be
+    provided as the first argument to a ``ZipFile``.
+    """
+
+    # Cache this on the type to avoid trying and failing our initial lazy wheel request
+    # multiple times in the same invocation against an index without this support.
+    _domains_without_negative_range: ClassVar[set[str]] = set()
+
+    _metadata_regex = re.compile(r"^[^/]*\.dist-info/METADATA$")
+
+    def read_metadata(self, name: str) -> bytes:
+        """Download and read the METADATA file from the remote wheel."""
+        with ZipFile(self) as zf:
+            # prefetch metadata to reduce the number of range requests
+            filename = self._prefetch_metadata(name)
+            return zf.read(filename)
+
+    @classmethod
+    def _initial_chunk_length(cls) -> int:
+        """Return the size of the chunk (in bytes) to download from the end of the file.
+
+        This method is called in ``self._fetch_content_length()``. The value should
+        be set high enough to cover the central
+        directory sizes of the *average* wheels you expect to see, in order to avoid
+        further requests before being able to process the zip file's contents at all.
+        If we choose a small number, we need one more range request for larger wheels.
+        If we choose a big number, we download unnecessary data from smaller wheels.
+        If the chunk size from this method is larger than the size of an entire wheel,
+        that may raise an HTTP error, but this is gracefully handled in
+        ``self._fetch_content_length()`` with a small performance penalty.
+        """
+        return 10_000
+
+    def _fetch_content_length(self) -> int:
+        """Get the total remote file length, but also download a chunk from the end.
+
+        This method is called within ``__enter__``. In an attempt to reduce
+        the total number of requests needed to populate this lazy file's contents, this
+        method will also attempt to fetch a chunk of the file's actual content. This
+        chunk will be ``self._initial_chunk_length()`` bytes in size, or just the remote
+        file's length if that's smaller, and the chunk will come from the *end* of
+        the file.
+
+        This method will first attempt to download with a negative byte range request,
+        i.e. a GET with the headers ``Range: bytes=-N`` for ``N`` equal to
+        ``self._initial_chunk_length()``. If negative offsets are unsupported, it will
+        instead fall back to making a HEAD request first to extract the length, followed
+        by a GET request with the double-ended range header ``Range: bytes=X-Y`` to
+        extract the final ``N`` bytes from the remote resource.
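+
+        For example (illustrative), with an initial chunk length of 10_000 the
+        first attempt sends ``Range: bytes=-10000``; on fallback, a HEAD request
+        yields the full length ``L`` and the tail is then covered by a request
+        with ``Range: bytes={L - 10000}-{L - 1}``.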
+        """
+        initial_chunk_size = self._initial_chunk_length()
+        ret_length, tail = self._extract_content_length(initial_chunk_size)
+
+        # Need to explicitly truncate here in order to perform the write and seek
+        # operations below when we write the chunk of file contents to disk.
+        self.truncate(ret_length)
+
+        if tail is None:
+            # If we could not download any file contents yet (e.g. if negative byte
+            # ranges were not supported, or the requested range was larger than the file
+            # size), then download all of this at once, hopefully pulling in the entire
+            # central directory.
+            initial_start = max(0, ret_length - initial_chunk_size)
+            self._ensure_downloaded(initial_start, ret_length)
+        else:
+            # If we *could* download some file contents, then write them to the end of
+            # the file and set up our bisect boundaries by hand.
+            with self._stay():
+                response_length = int(tail.headers["Content-Length"])
+                assert response_length == min(initial_chunk_size, ret_length)
+                self.seek(-response_length, io.SEEK_END)
+                # The initial chunk size is currently 10 kB (see
+                # _initial_chunk_length()), but streaming the content here allows
+                # it to be set arbitrarily large.
+                for chunk in tail.iter_content(CONTENT_CHUNK_SIZE):
+                    self._file.write(chunk)
+
+                # We now need to update our bookkeeping to cover the interval we just
+                # wrote to file so we know not to do it in later read()s.
+                init_chunk_start = ret_length - response_length
+                # MergeIntervals uses inclusive boundaries i.e. start <= x <= end.
+                init_chunk_end = ret_length - 1
+                assert self._merge_intervals is not None
+                assert ((init_chunk_start, init_chunk_end),) == tuple(
+                    # NB: We expect LazyFileOverHTTP to reset `self._merge_intervals`
+                    # just before it calls the current method, so our assertion here
+                    # checks that indeed no prior overlapping intervals have
+                    # been covered.
+                    self._merge_intervals.minimal_intervals_covering(
+                        init_chunk_start, init_chunk_end
+                    )
+                )
+        return ret_length
+
+    @staticmethod
+    def _parse_full_length_from_content_range(arg: str) -> int:
+        """Parse the file's full underlying length from the Content-Range header.
+
+        This supports both * and numeric ranges, from success or error responses:
+        https://www.rfc-editor.org/rfc/rfc9110#field.content-range.
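+
+        Illustrative example::
+
+            _parse_full_length_from_content_range("bytes 0-9/1024")  # -> 1024
+            _parse_full_length_from_content_range("bytes */2048")    # -> 2048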
+        """
+        m = re.match(r"bytes [^/]+/([0-9]+)", arg)
+        if m is None:
+            raise HTTPRangeRequestUnsupported(f"could not parse Content-Range: '{arg}'")
+        return int(m.group(1))
+
+    def _try_initial_chunk_request(
+        self, initial_chunk_size: int
+    ) -> tuple[int, Response]:
+        """Attempt to fetch a chunk from the end of the file with a negative offset."""
+        headers = self._uncached_headers()
+        # Perform a negative range index, which is not supported by some servers.
+        headers["Range"] = f"bytes=-{initial_chunk_size}"
+        logger.debug("initial bytes request: %s", headers["Range"])
+
+        self._request_count += 1
+        tail = self._session.get(self._url, headers=headers, stream=True)
+        tail.raise_for_status()
+
+        code = tail.status_code
+        if code != codes.partial_content:
+            # According to
+            # https://developer.mozilla.org/en-US/docs/Web/HTTP/Range_requests,
+            # a 200 OK implies that range requests are not supported,
+            # regardless of the requested size.
+            # However, some servers that support negative range requests also return a
+            # 200 OK if the requested range from the end was larger than the file size.
+            if code == codes.ok:
+                accept_ranges = tail.headers.get("Accept-Ranges", None)
+                content_length = int(tail.headers["Content-Length"])
+                if accept_ranges == "bytes" and content_length <= initial_chunk_size:
+                    return content_length, tail
+
+            raise HTTPRangeRequestUnsupported(
+                f"did not receive partial content: got code {code}"
+            )
+
+        if "Content-Range" not in tail.headers:
+            raise LazyWheelUnsupportedError(
+                f"file length cannot be determined for {self._url}, "
+                f"did not receive content range header from server"
+            )
+
+        file_length = self._parse_full_length_from_content_range(
+            tail.headers["Content-Range"]
+        )
+        return (file_length, tail)
+
+    def _extract_content_length(
+        self, initial_chunk_size: int
+    ) -> tuple[int, Response | None]:
+        """Get the Content-Length of the remote file, and possibly a chunk of it."""
+        domain = urlparse(self._url).netloc
+        if domain in self._domains_without_negative_range:
+            return (self._content_length_from_head(), None)
+
+        tail: Response | None
+        try:
+            # Initial range request for just the end of the file.
+            file_length, tail = self._try_initial_chunk_request(initial_chunk_size)
+        except HTTPError as e:
+            # Our initial request using a negative byte range was not supported.
+            resp = e.response
+            code = resp.status_code if resp is not None else None
+
+            # This indicates that the requested range from the end was larger than the
+            # actual file size: https://www.rfc-editor.org/rfc/rfc9110#status.416.
+            if (
+                code == codes.requested_range_not_satisfiable
+                and resp is not None
+                and "Content-Range" in resp.headers
+            ):
+                # In this case, we don't have any file content yet, but we do know the
+                # size the file will be, so we can return that and exit here.
+                file_length = self._parse_full_length_from_content_range(
+                    resp.headers["Content-Range"]
+                )
+                return file_length, None
+
+            # pypi notably does not support negative byte ranges: see
+            # https://github.com/pypi/warehouse/issues/12823.
+            logger.debug(
+                "Negative byte range not supported for domain '%s': "
+                "using HEAD request before lazy wheel from now on (code: %s)",
+                domain,
+                code,
+            )
+            # Avoid trying a negative byte range request against this domain for the
+            # rest of the resolve.
+            self._domains_without_negative_range.add(domain)
+            # Apply a HEAD request to get the real size, and nothing else for now.
+            return self._content_length_from_head(), None
+
+        # Some servers that do not support negative offsets
+        # handle a negative offset like "-10" as "0-10"...
+        # ... or behave even more strangely; see
+        # https://github.com/python-poetry/poetry/issues/9056#issuecomment-1973273721
+        if int(tail.headers["Content-Length"]) > initial_chunk_size or tail.headers.get(
+            "Content-Range", ""
+        ).startswith("bytes -"):
+            tail = None
+            self._domains_without_negative_range.add(domain)
+        return file_length, tail
+
+    def _prefetch_metadata(self, name: str) -> str:
+        """Locate the *.dist-info/METADATA entry from a temporary ``ZipFile`` wrapper,
+        and download it.
+
+        This method assumes that the *.dist-info directory (containing e.g. METADATA) is
+        contained in a single contiguous section of the zip file in order to ensure it
+        can be downloaded in a single ranged GET request."""
+        logger.debug("begin prefetching METADATA for %s", name)
+
+        start: int | None = None
+        end: int | None = None
+
+        # This may perform further requests if __init__() did not pull in the entire
+        # central directory at the end of the file (although _initial_chunk_length()
+        # should be set large enough to avoid this).
+        zf = ZipFile(self)
+
+        filename = ""
+        for info in zf.infolist():
+            if start is None:
+                if self._metadata_regex.search(info.filename):
+                    filename = info.filename
+                    start = info.header_offset
+                    continue
+            else:
+                # The last .dist-info/ entry may be before the end of the file if the
+                # wheel's entries are sorted lexicographically (which is unusual).
+                if not self._metadata_regex.search(info.filename):
+                    end = info.header_offset
+                    break
+        if start is None:
+            raise UnsupportedWheel(
+                f"no {self._metadata_regex!r} found for {name} in {self.name}"
+            )
+        # If it is the last entry of the zip, then give us everything
+        # until the start of the central directory.
+        if end is None:
+            end = zf.start_dir
+        logger.debug(f"fetch {filename}")
+        self._ensure_downloaded(start, end)
+        logger.debug("done prefetching METADATA for %s", name)
+
+        return filename
diff --git a/conda_lock/_vendor/poetry/installation/__init__.py b/conda_lock/_vendor/poetry/installation/__init__.py
index 385d7b8c..bb543a4d 100644
--- a/conda_lock/_vendor/poetry/installation/__init__.py
+++ b/conda_lock/_vendor/poetry/installation/__init__.py
@@ -1 +1,6 @@
-from .installer import Installer
+from __future__ import annotations
+
+from conda_lock._vendor.poetry.installation.installer import Installer
+
+
+__all__ = ["Installer"]
diff --git a/conda_lock/_vendor/poetry/installation/authenticator.py b/conda_lock/_vendor/poetry/installation/authenticator.py
deleted file mode 100644
index 58d0314e..00000000
--- a/conda_lock/_vendor/poetry/installation/authenticator.py
+++ /dev/null
@@ -1,165 +0,0 @@
-import logging
-import time
-
-from typing import TYPE_CHECKING
-
-import requests
-import requests.auth
-import requests.exceptions
-
-from conda_lock._vendor.poetry.exceptions import PoetryException
-from conda_lock._vendor.poetry.utils._compat import urlparse
-from conda_lock._vendor.poetry.utils.password_manager import PasswordManager
-
-
-if TYPE_CHECKING:
-    from typing import Any
-    from typing import Optional
-    from typing import Tuple
-
-    from clikit.api.io import IO
-
-    from conda_lock._vendor.poetry.config.config import Config
-
-
-logger = logging.getLogger()
-
-
-class Authenticator(object):
-    def __init__(self, config, io=None):  # type: (Config, Optional[IO]) -> None
-        self._config = config
-        self._io = io
-        self._credentials = {}
-        self._password_manager = PasswordManager(self._config)
-
-    def _log(self, message, level="debug"):  # type: (str, str) -> None
-        if self._io is not None:
-            self._io.write_line(
-                "<{level:s}>{message:s}".format(
-                    message=message, level=level
-                )
-            )
-        else:
-            getattr(logger, level, logger.debug)(message)
-
-    @property
-    def session(self):  # type: () -> requests.Session
-        return requests.Session()
-
-    def request(
-        self, method, url, **kwargs
-    ):  # type: (str, str, Any) -> requests.Response
-        request = requests.Request(method, url)
-        username, password = self.get_credentials_for_url(url)
-
-        if username is not None and password is not None:
-            request = requests.auth.HTTPBasicAuth(username, password)(request)
-
-        session = self.session
-        prepared_request = session.prepare_request(request)
-
-        proxies = kwargs.get("proxies", {})
-        stream = kwargs.get("stream")
-        verify = kwargs.get("verify")
-        cert = kwargs.get("cert")
-
-        settings = session.merge_environment_settings(
-            prepared_request.url, proxies, stream, verify, cert
-        )
-
-        # Send the request.
-        send_kwargs = {
-            "timeout": kwargs.get("timeout"),
-            "allow_redirects": kwargs.get("allow_redirects", True),
-        }
-        send_kwargs.update(settings)
-
-        attempt = 0
-
-        while True:
-            is_last_attempt = attempt >= 5
-            try:
-                resp = session.send(prepared_request, **send_kwargs)
-            except (requests.exceptions.ConnectionError, OSError) as e:
-                if is_last_attempt:
-                    raise e
-            else:
-                if resp.status_code not in [502, 503, 504] or is_last_attempt:
-                    resp.raise_for_status()
-                    return resp
-
-            if not is_last_attempt:
-                attempt += 1
-                delay = 0.5 * attempt
-                self._log(
-                    "Retrying HTTP request in {} seconds.".format(delay), level="debug"
-                )
-                time.sleep(delay)
-                continue
-
-        # this should never really be hit under any sane circumstance
-        raise PoetryException("Failed HTTP {} request", method.upper())
-
-    def get_credentials_for_url(
-        self, url
-    ):  # type: (str) -> Tuple[Optional[str], Optional[str]]
-        parsed_url = urlparse.urlsplit(url)
-
-        netloc = parsed_url.netloc
-
-        credentials = self._credentials.get(netloc, (None, None))
-
-        if credentials == (None, None):
-            if "@" not in netloc:
-                credentials = self._get_credentials_for_netloc_from_config(netloc)
-            else:
-                # Split from the right because that's how urllib.parse.urlsplit()
-                # behaves if more than one @ is present (which can be checked using
-                # the password attribute of urlsplit()'s return value).
-                auth, netloc = netloc.rsplit("@", 1)
-                if ":" in auth:
-                    # Split from the left because that's how urllib.parse.urlsplit()
-                    # behaves if more than one : is present (which again can be checked
-                    # using the password attribute of the return value)
-                    credentials = auth.split(":", 1)
-                else:
-                    credentials = auth, None
-
-                credentials = tuple(
-                    None if x is None else urlparse.unquote(x) for x in credentials
-                )
-
-        if credentials[0] is not None or credentials[1] is not None:
-            credentials = (credentials[0] or "", credentials[1] or "")
-
-            self._credentials[netloc] = credentials
-
-        return credentials[0], credentials[1]
-
-    def _get_credentials_for_netloc_from_config(
-        self, netloc
-    ):  # type: (str) -> Tuple[Optional[str], Optional[str]]
-        credentials = (None, None)
-
-        for repository_name in self._config.get("repositories", []):
-            repository_config = self._config.get(
-                "repositories.{}".format(repository_name)
-            )
-            if not repository_config:
-                continue
-
-            url = repository_config.get("url")
-            if not url:
-                continue
-
-            parsed_url = urlparse.urlsplit(url)
-
-            if netloc == parsed_url.netloc:
-                auth = self._password_manager.get_http_auth(repository_name)
-
-                if auth is None:
-                    continue
-
-                return auth["username"], auth["password"]
-
-        return credentials
diff --git a/conda_lock/_vendor/poetry/installation/base_installer.py b/conda_lock/_vendor/poetry/installation/base_installer.py
deleted file mode 100644
index 1e068d07..00000000
--- a/conda_lock/_vendor/poetry/installation/base_installer.py
+++ /dev/null
@@ -1,9 +0,0 @@
-class BaseInstaller:
-    def install(self, package):
-        raise NotImplementedError
-
-    def update(self, source, target):
-        raise NotImplementedError
-
-    def remove(self, package):
-        raise NotImplementedError
diff --git a/conda_lock/_vendor/poetry/installation/chef.py b/conda_lock/_vendor/poetry/installation/chef.py
index 373980ff..93272e3c 100644
--- a/conda_lock/_vendor/poetry/installation/chef.py
+++ b/conda_lock/_vendor/poetry/installation/chef.py
@@ -1,110 +1,204 @@
-import hashlib
-import json
+from __future__ import annotations
 
+import os
+import tempfile
+
+from contextlib import redirect_stdout
+from io import StringIO
+from pathlib import Path
 from typing import TYPE_CHECKING
 
-from conda_lock._vendor.poetry.core.packages.utils.link import Link
-from conda_lock._vendor.poetry.utils._compat import Path
+from build import BuildBackendException
+from build import ProjectBuilder
+from build.env import IsolatedEnv as BaseIsolatedEnv
+from conda_lock._vendor.poetry.core.utils.helpers import temporary_directory
+from pyproject_hooks import quiet_subprocess_runner  # type: ignore[import-untyped]
 
-from .chooser import InvalidWheelName
-from .chooser import Wheel
+from conda_lock._vendor.poetry.utils._compat import decode
+from conda_lock._vendor.poetry.utils.env import ephemeral_environment
+from conda_lock._vendor.poetry.utils.helpers import extractall
 
 
 if TYPE_CHECKING:
-    from typing import List
-    from typing import Optional
+    from collections.abc import Collection
 
-    from conda_lock._vendor.poetry.config.config import Config
+    from conda_lock._vendor.poetry.repositories import RepositoryPool
+    from conda_lock._vendor.poetry.utils.cache import ArtifactCache
     from conda_lock._vendor.poetry.utils.env import Env
 
 
-class Chef:
-    def __init__(self, config, env):  # type: (Config, Env) -> None
-        self._config = config
-        self._env = env
-        self._cache_dir = (
-            Path(config.get("cache-dir")).expanduser().joinpath("artifacts")
-        )
+class ChefError(Exception): ...
 
-    def prepare(self, archive):  # type: (Path) -> Path
-        return archive
 
-    def prepare_sdist(self, archive):  # type: (Path) -> Path
-        return archive
+class ChefBuildError(ChefError): ...
 
-    def prepare_wheel(self, archive):  # type: (Path) -> Path
-        return archive
 
-    def should_prepare(self, archive):  # type: (Path) -> bool
-        return not self.is_wheel(archive)
+class ChefInstallError(ChefError):
+    def __init__(self, requirements: Collection[str], output: str, error: str) -> None:
+        message = "\n\n".join(
+            (
+                f"Failed to install {', '.join(requirements)}.",
+                f"Output:\n{output}",
+                f"Error:\n{error}",
+            )
+        )
+        super().__init__(message)
+        self._requirements = requirements
 
-    def is_wheel(self, archive):  # type: (Path) -> bool
-        return archive.suffix == ".whl"
+    @property
+    def requirements(self) -> Collection[str]:
+        return self._requirements
 
-    def get_cached_archive_for_link(self, link):  # type: (Link) -> Optional[Link]
-        # If the archive is already a wheel, there is no need to cache it.
-        if link.is_wheel:
-            pass
 
-        archives = self.get_cached_archives_for_link(link)
+class IsolatedEnv(BaseIsolatedEnv):
+    def __init__(self, env: Env, pool: RepositoryPool) -> None:
+        self._env = env
+        self._pool = pool
+
+    @property
+    def python_executable(self) -> str:
+        return str(self._env.python)
+
+    def make_extra_environ(self) -> dict[str, str]:
+        path = os.environ.get("PATH")
+        scripts_dir = str(self._env._bin_dir)
+        return {
+            "PATH": (
+                os.pathsep.join([scripts_dir, path])
+                if path is not None
+                else scripts_dir
+            )
+        }
+
+    def install(self, requirements: Collection[str]) -> None:
+        from conda_lock._vendor.cleo.io.buffered_io import BufferedIO
+        from conda_lock._vendor.poetry.core.packages.dependency import Dependency
+        from conda_lock._vendor.poetry.core.packages.project_package import ProjectPackage
+
+        from conda_lock._vendor.poetry.config.config import Config
+        from conda_lock._vendor.poetry.installation.installer import Installer
+        from conda_lock._vendor.poetry.packages.locker import Locker
+        from conda_lock._vendor.poetry.repositories.installed_repository import InstalledRepository
+
+        # We build Poetry dependencies from the requirements
+        package = ProjectPackage("__root__", "0.0.0")
+        package.python_versions = ".".join(str(v) for v in self._env.version_info[:3])
+        for requirement in requirements:
+            dependency = Dependency.create_from_pep_508(requirement)
+            package.add_dependency(dependency)
+
+        io = BufferedIO()
+        installer = Installer(
+            io,
+            self._env,
+            package,
+            Locker(self._env.path.joinpath("poetry.lock"), {}),
+            self._pool,
+            Config.create(),
+            InstalledRepository.load(self._env),
+        )
+        installer.update(True)
+        if installer.run() != 0:
+            raise ChefInstallError(requirements, io.fetch_output(), io.fetch_error())
 
-        if not archives:
-            return link
 
-        candidates = []
-        for archive in archives:
-            if not archive.is_wheel:
-                candidates.append((float("inf"), archive))
-                continue
+class Chef:
+    def __init__(
+        self, artifact_cache: ArtifactCache, env: Env, pool: RepositoryPool
+    ) -> None:
+        self._env = env
+        self._pool = pool
+        self._artifact_cache = artifact_cache
+
+    def prepare(
+        self, archive: Path, output_dir: Path | None = None, *, editable: bool = False
+    ) -> Path:
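+        # Dispatch sketch: wheels are returned unchanged, source directories are
+        # built in place, and sdist archives are extracted and then built.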
+        if not self._should_prepare(archive):
+            return archive
+
+        if archive.is_dir():
+            destination = output_dir or Path(tempfile.mkdtemp(prefix="poetry-chef-"))
+            return self._prepare(archive, destination=destination, editable=editable)
+
+        return self._prepare_sdist(archive, destination=output_dir)
+
+    def _prepare(
+        self, directory: Path, destination: Path, *, editable: bool = False
+    ) -> Path:
+        from subprocess import CalledProcessError
+
+        with ephemeral_environment(self._env.python) as venv:
+            env = IsolatedEnv(venv, self._pool)
+            builder = ProjectBuilder.from_isolated_env(
+                env, directory, runner=quiet_subprocess_runner
+            )
+            env.install(builder.build_system_requires)
 
+            stdout = StringIO()
+            error: Exception | None = None
             try:
-                wheel = Wheel(archive.filename)
-            except InvalidWheelName:
-                continue
-
-            if not wheel.is_supported_by_environment(self._env):
-                continue
-
-            candidates.append(
-                (wheel.get_minimum_supported_index(self._env.supported_tags), archive),
+                with redirect_stdout(stdout):
+                    dist_format = "wheel" if not editable else "editable"
+                    env.install(
+                        builder.build_system_requires
+                        | builder.get_requires_for_build(dist_format)
+                    )
+                    path = Path(
+                        builder.build(
+                            dist_format,
+                            destination.as_posix(),
+                        )
+                    )
+            except BuildBackendException as e:
+                message_parts = [str(e)]
+                if isinstance(e.exception, CalledProcessError):
+                    text = e.exception.stderr or e.exception.stdout
+                    if text is not None:
+                        message_parts.append(decode(text))
+                else:
+                    message_parts.append(str(e.exception))
+
+                error = ChefBuildError("\n\n".join(message_parts))
+
+            if error is not None:
+                raise error from None
+
+            return path
+
+    def _prepare_sdist(self, archive: Path, destination: Path | None = None) -> Path:
+        from conda_lock._vendor.poetry.core.packages.utils.link import Link
+
+        suffix = archive.suffix
+        zip = suffix == ".zip"
+
+        with temporary_directory() as tmp_dir:
+            archive_dir = Path(tmp_dir)
+            extractall(source=archive, dest=archive_dir, zip=zip)
+
+            elements = list(archive_dir.glob("*"))
+
+            if len(elements) == 1 and elements[0].is_dir():
+                sdist_dir = elements[0]
+            else:
+                # Strip the extension; str.rstrip() would strip a character set,
+                # not the literal suffix.
+                sdist_dir = archive_dir / archive.name[: -len(suffix)]
+                if not sdist_dir.is_dir():
+                    sdist_dir = archive_dir
+
+            if destination is None:
+                destination = self._artifact_cache.get_cache_directory_for_link(
+                    Link(archive.as_uri())
+                )
+
+            destination.mkdir(parents=True, exist_ok=True)
+
+            return self._prepare(
+                sdist_dir,
+                destination,
             )
 
-        if not candidates:
-            return link
-
-        return min(candidates)[1]
-
-    def get_cached_archives_for_link(self, link):  # type: (Link) -> List[Link]
-        cache_dir = self.get_cache_directory_for_link(link)
-
-        archive_types = ["whl", "tar.gz", "tar.bz2", "bz2", "zip"]
-        links = []
-        for archive_type in archive_types:
-            for archive in cache_dir.glob("*.{}".format(archive_type)):
-                links.append(Link(archive.as_uri()))
-
-        return links
-
-    def get_cache_directory_for_link(self, link):  # type: (Link) -> Path
-        key_parts = {"url": link.url_without_fragment}
-
-        if link.hash_name is not None and link.hash is not None:
-            key_parts[link.hash_name] = link.hash
+    def _should_prepare(self, archive: Path) -> bool:
+        return archive.is_dir() or not self._is_wheel(archive)
 
-        if link.subdirectory_fragment:
-            key_parts["subdirectory"] = link.subdirectory_fragment
-
-        key_parts["interpreter_name"] = self._env.marker_env["interpreter_name"]
-        key_parts["interpreter_version"] = "".join(
-            self._env.marker_env["interpreter_version"].split(".")[:2]
-        )
-
-        key = hashlib.sha256(
-            json.dumps(
-                key_parts, sort_keys=True, separators=(",", ":"), ensure_ascii=True
-            ).encode("ascii")
-        ).hexdigest()
-
-        split_key = [key[:2], key[2:4], key[4:6], key[6:]]
-
-        return self._cache_dir.joinpath(*split_key)
+    @classmethod
+    def _is_wheel(cls, archive: Path) -> bool:
+        return archive.suffix == ".whl"
diff --git a/conda_lock/_vendor/poetry/installation/chooser.py b/conda_lock/_vendor/poetry/installation/chooser.py
index f205d738..143c277e 100644
--- a/conda_lock/_vendor/poetry/installation/chooser.py
+++ b/conda_lock/_vendor/poetry/installation/chooser.py
@@ -1,46 +1,28 @@
-import re
-
-from typing import List
-from typing import Tuple
-
-from packaging.tags import Tag
-
-from conda_lock._vendor.poetry.core.packages.package import Package
-from conda_lock._vendor.poetry.core.packages.utils.link import Link
-from conda_lock._vendor.poetry.repositories.pool import Pool
-from conda_lock._vendor.poetry.utils.env import Env
-from conda_lock._vendor.poetry.utils.patterns import wheel_file_re
-
+from __future__ import annotations
 
-class InvalidWheelName(Exception):
-    pass
+import logging
+import re
 
+from typing import TYPE_CHECKING
+from typing import Any
 
-class Wheel(object):
-    def __init__(self, filename):  # type: (str) -> None
-        wheel_info = wheel_file_re.match(filename)
-        if not wheel_info:
-            raise InvalidWheelName("{} is not a valid wheel filename.".format(filename))
+from conda_lock._vendor.poetry.config.config import Config
+from conda_lock._vendor.poetry.config.config import PackageFilterPolicy
+from conda_lock._vendor.poetry.repositories.http_repository import HTTPRepository
+from conda_lock._vendor.poetry.utils.helpers import get_highest_priority_hash_type
+from conda_lock._vendor.poetry.utils.wheel import Wheel
 
-        self.filename = filename
-        self.name = wheel_info.group("name").replace("_", "-")
-        self.version = wheel_info.group("ver").replace("_", "-")
-        self.build_tag = wheel_info.group("build")
-        self.pyversions = wheel_info.group("pyver").split(".")
-        self.abis = wheel_info.group("abi").split(".")
-        self.plats = wheel_info.group("plat").split(".")
 
-        self.tags = {
-            Tag(x, y, z) for x in self.pyversions for y in self.abis for z in self.plats
-        }
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.core.constraints.version import Version
+    from conda_lock._vendor.poetry.core.packages.package import Package
+    from conda_lock._vendor.poetry.core.packages.utils.link import Link
 
-    def get_minimum_supported_index(self, tags):
-        indexes = [tags.index(t) for t in self.tags if t in tags]
+    from conda_lock._vendor.poetry.repositories.repository_pool import RepositoryPool
+    from conda_lock._vendor.poetry.utils.env import Env
 
-        return min(indexes) if indexes else None
 
-    def is_supported_by_environment(self, env):
-        return bool(set(env.supported_tags).intersection(self.tags))
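The removed `Wheel` helper expanded a wheel filename into its tag set and intersected it with the environment's supported tags; the new code imports an equivalent from `poetry.utils.wheel`. A hedged sketch of the same check using `packaging.tags` directly (the wheel's tag set is illustrative):

```python
from packaging.tags import Tag, sys_tags

# Tag triples a pure-Python wheel advertises (python, abi, platform).
wheel_tags = {Tag("py3", "none", "any")}

supported = list(sys_tags())  # the running interpreter's tags, best first
is_supported = bool(wheel_tags.intersection(supported))

# The smallest index doubles as a preference rank, mirroring
# get_minimum_supported_index in the removed class.
rank = min((supported.index(t) for t in wheel_tags if t in supported), default=None)
print(is_supported, rank)
```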
+logger = logging.getLogger(__name__)
 
 
 class Chooser:
@@ -48,77 +30,109 @@ class Chooser:
     A Chooser chooses an appropriate release archive for packages.
     """
 
-    def __init__(self, pool, env):  # type: (Pool, Env) -> None
+    def __init__(
+        self, pool: RepositoryPool, env: Env, config: Config | None = None
+    ) -> None:
         self._pool = pool
         self._env = env
+        self._config = config or Config.create()
+        self._no_binary_policy: PackageFilterPolicy = PackageFilterPolicy(
+            self._config.get("installer.no-binary", [])
+        )
 
-    def choose_for(self, package):  # type: (Package) -> Link
+    def choose_for(self, package: Package) -> Link:
         """
         Return the url of the selected archive for a given package.
         """
         links = []
         for link in self._get_links(package):
-            if link.is_wheel and not Wheel(link.filename).is_supported_by_environment(
-                self._env
-            ):
-                continue
+            if link.is_wheel:
+                if not self._no_binary_policy.allows(package.name):
+                    logger.debug(
+                        "Skipping wheel for %s as requested in no binary policy for"
+                        " package (%s)",
+                        link.filename,
+                        package.name,
+                    )
+                    continue
+
+                if not Wheel(link.filename).is_supported_by_environment(self._env):
+                    logger.debug(
+                        "Skipping wheel %s as this is not supported by the current"
+                        " environment",
+                        link.filename,
+                    )
+                    continue
 
             if link.ext in {".egg", ".exe", ".msi", ".rpm", ".srpm"}:
+                logger.debug("Skipping unsupported distribution %s", link.filename)
                 continue
 
             links.append(link)
 
         if not links:
-            raise RuntimeError(
-                "Unable to find installation candidates for {}".format(package)
-            )
+            raise RuntimeError(f"Unable to find installation candidates for {package}")
 
         # Get the best link
         chosen = max(links, key=lambda link: self._sort_key(package, link))
-        if not chosen:
-            raise RuntimeError(
-                "Unable to find installation candidates for {}".format(package)
-            )
 
         return chosen
 
-    def _get_links(self, package):  # type: (Package) -> List[Link]
-        if not package.source_type:
-            if not self._pool.has_repository("pypi"):
-                repository = self._pool.repositories[0]
-            else:
-                repository = self._pool.repository("pypi")
-        else:
+    def _get_links(self, package: Package) -> list[Link]:
+        if package.source_type:
+            assert package.source_reference is not None
             repository = self._pool.repository(package.source_reference)
 
+        elif not self._pool.has_repository("pypi"):
+            repository = self._pool.repositories[0]
+        else:
+            repository = self._pool.repository("pypi")
         links = repository.find_links_for_package(package)
 
-        hashes = [f["hash"] for f in package.files]
-        if not hashes:
+        locked_hashes = {f["hash"] for f in package.files}
+        if not locked_hashes:
             return links
 
         selected_links = []
+        skipped = []
+        locked_hash_names = {h.split(":")[0] for h in locked_hashes}
         for link in links:
-            if not link.hash:
+            if not link.hashes:
                 selected_links.append(link)
                 continue
 
-            h = link.hash_name + ":" + link.hash
-            if h not in hashes:
+            link_hash: str | None = None
+            if (candidates := locked_hash_names.intersection(link.hashes.keys())) and (
+                hash_name := get_highest_priority_hash_type(candidates, link.filename)
+            ):
+                link_hash = f"{hash_name}:{link.hashes[hash_name]}"
+
+            elif isinstance(repository, HTTPRepository):
+                link_hash = repository.calculate_sha256(link)
+
+            if link_hash not in locked_hashes:
+                skipped.append((link.filename, link_hash))
+                logger.debug(
+                    "Skipping %s as %s checksum does not match expected value",
+                    link.filename,
+                    link_hash,
+                )
                 continue
 
             selected_links.append(link)
 
         if links and not selected_links:
+            links_str = ", ".join(f"{link}({h})" for link, h in skipped)
             raise RuntimeError(
-                "Retrieved digest for link {}({}) not in poetry.lock metadata {}".format(
-                    link.filename, h, hashes
-                )
+                f"Retrieved digests for links {links_str} not in poetry.lock"
+                f" metadata {locked_hashes}"
             )
 
         return selected_links
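A toy, self-contained version of the locked-hash matching above; the real priority order lives in poetry's `get_highest_priority_hash_type` helper, which is approximated here by an assumed fixed preference list:

```python
locked_hashes = {"sha256:aa11", "md5:bb22"}       # from poetry.lock metadata
link_hashes = {"sha256": "aa11", "sha1": "cc33"}  # advertised by the index

locked_hash_names = {h.split(":")[0] for h in locked_hashes}
preference = ["sha512", "sha384", "sha256", "sha224", "sha1", "md5"]  # assumed order

candidates = locked_hash_names & link_hashes.keys()
best = next((name for name in preference if name in candidates), None)
link_hash = f"{best}:{link_hashes[best]}" if best else None
print(link_hash in locked_hashes)  # True -> this link would be kept
```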
 
-    def _sort_key(self, package, link):  # type: (Package, Link) -> Tuple
+    def _sort_key(
+        self, package: Package, link: Link
+    ) -> tuple[int, int, int, Version, tuple[Any, ...], int]:
         """
         Function to pass as the `key` argument to a call to sorted() to sort
         InstallationCandidates by preference.
@@ -142,30 +156,31 @@ def _sort_key(self, package, link):  # type: (Package, Link) -> Tuple
               comparison operators, but then different sdist links
               with the same version, would have to be considered equal
         """
-        support_num = len(self._env.supported_tags)
-        build_tag = ()
+        build_tag: tuple[Any, ...] = ()
         binary_preference = 0
         if link.is_wheel:
             wheel = Wheel(link.filename)
             if not wheel.is_supported_by_environment(self._env):
                 raise RuntimeError(
-                    "{} is not a supported wheel for this platform. It "
-                    "can't be sorted.".format(wheel.filename)
+                    f"{wheel.filename} is not a supported wheel for this platform. It "
+                    "can't be sorted."
                 )
 
             # TODO: Binary preference
-            pri = -(wheel.get_minimum_supported_index(self._env.supported_tags))
+            pri = -(wheel.get_minimum_supported_index(self._env.supported_tags) or 0)
             if wheel.build_tag is not None:
                 match = re.match(r"^(\d+)(.*)$", wheel.build_tag)
+                if not match:
+                    raise ValueError(f"Unable to parse build tag: {wheel.build_tag}")
                 build_tag_groups = match.groups()
                 build_tag = (int(build_tag_groups[0]), build_tag_groups[1])
         else:  # sdist
+            support_num = len(self._env.supported_tags)
             pri = -support_num
 
         has_allowed_hash = int(self._is_link_hash_allowed_for_package(link, package))
 
-        # TODO: Proper yank value
-        yank_value = 0
+        yank_value = int(not link.yanked)
 
         return (
             has_allowed_hash,
@@ -176,12 +191,11 @@ def _sort_key(self, package, link):  # type: (Package, Link) -> Tuple
             pri,
         )
 
-    def _is_link_hash_allowed_for_package(
-        self, link, package
-    ):  # type: (Link, Package) -> bool
-        if not link.hash:
+    def _is_link_hash_allowed_for_package(self, link: Link, package: Package) -> bool:
+        if not link.hashes:
             return True
 
-        h = link.hash_name + ":" + link.hash
+        link_hashes = {f"{name}:{h}" for name, h in link.hashes.items()}
+        locked_hashes = {f["hash"] for f in package.files}
 
-        return h in {f["hash"] for f in package.files}
+        return bool(link_hashes & locked_hashes)
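Because `_sort_key` returns a tuple, `max()` compares candidates field by field, so the earlier fields (allowed hash, yank status) always dominate the later ones. A standalone toy illustration, not poetry's API:

```python
candidates = [
    # (has_allowed_hash, yank_value, binary_preference, label)
    (1, 1, 0, "wheel-a"),
    (1, 0, 1, "yanked-wheel"),   # loses despite a higher binary preference
    (0, 1, 1, "unhashed-wheel"),
]
print(max(candidates))  # -> (1, 1, 0, 'wheel-a')
```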
diff --git a/conda_lock/_vendor/poetry/installation/executor.py b/conda_lock/_vendor/poetry/installation/executor.py
index 2f99f0f8..30266abb 100644
--- a/conda_lock/_vendor/poetry/installation/executor.py
+++ b/conda_lock/_vendor/poetry/installation/executor.py
@@ -1,103 +1,169 @@
-# -*- coding: utf-8 -*-
-from __future__ import division
+from __future__ import annotations
 
+import contextlib
+import csv
+import functools
 import itertools
-import os
+import json
 import threading
 
 from concurrent.futures import ThreadPoolExecutor
 from concurrent.futures import wait
+from pathlib import Path
 from subprocess import CalledProcessError
+from typing import TYPE_CHECKING
+from typing import Any
 
-from conda_lock._vendor.poetry.core.packages.file_dependency import FileDependency
+from conda_lock._vendor.cleo.io.null_io import NullIO
 from conda_lock._vendor.poetry.core.packages.utils.link import Link
-from conda_lock._vendor.poetry.core.packages.utils.utils import url_to_path
-from conda_lock._vendor.poetry.core.pyproject.toml import PyProjectTOML
-from conda_lock._vendor.poetry.io.null_io import NullIO
-from conda_lock._vendor.poetry.utils._compat import PY2
-from conda_lock._vendor.poetry.utils._compat import WINDOWS
-from conda_lock._vendor.poetry.utils._compat import OrderedDict
-from conda_lock._vendor.poetry.utils._compat import Path
-from conda_lock._vendor.poetry.utils._compat import cpu_count
+
+from conda_lock._vendor.poetry.installation.chef import Chef
+from conda_lock._vendor.poetry.installation.chef import ChefBuildError
+from conda_lock._vendor.poetry.installation.chef import ChefInstallError
+from conda_lock._vendor.poetry.installation.chooser import Chooser
+from conda_lock._vendor.poetry.installation.operations import Install
+from conda_lock._vendor.poetry.installation.operations import Uninstall
+from conda_lock._vendor.poetry.installation.operations import Update
+from conda_lock._vendor.poetry.installation.wheel_installer import WheelInstaller
+from conda_lock._vendor.poetry.puzzle.exceptions import SolverProblemError
 from conda_lock._vendor.poetry.utils._compat import decode
+from conda_lock._vendor.poetry.utils.authenticator import Authenticator
 from conda_lock._vendor.poetry.utils.env import EnvCommandError
-from conda_lock._vendor.poetry.utils.helpers import safe_rmtree
-
-from .authenticator import Authenticator
-from .chef import Chef
-from .chooser import Chooser
-from .operations.install import Install
-from .operations.operation import Operation
-from .operations.uninstall import Uninstall
-from .operations.update import Update
-
-
-class Executor(object):
-    def __init__(self, env, pool, config, io, parallel=None):
+from conda_lock._vendor.poetry.utils.helpers import Downloader
+from conda_lock._vendor.poetry.utils.helpers import get_file_hash
+from conda_lock._vendor.poetry.utils.helpers import get_highest_priority_hash_type
+from conda_lock._vendor.poetry.utils.helpers import pluralize
+from conda_lock._vendor.poetry.utils.helpers import remove_directory
+from conda_lock._vendor.poetry.utils.pip import pip_install
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.cleo.io.io import IO
+    from conda_lock._vendor.cleo.io.outputs.section_output import SectionOutput
+    from conda_lock._vendor.poetry.core.masonry.builders.builder import Builder
+    from conda_lock._vendor.poetry.core.packages.package import Package
+
+    from conda_lock._vendor.poetry.config.config import Config
+    from conda_lock._vendor.poetry.installation.operations.operation import Operation
+    from conda_lock._vendor.poetry.repositories import RepositoryPool
+    from conda_lock._vendor.poetry.utils.env import Env
+
+
+class Executor:
+    def __init__(
+        self,
+        env: Env,
+        pool: RepositoryPool,
+        config: Config,
+        io: IO,
+        parallel: bool | None = None,
+        disable_cache: bool = False,
+    ) -> None:
         self._env = env
         self._io = io
         self._dry_run = False
         self._enabled = True
         self._verbose = False
-        self._authenticator = Authenticator(config, self._io)
-        self._chef = Chef(config, self._env)
-        self._chooser = Chooser(pool, self._env)
+        self._wheel_installer = WheelInstaller(self._env)
+        self._use_modern_installation = config.get(
+            "installer.modern-installation", True
+        )
+        if not self._use_modern_installation:
+            self._io.write_line(
+                "Warning: Setting `installer.modern-installation` to `false` "
+                "is deprecated."
+            )
+            self._io.write_line(
+                "The pip-based installer will be removed in a future release."
+            )
+            self._io.write_line(
+                "See https://github.com/python-poetry/poetry/issues/8987."
+            )
 
         if parallel is None:
             parallel = config.get("installer.parallel", True)
 
-        if parallel and not (PY2 and WINDOWS):
-            # This should be directly handled by ThreadPoolExecutor
-            # however, on some systems the number of CPUs cannot be determined
-            # (it raises a NotImplementedError), so, in this case, we assume
-            # that the system only has one CPU.
-            try:
-                self._max_workers = cpu_count() + 4
-            except NotImplementedError:
-                self._max_workers = 5
+        if parallel:
+            self._max_workers = config.installer_max_workers
         else:
             self._max_workers = 1
 
+        self._artifact_cache = pool.artifact_cache
+        self._authenticator = Authenticator(
+            config, self._io, disable_cache=disable_cache, pool_size=self._max_workers
+        )
+        self._chef = Chef(self._artifact_cache, self._env, pool)
+        self._chooser = Chooser(pool, self._env, config)
+
         self._executor = ThreadPoolExecutor(max_workers=self._max_workers)
         self._total_operations = 0
         self._executed_operations = 0
         self._executed = {"install": 0, "update": 0, "uninstall": 0}
         self._skipped = {"install": 0, "update": 0, "uninstall": 0}
-        self._sections = OrderedDict()
+        self._sections: dict[int, SectionOutput] = {}
+        self._yanked_warnings: list[str] = []
         self._lock = threading.Lock()
         self._shutdown = False
+        self._hashes: dict[str, str] = {}
+
+        # Cache whether decorated output is supported.
+        # https://github.com/python-poetry/cleo/issues/423
+        self._decorated_output: bool = self._io.output.is_decorated()
 
     @property
-    def installations_count(self):  # type: () -> int
+    def installations_count(self) -> int:
         return self._executed["install"]
 
     @property
-    def updates_count(self):  # type: () -> int
+    def updates_count(self) -> int:
         return self._executed["update"]
 
     @property
-    def removals_count(self):  # type: () -> int
+    def removals_count(self) -> int:
         return self._executed["uninstall"]
 
-    def supports_fancy_output(self):  # type: () -> bool
-        return self._io.supports_ansi() and not self._dry_run
+    @property
+    def enabled(self) -> bool:
+        return self._enabled
+
+    def supports_fancy_output(self) -> bool:
+        return self._decorated_output and not self._dry_run
 
-    def disable(self):
+    def disable(self) -> Executor:
         self._enabled = False
 
         return self
 
-    def dry_run(self, dry_run=True):
+    def dry_run(self, dry_run: bool = True) -> Executor:
         self._dry_run = dry_run
 
         return self
 
-    def verbose(self, verbose=True):
+    def verbose(self, verbose: bool = True) -> Executor:
         self._verbose = verbose
 
         return self
 
-    def execute(self, operations):  # type: (Operation) -> int
+    def enable_bytecode_compilation(self, enable: bool = True) -> None:
+        self._wheel_installer.enable_bytecode_compilation(enable)
+
+    def pip_install(
+        self, req: Path, upgrade: bool = False, editable: bool = False
+    ) -> int:
+        try:
+            pip_install(req, self._env, upgrade=upgrade, editable=editable)
+        except EnvCommandError as e:
+            output = decode(e.e.output)
+            if (
+                "KeyboardInterrupt" in output
+                or "ERROR: Operation cancelled by user" in output
+            ):
+                return -2
+            raise
+
+        return 0
+
+    def execute(self, operations: list[Operation]) -> int:
         self._total_operations = len(operations)
         for job_type in self._executed:
             self._executed[job_type] = 0
@@ -106,9 +172,18 @@ def execute(self, operations):  # type: (Operation) -> int
         if operations and (self._enabled or self._dry_run):
             self._display_summary(operations)
 
+        self._sections = {}
+        self._yanked_warnings = []
+
+        # pip has to be installed first without parallelism if we install via pip
+        for i, op in enumerate(operations):
+            if op.package.name == "pip":
+                wait([self._executor.submit(self._execute_operation, op)])
+                del operations[i]
+                break
+
         # We group operations by priority
         groups = itertools.groupby(operations, key=lambda o: -o.priority)
-        self._sections = OrderedDict()
         for _, group in groups:
             tasks = []
             serial_operations = []
@@ -116,7 +191,7 @@ def execute(self, operations):  # type: (Operation) -> int
                 if self._shutdown:
                     break
 
-                # Some operations are unsafe, we mus execute them serially in a group
+                # Some operations are unsafe, we must execute them serially in a group
                 # https://github.com/python-poetry/poetry/issues/3086
                 # https://github.com/python-poetry/poetry/issues/2658
                 #
@@ -148,9 +223,20 @@ def execute(self, operations):  # type: (Operation) -> int
 
                 break
 
+        for warning in self._yanked_warnings:
+            self._io.write_error_line(f"Warning: {warning}")
+        for path, issues in self._wheel_installer.invalid_wheels.items():
+            formatted_issues = "\n".join(issues)
+            warning = (
+                f"Validation of the RECORD file of {path.name} failed."
+                " Please report to the maintainers of that package so they can fix"
+                f" their build process. Details:\n{formatted_issues}\n"
+            )
+            self._io.write_error_line(f"Warning: {warning}")
+
         return 1 if self._shutdown else 0
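One subtlety in the grouping above: `itertools.groupby` only merges *consecutive* items, so the operations must already arrive ordered by priority for the groups to come out whole. A minimal standalone illustration (tuples stand in for operation objects):

```python
import itertools

ops = [("uninstall", 2), ("install", 1), ("update", 1)]  # (job_type, priority)
for neg_priority, group in itertools.groupby(ops, key=lambda o: -o[1]):
    print(-neg_priority, [job for job, _ in group])
# 2 ['uninstall']
# 1 ['install', 'update']
```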
 
-    def _write(self, operation, line):
+    def _write(self, operation: Operation, line: str) -> None:
         if not self.supports_fancy_output() or not self._should_write_operation(
             operation
         ):
@@ -165,38 +251,34 @@ def _write(self, operation, line):
 
         with self._lock:
             section = self._sections[id(operation)]
-            section.output.clear()
+            section.clear()
             section.write(line)
 
-    def _execute_operation(self, operation):
+    def _execute_operation(self, operation: Operation) -> None:
         try:
+            op_message = self.get_operation_message(operation)
             if self.supports_fancy_output():
-                if id(operation) not in self._sections:
-                    if self._should_write_operation(operation):
-                        with self._lock:
-                            self._sections[id(operation)] = self._io.section()
-                            self._sections[id(operation)].write_line(
-                                "  • {message}: Pending...".format(
-                                    message=self.get_operation_message(operation),
-                                ),
-                            )
+                if id(operation) not in self._sections and self._should_write_operation(
+                    operation
+                ):
+                    with self._lock:
+                        self._sections[id(operation)] = self._io.section()
+                        self._sections[id(operation)].write_line(
+                            f"  <fg=blue;options=bold>-</> {op_message}:"
+                            " <fg=blue>Pending...</>"
+                        )
             else:
                 if self._should_write_operation(operation):
                     if not operation.skipped:
                         self._io.write_line(
-                            "  • {message}".format(
-                                message=self.get_operation_message(operation),
-                            ),
+                            f"  <fg=blue;options=bold>-</> {op_message}"
                         )
                     else:
                         self._io.write_line(
-                            "  <fg=default;options=bold,dark>•</> {message}: "
+                            f"  <fg=default;options=bold,dark>-</> {op_message}: "
                             "<fg=default;options=bold,dark>Skipped "
                             "for the following reason:</> "
-                            "<fg=default;options=bold,dark>{reason}</>".format(
-                                message=self.get_operation_message(operation),
-                                reason=operation.skip_reason,
-                            )
+                            f"<fg=default;options=bold,dark>{operation.skip_reason}</>"
                         )
 
             try:
@@ -214,13 +296,16 @@ def _execute_operation(self, operation):
                 raise KeyboardInterrupt
         except Exception as e:
             try:
-                from clikit.ui.components.exception_trace import ExceptionTrace
+                from conda_lock._vendor.cleo.ui.exception_trace import ExceptionTrace
 
+                io: IO | SectionOutput
                 if not self.supports_fancy_output():
                     io = self._io
                 else:
-                    message = "   {message}: Failed".format(
-                        message=self.get_operation_message(operation, error=True),
+                    message = (
+                        "  <error>-</error>"
+                        f" <error>{self.get_operation_message(operation, error=True)}</error>:"
+                        " <error>Failed</error>"
+                    )
                     self._write(operation, message)
                     io = self._sections.get(id(operation), self._io)
@@ -228,14 +313,55 @@ def _execute_operation(self, operation):
                 with self._lock:
                     trace = ExceptionTrace(e)
                     trace.render(io)
+                    pkg = operation.package
+                    if isinstance(e, ChefBuildError):
+                        pip_command = "pip wheel --no-cache-dir --use-pep517"
+                        if pkg.develop:
+                            requirement = pkg.source_url
+                            pip_command += " --editable"
+                        else:
+                            requirement = (
+                                pkg.to_dependency().to_pep_508().split(";")[0].strip()
+                            )
+                        message = (
+                            "<info>"
+                            "Note: This error originates from the build backend,"
+                            " and is likely not a problem with poetry"
+                            f" but with {pkg.pretty_name} ({pkg.full_pretty_version})"
+                            " not supporting PEP 517 builds. You can verify this by"
+                            f" running '{pip_command} \"{requirement}\"'."
+                            "</info>"
+                        )
+                    elif isinstance(e, ChefInstallError):
+                        message = (
+                            "<error>"
+                            "Cannot install build-system.requires"
+                            f" for {pkg.pretty_name}."
+                            "</error>"
+                        )
+                    elif isinstance(e, SolverProblemError):
+                        message = (
+                            "<error>"
+                            "Cannot resolve build-system.requires"
+                            f" for {pkg.pretty_name}."
+                            "</error>"
+                        )
+                    else:
+                        message = f"<error>Cannot install {pkg.pretty_name}.</error>"
+
+                    io.write_line("")
+                    io.write_line(message)
                     io.write_line("")
             finally:
                 with self._lock:
                     self._shutdown = True
+
         except KeyboardInterrupt:
             try:
-                message = "   {message}: Cancelled".format(
-                    message=self.get_operation_message(operation, warning=True),
+                message = (
+                    "  <warning>-</warning>"
+                    f" <warning>{self.get_operation_message(operation, warning=True)}</warning>:"
+                    " <warning>Cancelled</warning>"
+                )
                 if not self.supports_fancy_output():
                     self._io.write_line(message)
@@ -245,7 +371,7 @@ def _execute_operation(self, operation):
                 with self._lock:
                     self._shutdown = True
 
-    def _do_execute_operation(self, operation):
+    def _do_execute_operation(self, operation: Operation) -> int:
         method = operation.job_type
 
         operation_message = self.get_operation_message(operation)
@@ -253,12 +379,10 @@ def _do_execute_operation(self, operation):
             if self.supports_fancy_output():
                 self._write(
                     operation,
-                    "  <fg=default;options=bold,dark>•</> {message}: "
+                    f"  <fg=default;options=bold,dark>-</> {operation_message}: "
                     "<fg=default;options=bold,dark>Skipped "
                     "for the following reason:</> "
-                    "<fg=default;options=bold,dark>{reason}</>".format(
-                        message=operation_message, reason=operation.skip_reason,
-                    ),
+                    f"<fg=default;options=bold,dark>{operation.skip_reason}</>",
                 )
 
             self._skipped[operation.job_type] += 1
@@ -266,29 +390,22 @@ def _do_execute_operation(self, operation):
             return 0
 
         if not self._enabled or self._dry_run:
-            self._io.write_line(
-                "  • {message}".format(
-                    message=operation_message,
-                )
-            )
-
             return 0
 
-        result = getattr(self, "_execute_{}".format(method))(operation)
+        result: int = getattr(self, f"_execute_{method}")(operation)
 
         if result != 0:
             return result
 
-        message = "  • {message}".format(
-            message=self.get_operation_message(operation, done=True),
-        )
+        operation_message = self.get_operation_message(operation, done=True)
+        message = f"  <fg=green;options=bold>-</> {operation_message}"
         self._write(operation, message)
 
         self._increment_operations_count(operation, True)
 
         return result
 
-    def _increment_operations_count(self, operation, executed):
+    def _increment_operations_count(self, operation: Operation, executed: bool) -> None:
         with self._lock:
             if executed:
                 self._executed_operations += 1
@@ -296,7 +413,7 @@ def _increment_operations_count(self, operation, executed):
             else:
                 self._skipped[operation.job_type] += 1
 
-    def run_pip(self, *args, **kwargs):  # type: (...) -> int
+    def run_pip(self, *args: Any, **kwargs: Any) -> int:
         try:
             self._env.run_pip(*args, **kwargs)
         except EnvCommandError as e:
@@ -311,7 +428,13 @@ def run_pip(self, *args, **kwargs):  # type: (...) -> int
 
         return 0
 
-    def get_operation_message(self, operation, done=False, error=False, warning=False):
+    def get_operation_message(
+        self,
+        operation: Operation,
+        done: bool = False,
+        error: bool = False,
+        warning: bool = False,
+    ) -> str:
         base_tag = "fg=default"
         operation_color = "c2"
         source_operation_color = "c2"
@@ -330,42 +453,37 @@ def get_operation_message(self, operation, done=False, error=False, warning=Fals
             source_operation_color += "_dark"
             package_color += "_dark"
 
-        if operation.job_type == "install":
-            return "<{}>Installing <{}>{}</{}> (<{}>{}</>)".format(
-                base_tag,
-                package_color,
-                operation.package.name,
-                package_color,
-                operation_color,
-                operation.package.full_pretty_version,
+        if isinstance(operation, Install):
+            return (
+                f"<{base_tag}>Installing"
+                f" <{package_color}>{operation.package.name}</{package_color}>"
+                f" (<{operation_color}>{operation.package.full_pretty_version}</>)"
             )
 
-        if operation.job_type == "uninstall":
-            return "<{}>Removing <{}>{}</{}> (<{}>{}</>)".format(
-                base_tag,
-                package_color,
-                operation.package.name,
-                package_color,
-                operation_color,
-                operation.package.full_pretty_version,
+        if isinstance(operation, Uninstall):
+            return (
+                f"<{base_tag}>Removing"
+                f" <{package_color}>{operation.package.name}</{package_color}>"
+                f" (<{operation_color}>{operation.package.full_pretty_version}</>)"
             )
 
-        if operation.job_type == "update":
-            return "<{}>Updating <{}>{}</{}> (<{}>{}</{}> -> <{}>{}</>)".format(
-                base_tag,
-                package_color,
-                operation.initial_package.name,
-                package_color,
-                source_operation_color,
-                operation.initial_package.full_pretty_version,
-                source_operation_color,
-                operation_color,
-                operation.target_package.full_pretty_version,
+        if isinstance(operation, Update):
+            initial_version = (initial_pkg := operation.initial_package).version
+            target_version = (target_pkg := operation.target_package).version
+            update_kind = (
+                "Updating" if target_version >= initial_version else "Downgrading"
+            )
+            return (
+                f"<{base_tag}>{update_kind}"
+                f" <{package_color}>{initial_pkg.name}</{package_color}> "
+                f"(<{source_operation_color}>"
+                f"{initial_pkg.full_pretty_version}</{source_operation_color}>"
+                f" -> <{operation_color}>"
+                f"{target_pkg.full_pretty_version}</>)"
             )
-
         return ""
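The Updating/Downgrading choice above is a plain version comparison. The same decision expressed with `packaging.version` instead of poetry-core's `Version` (values illustrative):

```python
from packaging.version import Version

initial, target = Version("2.1.0"), Version("1.9.3")
update_kind = "Updating" if target >= initial else "Downgrading"
print(update_kind)  # Downgrading
```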
 
-    def _display_summary(self, operations):
+    def _display_summary(self, operations: list[Operation]) -> None:
         installs = 0
         updates = 0
         uninstalls = 0
@@ -389,77 +507,90 @@ def _display_summary(self, operations):
             return
 
         self._io.write_line("")
-        self._io.write_line(
-            "Package operations: "
-            "{} install{}, "
-            "{} update{}, "
-            "{} removal{}"
-            "{}".format(
-                installs,
-                "" if installs == 1 else "s",
-                updates,
-                "" if updates == 1 else "s",
-                uninstalls,
-                "" if uninstalls == 1 else "s",
-                ", {} skipped".format(skipped)
-                if skipped and self._verbose
-                else "",
-            )
-        )
+        self._io.write("Package operations: ")
+        self._io.write(f"{installs} install{pluralize(installs)}, ")
+        self._io.write(f"{updates} update{pluralize(updates)}, ")
+        self._io.write(f"{uninstalls} removal{pluralize(uninstalls)}")
+        if skipped and self._verbose:
+            self._io.write(f", {skipped} skipped")
+        self._io.write_line("")
         self._io.write_line("")
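The summary relies on poetry's `pluralize` helper; judging purely from its usage above, it behaves roughly like this hedged stand-in:

```python
def pluralize(count: int, word: str = "s") -> str:
    # Assumed behavior: emit the suffix except for exactly one item.
    return "" if count == 1 else word


print(f"1 install{pluralize(1)}, 2 update{pluralize(2)}")  # 1 install, 2 updates
```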
 
-    def _execute_install(self, operation):  # type: (Install) -> None
-        return self._install(operation)
+    def _execute_install(self, operation: Install | Update) -> int:
+        status_code = self._install(operation)
 
-    def _execute_update(self, operation):  # type: (Update) -> None
-        return self._update(operation)
+        self._save_url_reference(operation)
 
-    def _execute_uninstall(self, operation):  # type: (Uninstall) -> None
-        message = "  • {message}: Removing...".format(
-            message=self.get_operation_message(operation),
-        )
+        return status_code
+
+    def _execute_update(self, operation: Install | Update) -> int:
+        status_code = self._update(operation)
+
+        self._save_url_reference(operation)
+
+        return status_code
+
+    def _execute_uninstall(self, operation: Uninstall) -> int:
+        op_msg = self.get_operation_message(operation)
+        message = f"  <fg=blue;options=bold>-</> {op_msg}: <info>Removing...</info>"
         self._write(operation, message)
 
-        return self._remove(operation)
+        return self._remove(operation.package)
 
-    def _install(self, operation):
+    def _install(self, operation: Install | Update) -> int:
         package = operation.package
-        if package.source_type == "directory":
-            return self._install_directory(operation)
+        if package.source_type == "directory" and not self._use_modern_installation:
+            return self._install_directory_without_wheel_installer(operation)
 
+        cleanup_archive: bool = False
         if package.source_type == "git":
-            return self._install_git(operation)
-
-        if package.source_type == "file":
-            archive = self._prepare_file(operation)
+            archive = self._prepare_git_archive(operation)
+            cleanup_archive = operation.package.develop
+        elif package.source_type == "file":
+            archive = self._prepare_archive(operation)
+        elif package.source_type == "directory":
+            archive = self._prepare_archive(operation)
+            cleanup_archive = True
         elif package.source_type == "url":
+            assert package.source_url is not None
             archive = self._download_link(operation, Link(package.source_url))
         else:
             archive = self._download(operation)
 
         operation_message = self.get_operation_message(operation)
-        message = "  • {message}: Installing...".format(
-            message=operation_message,
+        message = (
+            f"  <fg=blue;options=bold>-</> {operation_message}:"
+            " <info>Installing...</info>"
         )
         self._write(operation, message)
 
-        args = ["install", "--no-deps", str(archive)]
-        if operation.job_type == "update":
-            args.insert(2, "-U")
+        if not self._use_modern_installation:
+            return self.pip_install(archive, upgrade=operation.job_type == "update")
 
-        return self.run_pip(*args)
+        try:
+            if operation.job_type == "update":
+                # Uninstall first
+                # TODO: Make an uninstaller and find a way to rollback in case
+                # the new package can't be installed
+                assert isinstance(operation, Update)
+                self._remove(operation.initial_package)
+
+            self._wheel_installer.install(archive)
+        finally:
+            if cleanup_archive:
+                archive.unlink()
 
-    def _update(self, operation):
-        return self._install(operation)
+        return 0
 
-    def _remove(self, operation):
-        package = operation.package
+    def _update(self, operation: Install | Update) -> int:
+        return self._install(operation)
 
+    def _remove(self, package: Package) -> int:
         # If we have a VCS package, remove its source directory
         if package.source_type == "git":
             src_dir = self._env.path / "src" / package.name
             if src_dir.exists():
-                safe_rmtree(str(src_dir))
+                remove_directory(src_dir, force=True)
 
         try:
             return self.run_pip("uninstall", package.name, "-y")
@@ -469,226 +600,370 @@ def _remove(self, operation):
 
             raise
 
-    def _prepare_file(self, operation):
+    def _prepare_archive(
+        self, operation: Install | Update, *, output_dir: Path | None = None
+    ) -> Path:
         package = operation.package
+        operation_message = self.get_operation_message(operation)
 
-        message = "  • {message}: Preparing...".format(
-            message=self.get_operation_message(operation),
+        message = (
+            f"  <fg=blue;options=bold>-</> {operation_message}:"
+            " <info>Preparing...</info>"
         )
         self._write(operation, message)
 
+        assert package.source_url is not None
         archive = Path(package.source_url)
+        if package.source_subdirectory:
+            archive = archive / package.source_subdirectory
         if not Path(package.source_url).is_absolute() and package.root_dir:
             archive = package.root_dir / archive
 
-        archive = self._chef.prepare(archive)
+        self._populate_hashes_dict(archive, package)
+
+        return self._chef.prepare(
+            archive, editable=package.develop, output_dir=output_dir
+        )
+
+    def _prepare_git_archive(self, operation: Install | Update) -> Path:
+        from conda_lock._vendor.poetry.vcs.git import Git
+
+        package = operation.package
+        assert package.source_url is not None
+
+        if package.source_resolved_reference and not package.develop:
+            # Only cache git archives when we know the precise reference hash;
+            # otherwise we might get stale archives.
+            cached_archive = self._artifact_cache.get_cached_archive_for_git(
+                package.source_url,
+                package.source_resolved_reference,
+                package.source_subdirectory,
+                env=self._env,
+            )
+            if cached_archive is not None:
+                return cached_archive
+
+        operation_message = self.get_operation_message(operation)
+
+        message = (
+            f"  <fg=blue;options=bold>-</> {operation_message}: <info>Cloning...</info>"
+        )
+        self._write(operation, message)
+
+        source = Git.clone(
+            url=package.source_url,
+            source_root=self._env.path / "src",
+            revision=package.source_resolved_reference or package.source_reference,
+        )
+
+        # Now we just need to install from the source directory
+        original_url = package.source_url
+        package._source_url = str(source.path)
+
+        output_dir = None
+        if package.source_resolved_reference and not package.develop:
+            output_dir = self._artifact_cache.get_cache_directory_for_git(
+                original_url,
+                package.source_resolved_reference,
+                package.source_subdirectory,
+            )
+
+        archive = self._prepare_archive(operation, output_dir=output_dir)
+        if not package.develop:
+            package._source_url = original_url
+
+        if output_dir is not None and output_dir.is_dir():
+            # Mark directories with cached git packages, to distinguish from
+            # "normal" cache
+            (output_dir / ".created_from_git_dependency").touch()
 
         return archive
 
-    def _install_directory(self, operation):
+    def _install_directory_without_wheel_installer(
+        self, operation: Install | Update
+    ) -> int:
         from conda_lock._vendor.poetry.factory import Factory
+        from conda_lock._vendor.poetry.pyproject.toml import PyProjectTOML
 
         package = operation.package
         operation_message = self.get_operation_message(operation)
 
-        message = "  • {message}: Building...".format(
-            message=operation_message,
+        message = (
+            f"  <fg=blue;options=bold>-</> {operation_message}:"
+            " <info>Building...</info>"
         )
         self._write(operation, message)
 
+        assert package.source_url is not None
         if package.root_dir:
-            req = os.path.join(str(package.root_dir), package.source_url)
+            req = package.root_dir / package.source_url
         else:
-            req = os.path.realpath(package.source_url)
+            req = Path(package.source_url).resolve(strict=False)
 
-        args = ["install", "--no-deps", "-U"]
+        if package.source_subdirectory:
+            req /= package.source_subdirectory
 
-        pyproject = PyProjectTOML(os.path.join(req, "pyproject.toml"))
+        pyproject = PyProjectTOML(req / "pyproject.toml")
 
+        package_poetry = None
         if pyproject.is_poetry_project():
-            # Even if there is a build system specified
-            # some versions of pip (< 19.0.0) don't understand it
-            # so we need to check the version of pip to know
-            # if we can rely on the build system
-            legacy_pip = self._env.pip_version < self._env.pip_version.__class__(
-                19, 0, 0
-            )
-
-            try:
+            with contextlib.suppress(RuntimeError):
                 package_poetry = Factory().create_poetry(pyproject.file.path.parent)
-            except RuntimeError:
-                package_poetry = None
 
-            if package_poetry is not None:
-                if package.develop and not package_poetry.package.build_script:
-                    from conda_lock._vendor.poetry.masonry.builders.editable import EditableBuilder
+        if package_poetry is not None:
+            builder: Builder
+            if package.develop and not package_poetry.package.build_script:
+                from conda_lock._vendor.poetry.masonry.builders.editable import EditableBuilder
 
-                    # This is a Poetry package in editable mode
-                    # we can use the EditableBuilder without going through pip
-                    # to install it, unless it has a build script.
-                    builder = EditableBuilder(package_poetry, self._env, NullIO())
-                    builder.build()
+                # This is a Poetry package in editable mode
+                # we can use the EditableBuilder without going through pip
+                # to install it, unless it has a build script.
+                builder = EditableBuilder(package_poetry, self._env, NullIO())
+                builder.build()
 
-                    return 0
-                elif legacy_pip or package_poetry.package.build_script:
-                    from conda_lock._vendor.poetry.core.masonry.builders.sdist import SdistBuilder
-
-                    # We need to rely on creating a temporary setup.py
-                    # file since the version of pip does not support
-                    # build-systems
-                    # We also need it for non-PEP-517 packages
-                    builder = SdistBuilder(package_poetry)
+                return 0
 
-                    with builder.setup_py():
-                        if package.develop:
-                            args.append("-e")
+            if package_poetry.package.build_script:
+                from conda_lock._vendor.poetry.core.masonry.builders.sdist import SdistBuilder
 
-                        args.append(req)
+                builder = SdistBuilder(package_poetry)
+                with builder.setup_py():
+                    return self.pip_install(req, upgrade=True, editable=package.develop)
 
-                        return self.run_pip(*args)
+        return self.pip_install(req, upgrade=True, editable=package.develop)
 
-        if package.develop:
-            args.append("-e")
-
-        args.append(req)
+    def _download(self, operation: Install | Update) -> Path:
+        link = self._chooser.choose_for(operation.package)
 
-        return self.run_pip(*args)
+        if link.yanked:
+            # Store yanked warnings in a list and print them after installing, so
+            # they cannot be overlooked. Printing them in the section concerned
+            # would risk the warning being overwritten and only briefly visible.
+            message = (
+                f"The file chosen for install of {operation.package.pretty_name} "
+                f"{operation.package.pretty_version} ({link.show_url}) is yanked."
+            )
+            if link.yanked_reason:
+                message += f" Reason for being yanked: {link.yanked_reason}"
+            self._yanked_warnings.append(message)
 
-    def _install_git(self, operation):
-        from conda_lock._vendor.poetry.core.vcs import Git
+        return self._download_link(operation, link)
 
+    def _download_link(self, operation: Install | Update, link: Link) -> Path:
         package = operation.package
-        operation_message = self.get_operation_message(operation)
 
-        message = "  • {message}: Cloning...".format(
-            message=operation_message,
+        # Get original package for the link provided
+        download_func = functools.partial(self._download_archive, operation)
+        original_archive = self._artifact_cache.get_cached_archive_for_link(
+            link, strict=True, download_func=download_func
         )
-        self._write(operation, message)
 
-        src_dir = self._env.path / "src" / package.name
-        if src_dir.exists():
-            safe_rmtree(str(src_dir))
-
-        src_dir.parent.mkdir(exist_ok=True)
-
-        git = Git()
-        git.clone(package.source_url, src_dir)
-
-        reference = package.source_resolved_reference
-        if not reference:
-            reference = package.source_reference
+        # Get potential higher prioritized cached archive, otherwise it will fall back
+        # to the original archive.
+        archive = self._artifact_cache.get_cached_archive_for_link(
+            link,
+            strict=False,
+            env=self._env,
+        )
+        if archive is None:
+            # Since we previously downloaded an archive, we should now have
+            # something cached that we can use here. The only case in which
+            # archive is None is if the original archive is not valid for the
+            # current environment.
+            raise RuntimeError(
+                f"Package {link.url} cannot be installed in the current environment"
+                f" {self._env.marker_env}"
+            )
 
-        git.checkout(reference, src_dir)
+        if archive.suffix != ".whl":
+            message = (
+                f"  <fg=blue;options=bold>-</> {self.get_operation_message(operation)}:"
+                " <info>Preparing...</info>"
+            )
+            self._write(operation, message)
 
-        # Now we just need to install from the source directory
-        package._source_url = str(src_dir)
+            archive = self._chef.prepare(archive, output_dir=original_archive.parent)
 
-        return self._install_directory(operation)
+        # Use the original archive to provide the correct hash.
+        self._populate_hashes_dict(original_archive, package)
 
-    def _download(self, operation):  # type: (Operation) -> Path
-        link = self._chooser.choose_for(operation.package)
+        return archive
 
-        return self._download_link(operation, link)
+    def _populate_hashes_dict(self, archive: Path, package: Package) -> None:
+        if package.files and archive.name in {f["file"] for f in package.files}:
+            archive_hash = self._validate_archive_hash(archive, package)
+            self._hashes[package.name] = archive_hash
+
+    @staticmethod
+    def _validate_archive_hash(archive: Path, package: Package) -> str:
+        known_hashes = {f["hash"] for f in package.files if f["file"] == archive.name}
+        hash_types = {t.split(":")[0] for t in known_hashes}
+        hash_type = get_highest_priority_hash_type(hash_types, archive.name)
+
+        if hash_type is None:
+            raise RuntimeError(
+                f"No usable hash type(s) for {package} from archive"
+                f" {archive.name} found (known hashes: {known_hashes!s})"
+            )
 
-    def _download_link(self, operation, link):
-        package = operation.package
+        archive_hash = f"{hash_type}:{get_file_hash(archive, hash_type)}"
 
-        archive = self._chef.get_cached_archive_for_link(link)
-        if archive is link:
-            # No cached distributions was found, so we download and prepare it
-            try:
-                archive = self._download_archive(operation, link)
-            except BaseException:
-                cache_directory = self._chef.get_cache_directory_for_link(link)
-                cached_file = cache_directory.joinpath(link.filename)
-                # We can't use unlink(missing_ok=True) because it's not available
-                # in pathlib2 for Python 2.7
-                if cached_file.exists():
-                    cached_file.unlink()
-
-                raise
-
-            # TODO: Check readability of the created archive
-
-            if not link.is_wheel:
-                archive = self._chef.prepare(archive)
-
-        if package.files:
-            hashes = {f["hash"] for f in package.files}
-            hash_types = {h.split(":")[0] for h in hashes}
-            archive_hashes = set()
-            archive_path = (
-                url_to_path(archive.url) if isinstance(archive, Link) else archive
+        if archive_hash not in known_hashes:
+            raise RuntimeError(
+                f"Hash for {package} from archive {archive.name} not found in"
+                f" known hashes (was: {archive_hash})"
             )
-            for hash_type in hash_types:
-                archive_hashes.add(
-                    "{}:{}".format(
-                        hash_type,
-                        FileDependency(package.name, archive_path).hash(hash_type),
-                    )
-                )
 
-            if archive_hashes.isdisjoint(hashes):
-                raise RuntimeError(
-                    "Invalid hashes ({}) for {} using archive {}. Expected one of {}.".format(
-                        ", ".join(sorted(archive_hashes)),
-                        package,
-                        archive_path.name,
-                        ", ".join(sorted(hashes)),
-                    )
-                )
+        return archive_hash
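A standalone sketch of the archive-hash validation above, with `hashlib` standing in for poetry's `get_file_hash` helper (the file name and contents are illustrative):

```python
import hashlib
import tempfile
from pathlib import Path


def file_hash(path: Path, hash_type: str = "sha256") -> str:
    digest = hashlib.new(hash_type)
    digest.update(path.read_bytes())
    return digest.hexdigest()


with tempfile.TemporaryDirectory() as tmp:
    archive = Path(tmp) / "pkg-1.0-py3-none-any.whl"
    archive.write_bytes(b"demo archive contents")
    known_hashes = {f"sha256:{file_hash(archive)}"}  # locked metadata
    computed = f"sha256:{file_hash(archive)}"
    assert computed in known_hashes  # a mismatch raises RuntimeError above
```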
 
-        return archive
+    def _download_archive(
+        self,
+        operation: Install | Update,
+        url: str,
+        dest: Path,
+    ) -> None:
+        downloader = Downloader(url, dest, self._authenticator)
+        wheel_size = downloader.total_size
 
-    def _download_archive(self, operation, link):  # type: (Operation, Link) -> Path
-        response = self._authenticator.request(
-            "get", link.url, stream=True, io=self._sections.get(id(operation), self._io)
-        )
-        wheel_size = response.headers.get("content-length")
         operation_message = self.get_operation_message(operation)
-        message = "  • {message}: Downloading...".format(
-            message=operation_message,
+        message = (
+            f"  <fg=blue;options=bold>-</> {operation_message}: <info>Downloading...</info>"
         )
         progress = None
         if self.supports_fancy_output():
             if wheel_size is None:
                 self._write(operation, message)
             else:
-                from clikit.ui.components.progress_bar import ProgressBar
+                from conda_lock._vendor.cleo.ui.progress_bar import ProgressBar
 
                 progress = ProgressBar(
-                    self._sections[id(operation)].output, max=int(wheel_size)
+                    self._sections[id(operation)], max=int(wheel_size)
                 )
                 progress.set_format(message + " %percent%%")
 
         if progress:
             with self._lock:
+                self._sections[id(operation)].clear()
                 progress.start()
 
-        done = 0
-        archive = self._chef.get_cache_directory_for_link(link) / link.filename
-        archive.parent.mkdir(parents=True, exist_ok=True)
-        with archive.open("wb") as f:
-            for chunk in response.iter_content(chunk_size=4096):
-                if not chunk:
-                    break
-
-                done += len(chunk)
-
-                if progress:
-                    with self._lock:
-                        progress.set_progress(done)
-
-                f.write(chunk)
+        for fetched_size in downloader.download_with_progress(chunk_size=4096):
+            if progress:
+                with self._lock:
+                    progress.set_progress(fetched_size)
 
         if progress:
             with self._lock:
                 progress.finish()
 
-        return archive
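Poetry's `Downloader` helper is only visible here through its usage: a `total_size` attribute plus a generator that yields the cumulative byte count per chunk. A hedged sketch of such a generator, writing from an in-memory buffer rather than a network response:

```python
import tempfile
from collections.abc import Iterator
from pathlib import Path


def download_with_progress(data: bytes, dest: Path, chunk_size: int = 4096) -> Iterator[int]:
    fetched = 0
    with dest.open("wb") as f:
        for offset in range(0, len(data), chunk_size):
            chunk = data[offset : offset + chunk_size]
            f.write(chunk)
            fetched += len(chunk)
            yield fetched  # the caller updates the progress bar from this


with tempfile.TemporaryDirectory() as tmp:
    for done in download_with_progress(b"x" * 10_000, Path(tmp) / "demo.whl"):
        print(done)  # 4096, 8192, 10000
```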
+    def _should_write_operation(self, operation: Operation) -> bool:
+        return (
+            not operation.skipped or self._dry_run or self._verbose or not self._enabled
+        )
+
+    def _save_url_reference(self, operation: Operation) -> None:
+        """
+        Create and store a PEP-610 `direct_url.json` file, if needed.
+        """
+        if operation.job_type not in {"install", "update"}:
+            return
+
+        package = operation.package
+
+        if not package.source_url or package.source_type == "legacy":
+            if not self._use_modern_installation:
+                # Since we are installing from our own distribution cache pip will write
+                # a `direct_url.json` file pointing to the cache distribution.
+                #
+                # That's not what we want, so we remove the direct_url.json file, if it
+                # exists.
+                for (
+                    direct_url_json
+                ) in self._env.site_packages.find_distribution_direct_url_json_files(
+                    distribution_name=package.name, writable_only=True
+                ):
+                    direct_url_json.unlink(missing_ok=True)
+            return
+
+        url_reference: dict[str, Any] | None = None
+
+        if package.source_type == "git" and not package.develop:
+            url_reference = self._create_git_url_reference(package)
+        elif package.source_type in ("directory", "git"):
+            url_reference = self._create_directory_url_reference(package)
+        elif package.source_type == "url":
+            url_reference = self._create_url_url_reference(package)
+        elif package.source_type == "file":
+            url_reference = self._create_file_url_reference(package)
+
+        if url_reference:
+            for dist in self._env.site_packages.distributions(
+                name=package.name, writable_only=True
+            ):
+                dist_path = dist._path  # type: ignore[attr-defined]
+                assert isinstance(dist_path, Path)
+                url = dist_path / "direct_url.json"
+                url.write_text(json.dumps(url_reference), encoding="utf-8")
+
+                record = dist_path / "RECORD"
+                if record.exists():
+                    with record.open(mode="a", encoding="utf-8", newline="") as f:
+                        writer = csv.writer(f)
+                        path = url.relative_to(record.parent.parent)
+                        writer.writerow([str(path), "", ""])
+
+    def _create_git_url_reference(self, package: Package) -> dict[str, Any]:
+        reference = {
+            "url": package.source_url,
+            "vcs_info": {
+                "vcs": "git",
+                "requested_revision": package.source_reference,
+                "commit_id": package.source_resolved_reference,
+            },
+        }
+        if package.source_subdirectory:
+            reference["subdirectory"] = package.source_subdirectory
+
+        return reference
+
+    def _create_url_url_reference(self, package: Package) -> dict[str, Any]:
+        archive_info = self._get_archive_info(package)
+
+        return {"url": package.source_url, "archive_info": archive_info}
+
+    def _create_file_url_reference(self, package: Package) -> dict[str, Any]:
+        archive_info = self._get_archive_info(package)
+
+        assert package.source_url is not None
+        return {
+            "url": Path(package.source_url).as_uri(),
+            "archive_info": archive_info,
+        }
+
+    def _create_directory_url_reference(self, package: Package) -> dict[str, Any]:
+        dir_info = {}
+
+        if package.develop:
+            dir_info["editable"] = True
+
+        assert package.source_url is not None
+        return {
+            "url": Path(package.source_url).as_uri(),
+            "dir_info": dir_info,
+        }
+
+    def _get_archive_info(self, package: Package) -> dict[str, Any]:
+        """
+        Create the `archive_info` dictionary for a `direct_url.json` file.
+
+        Specification: https://packaging.python.org/en/latest/specifications/direct-url
+        (it supersedes PEP 610)
+
+        :param package: This must be a poetry package instance.
+        """
+        archive_info = {}
 
-    def _should_write_operation(self, operation):  # type: (Operation) -> bool
-        if not operation.skipped:
-            return True
+        if package.name in self._hashes:
+            algorithm, value = self._hashes[package.name].split(":")
+            archive_info["hashes"] = {algorithm: value}
 
-        return self._dry_run or self._verbose
+        return archive_info
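Note: the url_reference dictionaries built above follow the "direct URL" data
structure specification referenced in the docstring. A minimal sketch of the
payload a git-sourced install would produce (URL, revision, and commit id are
invented placeholder values):

    import json

    # Hypothetical payload mirroring _create_git_url_reference above.
    reference = {
        "url": "https://github.com/org/project.git",
        "vcs_info": {
            "vcs": "git",
            "requested_revision": "main",
            "commit_id": "0123456789abcdef0123456789abcdef01234567",
        },
    }
    # Serialized next to the installed dist-info as direct_url.json.
    print(json.dumps(reference))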
diff --git a/conda_lock/_vendor/poetry/installation/installer.py b/conda_lock/_vendor/poetry/installation/installer.py
index c09ad973..e731858e 100644
--- a/conda_lock/_vendor/poetry/installation/installer.py
+++ b/conda_lock/_vendor/poetry/installation/installer.py
@@ -1,90 +1,95 @@
-from typing import List
-from typing import Optional
-from typing import Union
+from __future__ import annotations
 
-from clikit.api.io import IO
+from typing import TYPE_CHECKING
+from typing import cast
 
-from conda_lock._vendor.poetry.config.config import Config
-from conda_lock._vendor.poetry.core.packages.project_package import ProjectPackage
-from conda_lock._vendor.poetry.io.null_io import NullIO
-from conda_lock._vendor.poetry.packages import Locker
-from conda_lock._vendor.poetry.repositories import Pool
+from conda_lock._vendor.cleo.io.null_io import NullIO
+from packaging.utils import canonicalize_name
+
+from conda_lock._vendor.poetry.installation.executor import Executor
+from conda_lock._vendor.poetry.installation.operations import Install
+from conda_lock._vendor.poetry.installation.operations import Uninstall
+from conda_lock._vendor.poetry.installation.operations import Update
 from conda_lock._vendor.poetry.repositories import Repository
+from conda_lock._vendor.poetry.repositories import RepositoryPool
 from conda_lock._vendor.poetry.repositories.installed_repository import InstalledRepository
+from conda_lock._vendor.poetry.repositories.lockfile_repository import LockfileRepository
 from conda_lock._vendor.poetry.utils.extras import get_extra_package_names
-from conda_lock._vendor.poetry.utils.helpers import canonicalize_name
 
-from .base_installer import BaseInstaller
-from .executor import Executor
-from .operations import Install
-from .operations import Uninstall
-from .operations import Update
-from .operations.operation import Operation
-from .pip_installer import PipInstaller
+
+if TYPE_CHECKING:
+    from collections.abc import Iterable
+
+    from conda_lock._vendor.cleo.io.io import IO
+    from packaging.utils import NormalizedName
+    from conda_lock._vendor.poetry.core.packages.path_dependency import PathDependency
+    from conda_lock._vendor.poetry.core.packages.project_package import ProjectPackage
+
+    from conda_lock._vendor.poetry.config.config import Config
+    from conda_lock._vendor.poetry.installation.operations.operation import Operation
+    from conda_lock._vendor.poetry.packages import Locker
+    from conda_lock._vendor.poetry.utils.env import Env
 
 
 class Installer:
     def __init__(
         self,
-        io,  # type: IO
-        env,
-        package,  # type: ProjectPackage
-        locker,  # type: Locker
-        pool,  # type: Pool
-        config,  # type: Config
-        installed=None,  # type: Union[InstalledRepository, None]
-        executor=None,  # type: Optional[Executor]
-    ):
+        io: IO,
+        env: Env,
+        package: ProjectPackage,
+        locker: Locker,
+        pool: RepositoryPool,
+        config: Config,
+        installed: Repository | None = None,
+        executor: Executor | None = None,
+        disable_cache: bool = False,
+    ) -> None:
         self._io = io
         self._env = env
         self._package = package
         self._locker = locker
         self._pool = pool
+        self._config = config
 
         self._dry_run = False
-        self._remove_untracked = False
+        self._requires_synchronization = False
         self._update = False
         self._verbose = False
-        self._write_lock = True
-        self._dev_mode = True
-        self._execute_operations = True
+        self._groups: Iterable[str] | None = None
+        self._skip_directory = False
         self._lock = False
 
-        self._whitelist = []
+        self._whitelist: list[NormalizedName] = []
 
-        self._extras = []
+        self._extras: list[NormalizedName] = []
 
         if executor is None:
-            executor = Executor(self._env, self._pool, config, self._io)
+            executor = Executor(
+                self._env, self._pool, config, self._io, disable_cache=disable_cache
+            )
 
         self._executor = executor
-        self._use_executor = False
 
-        self._installer = self._get_installer()
         if installed is None:
             installed = self._get_installed()
 
         self._installed_repository = installed
 
     @property
-    def executor(self):
+    def executor(self) -> Executor:
         return self._executor
 
-    @property
-    def installer(self):
-        return self._installer
-
-    def set_package(self, package):  # type: (ProjectPackage) -> Installer
+    def set_package(self, package: ProjectPackage) -> Installer:
         self._package = package
 
         return self
 
-    def set_locker(self, locker):  # type: (Locker) -> Installer
+    def set_locker(self, locker: Locker) -> Installer:
         self._locker = locker
 
         return self
 
-    def run(self):
+    def run(self) -> int:
         # Check if refresh
         if not self._update and self._lock and self._locker.is_locked():
             return self._do_refresh()
@@ -95,53 +100,50 @@ def run(self):
 
         if self.is_dry_run():
             self.verbose(True)
-            self._write_lock = False
-            self._execute_operations = False
-
-        local_repo = Repository()
 
-        return self._do_install(local_repo)
+        return self._do_install()
 
-    def dry_run(self, dry_run=True):  # type: (bool) -> Installer
+    def dry_run(self, dry_run: bool = True) -> Installer:
         self._dry_run = dry_run
         self._executor.dry_run(dry_run)
 
         return self
 
-    def is_dry_run(self):  # type: () -> bool
+    def is_dry_run(self) -> bool:
         return self._dry_run
 
-    def remove_untracked(self, remove_untracked=True):  # type: (bool) -> Installer
-        self._remove_untracked = remove_untracked
+    def requires_synchronization(
+        self, requires_synchronization: bool = True
+    ) -> Installer:
+        self._requires_synchronization = requires_synchronization
 
         return self
 
-    def is_remove_untracked(self):  # type: () -> bool
-        return self._remove_untracked
-
-    def verbose(self, verbose=True):  # type: (bool) -> Installer
+    def verbose(self, verbose: bool = True) -> Installer:
         self._verbose = verbose
         self._executor.verbose(verbose)
 
         return self
 
-    def is_verbose(self):  # type: () -> bool
+    def is_verbose(self) -> bool:
         return self._verbose
 
-    def dev_mode(self, dev_mode=True):  # type: (bool) -> Installer
-        self._dev_mode = dev_mode
+    def only_groups(self, groups: Iterable[str]) -> Installer:
+        self._groups = groups
 
         return self
 
-    def is_dev_mode(self):  # type: () -> bool
-        return self._dev_mode
-
-    def update(self, update=True):  # type: (bool) -> Installer
+    def update(self, update: bool = True) -> Installer:
         self._update = update
 
         return self
 
-    def lock(self, update=True):  # type: (bool) -> Installer
+    def skip_directory(self, skip_directory: bool = False) -> Installer:
+        self._skip_directory = skip_directory
+
+        return self
+
+    def lock(self, update: bool = True) -> Installer:
         """
         Prepare the installer for locking only.
         """
@@ -151,65 +153,67 @@ def lock(self, update=True):  # type: (bool) -> Installer
 
         return self
 
-    def is_updating(self):  # type: () -> bool
+    def is_updating(self) -> bool:
         return self._update
 
-    def execute_operations(self, execute=True):  # type: (bool) -> Installer
-        self._execute_operations = execute
-
+    def execute_operations(self, execute: bool = True) -> Installer:
         if not execute:
             self._executor.disable()
 
         return self
 
-    def whitelist(self, packages):  # type: (dict) -> Installer
+    def whitelist(self, packages: Iterable[str]) -> Installer:
         self._whitelist = [canonicalize_name(p) for p in packages]
 
         return self
 
-    def extras(self, extras):  # type: (list) -> Installer
-        self._extras = extras
-
-        return self
-
-    def use_executor(self, use_executor=True):  # type: (bool) -> Installer
-        self._use_executor = use_executor
+    def extras(self, extras: list[str]) -> Installer:
+        self._extras = [canonicalize_name(extra) for extra in extras]
 
         return self
 
-    def _do_refresh(self):
-        from conda_lock._vendor.poetry.puzzle import Solver
+    def _do_refresh(self) -> int:
+        from conda_lock._vendor.poetry.puzzle.solver import Solver
 
         # Checking extras
         for extra in self._extras:
             if extra not in self._package.extras:
-                raise ValueError("Extra [{}] is not specified.".format(extra))
+                raise ValueError(f"Extra [{extra}] is not specified.")
 
-        locked_repository = self._locker.locked_repository(True)
+        locked_repository = self._locker.locked_repository()
         solver = Solver(
             self._package,
             self._pool,
-            locked_repository,
-            locked_repository,
-            self._io,  # noqa
+            locked_repository.packages,
+            locked_repository.packages,
+            self._io,
         )
 
-        ops = solver.solve(use_latest=[])
+        # Always re-solve directory dependencies, otherwise we can't determine
+        # if anything has changed (and the lock file contains an invalid version).
+        use_latest = [
+            p.name for p in locked_repository.packages if p.source_type == "directory"
+        ]
+
+        with solver.provider.use_source_root(
+            source_root=self._env.path.joinpath("src")
+        ):
+            ops = solver.solve(use_latest=use_latest).calculate_operations()
 
-        local_repo = Repository()
-        self._populate_local_repo(local_repo, ops)
+        lockfile_repo = LockfileRepository()
+        self._populate_lockfile_repo(lockfile_repo, ops)
 
-        self._write_lock_file(local_repo, force=True)
+        self._write_lock_file(lockfile_repo, force=True)
 
         return 0
 
-    def _do_install(self, local_repo):
-        from conda_lock._vendor.poetry.puzzle import Solver
+    def _do_install(self) -> int:
+        from conda_lock._vendor.poetry.puzzle.solver import Solver
 
-        locked_repository = Repository()
+        locked_repository = Repository("poetry-locked")
         if self._update:
-            if self._locker.is_locked() and not self._lock:
-                locked_repository = self._locker.locked_repository(True)
+            if not self._lock and self._locker.is_locked():
+                locked_repository = self._locker.locked_repository()
 
                 # If no packages have been whitelisted (the ones we want to update),
                 # we whitelist every package in the lock file.
@@ -220,56 +224,57 @@ def _do_install(self, local_repo):
             # Checking extras
             for extra in self._extras:
                 if extra not in self._package.extras:
-                    raise ValueError("Extra [{}] is not specified.".format(extra))
+                    raise ValueError(f"Extra [{extra}] is not specified.")
 
             self._io.write_line("Updating dependencies")
             solver = Solver(
                 self._package,
                 self._pool,
-                self._installed_repository,
-                locked_repository,
+                self._installed_repository.packages,
+                locked_repository.packages,
                 self._io,
-                remove_untracked=self._remove_untracked,
             )
 
-            ops = solver.solve(use_latest=self._whitelist)
+            with solver.provider.use_source_root(
+                source_root=self._env.path.joinpath("src")
+            ):
+                ops = solver.solve(use_latest=self._whitelist).calculate_operations()
         else:
             self._io.write_line("Installing dependencies from lock file")
 
-            locked_repository = self._locker.locked_repository(True)
+            locked_repository = self._locker.locked_repository()
 
             if not self._locker.is_fresh():
-                self._io.write_line(
-                    ""
-                    "Warning: The lock file is not up to date with "
-                    "the latest changes in pyproject.toml. "
-                    "You may be getting outdated dependencies. "
-                    "Run update to update them."
-                    ""
+                raise ValueError(
+                    "pyproject.toml changed significantly since poetry.lock was last generated. "
+                    "Run `poetry lock [--no-update]` to fix the lock file."
                 )
 
+            locker_extras = {
+                canonicalize_name(extra)
+                for extra in self._locker.lock_data.get("extras", {})
+            }
             for extra in self._extras:
-                if extra not in self._locker.lock_data.get("extras", {}):
-                    raise ValueError("Extra [{}] is not specified.".format(extra))
+                if extra not in locker_extras:
+                    raise ValueError(f"Extra [{extra}] is not specified.")
 
             # If we are installing from the lock file, filter the
             # operations by comparing them with what is currently
             # installed.
             ops = self._get_operations_from_lock(locked_repository)
 
-        self._populate_local_repo(local_repo, ops)
+        lockfile_repo = LockfileRepository()
+        uninstalls = self._populate_lockfile_repo(lockfile_repo, ops)
 
-        if self._update:
-            self._write_lock_file(local_repo)
+        if not self.executor.enabled:
+            # If we are only in lock mode, no need to go any further
+            self._write_lock_file(lockfile_repo)
+            return 0
 
-            if self._lock:
-                # If we are only in lock mode, no need to go any further
-                return 0
-
-        root = self._package
-        if not self.is_dev_mode():
-            root = root.clone()
-            del root.dev_requires[:]
+        if self._groups is not None:
+            root = self._package.with_dependency_groups(list(self._groups), only=True)
+        else:
+            root = self._package.without_optional_dependency_groups()
 
         if self._io.is_verbose():
             self._io.write_line("")
@@ -278,201 +283,99 @@ def _do_install(self, local_repo):
             )
 
         # We resolve again by only using the lock file
-        pool = Pool(ignore_repository_names=True)
-
-        # Making a new repo containing the packages
-        # newly resolved and the ones from the current lock file
-        repo = Repository()
-        for package in local_repo.packages + locked_repository.packages:
-            if not repo.has_package(package):
-                repo.add_package(package)
-
-        pool.add_repository(repo)
+        packages = lockfile_repo.packages + locked_repository.packages
+        pool = RepositoryPool.from_packages(packages, self._config)
 
         solver = Solver(
             root,
             pool,
-            self._installed_repository,
-            locked_repository,
+            self._installed_repository.packages,
+            locked_repository.packages,
             NullIO(),
-            remove_untracked=self._remove_untracked,
         )
         # Everything is resolved at this point, so we no longer need
         # to load deferred dependencies (i.e. VCS, URL and path dependencies)
         solver.provider.load_deferred(False)
 
         with solver.use_environment(self._env):
-            ops = solver.solve(use_latest=self._whitelist)
-
-        # We need to filter operations so that packages
-        # not compatible with the current system,
-        # or optional and not requested, are dropped
-        self._filter_operations(ops, local_repo)
-
-        # Execute operations
-        return self._execute(ops)
-
-    def _write_lock_file(self, repo, force=True):  # type: (Repository, bool) -> None
-        if force or (self._update and self._write_lock):
-            updated_lock = self._locker.set_lock_data(self._package, repo.packages)
-
-            if updated_lock:
-                self._io.write_line("")
-                self._io.write_line("Writing lock file")
-
-    def _execute(self, operations):
-        if self._use_executor:
-            return self._executor.execute(operations)
-
-        if not operations and (self._execute_operations or self._dry_run):
-            self._io.write_line("No dependencies to install or update")
-
-        if operations and (self._execute_operations or self._dry_run):
-            installs = 0
-            updates = 0
-            uninstalls = 0
-            skipped = 0
-            for op in operations:
-                if op.skipped:
-                    skipped += 1
-                elif op.job_type == "install":
-                    installs += 1
-                elif op.job_type == "update":
-                    updates += 1
-                elif op.job_type == "uninstall":
-                    uninstalls += 1
-
-            self._io.write_line("")
-            self._io.write_line(
-                "Package operations: "
-                "{} install{}, "
-                "{} update{}, "
-                "{} removal{}"
-                "{}".format(
-                    installs,
-                    "" if installs == 1 else "s",
-                    updates,
-                    "" if updates == 1 else "s",
-                    uninstalls,
-                    "" if uninstalls == 1 else "s",
-                    ", {} skipped".format(skipped)
-                    if skipped and self.is_verbose()
-                    else "",
-                )
+            ops = solver.solve(use_latest=self._whitelist).calculate_operations(
+                with_uninstalls=self._requires_synchronization,
+                synchronize=self._requires_synchronization,
+                skip_directory=self._skip_directory,
             )
 
-        self._io.write_line("")
-
-        for op in operations:
-            self._execute_operation(op)
-
-        return 0
-
-    def _execute_operation(self, operation):  # type: (Operation) -> None
-        """
-        Execute a given operation.
-        """
-        method = operation.job_type
-
-        getattr(self, "_execute_{}".format(method))(operation)
-
-    def _execute_install(self, operation):  # type: (Install) -> None
-        if operation.skipped:
-            if self.is_verbose() and (self._execute_operations or self.is_dry_run()):
-                self._io.write_line(
-                    "  - Skipping {} ({}) {}".format(
-                        operation.package.pretty_name,
-                        operation.package.full_pretty_version,
-                        operation.skip_reason,
-                    )
-                )
+        if not self._requires_synchronization:
+            # If no package synchronization has been requested, we need
+            # to calculate the uninstall operations ourselves.
+            from conda_lock._vendor.poetry.puzzle.transaction import Transaction
 
-            return
-
-        if self._execute_operations or self.is_dry_run():
-            self._io.write_line(
-                "  - Installing {} ({})".format(
-                    operation.package.pretty_name, operation.package.full_pretty_version
-                )
+            transaction = Transaction(
+                locked_repository.packages,
+                [(package, 0) for package in lockfile_repo.packages],
+                installed_packages=self._installed_repository.packages,
+                root_package=root,
             )
 
-        if not self._execute_operations:
-            return
-
-        self._installer.install(operation.package)
-
-    def _execute_update(self, operation):  # type: (Update) -> None
-        source = operation.initial_package
-        target = operation.target_package
-
-        if operation.skipped:
-            if self.is_verbose() and (self._execute_operations or self.is_dry_run()):
-                self._io.write_line(
-                    "  - Skipping {} ({}) {}".format(
-                        target.pretty_name,
-                        target.full_pretty_version,
-                        operation.skip_reason,
-                    )
-                )
-
-            return
+            ops = [
+                op
+                for op in transaction.calculate_operations(with_uninstalls=True)
+                if op.job_type == "uninstall"
+            ] + ops
+        else:
+            ops = uninstalls + ops
 
-        if self._execute_operations or self.is_dry_run():
-            self._io.write_line(
-                "  - Updating {} ({} -> {})".format(
-                    target.pretty_name,
-                    source.full_pretty_version,
-                    target.full_pretty_version,
-                )
-            )
+        # We need to filter operations so that packages
+        # not compatible with the current system,
+        # or optional and not requested, are dropped
+        self._filter_operations(ops, lockfile_repo)
 
-        if not self._execute_operations:
-            return
+        # Validate the dependencies
+        for op in ops:
+            dep = op.package.to_dependency()
+            if dep.is_file() or dep.is_directory():
+                dep = cast("PathDependency", dep)
+                dep.validate(raise_error=not op.skipped)
 
-        self._installer.update(source, target)
+        # Execute operations
+        status = self._execute(ops)
 
-    def _execute_uninstall(self, operation):  # type: (Uninstall) -> None
-        if operation.skipped:
-            if self.is_verbose() and (self._execute_operations or self.is_dry_run()):
-                self._io.write_line(
-                    "  - Not removing {} ({}) {}".format(
-                        operation.package.pretty_name,
-                        operation.package.full_pretty_version,
-                        operation.skip_reason,
-                    )
-                )
+        if status == 0 and self._update:
+            # Only write the lock file when the installation succeeds.
+            self._write_lock_file(lockfile_repo)
 
-            return
+        return status
 
-        if self._execute_operations or self.is_dry_run():
-            self._io.write_line(
-                "  - Removing {} ({})".format(
-                    operation.package.pretty_name, operation.package.full_pretty_version
-                )
-            )
+    def _write_lock_file(self, repo: LockfileRepository, force: bool = False) -> None:
+        if not self.is_dry_run() and (force or self._update):
+            updated_lock = self._locker.set_lock_data(self._package, repo.packages)
 
-        if not self._execute_operations:
-            return
+            if updated_lock:
+                self._io.write_line("")
+                self._io.write_line("Writing lock file")
 
-        self._installer.remove(operation.package)
+    def _execute(self, operations: list[Operation]) -> int:
+        return self._executor.execute(operations)
 
-    def _populate_local_repo(self, local_repo, ops):
+    def _populate_lockfile_repo(
+        self, repo: LockfileRepository, ops: Iterable[Operation]
+    ) -> list[Uninstall]:
+        uninstalls = []
         for op in ops:
             if isinstance(op, Uninstall):
+                uninstalls.append(op)
                 continue
-            elif isinstance(op, Update):
-                package = op.target_package
-            else:
-                package = op.package
 
-            if not local_repo.has_package(package):
-                local_repo.add_package(package)
+            package = op.target_package if isinstance(op, Update) else op.package
+            if not repo.has_package(package):
+                repo.add_package(package)
+
+        return uninstalls
 
     def _get_operations_from_lock(
-        self, locked_repository  # type: Repository
-    ):  # type: (...) -> List[Operation]
+        self, locked_repository: Repository
+    ) -> list[Operation]:
         installed_repo = self._installed_repository
-        ops = []
+        ops: list[Operation] = []
 
         extra_packages = self._get_extra_packages(locked_repository)
         for locked in locked_repository.packages:
@@ -480,9 +383,7 @@ def _get_operations_from_lock(
             for installed in installed_repo.packages:
                 if locked.name == installed.name:
                     is_installed = True
-                    if locked.category == "dev" and not self.is_dev_mode():
-                        ops.append(Uninstall(locked))
-                    elif locked.optional and locked.name not in extra_packages:
+                    if locked.optional and locked.name not in extra_packages:
                         # Installed but optional and not requested in extras
                         ops.append(Uninstall(locked))
                     elif locked.version != installed.version:
@@ -501,15 +402,10 @@ def _get_operations_from_lock(
 
         return ops
 
-    def _filter_operations(
-        self, ops, repo
-    ):  # type: (List[Operation], Repository) -> None
+    def _filter_operations(self, ops: Iterable[Operation], repo: Repository) -> None:
         extra_packages = self._get_extra_packages(repo)
         for op in ops:
-            if isinstance(op, Update):
-                package = op.target_package
-            else:
-                package = op.package
+            package = op.target_package if isinstance(op, Update) else op.package
 
             if op.job_type == "uninstall":
                 continue
@@ -518,41 +414,30 @@ def _filter_operations(
                 op.skip("Not needed for the current environment")
                 continue
 
-            if self._update:
-                extras = {}
-                for extra, deps in self._package.extras.items():
-                    extras[extra] = [dep.name for dep in deps]
-            else:
-                extras = {}
-                for extra, deps in self._locker.lock_data.get("extras", {}).items():
-                    extras[extra] = [dep.lower() for dep in deps]
-
             # If a package is optional and not requested
             # in any extra we skip it
-            if package.optional:
-                if package.name not in extra_packages:
-                    op.skip("Not required")
+            if package.optional and package.name not in extra_packages:
+                op.skip("Not required")
 
-            # If the package is a dev package and dev packages
-            # are not requested, we skip it
-            if package.category == "dev" and not self.is_dev_mode():
-                op.skip("Dev dependencies not requested")
-
-    def _get_extra_packages(self, repo):  # type: (Repository) -> List[str]
+    def _get_extra_packages(self, repo: Repository) -> set[NormalizedName]:
         """
         Returns all package names required by extras.
 
         Maybe we just let the solver handle it?
         """
+        extras: dict[NormalizedName, list[NormalizedName]]
         if self._update:
             extras = {k: [d.name for d in v] for k, v in self._package.extras.items()}
         else:
-            extras = self._locker.lock_data.get("extras", {})
-
-        return list(get_extra_package_names(repo.packages, extras, self._extras))
+            raw_extras = self._locker.lock_data.get("extras", {})
+            extras = {
+                canonicalize_name(extra): [
+                    canonicalize_name(dependency) for dependency in dependencies
+                ]
+                for extra, dependencies in raw_extras.items()
+            }
 
-    def _get_installer(self):  # type: () -> BaseInstaller
-        return PipInstaller(self._env, self._io, self._pool)
+        return get_extra_package_names(repo.packages, extras, self._extras)
 
-    def _get_installed(self):  # type: () -> InstalledRepository
+    def _get_installed(self) -> InstalledRepository:
         return InstalledRepository.load(self._env)
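Note: the refactored Installer keeps its fluent, chainable configuration API.
A hedged usage sketch (io, env, package, locker, pool, and config are assumed
to be constructed elsewhere):

    # Configure an install run from the lock file and execute it.
    installer = Installer(io, env, package, locker, pool, config)
    installer.only_groups(["main"])       # restrict to given dependency groups
    installer.requires_synchronization()  # also uninstall untracked packages
    installer.dry_run(True)               # report operations without executing
    status = installer.run()              # returns an exit-code-style int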
diff --git a/conda_lock/_vendor/poetry/installation/noop_installer.py b/conda_lock/_vendor/poetry/installation/noop_installer.py
deleted file mode 100644
index 0f0c6cda..00000000
--- a/conda_lock/_vendor/poetry/installation/noop_installer.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from .base_installer import BaseInstaller
-
-
-class NoopInstaller(BaseInstaller):
-    def __init__(self):
-        self._installs = []
-        self._updates = []
-        self._removals = []
-
-    @property
-    def installs(self):
-        return self._installs
-
-    @property
-    def updates(self):
-        return self._updates
-
-    @property
-    def removals(self):
-        return self._removals
-
-    def install(self, package):
-        self._installs.append(package)
-
-    def update(self, source, target):
-        self._updates.append((source, target))
-
-    def remove(self, package):
-        self._removals.append(package)
diff --git a/conda_lock/_vendor/poetry/installation/operations/__init__.py b/conda_lock/_vendor/poetry/installation/operations/__init__.py
index 42573c10..04a02158 100644
--- a/conda_lock/_vendor/poetry/installation/operations/__init__.py
+++ b/conda_lock/_vendor/poetry/installation/operations/__init__.py
@@ -1,3 +1,8 @@
-from .install import Install
-from .uninstall import Uninstall
-from .update import Update
+from __future__ import annotations
+
+from conda_lock._vendor.poetry.installation.operations.install import Install
+from conda_lock._vendor.poetry.installation.operations.uninstall import Uninstall
+from conda_lock._vendor.poetry.installation.operations.update import Update
+
+
+__all__ = ["Install", "Uninstall", "Update"]
diff --git a/conda_lock/_vendor/poetry/installation/operations/install.py b/conda_lock/_vendor/poetry/installation/operations/install.py
index 48097c7c..439ec872 100644
--- a/conda_lock/_vendor/poetry/installation/operations/install.py
+++ b/conda_lock/_vendor/poetry/installation/operations/install.py
@@ -1,26 +1,38 @@
-from .operation import Operation
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from conda_lock._vendor.poetry.installation.operations.operation import Operation
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.core.packages.package import Package
 
 
 class Install(Operation):
-    def __init__(self, package, reason=None, priority=0):
-        super(Install, self).__init__(reason, priority=priority)
+    def __init__(
+        self, package: Package, reason: str | None = None, priority: int = 0
+    ) -> None:
+        super().__init__(reason, priority=priority)
 
         self._package = package
 
     @property
-    def package(self):
+    def package(self) -> Package:
         return self._package
 
     @property
-    def job_type(self):
+    def job_type(self) -> str:
         return "install"
 
-    def __str__(self):
-        return "Installing {} ({})".format(
-            self.package.pretty_name, self.format_version(self.package)
+    def __str__(self) -> str:
+        return (
+            "Installing"
+            f" {self.package.pretty_name} ({self.format_version(self.package)})"
         )
 
-    def __repr__(self):
-        return "".format(
-            self.package.pretty_name, self.format_version(self.package)
+    def __repr__(self) -> str:
+        return (
+            ""
         )
diff --git a/conda_lock/_vendor/poetry/installation/operations/operation.py b/conda_lock/_vendor/poetry/installation/operations/operation.py
index 0c72cc8c..b494785c 100644
--- a/conda_lock/_vendor/poetry/installation/operations/operation.py
+++ b/conda_lock/_vendor/poetry/installation/operations/operation.py
@@ -1,52 +1,58 @@
-# -*- coding: utf-8 -*-
+from __future__ import annotations
 
-from typing import Union
+from typing import TYPE_CHECKING
+from typing import TypeVar
 
 
-class Operation(object):
-    def __init__(
-        self, reason=None, priority=0
-    ):  # type: (Union[str, None], int) -> None
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.core.packages.package import Package
+
+T = TypeVar("T", bound="Operation")
+
+
+class Operation:
+    def __init__(self, reason: str | None = None, priority: float = 0) -> None:
         self._reason = reason
 
         self._skipped = False
-        self._skip_reason = None
+        self._skip_reason: str | None = None
         self._priority = priority
 
     @property
-    def job_type(self):  # type: () -> str
+    def job_type(self) -> str:
         raise NotImplementedError
 
     @property
-    def reason(self):  # type: () -> str
+    def reason(self) -> str | None:
         return self._reason
 
     @property
-    def skipped(self):  # type: () -> bool
+    def skipped(self) -> bool:
         return self._skipped
 
     @property
-    def skip_reason(self):  # type: () -> Union[str, None]
+    def skip_reason(self) -> str | None:
         return self._skip_reason
 
     @property
-    def priority(self):  # type: () -> int
+    def priority(self) -> float:
         return self._priority
 
     @property
-    def package(self):
+    def package(self) -> Package:
         raise NotImplementedError()
 
-    def format_version(self, package):  # type: (...) -> str
-        return package.full_pretty_version
+    def format_version(self, package: Package) -> str:
+        version: str = package.full_pretty_version
+        return version
 
-    def skip(self, reason):  # type: (str) -> Operation
+    def skip(self: T, reason: str) -> T:
         self._skipped = True
         self._skip_reason = reason
 
         return self
 
-    def unskip(self):  # type: () -> Operation
+    def unskip(self: T) -> T:
         self._skipped = False
         self._skip_reason = None
 
         return self
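Note: typing skip()/unskip() with the self-type T = TypeVar("T", bound="Operation")
means chained calls preserve the concrete subclass. A small sketch (pkg is an
assumed Package instance):

    op = Install(pkg).skip("Not needed for the current environment")
    # Type checkers see `op` as Install, not as the Operation base class.
    assert op.skipped and op.skip_reason is not None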
 
diff --git a/conda_lock/_vendor/poetry/installation/operations/uninstall.py b/conda_lock/_vendor/poetry/installation/operations/uninstall.py
index b7e40bc6..689eb2bf 100644
--- a/conda_lock/_vendor/poetry/installation/operations/uninstall.py
+++ b/conda_lock/_vendor/poetry/installation/operations/uninstall.py
@@ -1,26 +1,41 @@
-from .operation import Operation
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from conda_lock._vendor.poetry.installation.operations.operation import Operation
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.core.packages.package import Package
 
 
 class Uninstall(Operation):
-    def __init__(self, package, reason=None, priority=float("inf")):
-        super(Uninstall, self).__init__(reason, priority=priority)
+    def __init__(
+        self,
+        package: Package,
+        reason: str | None = None,
+        priority: float = float("inf"),
+    ) -> None:
+        super().__init__(reason, priority=priority)
 
         self._package = package
 
     @property
-    def package(self):
+    def package(self) -> Package:
         return self._package
 
     @property
-    def job_type(self):
+    def job_type(self) -> str:
         return "uninstall"
 
-    def __str__(self):
-        return "Uninstalling {} ({})".format(
-            self.package.pretty_name, self.format_version(self._package)
+    def __str__(self) -> str:
+        return (
+            "Uninstalling"
+            f" {self.package.pretty_name} ({self.format_version(self._package)})"
         )
 
-    def __repr__(self):
-        return "".format(
-            self.package.pretty_name, self.format_version(self.package)
+    def __repr__(self) -> str:
+        return (
+            ""
         )
diff --git a/conda_lock/_vendor/poetry/installation/operations/update.py b/conda_lock/_vendor/poetry/installation/operations/update.py
index 87803fd7..1f45c78d 100644
--- a/conda_lock/_vendor/poetry/installation/operations/update.py
+++ b/conda_lock/_vendor/poetry/installation/operations/update.py
@@ -1,41 +1,55 @@
-from .operation import Operation
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from conda_lock._vendor.poetry.installation.operations.operation import Operation
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.core.packages.package import Package
 
 
 class Update(Operation):
-    def __init__(self, initial, target, reason=None, priority=0):
+    def __init__(
+        self,
+        initial: Package,
+        target: Package,
+        reason: str | None = None,
+        priority: int = 0,
+    ) -> None:
         self._initial_package = initial
         self._target_package = target
 
-        super(Update, self).__init__(reason, priority=priority)
+        super().__init__(reason, priority=priority)
 
     @property
-    def initial_package(self):
+    def initial_package(self) -> Package:
         return self._initial_package
 
     @property
-    def target_package(self):
+    def target_package(self) -> Package:
         return self._target_package
 
     @property
-    def package(self):
+    def package(self) -> Package:
         return self._target_package
 
     @property
-    def job_type(self):
+    def job_type(self) -> str:
         return "update"
 
-    def __str__(self):
-        return "Updating {} ({}) to {} ({})".format(
-            self.initial_package.pretty_name,
-            self.format_version(self.initial_package),
-            self.target_package.pretty_name,
-            self.format_version(self.target_package),
+    def __str__(self) -> str:
+        init_version = self.format_version(self.initial_package)
+        target_version = self.format_version(self.target_package)
+        return (
+            f"Updating {self.initial_package.pretty_name} ({init_version}) "
+            f"to {self.target_package.pretty_name} ({target_version})"
         )
 
-    def __repr__(self):
-        return "".format(
-            self.initial_package.pretty_name,
-            self.format_version(self.initial_package),
-            self.target_package.pretty_name,
-            self.format_version(self.target_package),
+    def __repr__(self) -> str:
+        init_version = self.format_version(self.initial_package)
+        target_version = self.format_version(self.target_package)
+        return (
+            f""
         )
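Note: taken together, the __str__/__repr__ implementations of the three
operation classes render along these lines (names and versions invented):

    str(Install(pkg))         # "Installing foo (1.2.3)"
    repr(Install(pkg))        # "<Install foo (1.2.3)>"
    str(Update(old, new))     # "Updating foo (1.2.3) to foo (1.3.0)"
    str(Uninstall(pkg))       # "Uninstalling foo (1.2.3)"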
diff --git a/conda_lock/_vendor/poetry/installation/pip_installer.py b/conda_lock/_vendor/poetry/installation/pip_installer.py
deleted file mode 100644
index 919520c6..00000000
--- a/conda_lock/_vendor/poetry/installation/pip_installer.py
+++ /dev/null
@@ -1,271 +0,0 @@
-import os
-import tempfile
-
-from subprocess import CalledProcessError
-
-from clikit.api.io import IO
-
-from conda_lock._vendor.poetry.core.pyproject.toml import PyProjectTOML
-from conda_lock._vendor.poetry.repositories.pool import Pool
-from conda_lock._vendor.poetry.utils._compat import encode
-from conda_lock._vendor.poetry.utils.env import Env
-from conda_lock._vendor.poetry.utils.helpers import safe_rmtree
-
-from .base_installer import BaseInstaller
-
-
-try:
-    import urllib.parse as urlparse
-except ImportError:
-    import urlparse
-
-
-class PipInstaller(BaseInstaller):
-    def __init__(self, env, io, pool):  # type: (Env, IO, Pool) -> None
-        self._env = env
-        self._io = io
-        self._pool = pool
-
-    def install(self, package, update=False):
-        if package.source_type == "directory":
-            self.install_directory(package)
-
-            return
-
-        if package.source_type == "git":
-            self.install_git(package)
-
-            return
-
-        args = ["install", "--no-deps"]
-
-        if (
-            package.source_type not in {"git", "directory", "file", "url"}
-            and package.source_url
-        ):
-            repository = self._pool.repository(package.source_reference)
-            parsed = urlparse.urlparse(package.source_url)
-            if parsed.scheme == "http":
-                self._io.error(
-                    "    Installing from unsecure host: {}".format(
-                        parsed.hostname
-                    )
-                )
-                args += ["--trusted-host", parsed.hostname]
-
-            if repository.cert:
-                args += ["--cert", str(repository.cert)]
-
-            if repository.client_cert:
-                args += ["--client-cert", str(repository.client_cert)]
-
-            index_url = repository.authenticated_url
-
-            args += ["--index-url", index_url]
-            if self._pool.has_default():
-                if repository.name != self._pool.repositories[0].name:
-                    args += [
-                        "--extra-index-url",
-                        self._pool.repositories[0].authenticated_url,
-                    ]
-
-        if update:
-            args.append("-U")
-
-        if package.files and not package.source_url:
-            # Format as a requirements.txt
-            # We need to create a requirements.txt file
-            # for each package in order to check hashes.
-            # This is far from optimal but we do not have any
-            # other choice since this is the only way for pip
-            # to verify hashes.
-            req = self.create_temporary_requirement(package)
-            args += ["-r", req]
-
-            try:
-                self.run(*args)
-            finally:
-                os.unlink(req)
-        else:
-            req = self.requirement(package)
-            if not isinstance(req, list):
-                args.append(req)
-            else:
-                args += req
-
-            self.run(*args)
-
-    def update(self, package, target):
-        if package.source_type != target.source_type:
-            # If the source type has changed, we remove the current
-            # package to avoid perpetual updates in some cases
-            self.remove(package)
-
-        self.install(target, update=True)
-
-    def remove(self, package):
-        try:
-            self.run("uninstall", package.name, "-y")
-        except CalledProcessError as e:
-            if "not installed" in str(e):
-                return
-
-            raise
-
-        # This is a workaround for https://github.com/pypa/pip/issues/4176
-        nspkg_pth_file = self._env.site_packages.path / "{}-nspkg.pth".format(
-            package.name
-        )
-        if nspkg_pth_file.exists():
-            nspkg_pth_file.unlink()
-
-        # If we have a VCS package, remove its source directory
-        if package.source_type == "git":
-            src_dir = self._env.path / "src" / package.name
-            if src_dir.exists():
-                safe_rmtree(str(src_dir))
-
-    def run(self, *args, **kwargs):  # type: (...) -> str
-        return self._env.run_pip(*args, **kwargs)
-
-    def requirement(self, package, formatted=False):
-        if formatted and not package.source_type:
-            req = "{}=={}".format(package.name, package.version)
-            for f in package.files:
-                hash_type = "sha256"
-                h = f["hash"]
-                if ":" in h:
-                    hash_type, h = h.split(":")
-
-                req += " --hash {}:{}".format(hash_type, h)
-
-            req += "\n"
-
-            return req
-
-        if package.source_type in ["file", "directory"]:
-            if package.root_dir:
-                req = (package.root_dir / package.source_url).as_posix()
-            else:
-                req = os.path.realpath(package.source_url)
-
-            if package.develop and package.source_type == "directory":
-                req = ["-e", req]
-
-            return req
-
-        if package.source_type == "git":
-            req = "git+{}@{}#egg={}".format(
-                package.source_url, package.source_reference, package.name
-            )
-
-            if package.develop:
-                req = ["-e", req]
-
-            return req
-
-        if package.source_type == "url":
-            return "{}#egg={}".format(package.source_url, package.name)
-
-        return "{}=={}".format(package.name, package.version)
-
-    def create_temporary_requirement(self, package):
-        fd, name = tempfile.mkstemp(
-            "reqs.txt", "{}-{}".format(package.name, package.version)
-        )
-
-        try:
-            os.write(fd, encode(self.requirement(package, formatted=True)))
-        finally:
-            os.close(fd)
-
-        return name
-
-    def install_directory(self, package):
-        from conda_lock._vendor.poetry.factory import Factory
-        from conda_lock._vendor.poetry.io.null_io import NullIO
-
-        if package.root_dir:
-            req = (package.root_dir / package.source_url).as_posix()
-        else:
-            req = os.path.realpath(package.source_url)
-
-        args = ["install", "--no-deps", "-U"]
-
-        pyproject = PyProjectTOML(os.path.join(req, "pyproject.toml"))
-
-        if pyproject.is_poetry_project():
-            # Even if there is a build system specified
-            # some versions of pip (< 19.0.0) don't understand it
-            # so we need to check the version of pip to know
-            # if we can rely on the build system
-            legacy_pip = self._env.pip_version < self._env.pip_version.__class__(
-                19, 0, 0
-            )
-
-            try:
-                package_poetry = Factory().create_poetry(pyproject.file.path.parent)
-            except RuntimeError:
-                package_poetry = None
-
-            if package_poetry is not None:
-                if package.develop and not package_poetry.package.build_script:
-                    from conda_lock._vendor.poetry.masonry.builders.editable import EditableBuilder
-
-                    # This is a Poetry package in editable mode
-                    # we can use the EditableBuilder without going through pip
-                    # to install it, unless it has a build script.
-                    builder = EditableBuilder(package_poetry, self._env, NullIO())
-                    builder.build()
-
-                    return 0
-                elif legacy_pip or package_poetry.package.build_script:
-                    from conda_lock._vendor.poetry.core.masonry.builders.sdist import SdistBuilder
-
-                    # We need to rely on creating a temporary setup.py
-                    # file since the version of pip does not support
-                    # build-systems
-                    # We also need it for non-PEP-517 packages
-                    builder = SdistBuilder(package_poetry)
-
-                    with builder.setup_py():
-                        if package.develop:
-                            args.append("-e")
-
-                        args.append(req)
-
-                        return self.run(*args)
-
-        if package.develop:
-            args.append("-e")
-
-        args.append(req)
-
-        return self.run(*args)
-
-    def install_git(self, package):
-        from conda_lock._vendor.poetry.core.packages import Package
-        from conda_lock._vendor.poetry.core.vcs import Git
-
-        src_dir = self._env.path / "src" / package.name
-        if src_dir.exists():
-            safe_rmtree(str(src_dir))
-
-        src_dir.parent.mkdir(exist_ok=True)
-
-        git = Git()
-        git.clone(package.source_url, src_dir)
-
-        reference = package.source_resolved_reference
-        if not reference:
-            reference = package.source_reference
-
-        git.checkout(reference, src_dir)
-
-        # Now we just need to install from the source directory
-        pkg = Package(package.name, package.version)
-        pkg._source_type = "directory"
-        pkg._source_url = str(src_dir)
-        pkg.develop = package.develop
-
-        self.install_directory(pkg)
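Note: the removed PipInstaller.requirement() translated packages into
pip-compatible requirement specifiers. A sketch of the forms it produced
(names, URLs, and paths invented):

    "foo==1.2.3"                                        # plain index package
    "git+https://github.com/org/foo.git@main#egg=foo"   # git package
    "https://example.com/foo-1.2.3.tar.gz#egg=foo"      # url package
    ["-e", "/path/to/foo"]                              # editable directory package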
diff --git a/conda_lock/_vendor/poetry/installation/wheel_installer.py b/conda_lock/_vendor/poetry/installation/wheel_installer.py
new file mode 100644
index 00000000..c1b808b8
--- /dev/null
+++ b/conda_lock/_vendor/poetry/installation/wheel_installer.py
@@ -0,0 +1,116 @@
+from __future__ import annotations
+
+import logging
+import platform
+import sys
+
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+from installer import install
+from installer.destinations import SchemeDictionaryDestination
+from installer.sources import WheelFile
+from installer.sources import _WheelFileValidationError
+
+from conda_lock._vendor.poetry.__version__ import __version__
+from conda_lock._vendor.poetry.utils._compat import WINDOWS
+
+
+logger = logging.getLogger(__name__)
+
+if TYPE_CHECKING:
+    from collections.abc import Collection
+    from typing import BinaryIO
+
+    from installer.records import RecordEntry
+    from installer.scripts import LauncherKind
+    from installer.utils import Scheme
+
+    from conda_lock._vendor.poetry.utils.env import Env
+
+
+class WheelDestination(SchemeDictionaryDestination):
+    """ """
+
+    def write_to_fs(
+        self,
+        scheme: Scheme,
+        path: str,
+        stream: BinaryIO,
+        is_executable: bool,
+    ) -> RecordEntry:
+        from installer.records import Hash
+        from installer.records import RecordEntry
+        from installer.utils import copyfileobj_with_hashing
+        from installer.utils import make_file_executable
+
+        target_path = Path(self.scheme_dict[scheme]) / path
+        if target_path.exists():
+            # Contrary to the base library we don't raise an error here since it can
+            # break pkgutil-style and pkg_resource-style namespace packages.
+            logger.warning(f"Installing {target_path} over existing file")
+
+        parent_folder = target_path.parent
+        if not parent_folder.exists():
+            # Because installs run in parallel, two threads may try to
+            # create this directory at the same time.
+            parent_folder.mkdir(parents=True, exist_ok=True)
+
+        with target_path.open("wb") as f:
+            hash_, size = copyfileobj_with_hashing(stream, f, self.hash_algorithm)
+
+        if is_executable:
+            make_file_executable(target_path)
+
+        return RecordEntry(path, Hash(self.hash_algorithm, hash_), size)
+
+
+class WheelInstaller:
+    def __init__(self, env: Env) -> None:
+        self._env = env
+
+        script_kind: LauncherKind
+        if not WINDOWS:
+            script_kind = "posix"
+        else:
+            if platform.uname()[4].startswith("arm"):
+                script_kind = "win-arm64" if sys.maxsize > 2**32 else "win-arm"
+            else:
+                script_kind = "win-amd64" if sys.maxsize > 2**32 else "win-ia32"
+        self._script_kind = script_kind
+
+        self._bytecode_optimization_levels: Collection[int] = ()
+        self.invalid_wheels: dict[Path, list[str]] = {}
+
+    def enable_bytecode_compilation(self, enable: bool = True) -> None:
+        self._bytecode_optimization_levels = (-1,) if enable else ()
+
+    def install(self, wheel: Path) -> None:
+        with WheelFile.open(wheel) as source:
+            try:
+                # Content validation is temporarily disabled because of
+                # pypa/installer's out of memory issues with big wheels. See
+                # https://github.com/python-poetry/poetry/issues/7983
+                source.validate_record(validate_contents=False)
+            except _WheelFileValidationError as e:
+                self.invalid_wheels[wheel] = e.issues
+
+            scheme_dict = self._env.paths.copy()
+            scheme_dict["headers"] = str(
+                Path(scheme_dict["include"]) / source.distribution
+            )
+            destination = WheelDestination(
+                scheme_dict,
+                interpreter=str(self._env.python),
+                script_kind=self._script_kind,
+                bytecode_optimization_levels=self._bytecode_optimization_levels,
+            )
+
+            install(
+                source=source,
+                destination=destination,
+                # Additional metadata that is generated by the installation tool.
+                additional_metadata={
+                    "INSTALLER": f"Poetry {__version__}".encode(),
+                },
+            )
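Note: WheelInstaller delegates the actual unpacking to pypa's `installer`
library, per the imports at the top of the file. A hedged usage sketch
(env is an assumed poetry Env for the target environment; the wheel path
is invented):

    from pathlib import Path

    wheel_installer = WheelInstaller(env)
    wheel_installer.enable_bytecode_compilation(True)  # compile .pyc on install
    wheel_installer.install(Path("dist/foo-1.2.3-py3-none-any.whl"))
    if wheel_installer.invalid_wheels:
        # Wheels whose RECORD failed validation, keyed by wheel path.
        print(wheel_installer.invalid_wheels)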
diff --git a/conda_lock/_vendor/poetry/io/null_io.py b/conda_lock/_vendor/poetry/io/null_io.py
deleted file mode 100644
index 1acab4ea..00000000
--- a/conda_lock/_vendor/poetry/io/null_io.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from conda_lock._vendor.cleo.io.io_mixin import IOMixin
-from clikit.io import NullIO as BaseNullIO
-
-
-class NullIO(IOMixin, BaseNullIO):
-    """
-    A wrapper around CliKit's NullIO.
-    """
-
-    def __init__(self, *args, **kwargs):
-        super(NullIO, self).__init__(*args, **kwargs)
diff --git a/conda_lock/_vendor/poetry/json/__init__.py b/conda_lock/_vendor/poetry/json/__init__.py
index d50eb7a7..5b0e6ac2 100644
--- a/conda_lock/_vendor/poetry/json/__init__.py
+++ b/conda_lock/_vendor/poetry/json/__init__.py
@@ -1,41 +1,42 @@
+from __future__ import annotations
+
 import json
-import os
 
-from io import open
-from typing import List
+from pathlib import Path
+from typing import Any
 
-import jsonschema
+import fastjsonschema
 
+from fastjsonschema.exceptions import JsonSchemaException
+from conda_lock._vendor.poetry.core.json import SCHEMA_DIR as CORE_SCHEMA_DIR
 
-SCHEMA_DIR = os.path.join(os.path.dirname(__file__), "schemas")
 
+SCHEMA_DIR = Path(__file__).parent / "schemas"
 
-class ValidationError(ValueError):
 
+class ValidationError(ValueError):
     pass
 
 
-def validate_object(obj, schema_name):  # type: (dict, str) -> List[str]
-    schema = os.path.join(SCHEMA_DIR, "{}.json".format(schema_name))
-
-    if not os.path.exists(schema):
-        raise ValueError("Schema {} does not exist.".format(schema_name))
+def validate_object(obj: dict[str, Any]) -> list[str]:
+    schema_file = Path(SCHEMA_DIR, "poetry.json")
+    schema = json.loads(schema_file.read_text(encoding="utf-8"))
 
-    with open(schema, encoding="utf-8") as f:
-        schema = json.loads(f.read())
-
-    validator = jsonschema.Draft7Validator(schema)
-    validation_errors = sorted(validator.iter_errors(obj), key=lambda e: e.path)
+    validate = fastjsonschema.compile(schema)
 
     errors = []
-
-    for error in validation_errors:
-        message = error.message
-        if error.path:
-            message = "[{}] {}".format(
-                ".".join(str(x) for x in error.absolute_path), message
-            )
-
-        errors.append(message)
+    try:
+        validate(obj)
+    except JsonSchemaException as e:
+        errors = [e.message]
+
+    core_schema = json.loads(
+        (CORE_SCHEMA_DIR / "poetry-schema.json").read_text(encoding="utf-8")
+    )
+
+    properties = {*schema["properties"].keys(), *core_schema["properties"].keys()}
+    additional_properties = set(obj.keys()) - properties
+    for key in additional_properties:
+        errors.append(f"Additional properties are not allowed ('{key}' was unexpected)")
 
     return errors
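Note: unlike the jsonschema-based version it replaces, a validator compiled
with fastjsonschema raises on the first failure instead of iterating over
all errors, which is why validate_object collects at most one schema message
plus the manual additional-properties check. A minimal sketch of that behavior:

    import fastjsonschema
    from fastjsonschema.exceptions import JsonSchemaException

    validate = fastjsonschema.compile({"type": "object", "required": ["name"]})
    try:
        validate({})
    except JsonSchemaException as e:
        print(e.message)  # e.g. "data must contain ['name'] properties"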
diff --git a/conda_lock/_vendor/poetry/json/schemas/poetry-schema.json b/conda_lock/_vendor/poetry/json/schemas/poetry-schema.json
deleted file mode 100644
index e94b90d2..00000000
--- a/conda_lock/_vendor/poetry/json/schemas/poetry-schema.json
+++ /dev/null
@@ -1,530 +0,0 @@
-{
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "name": "Package",
-    "type": "object",
-    "additionalProperties": false,
-    "required": [
-        "name",
-        "version",
-        "description"
-    ],
-    "properties": {
-        "name": {
-            "type": "string",
-            "description": "Package name."
-        },
-        "version": {
-            "type": "string",
-            "description": "Package version."
-        },
-        "description": {
-            "type": "string",
-            "description": "Short package description."
-        },
-        "keywords": {
-            "type": "array",
-            "items": {
-                "type": "string",
-                "description": "A tag/keyword that this package relates to."
-            }
-        },
-        "homepage": {
-            "type": "string",
-            "description": "Homepage URL for the project.",
-            "format": "uri"
-        },
-        "repository": {
-            "type": "string",
-            "description": "Repository URL for the project.",
-            "format": "uri"
-        },
-        "documentation": {
-            "type": "string",
-            "description": "Documentation URL for the project.",
-            "format": "uri"
-        },
-        "license": {
-            "type": "string",
-            "description": "License name."
-        },
-        "authors": {
-            "$ref": "#/definitions/authors"
-        },
-        "maintainers": {
-            "$ref": "#/definitions/maintainers"
-        },
-        "readme": {
-            "type": "string",
-            "description": "The path to the README file"
-        },
-        "classifiers": {
-            "type": "array",
-            "description": "A list of trove classifers."
-        },
-        "packages": {
-            "type": "array",
-            "description": "A list of packages to include in the final distribution.",
-            "items": {
-                "type": "object",
-                "description": "Information about where the package resides.",
-                "additionalProperties": false,
-                "required": [
-                    "include"
-                ],
-                "properties": {
-                    "include": {
-                        "type": "string",
-                        "description": "What to include in the package."
-                    },
-                    "from": {
-                        "type": "string",
-                        "description": "Where the source directory of the package resides."
-                    },
-                    "format": {
-                        "oneOf": [
-                            {"type": "string"},
-                            {"type":  "array", "items": {"type":  "string"}}
-                        ],
-                        "description": "The format(s) for which the package must be included."
-                    }
-                }
-            }
-        },
-        "include": {
-            "type": "array",
-            "description": "A list of files and folders to include."
-        },
-        "exclude": {
-            "type": "array",
-            "description": "A list of files and folders to exclude."
-        },
-        "dependencies": {
-            "type": "object",
-            "description": "This is a hash of package name (keys) and version constraints (values) that are required to run this package.",
-            "required": [
-                "python"
-            ],
-            "properties": {
-                "python": {
-                    "type": "string",
-                    "description": "The Python versions the package is compatible with."
-                }
-            },
-            "$ref": "#/definitions/dependencies",
-            "additionalProperties": false
-        },
-        "dev-dependencies": {
-            "type": "object",
-            "description": "This is a hash of package name (keys) and version constraints (values) that this package requires for developing it (testing tools and such).",
-            "$ref": "#/definitions/dependencies",
-            "additionalProperties": false
-        },
-        "extras": {
-            "type": "object",
-            "patternProperties": {
-                "^[a-zA-Z-_.0-9]+$": {
-                    "type": "array",
-                    "items": {
-                        "type": "string"
-                    }
-                }
-            }
-        },
-        "build": {
-            "type": "string",
-            "description": "The file used to build extensions."
-        },
-        "source": {
-            "type": "array",
-            "description": "A set of additional repositories where packages can be found.",
-            "additionalProperties": {
-                "$ref": "#/definitions/repository"
-            },
-            "items": {
-                "$ref": "#/definitions/repository"
-            }
-        },
-        "scripts": {
-            "type": "object",
-            "description": "A hash of scripts to be installed.",
-            "items": {
-                "type": "string"
-            }
-        },
-        "plugins": {
-            "type": "object",
-            "description": "A hash of hashes representing plugins",
-            "patternProperties": {
-                "^[a-zA-Z-_.0-9]+$": {
-                    "type": "object",
-                    "patternProperties": {
-                        "^[a-zA-Z-_.0-9]+$": {
-                            "type": "string"
-                        }
-                    }
-                }
-            }
-        },
-        "urls": {
-            "type": "object",
-            "patternProperties": {
-                "^.+$": {
-                    "type": "string",
-                    "description": "The full url of the custom url."
-                }
-            }
-        }
-    },
-    "definitions": {
-        "authors": {
-            "type": "array",
-            "description": "List of authors that contributed to the package. This is typically the main maintainers, not the full list.",
-            "items": {
-                "type": "string"
-            }
-        },
-        "maintainers": {
-            "type": "array",
-            "description": "List of maintainers, other than the original author(s), that upkeep the package.",
-            "items": {
-                "type": "string"
-            }
-        },
-        "dependencies": {
-            "type": "object",
-            "patternProperties": {
-                "^[a-zA-Z-_.0-9]+$": {
-                    "oneOf": [
-                        {
-                            "$ref": "#/definitions/dependency"
-                        },
-                        {
-                            "$ref": "#/definitions/long-dependency"
-                        },
-                        {
-                            "$ref": "#/definitions/git-dependency"
-                        },
-                        {
-                            "$ref": "#/definitions/file-dependency"
-                        },
-                        {
-                            "$ref": "#/definitions/path-dependency"
-                        },
-                        {
-                            "$ref": "#/definitions/url-dependency"
-                        },
-                        {
-                            "$ref": "#/definitions/multiple-constraints-dependency"
-                        }
-                    ]
-                }
-            }
-        },
-        "dependency": {
-            "type": "string",
-            "description": "The constraint of the dependency."
-        },
-        "long-dependency": {
-            "type": "object",
-            "required": [
-                "version"
-            ],
-            "additionalProperties": false,
-            "properties": {
-                "version": {
-                    "type": "string",
-                    "description": "The constraint of the dependency."
-                },
-                "python": {
-                    "type": "string",
-                    "description": "The python versions for which the dependency should be installed."
-                },
-                "platform": {
-                    "type": "string",
-                    "description": "The platform(s) for which the dependency should be installed."
-                },
-                "markers": {
-                    "type": "string",
-                    "description": "The PEP 508 compliant environment markers for which the dependency should be installed."
-                },
-                "allow-prereleases": {
-                    "type": "boolean",
-                    "description": "Whether the dependency allows prereleases or not."
-                },
-                "allows-prereleases": {
-                    "type": "boolean",
-                    "description": "Whether the dependency allows prereleases or not."
-                },
-                "optional": {
-                    "type": "boolean",
-                    "description": "Whether the dependency is optional or not."
-                },
-                "extras": {
-                    "type": "array",
-                    "description": "The required extras for this dependency.",
-                    "items": {
-                        "type": "string"
-                    }
-                },
-                "source": {
-                    "type": "string",
-                    "description": "The exclusive source used to search for this dependency."
-                }
-            }
-        },
-        "git-dependency": {
-            "type": "object",
-            "required": [
-                "git"
-            ],
-            "additionalProperties": false,
-            "properties": {
-                "git": {
-                    "type": "string",
-                    "description": "The url of the git repository.",
-                    "format": "uri"
-                },
-                "branch": {
-                    "type": "string",
-                    "description": "The branch to checkout."
-                },
-                "tag": {
-                    "type": "string",
-                    "description": "The tag to checkout."
-                },
-                "rev": {
-                    "type": "string",
-                    "description": "The revision to checkout."
-                },
-                "python": {
-                    "type": "string",
-                    "description": "The python versions for which the dependency should be installed."
-                },
-                "platform": {
-                    "type": "string",
-                    "description": "The platform(s) for which the dependency should be installed."
-                },
-                "markers": {
-                    "type": "string",
-                    "description": "The PEP 508 compliant environment markers for which the dependency should be installed."
-                },
-                "allow-prereleases": {
-                    "type": "boolean",
-                    "description": "Whether the dependency allows prereleases or not."
-                },
-                "allows-prereleases": {
-                    "type": "boolean",
-                    "description": "Whether the dependency allows prereleases or not."
-                },
-                "optional": {
-                    "type": "boolean",
-                    "description": "Whether the dependency is optional or not."
-                },
-                "extras": {
-                    "type": "array",
-                    "description": "The required extras for this dependency.",
-                    "items": {
-                        "type": "string"
-                    }
-                }
-            }
-        },
-        "file-dependency": {
-            "type": "object",
-            "required": [
-                "file"
-            ],
-            "additionalProperties": false,
-            "properties": {
-                "file": {
-                    "type": "string",
-                    "description": "The path to the file."
-                },
-                "python": {
-                    "type": "string",
-                    "description": "The python versions for which the dependency should be installed."
-                },
-                "platform": {
-                    "type": "string",
-                    "description": "The platform(s) for which the dependency should be installed."
-                },
-                "markers": {
-                    "type": "string",
-                    "description": "The PEP 508 compliant environment markers for which the dependency should be installed."
-                },
-                "optional": {
-                    "type": "boolean",
-                    "description": "Whether the dependency is optional or not."
-                },
-                "extras": {
-                    "type": "array",
-                    "description": "The required extras for this dependency.",
-                    "items": {
-                        "type": "string"
-                    }
-                }
-            }
-        },
-        "path-dependency": {
-            "type": "object",
-            "required": [
-                "path"
-            ],
-            "additionalProperties": false,
-            "properties": {
-                "path": {
-                    "type": "string",
-                    "description": "The path to the dependency."
-                },
-                "python": {
-                    "type": "string",
-                    "description": "The python versions for which the dependency should be installed."
-                },
-                "platform": {
-                    "type": "string",
-                    "description": "The platform(s) for which the dependency should be installed."
-                },
-                "markers": {
-                    "type": "string",
-                    "description": "The PEP 508 compliant environment markers for which the dependency should be installed."
-                },
-                "optional": {
-                    "type": "boolean",
-                    "description": "Whether the dependency is optional or not."
-                },
-                "extras": {
-                    "type": "array",
-                    "description": "The required extras for this dependency.",
-                    "items": {
-                        "type": "string"
-                    }
-                },
-                "develop": {
-                    "type": "boolean",
-                    "description": "Whether to install the dependency in development mode."
-                }
-            }
-        },
-        "url-dependency": {
-            "type": "object",
-            "required": [
-                "url"
-            ],
-            "additionalProperties": false,
-            "properties": {
-                "url": {
-                    "type": "string",
-                    "description": "The url to the file."
-                },
-                "python": {
-                    "type": "string",
-                    "description": "The python versions for which the dependency should be installed."
-                },
-                "platform": {
-                    "type": "string",
-                    "description": "The platform(s) for which the dependency should be installed."
-                },
-                "markers": {
-                    "type": "string",
-                    "description": "The PEP 508 compliant environment markers for which the dependency should be installed."
-                },
-                "optional": {
-                    "type": "boolean",
-                    "description": "Whether the dependency is optional or not."
-                },
-                "extras": {
-                    "type": "array",
-                    "description": "The required extras for this dependency.",
-                    "items": {
-                        "type": "string"
-                    }
-                }
-            }
-        },
-        "multiple-constraints-dependency": {
-            "type": "array",
-            "minItems": 1,
-            "items": {
-                "oneOf": [
-                    {
-                        "$ref": "#/definitions/dependency"
-                    },
-                    {
-                        "$ref": "#/definitions/long-dependency"
-                    },
-                    {
-                        "$ref": "#/definitions/git-dependency"
-                    },
-                    {
-                        "$ref": "#/definitions/file-dependency"
-                    },
-                    {
-                        "$ref": "#/definitions/path-dependency"
-                    },
-                    {
-                        "$ref": "#/definitions/url-dependency"
-                    }
-                ]
-            }
-        },
-        "scripts": {
-            "type": "object",
-            "patternProperties": {
-                "^[a-zA-Z-_.0-9]+$": {
-                    "oneOf": [
-                        {
-                            "$ref": "#/definitions/script"
-                        },
-                        {
-                            "$ref": "#/definitions/extra-script"
-                        }
-                    ]
-                }
-            }
-        },
-        "script": {
-            "type": "string",
-            "description": "A simple script pointing to a callable object."
-        },
-        "extra-script": {
-            "type": "object",
-            "description": "A script that should be installed only if extras are activated.",
-            "additionalProperties": false,
-            "properties": {
-                "callable": {
-                    "$ref": "#/definitions/script"
-                },
-                "extras": {
-                    "type": "array",
-                    "description": "The required extras for this script.",
-                    "items": {
-                        "type": "string"
-                    }
-                }
-            }
-        },
-        "repository": {
-            "type": "object",
-            "additionalProperties": false,
-            "properties": {
-                "name": {
-                    "type": "string",
-                    "description": "The name of the repository"
-                },
-                "url": {
-                    "type": "string",
-                    "description": "The url of the repository",
-                    "format": "uri"
-                },
-                "default": {
-                    "type": "boolean",
-                    "description": "Make this repository the default (disable PyPI)"
-                },
-                "secondary": {
-                    "type": "boolean",
-                    "description": "Declare this repository as secondary, i.e. it will only be looked up last for packages."
-                }
-            }
-        }
-    }
-}
diff --git a/conda_lock/_vendor/poetry/json/schemas/poetry.json b/conda_lock/_vendor/poetry/json/schemas/poetry.json
new file mode 100644
index 00000000..93a822d2
--- /dev/null
+++ b/conda_lock/_vendor/poetry/json/schemas/poetry.json
@@ -0,0 +1,80 @@
+{
+  "$schema": "http://json-schema.org/draft-04/schema#",
+  "additionalProperties": true,
+  "type": "object",
+  "required": [],
+  "properties": {
+    "source": {
+      "type": "array",
+      "description": "A set of additional repositories where packages can be found.",
+      "additionalProperties": {
+        "$ref": "#/definitions/repository"
+      },
+      "items": {
+        "$ref": "#/definitions/repository"
+      }
+    }
+  },
+  "definitions": {
+    "repository": {
+      "type": "object",
+      "additionalProperties": false,
+      "required": [
+        "name"
+      ],
+      "properties": {
+        "name": {
+          "type": "string",
+          "description": "The name of the repository."
+        },
+        "url": {
+          "type": "string",
+          "description": "The url of the repository.",
+          "format": "uri"
+        },
+        "default": {
+          "type": "boolean",
+          "description": "Make this repository the default (disable PyPI). (deprecated, see priority)"
+        },
+        "secondary": {
+          "type": "boolean",
+          "description": "Declare this repository as secondary, i.e. default repositories take precedence. (deprecated, see priority)"
+        },
+        "priority": {
+          "enum": [
+            "primary",
+            "default",
+            "secondary",
+            "supplemental",
+            "explicit"
+          ],
+          "description": "Declare the priority of this repository."
+        },
+        "links": {
+          "type": "boolean",
+          "description": "Declare this as a link source. Links at uri/path can point to sdist or bdist archives."
+        },
+        "indexed": {
+          "type": "boolean",
+          "description": "For PEP 503 simple API repositories, pre-fetch and index the available packages. (experimental)"
+        }
+      },
+      "not": {
+        "anyOf": [
+          {
+            "required": [
+              "priority",
+              "default"
+            ]
+          },
+          {
+            "required": [
+              "priority",
+              "secondary"
+            ]
+          }
+        ]
+      }
+    }
+  }
+}
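
The "not"/"anyOf" clause at the end of the repository definition encodes that the new "priority" key cannot be combined with the deprecated "default"/"secondary" booleans. A hedged restatement of that rule in plain Python (the source entries are made up):

    def mixes_deprecated_flags(source: dict) -> bool:
        # Mirrors the schema's "not"/"anyOf" clause: "priority" together
        # with "default" or "secondary" is rejected.
        return "priority" in source and (
            "default" in source or "secondary" in source
        )

    ok = {"name": "internal", "url": "https://pypi.example.com/simple", "priority": "explicit"}
    bad = {"name": "internal", "priority": "primary", "default": True}

    assert not mixes_deprecated_flags(ok)
    assert mixes_deprecated_flags(bad)
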
diff --git a/conda_lock/_vendor/poetry/layouts/__init__.py b/conda_lock/_vendor/poetry/layouts/__init__.py
index 9969ce5e..0227ed72 100644
--- a/conda_lock/_vendor/poetry/layouts/__init__.py
+++ b/conda_lock/_vendor/poetry/layouts/__init__.py
@@ -1,14 +1,13 @@
-from typing import Type
+from __future__ import annotations
 
-from .layout import Layout
-from .src import SrcLayout
-from .standard import StandardLayout
+from conda_lock._vendor.poetry.layouts.layout import Layout
+from conda_lock._vendor.poetry.layouts.src import SrcLayout
 
 
-_LAYOUTS = {"src": SrcLayout, "standard": StandardLayout}
+_LAYOUTS = {"src": SrcLayout, "standard": Layout}
 
 
-def layout(name):  # type: (str) -> Type[Layout]
+def layout(name: str) -> type[Layout]:
     if name not in _LAYOUTS:
         raise ValueError("Invalid layout")
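
For reference, a quick usage sketch of the factory above; with StandardLayout deleted, "standard" now maps straight to the base Layout class:

    from conda_lock._vendor.poetry.layouts import layout

    src_cls = layout("src")        # -> SrcLayout
    flat_cls = layout("standard")  # -> Layout, the flat default
    try:
        layout("unknown")
    except ValueError:
        pass  # any other name raises ValueError("Invalid layout")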
 
diff --git a/conda_lock/_vendor/poetry/layouts/layout.py b/conda_lock/_vendor/poetry/layouts/layout.py
index c8bc0afd..056dd9f5 100644
--- a/conda_lock/_vendor/poetry/layouts/layout.py
+++ b/conda_lock/_vendor/poetry/layouts/layout.py
@@ -1,22 +1,23 @@
+from __future__ import annotations
+
+from pathlib import Path
 from typing import TYPE_CHECKING
-from typing import Optional
+from typing import Any
 
-from tomlkit import dumps
+from packaging.utils import canonicalize_name
+from conda_lock._vendor.poetry.core.utils.helpers import module_name
+from tomlkit import inline_table
 from tomlkit import loads
 from tomlkit import table
+from tomlkit.toml_document import TOMLDocument
 
-from conda_lock._vendor.poetry.utils.helpers import module_name
+from conda_lock._vendor.poetry.pyproject.toml import PyProjectTOML
 
 
 if TYPE_CHECKING:
-    from conda_lock._vendor.poetry.core.pyproject.toml import PyProjectTOML
-
-TESTS_DEFAULT = u"""from {package_name} import __version__
-
+    from collections.abc import Mapping
 
-def test_version():
-    assert __version__ == '{version}'
-"""
+    from tomlkit.items import InlineTable
 
 
 POETRY_DEFAULT = """\
@@ -25,47 +26,42 @@ def test_version():
 version = ""
 description = ""
 authors = []
-
-[tool.poetry.dependencies]
-
-[tool.poetry.dev-dependencies]
-"""
-
-POETRY_WITH_LICENSE = """\
-[tool.poetry]
-name = ""
-version = ""
-description = ""
-authors = []
 license = ""
+readme = ""
+packages = []
 
 [tool.poetry.dependencies]
 
-[tool.poetry.dev-dependencies]
+[tool.poetry.group.dev.dependencies]
 """
 
-BUILD_SYSTEM_MIN_VERSION = "1.0.0"
-BUILD_SYSTEM_MAX_VERSION = None
+BUILD_SYSTEM_MIN_VERSION: str | None = None
+BUILD_SYSTEM_MAX_VERSION: str | None = None
 
 
-class Layout(object):
+class Layout:
     def __init__(
         self,
-        project,
-        version="0.1.0",
-        description="",
-        readme_format="md",
-        author=None,
-        license=None,
-        python="*",
-        dependencies=None,
-        dev_dependencies=None,
-    ):
-        self._project = project
-        self._package_name = module_name(project)
+        project: str,
+        version: str = "0.1.0",
+        description: str = "",
+        readme_format: str = "md",
+        author: str | None = None,
+        license: str | None = None,
+        python: str = "*",
+        dependencies: Mapping[str, str | Mapping[str, Any]] | None = None,
+        dev_dependencies: Mapping[str, str | Mapping[str, Any]] | None = None,
+    ) -> None:
+        self._project = canonicalize_name(project)
+        self._package_path_relative = Path(
+            *(module_name(part) for part in project.split("."))
+        )
+        self._package_name = ".".join(self._package_path_relative.parts)
         self._version = version
         self._description = description
-        self._readme_format = readme_format
+
+        self._readme_format = readme_format.lower()
+
         self._license = license
         self._python = python
         self._dependencies = dependencies or {}
@@ -76,7 +72,38 @@ def __init__(
 
         self._author = author
 
-    def create(self, path, with_tests=True):
+    @property
+    def basedir(self) -> Path:
+        return Path()
+
+    @property
+    def package_path(self) -> Path:
+        return self.basedir / self._package_path_relative
+
+    def get_package_include(self) -> InlineTable | None:
+        package = inline_table()
+
+        # If a project is created in the root directory (this is reasonable
+        # inside a Docker container, e.g.), then parts will be empty.
+        parts = self._package_path_relative.parts
+        if not parts:
+            return None
+
+        include = parts[0]
+        package.append("include", include)
+
+        if self.basedir != Path():
+            package.append("from", self.basedir.as_posix())
+        else:
+            if module_name(self._project) == include:
+                # package include and package name are the same,
+                # packages table is redundant here.
+                return None
+
+        return package
+
+    def create(self, path: Path, with_tests: bool = True) -> None:
         path.mkdir(parents=True, exist_ok=True)
 
         self._create_default(path)
@@ -87,78 +114,84 @@ def create(self, path, with_tests=True):
 
         self._write_poetry(path)
 
-    def generate_poetry_content(
-        self, original=None
-    ):  # type: (Optional["PyProjectTOML"]) -> str
+    def generate_poetry_content(self) -> TOMLDocument:
         template = POETRY_DEFAULT
-        if self._license:
-            template = POETRY_WITH_LICENSE
 
-        content = loads(template)
+        content: dict[str, Any] = loads(template)
+
         poetry_content = content["tool"]["poetry"]
         poetry_content["name"] = self._project
         poetry_content["version"] = self._version
         poetry_content["description"] = self._description
         poetry_content["authors"].append(self._author)
+
         if self._license:
             poetry_content["license"] = self._license
+        else:
+            poetry_content.remove("license")
+
+        poetry_content["readme"] = f"README.{self._readme_format}"
+        packages = self.get_package_include()
+        if packages:
+            poetry_content["packages"].append(packages)
+        else:
+            poetry_content.remove("packages")
 
         poetry_content["dependencies"]["python"] = self._python
 
         for dep_name, dep_constraint in self._dependencies.items():
             poetry_content["dependencies"][dep_name] = dep_constraint
 
-        for dep_name, dep_constraint in self._dev_dependencies.items():
-            poetry_content["dev-dependencies"][dep_name] = dep_constraint
+        if self._dev_dependencies:
+            for dep_name, dep_constraint in self._dev_dependencies.items():
+                poetry_content["group"]["dev"]["dependencies"][
+                    dep_name
+                ] = dep_constraint
+        else:
+            del poetry_content["group"]
 
         # Add build system
         build_system = table()
-        build_system_version = ">=" + BUILD_SYSTEM_MIN_VERSION
+        build_system_version = ""
+
+        if BUILD_SYSTEM_MIN_VERSION is not None:
+            build_system_version = ">=" + BUILD_SYSTEM_MIN_VERSION
         if BUILD_SYSTEM_MAX_VERSION is not None:
-            build_system_version += ",<" + BUILD_SYSTEM_MAX_VERSION
+            if build_system_version:
+                build_system_version += ","
+            build_system_version += "<" + BUILD_SYSTEM_MAX_VERSION
 
         build_system.add("requires", ["poetry-core" + build_system_version])
         build_system.add("build-backend", "poetry.core.masonry.api")
 
+        assert isinstance(content, TOMLDocument)
         content.add("build-system", build_system)
 
-        content = dumps(content)
-
-        if original and original.file.exists():
-            content = dumps(original.data) + "\n" + content
-
         return content
 
-    def _create_default(self, path, src=True):
-        raise NotImplementedError()
+    def _create_default(self, path: Path, src: bool = True) -> None:
+        package_path = path / self.package_path
+        package_path.mkdir(parents=True)
 
-    def _create_readme(self, path):
-        if self._readme_format == "rst":
-            readme_file = path / "README.rst"
-        else:
-            readme_file = path / "README.md"
+        package_init = package_path / "__init__.py"
+        package_init.touch()
 
+    def _create_readme(self, path: Path) -> Path:
+        readme_file = path.joinpath(f"README.{self._readme_format}")
         readme_file.touch()
+        return readme_file
 
-    def _create_tests(self, path):
+    @staticmethod
+    def _create_tests(path: Path) -> None:
         tests = path / "tests"
-        tests_init = tests / "__init__.py"
-        tests_default = tests / "test_{}.py".format(self._package_name)
-
         tests.mkdir()
-        tests_init.touch(exist_ok=False)
 
-        with tests_default.open("w", encoding="utf-8") as f:
-            f.write(
-                TESTS_DEFAULT.format(
-                    package_name=self._package_name, version=self._version
-                )
-            )
+        tests_init = tests / "__init__.py"
+        tests_init.touch(exist_ok=False)
 
-    def _write_poetry(self, path):
+    def _write_poetry(self, path: Path) -> None:
+        pyproject = PyProjectTOML(path / "pyproject.toml")
         content = self.generate_poetry_content()
-
-        poetry = path / "pyproject.toml"
-
-        with poetry.open("w", encoding="utf-8") as f:
-            f.write(content)
+        for section, item in content.items():
+            pyproject.data.append(section, item)
+        pyproject.save()
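
Putting the rewritten class together, a hedged end-to-end sketch (the project name, author, and target path are made up):

    from pathlib import Path

    from conda_lock._vendor.poetry.layouts.layout import Layout

    layout = Layout(
        "my-project",
        version="0.1.0",
        author="Jane Doe <jane@example.com>",
        python="^3.10",
    )
    # Creates my_project/__init__.py, README.md, tests/__init__.py, and a
    # pyproject.toml with a [tool.poetry] table plus a build-system section.
    layout.create(Path("/tmp/my-project"))
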
diff --git a/conda_lock/_vendor/poetry/layouts/src.py b/conda_lock/_vendor/poetry/layouts/src.py
index 06db7a71..108d9731 100644
--- a/conda_lock/_vendor/poetry/layouts/src.py
+++ b/conda_lock/_vendor/poetry/layouts/src.py
@@ -1,19 +1,11 @@
-# -*- coding: utf-8 -*-
+from __future__ import annotations
 
-from .layout import Layout
+from pathlib import Path
 
-
-DEFAULT = u"""__version__ = '{version}'
-"""
+from conda_lock._vendor.poetry.layouts.layout import Layout
 
 
 class SrcLayout(Layout):
-    def _create_default(self, path):
-        package_path = path / "src" / self._package_name
-
-        package_init = package_path / "__init__.py"
-
-        package_path.mkdir(parents=True)
-
-        with package_init.open("w", encoding="utf-8") as f:
-            f.write(DEFAULT.format(version=self._version))
+    @property
+    def basedir(self) -> Path:
+        return Path("src")
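
Since SrcLayout now only overrides basedir, the package directory simply shifts under src/ while everything else is inherited; a quick sketch:

    from conda_lock._vendor.poetry.layouts.src import SrcLayout

    print(SrcLayout("my-project").package_path)  # src/my_project on POSIX
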
diff --git a/conda_lock/_vendor/poetry/layouts/standard.py b/conda_lock/_vendor/poetry/layouts/standard.py
index eca4c435..e69de29b 100644
--- a/conda_lock/_vendor/poetry/layouts/standard.py
+++ b/conda_lock/_vendor/poetry/layouts/standard.py
@@ -1,19 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from .layout import Layout
-
-
-DEFAULT = u"""__version__ = '{version}'
-"""
-
-
-class StandardLayout(Layout):
-    def _create_default(self, path):
-        package_path = path / self._package_name
-
-        package_init = package_path / "__init__.py"
-
-        package_path.mkdir()
-
-        with package_init.open("w", encoding="utf-8") as f:
-            f.write(DEFAULT.format(version=self._version))
diff --git a/conda_lock/_vendor/poetry/locations.py b/conda_lock/_vendor/poetry/locations.py
index e4508705..77d8e6c3 100644
--- a/conda_lock/_vendor/poetry/locations.py
+++ b/conda_lock/_vendor/poetry/locations.py
@@ -1,19 +1,53 @@
+from __future__ import annotations
+
+import logging
 import os
+import sys
+
+from pathlib import Path
+
+from platformdirs import user_cache_path
+from platformdirs import user_config_path
+from platformdirs import user_data_path
+
+
+logger = logging.getLogger(__name__)
 
-from .utils._compat import Path
-from .utils.appdirs import user_cache_dir
-from .utils.appdirs import user_config_dir
-from .utils.appdirs import user_data_dir
+_APP_NAME = "pypoetry-conda-lock"
 
+DEFAULT_CACHE_DIR = user_cache_path(_APP_NAME, appauthor=False)
+CONFIG_DIR = Path(
+    os.getenv("POETRY_CONFIG_DIR")
+    or user_config_path(_APP_NAME, appauthor=False, roaming=True)
+)
 
-CACHE_DIR = user_cache_dir("pypoetry-conda-lock")
-CONFIG_DIR = user_config_dir("pypoetry-conda-lock")
+# platformdirs 2.0.0 corrected the OSX/macOS config directory from
+# /Users/<user>/Library/Application Support/<appname> to
+# /Users/<user>/Library/Preferences/<appname>.
+#
+# Then platformdirs 3.0.0 corrected it back again!
+#
+# Treat Preferences as deprecated, and hope that this is finally decided.
+if sys.platform == "darwin":
+    _LEGACY_CONFIG_DIR = CONFIG_DIR.parent.parent / "Preferences" / _APP_NAME
+    config_toml = _LEGACY_CONFIG_DIR / "config.toml"
+    auth_toml = _LEGACY_CONFIG_DIR / "auth.toml"
 
-REPOSITORY_CACHE_DIR = Path(CACHE_DIR) / "cache" / "repositories"
+    if any(file.exists() for file in (auth_toml, config_toml)):
+        logger.warning(
+            "Configuration file exists at %s, reusing this"
+            " directory.\n\nConsider moving TOML configuration files to %s, as"
+            " support for the legacy directory will be removed in an upcoming"
+            " release.",
+            _LEGACY_CONFIG_DIR,
+            CONFIG_DIR,
+        )
+        CONFIG_DIR = _LEGACY_CONFIG_DIR
 
 
-def data_dir():  # type: () -> Path
-    if os.getenv("POETRY_HOME"):
-        return Path(os.getenv("POETRY_HOME")).expanduser()
+def data_dir() -> Path:
+    poetry_home = os.getenv("POETRY_HOME")
+    if poetry_home:
+        return Path(poetry_home).expanduser()
 
-    return Path(user_data_dir("pypoetry-conda-lock", roaming=True))
+    return user_data_path(_APP_NAME, appauthor=False, roaming=True)
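
The platformdirs helpers used above resolve to OS-specific locations; a sketch of what to expect (the paths shown are typical, not guaranteed, and POETRY_CONFIG_DIR still wins when set):

    from platformdirs import user_cache_path, user_config_path

    print(user_cache_path("pypoetry-conda-lock", appauthor=False))
    # Linux:   ~/.cache/pypoetry-conda-lock
    # macOS:   ~/Library/Caches/pypoetry-conda-lock
    # Windows: %LOCALAPPDATA%\pypoetry-conda-lock\Cache

    print(user_config_path("pypoetry-conda-lock", appauthor=False, roaming=True))
    # Linux:   ~/.config/pypoetry-conda-lock
    # macOS:   ~/Library/Application Support/pypoetry-conda-lock
    # Windows: %APPDATA%\pypoetry-conda-lock
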
diff --git a/conda_lock/_vendor/poetry/masonry/api.py b/conda_lock/_vendor/poetry/masonry/api.py
index c6b6e3a3..f5dc6090 100644
--- a/conda_lock/_vendor/poetry/masonry/api.py
+++ b/conda_lock/_vendor/poetry/masonry/api.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 from conda_lock._vendor.poetry.core.masonry.api import build_sdist
 from conda_lock._vendor.poetry.core.masonry.api import build_wheel
 from conda_lock._vendor.poetry.core.masonry.api import get_requires_for_build_sdist
diff --git a/conda_lock/_vendor/poetry/masonry/builders/__init__.py b/conda_lock/_vendor/poetry/masonry/builders/__init__.py
index f1f02b72..606d5628 100644
--- a/conda_lock/_vendor/poetry/masonry/builders/__init__.py
+++ b/conda_lock/_vendor/poetry/masonry/builders/__init__.py
@@ -1 +1,16 @@
-from .editable import EditableBuilder
+from __future__ import annotations
+
+from conda_lock._vendor.poetry.core.masonry.builders.sdist import SdistBuilder
+from conda_lock._vendor.poetry.core.masonry.builders.wheel import WheelBuilder
+
+from conda_lock._vendor.poetry.masonry.builders.editable import EditableBuilder
+
+
+__all__ = ["BUILD_FORMATS", "EditableBuilder"]
+
+
+# might be extended by plugins
+BUILD_FORMATS = {
+    "sdist": SdistBuilder,
+    "wheel": WheelBuilder,
+}
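
Since BUILD_FORMATS is documented as plugin-extensible, callers would look builders up by name rather than hard-code them; a hypothetical lookup:

    from conda_lock._vendor.poetry.masonry.builders import BUILD_FORMATS

    fmt = "wheel"
    builder_class = BUILD_FORMATS.get(fmt)
    if builder_class is None:
        raise ValueError(f"Invalid format: {fmt}")
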
diff --git a/conda_lock/_vendor/poetry/masonry/builders/editable.py b/conda_lock/_vendor/poetry/masonry/builders/editable.py
index ec925e03..2730ab81 100644
--- a/conda_lock/_vendor/poetry/masonry/builders/editable.py
+++ b/conda_lock/_vendor/poetry/masonry/builders/editable.py
@@ -1,27 +1,39 @@
-from __future__ import unicode_literals
+from __future__ import annotations
 
+import csv
 import hashlib
+import json
+import locale
 import os
-import shutil
 
 from base64 import urlsafe_b64encode
+from pathlib import Path
+from typing import TYPE_CHECKING
 
 from conda_lock._vendor.poetry.core.masonry.builders.builder import Builder
 from conda_lock._vendor.poetry.core.masonry.builders.sdist import SdistBuilder
 from conda_lock._vendor.poetry.core.masonry.utils.package_include import PackageInclude
-from conda_lock._vendor.poetry.core.semver.version import Version
+
 from conda_lock._vendor.poetry.utils._compat import WINDOWS
-from conda_lock._vendor.poetry.utils._compat import Path
 from conda_lock._vendor.poetry.utils._compat import decode
+from conda_lock._vendor.poetry.utils.env import build_environment
 from conda_lock._vendor.poetry.utils.helpers import is_dir_writable
+from conda_lock._vendor.poetry.utils.pip import pip_install
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.cleo.io.io import IO
 
+    from conda_lock._vendor.poetry.poetry import Poetry
+    from conda_lock._vendor.poetry.utils.env import Env
 
 SCRIPT_TEMPLATE = """\
 #!{python}
+import sys
 from {module} import {callable_holder}
 
 if __name__ == '__main__':
-    {callable_}()
+    sys.exit({callable_}())
 """
 
 WINDOWS_CMD_TEMPLATE = """\
@@ -30,17 +42,17 @@
 
 
 class EditableBuilder(Builder):
-    def __init__(self, poetry, env, io):
-        super(EditableBuilder, self).__init__(poetry)
+    def __init__(self, poetry: Poetry, env: Env, io: IO) -> None:
+        self._poetry: Poetry
+        super().__init__(poetry)
 
         self._env = env
         self._io = io
 
-    def build(self):
+    def build(self, target_dir: Path | None = None) -> Path:
         self._debug(
-            "  - Building package {} in editable mode".format(
-                self._package.name
-            )
+            f"  - Building package {self._package.name} in"
+            " editable mode"
         )
 
         if self._package.build_script:
@@ -48,27 +60,38 @@ def build(self):
                 self._debug(
                     "  - Falling back on using a setup.py"
                 )
-
-                return self._setup_build()
+                self._setup_build()
+                return self._path
 
             self._run_build_script(self._package.build_script)
 
+        for removed in self._env.site_packages.remove_distribution_files(
+            distribution_name=self._package.name
+        ):
+            self._debug(
+                f"  - Removed {removed.name} directory from"
+                f" {removed.parent}"
+            )
+
         added_files = []
         added_files += self._add_pth()
         added_files += self._add_scripts()
         self._add_dist_info(added_files)
 
-    def _run_build_script(self, build_script):
-        self._debug("  - Executing build script: {}".format(build_script))
-        self._env.run("python", str(self._path.joinpath(build_script)), call=True)
+        return self._path
 
-    def _setup_build(self):
+    def _run_build_script(self, build_script: str) -> None:
+        with build_environment(poetry=self._poetry, env=self._env, io=self._io) as env:
+            self._debug(f"  - Executing build script: {build_script}")
+            env.run("python", str(self._path.joinpath(build_script)), call=True)
+
+    def _setup_build(self) -> None:
         builder = SdistBuilder(self._poetry)
         setup = self._path / "setup.py"
         has_setup = setup.exists()
 
         if has_setup:
-            self._io.write_line(
+            self._io.write_error_line(
                 "A setup.py file already exists. Using it."
             )
         else:
@@ -76,57 +99,48 @@ def _setup_build(self):
                 f.write(decode(builder.build_setup()))
 
         try:
-            if self._env.pip_version < Version(19, 0):
-                self._env.run_pip("install", "-e", str(self._path), "--no-deps")
-            else:
-                # Temporarily rename pyproject.toml
-                shutil.move(
-                    str(self._poetry.file), str(self._poetry.file.with_suffix(".tmp"))
-                )
-                try:
-                    self._env.run_pip("install", "-e", str(self._path), "--no-deps")
-                finally:
-                    shutil.move(
-                        str(self._poetry.file.with_suffix(".tmp")),
-                        str(self._poetry.file),
-                    )
+            pip_install(self._path, self._env, upgrade=True, editable=True)
         finally:
             if not has_setup:
-                os.remove(str(setup))
+                os.remove(setup)
 
-    def _add_pth(self):
-        paths = set()
-        for include in self._module.includes:
-            if isinstance(include, PackageInclude) and (
-                include.is_module() or include.is_package()
-            ):
-                paths.add(include.base.resolve().as_posix())
-
-        content = ""
-        for path in paths:
-            content += decode(path + os.linesep)
+    def _add_pth(self) -> list[Path]:
+        paths = {
+            include.base.resolve().as_posix()
+            for include in self._module.includes
+            if isinstance(include, PackageInclude)
+            and (include.is_module() or include.is_package())
+        }
 
+        content = "".join(decode(path + os.linesep) for path in paths)
         pth_file = Path(self._module.name).with_suffix(".pth")
+
+        # remove any pre-existing pth files for this package
+        for file in self._env.site_packages.find(path=pth_file, writable_only=True):
+            self._debug(
+                f"  - Removing existing {file.name} from {file.parent}"
+                f" for {self._poetry.file.path.parent}"
+            )
+            file.unlink(missing_ok=True)
+
         try:
             pth_file = self._env.site_packages.write_text(
-                pth_file, content, encoding="utf-8"
+                pth_file, content, encoding=locale.getpreferredencoding()
             )
             self._debug(
-                "  - Adding {} to {} for {}".format(
-                    pth_file.name, pth_file.parent, self._poetry.file.parent
-                )
+                f"  - Adding {pth_file.name} to {pth_file.parent} for"
+                f" {self._poetry.file.path.parent}"
             )
             return [pth_file]
         except OSError:
             # TODO: Replace with PermissionError
-            self._io.error_line(
-                "  - Failed to create {} for {}".format(
-                    pth_file.name, self._poetry.file.parent
-                )
+            self._io.write_error_line(
+                f"  - Failed to create {pth_file.name} for"
+                f" {self._poetry.file.path.parent}"
             )
             return []
 
-    def _add_scripts(self):
+    def _add_scripts(self) -> list[Path]:
         added = []
         entry_points = self.convert_entry_points()
 
@@ -134,24 +148,40 @@ def _add_scripts(self):
             if is_dir_writable(path=scripts_path, create=True):
                 break
         else:
-            self._io.error_line(
-                "  - Failed to find a suitable script installation directory for {}".format(
-                    self._poetry.file.parent
-                )
+            self._io.write_error_line(
+                "  - Failed to find a suitable script installation directory for"
+                f" {self._poetry.file.path.parent}"
             )
             return []
 
         scripts = entry_points.get("console_scripts", [])
         for script in scripts:
-            name, script = script.split(" = ")
-            module, callable_ = script.split(":")
+            name, script_with_extras = script.split(" = ")
+            script_without_extras = script_with_extras.split("[")[0]
+            try:
+                module, callable_ = script_without_extras.split(":")
+            except ValueError as exc:
+                msg = (
+                    f"Bad script ({name}): script needs to specify a function within a"
+                    " module like: module(.submodule):function\nInstead got:"
+                    f" {script_with_extras}"
+                )
+                if "not enough values" in str(exc):
+                    msg += (
+                        "\nHint: If the script depends on module-level code, try"
+                        " wrapping it in a main() function and modifying your script"
+                        f' like:\n{name} = "{script_with_extras}:main"'
+                    )
+                elif "too many values" in str(exc):
+                    msg += '\nToo many ":" found!'
+
+                raise ValueError(msg)
+
             callable_holder = callable_.split(".", 1)[0]
 
             script_file = scripts_path.joinpath(name)
             self._debug(
-                "  - Adding the {} script to {}".format(
-                    name, scripts_path
-                )
+                f"  - Adding the {name} script to {scripts_path}"
             )
             with script_file.open("w", encoding="utf-8") as f:
                 f.write(
@@ -173,9 +203,8 @@ def _add_scripts(self):
                 cmd_script = script_file.with_suffix(".cmd")
                 cmd = WINDOWS_CMD_TEMPLATE.format(python=self._env.python, script=name)
                 self._debug(
-                    "  - Adding the {} script wrapper to {}".format(
-                        cmd_script.name, scripts_path
-                    )
+                    f"  - Adding the {cmd_script.name} script wrapper to"
+                    f" {scripts_path}"
                 )
 
                 with cmd_script.open("w", encoding="utf-8") as f:
@@ -185,31 +214,17 @@ def _add_scripts(self):
 
         return added
 
-    def _add_dist_info(self, added_files):
+    def _add_dist_info(self, added_files: list[Path]) -> None:
         from conda_lock._vendor.poetry.core.masonry.builders.wheel import WheelBuilder
 
         added_files = added_files[:]
 
         builder = WheelBuilder(self._poetry)
-
-        dist_info_path = Path(builder.dist_info)
-        for dist_info in self._env.site_packages.find(
-            dist_info_path, writable_only=True
-        ):
-            if dist_info.exists():
-                self._debug(
-                    "  - Removing existing {} directory from {}".format(
-                        dist_info.name, dist_info.parent
-                    )
-                )
-                shutil.rmtree(str(dist_info))
-
-        dist_info = self._env.site_packages.mkdir(dist_info_path)
+        dist_info = self._env.site_packages.mkdir(Path(builder.dist_info))
 
         self._debug(
-            "  - Adding the {} directory to {}".format(
-                dist_info.name, dist_info.parent
-            )
+            f"  - Adding the {dist_info.name} directory to"
+            f" {dist_info.parent}"
         )
 
         with dist_info.joinpath("METADATA").open("w", encoding="utf-8") as f:
@@ -230,16 +245,30 @@ def _add_dist_info(self, added_files):
 
             added_files.append(dist_info.joinpath("entry_points.txt"))
 
-        with dist_info.joinpath("RECORD").open("w", encoding="utf-8") as f:
+        # write PEP 610 metadata
+        direct_url_json = dist_info.joinpath("direct_url.json")
+        direct_url_json.write_text(
+            json.dumps(
+                {
+                    "dir_info": {"editable": True},
+                    "url": self._poetry.file.path.parent.absolute().as_uri(),
+                }
+            )
+        )
+        added_files.append(direct_url_json)
+
+        record = dist_info.joinpath("RECORD")
+        with record.open("w", encoding="utf-8", newline="") as f:
+            csv_writer = csv.writer(f)
             for path in added_files:
                 hash = self._get_file_hash(path)
                 size = path.stat().st_size
-                f.write("{},sha256={},{}\n".format(str(path), hash, size))
+                csv_writer.writerow((path, f"sha256={hash}", size))
 
             # RECORD itself is recorded with no hash or size
-            f.write("{},,\n".format(dist_info.joinpath("RECORD")))
+            csv_writer.writerow((record, "", ""))
 
-    def _get_file_hash(self, filepath):
+    def _get_file_hash(self, filepath: Path) -> str:
         hashsum = hashlib.sha256()
         with filepath.open("rb") as src:
             while True:
@@ -252,6 +281,6 @@ def _get_file_hash(self, filepath):
 
         return urlsafe_b64encode(hashsum.digest()).decode("ascii").rstrip("=")
 
-    def _debug(self, msg):
+    def _debug(self, msg: str) -> None:
         if self._io.is_debug():
             self._io.write_line(msg)
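
Much of the editable build above rests on a standard CPython mechanism: at interpreter startup, site.py reads every *.pth file in site-packages and appends each line to sys.path, which is how the project source tree becomes importable without copying it. A standalone sketch under made-up paths:

    from pathlib import Path

    site_packages = Path("/tmp/venv/lib/python3.10/site-packages")
    site_packages.mkdir(parents=True, exist_ok=True)

    # One absolute path per line; site.py adds each to sys.path on startup.
    pth = site_packages / "my_project.pth"
    pth.write_text("/home/jane/src/my-project\n", encoding="utf-8")
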
diff --git a/conda_lock/_vendor/poetry/mixology/__init__.py b/conda_lock/_vendor/poetry/mixology/__init__.py
index 50fbffb2..3e29eb24 100644
--- a/conda_lock/_vendor/poetry/mixology/__init__.py
+++ b/conda_lock/_vendor/poetry/mixology/__init__.py
@@ -1,7 +1,18 @@
-from .version_solver import VersionSolver
+from __future__ import annotations
 
+from typing import TYPE_CHECKING
 
-def resolve_version(root, provider, locked=None, use_latest=None):
-    solver = VersionSolver(root, provider, locked=locked, use_latest=use_latest)
+from conda_lock._vendor.poetry.mixology.version_solver import VersionSolver
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.core.packages.project_package import ProjectPackage
+
+    from conda_lock._vendor.poetry.mixology.result import SolverResult
+    from conda_lock._vendor.poetry.puzzle.provider import Provider
+
+
+def resolve_version(root: ProjectPackage, provider: Provider) -> SolverResult:
+    solver = VersionSolver(root, provider)
 
     return solver.solve()
diff --git a/conda_lock/_vendor/poetry/mixology/assignment.py b/conda_lock/_vendor/poetry/mixology/assignment.py
index e288c5da..80214b9b 100644
--- a/conda_lock/_vendor/poetry/mixology/assignment.py
+++ b/conda_lock/_vendor/poetry/mixology/assignment.py
@@ -1,7 +1,15 @@
-from typing import Any
+from __future__ import annotations
 
-from .incompatibility import Incompatibility
-from .term import Term
+from typing import TYPE_CHECKING
+
+from conda_lock._vendor.poetry.mixology.term import Term
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.core.packages.dependency import Dependency
+    from conda_lock._vendor.poetry.core.packages.package import Package
+
+    from conda_lock._vendor.poetry.mixology.incompatibility import Incompatibility
 
 
 class Assignment(Term):
@@ -9,36 +17,46 @@ class Assignment(Term):
     A term in a PartialSolution that tracks some additional metadata.
     """
 
-    def __init__(self, dependency, is_positive, decision_level, index, cause=None):
-        super(Assignment, self).__init__(dependency, is_positive)
+    def __init__(
+        self,
+        dependency: Dependency,
+        is_positive: bool,
+        decision_level: int,
+        index: int,
+        cause: Incompatibility | None = None,
+    ) -> None:
+        super().__init__(dependency, is_positive)
 
         self._decision_level = decision_level
         self._index = index
         self._cause = cause
 
     @property
-    def decision_level(self):  # type: () -> int
+    def decision_level(self) -> int:
         return self._decision_level
 
     @property
-    def index(self):  # type: () -> int
+    def index(self) -> int:
         return self._index
 
     @property
-    def cause(self):  # type: () -> Incompatibility
+    def cause(self) -> Incompatibility | None:
         return self._cause
 
     @classmethod
-    def decision(
-        cls, package, decision_level, index
-    ):  # type: (Any, int, int) -> Assignment
+    def decision(cls, package: Package, decision_level: int, index: int) -> Assignment:
         return cls(package.to_dependency(), True, decision_level, index)
 
     @classmethod
     def derivation(
-        cls, dependency, is_positive, cause, decision_level, index
-    ):  # type: (Any, bool, Incompatibility, int, int) -> Assignment
+        cls,
+        dependency: Dependency,
+        is_positive: bool,
+        cause: Incompatibility,
+        decision_level: int,
+        index: int,
+    ) -> Assignment:
         return cls(dependency, is_positive, decision_level, index, cause)
 
-    def is_decision(self):  # type: () -> bool
+    def is_decision(self) -> bool:
         return self._cause is None
diff --git a/conda_lock/_vendor/poetry/mixology/failure.py b/conda_lock/_vendor/poetry/mixology/failure.py
index 2f53b05b..bdb236f8 100644
--- a/conda_lock/_vendor/poetry/mixology/failure.py
+++ b/conda_lock/_vendor/poetry/mixology/failure.py
@@ -1,36 +1,39 @@
-from typing import Dict
-from typing import List
-from typing import Tuple
+from __future__ import annotations
 
-from conda_lock._vendor.poetry.core.semver import parse_constraint
+from typing import TYPE_CHECKING
 
-from .incompatibility import Incompatibility
-from .incompatibility_cause import ConflictCause
-from .incompatibility_cause import PythonCause
+from conda_lock._vendor.poetry.core.constraints.version import parse_constraint
+
+from conda_lock._vendor.poetry.mixology.incompatibility_cause import ConflictCause
+from conda_lock._vendor.poetry.mixology.incompatibility_cause import PythonCause
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.mixology.incompatibility import Incompatibility
 
 
 class SolveFailure(Exception):
-    def __init__(self, incompatibility):  # type: (Incompatibility) -> None
+    def __init__(self, incompatibility: Incompatibility) -> None:
         self._incompatibility = incompatibility
 
     @property
-    def message(self):
+    def message(self) -> str:
         return str(self)
 
-    def __str__(self):
+    def __str__(self) -> str:
         return _Writer(self._incompatibility).write()
 
 
 class _Writer:
-    def __init__(self, root):  # type: (Incompatibility) -> None
+    def __init__(self, root: Incompatibility) -> None:
         self._root = root
-        self._derivations = {}  # type: Dict[Incompatibility, int]
-        self._lines = []  # type: List[Tuple[str, int]]
-        self._line_numbers = {}  # type: Dict[Incompatibility, int]
+        self._derivations: dict[Incompatibility, int] = {}
+        self._lines: list[tuple[str, int | None]] = []
+        self._line_numbers: dict[Incompatibility, int] = {}
 
         self._count_derivations(self._root)
 
-    def write(self):
+    def write(self) -> str:
         buffer = []
 
         required_python_version_notification = False
@@ -38,11 +41,10 @@ def write(self):
             if isinstance(incompatibility.cause, PythonCause):
                 if not required_python_version_notification:
                     buffer.append(
-                        "The current project's Python requirement ({}) "
-                        "is not compatible with some of the required "
-                        "packages Python requirement:".format(
-                            incompatibility.cause.root_python_version
-                        )
+                        "The current project's supported Python range"
+                        f" ({incompatibility.cause.root_python_version}) is not"
+                        " compatible with some of the required packages Python"
+                        " requirement:"
                     )
                     required_python_version_notification = True
 
@@ -51,27 +53,23 @@ def write(self):
                 )
                 constraint = parse_constraint(incompatibility.cause.python_version)
                 buffer.append(
-                    "  - {} requires Python {}, so it will not be satisfied for Python {}".format(
-                        incompatibility.terms[0].dependency.name,
-                        incompatibility.cause.python_version,
-                        root_constraint.difference(constraint),
-                    )
+                    f"  - {incompatibility.terms[0].dependency.name} requires Python"
+                    f" {incompatibility.cause.python_version}, so it will not be"
+                    f" satisfied for Python {root_constraint.difference(constraint)}"
                 )
 
         if required_python_version_notification:
             buffer.append("")
 
         if isinstance(self._root.cause, ConflictCause):
-            self._visit(self._root, {})
+            self._visit(self._root)
         else:
-            self._write(
-                self._root, "Because {}, version solving failed.".format(self._root)
-            )
+            self._write(self._root, f"Because {self._root}, version solving failed.")
 
         padding = (
             0
             if not self._line_numbers
-            else len("({}) ".format(list(self._line_numbers.values())[-1]))
+            else len(f"({list(self._line_numbers.values())[-1]}) ")
         )
 
         last_was_empty = False
@@ -88,7 +86,7 @@ def write(self):
 
             number = line[-1]
             if number is not None:
-                message = "({})".format(number).ljust(padding) + message
+                message = f"({number})".ljust(padding) + message
             else:
                 message = " " * padding + message
 
@@ -97,8 +95,8 @@ def write(self):
         return "\n".join(buffer)
 
     def _write(
-        self, incompatibility, message, numbered=False
-    ):  # type: (Incompatibility, str, bool) -> None
+        self, incompatibility: Incompatibility, message: str, numbered: bool = False
+    ) -> None:
         if numbered:
             number = len(self._line_numbers) + 1
             self._line_numbers[incompatibility] = number
@@ -107,14 +105,17 @@ def _write(
             self._lines.append((message, None))
 
     def _visit(
-        self, incompatibility, details_for_incompatibility, conclusion=False
-    ):  # type: (Incompatibility, Dict, bool) -> None
+        self,
+        incompatibility: Incompatibility,
+        conclusion: bool = False,
+    ) -> None:
         numbered = conclusion or self._derivations[incompatibility] > 1
         conjunction = "So," if conclusion or incompatibility == self._root else "And"
         incompatibility_string = str(incompatibility)
 
-        cause = incompatibility.cause  # type: ConflictCause
-        details_for_cause = {}
+        cause = incompatibility.cause
+        assert isinstance(cause, ConflictCause)
+
         if isinstance(cause.conflict.cause, ConflictCause) and isinstance(
             cause.other.cause, ConflictCause
         ):
@@ -122,14 +123,12 @@ def _visit(
             other_line = self._line_numbers.get(cause.other)
 
             if conflict_line is not None and other_line is not None:
+                reason = cause.conflict.and_to_string(
+                    cause.other, conflict_line, other_line
+                )
                 self._write(
                     incompatibility,
-                    "Because {}, {}.".format(
-                        cause.conflict.and_to_string(
-                            cause.other, details_for_cause, conflict_line, other_line
-                        ),
-                        incompatibility_string,
-                    ),
+                    f"Because {reason}, {incompatibility_string}.",
                     numbered=numbered,
                 )
             elif conflict_line is not None or other_line is not None:
@@ -137,17 +136,16 @@ def _visit(
                     with_line = cause.conflict
                     without_line = cause.other
                     line = conflict_line
-                else:
+                elif other_line is not None:
                     with_line = cause.other
                     without_line = cause.conflict
                     line = other_line
 
-                self._visit(without_line, details_for_cause)
+                self._visit(without_line)
                 self._write(
                     incompatibility,
-                    "{} because {} ({}), {}.".format(
-                        conjunction, str(with_line), line, incompatibility_string
-                    ),
+                    f"{conjunction} because {with_line!s} ({line}),"
+                    f" {incompatibility_string}.",
                     numbered=numbered,
                 )
             else:
@@ -157,27 +155,24 @@ def _visit(
                 if single_line_other or single_line_conflict:
                     first = cause.conflict if single_line_other else cause.other
                     second = cause.other if single_line_other else cause.conflict
-                    self._visit(first, details_for_cause)
-                    self._visit(second, details_for_cause)
+                    self._visit(first)
+                    self._visit(second)
                     self._write(
                         incompatibility,
-                        "Thus, {}.".format(incompatibility_string),
+                        f"Thus, {incompatibility_string}.",
                         numbered=numbered,
                     )
                 else:
-                    self._visit(cause.conflict, {}, conclusion=True)
+                    self._visit(cause.conflict, conclusion=True)
                     self._lines.append(("", None))
 
-                    self._visit(cause.other, details_for_cause)
+                    self._visit(cause.other)
 
                     self._write(
                         incompatibility,
-                        "{} because {} ({}), {}".format(
-                            conjunction,
-                            str(cause.conflict),
-                            self._line_numbers[cause.conflict],
-                            incompatibility_string,
-                        ),
+                        f"{conjunction} because {cause.conflict!s}"
+                        f" ({self._line_numbers[cause.conflict]}),"
+                        f" {incompatibility_string}",
                         numbered=numbered,
                     )
         elif isinstance(cause.conflict.cause, ConflictCause) or isinstance(
@@ -196,66 +191,51 @@ def _visit(
 
             derived_line = self._line_numbers.get(derived)
             if derived_line is not None:
+                reason = ext.and_to_string(derived, None, derived_line)
                 self._write(
                     incompatibility,
-                    "Because {}, {}.".format(
-                        ext.and_to_string(
-                            derived, details_for_cause, None, derived_line
-                        ),
-                        incompatibility_string,
-                    ),
+                    f"Because {reason}, {incompatibility_string}.",
                     numbered=numbered,
                 )
             elif self._is_collapsible(derived):
-                derived_cause = derived.cause  # type: ConflictCause
+                derived_cause = derived.cause
+                assert isinstance(derived_cause, ConflictCause)
                 if isinstance(derived_cause.conflict.cause, ConflictCause):
                     collapsed_derived = derived_cause.conflict
+                    collapsed_ext = derived_cause.other
                 else:
                     collapsed_derived = derived_cause.other
 
-                if isinstance(derived_cause.conflict.cause, ConflictCause):
-                    collapsed_ext = derived_cause.other
-                else:
                     collapsed_ext = derived_cause.conflict
 
-                details_for_cause = {}
-
-                self._visit(collapsed_derived, details_for_cause)
+                self._visit(collapsed_derived)
+                reason = collapsed_ext.and_to_string(ext, None, None)
                 self._write(
                     incompatibility,
-                    "{} because {}, {}.".format(
-                        conjunction,
-                        collapsed_ext.and_to_string(ext, details_for_cause, None, None),
-                        incompatibility_string,
-                    ),
+                    f"{conjunction} because {reason}, {incompatibility_string}.",
                     numbered=numbered,
                 )
             else:
-                self._visit(derived, details_for_cause)
+                self._visit(derived)
                 self._write(
                     incompatibility,
-                    "{} because {}, {}.".format(
-                        conjunction, str(ext), incompatibility_string
-                    ),
+                    f"{conjunction} because {ext!s}, {incompatibility_string}.",
                     numbered=numbered,
                 )
         else:
+            reason = cause.conflict.and_to_string(cause.other, None, None)
             self._write(
                 incompatibility,
-                "Because {}, {}.".format(
-                    cause.conflict.and_to_string(
-                        cause.other, details_for_cause, None, None
-                    ),
-                    incompatibility_string,
-                ),
+                f"Because {reason}, {incompatibility_string}.",
                 numbered=numbered,
             )
 
-    def _is_collapsible(self, incompatibility):  # type: (Incompatibility) -> bool
+    def _is_collapsible(self, incompatibility: Incompatibility) -> bool:
         if self._derivations[incompatibility] > 1:
             return False
 
-        cause = incompatibility.cause  # type: ConflictCause
+        cause = incompatibility.cause
+        assert isinstance(cause, ConflictCause)
         if isinstance(cause.conflict.cause, ConflictCause) and isinstance(
             cause.other.cause, ConflictCause
         ):
@@ -274,12 +254,12 @@ def _is_collapsible(self, incompatibility):  # type: (Incompatibility) -> bool
 
         return complex not in self._line_numbers
 
-    def _is_single_line(self, cause):  # type: (ConflictCause) -> bool
+    def _is_single_line(self, cause: ConflictCause) -> bool:
         return not isinstance(cause.conflict.cause, ConflictCause) and not isinstance(
             cause.other.cause, ConflictCause
         )
 
-    def _count_derivations(self, incompatibility):  # type: (Incompatibility) -> None
+    def _count_derivations(self, incompatibility: Incompatibility) -> None:
         if incompatibility in self._derivations:
             self._derivations[incompatibility] += 1
         else:
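
As a side note for readers tracing the `_Writer` changes above: the numbering scheme in `write()` assigns a `(N)` label only to derivations that are referenced again later, then pads every other line so the messages stay aligned. A minimal standalone sketch of that padding logic, with hypothetical messages (not the vendored code):

```python
from __future__ import annotations

# (message, assigned line number or None), as in _Writer._lines.
lines: list[tuple[str, int | None]] = [
    ("Because demo-a (1.0.0) depends on demo-b (>=2.0), demo-a cannot be used.", 1),
    ("And because demo-b (>=2.0) requires Python >=3.9, version solving failed.", None),
]

# Padding is sized to the widest prefix, i.e. the last number handed out.
numbers = [n for _, n in lines if n is not None]
padding = len(f"({numbers[-1]}) ") if numbers else 0

for message, number in lines:
    prefix = f"({number})".ljust(padding) if number is not None else " " * padding
    print(prefix + message)
```
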
diff --git a/conda_lock/_vendor/poetry/mixology/incompatibility.py b/conda_lock/_vendor/poetry/mixology/incompatibility.py
index bba55bb2..f5f3af89 100644
--- a/conda_lock/_vendor/poetry/mixology/incompatibility.py
+++ b/conda_lock/_vendor/poetry/mixology/incompatibility.py
@@ -1,29 +1,32 @@
-from typing import Dict
-from typing import Generator
-from typing import List
+from __future__ import annotations
 
-from .incompatibility_cause import ConflictCause
-from .incompatibility_cause import DependencyCause
-from .incompatibility_cause import IncompatibilityCause
-from .incompatibility_cause import NoVersionsCause
-from .incompatibility_cause import PackageNotFoundCause
-from .incompatibility_cause import PlatformCause
-from .incompatibility_cause import PythonCause
-from .incompatibility_cause import RootCause
-from .term import Term
+from typing import TYPE_CHECKING
+
+from conda_lock._vendor.poetry.mixology.incompatibility_cause import ConflictCause
+from conda_lock._vendor.poetry.mixology.incompatibility_cause import DependencyCause
+from conda_lock._vendor.poetry.mixology.incompatibility_cause import NoVersionsCause
+from conda_lock._vendor.poetry.mixology.incompatibility_cause import PlatformCause
+from conda_lock._vendor.poetry.mixology.incompatibility_cause import PythonCause
+from conda_lock._vendor.poetry.mixology.incompatibility_cause import RootCause
+
+
+if TYPE_CHECKING:
+    from collections.abc import Callable
+    from collections.abc import Iterator
+
+    from conda_lock._vendor.poetry.mixology.incompatibility_cause import IncompatibilityCause
+    from conda_lock._vendor.poetry.mixology.term import Term
 
 
 class Incompatibility:
-    def __init__(
-        self, terms, cause
-    ):  # type: (List[Term], IncompatibilityCause) -> None
+    def __init__(self, terms: list[Term], cause: IncompatibilityCause) -> None:
         # Remove the root package from generated incompatibilities, since it will
         # always be satisfied. This makes error reporting clearer, and may also
         # make solving more efficient.
         if (
             len(terms) != 1
             and isinstance(cause, ConflictCause)
-            and any([term.is_positive() and term.dependency.is_root for term in terms])
+            and any(term.is_positive() and term.dependency.is_root for term in terms)
         ):
             terms = [
                 term
@@ -31,33 +34,29 @@ def __init__(
                 if not term.is_positive() or not term.dependency.is_root
             ]
 
-        if (
-            len(terms) == 1
+        if len(terms) != 1 and (
             # Short-circuit in the common case of a two-term incompatibility with
             # two different packages (for example, a dependency).
-            or len(terms) == 2
-            and terms[0].dependency.complete_name != terms[-1].dependency.complete_name
+            len(terms) != 2
+            or terms[0].dependency.complete_name == terms[-1].dependency.complete_name
         ):
-            pass
-        else:
             # Coalesce multiple terms about the same package if possible.
-            by_name = {}  # type: Dict[str, Dict[str, Term]]
+            by_name: dict[str, dict[str, Term]] = {}
             for term in terms:
-                if term.dependency.complete_name not in by_name:
-                    by_name[term.dependency.complete_name] = {}
-
-                by_ref = by_name[term.dependency.complete_name]
+                by_ref = by_name.setdefault(term.dependency.complete_name, {})
                 ref = term.dependency.complete_name
 
                 if ref in by_ref:
-                    by_ref[ref] = by_ref[ref].intersect(term)
-
-                    # If we have two terms that refer to the same package but have a null
-                    # intersection, they're mutually exclusive, making this incompatibility
-                    # irrelevant, since we already know that mutually exclusive version
-                    # ranges are incompatible. We should never derive an irrelevant
-                    # incompatibility.
-                    assert by_ref[ref] is not None
+                    value = by_ref[ref].intersect(term)
+
+                    # If we have two terms that refer to the same package but have a
+                    # null intersection, they're mutually exclusive, making this
+                    # incompatibility irrelevant, since we already know that mutually
+                    # exclusive version ranges are incompatible. We should never derive
+                    # an irrelevant incompatibility.
+                    err_msg = f"Package '{ref}' is listed as a dependency of itself."
+                    assert value is not None, err_msg
+                    by_ref[ref] = value
                 else:
                     by_ref[ref] = term
 
@@ -78,35 +77,35 @@ def __init__(
         self._cause = cause
 
     @property
-    def terms(self):  # type: () -> List[Term]
+    def terms(self) -> list[Term]:
         return self._terms
 
     @property
-    def cause(self):  # type: () -> IncompatibilityCause
+    def cause(self) -> IncompatibilityCause:
         return self._cause
 
     @property
-    def external_incompatibilities(self):  # type: () -> Generator[Incompatibility]
+    def external_incompatibilities(
+        self,
+    ) -> Iterator[Incompatibility]:
         """
         Returns all external incompatibilities in this incompatibility's
         derivation graph.
         """
         if isinstance(self._cause, ConflictCause):
-            cause = self._cause  # type: ConflictCause
-            for incompatibility in cause.conflict.external_incompatibilities:
-                yield incompatibility
+            cause: ConflictCause = self._cause
+            yield from cause.conflict.external_incompatibilities
 
-            for incompatibility in cause.other.external_incompatibilities:
-                yield incompatibility
+            yield from cause.other.external_incompatibilities
         else:
             yield self
 
-    def is_failure(self):  # type: () -> bool
+    def is_failure(self) -> bool:
         return len(self._terms) == 0 or (
             len(self._terms) == 1 and self._terms[0].dependency.is_root
         )
 
-    def __str__(self):
+    def __str__(self) -> str:
         if isinstance(self._cause, DependencyCause):
             assert len(self._terms) == 2
 
@@ -115,85 +114,71 @@ def __str__(self):
             assert depender.is_positive()
             assert not dependee.is_positive()
 
-            return "{} depends on {}".format(
-                self._terse(depender, allow_every=True), self._terse(dependee)
+            return (
+                f"{self._terse(depender, allow_every=True)} depends on"
+                f" {self._terse(dependee)}"
             )
         elif isinstance(self._cause, PythonCause):
             assert len(self._terms) == 1
             assert self._terms[0].is_positive()
 
-            cause = self._cause  # type: PythonCause
-            text = "{} requires ".format(self._terse(self._terms[0], allow_every=True))
-            text += "Python {}".format(cause.python_version)
+            text = f"{self._terse(self._terms[0], allow_every=True)} requires "
+            text += f"Python {self._cause.python_version}"
 
             return text
         elif isinstance(self._cause, PlatformCause):
             assert len(self._terms) == 1
             assert self._terms[0].is_positive()
 
-            cause = self._cause  # type: PlatformCause
-            text = "{} requires ".format(self._terse(self._terms[0], allow_every=True))
-            text += "platform {}".format(cause.platform)
+            text = f"{self._terse(self._terms[0], allow_every=True)} requires "
+            text += f"platform {self._cause.platform}"
 
             return text
         elif isinstance(self._cause, NoVersionsCause):
             assert len(self._terms) == 1
             assert self._terms[0].is_positive()
 
-            return "no versions of {} match {}".format(
-                self._terms[0].dependency.name, self._terms[0].constraint
+            return (
+                f"no versions of {self._terms[0].dependency.name} match"
+                f" {self._terms[0].constraint}"
             )
-        elif isinstance(self._cause, PackageNotFoundCause):
-            assert len(self._terms) == 1
-            assert self._terms[0].is_positive()
-
-            return "{} doesn't exist".format(self._terms[0].dependency.name)
         elif isinstance(self._cause, RootCause):
             assert len(self._terms) == 1
             assert not self._terms[0].is_positive()
             assert self._terms[0].dependency.is_root
 
-            return "{} is {}".format(
-                self._terms[0].dependency.name, self._terms[0].dependency.constraint
+            return (
+                f"{self._terms[0].dependency.name} is"
+                f" {self._terms[0].dependency.constraint}"
             )
         elif self.is_failure():
             return "version solving failed"
 
         if len(self._terms) == 1:
             term = self._terms[0]
-            if term.constraint.is_any():
-                return "{} is {}".format(
-                    term.dependency.name,
-                    "forbidden" if term.is_positive() else "required",
-                )
-            else:
-                return "{} is {}".format(
-                    term.dependency.name,
-                    "forbidden" if term.is_positive() else "required",
-                )
+            verb = "forbidden" if term.is_positive() else "required"
+            return f"{term.dependency.name} is {verb}"
 
         if len(self._terms) == 2:
             term1 = self._terms[0]
             term2 = self._terms[1]
 
             if term1.is_positive() == term2.is_positive():
-                if term1.is_positive():
-                    package1 = (
-                        term1.dependency.name
-                        if term1.constraint.is_any()
-                        else self._terse(term1)
-                    )
-                    package2 = (
-                        term2.dependency.name
-                        if term2.constraint.is_any()
-                        else self._terse(term2)
-                    )
-
-                    return "{} is incompatible with {}".format(package1, package2)
-                else:
-                    return "either {} or {}".format(
-                        self._terse(term1), self._terse(term2)
-                    )
+                if not term1.is_positive():
+                    return f"either {self._terse(term1)} or {self._terse(term2)}"
+
+                package1 = (
+                    term1.dependency.name
+                    if term1.constraint.is_any()
+                    else self._terse(term1)
+                )
+                package2 = (
+                    term2.dependency.name
+                    if term2.constraint.is_any()
+                    else self._terse(term2)
+                )
+
+                return f"{package1} is incompatible with {package2}"
 
         positive = []
         negative = []
@@ -205,67 +190,67 @@ def __str__(self):
                 negative.append(self._terse(term))
 
         if positive and negative:
-            if len(positive) == 1:
-                positive_term = [term for term in self._terms if term.is_positive()][0]
+            if len(positive) != 1:
+                return f"if {' and '.join(positive)} then {' or '.join(negative)}"
 
-                return "{} requires {}".format(
-                    self._terse(positive_term, allow_every=True), " or ".join(negative)
-                )
-            else:
-                return "if {} then {}".format(
-                    " and ".join(positive), " or ".join(negative)
-                )
+            positive_term = next(term for term in self._terms if term.is_positive())
+            return (
+                f"{self._terse(positive_term, allow_every=True)} requires"
+                f" {' or '.join(negative)}"
+            )
         elif positive:
-            return "one of {} must be false".format(" or ".join(positive))
+            return f"one of {' or '.join(positive)} must be false"
         else:
-            return "one of {} must be true".format(" or ".join(negative))
+            return f"one of {' or '.join(negative)} must be true"
 
     def and_to_string(
-        self, other, details, this_line, other_line
-    ):  # type: (Incompatibility, dict, int, int) -> str
-        requires_both = self._try_requires_both(other, details, this_line, other_line)
+        self,
+        other: Incompatibility,
+        this_line: int | None,
+        other_line: int | None,
+    ) -> str:
+        requires_both = self._try_requires_both(other, this_line, other_line)
         if requires_both is not None:
             return requires_both
 
-        requires_through = self._try_requires_through(
-            other, details, this_line, other_line
-        )
+        requires_through = self._try_requires_through(other, this_line, other_line)
         if requires_through is not None:
             return requires_through
 
-        requires_forbidden = self._try_requires_forbidden(
-            other, details, this_line, other_line
-        )
+        requires_forbidden = self._try_requires_forbidden(other, this_line, other_line)
         if requires_forbidden is not None:
             return requires_forbidden
 
         buffer = [str(self)]
         if this_line is not None:
-            buffer.append(" " + str(this_line))
+            buffer.append(f" {this_line!s}")
 
-        buffer.append(" and {}".format(str(other)))
+        buffer.append(f" and {other!s}")
 
         if other_line is not None:
-            buffer.append(" " + str(other_line))
+            buffer.append(f" {other_line!s}")
 
         return "\n".join(buffer)
 
     def _try_requires_both(
-        self, other, details, this_line, other_line
-    ):  # type: (Incompatibility, dict, int, int) -> str
+        self,
+        other: Incompatibility,
+        this_line: int | None,
+        other_line: int | None,
+    ) -> str | None:
         if len(self._terms) == 1 or len(other.terms) == 1:
-            return
+            return None
 
         this_positive = self._single_term_where(lambda term: term.is_positive())
         if this_positive is None:
-            return
+            return None
 
         other_positive = other._single_term_where(lambda term: term.is_positive())
         if other_positive is None:
-            return
+            return None
 
         if this_positive.dependency != other_positive.dependency:
-            return
+            return None
 
         this_negatives = " or ".join(
             [self._terse(term) for term in self._terms if not term.is_positive()]
@@ -285,28 +270,31 @@ def _try_requires_both(
         else:
             buffer.append("requires")
 
-        buffer.append(" both {}".format(this_negatives))
+        buffer.append(f" both {this_negatives}")
         if this_line is not None:
-            buffer.append(" ({})".format(this_line))
+            buffer.append(f" ({this_line})")
 
-        buffer.append(" and {}".format(other_negatives))
+        buffer.append(f" and {other_negatives}")
 
         if other_line is not None:
-            buffer.append(" ({})".format(other_line))
+            buffer.append(f" ({other_line})")
 
         return "".join(buffer)
 
     def _try_requires_through(
-        self, other, details, this_line, other_line
-    ):  # type: (Incompatibility, dict, int, int) -> str
+        self,
+        other: Incompatibility,
+        this_line: int | None,
+        other_line: int | None,
+    ) -> str | None:
         if len(self._terms) == 1 or len(other.terms) == 1:
-            return
+            return None
 
         this_negative = self._single_term_where(lambda term: not term.is_positive())
         other_negative = other._single_term_where(lambda term: not term.is_positive())
 
         if this_negative is None and other_negative is None:
-            return
+            return None
 
         this_positive = self._single_term_where(lambda term: term.is_positive())
         other_positive = other._single_term_where(lambda term: term.is_positive())
@@ -334,14 +322,14 @@ def _try_requires_through(
             latter = self
             latter_line = this_line
         else:
-            return
+            return None
 
         prior_positives = [term for term in prior.terms if term.is_positive()]
 
         buffer = []
         if len(prior_positives) > 1:
             prior_string = " or ".join([self._terse(term) for term in prior_positives])
-            buffer.append("if {} then ".format(prior_string))
+            buffer.append(f"if {prior_string} then ")
         else:
             if isinstance(prior.cause, DependencyCause):
                 verb = "depends on"
@@ -349,12 +337,12 @@ def _try_requires_through(
                 verb = "requires"
 
             buffer.append(
-                "{} {} ".format(self._terse(prior_positives[0], allow_every=True), verb)
+                f"{self._terse(prior_positives[0], allow_every=True)} {verb} "
             )
 
         buffer.append(self._terse(prior_negative))
         if prior_line is not None:
-            buffer.append(" ({})".format(prior_line))
+            buffer.append(f" ({prior_line})")
 
         buffer.append(" which ")
 
@@ -370,13 +358,16 @@ def _try_requires_through(
         )
 
         if latter_line is not None:
-            buffer.append(" ({})".format(latter_line))
+            buffer.append(f" ({latter_line})")
 
         return "".join(buffer)
 
     def _try_requires_forbidden(
-        self, other, details, this_line, other_line
-    ):  # type: (Incompatibility, dict, int, int) -> str
+        self,
+        other: Incompatibility,
+        this_line: int | None,
+        other_line: int | None,
+    ) -> str | None:
         if len(self._terms) != 1 and len(other.terms) != 1:
             return None
 
@@ -393,17 +384,17 @@ def _try_requires_forbidden(
 
         negative = prior._single_term_where(lambda term: not term.is_positive())
         if negative is None:
-            return
+            return None
 
         if not negative.inverse.satisfies(latter.terms[0]):
-            return
+            return None
 
         positives = [t for t in prior.terms if t.is_positive()]
 
         buffer = []
         if len(positives) > 1:
             prior_string = " or ".join([self._terse(term) for term in positives])
-            buffer.append("if {} then ".format(prior_string))
+            buffer.append(f"if {prior_string} then ")
         else:
             buffer.append(self._terse(positives[0], allow_every=True))
             if isinstance(prior.cause, DependencyCause):
@@ -413,46 +404,46 @@ def _try_requires_forbidden(
 
         buffer.append(self._terse(latter.terms[0]) + " ")
         if prior_line is not None:
-            buffer.append("({}) ".format(prior_line))
+            buffer.append(f"({prior_line}) ")
 
         if isinstance(latter.cause, PythonCause):
-            cause = latter.cause  # type: PythonCause
-            buffer.append("which requires Python {}".format(cause.python_version))
+            cause: PythonCause = latter.cause
+            buffer.append(f"which requires Python {cause.python_version}")
         elif isinstance(latter.cause, NoVersionsCause):
             buffer.append("which doesn't match any versions")
-        elif isinstance(latter.cause, PackageNotFoundCause):
-            buffer.append("which doesn't exist")
         else:
             buffer.append("which is forbidden")
 
         if latter_line is not None:
-            buffer.append(" ({})".format(latter_line))
+            buffer.append(f" ({latter_line})")
 
         return "".join(buffer)
 
-    def _terse(self, term, allow_every=False):
+    def _terse(self, term: Term, allow_every: bool = False) -> str:
         if allow_every and term.constraint.is_any():
-            return "every version of {}".format(term.dependency.complete_name)
+            return f"every version of {term.dependency.complete_name}"
 
         if term.dependency.is_root:
-            return term.dependency.pretty_name
+            pretty_name: str = term.dependency.pretty_name
+            return pretty_name
 
-        return "{} ({})".format(
-            term.dependency.pretty_name, term.dependency.pretty_constraint
-        )
+        if term.dependency.source_type:
+            return str(term.dependency)
+        pretty_name = term.dependency.complete_pretty_name
+        return f"{pretty_name} ({term.dependency.pretty_constraint})"
 
-    def _single_term_where(self, callable):  # type: (callable) -> Term
+    def _single_term_where(self, callable: Callable[[Term], bool]) -> Term | None:
         found = None
         for term in self._terms:
             if not callable(term):
                 continue
 
             if found is not None:
-                return
+                return None
 
             found = term
 
         return found
 
-    def __repr__(self):
-        return "".format(str(self))
+    def __repr__(self) -> str:
+        return f""
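
Stepping back from the diff for a moment: the coalescing step in `Incompatibility.__init__` above merges every term that mentions the same package by intersecting their constraints, and treats an empty intersection as a bug. A toy sketch of the same shape, using plain sets as stand-in constraints (all names here are hypothetical, not the vendored code):

```python
from __future__ import annotations


class FakeTerm:
    """A stand-in for Term: a package name plus an allowed-version set."""

    def __init__(self, name: str, versions: set[str]) -> None:
        self.name = name
        self.versions = versions

    def intersect(self, other: FakeTerm) -> FakeTerm | None:
        common = self.versions & other.versions
        return FakeTerm(self.name, common) if common else None


terms = [FakeTerm("demo", {"1.0", "1.1"}), FakeTerm("demo", {"1.1", "2.0"})]

by_name: dict[str, FakeTerm] = {}
for term in terms:
    if term.name in by_name:
        merged = by_name[term.name].intersect(term)
        # A null intersection would make the incompatibility irrelevant,
        # which the real code treats as a bug (hence its assert).
        assert merged is not None, f"Package '{term.name}' excludes itself."
        by_name[term.name] = merged
    else:
        by_name[term.name] = term

print(by_name["demo"].versions)  # {'1.1'}
```
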
diff --git a/conda_lock/_vendor/poetry/mixology/incompatibility_cause.py b/conda_lock/_vendor/poetry/mixology/incompatibility_cause.py
index 8156b4fa..b1e21429 100644
--- a/conda_lock/_vendor/poetry/mixology/incompatibility_cause.py
+++ b/conda_lock/_vendor/poetry/mixology/incompatibility_cause.py
@@ -1,3 +1,12 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.mixology.incompatibility import Incompatibility
+
+
 class IncompatibilityCause(Exception):
     """
     The reason an Incompatibility's terms are incompatible.
@@ -5,17 +14,14 @@ class IncompatibilityCause(Exception):
 
 
 class RootCause(IncompatibilityCause):
-
     pass
 
 
 class NoVersionsCause(IncompatibilityCause):
-
     pass
 
 
 class DependencyCause(IncompatibilityCause):
-
     pass
 
 
@@ -25,19 +31,19 @@ class ConflictCause(IncompatibilityCause):
     during conflict resolution.
     """
 
-    def __init__(self, conflict, other):
+    def __init__(self, conflict: Incompatibility, other: Incompatibility) -> None:
         self._conflict = conflict
         self._other = other
 
     @property
-    def conflict(self):
+    def conflict(self) -> Incompatibility:
         return self._conflict
 
     @property
-    def other(self):
+    def other(self) -> Incompatibility:
         return self._other
 
-    def __str__(self):
+    def __str__(self) -> str:
         return str(self._conflict)
 
 
@@ -48,16 +54,16 @@ class PythonCause(IncompatibilityCause):
     with the current python version.
     """
 
-    def __init__(self, python_version, root_python_version):
+    def __init__(self, python_version: str, root_python_version: str) -> None:
         self._python_version = python_version
         self._root_python_version = root_python_version
 
     @property
-    def python_version(self):
+    def python_version(self) -> str:
         return self._python_version
 
     @property
-    def root_python_version(self):
+    def root_python_version(self) -> str:
         return self._root_python_version
 
 
@@ -67,23 +73,9 @@ class PlatformCause(IncompatibilityCause):
     (OS most likely) being incompatible with the current platform.
     """
 
-    def __init__(self, platform):
+    def __init__(self, platform: str) -> None:
         self._platform = platform
 
     @property
-    def platform(self):
+    def platform(self) -> str:
         return self._platform
-
-
-class PackageNotFoundCause(IncompatibilityCause):
-    """
-    The incompatibility represents a package that couldn't be found by its
-    source.
-    """
-
-    def __init__(self, error):
-        self._error = error
-
-    @property
-    def error(self):
-        return self._error
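
Since `incompatibility_cause.py` is a plain class hierarchy, its intended use is easiest to see from the consumer side. A hedged sketch of the isinstance-dispatch pattern that `Incompatibility.__str__` and the failure writer apply to these causes (stripped-down stand-in classes and illustrative wording, not the vendored code):

```python
from __future__ import annotations


class FakeCause(Exception):
    pass


class FakePythonCause(FakeCause):
    def __init__(self, python_version: str) -> None:
        self.python_version = python_version


class FakePlatformCause(FakeCause):
    def __init__(self, platform: str) -> None:
        self.platform = platform


def describe(cause: FakeCause) -> str:
    # Mirrors how the real writer branches on the concrete cause type.
    if isinstance(cause, FakePythonCause):
        return f"requires Python {cause.python_version}"
    if isinstance(cause, FakePlatformCause):
        return f"requires platform {cause.platform}"
    return "is incompatible"


print(describe(FakePythonCause(">=3.9")))    # requires Python >=3.9
print(describe(FakePlatformCause("linux")))  # requires platform linux
```
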
diff --git a/conda_lock/_vendor/poetry/mixology/partial_solution.py b/conda_lock/_vendor/poetry/mixology/partial_solution.py
old mode 100755
new mode 100644
index 17565583..3c147ebd
--- a/conda_lock/_vendor/poetry/mixology/partial_solution.py
+++ b/conda_lock/_vendor/poetry/mixology/partial_solution.py
@@ -1,14 +1,17 @@
-from collections import OrderedDict
-from typing import Dict
-from typing import List
+from __future__ import annotations
 
-from conda_lock._vendor.poetry.core.packages import Dependency
-from conda_lock._vendor.poetry.core.packages import Package
+from typing import TYPE_CHECKING
 
-from .assignment import Assignment
-from .incompatibility import Incompatibility
-from .set_relation import SetRelation
-from .term import Term
+from conda_lock._vendor.poetry.mixology.assignment import Assignment
+from conda_lock._vendor.poetry.mixology.set_relation import SetRelation
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.core.packages.dependency import Dependency
+    from conda_lock._vendor.poetry.core.packages.package import Package
+
+    from conda_lock._vendor.poetry.mixology.incompatibility import Incompatibility
+    from conda_lock._vendor.poetry.mixology.term import Term
 
 
 class PartialSolution:
@@ -17,22 +20,23 @@ class PartialSolution:
     # what's true for the eventual set of package versions that will comprise the
     # total solution.
     #
-    # See https://github.com/dart-lang/pub/tree/master/doc/solver.md#partial-solution.
+    # See:
+    # https://github.com/dart-lang/pub/tree/master/doc/solver.md#partial-solution.
     """
 
-    def __init__(self):
+    def __init__(self) -> None:
         # The assignments that have been made so far, in the order they were
         # assigned.
-        self._assignments = []  # type: List[Assignment]
+        self._assignments: list[Assignment] = []
 
         # The decisions made for each package.
-        self._decisions = OrderedDict()  # type: Dict[str, Package]
+        self._decisions: dict[str, Package] = {}
 
         # The intersection of all positive Assignments for each package, minus any
         # negative Assignments that refer to that package.
         #
         # This is derived from self._assignments.
-        self._positive = OrderedDict()  # type: Dict[str, Term]
+        self._positive: dict[str, Term] = {}
 
         # The union of all negative Assignments for each package.
         #
@@ -40,7 +44,7 @@ def __init__(self):
         # map.
         #
         # This is derived from self._assignments.
-        self._negative = OrderedDict()  # type: Dict[str, Dict[str, Term]]
+        self._negative: dict[str, Term] = {}
 
         # The number of distinct solutions that have been attempted so far.
         self._attempted_solutions = 1
@@ -49,26 +53,26 @@ def __init__(self):
         self._backtracking = False
 
     @property
-    def decisions(self):  # type: () -> List[Package]
+    def decisions(self) -> list[Package]:
         return list(self._decisions.values())
 
     @property
-    def decision_level(self):  # type: () -> int
+    def decision_level(self) -> int:
         return len(self._decisions)
 
     @property
-    def attempted_solutions(self):  # type: () -> int
+    def attempted_solutions(self) -> int:
         return self._attempted_solutions
 
     @property
-    def unsatisfied(self):  # type: () -> List[Dependency]
+    def unsatisfied(self) -> list[Dependency]:
         return [
             term.dependency
             for term in self._positive.values()
             if term.dependency.complete_name not in self._decisions
         ]
 
-    def decide(self, package):  # type: (Package) -> None
+    def decide(self, package: Package) -> None:
         """
         Adds an assignment of the package as a decision
         and increments the decision level.
@@ -88,8 +92,8 @@ def decide(self, package):  # type: (Package) -> None
         )
 
     def derive(
-        self, dependency, is_positive, cause
-    ):  # type: (Dependency, bool, Incompatibility) -> None
+        self, dependency: Dependency, is_positive: bool, cause: Incompatibility
+    ) -> None:
         """
         Adds an assignment of the package as a derivation.
         """
@@ -103,14 +107,14 @@ def derive(
             )
         )
 
-    def _assign(self, assignment):  # type: (Assignment) -> None
+    def _assign(self, assignment: Assignment) -> None:
         """
         Adds an Assignment to _assignments and _positive or _negative.
         """
         self._assignments.append(assignment)
         self._register(assignment)
 
-    def backtrack(self, decision_level):  # type: (int) -> None
+    def backtrack(self, decision_level: int) -> None:
         """
         Resets the current decision level to decision_level, and removes all
         assignments made after that level.
@@ -136,24 +140,24 @@ def backtrack(self, decision_level):  # type: (int) -> None
             if assignment.dependency.complete_name in packages:
                 self._register(assignment)
 
-    def _register(self, assignment):  # type: (Assignment) -> None
+    def _register(self, assignment: Assignment) -> None:
         """
         Registers an Assignment in _positive or _negative.
         """
         name = assignment.dependency.complete_name
         old_positive = self._positive.get(name)
         if old_positive is not None:
-            self._positive[name] = old_positive.intersect(assignment)
+            value = old_positive.intersect(assignment)
+            assert value is not None
+            self._positive[name] = value
 
             return
 
-        ref = assignment.dependency.complete_name
-        negative_by_ref = self._negative.get(name)
-        old_negative = None if negative_by_ref is None else negative_by_ref.get(ref)
-        if old_negative is None:
-            term = assignment
-        else:
-            term = assignment.intersect(old_negative)
+        old_negative = self._negative.get(name)
+        term = (
+            assignment if old_negative is None else assignment.intersect(old_negative)
+        )
+        assert term is not None
 
         if term.is_positive():
             if name in self._negative:
@@ -161,17 +165,14 @@ def _register(self, assignment):  # type: (Assignment) -> None
 
             self._positive[name] = term
         else:
-            if name not in self._negative:
-                self._negative[name] = {}
-
-            self._negative[name][ref] = term
+            self._negative[name] = term
 
-    def satisfier(self, term):  # type: (Term) -> Assignment
+    def satisfier(self, term: Term) -> Assignment:
         """
         Returns the first Assignment in this solution such that the sublist of
         assignments up to and including that entry collectively satisfies term.
         """
-        assigned_term = None  # type: Term
+        assigned_term = None
 
         for assignment in self._assignments:
             if assignment.dependency.complete_name != term.dependency.complete_name:
@@ -197,21 +198,17 @@ def satisfier(self, term):  # type: (Term) -> Assignment
             if assigned_term.satisfies(term):
                 return assignment
 
-        raise RuntimeError("[BUG] {} is not satisfied.".format(term))
+        raise RuntimeError(f"[BUG] {term} is not satisfied.")
 
-    def satisfies(self, term):  # type: (Term) -> bool
+    def satisfies(self, term: Term) -> bool:
         return self.relation(term) == SetRelation.SUBSET
 
-    def relation(self, term):  # type: (Term) -> int
+    def relation(self, term: Term) -> str:
         positive = self._positive.get(term.dependency.complete_name)
         if positive is not None:
             return positive.relation(term)
 
-        by_ref = self._negative.get(term.dependency.complete_name)
-        if by_ref is None:
-            return SetRelation.OVERLAPPING
-
-        negative = by_ref[term.dependency.complete_name]
+        negative = self._negative.get(term.dependency.complete_name)
         if negative is None:
             return SetRelation.OVERLAPPING
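
One behavioural detail worth keeping in mind from the `PartialSolution` changes above is backtracking: everything assigned after the target decision level is dropped, and the derived indexes are rebuilt from what survives. A minimal standalone sketch of that bookkeeping (hypothetical names, not the vendored code):

```python
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class FakeAssignment:
    package: str
    decision_level: int


def backtrack(
    assignments: list[FakeAssignment], decision_level: int
) -> list[FakeAssignment]:
    # Keep only assignments made at or below the target level; the real
    # implementation then re-registers the survivors to rebuild _positive
    # and _negative for the packages that lost assignments.
    return [a for a in assignments if a.decision_level <= decision_level]


history = [
    FakeAssignment("root", 0),
    FakeAssignment("demo-a", 1),
    FakeAssignment("demo-b", 2),
]
print([a.package for a in backtrack(history, 1)])  # ['root', 'demo-a']
```
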
 
diff --git a/conda_lock/_vendor/poetry/mixology/result.py b/conda_lock/_vendor/poetry/mixology/result.py
index 5eadeb75..44ecafdc 100644
--- a/conda_lock/_vendor/poetry/mixology/result.py
+++ b/conda_lock/_vendor/poetry/mixology/result.py
@@ -1,13 +1,28 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.core.packages.package import Package
+    from conda_lock._vendor.poetry.core.packages.project_package import ProjectPackage
+
+
 class SolverResult:
-    def __init__(self, root, packages, attempted_solutions):
+    def __init__(
+        self,
+        root: ProjectPackage,
+        packages: list[Package],
+        attempted_solutions: int,
+    ) -> None:
         self._root = root
         self._packages = packages
         self._attempted_solutions = attempted_solutions
 
     @property
-    def packages(self):
+    def packages(self) -> list[Package]:
         return self._packages
 
     @property
-    def attempted_solutions(self):
+    def attempted_solutions(self) -> int:
         return self._attempted_solutions
diff --git a/conda_lock/_vendor/poetry/mixology/set_relation.py b/conda_lock/_vendor/poetry/mixology/set_relation.py
index 4bd333bc..a71e8261 100644
--- a/conda_lock/_vendor/poetry/mixology/set_relation.py
+++ b/conda_lock/_vendor/poetry/mixology/set_relation.py
@@ -1,3 +1,6 @@
+from __future__ import annotations
+
+
 class SetRelation:
     """
     An enum of possible relationships between two sets.
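
The three relationships this class names are easy to picture with ordinary Python sets standing in for version constraints; an illustrative sketch (the return values below are labels for demonstration, not the class's own constants):

```python
def relation(a: set[int], b: set[int]) -> str:
    if a <= b:
        return "SUBSET"       # every version a allows, b also allows
    if a.isdisjoint(b):
        return "DISJOINT"     # no version satisfies both
    return "OVERLAPPING"      # some versions are shared, some are not


print(relation({1, 2}, {1, 2, 3}))  # SUBSET
print(relation({1}, {2}))           # DISJOINT
print(relation({1, 2}, {2, 3}))     # OVERLAPPING
```
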
diff --git a/conda_lock/_vendor/poetry/mixology/solutions/providers/__init__.py b/conda_lock/_vendor/poetry/mixology/solutions/providers/__init__.py
index 3faec7b6..67fcb12e 100644
--- a/conda_lock/_vendor/poetry/mixology/solutions/providers/__init__.py
+++ b/conda_lock/_vendor/poetry/mixology/solutions/providers/__init__.py
@@ -1 +1,8 @@
-from .python_requirement_solution_provider import PythonRequirementSolutionProvider
+from __future__ import annotations
+
+from conda_lock._vendor.poetry.mixology.solutions.providers.python_requirement_solution_provider import (
+    PythonRequirementSolutionProvider,
+)
+
+
+__all__ = ["PythonRequirementSolutionProvider"]
diff --git a/conda_lock/_vendor/poetry/mixology/solutions/providers/python_requirement_solution_provider.py b/conda_lock/_vendor/poetry/mixology/solutions/providers/python_requirement_solution_provider.py
index 5c490b03..fa00356e 100644
--- a/conda_lock/_vendor/poetry/mixology/solutions/providers/python_requirement_solution_provider.py
+++ b/conda_lock/_vendor/poetry/mixology/solutions/providers/python_requirement_solution_provider.py
@@ -1,30 +1,35 @@
+from __future__ import annotations
+
 import re
 
-from typing import List
+from typing import TYPE_CHECKING
 
 from crashtest.contracts.has_solutions_for_exception import HasSolutionsForException
-from crashtest.contracts.solution import Solution
 
+from conda_lock._vendor.poetry.puzzle.exceptions import SolverProblemError
+
+
+if TYPE_CHECKING:
+    from crashtest.contracts.solution import Solution
 
-class PythonRequirementSolutionProvider(HasSolutionsForException):
-    def can_solve(self, exception):  # type: (Exception) -> bool
-        from conda_lock._vendor.poetry.puzzle.exceptions import SolverProblemError
 
+class PythonRequirementSolutionProvider(HasSolutionsForException):
+    def can_solve(self, exception: Exception) -> bool:
         if not isinstance(exception, SolverProblemError):
             return False
 
         m = re.match(
-            "^The current project's Python requirement (.+) is not compatible "
+            "^The current project's supported Python range (.+) is not compatible "
             "with some of the required packages Python requirement",
             str(exception),
         )
 
-        if not m:
-            return False
-
-        return True
+        return bool(m)
 
-    def get_solutions(self, exception):  # type: (Exception) -> List[Solution]
-        from ..solutions.python_requirement_solution import PythonRequirementSolution
+    def get_solutions(self, exception: Exception) -> list[Solution]:
+        from conda_lock._vendor.poetry.mixology.solutions.solutions.python_requirement_solution import (
+            PythonRequirementSolution,
+        )
 
+        assert isinstance(exception, SolverProblemError)
         return [PythonRequirementSolution(exception)]
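
The updated `can_solve()` regex above can be sanity-checked against the message format the failure writer earlier in this diff now produces. A quick hedged check (the package range here is made up):

```python
import re

message = (
    "The current project's supported Python range (>=3.8,<4.0) is not"
    " compatible with some of the required packages Python requirement:"
)

m = re.match(
    "^The current project's supported Python range (.+) is not compatible "
    "with some of the required packages Python requirement",
    message,
)
assert m is not None
print(bool(m))     # True
print(m.group(1))  # (>=3.8,<4.0) -- the parentheses are part of the capture
```
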
diff --git a/conda_lock/_vendor/poetry/mixology/solutions/solutions/__init__.py b/conda_lock/_vendor/poetry/mixology/solutions/solutions/__init__.py
index 838e77b0..49196d7a 100644
--- a/conda_lock/_vendor/poetry/mixology/solutions/solutions/__init__.py
+++ b/conda_lock/_vendor/poetry/mixology/solutions/solutions/__init__.py
@@ -1 +1,8 @@
-from .python_requirement_solution import PythonRequirementSolution
+from __future__ import annotations
+
+from conda_lock._vendor.poetry.mixology.solutions.solutions.python_requirement_solution import (
+    PythonRequirementSolution,
+)
+
+
+__all__ = ["PythonRequirementSolution"]
diff --git a/conda_lock/_vendor/poetry/mixology/solutions/solutions/python_requirement_solution.py b/conda_lock/_vendor/poetry/mixology/solutions/solutions/python_requirement_solution.py
index 7075b094..69a33833 100644
--- a/conda_lock/_vendor/poetry/mixology/solutions/solutions/python_requirement_solution.py
+++ b/conda_lock/_vendor/poetry/mixology/solutions/solutions/python_requirement_solution.py
@@ -1,14 +1,24 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
 from crashtest.contracts.solution import Solution
 
 
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.mixology.failure import SolveFailure
+    from conda_lock._vendor.poetry.puzzle.exceptions import SolverProblemError
+
+
 class PythonRequirementSolution(Solution):
-    def __init__(self, exception):
-        from conda_lock._vendor.poetry.core.semver import parse_constraint
+    def __init__(self, exception: SolverProblemError) -> None:
+        from conda_lock._vendor.poetry.core.constraints.version import parse_constraint
+
         from conda_lock._vendor.poetry.mixology.incompatibility_cause import PythonCause
 
         self._title = "Check your dependencies Python requirement."
 
-        failure = exception.error
+        failure: SolveFailure = exception.error
         version_solutions = []
         for incompatibility in failure._incompatibility.external_incompatibilities:
             if isinstance(incompatibility.cause, PythonCause):
@@ -18,16 +28,17 @@ def __init__(self, exception):
                 constraint = parse_constraint(incompatibility.cause.python_version)
 
                 version_solutions.append(
-                    "For {}, a possible solution would be "
-                    'to set the `python` property to "{}"'.format(
-                        incompatibility.terms[0].dependency.name,
-                        root_constraint.intersect(constraint),
-                    )
+                    "For "
+                    f"{incompatibility.terms[0].dependency.name},"
+                    " a possible solution would be to set the"
+                    " `python` property to"
+                    f' "{root_constraint.intersect(constraint)}"'
                 )
 
         description = (
-            "The Python requirement can be specified via the `python` "
-            "or `markers` properties"
+            "The Python requirement can be specified via the"
+            " `python` or"
+            " `markers` properties"
         )
         if version_solutions:
             description += "\n\n" + "\n".join(version_solutions)
@@ -41,11 +52,11 @@ def solution_title(self) -> str:
         return self._title
 
     @property
-    def solution_description(self):
+    def solution_description(self) -> str:
         return self._description
 
     @property
-    def documentation_links(self):
+    def documentation_links(self) -> list[str]:
         return [
             "https://python-poetry.org/docs/dependency-specification/#python-restricted-dependencies",
             "https://python-poetry.org/docs/dependency-specification/#using-environment-markers",
diff --git a/conda_lock/_vendor/poetry/mixology/term.py b/conda_lock/_vendor/poetry/mixology/term.py
old mode 100755
new mode 100644
index 37cbced9..1810f696
--- a/conda_lock/_vendor/poetry/mixology/term.py
+++ b/conda_lock/_vendor/poetry/mixology/term.py
@@ -1,12 +1,18 @@
-# -*- coding: utf-8 -*-
-from typing import Union
+from __future__ import annotations
 
-from conda_lock._vendor.poetry.core.packages import Dependency
+import functools
 
-from .set_relation import SetRelation
+from typing import TYPE_CHECKING
 
+from conda_lock._vendor.poetry.mixology.set_relation import SetRelation
 
-class Term(object):
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.core.constraints.version import VersionConstraint
+    from conda_lock._vendor.poetry.core.packages.dependency import Dependency
+
+
+class Term:
     """
     A statement about a package which is true or false for a given selection of
     package versions.
@@ -14,26 +20,28 @@ class Term(object):
     See https://github.com/dart-lang/pub/tree/master/doc/solver.md#term.
     """
 
-    def __init__(self, dependency, is_positive):  # type: (Dependency, bool)  -> None
+    def __init__(self, dependency: Dependency, is_positive: bool) -> None:
         self._dependency = dependency
         self._positive = is_positive
+        self.relation = functools.lru_cache(maxsize=None)(self._relation)
+        self.intersect = functools.lru_cache(maxsize=None)(self._intersect)
 
     @property
-    def inverse(self):  # type: () -> Term
+    def inverse(self) -> Term:
         return Term(self._dependency, not self.is_positive())
 
     @property
-    def dependency(self):
+    def dependency(self) -> Dependency:
         return self._dependency
 
     @property
-    def constraint(self):
+    def constraint(self) -> VersionConstraint:
         return self._dependency.constraint
 
-    def is_positive(self):  # type: () -> bool
+    def is_positive(self) -> bool:
         return self._positive
 
-    def satisfies(self, other):  # type: (Term) -> bool
+    def satisfies(self, other: Term) -> bool:
         """
         Returns whether this term satisfies another.
         """
@@ -42,15 +50,13 @@ def satisfies(self, other):  # type: (Term) -> bool
             and self.relation(other) == SetRelation.SUBSET
         )
 
-    def relation(self, other):  # type: (Term) -> int
+    def _relation(self, other: Term) -> str:
         """
         Returns the relationship between the package versions
         allowed by this term and another.
         """
         if self.dependency.complete_name != other.dependency.complete_name:
-            raise ValueError(
-                "{} should refer to {}".format(other, self.dependency.complete_name)
-            )
+            raise ValueError(f"{other} should refer to {self.dependency.complete_name}")
 
         other_constraint = other.constraint
 
@@ -106,15 +112,13 @@ def relation(self, other):  # type: (Term) -> int
                 # not foo ^1.5.0 is a superset of not foo ^1.0.0
                 return SetRelation.OVERLAPPING
 
-    def intersect(self, other):  # type: (Term) -> Union[Term, None]
+    def _intersect(self, other: Term) -> Term | None:
         """
         Returns a Term that represents the packages
         allowed by both this term and another
         """
         if self.dependency.complete_name != other.dependency.complete_name:
-            raise ValueError(
-                "{} should refer to {}".format(other, self.dependency.complete_name)
-            )
+            raise ValueError(f"{other} should refer to {self.dependency.complete_name}")
 
         if self._compatible_dependency(other.dependency):
             if self.is_positive() != other.is_positive():
@@ -123,49 +127,61 @@ def intersect(self, other):  # type: (Term) -> Union[Term, None]
                 negative = other if self.is_positive() else self
 
                 return self._non_empty_term(
-                    positive.constraint.difference(negative.constraint), True
+                    positive.constraint.difference(negative.constraint), True, other
                 )
             elif self.is_positive():
                 # foo ^1.0.0 ∩ foo >=1.5.0 <3.0.0 → foo ^1.5.0
                 return self._non_empty_term(
-                    self.constraint.intersect(other.constraint), True
+                    self.constraint.intersect(other.constraint), True, other
                 )
             else:
                 # not foo ^1.0.0 ∩ not foo >=1.5.0 <3.0.0 → not foo >=1.0.0 <3.0.0
                 return self._non_empty_term(
-                    self.constraint.union(other.constraint), False
+                    self.constraint.union(other.constraint), False, other
                 )
         elif self.is_positive() != other.is_positive():
             return self if self.is_positive() else other
         else:
-            return
+            return None
 
-    def difference(self, other):  # type: (Term) -> Term
+    def difference(self, other: Term) -> Term | None:
         """
         Returns a Term that represents packages
         allowed by this term and not by the other
         """
         return self.intersect(other.inverse)
 
-    def _compatible_dependency(self, other):
+    def _compatible_dependency(self, other: Dependency) -> bool:
         return (
             self.dependency.is_root
             or other.is_root
             or other.is_same_package_as(self.dependency)
+            or (
+                # we do this here to indicate direct origin dependencies are
+                # compatible with NVR dependencies
+                self.dependency.complete_name == other.complete_name
+                and self.dependency.is_direct_origin() != other.is_direct_origin()
+            )
         )
 
-    def _non_empty_term(self, constraint, is_positive):
+    def _non_empty_term(
+        self, constraint: VersionConstraint, is_positive: bool, other: Term
+    ) -> Term | None:
         if constraint.is_empty():
-            return
-
-        return Term(self.dependency.with_constraint(constraint), is_positive)
-
-    def __str__(self):
-        return "{} {} ({})".format(
-            "not " if not self.is_positive() else "",
-            self._dependency.pretty_name,
-            self._dependency.pretty_constraint,
+            return None
+
+        # when creating a new term prefer direct-reference dependencies
+        dependency = (
+            other.dependency
+            if not self.dependency.is_direct_origin()
+            and other.dependency.is_direct_origin()
+            else self.dependency
         )
+        return Term(dependency.with_constraint(constraint), is_positive)
+
+    def __str__(self) -> str:
+        prefix = "not " if not self.is_positive() else ""
+        return f"{prefix}{self._dependency}"
 
-    def __repr__(self):
-        return "".format(str(self))
+    def __repr__(self) -> str:
+        return f""
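For context, the intersection rules annotated above work as follows for same-named packages: positive ∩ positive intersects the constraints, negative ∩ negative unions them (De Morgan), and mixed signs take the set difference. A minimal sketch of those rules, using hypothetical half-open (lo, hi) version intervals rather than poetry's VersionConstraint API:

    # Minimal sketch, not poetry's API: versions are floats and ranges are
    # half-open (lo, hi) intervals, so "foo ^1.0.0" becomes (1.0, 2.0).
    def intersect(a, b):
        lo, hi = max(a[0], b[0]), min(a[1], b[1])
        return (lo, hi) if lo < hi else None  # None models the empty constraint

    def union(a, b):
        assert intersect(a, b) is not None  # overlapping is all this sketch needs
        return (min(a[0], b[0]), max(a[1], b[1]))

    caret_1_0 = (1.0, 2.0)  # foo ^1.0.0
    mid_range = (1.5, 3.0)  # foo >=1.5.0 <3.0.0

    # positive ∩ positive: foo ^1.0.0 ∩ foo >=1.5.0 <3.0.0 → foo ^1.5.0
    assert intersect(caret_1_0, mid_range) == (1.5, 2.0)
    # negative ∩ negative ("not A and not B" == "not (A or B)"):
    # not foo ^1.0.0 ∩ not foo >=1.5.0 <3.0.0 → not foo >=1.0.0 <3.0.0
    assert union(caret_1_0, mid_range) == (1.0, 3.0)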
diff --git a/conda_lock/_vendor/poetry/mixology/version_solver.py b/conda_lock/_vendor/poetry/mixology/version_solver.py
old mode 100755
new mode 100644
index afbbbdcb..d7ccac36
--- a/conda_lock/_vendor/poetry/mixology/version_solver.py
+++ b/conda_lock/_vendor/poetry/mixology/version_solver.py
@@ -1,37 +1,136 @@
-# -*- coding: utf-8 -*-
+from __future__ import annotations
+
+import collections
+import functools
 import time
 
 from typing import TYPE_CHECKING
-from typing import Any
-from typing import Dict
-from typing import List
-from typing import Union
-
-from conda_lock._vendor.poetry.core.packages import Dependency
-from conda_lock._vendor.poetry.core.packages import Package
-from conda_lock._vendor.poetry.core.packages import ProjectPackage
-from conda_lock._vendor.poetry.core.semver import Version
-from conda_lock._vendor.poetry.core.semver import VersionRange
-
-from .failure import SolveFailure
-from .incompatibility import Incompatibility
-from .incompatibility_cause import ConflictCause
-from .incompatibility_cause import NoVersionsCause
-from .incompatibility_cause import PackageNotFoundCause
-from .incompatibility_cause import RootCause
-from .partial_solution import PartialSolution
-from .result import SolverResult
-from .set_relation import SetRelation
-from .term import Term
+from typing import Optional
+from typing import Tuple
+
+from conda_lock._vendor.poetry.core.packages.dependency import Dependency
+
+from conda_lock._vendor.poetry.mixology.failure import SolveFailure
+from conda_lock._vendor.poetry.mixology.incompatibility import Incompatibility
+from conda_lock._vendor.poetry.mixology.incompatibility_cause import ConflictCause
+from conda_lock._vendor.poetry.mixology.incompatibility_cause import NoVersionsCause
+from conda_lock._vendor.poetry.mixology.incompatibility_cause import RootCause
+from conda_lock._vendor.poetry.mixology.partial_solution import PartialSolution
+from conda_lock._vendor.poetry.mixology.result import SolverResult
+from conda_lock._vendor.poetry.mixology.set_relation import SetRelation
+from conda_lock._vendor.poetry.mixology.term import Term
+from conda_lock._vendor.poetry.packages import PackageCollection
 
 
 if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.core.packages.project_package import ProjectPackage
+
+    from conda_lock._vendor.poetry.packages import DependencyPackage
     from conda_lock._vendor.poetry.puzzle.provider import Provider
 
 
 _conflict = object()
 
 
+DependencyCacheKey = Tuple[
+    str, Optional[str], Optional[str], Optional[str], Optional[str]
+]
+
+
+class DependencyCache:
+    """
+    A cache of the valid dependencies.
+
+    The key observation here is that during the search - except at backtracking
+    - once we have decided that a dependency is invalid, we never need to
+    check it again.
+    """
+
+    def __init__(self, provider: Provider) -> None:
+        self._provider = provider
+
+        # self._cache maps a package name to a stack of cached package lists,
+        # ordered by the decision level which added them to the cache. This is
+        # done so that when backtracking we can maintain cache entries from
+        # previous decision levels, while clearing cache entries from only the
+        # rolled back levels.
+        #
+        # In order to maintain the integrity of the cache, `clear_level()`
+        # needs to be called in descending order as decision levels are
+        # backtracked so that the correct items can be popped from the stack.
+        self._cache: dict[DependencyCacheKey, list[list[DependencyPackage]]] = (
+            collections.defaultdict(list)
+        )
+        self._cached_dependencies_by_level: dict[int, list[DependencyCacheKey]] = (
+            collections.defaultdict(list)
+        )
+
+        self._search_for_cached = functools.lru_cache(maxsize=128)(self._search_for)
+
+    def _search_for(
+        self,
+        dependency: Dependency,
+        key: DependencyCacheKey,
+    ) -> list[DependencyPackage]:
+        cache_entries = self._cache[key]
+        if cache_entries:
+            packages = [
+                p
+                for p in cache_entries[-1]
+                if dependency.constraint.allows(p.package.version)
+            ]
+        else:
+            packages = None
+
+        # provider.search_for() normally does not include pre-release packages
+        # (unless requested), but will include them if there are no other
+        # eligible package versions for a version constraint.
+        #
+        # Therefore, if the eligible versions have been filtered down to
+        # nothing, we need to call provider.search_for() again as it may return
+        # additional results this time.
+        if not packages:
+            packages = self._provider.search_for(dependency)
+
+        return packages
+
+    def search_for(
+        self,
+        dependency: Dependency,
+        decision_level: int,
+    ) -> list[DependencyPackage]:
+        key = (
+            dependency.name,
+            dependency.source_type,
+            dependency.source_url,
+            dependency.source_reference,
+            dependency.source_subdirectory,
+        )
+
+        # We could always use dependency.without_features() here,
+        # but for performance reasons we only do it if necessary.
+        packages = self._search_for_cached(
+            dependency.without_features() if dependency.features else dependency, key
+        )
+        if not self._cache[key] or self._cache[key][-1] is not packages:
+            self._cache[key].append(packages)
+            self._cached_dependencies_by_level[decision_level].append(key)
+
+        if dependency.features and packages:
+            # Use the cached dependency so that a possible explicit source is set.
+            return PackageCollection(
+                packages[0].dependency.with_features(dependency.features), packages
+            )
+
+        return packages
+
+    def clear_level(self, level: int) -> None:
+        if level in self._cached_dependencies_by_level:
+            self._search_for_cached.cache_clear()
+            for key in self._cached_dependencies_by_level.pop(level):
+                self._cache[key].pop()
+
+
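The stack discipline described in the __init__ comment above (one cached package list pushed per decision level, popped in descending order when backtracking) is easy to see in isolation. A minimal sketch, with hypothetical strings standing in for DependencyPackage lists:

    import collections

    cache = collections.defaultdict(list)     # key -> stack of package lists
    by_level = collections.defaultdict(list)  # decision level -> keys touched

    def record(key, packages, level):
        cache[key].append(packages)
        by_level[level].append(key)

    def clear_level(level):
        # must be called for rolled-back levels in descending order
        for key in by_level.pop(level, []):
            cache[key].pop()

    record("foo", ["foo-2.0", "foo-1.5", "foo-1.0"], level=0)
    record("foo", ["foo-1.5", "foo-1.0"], level=1)  # filtered at level 1
    clear_level(1)                                  # backtrack past level 1
    assert cache["foo"][-1] == ["foo-2.0", "foo-1.5", "foo-1.0"]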
 class VersionSolver:
     """
     The version solver that finds a set of package versions that satisfy the
@@ -41,30 +140,22 @@ class VersionSolver:
     on how this solver works.
     """
 
-    def __init__(
-        self,
-        root,  # type: ProjectPackage
-        provider,  # type: Provider
-        locked=None,  # type: Dict[str, Package]
-        use_latest=None,  # type: List[str]
-    ):
+    def __init__(self, root: ProjectPackage, provider: Provider) -> None:
         self._root = root
         self._provider = provider
-        self._locked = locked or {}
-
-        if use_latest is None:
-            use_latest = []
-
-        self._use_latest = use_latest
-
-        self._incompatibilities = {}  # type: Dict[str, List[Incompatibility]]
+        self._dependency_cache = DependencyCache(provider)
+        self._incompatibilities: dict[str, list[Incompatibility]] = {}
+        self._contradicted_incompatibilities: set[Incompatibility] = set()
+        self._contradicted_incompatibilities_by_level: dict[
+            int, set[Incompatibility]
+        ] = collections.defaultdict(set)
         self._solution = PartialSolution()
 
     @property
-    def solution(self):  # type: () -> PartialSolution
+    def solution(self) -> PartialSolution:
         return self._solution
 
-    def solve(self):  # type: () -> SolverResult
+    def solve(self) -> SolverResult:
         """
         Finds a set of dependencies that match the root package's constraints,
         or raises an error if no such set is available.
@@ -78,7 +169,7 @@ def solve(self):  # type: () -> SolverResult
         )
 
         try:
-            next = self._root.name
+            next: str | None = self._root.name
             while next is not None:
                 self._propagate(next)
                 next = self._choose_package_version()
@@ -88,20 +179,16 @@ def solve(self):  # type: () -> SolverResult
             raise
         finally:
             self._log(
-                "Version solving took {:.3f} seconds.\n"
-                "Tried {} solutions.".format(
-                    time.time() - start, self._solution.attempted_solutions
-                )
+                f"Version solving took {time.time() - start:.3f} seconds.\n"
+                f"Tried {self._solution.attempted_solutions} solutions."
             )
 
-    def _propagate(self, package):  # type: (str) -> None
+    def _propagate(self, package: str) -> None:
         """
         Performs unit propagation on incompatibilities transitively
         related to package to derive new assignments for _solution.
         """
-        changed = set()
-        changed.add(package)
-
+        changed = {package}
         while changed:
             package = changed.pop()
 
@@ -110,12 +197,15 @@ def _propagate(self, package):  # type: (str) -> None
             # we can derive stronger assignments sooner and more eagerly find
             # conflicts.
             for incompatibility in reversed(self._incompatibilities[package]):
+                if incompatibility in self._contradicted_incompatibilities:
+                    continue
+
                 result = self._propagate_incompatibility(incompatibility)
 
                 if result is _conflict:
                     # If the incompatibility is satisfied by the solution, we use
-                    # _resolve_conflict() to determine the root cause of the conflict as a
-                    # new incompatibility.
+                    # _resolve_conflict() to determine the root cause of the conflict as
+                    # a new incompatibility.
                     #
                     # It also backjumps to a point in the solution
                     # where that incompatibility will allow us to derive new assignments
@@ -129,11 +219,11 @@ def _propagate(self, package):  # type: (str) -> None
                     changed.add(str(self._propagate_incompatibility(root_cause)))
                     break
                 elif result is not None:
-                    changed.add(result)
+                    changed.add(str(result))
 
     def _propagate_incompatibility(
-        self, incompatibility
-    ):  # type: (Incompatibility) -> Union[str, _conflict, None]
+        self, incompatibility: Incompatibility
+    ) -> str | object | None:
         """
         If incompatibility is almost satisfied by _solution, adds the
         negation of the unsatisfied term to _solution.
@@ -156,12 +246,16 @@ def _propagate_incompatibility(
                 # If term is already contradicted by _solution, then
                 # incompatibility is contradicted as well and there's nothing new we
                 # can deduce from it.
-                return
+                self._contradicted_incompatibilities.add(incompatibility)
+                self._contradicted_incompatibilities_by_level[
+                    self._solution.decision_level
+                ].add(incompatibility)
+                return None
             elif relation == SetRelation.OVERLAPPING:
                 # If more than one term is inconclusive, we can't deduce anything about
                 # incompatibility.
                 if unsatisfied is not None:
-                    return
+                    return None
 
                 # If exactly one term in incompatibility is inconclusive, then it's
                 # almost satisfied and [term] is the unsatisfied term. We can add the
@@ -173,32 +267,34 @@ def _propagate_incompatibility(
         if unsatisfied is None:
             return _conflict
 
-        self._log(
-            "derived: {}{}".format(
-                "not " if unsatisfied.is_positive() else "", unsatisfied.dependency
-            )
-        )
+        self._contradicted_incompatibilities.add(incompatibility)
+        self._contradicted_incompatibilities_by_level[
+            self._solution.decision_level
+        ].add(incompatibility)
+
+        adverb = "not " if unsatisfied.is_positive() else ""
+        self._log(f"derived: {adverb}{unsatisfied.dependency}")
 
         self._solution.derive(
             unsatisfied.dependency, not unsatisfied.is_positive(), incompatibility
         )
 
-        return unsatisfied.dependency.complete_name
+        complete_name: str = unsatisfied.dependency.complete_name
+        return complete_name
 
-    def _resolve_conflict(
-        self, incompatibility
-    ):  # type: (Incompatibility) -> Incompatibility
+    def _resolve_conflict(self, incompatibility: Incompatibility) -> Incompatibility:
         """
         Given an incompatibility that's satisfied by _solution,
-        The `conflict resolution`_ constructs a new incompatibility that encapsulates the root
-        cause of the conflict and backtracks _solution until the new
+        the `conflict resolution`_ constructs a new incompatibility that encapsulates
+        the root cause of the conflict and backtracks _solution until the new
         incompatibility will allow _propagate() to deduce new assignments.
 
         Adds the new incompatibility to _incompatibilities and returns it.
 
-        .. _conflict resolution: https://github.com/dart-lang/pub/tree/master/doc/solver.md#conflict-resolution
+        .. _conflict resolution:
+        https://github.com/dart-lang/pub/tree/master/doc/solver.md#conflict-resolution
         """
-        self._log("conflict: {}".format(incompatibility))
+        self._log(f"conflict: {incompatibility}")
 
         new_incompatibility = False
         while not incompatibility.is_failure():
@@ -261,10 +357,22 @@ def _resolve_conflict(
             # than a derivation), then incompatibility is the root cause. We then
             # backjump to previous_satisfier_level, where incompatibility is
             # guaranteed to allow _propagate to produce more assignments.
+
+            # using assert to suppress mypy [union-attr]
+            assert most_recent_satisfier is not None
             if (
                 previous_satisfier_level < most_recent_satisfier.decision_level
                 or most_recent_satisfier.cause is None
             ):
+                for level in range(
+                    self._solution.decision_level, previous_satisfier_level, -1
+                ):
+                    if level in self._contradicted_incompatibilities_by_level:
+                        self._contradicted_incompatibilities.difference_update(
+                            self._contradicted_incompatibilities_by_level.pop(level),
+                        )
+                    self._dependency_cache.clear_level(level)
+
                 self._solution.backtrack(previous_satisfier_level)
                 if new_incompatibility:
                     self._add_incompatibility(incompatibility)
@@ -277,10 +385,9 @@ def _resolve_conflict(
             # true (that is, we know for sure no solution will satisfy the
             # incompatibility) while also approximating the intuitive notion of the
             # "root cause" of the conflict.
-            new_terms = []
-            for term in incompatibility.terms:
-                if term != most_recent_term:
-                    new_terms.append(term)
+            new_terms = [
+                term for term in incompatibility.terms if term != most_recent_term
+            ]
 
             for term in most_recent_satisfier.cause.terms:
                 if term.dependency != most_recent_satisfier.dependency:
@@ -289,17 +396,20 @@ def _resolve_conflict(
             # The most_recent_satisfier may not satisfy most_recent_term on its own
             # if there are a collection of constraints on most_recent_term that
             # only satisfy it together. For example, if most_recent_term is
-            # `foo ^1.0.0` and _solution contains `[foo >=1.0.0,
-            # foo <2.0.0]`, then most_recent_satisfier will be `foo <2.0.0` even
+            # `foo ^1.0.0` and _solution contains `[foo >=1.0.0,
+            # foo <2.0.0]`, then most_recent_satisfier will be `foo <2.0.0` even
             # though it doesn't totally satisfy `foo ^1.0.0`.
             #
             # In this case, we add `not (most_recent_satisfier \ most_recent_term)` to
             # the incompatibility as well, See the `algorithm documentation`_ for
             # details.
             #
-            # .. _algorithm documentation: https://github.com/dart-lang/pub/tree/master/doc/solver.md#conflict-resolution
+            # .. _algorithm documentation:
+            # https://github.com/dart-lang/pub/tree/master/doc/solver.md#conflict-resolution
             if difference is not None:
-                new_terms.append(difference.inverse)
+                inverse = difference.inverse
+                if inverse.dependency != most_recent_satisfier.dependency:
+                    new_terms.append(inverse)
 
             incompatibility = Incompatibility(
                 new_terms, ConflictCause(incompatibility, most_recent_satisfier.cause)
@@ -307,20 +417,16 @@ def _resolve_conflict(
             new_incompatibility = True
 
             partially = "" if difference is None else " partially"
-            bang = "!"
-            self._log(
-                "{} {} is{} satisfied by {}".format(
-                    bang, most_recent_term, partially, most_recent_satisfier
-                )
-            )
             self._log(
-                '{} which is caused by "{}"'.format(bang, most_recent_satisfier.cause)
+                f"! {most_recent_term} is{partially} satisfied by"
+                f" {most_recent_satisfier}"
             )
-            self._log("{} thus: {}".format(bang, incompatibility))
+            self._log(f'! which is caused by "{most_recent_satisfier.cause}"')
+            self._log(f"! thus: {incompatibility}")
 
         raise SolveFailure(incompatibility)
 
-    def _choose_package_version(self):  # type: () -> Union[str, None]
+    def _choose_package_version(self) -> str | None:
         """
         Tries to select a version of a required package.
 
@@ -330,77 +436,85 @@ def _choose_package_version(self):  # type: () -> Union[str, None]
         """
         unsatisfied = self._solution.unsatisfied
         if not unsatisfied:
-            return
-
-        # Prefer packages with as few remaining versions as possible,
-        # so that if a conflict is necessary it's forced quickly.
-        def _get_min(dependency):
-            if dependency.name in self._use_latest:
-                # If we're forced to use the latest version of a package, it effectively
-                # only has one version to choose from.
-                return not dependency.marker.is_any(), 1
-
-            locked = self._get_locked(dependency)
-            if locked and (
-                dependency.constraint.allows(locked.version)
-                or locked.is_prerelease()
-                and dependency.constraint.allows(locked.version.next_patch)
-            ):
-                return not dependency.marker.is_any(), 1
-
-            # VCS, URL, File or Directory dependencies
-            # represent a single version
-            if (
-                dependency.is_vcs()
-                or dependency.is_url()
-                or dependency.is_file()
-                or dependency.is_directory()
-            ):
-                return not dependency.marker.is_any(), 1
-
-            try:
-                return (
-                    not dependency.marker.is_any(),
-                    len(self._provider.search_for(dependency)),
+            return None
+
+        class Preference:
+            """
+            Preference is one of the criteria for choosing which dependency to solve
+            first. A higher value means that there are "more options" to satisfy
+            a dependency. A lower value takes precedence.
+            """
+
+            DIRECT_ORIGIN = 0
+            NO_CHOICE = 1
+            USE_LATEST = 2
+            LOCKED = 3
+            DEFAULT = 4
+
+        # The original algorithm proposes to prefer packages with as few remaining
+        # versions as possible, so that if a conflict is necessary it's forced quickly.
+        # https://github.com/dart-lang/pub/blob/master/doc/solver.md#decision-making
+        # However, this leads to the famous boto3 vs. urllib3 issue, so we prefer
+        # packages with more remaining versions (see
+        # https://github.com/python-poetry/poetry/pull/8255#issuecomment-1657198242
+        # for more details).
+        # In order to provide results that are as deterministic as possible
+        # and consistent between `poetry lock` and `poetry update`, the return value
+        # of two different dependencies should not be equal if possible.
+        def _get_min(dependency: Dependency) -> tuple[bool, int, int]:
+            # Direct origin dependencies must be handled first: we don't want to resolve
+            # a regular dependency for some package only to find later that we had a
+            # direct-origin dependency.
+            if dependency.is_direct_origin():
+                return False, Preference.DIRECT_ORIGIN, -1
+
+            is_specific_marker = not dependency.marker.is_any()
+
+            use_latest = dependency.name in self._provider.use_latest
+            if not use_latest:
+                locked = self._provider.get_locked(dependency)
+                if locked:
+                    return is_specific_marker, Preference.LOCKED, -1
+
+            num_packages = len(
+                self._dependency_cache.search_for(
+                    dependency, self._solution.decision_level
                 )
-            except ValueError:
-                return not dependency.marker.is_any(), 0
+            )
 
-        if len(unsatisfied) == 1:
-            dependency = unsatisfied[0]
-        else:
-            dependency = min(*unsatisfied, key=_get_min)
+            if num_packages < 2:
+                preference = Preference.NO_CHOICE
+            elif use_latest:
+                preference = Preference.USE_LATEST
+            else:
+                preference = Preference.DEFAULT
+            return is_specific_marker, preference, -num_packages
 
-        locked = self._get_locked(dependency)
-        if locked is None or not dependency.constraint.allows(locked.version):
-            try:
-                packages = self._provider.search_for(dependency)
-            except ValueError as e:
-                self._add_incompatibility(
-                    Incompatibility([Term(dependency, True)], PackageNotFoundCause(e))
-                )
-                return dependency.complete_name
+        dependency = min(unsatisfied, key=_get_min)
 
-            try:
-                version = packages[0]
-            except IndexError:
-                version = None
+        locked = self._provider.get_locked(dependency)
+        if locked is None:
+            packages = self._dependency_cache.search_for(
+                dependency, self._solution.decision_level
+            )
+            package = next(iter(packages), None)
 
-            if version is None:
+            if package is None:
                 # If there are no versions that satisfy the constraint,
                 # add an incompatibility that indicates that.
                 self._add_incompatibility(
                     Incompatibility([Term(dependency, True)], NoVersionsCause())
                 )
 
-                return dependency.complete_name
+                complete_name = dependency.complete_name
+                return complete_name
         else:
-            version = locked
+            package = locked
 
-        version = self._provider.complete_package(version)
+        package = self._provider.complete_package(package)
 
         conflict = False
-        for incompatibility in self._provider.incompatibilities_for(version):
+        for incompatibility in self._provider.incompatibilities_for(package):
             self._add_incompatibility(incompatibility)
 
             # If an incompatibility is already satisfied, then selecting version
@@ -409,27 +523,22 @@ def _get_min(dependency):
             # We'll continue adding its dependencies, then go back to
             # unit propagation which will guide us to choose a better version.
             conflict = conflict or all(
-                [
-                    term.dependency.complete_name == dependency.complete_name
-                    or self._solution.satisfies(term)
-                    for term in incompatibility.terms
-                ]
+                term.dependency.complete_name == dependency.complete_name
+                or self._solution.satisfies(term)
+                for term in incompatibility.terms
             )
 
         if not conflict:
-            self._solution.decide(version)
+            self._solution.decide(package.package)
             self._log(
-                "selecting {} ({})".format(
-                    version.complete_name, version.full_pretty_version
-                )
+                f"selecting {package.package.complete_name}"
+                f" ({package.package.full_pretty_version})"
             )
 
-        return dependency.complete_name
-
-    def _excludes_single_version(self, constraint):  # type: (Any) -> bool
-        return isinstance(VersionRange().difference(constraint), Version)
+        complete_name = dependency.complete_name
+        return complete_name
 
-    def _result(self):  # type: () -> SolverResult
+    def _result(self) -> SolverResult:
         """
         Creates a #SolverResult from the decisions in _solution
         """
@@ -441,8 +550,8 @@ def _result(self):  # type: () -> SolverResult
             self._solution.attempted_solutions,
         )
 
-    def _add_incompatibility(self, incompatibility):  # type: (Incompatibility) -> None
-        self._log("fact: {}".format(incompatibility))
+    def _add_incompatibility(self, incompatibility: Incompatibility) -> None:
+        self._log(f"fact: {incompatibility}")
 
         for term in incompatibility.terms:
             if term.dependency.complete_name not in self._incompatibilities:
@@ -458,18 +567,5 @@ def _add_incompatibility(self, incompatibility):  # type: (Incompatibility) -> N
                 incompatibility
             )
 
-    def _get_locked(self, dependency):  # type: (Dependency) -> Union[Package, None]
-        if dependency.name in self._use_latest:
-            return
-
-        locked = self._locked.get(dependency.name)
-        if not locked:
-            return
-
-        if not dependency.is_same_package_as(locked):
-            return
-
-        return locked
-
-    def _log(self, text):
+    def _log(self, text: str) -> None:
         self._provider.debug(text, self._solution.attempted_solutions)
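A note on _choose_package_version above: min(unsatisfied, key=_get_min) compares the (is_specific_marker, preference, -num_packages) tuples lexicographically, so any-marker dependencies come first, lower Preference ranks win, and (unlike the original pub algorithm) more candidate versions are preferred thanks to the negation. A sketch with hypothetical key tuples:

    DIRECT_ORIGIN, NO_CHOICE, USE_LATEST, LOCKED, DEFAULT = range(5)

    candidates = {  # hypothetical dependencies and their _get_min-style keys
        "a": (False, DEFAULT, -3),    # any marker, 3 candidate versions
        "b": (False, DEFAULT, -10),   # any marker, 10 candidate versions
        "c": (False, NO_CHOICE, -1),  # fewer than 2 versions: conflicts fast
        "d": (True, LOCKED, -1),      # locked, but carries a specific marker
    }

    order = sorted(candidates, key=candidates.get)
    # NO_CHOICE beats DEFAULT, -10 beats -3 (more versions first), and a
    # specific marker (True) sorts after any-marker dependencies.
    assert order == ["c", "b", "a", "d"]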
diff --git a/conda_lock/_vendor/poetry/packages/__init__.py b/conda_lock/_vendor/poetry/packages/__init__.py
index 555a8317..79f4647f 100644
--- a/conda_lock/_vendor/poetry/packages/__init__.py
+++ b/conda_lock/_vendor/poetry/packages/__init__.py
@@ -1,3 +1,8 @@
-from .dependency_package import DependencyPackage
-from .locker import Locker
-from .package_collection import PackageCollection
+from __future__ import annotations
+
+from conda_lock._vendor.poetry.packages.dependency_package import DependencyPackage
+from conda_lock._vendor.poetry.packages.locker import Locker
+from conda_lock._vendor.poetry.packages.package_collection import PackageCollection
+
+
+__all__ = ["DependencyPackage", "Locker", "PackageCollection"]
diff --git a/conda_lock/_vendor/poetry/packages/dependency_package.py b/conda_lock/_vendor/poetry/packages/dependency_package.py
index 9b83627e..ef7eac5e 100644
--- a/conda_lock/_vendor/poetry/packages/dependency_package.py
+++ b/conda_lock/_vendor/poetry/packages/dependency_package.py
@@ -1,51 +1,47 @@
-from typing import List
+from __future__ import annotations
 
-from conda_lock._vendor.poetry.core.packages.dependency import Dependency
-from conda_lock._vendor.poetry.core.packages.package import Package
+from typing import TYPE_CHECKING
 
 
-class DependencyPackage(object):
-    def __init__(self, dependency, package):  # type: (Dependency, Package) -> None
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.core.packages.dependency import Dependency
+    from conda_lock._vendor.poetry.core.packages.package import Package
+
+
+class DependencyPackage:
+    def __init__(self, dependency: Dependency, package: Package) -> None:
         self._dependency = dependency
         self._package = package
 
     @property
-    def dependency(self):  # type: () -> Dependency
+    def dependency(self) -> Dependency:
         return self._dependency
 
     @property
-    def package(self):  # type: () -> Package
+    def package(self) -> Package:
         return self._package
 
-    def clone(self):  # type: () -> DependencyPackage
+    def clone(self) -> DependencyPackage:
         return self.__class__(self._dependency, self._package.clone())
 
-    def with_features(self, features):  # type: (List[str]) -> "DependencyPackage"
+    def with_features(self, features: list[str]) -> DependencyPackage:
         return self.__class__(self._dependency, self._package.with_features(features))
 
-    def without_features(self):  # type: () -> "DependencyPackage"
+    def without_features(self) -> DependencyPackage:
         return self.with_features([])
 
-    def __getattr__(self, name):
-        return getattr(self._package, name)
-
-    def __setattr__(self, key, value):
-        if key in {"_dependency", "_package"}:
-            return super(DependencyPackage, self).__setattr__(key, value)
-
-        setattr(self._package, key, value)
-
-    def __str__(self):
+    def __str__(self) -> str:
         return str(self._package)
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         return repr(self._package)
 
-    def __hash__(self):
+    def __hash__(self) -> int:
         return hash(self._package)
 
-    def __eq__(self, other):
+    def __eq__(self, other: object) -> bool:
         if isinstance(other, DependencyPackage):
             other = other.package
 
-        return self._package == other
+        equal: bool = self._package == other
+        return equal
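Worth flagging in the rewrite above: the deleted __getattr__/__setattr__ pair silently proxied attribute access through to the wrapped package, which is opaque to type checkers; the new class requires going through .package explicitly. A hypothetical sketch of the two styles:

    class Pkg:
        version = "1.0"

    class Proxying:  # old style: implicit delegation
        def __init__(self, package):
            self._package = package

        def __getattr__(self, name):
            return getattr(self._package, name)

    class Explicit:  # new style: explicit access via .package
        def __init__(self, package):
            self.package = package

    assert Proxying(Pkg()).version == "1.0"          # invisible to mypy
    assert Explicit(Pkg()).package.version == "1.0"  # statically checkable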
diff --git a/conda_lock/_vendor/poetry/packages/direct_origin.py b/conda_lock/_vendor/poetry/packages/direct_origin.py
new file mode 100644
index 00000000..ca5502d8
--- /dev/null
+++ b/conda_lock/_vendor/poetry/packages/direct_origin.py
@@ -0,0 +1,113 @@
+from __future__ import annotations
+
+import functools
+
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+from conda_lock._vendor.poetry.core.packages.utils.link import Link
+
+from conda_lock._vendor.poetry.inspection.info import PackageInfo
+from conda_lock._vendor.poetry.inspection.info import PackageInfoError
+from conda_lock._vendor.poetry.utils.helpers import download_file
+from conda_lock._vendor.poetry.utils.helpers import get_file_hash
+from conda_lock._vendor.poetry.vcs.git import Git
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.core.packages.package import Package
+
+    from conda_lock._vendor.poetry.utils.cache import ArtifactCache
+
+
+@functools.lru_cache(maxsize=None)
+def _get_package_from_git(
+    url: str,
+    branch: str | None = None,
+    tag: str | None = None,
+    rev: str | None = None,
+    subdirectory: str | None = None,
+    source_root: Path | None = None,
+) -> Package:
+    source = Git.clone(
+        url=url,
+        source_root=source_root,
+        branch=branch,
+        tag=tag,
+        revision=rev,
+        clean=False,
+    )
+    revision = Git.get_revision(source)
+
+    path = Path(source.path)
+    if subdirectory:
+        path = path.joinpath(subdirectory)
+
+    package = DirectOrigin.get_package_from_directory(path)
+    package._source_type = "git"
+    package._source_url = url
+    package._source_reference = rev or tag or branch or "HEAD"
+    package._source_resolved_reference = revision
+    package._source_subdirectory = subdirectory
+
+    return package
+
+
+class DirectOrigin:
+    def __init__(self, artifact_cache: ArtifactCache) -> None:
+        self._artifact_cache = artifact_cache
+
+    @classmethod
+    def get_package_from_file(cls, file_path: Path) -> Package:
+        try:
+            package = PackageInfo.from_path(path=file_path).to_package(
+                root_dir=file_path
+            )
+        except PackageInfoError:
+            raise RuntimeError(
+                f"Unable to determine package info from path: {file_path}"
+            )
+
+        return package
+
+    @classmethod
+    def get_package_from_directory(cls, directory: Path) -> Package:
+        return PackageInfo.from_directory(path=directory).to_package(root_dir=directory)
+
+    def get_package_from_url(self, url: str) -> Package:
+        link = Link(url)
+        artifact = self._artifact_cache.get_cached_archive_for_link(
+            link, strict=True, download_func=download_file
+        )
+
+        package = self.get_package_from_file(artifact)
+        package.files = [
+            {"file": link.filename, "hash": "sha256:" + get_file_hash(artifact)}
+        ]
+
+        package._source_type = "url"
+        package._source_url = url
+
+        return package
+
+    @staticmethod
+    def get_package_from_vcs(
+        vcs: str,
+        url: str,
+        branch: str | None = None,
+        tag: str | None = None,
+        rev: str | None = None,
+        subdirectory: str | None = None,
+        source_root: Path | None = None,
+    ) -> Package:
+        if vcs != "git":
+            raise ValueError(f"Unsupported VCS dependency {vcs}")
+
+        return _get_package_from_git(
+            url=url,
+            branch=branch,
+            tag=tag,
+            rev=rev,
+            subdirectory=subdirectory,
+            source_root=source_root,
+        )
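A note on _get_package_from_git above: functools.lru_cache(maxsize=None) memoizes on the full argument tuple, so a given (url, branch, tag, rev, subdirectory, source_root) combination is cloned and inspected only once per process. The same pattern in isolation, with a hypothetical stand-in for the expensive clone:

    import functools

    calls = 0

    @functools.lru_cache(maxsize=None)
    def fake_clone(url, rev=None):
        global calls
        calls += 1  # stands in for Git.clone() plus metadata extraction
        return f"package-from-{url}@{rev or 'HEAD'}"

    fake_clone("https://example.com/repo.git", rev="abc123")
    fake_clone("https://example.com/repo.git", rev="abc123")  # cache hit
    assert calls == 1

Every argument must be hashable for lru_cache, which is presumably why the real function takes plain strings and an optional Path rather than a Dependency object.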
diff --git a/conda_lock/_vendor/poetry/packages/locker.py b/conda_lock/_vendor/poetry/packages/locker.py
index d5074dcb..db86763b 100644
--- a/conda_lock/_vendor/poetry/packages/locker.py
+++ b/conda_lock/_vendor/poetry/packages/locker.py
@@ -1,161 +1,198 @@
+from __future__ import annotations
+
 import json
 import logging
 import os
 import re
 
-from copy import deepcopy
 from hashlib import sha256
-from typing import Dict
-from typing import Iterable
-from typing import Iterator
-from typing import List
-from typing import Optional
-from typing import Sequence
-from typing import Set
-from typing import Tuple
-from typing import Union
-
-from tomlkit import array
-from tomlkit import document
-from tomlkit import inline_table
-from tomlkit import item
-from tomlkit import table
-from tomlkit.exceptions import TOMLKitError
-
-from conda_lock._vendor.poetry.repositories import Repository
-
-from conda_lock._vendor.poetry.core.packages import dependency_from_pep_508
+from pathlib import Path
+from typing import TYPE_CHECKING
+from typing import Any
+from typing import ClassVar
+from typing import cast
+
+from packaging.utils import canonicalize_name
+from conda_lock._vendor.poetry.core.constraints.version import Version
+from conda_lock._vendor.poetry.core.constraints.version import parse_constraint
 from conda_lock._vendor.poetry.core.packages.dependency import Dependency
 from conda_lock._vendor.poetry.core.packages.package import Package
-from conda_lock._vendor.poetry.core.semver import parse_constraint
-from conda_lock._vendor.poetry.core.semver.version import Version
-from conda_lock._vendor.poetry.core.toml.file import TOMLFile
 from conda_lock._vendor.poetry.core.version.markers import parse_marker
 from conda_lock._vendor.poetry.core.version.requirements import InvalidRequirement
-from conda_lock._vendor.poetry.packages import DependencyPackage
-from conda_lock._vendor.poetry.utils._compat import OrderedDict
-from conda_lock._vendor.poetry.utils._compat import Path
-from conda_lock._vendor.poetry.utils.extras import get_extra_package_names
-
-
-logger = logging.getLogger(__name__)
+from tomlkit import array
+from tomlkit import comment
+from tomlkit import document
+from tomlkit import inline_table
+from tomlkit import table
 
+from conda_lock._vendor.poetry.__version__ import __version__
+from conda_lock._vendor.poetry.toml.file import TOMLFile
+from conda_lock._vendor.poetry.utils._compat import tomllib
 
-class Locker(object):
 
-    _VERSION = "1.1"
+if TYPE_CHECKING:
+    from packaging.utils import NormalizedName
+    from conda_lock._vendor.poetry.core.packages.directory_dependency import DirectoryDependency
+    from conda_lock._vendor.poetry.core.packages.file_dependency import FileDependency
+    from conda_lock._vendor.poetry.core.packages.url_dependency import URLDependency
+    from conda_lock._vendor.poetry.core.packages.vcs_dependency import VCSDependency
+    from tomlkit.toml_document import TOMLDocument
 
-    _relevant_keys = ["dependencies", "dev-dependencies", "source", "extras"]
+    from conda_lock._vendor.poetry.repositories.lockfile_repository import LockfileRepository
 
-    def __init__(self, lock, local_config):  # type: (Path, dict) -> None
-        self._lock = TOMLFile(lock)
+logger = logging.getLogger(__name__)
+_GENERATED_IDENTIFIER = "@" + "generated"
+GENERATED_COMMENT = (
+    f"This file is automatically {_GENERATED_IDENTIFIER} by Poetry"
+    f" {__version__} and should not be changed by hand."
+)
+
+
+class Locker:
+    _VERSION = "2.0"
+    _READ_VERSION_RANGE = ">=1,<3"
+
+    _legacy_keys: ClassVar[list[str]] = [
+        "dependencies",
+        "source",
+        "extras",
+        "dev-dependencies",
+    ]
+    _relevant_keys: ClassVar[list[str]] = [*_legacy_keys, "group"]
+
+    def __init__(self, lock: Path, local_config: dict[str, Any]) -> None:
+        self._lock = lock
         self._local_config = local_config
-        self._lock_data = None
+        self._lock_data: dict[str, Any] | None = None
         self._content_hash = self._get_content_hash()
 
     @property
-    def lock(self):  # type: () -> TOMLFile
+    def lock(self) -> Path:
         return self._lock
 
     @property
-    def lock_data(self):
+    def lock_data(self) -> dict[str, Any]:
         if self._lock_data is None:
             self._lock_data = self._get_lock_data()
 
         return self._lock_data
 
-    def is_locked(self):  # type: () -> bool
+    def is_locked(self) -> bool:
         """
         Checks whether the locker has been locked (lockfile found).
         """
-        if not self._lock.exists():
-            return False
-
-        return "package" in self.lock_data
+        return self._lock.exists()
 
-    def is_fresh(self):  # type: () -> bool
+    def is_fresh(self) -> bool:
         """
         Checks whether the lock file is still up to date with the current hash.
         """
-        lock = self._lock.read()
+        with self.lock.open("rb") as f:
+            lock = tomllib.load(f)
         metadata = lock.get("metadata", {})
 
         if "content-hash" in metadata:
-            return self._content_hash == lock["metadata"]["content-hash"]
+            fresh: bool = self._content_hash == metadata["content-hash"]
+            return fresh
 
         return False
 
-    def locked_repository(
-        self, with_dev_reqs=False
-    ):  # type: (bool) -> Repository
+    def set_local_config(self, local_config: dict[str, Any]) -> None:
+        self._local_config = local_config
+        self._content_hash = self._get_content_hash()
+
+    def locked_repository(self) -> LockfileRepository:
         """
         Searches and returns a repository of locked packages.
         """
         from conda_lock._vendor.poetry.factory import Factory
+        from conda_lock._vendor.poetry.repositories.lockfile_repository import LockfileRepository
+
+        repository = LockfileRepository()
 
         if not self.is_locked():
-            return Repository()
+            return repository
 
         lock_data = self.lock_data
-        packages = Repository()
-
-        if with_dev_reqs:
-            locked_packages = lock_data["package"]
-        else:
-            locked_packages = [
-                p for p in lock_data["package"] if p["category"] == "main"
-            ]
+        locked_packages = cast("list[dict[str, Any]]", lock_data["package"])
 
         if not locked_packages:
-            return packages
+            return repository
 
         for info in locked_packages:
             source = info.get("source", {})
             source_type = source.get("type")
             url = source.get("url")
             if source_type in ["directory", "file"]:
-                url = self._lock.path.parent.joinpath(url).resolve().as_posix()
+                url = self.lock.parent.joinpath(url).resolve().as_posix()
 
+            name = info["name"]
             package = Package(
-                info["name"],
-                info["version"],
+                name,
                 info["version"],
                 source_type=source_type,
                 source_url=url,
                 source_reference=source.get("reference"),
                 source_resolved_reference=source.get("resolved_reference"),
+                source_subdirectory=source.get("subdirectory"),
             )
             package.description = info.get("description", "")
-            package.category = info["category"]
             package.optional = info["optional"]
-            if "hashes" in lock_data["metadata"]:
-                # Old lock so we create dummy files from the hashes
-                package.files = [
-                    {"name": h, "hash": h}
-                    for h in lock_data["metadata"]["hashes"][info["name"]]
-                ]
+            metadata = cast("dict[str, Any]", lock_data["metadata"])
+
+            # Storing of package files and hashes has been through a few generations in
+            # the lockfile, we can read them all:
+            #
+            # - latest and preferred is that this is read per package, from
+            #   package.files
+            # - oldest is that hashes were stored in metadata.hashes without filenames
+            # - in between those two, hashes were stored alongside filenames in
+            #   metadata.files
+            package_files = info.get("files")
+            if package_files is not None:
+                package.files = package_files
+            elif "hashes" in metadata:
+                hashes = cast("dict[str, Any]", metadata["hashes"])
+                package.files = [{"name": h, "hash": h} for h in hashes[name]]
+            elif source_type in {"git", "directory", "url"}:
+                package.files = []
             else:
-                package.files = lock_data["metadata"]["files"][info["name"]]
+                files = metadata["files"][name]
+                if source_type == "file":
+                    filename = Path(url).name
+                    package.files = [item for item in files if item["file"] == filename]
+                else:
+                    # Strictly speaking, this is not correct, but we cannot
+                    # always determine which files are correct because the
+                    # lockfile doesn't keep track of which files belong to
+                    # which package.
+                    package.files = files
 
             package.python_versions = info["python-versions"]
+
+            package_extras: dict[NormalizedName, list[Dependency]] = {}
             extras = info.get("extras", {})
             if extras:
                 for name, deps in extras.items():
-                    package.extras[name] = []
+                    name = canonicalize_name(name)
+                    package_extras[name] = []
 
                     for dep in deps:
                         try:
-                            dependency = dependency_from_pep_508(dep)
+                            dependency = Dependency.create_from_pep_508(dep)
                         except InvalidRequirement:
                             # handle lock files with invalid PEP 508
                             m = re.match(r"^(.+?)(?:\[(.+?)])?(?:\s+\((.+)\))?$", dep)
+                            if not m:
+                                raise
                             dep_name = m.group(1)
                             extras = m.group(2) or ""
                             constraint = m.group(3) or "*"
                             dependency = Dependency(
                                 dep_name, constraint, extras=extras.split(",")
                             )
-                        package.extras[name].append(dependency)
+                        package_extras[name].append(dependency)
+
+            package.extras = package_extras
 
             if "marker" in info:
                 package.marker = parse_marker(info["marker"])
@@ -174,10 +211,11 @@ def locked_repository(
                         package.marker = parse_marker(split_dep[1].strip())
 
             for dep_name, constraint in info.get("dependencies", {}).items():
-
-                root_dir = self._lock.path.parent
+                root_dir = self.lock.parent
                 if package.source_type == "directory":
-                    # root dir should be the source of the package relative to the lock path
+                    # root dir should be the source of the package relative to the lock
+                    # path
+                    assert package.source_url is not None
                     root_dir = Path(package.source_url)
 
                 if isinstance(constraint, list):
@@ -195,250 +233,75 @@ def locked_repository(
             if "develop" in info:
                 package.develop = info["develop"]
 
-            packages.add_package(package)
-
-        return packages
-
-    @staticmethod
-    def __get_locked_package(
-        _dependency, packages_by_name
-    ):  # type: (Dependency, Dict[str, List[Package]]) -> Optional[Package]
-        """
-        Internal helper to identify corresponding locked package using dependency
-        version constraints.
-        """
-        for _package in packages_by_name.get(_dependency.name, []):
-            if _dependency.constraint.allows(_package.version):
-                return _package
-        return None
-
-    @classmethod
-    def __walk_dependency_level(
-        cls,
-        dependencies,
-        level,
-        pinned_versions,
-        packages_by_name,
-        project_level_dependencies,
-        nested_dependencies,
-    ):  # type: (List[Dependency], int,  bool, Dict[str, List[Package]], Set[str], Dict[Tuple[str, str], Dependency]) -> Dict[Tuple[str, str], Dependency]
-        if not dependencies:
-            return nested_dependencies
-
-        next_level_dependencies = []
-
-        for requirement in dependencies:
-            key = (requirement.name, requirement.pretty_constraint)
-            locked_package = cls.__get_locked_package(requirement, packages_by_name)
-
-            if locked_package:
-                # create dependency from locked package to retain dependency metadata
-                # if this is not done, we can end-up with incorrect nested dependencies
-                marker = requirement.marker
-                requirement = locked_package.to_dependency()
-                requirement.marker = requirement.marker.intersect(marker)
-
-                key = (requirement.name, requirement.pretty_constraint)
-
-                if pinned_versions:
-                    requirement.set_constraint(
-                        locked_package.to_dependency().constraint
-                    )
-
-                for require in locked_package.requires:
-                    if require.marker.is_empty():
-                        require.marker = requirement.marker
-                    else:
-                        require.marker = require.marker.intersect(requirement.marker)
-
-                    require.marker = require.marker.intersect(locked_package.marker)
-
-                    if key not in nested_dependencies:
-                        next_level_dependencies.append(require)
-
-            if requirement.name in project_level_dependencies and level == 0:
-                # project level dependencies take precedence
-                continue
-
-            if not locked_package:
-                # we make a copy to avoid any side-effects
-                requirement = deepcopy(requirement)
-
-            if key not in nested_dependencies:
-                nested_dependencies[key] = requirement
-            else:
-                nested_dependencies[key].marker = nested_dependencies[
-                    key
-                ].marker.intersect(requirement.marker)
-
-        return cls.__walk_dependency_level(
-            dependencies=next_level_dependencies,
-            level=level + 1,
-            pinned_versions=pinned_versions,
-            packages_by_name=packages_by_name,
-            project_level_dependencies=project_level_dependencies,
-            nested_dependencies=nested_dependencies,
-        )
-
-    @classmethod
-    def get_project_dependencies(
-        cls, project_requires, locked_packages, pinned_versions=False, with_nested=False
-    ):  # type: (List[Dependency], List[Package], bool, bool) -> Iterable[Dependency]
-        # group packages entries by name, this is required because requirement might use different constraints
-        packages_by_name = {}
-        for pkg in locked_packages:
-            if pkg.name not in packages_by_name:
-                packages_by_name[pkg.name] = []
-            packages_by_name[pkg.name].append(pkg)
-
-        project_level_dependencies = set()
-        dependencies = []
-
-        for dependency in project_requires:
-            dependency = deepcopy(dependency)
-            locked_package = cls.__get_locked_package(dependency, packages_by_name)
-            if locked_package:
-                locked_dependency = locked_package.to_dependency()
-                locked_dependency.marker = dependency.marker.intersect(
-                    locked_package.marker
-                )
-
-                if not pinned_versions:
-                    locked_dependency.set_constraint(dependency.constraint)
-
-                dependency = locked_dependency
-
-            project_level_dependencies.add(dependency.name)
-            dependencies.append(dependency)
-
-        if not with_nested:
-            # return only with project level dependencies
-            return dependencies
-
-        nested_dependencies = cls.__walk_dependency_level(
-            dependencies=dependencies,
-            level=0,
-            pinned_versions=pinned_versions,
-            packages_by_name=packages_by_name,
-            project_level_dependencies=project_level_dependencies,
-            nested_dependencies=dict(),
-        )
+            repository.add_package(package)
 
-        # Merge same dependencies using marker union
-        for requirement in dependencies:
-            key = (requirement.name, requirement.pretty_constraint)
-            if key not in nested_dependencies:
-                nested_dependencies[key] = requirement
-            else:
-                nested_dependencies[key].marker = nested_dependencies[key].marker.union(
-                    requirement.marker
-                )
-
-        return sorted(nested_dependencies.values(), key=lambda x: x.name.lower())
-
-    def get_project_dependency_packages(
-        self, project_requires, dev=False, extras=None
-    ):  # type: (List[Dependency], bool, Optional[Union[bool, Sequence[str]]]) -> Iterator[DependencyPackage]
-        repository = self.locked_repository(with_dev_reqs=dev)
-
-        # Build a set of all packages required by our selected extras
-        extra_package_names = (
-            None if (isinstance(extras, bool) and extras is True) else ()
-        )
-
-        if extra_package_names is not None:
-            extra_package_names = set(
-                get_extra_package_names(
-                    repository.packages, self.lock_data.get("extras", {}), extras or (),
-                )
-            )
-
-        # If a package is optional and we haven't opted in to it, do not select
-        selected = []
-        for dependency in project_requires:
-            try:
-                package = repository.find_packages(dependency=dependency)[0]
-            except IndexError:
-                continue
-
-            if extra_package_names is not None and (
-                package.optional and package.name not in extra_package_names
-            ):
-                # a package is locked as optional, but is not activated via extras
-                continue
-
-            selected.append(dependency)
+        return repository
 
-        for dependency in self.get_project_dependencies(
-            project_requires=selected,
-            locked_packages=repository.packages,
-            with_nested=True,
-        ):
-            try:
-                package = repository.find_packages(dependency=dependency)[0]
-            except IndexError:
-                continue
+    def set_lock_data(self, root: Package, packages: list[Package]) -> bool:
+        """Store lock data and eventually persist to the lock file"""
+        lock = self._compute_lock_data(root, packages)
 
-            for extra in dependency.extras:
-                package.requires_extras.append(extra)
+        if self._should_write(lock):
+            self._write_lock_data(lock)
+            return True
 
-            yield DependencyPackage(dependency=dependency, package=package)
+        return False
 
-    def set_lock_data(self, root, packages):  # type: (...) -> bool
-        files = table()
-        packages = self._lock_packages(packages)
+    def _compute_lock_data(
+        self, root: Package, packages: list[Package]
+    ) -> TOMLDocument:
+        package_specs = self._lock_packages(packages)
         # Retrieving hashes
-        for package in packages:
-            if package["name"] not in files:
-                files[package["name"]] = []
+        for package in package_specs:
+            files = array()
 
             for f in package["files"]:
                 file_metadata = inline_table()
                 for k, v in sorted(f.items()):
                     file_metadata[k] = v
 
-                files[package["name"]].append(file_metadata)
+                files.append(file_metadata)
 
-            if files[package["name"]]:
-                files[package["name"]] = item(files[package["name"]]).multiline(True)
-
-            del package["files"]
+            package["files"] = files.multiline(True)
 
         lock = document()
-        lock["package"] = packages
+        lock.add(comment(GENERATED_COMMENT))
+        lock["package"] = package_specs
 
         if root.extras:
             lock["extras"] = {
-                extra: [dep.pretty_name for dep in deps]
+                extra: sorted(dep.pretty_name for dep in deps)
                 for extra, deps in sorted(root.extras.items())
             }
 
-        lock["metadata"] = OrderedDict(
-            [
-                ("lock-version", self._VERSION),
-                ("python-versions", root.python_versions),
-                ("content-hash", self._content_hash),
-                ("files", files),
-            ]
-        )
-
-        if not self.is_locked() or lock != self.lock_data:
-            self._write_lock_data(lock)
-
-            return True
+        lock["metadata"] = {
+            "lock-version": self._VERSION,
+            "python-versions": root.python_versions,
+            "content-hash": self._content_hash,
+        }
 
-        return False
+        return lock
 
-    def _write_lock_data(self, data):
-        self.lock.write(data)
+    def _should_write(self, lock: TOMLDocument) -> bool:
+        # if lock file exists: compare with existing lock data
+        do_write = True
+        if self.is_locked():
+            try:
+                lock_data = self.lock_data
+            except RuntimeError:
+                # incompatible, invalid or no lock file
+                pass
+            else:
+                do_write = lock != lock_data
+        return do_write
 
-        # Checking lock file data consistency
-        if data != self.lock.read():
-            raise RuntimeError("Inconsistent lock file data.")
+    def _write_lock_data(self, data: TOMLDocument) -> None:
+        lockfile = TOMLFile(self.lock)
+        lockfile.write(data)
 
         self._lock_data = None
 
-    def _get_content_hash(self):  # type: () -> str
+    def _get_content_hash(self) -> str:
         """
         Returns the sha256 hash of the sorted content of the pyproject file.
         """
@@ -446,36 +309,44 @@ def _get_content_hash(self):  # type: () -> str
 
         relevant_content = {}
         for key in self._relevant_keys:
-            relevant_content[key] = content.get(key)
+            data = content.get(key)
+
+            if data is None and key not in self._legacy_keys:
+                continue
 
-        content_hash = sha256(
-            json.dumps(relevant_content, sort_keys=True).encode()
-        ).hexdigest()
+            relevant_content[key] = data
 
-        return content_hash
+        return sha256(json.dumps(relevant_content, sort_keys=True).encode()).hexdigest()
 
-    def _get_lock_data(self):  # type: () -> dict
-        if not self._lock.exists():
+    def _get_lock_data(self) -> dict[str, Any]:
+        if not self.lock.exists():
             raise RuntimeError("No lockfile found. Unable to read locked packages")
 
-        try:
-            lock_data = self._lock.read()
-        except TOMLKitError as e:
-            raise RuntimeError("Unable to read the lock file ({}).".format(e))
+        with self.lock.open("rb") as f:
+            try:
+                lock_data = tomllib.load(f)
+            except tomllib.TOMLDecodeError as e:
+                raise RuntimeError(f"Unable to read the lock file ({e}).")
+
+        # if the lockfile doesn't contain a metadata section at all,
+        # it probably needs to be rebuilt completely
+        if "metadata" not in lock_data:
+            raise RuntimeError(
+                "The lock file does not have a metadata entry.\n"
+                "Regenerate the lock file with the `poetry lock` command."
+            )
 
-        lock_version = Version.parse(lock_data["metadata"].get("lock-version", "1.0"))
+        metadata = lock_data["metadata"]
+        lock_version = Version.parse(metadata.get("lock-version", "1.0"))
         current_version = Version.parse(self._VERSION)
-        # We expect the locker to be able to read lock files
-        # from the same semantic versioning range
-        accepted_versions = parse_constraint(
-            "^{}".format(Version(current_version.major, 0))
-        )
+        accepted_versions = parse_constraint(self._READ_VERSION_RANGE)
         lock_version_allowed = accepted_versions.allows(lock_version)
         if lock_version_allowed and current_version < lock_version:
             logger.warning(
-                "The lock file might not be compatible with the current version of Poetry.\n"
-                "Upgrade Poetry to ensure the lock file is read properly or, alternatively, "
-                "regenerate the lock file with the `poetry lock` command."
+                "The lock file might not be compatible with the current version of"
+                " Poetry.\nUpgrade Poetry to ensure the lock file is read properly or,"
+                " alternatively, regenerate the lock file with the `poetry lock`"
+                " command."
             )
         elif not lock_version_allowed:
             raise RuntimeError(
@@ -486,34 +357,54 @@ def _get_lock_data(self):  # type: () -> dict
 
         return lock_data
 
-    def _lock_packages(
-        self, packages
-    ):  # type: (List['poetry.packages.Package']) -> list
+    def _lock_packages(self, packages: list[Package]) -> list[dict[str, Any]]:
         locked = []
 
-        for package in sorted(packages, key=lambda x: x.name):
+        for package in sorted(
+            packages,
+            key=lambda x: (
+                x.name,
+                x.version,
+                x.source_type or "",
+                x.source_url or "",
+                x.source_subdirectory or "",
+                x.source_reference or "",
+                x.source_resolved_reference or "",
+            ),
+        ):
             spec = self._dump_package(package)
 
             locked.append(spec)
 
         return locked
 
-    def _dump_package(self, package):  # type: (Package) -> dict
-        dependencies = OrderedDict()
-        for dependency in sorted(package.requires, key=lambda d: d.name):
-            if dependency.pretty_name not in dependencies:
-                dependencies[dependency.pretty_name] = []
+    def _dump_package(self, package: Package) -> dict[str, Any]:
+        dependencies: dict[str, list[Any]] = {}
+        for dependency in sorted(
+            package.requires,
+            key=lambda d: d.name,
+        ):
+            dependencies.setdefault(dependency.pretty_name, [])
 
             constraint = inline_table()
 
-            if dependency.is_directory() or dependency.is_file():
+            if dependency.is_directory():
+                dependency = cast("DirectoryDependency", dependency)
                 constraint["path"] = dependency.path.as_posix()
 
-                if dependency.is_directory() and dependency.develop:
+                if dependency.develop:
                     constraint["develop"] = True
+
+            elif dependency.is_file():
+                dependency = cast("FileDependency", dependency)
+                constraint["path"] = dependency.path.as_posix()
+
             elif dependency.is_url():
+                dependency = cast("URLDependency", dependency)
                 constraint["url"] = dependency.url
+
             elif dependency.is_vcs():
+                dependency = cast("VCSDependency", dependency)
                 constraint[dependency.vcs] = dependency.source
 
                 if dependency.branch:
@@ -522,6 +413,10 @@ def _dump_package(self, package):  # type: (Package) -> dict
                     constraint["tag"] = dependency.tag
                 elif dependency.rev:
                     constraint["rev"] = dependency.rev
+
+                if dependency.directory:
+                    constraint["subdirectory"] = dependency.directory
+
             else:
                 constraint["version"] = str(dependency.pretty_constraint)
 
@@ -538,26 +433,23 @@ def _dump_package(self, package):  # type: (Package) -> dict
 
         # All the constraints should have the same type,
         # but we want to simplify them if it's possible
-        for dependency, constraints in tuple(dependencies.items()):
+        for dependency_name, constraints in dependencies.items():
             if all(
                 len(constraint) == 1 and "version" in constraint
                 for constraint in constraints
             ):
-                dependencies[dependency] = [
+                dependencies[dependency_name] = [
                     constraint["version"] for constraint in constraints
                 ]
 
-        data = OrderedDict(
-            [
-                ("name", package.pretty_name),
-                ("version", package.pretty_version),
-                ("description", package.description or ""),
-                ("category", package.category),
-                ("optional", package.optional),
-                ("python-versions", package.python_versions),
-                ("files", sorted(package.files, key=lambda x: x["file"])),
-            ]
-        )
+        data: dict[str, Any] = {
+            "name": package.pretty_name,
+            "version": package.pretty_version,
+            "description": package.description or "",
+            "optional": package.optional,
+            "python-versions": package.python_versions,
+            "files": sorted(package.files, key=lambda x: x["file"]),
+        }
 
         if dependencies:
             data["dependencies"] = table()
@@ -570,14 +462,9 @@ def _dump_package(self, package):  # type: (Package) -> dict
                         data["dependencies"][k].append(constraint)
 
         if package.extras:
-            extras = OrderedDict()
+            extras = {}
             for name, deps in sorted(package.extras.items()):
-                # TODO: This should use dep.to_pep_508() once this is fixed
-                # https://github.com/python-poetry/poetry-core/pull/102
-                extras[name] = sorted(
-                    dep.base_pep_508_name if not dep.constraint.is_any() else dep.name
-                    for dep in deps
-                )
+                extras[name] = sorted(dep.base_pep_508_name for dep in deps)
 
             data["extras"] = extras
 
@@ -587,11 +474,12 @@ def _dump_package(self, package):  # type: (Package) -> dict
                 # The lock file should only store paths relative to the root project
                 url = Path(
                     os.path.relpath(
-                        Path(url).as_posix(), self._lock.path.parent.as_posix()
+                        Path(url).resolve(),
+                        Path(self.lock.parent).resolve(),
                     )
                 ).as_posix()
 
-            data["source"] = OrderedDict()
+            data["source"] = {}
 
             if package.source_type:
                 data["source"]["type"] = package.source_type
@@ -604,12 +492,10 @@ def _dump_package(self, package):  # type: (Package) -> dict
             if package.source_resolved_reference:
                 data["source"]["resolved_reference"] = package.source_resolved_reference
 
+            if package.source_subdirectory:
+                data["source"]["subdirectory"] = package.source_subdirectory
+
             if package.source_type in ["directory", "git"]:
                 data["develop"] = package.develop
 
         return data
-
-
-class NullLocker(Locker):
-    def set_lock_data(self, root, packages):  # type: (Package, List[Package]) -> None
-        pass
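The rewritten locker separates computing the lock document from deciding whether to write it: _compute_lock_data builds a tomlkit document (multiline "files" arrays, sorted extras, a trimmed metadata table) and _should_write compares it with the existing lock data, so the file is only rewritten when something actually changed. Below is a minimal sketch of that compute/compare/write flow using the same tomlkit calls as the hunk above; the GENERATED_COMMENT value and the lock path are illustrative, not the vendored constants.

from pathlib import Path

import tomlkit
from tomlkit import array, comment, document, inline_table

GENERATED_COMMENT = "This file is automatically generated."  # stand-in value


def compute_lock_document(package_specs: list[dict]) -> tomlkit.TOMLDocument:
    # Render each package's file hashes as a multiline array of inline tables.
    for spec in package_specs:
        files = array()
        for f in spec["files"]:
            meta = inline_table()
            for k, v in sorted(f.items()):
                meta[k] = v
            files.append(meta)
        spec["files"] = files.multiline(True)

    lock = document()
    lock.add(comment(GENERATED_COMMENT))
    lock["package"] = package_specs
    return lock


def write_if_changed(path: Path, lock: tomlkit.TOMLDocument) -> bool:
    # Mirror _should_write(): skip the write when the content is unchanged.
    if path.exists() and tomlkit.parse(path.read_text()) == lock:
        return False
    path.write_text(tomlkit.dumps(lock))
    return True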
diff --git a/conda_lock/_vendor/poetry/packages/package_collection.py b/conda_lock/_vendor/poetry/packages/package_collection.py
index e10ea635..fe77136a 100644
--- a/conda_lock/_vendor/poetry/packages/package_collection.py
+++ b/conda_lock/_vendor/poetry/packages/package_collection.py
@@ -1,22 +1,35 @@
-from .dependency_package import DependencyPackage
+from __future__ import annotations
 
+from typing import TYPE_CHECKING
+from typing import List
 
-class PackageCollection(list):
-    def __init__(self, dependency, packages=None):
-        self._dependency = dependency
+from conda_lock._vendor.poetry.packages.dependency_package import DependencyPackage
+
+
+if TYPE_CHECKING:
+    from collections.abc import Iterable
 
-        if packages is None:
-            packages = []
+    from conda_lock._vendor.poetry.core.packages.dependency import Dependency
+    from conda_lock._vendor.poetry.core.packages.package import Package
+
+
+class PackageCollection(List[DependencyPackage]):
+    def __init__(
+        self,
+        dependency: Dependency,
+        packages: Iterable[Package | DependencyPackage] = (),
+    ) -> None:
+        self._dependency = dependency
 
-        super(PackageCollection, self).__init__()
+        super().__init__()
 
         for package in packages:
             self.append(package)
 
-    def append(self, package):
+    def append(self, package: Package | DependencyPackage) -> None:
         if isinstance(package, DependencyPackage):
             package = package.package
 
         package = DependencyPackage(self._dependency, package)
 
-        return super(PackageCollection, self).append(package)
+        return super().append(package)
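PackageCollection now subclasses List[DependencyPackage], and append() coerces plain Package objects, so every element ends up as a DependencyPackage bound to the collection's dependency. A short usage sketch under that reading; the concrete names and versions are made up.

from conda_lock._vendor.poetry.core.packages.dependency import Dependency
from conda_lock._vendor.poetry.core.packages.package import Package
from conda_lock._vendor.poetry.packages.package_collection import PackageCollection

dep = Dependency("requests", ">=2.0")
collection = PackageCollection(dep, [Package("requests", "2.31.0")])

# Plain Package instances are wrapped on append ...
collection.append(Package("requests", "2.32.0"))

# ... so every element carries the collection's own dependency.
assert all(item.dependency is dep for item in collection)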
diff --git a/conda_lock/_vendor/poetry/plugins/__init__.py b/conda_lock/_vendor/poetry/plugins/__init__.py
new file mode 100644
index 00000000..038a620a
--- /dev/null
+++ b/conda_lock/_vendor/poetry/plugins/__init__.py
@@ -0,0 +1,7 @@
+from __future__ import annotations
+
+from conda_lock._vendor.poetry.plugins.application_plugin import ApplicationPlugin
+from conda_lock._vendor.poetry.plugins.plugin import Plugin
+
+
+__all__ = ["ApplicationPlugin", "Plugin"]
diff --git a/conda_lock/_vendor/poetry/plugins/application_plugin.py b/conda_lock/_vendor/poetry/plugins/application_plugin.py
new file mode 100644
index 00000000..6f2b284d
--- /dev/null
+++ b/conda_lock/_vendor/poetry/plugins/application_plugin.py
@@ -0,0 +1,27 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from conda_lock._vendor.poetry.plugins.base_plugin import BasePlugin
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.console.application import Application
+    from conda_lock._vendor.poetry.console.commands.command import Command
+
+
+class ApplicationPlugin(BasePlugin):
+    """
+    Base class for application plugins.
+    """
+
+    group = "poetry.application.plugin"
+
+    @property
+    def commands(self) -> list[type[Command]]:
+        return []
+
+    def activate(self, application: Application) -> None:
+        for command in self.commands:
+            assert command.name is not None
+            application.command_loader.register_factory(command.name, command)
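ApplicationPlugin.activate() registers every command returned by the commands property with the application's command loader. A hypothetical plugin contributing one command might look like the sketch below; GreetCommand and its behavior are invented, and a real Poetry plugin would typically subclass poetry.console.commands.command.Command rather than the cleo base class used here.

from conda_lock._vendor.cleo.commands.command import Command
from conda_lock._vendor.poetry.plugins.application_plugin import ApplicationPlugin


class GreetCommand(Command):
    name = "greet"
    description = "Print a greeting."

    def handle(self) -> int:
        self.line("Hello from a plugin!")
        return 0


class GreetPlugin(ApplicationPlugin):
    @property
    def commands(self) -> list[type[Command]]:
        # activate() will call command_loader.register_factory("greet", GreetCommand).
        return [GreetCommand]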
diff --git a/conda_lock/_vendor/poetry/plugins/base_plugin.py b/conda_lock/_vendor/poetry/plugins/base_plugin.py
new file mode 100644
index 00000000..07146060
--- /dev/null
+++ b/conda_lock/_vendor/poetry/plugins/base_plugin.py
@@ -0,0 +1,21 @@
+from __future__ import annotations
+
+from abc import abstractmethod
+
+
+class BasePlugin:
+    """
+    Base class for all plugin types
+
+    The `activate()` method must be implemented and receives the Poetry instance.
+    """
+
+    PLUGIN_API_VERSION = "1.0.0"
+
+    @property
+    @abstractmethod
+    def group(self) -> str:
+        """
+        Name of the entry point group the plugin belongs to.
+        """
+        raise NotImplementedError()
diff --git a/conda_lock/_vendor/poetry/plugins/plugin.py b/conda_lock/_vendor/poetry/plugins/plugin.py
new file mode 100644
index 00000000..04b441dc
--- /dev/null
+++ b/conda_lock/_vendor/poetry/plugins/plugin.py
@@ -0,0 +1,24 @@
+from __future__ import annotations
+
+from abc import abstractmethod
+from typing import TYPE_CHECKING
+
+from conda_lock._vendor.poetry.plugins.base_plugin import BasePlugin
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.cleo.io.io import IO
+
+    from conda_lock._vendor.poetry.poetry import Poetry
+
+
+class Plugin(BasePlugin):
+    """
+    Generic plugin not related to the console application.
+    """
+
+    group = "poetry.plugin"
+
+    @abstractmethod
+    def activate(self, poetry: Poetry, io: IO) -> None:
+        raise NotImplementedError()
diff --git a/conda_lock/_vendor/poetry/plugins/plugin_manager.py b/conda_lock/_vendor/poetry/plugins/plugin_manager.py
new file mode 100644
index 00000000..568e105b
--- /dev/null
+++ b/conda_lock/_vendor/poetry/plugins/plugin_manager.py
@@ -0,0 +1,83 @@
+from __future__ import annotations
+
+import logging
+
+from typing import TYPE_CHECKING
+
+from conda_lock._vendor.poetry.plugins.application_plugin import ApplicationPlugin
+from conda_lock._vendor.poetry.plugins.plugin import Plugin
+from conda_lock._vendor.poetry.utils._compat import metadata
+
+
+if TYPE_CHECKING:
+    from typing import Any
+
+    from conda_lock._vendor.poetry.utils.env import Env
+
+
+logger = logging.getLogger(__name__)
+
+
+class PluginManager:
+    """
+    This class registers and activates plugins.
+    """
+
+    def __init__(self, group: str, disable_plugins: bool = False) -> None:
+        self._group = group
+        self._disable_plugins = disable_plugins
+        self._plugins: list[Plugin] = []
+
+    def load_plugins(self, env: Env | None = None) -> None:
+        if self._disable_plugins:
+            return
+
+        plugin_entrypoints = self.get_plugin_entry_points(env=env)
+
+        for ep in plugin_entrypoints:
+            self._load_plugin_entry_point(ep)
+
+    @staticmethod
+    def _is_plugin_candidate(ep: metadata.EntryPoint, env: Env | None = None) -> bool:
+        """
+        Helper method to check if a given entry point is valid as a plugin candidate.
+        When an environment is specified, the entry point's associated distribution
+        should be installed, and discoverable in the given environment.
+        """
+        return env is None or (
+            ep.dist is not None
+            and env.site_packages.find_distribution(ep.dist.name) is not None
+        )
+
+    def get_plugin_entry_points(
+        self, env: Env | None = None
+    ) -> list[metadata.EntryPoint]:
+        return [
+            ep
+            for ep in metadata.entry_points(group=self._group)
+            if self._is_plugin_candidate(ep, env)
+        ]
+
+    def add_plugin(self, plugin: Plugin) -> None:
+        if not isinstance(plugin, (Plugin, ApplicationPlugin)):
+            raise ValueError(
+                "The Poetry plugin must be an instance of Plugin or ApplicationPlugin"
+            )
+
+        self._plugins.append(plugin)
+
+    def activate(self, *args: Any, **kwargs: Any) -> None:
+        for plugin in self._plugins:
+            plugin.activate(*args, **kwargs)
+
+    def _load_plugin_entry_point(self, ep: metadata.EntryPoint) -> None:
+        logger.debug("Loading the %s plugin", ep.name)
+
+        plugin = ep.load()  # type: ignore[no-untyped-call]
+
+        if not issubclass(plugin, (Plugin, ApplicationPlugin)):
+            raise ValueError(
+                "The Poetry plugin must be an instance of Plugin or ApplicationPlugin"
+            )
+
+        self.add_plugin(plugin())
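PluginManager discovers plugins through importlib-metadata entry points in its group and instantiates each one. A stripped-down sketch of the same discovery loop, written against the standard library; the vendored code goes through poetry.utils._compat.metadata and additionally filters candidates against a target environment.

import logging
from importlib import metadata

logger = logging.getLogger(__name__)


def load_group(group: str = "poetry.plugin") -> list[object]:
    # entry_points(group=...) needs Python >= 3.10; older interpreters would
    # use the backported importlib_metadata package instead.
    plugins = []
    for ep in metadata.entry_points(group=group):
        logger.debug("Loading the %s plugin", ep.name)
        plugin_class = ep.load()  # import whatever the entry point references
        plugins.append(plugin_class())  # instantiate, as _load_plugin_entry_point does
    return plugins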
diff --git a/conda_lock/_vendor/poetry/poetry.py b/conda_lock/_vendor/poetry/poetry.py
index b18eb86d..f1ff10ba 100644
--- a/conda_lock/_vendor/poetry/poetry.py
+++ b/conda_lock/_vendor/poetry/poetry.py
@@ -1,57 +1,97 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+from typing import Any
+from typing import cast
 
-from conda_lock._vendor.poetry.core.packages import ProjectPackage
 from conda_lock._vendor.poetry.core.poetry import Poetry as BasePoetry
 
-from .__version__ import __version__
-from .config.config import Config
-from .packages import Locker
-from .repositories.pool import Pool
-from .utils._compat import Path
+from conda_lock._vendor.poetry.__version__ import __version__
+from conda_lock._vendor.poetry.config.source import Source
+from conda_lock._vendor.poetry.pyproject.toml import PyProjectTOML
 
 
-class Poetry(BasePoetry):
+if TYPE_CHECKING:
+    from pathlib import Path
+
+    from conda_lock._vendor.poetry.core.packages.project_package import ProjectPackage
+
+    from conda_lock._vendor.poetry.config.config import Config
+    from conda_lock._vendor.poetry.packages.locker import Locker
+    from conda_lock._vendor.poetry.plugins.plugin_manager import PluginManager
+    from conda_lock._vendor.poetry.repositories.repository_pool import RepositoryPool
+    from conda_lock._vendor.poetry.toml import TOMLFile
+
 
+class Poetry(BasePoetry):
     VERSION = __version__
 
     def __init__(
         self,
-        file,  # type: Path
-        local_config,  # type: dict
-        package,  # type: ProjectPackage
-        locker,  # type: Locker
-        config,  # type: Config
-    ):
-        super(Poetry, self).__init__(file, local_config, package)
+        file: Path,
+        local_config: dict[str, Any],
+        package: ProjectPackage,
+        locker: Locker,
+        config: Config,
+        disable_cache: bool = False,
+    ) -> None:
+        from conda_lock._vendor.poetry.repositories.repository_pool import RepositoryPool
+
+        super().__init__(file, local_config, package, pyproject_type=PyProjectTOML)
 
         self._locker = locker
         self._config = config
-        self._pool = Pool()
+        self._pool = RepositoryPool(config=config)
+        self._plugin_manager: PluginManager | None = None
+        self._disable_cache = disable_cache
+
+    @property
+    def pyproject(self) -> PyProjectTOML:
+        pyproject = super().pyproject
+        return cast("PyProjectTOML", pyproject)
 
     @property
-    def locker(self):  # type: () -> Locker
+    def file(self) -> TOMLFile:
+        return self.pyproject.file
+
+    @property
+    def locker(self) -> Locker:
         return self._locker
 
     @property
-    def pool(self):  # type: () -> Pool
+    def pool(self) -> RepositoryPool:
         return self._pool
 
     @property
-    def config(self):  # type: () -> Config
+    def config(self) -> Config:
         return self._config
 
-    def set_locker(self, locker):  # type: (Locker) -> Poetry
+    @property
+    def disable_cache(self) -> bool:
+        return self._disable_cache
+
+    def set_locker(self, locker: Locker) -> Poetry:
         self._locker = locker
 
         return self
 
-    def set_pool(self, pool):  # type: (Pool) -> Poetry
+    def set_pool(self, pool: RepositoryPool) -> Poetry:
         self._pool = pool
 
         return self
 
-    def set_config(self, config):  # type: (Config) -> Poetry
+    def set_config(self, config: Config) -> Poetry:
         self._config = config
 
         return self
+
+    def set_plugin_manager(self, plugin_manager: PluginManager) -> Poetry:
+        self._plugin_manager = plugin_manager
+
+        return self
+
+    def get_sources(self) -> list[Source]:
+        return [
+            Source(**source)
+            for source in self.pyproject.poetry_config.get("source", [])
+        ]
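get_sources() materializes a Source for each [[tool.poetry.source]] table in pyproject.toml. For illustration, this is the shape of data it consumes, parsed here with the standard-library tomllib; the pyproject snippet is made up.

import tomllib

PYPROJECT = """
[[tool.poetry.source]]
name = "internal"
url = "https://pypi.example.com/simple"
priority = "supplemental"
"""

poetry_config = tomllib.loads(PYPROJECT)["tool"]["poetry"]
sources = poetry_config.get("source", [])
assert sources[0]["name"] == "internal"  # each dict becomes Source(**source)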
diff --git a/conda_lock/_vendor/poetry/puzzle/__init__.py b/conda_lock/_vendor/poetry/puzzle/__init__.py
index 70089f30..d28c36b7 100644
--- a/conda_lock/_vendor/poetry/puzzle/__init__.py
+++ b/conda_lock/_vendor/poetry/puzzle/__init__.py
@@ -1 +1,6 @@
-from .solver import Solver
+from __future__ import annotations
+
+from conda_lock._vendor.poetry.puzzle.solver import Solver
+
+
+__all__ = ["Solver"]
diff --git a/conda_lock/_vendor/poetry/puzzle/exceptions.py b/conda_lock/_vendor/poetry/puzzle/exceptions.py
index e2e0b0dc..2f220f53 100644
--- a/conda_lock/_vendor/poetry/puzzle/exceptions.py
+++ b/conda_lock/_vendor/poetry/puzzle/exceptions.py
@@ -1,18 +1,30 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.core.packages.dependency import Dependency
+    from conda_lock._vendor.poetry.core.packages.package import Package
+
+    from conda_lock._vendor.poetry.mixology.failure import SolveFailure
+
+
 class SolverProblemError(Exception):
-    def __init__(self, error):
+    def __init__(self, error: SolveFailure) -> None:
         self._error = error
 
-        super(SolverProblemError, self).__init__(str(error))
+        super().__init__(str(error))
 
     @property
-    def error(self):
+    def error(self) -> SolveFailure:
         return self._error
 
 
 class OverrideNeeded(Exception):
-    def __init__(self, *overrides):
+    def __init__(self, *overrides: dict[Package, dict[str, Dependency]]) -> None:
         self._overrides = overrides
 
     @property
-    def overrides(self):
+    def overrides(self) -> tuple[dict[Package, dict[str, Dependency]], ...]:
         return self._overrides
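OverrideNeeded carries one override mapping per resolution branch; the solver catches it and re-solves once per mapping, which is how duplicate dependencies with incompatible markers get split. A small sketch of the shape it transports; the package and constraint values are illustrative.

from conda_lock._vendor.poetry.core.packages.dependency import Dependency
from conda_lock._vendor.poetry.core.packages.package import Package
from conda_lock._vendor.poetry.puzzle.exceptions import OverrideNeeded

pkg = Package("example", "1.0.0")
branch_a = {pkg: {"enum34": Dependency("enum34", "==1.1.10")}}
branch_b = {pkg: {"enum34": Dependency("enum34", "==1.1.6")}}

try:
    raise OverrideNeeded(branch_a, branch_b)
except OverrideNeeded as e:
    assert len(e.overrides) == 2  # one re-solve per override set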
diff --git a/conda_lock/_vendor/poetry/puzzle/provider.py b/conda_lock/_vendor/poetry/puzzle/provider.py
old mode 100755
new mode 100644
index b41de8d6..d71658f7
--- a/conda_lock/_vendor/poetry/puzzle/provider.py
+++ b/conda_lock/_vendor/poetry/puzzle/provider.py
@@ -1,346 +1,413 @@
+from __future__ import annotations
+
+import itertools
 import logging
-import os
 import re
 import time
 
+from collections import defaultdict
 from contextlib import contextmanager
-from tempfile import mkdtemp
-from typing import Any
-from typing import List
-from typing import Optional
-
-from clikit.ui.components import ProgressIndicator
-
-from conda_lock._vendor.poetry.core.packages import Dependency
-from conda_lock._vendor.poetry.core.packages import DirectoryDependency
-from conda_lock._vendor.poetry.core.packages import FileDependency
-from conda_lock._vendor.poetry.core.packages import Package
-from conda_lock._vendor.poetry.core.packages import URLDependency
-from conda_lock._vendor.poetry.core.packages import VCSDependency
+from typing import TYPE_CHECKING
+from typing import ClassVar
+from typing import cast
+
+from conda_lock._vendor.cleo.ui.progress_indicator import ProgressIndicator
+from conda_lock._vendor.poetry.core.constraints.version import EmptyConstraint
+from conda_lock._vendor.poetry.core.constraints.version import Version
+from conda_lock._vendor.poetry.core.constraints.version import VersionRange
 from conda_lock._vendor.poetry.core.packages.utils.utils import get_python_constraint_from_marker
-from conda_lock._vendor.poetry.core.semver.version import Version
-from conda_lock._vendor.poetry.core.vcs.git import Git
-from conda_lock._vendor.poetry.core.version.markers import MarkerUnion
-from conda_lock._vendor.poetry.inspection.info import PackageInfo
-from conda_lock._vendor.poetry.inspection.info import PackageInfoError
+from conda_lock._vendor.poetry.core.version.markers import AnyMarker
+from conda_lock._vendor.poetry.core.version.markers import union as marker_union
+
 from conda_lock._vendor.poetry.mixology.incompatibility import Incompatibility
 from conda_lock._vendor.poetry.mixology.incompatibility_cause import DependencyCause
 from conda_lock._vendor.poetry.mixology.incompatibility_cause import PythonCause
 from conda_lock._vendor.poetry.mixology.term import Term
 from conda_lock._vendor.poetry.packages import DependencyPackage
+from conda_lock._vendor.poetry.packages.direct_origin import DirectOrigin
 from conda_lock._vendor.poetry.packages.package_collection import PackageCollection
 from conda_lock._vendor.poetry.puzzle.exceptions import OverrideNeeded
-from conda_lock._vendor.poetry.repositories import Pool
-from conda_lock._vendor.poetry.utils._compat import OrderedDict
-from conda_lock._vendor.poetry.utils._compat import Path
-from conda_lock._vendor.poetry.utils._compat import urlparse
-from conda_lock._vendor.poetry.utils.env import Env
-from conda_lock._vendor.poetry.utils.helpers import download_file
-from conda_lock._vendor.poetry.utils.helpers import safe_rmtree
-from conda_lock._vendor.poetry.utils.helpers import temporary_directory
+from conda_lock._vendor.poetry.repositories.exceptions import PackageNotFound
+from conda_lock._vendor.poetry.utils.helpers import get_file_hash
+
+
+if TYPE_CHECKING:
+    from collections.abc import Callable
+    from collections.abc import Collection
+    from collections.abc import Iterable
+    from collections.abc import Iterator
+    from pathlib import Path
+
+    from conda_lock._vendor.cleo.io.io import IO
+    from packaging.utils import NormalizedName
+    from conda_lock._vendor.poetry.core.constraints.version import VersionConstraint
+    from conda_lock._vendor.poetry.core.packages.dependency import Dependency
+    from conda_lock._vendor.poetry.core.packages.directory_dependency import DirectoryDependency
+    from conda_lock._vendor.poetry.core.packages.file_dependency import FileDependency
+    from conda_lock._vendor.poetry.core.packages.package import Package
+    from conda_lock._vendor.poetry.core.packages.url_dependency import URLDependency
+    from conda_lock._vendor.poetry.core.packages.vcs_dependency import VCSDependency
+    from conda_lock._vendor.poetry.core.version.markers import BaseMarker
+
+    from conda_lock._vendor.poetry.repositories import RepositoryPool
+    from conda_lock._vendor.poetry.utils.env import Env
 
 
 logger = logging.getLogger(__name__)
 
 
+class IncompatibleConstraintsError(Exception):
+    """
+    Exception when there are duplicate dependencies with incompatible constraints.
+    """
+
+    def __init__(
+        self, package: Package, *dependencies: Dependency, with_sources: bool = False
+    ) -> None:
+        constraints = []
+        for dep in dependencies:
+            constraint = dep.to_pep_508()
+            if dep.is_direct_origin():
+                # add version info because issue might be a version conflict
+                # with a version constraint
+                constraint += f" ({dep.constraint})"
+            if with_sources and dep.source_name:
+                constraint += f" ; source={dep.source_name}"
+            constraints.append(constraint)
+        super().__init__(
+            f"Incompatible constraints in requirements of {package}:\n"
+            + "\n".join(constraints)
+        )
+
+
 class Indicator(ProgressIndicator):
-    def _formatter_elapsed(self):
+    CONTEXT: str | None = None
+
+    @staticmethod
+    @contextmanager
+    def context() -> Iterator[Callable[[str | None], None]]:
+        def _set_context(context: str | None) -> None:
+            Indicator.CONTEXT = context
+
+        yield _set_context
+
+        _set_context(None)
+
+    def _formatter_context(self) -> str:
+        if Indicator.CONTEXT is None:
+            return " "
+        else:
+            return f" {Indicator.CONTEXT} "
+
+    def _formatter_elapsed(self) -> str:
+        assert self._start_time is not None
         elapsed = time.time() - self._start_time
 
-        return "{:.1f}s".format(elapsed)
+        return f"{elapsed:.1f}s"
 
 
 class Provider:
-
-    UNSAFE_PACKAGES = {"setuptools", "distribute", "pip", "wheel"}
+    UNSAFE_PACKAGES: ClassVar[set[str]] = set()
 
     def __init__(
-        self, package, pool, io, env=None
-    ):  # type: (Package, Pool, Any, Optional[Env]) -> None
+        self,
+        package: Package,
+        pool: RepositoryPool,
+        io: IO,
+        *,
+        installed: list[Package] | None = None,
+        locked: list[Package] | None = None,
+    ) -> None:
         self._package = package
         self._pool = pool
+        self._direct_origin = DirectOrigin(self._pool.artifact_cache)
         self._io = io
-        self._env = env
+        self._env: Env | None = None
         self._python_constraint = package.python_constraint
-        self._search_for = {}
-        self._is_debugging = self._io.is_debug() or self._io.is_very_verbose()
-        self._in_progress = False
-        self._overrides = {}
-        self._deferred_cache = {}
+        self._is_debugging: bool = self._io.is_debug() or self._io.is_very_verbose()
+        self._overrides: dict[Package, dict[str, Dependency]] = {}
+        self._deferred_cache: dict[Dependency, Package] = {}
         self._load_deferred = True
+        self._source_root: Path | None = None
+        self._installed_packages = installed if installed is not None else []
+        self._direct_origin_packages: dict[str, Package] = {}
+        self._locked: dict[NormalizedName, list[DependencyPackage]] = defaultdict(list)
+        self._use_latest: Collection[NormalizedName] = []
+
+        self._explicit_sources: dict[str, str] = {}
+        for package in locked or []:
+            self._locked[package.name].append(
+                DependencyPackage(package.to_dependency(), package)
+            )
+        for dependency_packages in self._locked.values():
+            dependency_packages.sort(
+                key=lambda p: p.package.version,
+                reverse=True,
+            )
 
     @property
-    def pool(self):  # type: () -> Pool
+    def pool(self) -> RepositoryPool:
         return self._pool
 
-    def is_debugging(self):
+    @property
+    def use_latest(self) -> Collection[NormalizedName]:
+        return self._use_latest
+
+    def is_debugging(self) -> bool:
         return self._is_debugging
 
-    def set_overrides(self, overrides):
+    def set_overrides(self, overrides: dict[Package, dict[str, Dependency]]) -> None:
         self._overrides = overrides
 
-    def load_deferred(self, load_deferred):  # type: (bool) -> None
+    def load_deferred(self, load_deferred: bool) -> None:
         self._load_deferred = load_deferred
 
     @contextmanager
-    def use_environment(self, env):  # type: (Env) -> Provider
-        original_env = self._env
+    def use_source_root(self, source_root: Path) -> Iterator[Provider]:
+        original_source_root = self._source_root
+        self._source_root = source_root
+
+        try:
+            yield self
+        finally:
+            self._source_root = original_source_root
+
+    @contextmanager
+    def use_environment(self, env: Env) -> Iterator[Provider]:
         original_python_constraint = self._python_constraint
 
         self._env = env
         self._python_constraint = Version.parse(env.marker_env["python_full_version"])
 
-        yield self
+        try:
+            yield self
+        finally:
+            self._env = None
+            self._python_constraint = original_python_constraint
+
+    @contextmanager
+    def use_latest_for(self, names: Collection[NormalizedName]) -> Iterator[Provider]:
+        self._use_latest = names
+
+        try:
+            yield self
+        finally:
+            self._use_latest = []
 
-        self._env = original_env
-        self._python_constraint = original_python_constraint
+    @staticmethod
+    def validate_package_for_dependency(
+        dependency: Dependency, package: Package
+    ) -> None:
+        if dependency.name != package.name:
+            # For now, the dependency's name must match the actual package's name
+            raise RuntimeError(
+                f"The dependency name for {dependency.name} does not match the actual"
+                f" package's name: {package.name}"
+            )
 
-    def search_for(self, dependency):  # type: (Dependency) -> List[Package]
+    def search_for_installed_packages(
+        self,
+        dependency: Dependency,
+    ) -> list[Package]:
         """
-        Search for the specifications that match the given dependency.
+        Search for installed packages, when available, that satisfy the given
+        dependency.
 
-        The specifications in the returned list will be considered in reverse
-        order, so the latest version ought to be last.
+        This is useful when dealing with packages that are under development, not
+        published on package sources and/or only available via system installations.
         """
-        if dependency.is_root:
-            return PackageCollection(dependency, [self._package])
+        if not self._installed_packages:
+            return []
 
-        for constraint in self._search_for.keys():
-            if (
-                constraint.is_same_package_as(dependency)
-                and constraint.constraint.intersect(dependency.constraint)
-                == dependency.constraint
-            ):
-                packages = [
-                    p
-                    for p in self._search_for[constraint]
-                    if dependency.constraint.allows(p.version)
-                ]
-
-                packages.sort(
-                    key=lambda p: (
-                        not p.is_prerelease() and not dependency.allows_prereleases(),
-                        p.version,
-                    ),
-                    reverse=True,
-                )
+        logger.debug(
+            "Falling back to installed packages to discover metadata for %s",
+            dependency.complete_name,
+        )
+        packages = [
+            package
+            for package in self._installed_packages
+            if package.satisfies(dependency, ignore_source_type=True)
+        ]
+        logger.debug(
+            "Found %d compatible packages for %s",
+            len(packages),
+            dependency.complete_name,
+        )
+        return packages
 
-                return PackageCollection(dependency, packages)
+    def search_for_direct_origin_dependency(self, dependency: Dependency) -> Package:
+        package = self._deferred_cache.get(dependency)
+        if package is not None:
+            pass
+
+        elif dependency.is_vcs():
+            dependency = cast("VCSDependency", dependency)
+            package = self._search_for_vcs(dependency)
 
-        if dependency.is_vcs():
-            packages = self.search_for_vcs(dependency)
         elif dependency.is_file():
-            packages = self.search_for_file(dependency)
+            dependency = cast("FileDependency", dependency)
+            package = self._search_for_file(dependency)
+
         elif dependency.is_directory():
-            packages = self.search_for_directory(dependency)
+            dependency = cast("DirectoryDependency", dependency)
+            package = self._search_for_directory(dependency)
+
         elif dependency.is_url():
-            packages = self.search_for_url(dependency)
-        else:
-            packages = self._pool.find_packages(dependency)
+            dependency = cast("URLDependency", dependency)
+            package = self._search_for_url(dependency)
 
-            packages.sort(
-                key=lambda p: (
-                    not p.is_prerelease() and not dependency.allows_prereleases(),
-                    p.version,
-                ),
-                reverse=True,
+        else:
+            raise RuntimeError(
+                f"{dependency}: unknown direct dependency type {dependency.source_type}"
             )
 
-        self._search_for[dependency] = packages
+        if dependency.is_vcs():
+            dependency._source_reference = package.source_reference
+            dependency._source_resolved_reference = package.source_resolved_reference
+            dependency._source_subdirectory = package.source_subdirectory
+
+        dependency._constraint = package.version
+        dependency._pretty_constraint = package.version.text
+
+        self._deferred_cache[dependency] = package
+
+        return package
+
+    def search_for(self, dependency: Dependency) -> list[DependencyPackage]:
+        """
+        Search for the specifications that match the given dependency.
+
+        The specifications in the returned list will be considered in reverse
+        order, so the latest version ought to be last.
+        """
+        if dependency.is_root:
+            return PackageCollection(dependency, [self._package])
+
+        if dependency.is_direct_origin():
+            package = self.search_for_direct_origin_dependency(dependency)
+            self._direct_origin_packages[dependency.name] = package
+            return PackageCollection(dependency, [package])
+
+        # If we've previously found a direct-origin package that meets this dependency,
+        # use it.
+        #
+        # We rely on the VersionSolver resolving direct-origin dependencies first.
+        direct_origin_package = self._direct_origin_packages.get(dependency.name)
+        if direct_origin_package and direct_origin_package.satisfies(dependency):
+            packages = [direct_origin_package]
+            return PackageCollection(dependency, packages)
+
+        packages = self._pool.find_packages(dependency)
+
+        packages.sort(
+            key=lambda p: (
+                not p.yanked,
+                not p.is_prerelease() and not dependency.allows_prereleases(),
+                p.version,
+            ),
+            reverse=True,
+        )
+
+        if not packages:
+            packages = self.search_for_installed_packages(dependency)
 
         return PackageCollection(dependency, packages)
 
-    def search_for_vcs(self, dependency):  # type: (VCSDependency) -> List[Package]
+    def _search_for_vcs(self, dependency: VCSDependency) -> Package:
         """
         Search for the specifications that match the given VCS dependency.
 
         Basically, we clone the repository in a temporary directory
         and get the information we need by checking out the specified reference.
         """
-        if dependency in self._deferred_cache:
-            return [self._deferred_cache[dependency]]
-
-        package = self.get_package_from_vcs(
+        package = self._direct_origin.get_package_from_vcs(
             dependency.vcs,
             dependency.source,
             branch=dependency.branch,
             tag=dependency.tag,
             rev=dependency.rev,
-            name=dependency.name,
-        )
-        package.develop = dependency.develop
-
-        dependency._constraint = package.version
-        dependency._pretty_constraint = package.version.text
-
-        self._deferred_cache[dependency] = package
-
-        return [package]
-
-    @classmethod
-    def get_package_from_vcs(
-        cls, vcs, url, branch=None, tag=None, rev=None, name=None
-    ):  # type: (str, str, Optional[str], Optional[str]) -> Package
-        if vcs != "git":
-            raise ValueError("Unsupported VCS dependency {}".format(vcs))
-
-        tmp_dir = Path(
-            mkdtemp(prefix="pypoetry-git-{}".format(url.split("/")[-1].rstrip(".git")))
+            subdirectory=dependency.source_subdirectory,
+            source_root=self._source_root
+            or (self._env.path.joinpath("src") if self._env else None),
         )
 
-        try:
-            git = Git()
-            git.clone(url, tmp_dir)
-            reference = branch or tag or rev
-            if reference is not None:
-                git.checkout(reference, tmp_dir)
-            else:
-                reference = "HEAD"
+        self.validate_package_for_dependency(dependency=dependency, package=package)
 
-            revision = git.rev_parse(reference, tmp_dir).strip()
-
-            package = cls.get_package_from_directory(tmp_dir, name=name)
-            package._source_type = "git"
-            package._source_url = url
-            package._source_reference = reference
-            package._source_resolved_reference = revision
-        except Exception:
-            raise
-        finally:
-            safe_rmtree(str(tmp_dir))
+        package.develop = dependency.develop
 
         return package
 
-    def search_for_file(self, dependency):  # type: (FileDependency) -> List[Package]
-        if dependency in self._deferred_cache:
-            dependency, _package = self._deferred_cache[dependency]
-
-            package = _package.clone()
-        else:
-            package = self.get_package_from_file(dependency.full_path)
-
-            dependency._constraint = package.version
-            dependency._pretty_constraint = package.version.text
+    def _search_for_file(self, dependency: FileDependency) -> Package:
+        dependency.validate(raise_error=True)
+        package = self._direct_origin.get_package_from_file(dependency.full_path)
 
-            self._deferred_cache[dependency] = (dependency, package)
-
-        if dependency.name != package.name:
-            # For now, the dependency's name must match the actual package's name
-            raise RuntimeError(
-                "The dependency name for {} does not match the actual package's name: {}".format(
-                    dependency.name, package.name
-                )
-            )
+        self.validate_package_for_dependency(dependency=dependency, package=package)
 
         if dependency.base is not None:
             package.root_dir = dependency.base
 
         package.files = [
-            {"file": dependency.path.name, "hash": "sha256:" + dependency.hash()}
+            {
+                "file": dependency.path.name,
+                "hash": "sha256:" + get_file_hash(dependency.full_path),
+            }
         ]
 
-        return [package]
-
-    @classmethod
-    def get_package_from_file(cls, file_path):  # type: (Path) -> Package
-        try:
-            package = PackageInfo.from_path(path=file_path).to_package(
-                root_dir=file_path
-            )
-        except PackageInfoError:
-            raise RuntimeError(
-                "Unable to determine package info from path: {}".format(file_path)
-            )
-
         return package
 
-    def search_for_directory(
-        self, dependency
-    ):  # type: (DirectoryDependency) -> List[Package]
-        if dependency in self._deferred_cache:
-            dependency, _package = self._deferred_cache[dependency]
-
-            package = _package.clone()
-        else:
-            package = self.get_package_from_directory(
-                dependency.full_path, name=dependency.name
-            )
-
-            dependency._constraint = package.version
-            dependency._pretty_constraint = package.version.text
+    def _search_for_directory(self, dependency: DirectoryDependency) -> Package:
+        dependency.validate(raise_error=True)
+        package = self._direct_origin.get_package_from_directory(dependency.full_path)
 
-            self._deferred_cache[dependency] = (dependency, package)
+        self.validate_package_for_dependency(dependency=dependency, package=package)
 
         package.develop = dependency.develop
 
         if dependency.base is not None:
             package.root_dir = dependency.base
 
-        return [package]
-
-    @classmethod
-    def get_package_from_directory(
-        cls, directory, name=None
-    ):  # type: (Path, Optional[str]) -> Package
-        package = PackageInfo.from_directory(path=directory).to_package(
-            root_dir=directory
-        )
-
-        if name and name != package.name:
-            # For now, the dependency's name must match the actual package's name
-            raise RuntimeError(
-                "The dependency name for {} does not match the actual package's name: {}".format(
-                    name, package.name
-                )
-            )
-
         return package
 
-    def search_for_url(self, dependency):  # type: (URLDependency) -> List[Package]
-        if dependency in self._deferred_cache:
-            return [self._deferred_cache[dependency]]
-
-        package = self.get_package_from_url(dependency.url)
+    def _search_for_url(self, dependency: URLDependency) -> Package:
+        package = self._direct_origin.get_package_from_url(dependency.url)
 
-        if dependency.name != package.name:
-            # For now, the dependency's name must match the actual package's name
-            raise RuntimeError(
-                "The dependency name for {} does not match the actual package's name: {}".format(
-                    dependency.name, package.name
-                )
-            )
+        self.validate_package_for_dependency(dependency=dependency, package=package)
 
         for extra in dependency.extras:
             if extra in package.extras:
                 for dep in package.extras[extra]:
                     dep.activate()
 
-                package.requires += package.extras[extra]
-
-        dependency._constraint = package.version
-        dependency._pretty_constraint = package.version.text
-
-        self._deferred_cache[dependency] = package
+                for extra_dep in package.extras[extra]:
+                    package.add_dependency(extra_dep)
 
-        return [package]
+        return package
 
-    @classmethod
-    def get_package_from_url(cls, url):  # type: (str) -> Package
-        with temporary_directory() as temp_dir:
-            temp_dir = Path(temp_dir)
-            file_name = os.path.basename(urlparse.urlparse(url).path)
-            download_file(url, str(temp_dir / file_name))
+    def _get_dependencies_with_overrides(
+        self, dependencies: list[Dependency], package: Package
+    ) -> list[Dependency]:
+        overrides = self._overrides.get(package, {})
+        _dependencies = []
+        overridden = []
+        for dep in dependencies:
+            if dep.name in overrides:
+                if dep.name in overridden:
+                    continue
 
-            package = cls.get_package_from_file(temp_dir / file_name)
+                # empty constraint is used in overrides to mark that the package has
+                # already been handled and is not required for the attached markers
+                if not overrides[dep.name].constraint.is_empty():
+                    _dependencies.append(overrides[dep.name])
+                overridden.append(dep.name)
 
-        package._source_type = "url"
-        package._source_url = url
+                continue
 
-        return package
+            _dependencies.append(dep)
+        return _dependencies
 
     def incompatibilities_for(
-        self, package
-    ):  # type: (DependencyPackage) -> List[Incompatibility]
+        self, dependency_package: DependencyPackage
+    ) -> list[Incompatibility]:
         """
         Returns incompatibilities that encapsulate a given package's dependencies,
         or that it can't be safely selected.
@@ -350,6 +417,7 @@ def incompatibilities_for(
         won't return incompatibilities that have already been returned by a
         previous call to _incompatibilities_for().
         """
+        package = dependency_package.package
         if package.is_root():
             dependencies = package.all_requires
         else:
@@ -357,7 +425,7 @@ def incompatibilities_for(
 
             if not package.python_constraint.allows_all(self._python_constraint):
                 transitive_python_constraint = get_python_constraint_from_marker(
-                    package.dependency.transitive_marker
+                    dependency_package.dependency.transitive_marker
                 )
                 intersection = package.python_constraint.intersect(
                     transitive_python_constraint
@@ -370,7 +438,7 @@ def incompatibilities_for(
                 if (
                     transitive_python_constraint.is_any()
                     or self._python_constraint.intersect(
-                        package.dependency.python_constraint
+                        dependency_package.dependency.python_constraint
                     ).is_empty()
                     or intersection.is_empty()
                     or not difference.is_empty()
@@ -391,21 +459,7 @@ def incompatibilities_for(
             and self._python_constraint.allows_any(dep.python_constraint)
             and (not self._env or dep.marker.validate(self._env.marker_env))
         ]
-
-        overrides = self._overrides.get(package, {})
-        dependencies = []
-        overridden = []
-        for dep in _dependencies:
-            if dep.name in overrides:
-                if dep.name in overridden:
-                    continue
-
-                dependencies.append(overrides[dep.name])
-                overridden.append(dep.name)
-
-                continue
-
-            dependencies.append(dep)
+        dependencies = self._get_dependencies_with_overrides(_dependencies, package)
 
         return [
             Incompatibility(
@@ -416,42 +470,41 @@ def incompatibilities_for(
         ]
 
     def complete_package(
-        self, package
-    ):  # type: (DependencyPackage) -> DependencyPackage
+        self, dependency_package: DependencyPackage
+    ) -> DependencyPackage:
+        package = dependency_package.package
+        dependency = dependency_package.dependency
 
         if package.is_root():
-            package = package.clone()
+            dependency_package = dependency_package.clone()
+            package = dependency_package.package
+            dependency = dependency_package.dependency
             requires = package.all_requires
-        elif not package.is_root() and package.source_type not in {
-            "directory",
-            "file",
-            "url",
-            "git",
-        }:
-            package = DependencyPackage(
-                package.dependency,
-                self._pool.package(
-                    package.name,
-                    package.version.text,
-                    extras=list(package.dependency.extras),
-                    repository=package.dependency.source_name,
-                ),
-            )
+        elif package.is_direct_origin():
             requires = package.requires
         else:
-            requires = package.requires
+            try:
+                dependency_package = DependencyPackage(
+                    dependency,
+                    self._pool.package(
+                        package.pretty_name,
+                        package.version,
+                        extras=list(dependency.extras),
+                        repository_name=dependency.source_name,
+                    ),
+                )
+            except PackageNotFound as e:
+                try:
+                    dependency_package = next(
+                        DependencyPackage(dependency, pkg)
+                        for pkg in self.search_for_installed_packages(dependency)
+                    )
+                except StopIteration:
+                    raise e from e
 
-        if self._load_deferred:
-            # Retrieving constraints for deferred dependencies
-            for r in requires:
-                if r.is_directory():
-                    self.search_for_directory(r)
-                elif r.is_file():
-                    self.search_for_file(r)
-                elif r.is_vcs():
-                    self.search_for_vcs(r)
-                elif r.is_url():
-                    self.search_for_url(r)
+            package = dependency_package.package
+            dependency = dependency_package.dependency
+            requires = package.requires
 
         optional_dependencies = []
         _dependencies = []
@@ -459,15 +512,27 @@ def complete_package(
         # If some extras/features were required, we need to
         # add a special dependency representing the base package
         # to the current package
-        if package.dependency.extras:
-            for extra in package.dependency.extras:
+        if dependency.extras:
+            for extra in dependency.extras:
                 if extra not in package.extras:
                     continue
 
                 optional_dependencies += [d.name for d in package.extras[extra]]
 
-            package = package.with_features(list(package.dependency.extras))
-            _dependencies.append(package.without_features().to_dependency())
+            dependency_package = dependency_package.with_features(
+                list(dependency.extras)
+            )
+            package = dependency_package.package
+            dependency = dependency_package.dependency
+            new_dependency = package.without_features().to_dependency()
+
+            # When adding dependency foo[extra] -> foo, preserve foo's source, if it's
+            # specified. This prevents us from trying to get foo from PyPI
+            # when the user explicitly set a repo for foo[extra].
+            if not new_dependency.source_name and dependency.source_name:
+                new_dependency.source_name = dependency.source_name
+
+            _dependencies.append(new_dependency)
 
         for dep in requires:
             if not self._python_constraint.allows_any(dep.python_constraint):
@@ -479,29 +544,30 @@ def complete_package(
             if self._env and not dep.marker.validate(self._env.marker_env):
                 continue
 
-            if not package.is_root():
-                if (dep.is_optional() and dep.name not in optional_dependencies) or (
+            if not package.is_root() and (
+                (dep.is_optional() and dep.name not in optional_dependencies)
+                or (
                     dep.in_extras
-                    and not set(dep.in_extras).intersection(package.dependency.extras)
-                ):
-                    continue
+                    and not set(dep.in_extras).intersection(dependency.extras)
+                )
+            ):
+                continue
 
             _dependencies.append(dep)
 
-        overrides = self._overrides.get(package, {})
-        dependencies = []
-        overridden = []
-        for dep in _dependencies:
-            if dep.name in overrides:
-                if dep.name in overridden:
-                    continue
-
-                dependencies.append(overrides[dep.name])
-                overridden.append(dep.name)
-
-                continue
+        if self._load_deferred:
+            # Retrieving constraints for deferred dependencies
+            for dep in _dependencies:
+                if dep.is_direct_origin():
+                    locked = self.get_locked(dep)
+                    # If the lock file contains exactly the same URL and
+                    # reference (commit hash) as the requested dependency,
+                    # do not analyze it again: nothing could have changed.
+                    if locked is not None and locked.package.is_same_package_as(dep):
+                        continue
+                    self.search_for_direct_origin_dependency(dep)
 
-            dependencies.append(dep)
+        dependencies = self._get_dependencies_with_overrides(_dependencies, package)
 
         # Searching for duplicate dependencies
         #
@@ -509,24 +575,21 @@ def complete_package(
         # the requirements will be merged.
         #
         # For instance:
-        #   - enum34; python_version=="2.7"
-        #   - enum34; python_version=="3.3"
+        #   - enum34; python_version=="2.7"
+        #   - enum34; python_version=="3.3"
         #
         # will become:
-        #   - enum34; python_version=="2.7" or python_version=="3.3"
+        #   - enum34; python_version=="2.7" or python_version=="3.3"
         #
         # If the duplicate dependencies have different constraints
         # we have to split the dependency graph.
         #
         # An example of this is:
-        #   - pypiwin32 (220); sys_platform == "win32" and python_version >= "3.6"
-        #   - pypiwin32 (219); sys_platform == "win32" and python_version < "3.6"
-        duplicates = OrderedDict()
+        #   - pypiwin32 (220); sys_platform == "win32" and python_version >= "3.6"
+        #   - pypiwin32 (219); sys_platform == "win32" and python_version < "3.6"
+        duplicates: dict[str, list[Dependency]] = defaultdict(list)
         for dep in dependencies:
-            if dep.name not in duplicates:
-                duplicates[dep.name] = []
-
-            duplicates[dep.name].append(dep)
+            duplicates[dep.complete_name].append(dep)
 
         dependencies = []
         for dep_name, deps in duplicates.items():
@@ -534,181 +597,139 @@ def complete_package(
                 dependencies.append(deps[0])
                 continue
 
-            self.debug("Duplicate dependencies for {}".format(dep_name))
+            self.debug(f"Duplicate dependencies for {dep_name}")
 
-            # Regrouping by constraint
-            by_constraint = OrderedDict()
-            for dep in deps:
-                if dep.constraint not in by_constraint:
-                    by_constraint[dep.constraint] = []
-
-                by_constraint[dep.constraint].append(dep)
-
-            # We merge by constraint
-            for constraint, _deps in by_constraint.items():
-                new_markers = []
-                for dep in _deps:
-                    marker = dep.marker.without_extras()
-                    if marker.is_any():
-                        # No marker or only extras
-                        continue
-
-                    new_markers.append(marker)
-
-                if not new_markers:
-                    continue
-
-                dep = _deps[0]
-                dep.marker = dep.marker.union(MarkerUnion(*new_markers))
-                by_constraint[constraint] = [dep]
-
-                continue
-
-            if len(by_constraint) == 1:
-                self.debug(
-                    "Merging requirements for {}".format(str(deps[0]))
-                )
-                dependencies.append(list(by_constraint.values())[0][0])
-                continue
-
-            # We leave dependencies as-is if they have the same
-            # python/platform constraints.
-            # That way the resolver will pickup the conflict
-            # and display a proper error.
-            _deps = [value[0] for value in by_constraint.values()]
-            seen = set()
-            for _dep in _deps:
-                pep_508_dep = _dep.to_pep_508(False)
-                if ";" not in pep_508_dep:
-                    _requirements = ""
-                else:
-                    _requirements = pep_508_dep.split(";")[1].strip()
-
-                if _requirements not in seen:
-                    seen.add(_requirements)
-
-            if len(_deps) != len(seen):
-                for _dep in _deps:
-                    dependencies.append(_dep)
+            # For dependency resolution, markers of duplicate dependencies must be
+            # mutually exclusive.
+            active_extras = None if package.is_root() else dependency.extras
+            deps = self._resolve_overlapping_markers(package, deps, active_extras)
 
+            if len(deps) == 1:
+                self.debug(f"Merging requirements for {dep_name}")
+                dependencies.append(deps[0])
                 continue
 
             # At this point, we raise an exception that will
             # tell the solver to make new resolutions with specific overrides.
             #
             # For instance, if the foo (1.2.3) package has the following dependencies:
-            #   - bar (>=2.0) ; python_version >= "3.6"
-            #   - bar (<2.0) ; python_version < "3.6"
+            #   - bar (>=2.0) ; python_version >= "3.6"
+            #   - bar (<2.0) ; python_version < "3.6"
             #
             # then the solver will need to make two new resolutions
             # with the following overrides:
-            #   - {<Package foo (1.2.3): {"bar": <Dependency bar (>=2.0)>}
-            #   - {<Package foo (1.2.3): {"bar": <Dependency bar (<2.0)>}
-            markers = []
-            for constraint, _deps in by_constraint.items():
-                markers.append(_deps[0].marker)
+            #   - {<Package foo (1.2.3): {"bar": <Dependency bar (>=2.0)>}
+            #   - {<Package foo (1.2.3): {"bar": <Dependency bar (<2.0)>}
+
+            def fmt_warning(d: Dependency) -> str:
+                dependency_marker = d.marker if not d.marker.is_any() else "*"
+                return (
+                    f"{d.name} ({d.pretty_constraint})"
+                    f" with markers {dependency_marker}"
+                )
 
-            _deps = [_dep[0] for _dep in by_constraint.values()]
+            warnings = ", ".join(fmt_warning(d) for d in deps[:-1])
+            warnings += f" and {fmt_warning(deps[-1])}"
             self.debug(
-                "Different requirements found for {}.".format(
-                    ", ".join(
-                        "{} ({}) with markers {}".format(
-                            d.name,
-                            d.pretty_constraint,
-                            d.marker if not d.marker.is_any() else "*",
-                        )
-                        for d in _deps[:-1]
-                    )
-                    + " and "
-                    + "{} ({}) with markers {}".format(
-                        _deps[-1].name,
-                        _deps[-1].pretty_constraint,
-                        _deps[-1].marker if not _deps[-1].marker.is_any() else "*",
-                    )
-                )
+                f"Different requirements found for {warnings}."
             )
 
-            # We need to check if one of the duplicate dependencies
-            # has no markers. If there is one, we need to change its
-            # environment markers to the inverse of the union of the
-            # other dependencies markers.
-            # For instance, if we have the following dependencies:
-            #   - ipython
-            #   - ipython (1.2.4) ; implementation_name == "pypy"
-            #
-            # the marker for `ipython` will become `implementation_name != "pypy"`.
-            any_markers_dependencies = [d for d in _deps if d.marker.is_any()]
-            other_markers_dependencies = [d for d in _deps if not d.marker.is_any()]
-
-            if any_markers_dependencies:
-                marker = other_markers_dependencies[0].marker
-                for other_dep in other_markers_dependencies[1:]:
-                    marker = marker.union(other_dep.marker)
-
-                for i, d in enumerate(_deps):
-                    if d.marker.is_any():
-                        _deps[i].marker = marker.invert()
-
             overrides = []
-            for _dep in _deps:
-                current_overrides = self._overrides.copy()
-                package_overrides = current_overrides.get(package, {}).copy()
-                package_overrides.update({_dep.name: _dep})
-                current_overrides.update({package: package_overrides})
-                overrides.append(current_overrides)
+            overrides_marker_intersection: BaseMarker = AnyMarker()
+            for dep_overrides in self._overrides.values():
+                for dep in dep_overrides.values():
+                    overrides_marker_intersection = (
+                        overrides_marker_intersection.intersect(dep.marker)
+                    )
+            for dep in deps:
+                if not overrides_marker_intersection.intersect(dep.marker).is_empty():
+                    current_overrides = self._overrides.copy()
+                    package_overrides = current_overrides.get(package, {}).copy()
+                    package_overrides.update({dep.name: dep})
+                    current_overrides.update({package: package_overrides})
+                    overrides.append(current_overrides)
 
-            raise OverrideNeeded(*overrides)
+            if overrides:
+                raise OverrideNeeded(*overrides)
 
         # Modifying dependencies as needed
         clean_dependencies = []
         for dep in dependencies:
-            if not package.dependency.transitive_marker.without_extras().is_any():
-                marker_intersection = package.dependency.transitive_marker.without_extras().intersect(
-                    dep.marker.without_extras()
+            if not dependency.transitive_marker.without_extras().is_any():
+                transitive_marker_intersection = (
+                    dependency.transitive_marker.without_extras().intersect(
+                        dep.marker.without_extras()
+                    )
                 )
-                if marker_intersection.is_empty():
+                if transitive_marker_intersection.is_empty():
                     # The dependency is not needed, since the markers specified
                     # for the current package selection are not compatible with
                     # the markers for the current dependency, so we skip it
                     continue
 
-                dep.transitive_marker = marker_intersection
+                dep.transitive_marker = transitive_marker_intersection
 
-            if not package.dependency.python_constraint.is_any():
+            if not dependency.python_constraint.is_any():
                 python_constraint_intersection = dep.python_constraint.intersect(
-                    package.dependency.python_constraint
+                    dependency.python_constraint
                 )
                 if python_constraint_intersection.is_empty():
                     # This dependency is not needed under current python constraint.
                     continue
-                dep.transitive_python_versions = str(python_constraint_intersection)
 
             clean_dependencies.append(dep)
 
-        package.requires = clean_dependencies
-
-        return package
-
-    def debug(self, message, depth=0):
+        package = package.with_dependency_groups([], only=True)
+        dependency_package = DependencyPackage(dependency, package)
+
+        for dep in clean_dependencies:
+            package.add_dependency(dep)
+
+        if self._locked and package.is_root():
+            # At this point all duplicates have been eliminated via overrides
+            # so that explicit sources are unambiguous.
+            # Clear _explicit_sources because it might be filled
+            # from a previous override.
+            self._explicit_sources.clear()
+            for dep in clean_dependencies:
+                if dep.source_name:
+                    self._explicit_sources[dep.name] = dep.source_name
+
+        return dependency_package
+
+    def get_locked(self, dependency: Dependency) -> DependencyPackage | None:
+        if dependency.name in self._use_latest:
+            return None
+
+        locked = self._locked.get(dependency.name, [])
+        for dependency_package in locked:
+            package = dependency_package.package
+            if package.satisfies(dependency):
+                if explicit_source := self._explicit_sources.get(dependency.name):
+                    dependency.source_name = explicit_source
+                return DependencyPackage(dependency, package)
+        return None
+
+    def debug(self, message: str, depth: int = 0) -> None:
         if not (self._io.is_very_verbose() or self._io.is_debug()):
             return
 
         if message.startswith("fact:"):
             if "depends on" in message:
                 m = re.match(r"fact: (.+?) depends on (.+?) \((.+?)\)", message)
+                if m is None:
+                    raise ValueError(f"Unable to parse fact: {message}")
                 m2 = re.match(r"(.+?) \((.+?)\)", m.group(1))
                 if m2:
                     name = m2.group(1)
-                    version = " ({})".format(m2.group(2))
+                    version = f" ({m2.group(2)})"
                 else:
                     name = m.group(1)
                     version = ""
 
                 message = (
-                    "fact: {}{} "
-                    "depends on {} ({})".format(
-                        name, version, m.group(2), m.group(3)
-                    )
+                    f"fact: {name}{version} "
+                    f"depends on {m.group(2)} ({m.group(3)})"
                 )
             elif " is " in message:
                 message = re.sub(
@@ -720,7 +741,7 @@ def debug(self, message, depth=0):
                 message = re.sub(
                     r"(?<=: )(.+?) \((.+?)\)", "\\1 (\\2)", message
                 )
-                message = "fact: {}".format(message.split("fact: ")[1])
+                message = f"fact: {message.split('fact: ')[1]}"
         elif message.startswith("selecting "):
             message = re.sub(
                 r"selecting (.+?) \((.+?)\)",
@@ -730,12 +751,13 @@ def debug(self, message, depth=0):
         elif message.startswith("derived:"):
             m = re.match(r"derived: (.+?) \((.+?)\)$", message)
             if m:
-                message = "derived: {} ({})".format(
-                    m.group(1), m.group(2)
+                message = (
+                    f"derived: {m.group(1)}"
+                    f" ({m.group(2)})"
                 )
             else:
-                message = "derived: {}".format(
-                    message.split("derived: ")[1]
+                message = (
+                    f"derived: {message.split('derived: ')[1]}"
                 )
         elif message.startswith("conflict:"):
             m = re.match(r"conflict: (.+?) depends on (.+?) \((.+?)\)", message)
@@ -743,20 +765,19 @@ def debug(self, message, depth=0):
                 m2 = re.match(r"(.+?) \((.+?)\)", m.group(1))
                 if m2:
                     name = m2.group(1)
-                    version = " ({})".format(m2.group(2))
+                    version = f" ({m2.group(2)})"
                 else:
                     name = m.group(1)
                     version = ""
 
                 message = (
-                    "conflict: {}{} "
-                    "depends on {} ({})".format(
-                        name, version, m.group(2), m.group(3)
-                    )
+                    f"conflict: {name}{version} "
+                    f"depends on {m.group(2)} ({m.group(3)})"
                 )
             else:
-                message = "conflict: {}".format(
-                    message.split("conflict: ")[1]
+                message = (
+                    "conflict:"
+                    f" {message.split('conflict: ')[1]}"
                 )
 
         message = message.replace("! ", "! ")
@@ -766,7 +787,7 @@ def debug(self, message, depth=0):
             debug_info = (
                 "\n".join(
                     [
-                        "{}: {}".format(str(depth).rjust(4), s)
+                        f"{str(depth).rjust(4)}: {s}"
                         for s in debug_info.split("\n")
                     ]
                 )
@@ -775,18 +796,160 @@ def debug(self, message, depth=0):
 
             self._io.write(debug_info)
 
-    @contextmanager
-    def progress(self):
-        if not self._io.output.supports_ansi() or self.is_debugging():
-            self._io.write_line("Resolving dependencies...")
-            yield
-        else:
-            indicator = Indicator(self._io, "{message} <debug>({elapsed:2s})</debug>")
+    def _group_by_source(
+        self, dependencies: Iterable[Dependency]
+    ) -> list[list[Dependency]]:
+        """
+        Takes a list of dependencies and returns a list of groups of dependencies,
+        each group containing all dependencies from the same source.
+        """
+        groups: list[list[Dependency]] = []
+        for dep in dependencies:
+            for group in groups:
+                if (
+                    dep.is_same_source_as(group[0])
+                    and dep.source_name == group[0].source_name
+                ):
+                    group.append(dep)
+                    break
+            else:
+                groups.append([dep])
+        return groups
 
-            with indicator.auto(
-                "Resolving dependencies...",
-                "Resolving dependencies...",
-            ):
-                yield
+    def _merge_dependencies_by_constraint(
+        self, dependencies: Iterable[Dependency]
+    ) -> list[Dependency]:
+        """
+        Merge dependencies with the same constraint
+        by building a union of their markers.
+
+        For instance, if we have:
+           - foo (>=2.0) ; python_version >= "3.6" and python_version < "3.7"
+           - foo (>=2.0) ; python_version >= "3.7"
+        we can avoid two overrides by merging them to:
+           - foo (>=2.0) ; python_version >= "3.6"
+        """
+        dep_groups = self._group_by_source(dependencies)
+        merged_dependencies = []
+        for group in dep_groups:
+            by_constraint: dict[VersionConstraint, list[Dependency]] = defaultdict(list)
+            for dep in group:
+                by_constraint[dep.constraint].append(dep)
+            for deps in by_constraint.values():
+                dep = deps[0]
+                if len(deps) > 1:
+                    new_markers = (dep.marker for dep in deps)
+                    dep.marker = marker_union(*new_markers)
+                merged_dependencies.append(dep)
+
+        return merged_dependencies
+
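
As a toy illustration of this merge step (outside poetry's real classes: frozensets of interpreter versions stand in for markers, so "union of markers" is just set union), duplicates sharing a constraint collapse into a single entry whose marker covers both:

from collections import defaultdict

deps = [
    ("foo", ">=2.0", frozenset({"3.6"})),  # foo (>=2.0) ; python_version == "3.6"
    ("foo", ">=2.0", frozenset({"3.7"})),  # foo (>=2.0) ; python_version == "3.7"
    ("foo", "<2.0", frozenset({"2.7"})),   # foo (<2.0)  ; python_version == "2.7"
]

by_constraint = defaultdict(list)
for name, constraint, marker in deps:
    by_constraint[constraint].append(marker)

merged = {
    constraint: frozenset().union(*markers)  # marker union replaces the duplicates
    for constraint, markers in by_constraint.items()
}
print(merged)  # {'>=2.0': frozenset({'3.6', '3.7'}), '<2.0': frozenset({'2.7'})}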
+    def _is_relevant_marker(
+        self, marker: BaseMarker, active_extras: Collection[NormalizedName] | None
+    ) -> bool:
+        """
+        A marker is relevant if
+        - it is not empty
+        - allowed by the project's python constraint
+        - allowed by active extras of the dependency (not relevant for root package)
+        - allowed by the environment (only during installation)
+        """
+        return (
+            not marker.is_empty()
+            and self._python_constraint.allows_any(
+                get_python_constraint_from_marker(marker)
+            )
+            and (active_extras is None or marker.validate({"extra": active_extras}))
+            and (not self._env or marker.validate(self._env.marker_env))
+        )
 
-        self._in_progress = False
+    def _resolve_overlapping_markers(
+        self,
+        package: Package,
+        dependencies: list[Dependency],
+        active_extras: Collection[NormalizedName] | None,
+    ) -> list[Dependency]:
+        """
+        Convert duplicate dependencies with potentially overlapping markers
+        into duplicate dependencies with mutually exclusive markers.
+
+        Therefore, the intersections of all combinations of markers and inverted markers
+        have to be calculated. If such an intersection is relevant (not empty, etc.),
+        the intersection of all constraints, whose markers were not inverted is built
+        and a new dependency with the calculated version constraint and marker is added.
+        (The marker of such a dependency does not overlap with the marker
+        of any other new dependency.)
+        """
+        # In order to reduce the number of intersections,
+        # we merge duplicate dependencies by constraint.
+        dependencies = self._merge_dependencies_by_constraint(dependencies)
+
+        new_dependencies = []
+        for uses in itertools.product([True, False], repeat=len(dependencies)):
+            # intersection of markers
+            # For performance optimization, we don't just intersect all markers at once,
+            # but intersect them one after the other to get empty markers early.
+            # Further, we intersect the inverted markers at last because
+            # they are more likely to overlap than the non-inverted ones.
+            markers = (
+                dep.marker if use else dep.marker.invert()
+                for use, dep in sorted(
+                    zip(uses, dependencies), key=lambda ud: ud[0], reverse=True
+                )
+            )
+            used_marker_intersection: BaseMarker = AnyMarker()
+            for m in markers:
+                used_marker_intersection = used_marker_intersection.intersect(m)
+            if not self._is_relevant_marker(used_marker_intersection, active_extras):
+                continue
+
+            # intersection of constraints
+            constraint: VersionConstraint = VersionRange()
+            specific_source_dependency = None
+            used_dependencies = list(itertools.compress(dependencies, uses))
+            for dep in used_dependencies:
+                if dep.is_direct_origin() or dep.source_name:
+                    # if direct origin or specific source:
+                    # conflict if specific source already set and not the same
+                    if specific_source_dependency and (
+                        not dep.is_same_source_as(specific_source_dependency)
+                        or dep.source_name != specific_source_dependency.source_name
+                    ):
+                        raise IncompatibleConstraintsError(
+                            package, dep, specific_source_dependency, with_sources=True
+                        )
+                    specific_source_dependency = dep
+                constraint = constraint.intersect(dep.constraint)
+            if constraint.is_empty():
+                # conflict in overlapping area
+                raise IncompatibleConstraintsError(package, *used_dependencies)
+
+            if not any(uses):
+                # This is an edge case where the dependency is not required
+                # for the resulting marker. However, we have to consider it anyway
+                # in order to not miss other dependencies later, for instance:
+                #   - foo (1.0) ; python == 3.7
+                #   - foo (2.0) ; python == 3.8
+                #   - bar (2.0) ; python == 3.8
+                #   - bar (3.0) ; python == 3.9
+                # the last dependency would be missed without this,
+                # because the intersection with both foo dependencies is empty.
+
+                # Set constraint to empty to mark dependency as "not required".
+                constraint = EmptyConstraint()
+                used_dependencies = dependencies
+
+            # build new dependency with intersected constraint and marker
+            # (and correct source)
+            new_dep = (
+                specific_source_dependency
+                if specific_source_dependency
+                else used_dependencies[0]
+            ).with_constraint(constraint)
+            new_dep.marker = used_marker_intersection
+            new_dependencies.append(new_dep)
+
+        # In order to reduce the number of overrides we merge duplicate
+        # dependencies by constraint again. After overlapping markers were
+        # resolved, there might be new dependencies with the same constraint.
+        return self._merge_dependencies_by_constraint(new_dependencies)
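
The product-of-inversions idea behind _resolve_overlapping_markers is easier to see with toy markers. In this sketch, frozensets of environment tags stand in for real markers (set intersection for marker intersection, complement against a fixed universe for inversion); none of the names are poetry API:

import itertools

UNIVERSE = frozenset({"py36", "py37", "py38"})
deps = [
    ("foo>=2.0", frozenset({"py37", "py38"})),
    ("foo<3.0", frozenset({"py36", "py37"})),  # overlaps the marker above on py37
]

regions = []
for uses in itertools.product([True, False], repeat=len(deps)):
    marker = UNIVERSE
    for (name, m), use in zip(deps, uses):
        marker = marker & (m if use else UNIVERSE - m)
    if not marker:
        continue  # empty intersection: this combination is not relevant
    used = [name for (name, _), use in zip(deps, uses) if use]
    regions.append((used, marker))

for used, marker in regions:
    print(sorted(marker), "->", " and ".join(used) or "no constraint")
# ['py37'] -> foo>=2.0 and foo<3.0
# ['py38'] -> foo>=2.0
# ['py36'] -> foo<3.0

The three resulting markers are mutually exclusive, which is exactly the property the resolver needs before it can split the dependency graph.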
diff --git a/conda_lock/_vendor/poetry/puzzle/solver.py b/conda_lock/_vendor/poetry/puzzle/solver.py
index f63e8247..e96d88e5 100644
--- a/conda_lock/_vendor/poetry/puzzle/solver.py
+++ b/conda_lock/_vendor/poetry/puzzle/solver.py
@@ -1,210 +1,135 @@
-import enum
+from __future__ import annotations
+
 import time
 
 from collections import defaultdict
 from contextlib import contextmanager
-from typing import List
-from typing import Optional
-
-from clikit.io import ConsoleIO
+from typing import TYPE_CHECKING
+from typing import FrozenSet
+from typing import Tuple
+from typing import TypeVar
 
-from conda_lock._vendor.poetry.core.packages import Package
-from conda_lock._vendor.poetry.core.packages.project_package import ProjectPackage
-from conda_lock._vendor.poetry.installation.operations import Install
-from conda_lock._vendor.poetry.installation.operations import Uninstall
-from conda_lock._vendor.poetry.installation.operations import Update
-from conda_lock._vendor.poetry.installation.operations.operation import Operation
 from conda_lock._vendor.poetry.mixology import resolve_version
 from conda_lock._vendor.poetry.mixology.failure import SolveFailure
-from conda_lock._vendor.poetry.packages import DependencyPackage
-from conda_lock._vendor.poetry.repositories import Pool
-from conda_lock._vendor.poetry.repositories import Repository
-from conda_lock._vendor.poetry.utils.env import Env
+from conda_lock._vendor.poetry.puzzle.exceptions import OverrideNeeded
+from conda_lock._vendor.poetry.puzzle.exceptions import SolverProblemError
+from conda_lock._vendor.poetry.puzzle.provider import Indicator
+from conda_lock._vendor.poetry.puzzle.provider import Provider
+
+
+if TYPE_CHECKING:
+    from collections.abc import Collection
+    from collections.abc import Iterator
 
-from .exceptions import OverrideNeeded
-from .exceptions import SolverProblemError
-from .provider import Provider
+    from conda_lock._vendor.cleo.io.io import IO
+    from packaging.utils import NormalizedName
+    from conda_lock._vendor.poetry.core.packages.dependency import Dependency
+    from conda_lock._vendor.poetry.core.packages.package import Package
+    from conda_lock._vendor.poetry.core.packages.project_package import ProjectPackage
+
+    from conda_lock._vendor.poetry.puzzle.transaction import Transaction
+    from conda_lock._vendor.poetry.repositories import RepositoryPool
+    from conda_lock._vendor.poetry.utils.env import Env
 
 
 class Solver:
     def __init__(
         self,
-        package,  # type: ProjectPackage
-        pool,  # type: Pool
-        installed,  # type: Repository
-        locked,  # type: Repository
-        io,  # type: ConsoleIO
-        remove_untracked=False,  # type: bool
-        provider=None,  # type: Optional[Provider]
-    ):
+        package: ProjectPackage,
+        pool: RepositoryPool,
+        installed: list[Package],
+        locked: list[Package],
+        io: IO,
+    ) -> None:
         self._package = package
         self._pool = pool
-        self._installed = installed
-        self._locked = locked
+        self._installed_packages = installed
+        self._locked_packages = locked
         self._io = io
 
-        if provider is None:
-            provider = Provider(self._package, self._pool, self._io)
-
-        self._provider = provider
-        self._overrides = []
-        self._remove_untracked = remove_untracked
+        self._provider = Provider(
+            self._package, self._pool, self._io, installed=installed, locked=locked
+        )
+        self._overrides: list[dict[Package, dict[str, Dependency]]] = []
 
     @property
-    def provider(self):  # type: () -> Provider
+    def provider(self) -> Provider:
         return self._provider
 
     @contextmanager
-    def use_environment(self, env):  # type: (Env) -> None
+    def use_environment(self, env: Env) -> Iterator[None]:
         with self.provider.use_environment(env):
             yield
 
-    def solve(self, use_latest=None):  # type: (...) -> List[Operation]
-        with self._provider.progress():
+    def solve(
+        self, use_latest: Collection[NormalizedName] | None = None
+    ) -> Transaction:
+        from conda_lock._vendor.poetry.puzzle.transaction import Transaction
+
+        with self._progress(), self._provider.use_latest_for(use_latest or []):
             start = time.time()
-            packages, depths = self._solve(use_latest=use_latest)
+            packages, depths = self._solve()
             end = time.time()
 
             if len(self._overrides) > 1:
                 self._provider.debug(
-                    "Complete version solving took {:.3f} seconds with {} overrides".format(
-                        end - start, len(self._overrides)
-                    )
+                    # ignore the warning as provider does not do interpolation
+                    f"Complete version solving took {end - start:.3f}"
+                    f" seconds with {len(self._overrides)} overrides"
                 )
                 self._provider.debug(
-                    "Resolved with overrides: {}".format(
-                        ", ".join("({})".format(b) for b in self._overrides)
-                    )
+                    # ignore the warning as provider does not do interpolation
+                    "Resolved with overrides:"
+                    f" {', '.join(f'({b})' for b in self._overrides)}"
                 )
 
-        operations = []
-        for i, package in enumerate(packages):
-            installed = False
-            for pkg in self._installed.packages:
-                if package.name == pkg.name:
-                    installed = True
-
-                    if pkg.source_type == "git" and package.source_type == "git":
-                        from conda_lock._vendor.poetry.core.vcs.git import Git
-
-                        # Trying to find the currently installed version
-                        pkg_source_url = Git.normalize_url(pkg.source_url)
-                        package_source_url = Git.normalize_url(package.source_url)
-                        for locked in self._locked.packages:
-                            if locked.name != pkg.name or locked.source_type != "git":
-                                continue
-
-                            locked_source_url = Git.normalize_url(locked.source_url)
-                            if (
-                                locked.name == pkg.name
-                                and locked.source_type == pkg.source_type
-                                and locked_source_url == pkg_source_url
-                                and locked.source_reference == pkg.source_reference
-                                and locked.source_resolved_reference
-                                == pkg.source_resolved_reference
-                            ):
-                                pkg = Package(
-                                    pkg.name,
-                                    locked.version,
-                                    source_type="git",
-                                    source_url=locked.source_url,
-                                    source_reference=locked.source_reference,
-                                    source_resolved_reference=locked.source_resolved_reference,
-                                )
-                                break
-
-                        if pkg_source_url != package_source_url or (
-                            (
-                                not pkg.source_resolved_reference
-                                or not package.source_resolved_reference
-                            )
-                            and pkg.source_reference != package.source_reference
-                            and not pkg.source_reference.startswith(
-                                package.source_reference
-                            )
-                            or (
-                                pkg.source_resolved_reference
-                                and package.source_resolved_reference
-                                and pkg.source_resolved_reference
-                                != package.source_resolved_reference
-                                and not pkg.source_resolved_reference.startswith(
-                                    package.source_resolved_reference
-                                )
-                            )
-                        ):
-                            operations.append(Update(pkg, package, priority=depths[i]))
-                        else:
-                            operations.append(
-                                Install(package).skip("Already installed")
-                            )
-                    elif package.version != pkg.version:
-                        # Checking version
-                        operations.append(Update(pkg, package, priority=depths[i]))
-                    elif pkg.source_type and package.source_type != pkg.source_type:
-                        operations.append(Update(pkg, package, priority=depths[i]))
-                    else:
-                        operations.append(
-                            Install(package, priority=depths[i]).skip(
-                                "Already installed"
-                            )
-                        )
-
-                    break
-
-            if not installed:
-                operations.append(Install(package, priority=depths[i]))
-
-        # Checking for removals
-        for pkg in self._locked.packages:
-            remove = True
-            for package in packages:
-                if pkg.name == package.name:
-                    remove = False
-                    break
-
-            if remove:
-                skip = True
-                for installed in self._installed.packages:
-                    if installed.name == pkg.name:
-                        skip = False
-                        break
-
-                op = Uninstall(pkg)
-                if skip:
-                    op.skip("Not currently installed")
-
-                operations.append(op)
-
-        if self._remove_untracked:
-            locked_names = {locked.name for locked in self._locked.packages}
-
-            for installed in self._installed.packages:
-                if installed.name == self._package.name:
-                    continue
-                if installed.name in Provider.UNSAFE_PACKAGES:
-                    # Never remove pip, setuptools etc.
-                    continue
-                if installed.name not in locked_names:
-                    operations.append(Uninstall(installed))
-
-        return sorted(
-            operations, key=lambda o: (-o.priority, o.package.name, o.package.version,),
+        for p in packages:
+            if p.yanked:
+                message = (
+                    f"The locked version {p.pretty_version} for {p.pretty_name} is a"
+                    " yanked version."
+                )
+                if p.yanked_reason:
+                    message += f" Reason for being yanked: {p.yanked_reason}"
+                self._io.write_error_line(f"<warning>Warning: {message}</warning>")
+
+        return Transaction(
+            self._locked_packages,
+            list(zip(packages, depths)),
+            installed_packages=self._installed_packages,
+            root_package=self._package,
         )
 
-    def solve_in_compatibility_mode(self, overrides, use_latest=None):
-        locked = {}
-        for package in self._locked.packages:
-            locked[package.name] = DependencyPackage(package.to_dependency(), package)
+    @contextmanager
+    def _progress(self) -> Iterator[None]:
+        if not self._io.output.is_decorated() or self._provider.is_debugging():
+            self._io.write_line("Resolving dependencies...")
+            yield
+        else:
+            indicator = Indicator(
+                self._io, "{message}{context}({elapsed:2s})"
+            )
+
+            with indicator.auto(
+                "Resolving dependencies...",
+                "Resolving dependencies...",
+            ):
+                yield
 
+    def _solve_in_compatibility_mode(
+        self,
+        overrides: tuple[dict[Package, dict[str, Dependency]], ...],
+    ) -> tuple[list[Package], list[int]]:
         packages = []
         depths = []
         for override in overrides:
             self._provider.debug(
+                # ignore the warning as provider does not do interpolation
                 "Retrying dependency resolution "
-                "with the following overrides ({}).".format(override)
+                f"with the following overrides ({override})."
             )
             self._provider.set_overrides(override)
-            _packages, _depths = self._solve(use_latest=use_latest)
+            _packages, _depths = self._solve()
             for index, package in enumerate(_packages):
                 if package not in packages:
                     packages.append(package)
@@ -217,34 +142,25 @@ def solve_in_compatibility_mode(self, overrides, use_latest=None):
 
                     for dep in package.requires:
                         if dep not in pkg.requires:
-                            pkg.requires.append(dep)
+                            pkg.add_dependency(dep)
 
         return packages, depths
 
-    def _solve(self, use_latest=None):
+    def _solve(self) -> tuple[list[Package], list[int]]:
         if self._provider._overrides:
             self._overrides.append(self._provider._overrides)
 
-        locked = {}
-        for package in self._locked.packages:
-            locked[package.name] = DependencyPackage(package.to_dependency(), package)
-
         try:
-            result = resolve_version(
-                self._package, self._provider, locked=locked, use_latest=use_latest
-            )
+            result = resolve_version(self._package, self._provider)
 
             packages = result.packages
         except OverrideNeeded as e:
-            return self.solve_in_compatibility_mode(e.overrides, use_latest=use_latest)
+            return self._solve_in_compatibility_mode(e.overrides)
         except SolveFailure as e:
             raise SolverProblemError(e)
 
-        results = dict(
-            depth_first_search(
-                PackageNode(self._package, packages), aggregate_package_nodes
-            )
-        )
+        combined_nodes = depth_first_search(PackageNode(self._package, packages))
+        results = dict(aggregate_package_nodes(nodes) for nodes in combined_nodes)
 
         # Merging feature packages with base packages
         final_packages = []
@@ -253,167 +169,141 @@ def _solve(self, use_latest=None):
             if package.features:
                 for _package in packages:
                     if (
-                        _package.name == package.name
-                        and not _package.is_same_package_as(package)
+                        not _package.features
+                        and _package.name == package.name
                         and _package.version == package.version
                     ):
                         for dep in package.requires:
-                            if dep.is_same_package_as(_package):
+                            # Prevent adding base package as a dependency to itself
+                            if _package.name == dep.name:
                                 continue
 
-                            if dep not in _package.requires:
-                                _package.requires.append(dep)
-
-                continue
-
-            final_packages.append(package)
-            depths.append(results[package])
+                            try:
+                                index = _package.requires.index(dep)
+                            except ValueError:
+                                _package.add_dependency(dep)
+                            else:
+                                _dep = _package.requires[index]
+                                if _dep.marker != dep.marker:
+                                    # marker of feature package is more accurate
+                                    # because it includes relevant extras
+                                    _dep.marker = dep.marker
+            else:
+                final_packages.append(package)
+                depths.append(results[package])
 
         # Return the packages in their original order with associated depths
         return final_packages, depths
 
 
-class DFSNode(object):
-    def __init__(self, id, name, base_name):
+DFSNodeID = Tuple[str, FrozenSet[str], bool]
+
+T = TypeVar("T", bound="DFSNode")
+
+
+class DFSNode:
+    def __init__(self, id: DFSNodeID, name: str, base_name: str) -> None:
         self.id = id
         self.name = name
         self.base_name = base_name
 
-    def reachable(self):
+    def reachable(self: T) -> list[T]:
         return []
 
-    def visit(self, parents):
+    def visit(self, parents: list[PackageNode]) -> None:
         pass
 
-    def __str__(self):
+    def __str__(self) -> str:
         return str(self.id)
 
 
-class VisitedState(enum.Enum):
-    Unvisited = 0
-    PartiallyVisited = 1
-    Visited = 2
-
-
-def depth_first_search(source, aggregator):
-    back_edges = defaultdict(list)
-    visited = {}
-    topo_sorted_nodes = []
+def depth_first_search(source: PackageNode) -> list[list[PackageNode]]:
+    back_edges: dict[DFSNodeID, list[PackageNode]] = defaultdict(list)
+    visited: set[DFSNodeID] = set()
+    topo_sorted_nodes: list[PackageNode] = []
 
     dfs_visit(source, back_edges, visited, topo_sorted_nodes)
 
     # Combine the nodes by name
-    combined_nodes = defaultdict(list)
-    name_children = defaultdict(list)
+    combined_nodes: dict[str, list[PackageNode]] = defaultdict(list)
     for node in topo_sorted_nodes:
         node.visit(back_edges[node.id])
-        name_children[node.name].extend(node.reachable())
         combined_nodes[node.name].append(node)
 
-    combined_topo_sorted_nodes = []
-    for node in topo_sorted_nodes:
-        if node.name in combined_nodes:
-            combined_topo_sorted_nodes.append(combined_nodes.pop(node.name))
-
-    results = [
-        aggregator(nodes, name_children[nodes[0].name])
-        for nodes in combined_topo_sorted_nodes
+    combined_topo_sorted_nodes: list[list[PackageNode]] = [
+        combined_nodes.pop(node.name)
+        for node in topo_sorted_nodes
+        if node.name in combined_nodes
     ]
-    return results
 
+    return combined_topo_sorted_nodes
 
-def dfs_visit(node, back_edges, visited, sorted_nodes):
-    if visited.get(node.id, VisitedState.Unvisited) == VisitedState.Visited:
-        return True
-    if visited.get(node.id, VisitedState.Unvisited) == VisitedState.PartiallyVisited:
-        # We have a circular dependency.
-        # Since the dependencies are resolved we can
-        # simply skip it because we already have it
-        return True
 
-    visited[node.id] = VisitedState.PartiallyVisited
+def dfs_visit(
+    node: PackageNode,
+    back_edges: dict[DFSNodeID, list[PackageNode]],
+    visited: set[DFSNodeID],
+    sorted_nodes: list[PackageNode],
+) -> None:
+    if node.id in visited:
+        return
+    visited.add(node.id)
+
     for neighbor in node.reachable():
         back_edges[neighbor.id].append(node)
-        if not dfs_visit(neighbor, back_edges, visited, sorted_nodes):
-            return False
-    visited[node.id] = VisitedState.Visited
+        dfs_visit(neighbor, back_edges, visited, sorted_nodes)
     sorted_nodes.insert(0, node)
-    return True
 
 
 class PackageNode(DFSNode):
     def __init__(
-        self, package, packages, previous=None, previous_dep=None, dep=None,
-    ):
+        self,
+        package: Package,
+        packages: list[Package],
+        previous: PackageNode | None = None,
+        dep: Dependency | None = None,
+    ) -> None:
         self.package = package
         self.packages = packages
 
-        self.previous = previous
-        self.previous_dep = previous_dep
         self.dep = dep
         self.depth = -1
 
         if not previous:
-            self.category = "dev"
+            self.groups: frozenset[str] = frozenset()
             self.optional = True
-        else:
-            self.category = dep.category
+        elif dep:
+            self.groups = dep.groups
             self.optional = dep.is_optional()
+        else:
+            raise ValueError("Both previous and dep must be passed")
 
-        super(PackageNode, self).__init__(
-            (package.complete_name, self.category, self.optional),
+        super().__init__(
+            (package.complete_name, self.groups, self.optional),
             package.complete_name,
             package.name,
         )
 
-    def reachable(self):
-        children = []  # type: List[PackageNode]
-
-        if (
-            self.previous_dep
-            and self.previous_dep is not self.dep
-            and self.previous_dep.name == self.dep.name
-        ):
-            return []
+    def reachable(self) -> list[PackageNode]:
+        children: list[PackageNode] = []
 
         for dependency in self.package.all_requires:
-            if self.previous and self.previous.name == dependency.name:
-                # We have a circular dependency.
-                # Since the dependencies are resolved we can
-                # simply skip it because we already have it
-                # N.B. this only catches cycles of length 2;
-                # dependency cycles in general are handled by the DFS traversal
-                continue
-
             for pkg in self.packages:
                 if pkg.complete_name == dependency.complete_name and (
                     dependency.constraint.allows(pkg.version)
-                    or dependency.allows_prereleases()
-                    and pkg.version.is_prerelease()
-                    and dependency.constraint.allows(pkg.version.stable)
                 ):
-                    # If there is already a child with this name
-                    # we merge the requirements
-                    if any(
-                        child.package.name == pkg.name
-                        and child.category == dependency.category
-                        for child in children
-                    ):
-                        continue
-
                     children.append(
                         PackageNode(
                             pkg,
                             self.packages,
                             self,
-                            dependency,
                             self.dep or dependency,
                         )
                     )
 
         return children
 
-    def visit(self, parents):
+    def visit(self, parents: list[PackageNode]) -> None:
         # The root package, which has no parents, is defined as having depth -1
         # So that the root package's top-level dependencies have depth 0.
         self.depth = 1 + max(
@@ -425,17 +315,18 @@ def visit(self, parents):
         )
 
 
-def aggregate_package_nodes(nodes, children):
+def aggregate_package_nodes(nodes: list[PackageNode]) -> tuple[Package, int]:
     package = nodes[0].package
     depth = max(node.depth for node in nodes)
-    category = (
-        "main" if any(node.category == "main" for node in children + nodes) else "dev"
-    )
-    optional = all(node.optional for node in children + nodes)
+    groups: list[str] = []
+    for node in nodes:
+        groups.extend(node.groups)
+
+    optional = all(node.optional for node in nodes)
     for node in nodes:
         node.depth = depth
-        node.category = category
         node.optional = optional
-    package.category = category
+
     package.optional = optional
+
     return package, depth
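
A self-contained miniature of the traversal above, over a made-up three-node graph rather than poetry objects: back edges recorded during dfs_visit later drive the depth computation in visit(), with the parentless root pinned at depth -1 so its direct dependencies land at depth 0.

from collections import defaultdict

graph = {"root": ["a", "b"], "a": ["b"], "b": []}  # root -> a -> b, root -> b

back_edges = defaultdict(list)
visited = set()
topo = []

def dfs_visit(node):
    if node in visited:
        return
    visited.add(node)
    for neighbor in graph[node]:
        back_edges[neighbor].append(node)  # recorded even for visited nodes
        dfs_visit(neighbor)
    topo.insert(0, node)

dfs_visit("root")

# Topological order guarantees every parent's depth is known in time;
# 1 + max(..., default=-2) gives the root its conventional depth of -1.
depths = {}
for node in topo:
    depths[node] = 1 + max((depths[p] for p in back_edges[node]), default=-2)

print(topo, depths)  # ['root', 'a', 'b'] {'root': -1, 'a': 0, 'b': 1}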
diff --git a/conda_lock/_vendor/poetry/puzzle/transaction.py b/conda_lock/_vendor/poetry/puzzle/transaction.py
new file mode 100644
index 00000000..857cf8e1
--- /dev/null
+++ b/conda_lock/_vendor/poetry/puzzle/transaction.py
@@ -0,0 +1,129 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.core.packages.package import Package
+
+    from conda_lock._vendor.poetry.installation.operations.operation import Operation
+
+
+class Transaction:
+    def __init__(
+        self,
+        current_packages: list[Package],
+        result_packages: list[tuple[Package, int]],
+        installed_packages: list[Package] | None = None,
+        root_package: Package | None = None,
+    ) -> None:
+        self._current_packages = current_packages
+        self._result_packages = result_packages
+
+        if installed_packages is None:
+            installed_packages = []
+
+        self._installed_packages = installed_packages
+        self._root_package = root_package
+
+    def calculate_operations(
+        self,
+        with_uninstalls: bool = True,
+        synchronize: bool = False,
+        *,
+        skip_directory: bool = False,
+    ) -> list[Operation]:
+        from conda_lock._vendor.poetry.installation.operations import Install
+        from conda_lock._vendor.poetry.installation.operations import Uninstall
+        from conda_lock._vendor.poetry.installation.operations import Update
+
+        operations: list[Operation] = []
+
+        for result_package, priority in self._result_packages:
+            installed = False
+
+            for installed_package in self._installed_packages:
+                if result_package.name == installed_package.name:
+                    installed = True
+
+                    # We have to perform an update if the version or another
+                    # attribute of the package has changed (source type, url, ref, ...).
+                    if result_package.version != installed_package.version or (
+                        (
+                            # This has to be done because installed packages cannot
+                            # have type "legacy". If a package with type "legacy"
+                            # is installed, the installed package has no source_type.
+                            # Thus, if installed_package has no source_type and
+                            # the result_package has source_type "legacy" (negation of
+                            # the following condition), update must not be performed.
+                            # This quirk has the side effect that when switching
+                            # from PyPI to legacy (or vice versa),
+                            # no update is performed.
+                            installed_package.source_type
+                            or result_package.source_type != "legacy"
+                        )
+                        and not result_package.is_same_package_as(installed_package)
+                    ):
+                        operations.append(
+                            Update(installed_package, result_package, priority=priority)
+                        )
+                    else:
+                        operations.append(
+                            Install(result_package).skip("Already installed")
+                        )
+
+                    break
+
+            if not (
+                installed
+                or (skip_directory and result_package.source_type == "directory")
+            ):
+                operations.append(Install(result_package, priority=priority))
+
+        if with_uninstalls:
+            uninstalls: set[str] = set()
+            for current_package in self._current_packages:
+                found = any(
+                    current_package.name == result_package.name
+                    for result_package, _ in self._result_packages
+                )
+
+                if not found:
+                    for installed_package in self._installed_packages:
+                        if installed_package.name == current_package.name:
+                            uninstalls.add(installed_package.name)
+                            operations.append(Uninstall(current_package))
+
+            if synchronize:
+                result_package_names = {
+                    result_package.name for result_package, _ in self._result_packages
+                }
+                # We preserve pip when not managed by poetry; this is done to avoid
+                # externally managed virtual environments causing unnecessary removals.
+                preserved_package_names = {"pip"} - result_package_names
+
+                for installed_package in self._installed_packages:
+                    if installed_package.name in uninstalls:
+                        continue
+
+                    if (
+                        self._root_package
+                        and installed_package.name == self._root_package.name
+                    ):
+                        continue
+
+                    if installed_package.name in preserved_package_names:
+                        continue
+
+                    if installed_package.name not in result_package_names:
+                        uninstalls.add(installed_package.name)
+                        operations.append(Uninstall(installed_package))
+
+        return sorted(
+            operations,
+            key=lambda o: (
+                -o.priority,
+                o.package.name,
+                o.package.version,
+            ),
+        )
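
A much-simplified model of the decision table implemented above, with (name, version) tuples standing in for Package objects and the priorities, source comparison, and pip special case left out:

def calculate(current, result, installed):
    ops = []
    installed_by_name = dict(installed)
    for name, version in result:
        if name not in installed_by_name:
            ops.append(("install", name, version))
        elif installed_by_name[name] != version:
            ops.append(("update", name, version))
        else:
            ops.append(("skip", name, version))
    result_names = {name for name, _ in result}
    for name, _ in current:  # locked before, but absent from the new solution
        if name not in result_names and name in installed_by_name:
            ops.append(("uninstall", name, installed_by_name[name]))
    return ops

print(calculate(
    current=[("requests", "2.0"), ("six", "1.16")],
    result=[("requests", "2.31"), ("idna", "3.4")],
    installed=[("requests", "2.0"), ("six", "1.16")],
))
# [('update', 'requests', '2.31'), ('install', 'idna', '3.4'),
#  ('uninstall', 'six', '1.16')]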
diff --git a/conda_lock/_vendor/poetry/py.typed b/conda_lock/_vendor/poetry/py.typed
new file mode 100644
index 00000000..e69de29b
diff --git a/conda_lock/_vendor/poetry/pyproject/__init__.py b/conda_lock/_vendor/poetry/pyproject/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/conda_lock/_vendor/poetry/pyproject/toml.py b/conda_lock/_vendor/poetry/pyproject/toml.py
new file mode 100644
index 00000000..84cbe9db
--- /dev/null
+++ b/conda_lock/_vendor/poetry/pyproject/toml.py
@@ -0,0 +1,62 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from conda_lock._vendor.poetry.core.pyproject.toml import PyProjectTOML as BasePyProjectTOML
+from tomlkit.api import table
+from tomlkit.items import Table
+from tomlkit.toml_document import TOMLDocument
+
+from conda_lock._vendor.poetry.toml import TOMLFile
+
+
+if TYPE_CHECKING:
+    from pathlib import Path
+
+
+class PyProjectTOML(BasePyProjectTOML):
+    """
+    Enhanced version of poetry-core's PyProjectTOML
+    which is capable of writing pyproject.toml
+
+    The poetry-core class uses tomli to read the file;
+    here we use tomlkit to preserve comments and formatting when writing.
+    """
+
+    def __init__(self, path: Path) -> None:
+        super().__init__(path)
+        self._toml_file = TOMLFile(path=path)
+        self._toml_document: TOMLDocument | None = None
+
+    @property
+    def file(self) -> TOMLFile:
+        return self._toml_file
+
+    @property
+    def data(self) -> TOMLDocument:
+        if self._toml_document is None:
+            if not self.file.exists():
+                self._toml_document = TOMLDocument()
+            else:
+                self._toml_document = self.file.read()
+
+        return self._toml_document
+
+    def save(self) -> None:
+        data = self.data
+
+        if self._build_system is not None:
+            if "build-system" not in data:
+                data["build-system"] = table()
+
+            build_system = data["build-system"]
+            assert isinstance(build_system, Table)
+
+            build_system["requires"] = self._build_system.requires
+            build_system["build-backend"] = self._build_system.build_backend
+
+        self.file.write(data=data)
+
+    def reload(self) -> None:
+        self._toml_document = None
+        self._build_system = None
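
The comment-preserving behaviour that motivates this class is easy to demonstrate with tomlkit directly (a standalone sketch, not poetry's own code):

import tomlkit

source = """\
# build configuration
[build-system]
requires = ["poetry-core"]  # keep in sync with poetry
"""

doc = tomlkit.parse(source)
doc["build-system"]["build-backend"] = "poetry.core.masonry.api"
print(tomlkit.dumps(doc))
# Both comments survive the round trip, which a plain dict produced by
# tomli could not guarantee.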
diff --git a/conda_lock/_vendor/poetry/repositories/__init__.py b/conda_lock/_vendor/poetry/repositories/__init__.py
index ab92fb11..f9942fc5 100644
--- a/conda_lock/_vendor/poetry/repositories/__init__.py
+++ b/conda_lock/_vendor/poetry/repositories/__init__.py
@@ -1,2 +1,7 @@
-from .pool import Pool
-from .repository import Repository
+from __future__ import annotations
+
+from conda_lock._vendor.poetry.repositories.repository import Repository
+from conda_lock._vendor.poetry.repositories.repository_pool import RepositoryPool
+
+
+__all__ = ["Repository", "RepositoryPool"]
diff --git a/conda_lock/_vendor/poetry/repositories/abstract_repository.py b/conda_lock/_vendor/poetry/repositories/abstract_repository.py
new file mode 100644
index 00000000..d2b497f3
--- /dev/null
+++ b/conda_lock/_vendor/poetry/repositories/abstract_repository.py
@@ -0,0 +1,34 @@
+from __future__ import annotations
+
+from abc import ABC
+from abc import abstractmethod
+from typing import TYPE_CHECKING
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.core.constraints.version import Version
+    from conda_lock._vendor.poetry.core.packages.dependency import Dependency
+    from conda_lock._vendor.poetry.core.packages.package import Package
+
+
+class AbstractRepository(ABC):
+    def __init__(self, name: str) -> None:
+        self._name = name
+
+    @property
+    def name(self) -> str:
+        return self._name
+
+    @abstractmethod
+    def find_packages(self, dependency: Dependency) -> list[Package]: ...
+
+    @abstractmethod
+    def search(self, query: str) -> list[Package]: ...
+
+    @abstractmethod
+    def package(
+        self,
+        name: str,
+        version: Version,
+        extras: list[str] | None = None,
+    ) -> Package: ...
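To make the contract concrete, here is a hypothetical in-memory subclass: the three abstract methods above are the whole surface a repository must satisfy. The class is a toy for illustration (it is not one of poetry's repositories), and the matching logic is deliberately naive.

```python
from conda_lock._vendor.poetry.core.constraints.version import Version
from conda_lock._vendor.poetry.core.packages.dependency import Dependency
from conda_lock._vendor.poetry.core.packages.package import Package


class InMemoryRepository(AbstractRepository):
    """Toy repository backed by a plain list (illustration only)."""

    def __init__(self, name: str, packages: list[Package]) -> None:
        super().__init__(name)
        self._packages = packages

    def find_packages(self, dependency: Dependency) -> list[Package]:
        # Match by name first, then let the dependency's constraint filter versions.
        return [
            p
            for p in self._packages
            if p.name == dependency.name and dependency.constraint.allows(p.version)
        ]

    def search(self, query: str) -> list[Package]:
        return [p for p in self._packages if query in p.name]

    def package(
        self, name: str, version: Version, extras: list[str] | None = None
    ) -> Package:
        for p in self._packages:
            if p.name == name and p.version == version:
                return p
        raise ValueError(f"{name} {version} not found")
```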
diff --git a/conda_lock/_vendor/poetry/repositories/base_repository.py b/conda_lock/_vendor/poetry/repositories/base_repository.py
deleted file mode 100644
index 46422ca0..00000000
--- a/conda_lock/_vendor/poetry/repositories/base_repository.py
+++ /dev/null
@@ -1,19 +0,0 @@
-class BaseRepository(object):
-    def __init__(self):
-        self._packages = []
-
-    @property
-    def packages(self):
-        return self._packages
-
-    def has_package(self, package):
-        raise NotImplementedError()
-
-    def package(self, name, version, extras=None):
-        raise NotImplementedError()
-
-    def find_packages(self, dependency):
-        raise NotImplementedError()
-
-    def search(self, query):
-        raise NotImplementedError()
diff --git a/conda_lock/_vendor/poetry/repositories/cached_repository.py b/conda_lock/_vendor/poetry/repositories/cached_repository.py
new file mode 100644
index 00000000..8f62fefc
--- /dev/null
+++ b/conda_lock/_vendor/poetry/repositories/cached_repository.py
@@ -0,0 +1,77 @@
+from __future__ import annotations
+
+from abc import ABC
+from abc import abstractmethod
+from typing import TYPE_CHECKING
+from typing import Any
+
+from packaging.utils import canonicalize_name
+from conda_lock._vendor.poetry.core.constraints.version import parse_constraint
+
+from conda_lock._vendor.poetry.config.config import Config
+from conda_lock._vendor.poetry.repositories.repository import Repository
+from conda_lock._vendor.poetry.utils.cache import FileCache
+
+
+if TYPE_CHECKING:
+    from packaging.utils import NormalizedName
+    from conda_lock._vendor.poetry.core.constraints.version import Version
+    from conda_lock._vendor.poetry.core.packages.package import Package
+
+    from conda_lock._vendor.poetry.inspection.info import PackageInfo
+
+
+class CachedRepository(Repository, ABC):
+    CACHE_VERSION = parse_constraint("2.0.0")
+
+    def __init__(
+        self, name: str, disable_cache: bool = False, config: Config | None = None
+    ) -> None:
+        super().__init__(name)
+        self._disable_cache = disable_cache
+        self._cache_dir = (config or Config.create()).repository_cache_directory / name
+        self._release_cache: FileCache[dict[str, Any]] = FileCache(path=self._cache_dir)
+
+    @abstractmethod
+    def _get_release_info(
+        self, name: NormalizedName, version: Version
+    ) -> dict[str, Any]: ...
+
+    def get_release_info(self, name: NormalizedName, version: Version) -> PackageInfo:
+        """
+        Return the release information given a package name and a version.
+
+        The information is returned from the cache if it exists
+        or retrieved from the remote server.
+        """
+        from conda_lock._vendor.poetry.inspection.info import PackageInfo
+
+        if self._disable_cache:
+            return PackageInfo.load(self._get_release_info(name, version))
+
+        cached = self._release_cache.remember(
+            f"{name}:{version}", lambda: self._get_release_info(name, version)
+        )
+
+        cache_version = cached.get("_cache_version", "0.0.0")
+        if parse_constraint(cache_version) != self.CACHE_VERSION:
+            # The cache must be updated
+            self._log(
+                f"The cache for {name} {version} is outdated. Refreshing.",
+                level="debug",
+            )
+            cached = self._get_release_info(name, version)
+
+            self._release_cache.put(f"{name}:{version}", cached)
+
+        return PackageInfo.load(cached)
+
+    def package(
+        self,
+        name: str,
+        version: Version,
+        extras: list[str] | None = None,
+    ) -> Package:
+        return self.get_release_info(canonicalize_name(name), version).to_package(
+            name=name, extras=extras
+        )
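The cache flow in `get_release_info` hinges on `FileCache.remember` (return on hit, compute-and-store on miss). A stand-in sketch of that pattern with an in-memory dict; `MemoryCache` is hypothetical, whereas poetry's `FileCache` persists entries to disk.

```python
from typing import Any, Callable


class MemoryCache:
    """In-memory stand-in for the remember/put surface used above."""

    def __init__(self) -> None:
        self._store: dict[str, Any] = {}

    def remember(self, key: str, callback: Callable[[], Any]) -> Any:
        # Return the cached value if present; otherwise compute, store, return.
        if key not in self._store:
            self._store[key] = callback()
        return self._store[key]

    def put(self, key: str, value: Any) -> None:
        self._store[key] = value


cache = MemoryCache()
info = cache.remember("requests:2.31.0", lambda: {"_cache_version": "2.0.0"})
assert cache.remember("requests:2.31.0", lambda: {}) == info  # hit, no recompute
```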
diff --git a/conda_lock/_vendor/poetry/repositories/exceptions.py b/conda_lock/_vendor/poetry/repositories/exceptions.py
index 170303f3..c742f268 100644
--- a/conda_lock/_vendor/poetry/repositories/exceptions.py
+++ b/conda_lock/_vendor/poetry/repositories/exceptions.py
@@ -1,8 +1,13 @@
-class RepositoryError(Exception):
+from __future__ import annotations
+
 
+class RepositoryError(Exception):
     pass
 
 
 class PackageNotFound(Exception):
+    pass
+
 
+class InvalidSourceError(Exception):
     pass
diff --git a/conda_lock/_vendor/poetry/repositories/http_repository.py b/conda_lock/_vendor/poetry/repositories/http_repository.py
new file mode 100644
index 00000000..14ee546d
--- /dev/null
+++ b/conda_lock/_vendor/poetry/repositories/http_repository.py
@@ -0,0 +1,432 @@
+from __future__ import annotations
+
+import functools
+import hashlib
+
+from contextlib import contextmanager
+from contextlib import suppress
+from pathlib import Path
+from typing import TYPE_CHECKING
+from typing import Any
+from typing import Iterator
+
+import requests
+import requests.adapters
+
+from packaging.metadata import parse_email
+from conda_lock._vendor.poetry.core.constraints.version import parse_constraint
+from conda_lock._vendor.poetry.core.packages.dependency import Dependency
+from conda_lock._vendor.poetry.core.utils.helpers import temporary_directory
+from conda_lock._vendor.poetry.core.version.markers import parse_marker
+
+from conda_lock._vendor.poetry.config.config import Config
+from conda_lock._vendor.poetry.inspection.info import PackageInfo
+from conda_lock._vendor.poetry.inspection.lazy_wheel import LazyWheelUnsupportedError
+from conda_lock._vendor.poetry.inspection.lazy_wheel import metadata_from_wheel_url
+from conda_lock._vendor.poetry.repositories.cached_repository import CachedRepository
+from conda_lock._vendor.poetry.repositories.exceptions import PackageNotFound
+from conda_lock._vendor.poetry.repositories.exceptions import RepositoryError
+from conda_lock._vendor.poetry.repositories.link_sources.html import HTMLPage
+from conda_lock._vendor.poetry.utils.authenticator import Authenticator
+from conda_lock._vendor.poetry.utils.constants import REQUESTS_TIMEOUT
+from conda_lock._vendor.poetry.utils.helpers import HTTPRangeRequestSupported
+from conda_lock._vendor.poetry.utils.helpers import download_file
+from conda_lock._vendor.poetry.utils.helpers import get_highest_priority_hash_type
+from conda_lock._vendor.poetry.utils.patterns import wheel_file_re
+
+
+if TYPE_CHECKING:
+    from packaging.utils import NormalizedName
+    from conda_lock._vendor.poetry.core.packages.utils.link import Link
+
+    from conda_lock._vendor.poetry.repositories.link_sources.base import LinkSource
+    from conda_lock._vendor.poetry.utils.authenticator import RepositoryCertificateConfig
+
+
+class HTTPRepository(CachedRepository):
+    def __init__(
+        self,
+        name: str,
+        url: str,
+        config: Config | None = None,
+        disable_cache: bool = False,
+        pool_size: int = requests.adapters.DEFAULT_POOLSIZE,
+    ) -> None:
+        super().__init__(name, disable_cache, config)
+        self._url = url
+        if config is None:
+            config = Config.create()
+        self._authenticator = Authenticator(
+            config=config,
+            cache_id=name,
+            disable_cache=disable_cache,
+            pool_size=pool_size,
+        )
+        self._authenticator.add_repository(name, url)
+        self.get_page = functools.lru_cache(maxsize=None)(self._get_page)
+
+        self._lazy_wheel = config.get("solver.lazy-wheel", True)
+        # We track whether a domain supports range requests to avoid
+        # unnecessary requests.
+        # ATTENTION: A domain might support range requests only for some files, so the
+        # meaning is as follows:
+        # - Domain not in dict: We don't know anything.
+        # - True: The domain supports range requests for at least some files.
+        # - False: The domain does not support range requests for the files we tried.
+        self._supports_range_requests: dict[str, bool] = {}
+
+    @property
+    def session(self) -> Authenticator:
+        return self._authenticator
+
+    @property
+    def url(self) -> str:
+        return self._url
+
+    @property
+    def certificates(self) -> RepositoryCertificateConfig:
+        return self._authenticator.get_certs_for_url(self.url)
+
+    @property
+    def authenticated_url(self) -> str:
+        return self._authenticator.authenticated_url(url=self.url)
+
+    def _download(
+        self, url: str, dest: Path, *, raise_accepts_ranges: bool = False
+    ) -> None:
+        return download_file(
+            url, dest, session=self.session, raise_accepts_ranges=raise_accepts_ranges
+        )
+
+    @contextmanager
+    def _cached_or_downloaded_file(
+        self, link: Link, *, raise_accepts_ranges: bool = False
+    ) -> Iterator[Path]:
+        self._log(f"Downloading: {link.url}", level="debug")
+        with temporary_directory() as temp_dir:
+            filepath = Path(temp_dir) / link.filename
+            self._download(
+                link.url, filepath, raise_accepts_ranges=raise_accepts_ranges
+            )
+            yield filepath
+
+    def _get_info_from_wheel(self, link: Link) -> PackageInfo:
+        from conda_lock._vendor.poetry.inspection.info import PackageInfo
+
+        netloc = link.netloc
+
+        # If "lazy-wheel" is enabled and the domain supports range requests
+        # or we don't know yet, we try range requests.
+        raise_accepts_ranges = self._lazy_wheel
+        if self._lazy_wheel and self._supports_range_requests.get(netloc, True):
+            try:
+                package_info = PackageInfo.from_metadata(
+                    metadata_from_wheel_url(link.filename, link.url, self.session)
+                )
+            except LazyWheelUnsupportedError as e:
+                # Do not set to False if we already know that the domain supports
+                # range requests for some URLs!
+                self._log(
+                    f"Disabling lazy wheel support for {netloc}: {e}",
+                    level="debug",
+                )
+                raise_accepts_ranges = False
+                self._supports_range_requests.setdefault(netloc, False)
+            else:
+                self._supports_range_requests[netloc] = True
+                return package_info
+
+        try:
+            with self._cached_or_downloaded_file(
+                link, raise_accepts_ranges=raise_accepts_ranges
+            ) as filepath:
+                return PackageInfo.from_wheel(filepath)
+        except HTTPRangeRequestSupported:
+            # The domain did not support range requests for the first URL(s) we tried,
+            # but supports it for some URLs (especially the current URL),
+            # so we abort the download, update _supports_range_requests to try
+            # range requests for all files and use it for the current URL.
+            self._log(
+                f"Abort downloading {link.url} because server supports range requests",
+                level="debug",
+            )
+            self._supports_range_requests[netloc] = True
+            return self._get_info_from_wheel(link)
+
+    def _get_info_from_sdist(self, link: Link) -> PackageInfo:
+        from conda_lock._vendor.poetry.inspection.info import PackageInfo
+
+        with self._cached_or_downloaded_file(link) as filepath:
+            return PackageInfo.from_sdist(filepath)
+
+    def _get_info_from_metadata(self, link: Link) -> PackageInfo | None:
+        if link.has_metadata:
+            try:
+                assert link.metadata_url is not None
+                response = self.session.get(link.metadata_url)
+                if link.metadata_hashes and (
+                    hash_name := get_highest_priority_hash_type(
+                        set(link.metadata_hashes.keys()), f"{link.filename}.metadata"
+                    )
+                ):
+                    metadata_hash = getattr(hashlib, hash_name)(
+                        response.content
+                    ).hexdigest()
+                    if metadata_hash != link.metadata_hashes[hash_name]:
+                        self._log(
+                            f"Metadata file hash ({metadata_hash}) does not match"
+                            f" expected hash ({link.metadata_hashes[hash_name]})."
+                            f" Metadata file for {link.filename} will be ignored.",
+                            level="warning",
+                        )
+                        return None
+
+                metadata, _ = parse_email(response.content)
+                return PackageInfo.from_metadata(metadata)
+
+            except requests.HTTPError:
+                self._log(
+                    f"Failed to retrieve metadata at {link.metadata_url}",
+                    level="warning",
+                )
+
+        return None
+
+    def _get_info_from_links(
+        self,
+        links: list[Link],
+        *,
+        ignore_yanked: bool = True,
+    ) -> PackageInfo:
+        # Sort links by distribution type
+        wheels: list[Link] = []
+        sdists: list[Link] = []
+        for link in links:
+            if link.yanked and ignore_yanked:
+                # drop yanked files unless the entire release is yanked
+                continue
+            if link.is_wheel:
+                wheels.append(link)
+            elif link.filename.endswith(
+                (".tar.gz", ".zip", ".bz2", ".xz", ".Z", ".tar")
+            ):
+                sdists.append(link)
+
+        # Prefer to read data from wheels: this is faster and more reliable
+        if wheels:
+            # We ought just to be able to look at any of the available wheels to read
+            # metadata; they should all give the same answer.
+            #
+            # In practice this hasn't always been true.
+            #
+            # Most of the code in here is to deal with cases such as isort 4.3.4 which
+            # published separate python3 and python2 wheels with quite different
+            # dependencies.  We try to detect such cases and combine the data from the
+            # two wheels into what ought to have been published in the first place...
+            universal_wheel = None
+            universal_python2_wheel = None
+            universal_python3_wheel = None
+            platform_specific_wheels = []
+            for wheel in wheels:
+                m = wheel_file_re.match(wheel.filename)
+                if not m:
+                    continue
+
+                pyver = m.group("pyver")
+                abi = m.group("abi")
+                plat = m.group("plat")
+                if abi == "none" and plat == "any":
+                    # Universal wheel
+                    if pyver == "py2.py3":
+                        # Any Python
+                        universal_wheel = wheel
+                    elif pyver == "py2":
+                        universal_python2_wheel = wheel
+                    else:
+                        universal_python3_wheel = wheel
+                else:
+                    platform_specific_wheels.append(wheel)
+
+            if universal_wheel is not None:
+                return self._get_info_from_metadata(
+                    universal_wheel
+                ) or self._get_info_from_wheel(universal_wheel)
+
+            info = None
+            if universal_python2_wheel and universal_python3_wheel:
+                info = self._get_info_from_metadata(
+                    universal_python2_wheel
+                ) or self._get_info_from_wheel(universal_python2_wheel)
+
+                py3_info = self._get_info_from_metadata(
+                    universal_python3_wheel
+                ) or self._get_info_from_wheel(universal_python3_wheel)
+
+                if info.requires_python or py3_info.requires_python:
+                    info.requires_python = str(
+                        parse_constraint(info.requires_python or "^2.7").union(
+                            parse_constraint(py3_info.requires_python or "^3")
+                        )
+                    )
+
+                if py3_info.requires_dist:
+                    if not info.requires_dist:
+                        info.requires_dist = py3_info.requires_dist
+
+                        return info
+
+                    py2_requires_dist = {
+                        Dependency.create_from_pep_508(r).to_pep_508()
+                        for r in info.requires_dist
+                    }
+                    py3_requires_dist = {
+                        Dependency.create_from_pep_508(r).to_pep_508()
+                        for r in py3_info.requires_dist
+                    }
+                    base_requires_dist = py2_requires_dist & py3_requires_dist
+                    py2_only_requires_dist = py2_requires_dist - py3_requires_dist
+                    py3_only_requires_dist = py3_requires_dist - py2_requires_dist
+
+                    # Normalizing requires_dist
+                    requires_dist = list(base_requires_dist)
+                    for requirement in py2_only_requires_dist:
+                        dep = Dependency.create_from_pep_508(requirement)
+                        dep.marker = dep.marker.intersect(
+                            parse_marker("python_version == '2.7'")
+                        )
+                        requires_dist.append(dep.to_pep_508())
+
+                    for requirement in py3_only_requires_dist:
+                        dep = Dependency.create_from_pep_508(requirement)
+                        dep.marker = dep.marker.intersect(
+                            parse_marker("python_version >= '3'")
+                        )
+                        requires_dist.append(dep.to_pep_508())
+
+                    info.requires_dist = sorted(set(requires_dist))
+
+            if info:
+                return info
+
+            # Prefer non platform specific wheels
+            if universal_python3_wheel:
+                return self._get_info_from_metadata(
+                    universal_python3_wheel
+                ) or self._get_info_from_wheel(universal_python3_wheel)
+
+            if universal_python2_wheel:
+                return self._get_info_from_metadata(
+                    universal_python2_wheel
+                ) or self._get_info_from_wheel(universal_python2_wheel)
+
+            if platform_specific_wheels:
+                first_wheel = platform_specific_wheels[0]
+                return self._get_info_from_metadata(
+                    first_wheel
+                ) or self._get_info_from_wheel(first_wheel)
+
+        return self._get_info_from_metadata(sdists[0]) or self._get_info_from_sdist(
+            sdists[0]
+        )
+
+    def _links_to_data(self, links: list[Link], data: PackageInfo) -> dict[str, Any]:
+        if not links:
+            raise PackageNotFound(
+                f'No valid distribution links found for package: "{data.name}" version:'
+                f' "{data.version}"'
+            )
+
+        files: list[dict[str, Any]] = []
+        for link in links:
+            if link.yanked and not data.yanked:
+                # drop yanked files unless the entire release is yanked
+                continue
+
+            file_hash: str | None
+            for hash_name in ("sha512", "sha384", "sha256"):
+                if hash_name in link.hashes:
+                    file_hash = f"{hash_name}:{link.hashes[hash_name]}"
+                    break
+            else:
+                file_hash = self.calculate_sha256(link)
+
+            if file_hash is None and (
+                hash_type := get_highest_priority_hash_type(
+                    set(link.hashes.keys()), link.filename
+                )
+            ):
+                file_hash = f"{hash_type}:{link.hashes[hash_type]}"
+
+            files.append({"file": link.filename, "hash": file_hash})
+
+        data.files = files
+
+        # drop yanked files unless the entire release is yanked
+        info = self._get_info_from_links(links, ignore_yanked=not data.yanked)
+
+        data.summary = info.summary
+        data.requires_dist = info.requires_dist
+        data.requires_python = info.requires_python
+
+        return data.asdict()
+
+    def calculate_sha256(self, link: Link) -> str | None:
+        with self._cached_or_downloaded_file(link) as filepath:
+            hash_name = get_highest_priority_hash_type(
+                set(link.hashes.keys()), link.filename
+            )
+            known_hash = None
+            with suppress(ValueError, AttributeError):
+                # Handle ValueError here as well since under FIPS environments
+                # this is what is raised (e.g., for MD5)
+                known_hash = getattr(hashlib, hash_name)() if hash_name else None
+            required_hash = hashlib.sha256()
+
+            chunksize = 4096
+            with filepath.open("rb") as f:
+                while True:
+                    chunk = f.read(chunksize)
+                    if not chunk:
+                        break
+                    if known_hash:
+                        known_hash.update(chunk)
+                    required_hash.update(chunk)
+
+            if (
+                not hash_name
+                or not known_hash
+                or known_hash.hexdigest() == link.hashes[hash_name]
+            ):
+                return f"{required_hash.name}:{required_hash.hexdigest()}"
+        return None
+
+    def _get_response(self, endpoint: str) -> requests.Response | None:
+        url = self._url + endpoint
+        try:
+            response: requests.Response = self.session.get(
+                url, raise_for_status=False, timeout=REQUESTS_TIMEOUT
+            )
+            if response.status_code in (401, 403):
+                self._log(
+                    f"Authorization error accessing {url}",
+                    level="warning",
+                )
+                return None
+            if response.status_code == 404:
+                return None
+            response.raise_for_status()
+        except requests.exceptions.HTTPError as e:
+            raise RepositoryError(e)
+
+        if response.url != url:
+            self._log(
+                f"Response URL {response.url} differs from request URL {url}",
+                level="debug",
+            )
+        return response
+
+    def _get_page(self, name: NormalizedName) -> LinkSource:
+        response = self._get_response(f"/{name}/")
+        if not response:
+            raise PackageNotFound(f"Package [{name}] not found.")
+        return HTMLPage(response.url, response.text)
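`calculate_sha256` above streams the file through both hashers in fixed-size chunks. The core of that loop, reduced to a standalone helper (the function name is ours):

```python
import hashlib
from pathlib import Path


def streamed_sha256(path: Path, chunksize: int = 4096) -> str:
    # Read in chunks so large wheels/sdists never load fully into memory.
    digest = hashlib.sha256()
    with path.open("rb") as f:
        while chunk := f.read(chunksize):
            digest.update(chunk)
    return f"{digest.name}:{digest.hexdigest()}"
```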
diff --git a/conda_lock/_vendor/poetry/repositories/installed_repository.py b/conda_lock/_vendor/poetry/repositories/installed_repository.py
index 1f1ab237..c363a32e 100644
--- a/conda_lock/_vendor/poetry/repositories/installed_repository.py
+++ b/conda_lock/_vendor/poetry/repositories/installed_repository.py
@@ -1,29 +1,34 @@
+from __future__ import annotations
+
 import itertools
+import json
+import logging
 
-from typing import Set
-from typing import Union
+from pathlib import Path
+from typing import TYPE_CHECKING
 
-from conda_lock._vendor.poetry.core.packages import Package
+from packaging.utils import canonicalize_name
+from conda_lock._vendor.poetry.core.packages.package import Package
+from conda_lock._vendor.poetry.core.packages.utils.utils import url_to_path
 from conda_lock._vendor.poetry.core.utils.helpers import module_name
-from conda_lock._vendor.poetry.utils._compat import Path
-from conda_lock._vendor.poetry.utils._compat import metadata
-from conda_lock._vendor.poetry.utils.env import Env
 
-from .repository import Repository
+from conda_lock._vendor.poetry.repositories.repository import Repository
+from conda_lock._vendor.poetry.utils._compat import metadata
 
 
-_VENDORS = Path(__file__).parent.parent.joinpath("_vendor")
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.utils.env import Env
 
 
-try:
-    FileNotFoundError
-except NameError:
-    FileNotFoundError = OSError
+logger = logging.getLogger(__name__)
 
 
 class InstalledRepository(Repository):
+    def __init__(self) -> None:
+        super().__init__("poetry-installed")
+
     @classmethod
-    def get_package_paths(cls, env, name):  # type: (Env, str) -> Set[Path]
+    def get_package_paths(cls, env: Env, name: str) -> set[Path]:
         """
         Process a .pth file within the site-packages directories, and return any valid
         paths. We skip executable .pth files as there is no reliable means to do this
@@ -41,10 +46,11 @@ def get_package_paths(cls, env, name):  # type: (Env, str) -> Set[Path]
         paths = set()
 
         # we identify the candidate pth files to check; this is done to handle cases
-        # where the pth file for foo-bar might have been installed as either foo-bar.pth or
-        # foo_bar.pth (expected) in either pure or platform lib directories.
+        # where the pth file for foo-bar might have been installed as either foo-bar.pth
+        # or foo_bar.pth (expected) in either pure or platform lib directories.
         candidates = itertools.product(
-            {env.purelib, env.platlib}, {name, module_name(name)},
+            {env.purelib, env.platlib},
+            {name, module_name(name)},
         )
 
         for lib, module in candidates:
@@ -58,35 +64,24 @@ def get_package_paths(cls, env, name):  # type: (Env, str) -> Set[Path]
                     if line and not line.startswith(("#", "import ", "import\t")):
                         path = Path(line)
                         if not path.is_absolute():
-                            try:
-                                path = lib.joinpath(path).resolve()
-                            except FileNotFoundError:
-                                # this is required to handle pathlib oddity on win32 python==3.5
-                                path = lib.joinpath(path)
+                            path = lib.joinpath(path).resolve()
                         paths.add(path)
-        return paths
 
-    @classmethod
-    def set_package_vcs_properties_from_path(
-        cls, src, package
-    ):  # type: (Path, Package) -> None
-        from conda_lock._vendor.poetry.core.vcs.git import Git
-
-        git = Git()
-        revision = git.rev_parse("HEAD", src).strip()
-        url = git.remote_url(src)
+        src_path = env.path / "src" / name
+        if not paths and src_path.exists():
+            paths.add(src_path)
 
-        package._source_type = "git"
-        package._source_url = url
-        package._source_reference = revision
+        return paths
 
     @classmethod
-    def set_package_vcs_properties(cls, package, env):  # type: (Package, Env) -> None
-        src = env.path / "src" / package.name
-        cls.set_package_vcs_properties_from_path(src, package)
+    def get_package_vcs_properties_from_path(cls, src: Path) -> tuple[str, str, str]:
+        from conda_lock._vendor.poetry.vcs.git import Git
+
+        info = Git.info(repo=src)
+        return "git", info.origin, info.revision
 
     @classmethod
-    def is_vcs_package(cls, package, env):  # type: (Union[Path, Package], Env) -> bool
+    def is_vcs_package(cls, package: Path | Package, env: Env) -> bool:
         # A VCS dependency should have been installed
         # in the src directory.
         src = env.path / "src"
@@ -101,66 +96,189 @@ def is_vcs_package(cls, package, env):  # type: (Union[Path, Package], Env) -> b
             return True
 
     @classmethod
-    def load(cls, env):  # type: (Env) -> InstalledRepository
+    def create_package_from_distribution(
+        cls, distribution: metadata.Distribution, env: Env
+    ) -> Package:
+        # We first check for a direct_url.json file to determine
+        # the type of package.
+        path = Path(str(distribution._path))  # type: ignore[attr-defined]
+
+        if (
+            path.name.endswith(".dist-info")
+            and path.joinpath("direct_url.json").exists()
+        ):
+            return cls.create_package_from_pep610(distribution)
+
+        is_standard_package = env.is_path_relative_to_lib(path)
+
+        source_type = None
+        source_url = None
+        source_reference = None
+        source_resolved_reference = None
+        source_subdirectory = None
+        if is_standard_package:
+            if path.name.endswith(".dist-info"):
+                paths = cls.get_package_paths(
+                    env=env, name=distribution.metadata["name"]
+                )
+                if paths:
+                    is_editable_package = False
+                    for src in paths:
+                        if cls.is_vcs_package(src, env):
+                            (
+                                source_type,
+                                source_url,
+                                source_reference,
+                            ) = cls.get_package_vcs_properties_from_path(src)
+                            break
+
+                        if not (
+                            is_editable_package or env.is_path_relative_to_lib(src)
+                        ):
+                            is_editable_package = True
+                    else:
+                        # TODO: handle multiple source directories?
+                        if is_editable_package:
+                            source_type = "directory"
+                            source_url = paths.pop().as_posix()
+        elif cls.is_vcs_package(path, env):
+            (
+                source_type,
+                source_url,
+                source_reference,
+            ) = cls.get_package_vcs_properties_from_path(
+                env.path / "src" / canonicalize_name(distribution.metadata["name"])
+            )
+        else:
+            # If not, it's a path dependency
+            source_type = "directory"
+            source_url = str(path.parent)
+
+        package = Package(
+            distribution.metadata["name"],
+            distribution.metadata["version"],
+            source_type=source_type,
+            source_url=source_url,
+            source_reference=source_reference,
+            source_resolved_reference=source_resolved_reference,
+            source_subdirectory=source_subdirectory,
+        )
+
+        package.description = distribution.metadata.get(  # type: ignore[attr-defined]
+            "summary",
+            "",
+        )
+
+        return package
+
+    @classmethod
+    def create_package_from_pep610(cls, distribution: metadata.Distribution) -> Package:
+        path = Path(str(distribution._path))  # type: ignore[attr-defined]
+        source_type = None
+        source_url = None
+        source_reference = None
+        source_resolved_reference = None
+        source_subdirectory = None
+        develop = False
+
+        url_reference = json.loads(
+            path.joinpath("direct_url.json").read_text(encoding="utf-8")
+        )
+        if "archive_info" in url_reference:
+            # File or URL distribution
+            if url_reference["url"].startswith("file:"):
+                # File distribution
+                source_type = "file"
+                source_url = url_to_path(url_reference["url"]).as_posix()
+            else:
+                # URL distribution
+                source_type = "url"
+                source_url = url_reference["url"]
+        elif "dir_info" in url_reference:
+            # Directory distribution
+            source_type = "directory"
+            source_url = url_to_path(url_reference["url"]).as_posix()
+            develop = url_reference["dir_info"].get("editable", False)
+        elif "vcs_info" in url_reference:
+            # VCS distribution
+            source_type = url_reference["vcs_info"]["vcs"]
+            source_url = url_reference["url"]
+            source_resolved_reference = url_reference["vcs_info"]["commit_id"]
+            source_reference = url_reference["vcs_info"].get(
+                "requested_revision", source_resolved_reference
+            )
+        source_subdirectory = url_reference.get("subdirectory")
+
+        package = Package(
+            distribution.metadata["name"],
+            distribution.metadata["version"],
+            source_type=source_type,
+            source_url=source_url,
+            source_reference=source_reference,
+            source_resolved_reference=source_resolved_reference,
+            source_subdirectory=source_subdirectory,
+            develop=develop,
+        )
+
+        package.description = distribution.metadata.get(  # type: ignore[attr-defined]
+            "summary",
+            "",
+        )
+
+        return package
+
+    @classmethod
+    def load(cls, env: Env, with_dependencies: bool = False) -> InstalledRepository:
         """
         Load installed packages.
         """
+        from conda_lock._vendor.poetry.core.packages.dependency import Dependency
+
         repo = cls()
         seen = set()
+        skipped = set()
 
         for entry in reversed(env.sys_path):
+            if not entry.strip():
+                logger.debug(
+                    "Project environment contains an empty path in sys_path,"
+                    " ignoring."
+                )
+                continue
+
             for distribution in sorted(
-                metadata.distributions(path=[entry]), key=lambda d: str(d._path),
+                metadata.distributions(path=[entry]),
+                key=lambda d: str(d._path),  # type: ignore[attr-defined]
             ):
-                name = distribution.metadata["name"]
-                path = Path(str(distribution._path))
-                version = distribution.metadata["version"]
-                package = Package(name, version, version)
-                package.description = distribution.metadata.get("summary", "")
+                path = Path(str(distribution._path))  # type: ignore[attr-defined]
 
-                if package.name in seen:
+                if path in skipped:
                     continue
 
-                try:
-                    path.relative_to(_VENDORS)
-                except ValueError:
-                    pass
-                else:
+                name = distribution.metadata.get("name")  # type: ignore[attr-defined]
+                if name is None:
+                    logger.warning(
+                        "Project environment contains an invalid distribution"
+                        " (%s). Consider removing it manually or recreate"
+                        " the environment.",
+                        path,
+                    )
+                    skipped.add(path)
                     continue
 
-                seen.add(package.name)
-
-                repo.add_package(package)
+                name = canonicalize_name(name)
 
-                is_standard_package = env.is_path_relative_to_lib(path)
-
-                if is_standard_package:
-                    if path.name.endswith(".dist-info"):
-                        paths = cls.get_package_paths(env=env, name=package.pretty_name)
-                        if paths:
-                            is_editable_package = False
-                            for src in paths:
-                                if cls.is_vcs_package(src, env):
-                                    cls.set_package_vcs_properties(package, env)
-                                    break
-
-                                if not (
-                                    is_editable_package
-                                    or env.is_path_relative_to_lib(src)
-                                ):
-                                    is_editable_package = True
-                            else:
-                                # TODO: handle multiple source directories?
-                                if is_editable_package:
-                                    package._source_type = "directory"
-                                    package._source_url = paths.pop().as_posix()
+                if name in seen:
                     continue
 
-                if cls.is_vcs_package(path, env):
-                    cls.set_package_vcs_properties(package, env)
-                else:
-                    # If not, it's a path dependency
-                    package._source_type = "directory"
-                    package._source_url = str(path.parent)
+                package = cls.create_package_from_distribution(distribution, env)
+
+                if with_dependencies:
+                    for require in distribution.metadata.get_all("requires-dist", []):
+                        dep = Dependency.create_from_pep_508(require)
+                        package.add_dependency(dep)
+
+                seen.add(package.name)
+                repo.add_package(package)
 
         return repo
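`create_package_from_pep610` above dispatches on which of the three PEP 610 keys is present in `direct_url.json`. A reduced sketch of just that dispatch; the helper name and return shape are ours, but the key names come from PEP 610.

```python
import json
from pathlib import Path


def classify_direct_url(dist_info: Path) -> tuple[str, str]:
    # direct_url.json carries exactly one of archive_info / dir_info / vcs_info.
    ref = json.loads((dist_info / "direct_url.json").read_text(encoding="utf-8"))
    if "archive_info" in ref:
        kind = "file" if ref["url"].startswith("file:") else "url"
    elif "dir_info" in ref:
        kind = "directory"
    else:  # "vcs_info"
        kind = ref["vcs_info"]["vcs"]  # e.g. "git"
    return kind, ref["url"]
```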
diff --git a/conda_lock/_vendor/poetry/repositories/legacy_repository.py b/conda_lock/_vendor/poetry/repositories/legacy_repository.py
old mode 100755
new mode 100644
index 3e8b4dcc..48cdf544
--- a/conda_lock/_vendor/poetry/repositories/legacy_repository.py
+++ b/conda_lock/_vendor/poetry/repositories/legacy_repository.py
@@ -1,309 +1,56 @@
-import cgi
-import re
-import warnings
+from __future__ import annotations
 
-from collections import defaultdict
-from typing import Generator
-from typing import Optional
-from typing import Union
+from typing import TYPE_CHECKING
+from typing import Any
 
-import requests
-import requests.auth
+import requests.adapters
 
-from cachecontrol import CacheControl
-from cachecontrol.caches.file_cache import FileCache
-from cachy import CacheManager
+from conda_lock._vendor.poetry.core.packages.package import Package
 
-from conda_lock._vendor.poetry.core.packages import Package
-from conda_lock._vendor.poetry.core.packages.utils.link import Link
-from conda_lock._vendor.poetry.core.semver import Version
-from conda_lock._vendor.poetry.core.semver import VersionConstraint
-from conda_lock._vendor.poetry.core.semver import VersionRange
-from conda_lock._vendor.poetry.core.semver import parse_constraint
-from conda_lock._vendor.poetry.locations import REPOSITORY_CACHE_DIR
-from conda_lock._vendor.poetry.utils._compat import Path
-from conda_lock._vendor.poetry.utils.helpers import canonicalize_name
-from conda_lock._vendor.poetry.utils.patterns import wheel_file_re
+from conda_lock._vendor.poetry.inspection.info import PackageInfo
+from conda_lock._vendor.poetry.repositories.exceptions import PackageNotFound
+from conda_lock._vendor.poetry.repositories.http_repository import HTTPRepository
+from conda_lock._vendor.poetry.repositories.link_sources.html import SimpleRepositoryPage
 
-from ..config.config import Config
-from ..inspection.info import PackageInfo
-from ..installation.authenticator import Authenticator
-from .exceptions import PackageNotFound
-from .exceptions import RepositoryError
-from .pypi_repository import PyPiRepository
 
+if TYPE_CHECKING:
+    from packaging.utils import NormalizedName
+    from conda_lock._vendor.poetry.core.constraints.version import Version
+    from conda_lock._vendor.poetry.core.constraints.version import VersionConstraint
+    from conda_lock._vendor.poetry.core.packages.utils.link import Link
 
-try:
-    import urllib.parse as urlparse
-except ImportError:
-    import urlparse
+    from conda_lock._vendor.poetry.config.config import Config
 
-try:
-    from html import unescape
-except ImportError:
-    try:
-        from html.parser import HTMLParser
-    except ImportError:
-        from HTMLParser import HTMLParser
 
-    unescape = HTMLParser().unescape
-
-
-try:
-    from urllib.parse import quote
-except ImportError:
-    from urllib import quote
-
-
-with warnings.catch_warnings():
-    warnings.simplefilter("ignore")
-    import html5lib
-
-
-class Page:
-
-    VERSION_REGEX = re.compile(r"(?i)([a-z0-9_\-.]+?)-(?=\d)([a-z0-9_.!+-]+)")
-    SUPPORTED_FORMATS = [
-        ".tar.gz",
-        ".whl",
-        ".zip",
-        ".tar.bz2",
-        ".tar.xz",
-        ".tar.Z",
-        ".tar",
-    ]
-
-    def __init__(self, url, content, headers):
-        if not url.endswith("/"):
-            url += "/"
-
-        self._url = url
-        encoding = None
-        if headers and "Content-Type" in headers:
-            content_type, params = cgi.parse_header(headers["Content-Type"])
-
-            if "charset" in params:
-                encoding = params["charset"]
-
-        self._content = content
-
-        if encoding is None:
-            self._parsed = html5lib.parse(content, namespaceHTMLElements=False)
-        else:
-            self._parsed = html5lib.parse(
-                content, transport_encoding=encoding, namespaceHTMLElements=False
-            )
-
-    @property
-    def versions(self):  # type: () -> Generator[Version]
-        seen = set()
-        for link in self.links:
-            version = self.link_version(link)
-
-            if not version:
-                continue
-
-            if version in seen:
-                continue
-
-            seen.add(version)
-
-            yield version
-
-    @property
-    def links(self):  # type: () -> Generator[Link]
-        for anchor in self._parsed.findall(".//a"):
-            if anchor.get("href"):
-                href = anchor.get("href")
-                url = self.clean_link(urlparse.urljoin(self._url, href))
-                pyrequire = anchor.get("data-requires-python")
-                pyrequire = unescape(pyrequire) if pyrequire else None
-
-                link = Link(url, self, requires_python=pyrequire)
-
-                if link.ext not in self.SUPPORTED_FORMATS:
-                    continue
-
-                yield link
-
-    def links_for_version(self, version):  # type: (Version) -> Generator[Link]
-        for link in self.links:
-            if self.link_version(link) == version:
-                yield link
-
-    def link_version(self, link):  # type: (Link) -> Union[Version, None]
-        m = wheel_file_re.match(link.filename)
-        if m:
-            version = m.group("ver")
-        else:
-            info, ext = link.splitext()
-            match = self.VERSION_REGEX.match(info)
-            if not match:
-                return
-
-            version = match.group(2)
-
-        try:
-            version = Version.parse(version)
-        except ValueError:
-            return
-
-        return version
-
-    _clean_re = re.compile(r"[^a-z0-9$&+,/:;=?@.#%_\\|-]", re.I)
-
-    def clean_link(self, url):
-        """Makes sure a link is fully encoded.  That is, if a ' ' shows up in
-        the link, it will be rewritten to %20 (while not over-quoting
-        % or other characters)."""
-        return self._clean_re.sub(lambda match: "%%%2x" % ord(match.group(0)), url)
-
-
-class LegacyRepository(PyPiRepository):
+class LegacyRepository(HTTPRepository):
     def __init__(
-        self, name, url, config=None, disable_cache=False, cert=None, client_cert=None
-    ):  # type: (str, str, Optional[Config], bool, Optional[Path], Optional[Path]) -> None
+        self,
+        name: str,
+        url: str,
+        config: Config | None = None,
+        disable_cache: bool = False,
+        pool_size: int = requests.adapters.DEFAULT_POOLSIZE,
+    ) -> None:
         if name == "pypi":
             raise ValueError("The name [pypi] is reserved for repositories")
 
-        self._packages = []
-        self._name = name
-        self._url = url.rstrip("/")
-        self._client_cert = client_cert
-        self._cert = cert
-        self._cache_dir = REPOSITORY_CACHE_DIR / name
-        self._cache = CacheManager(
-            {
-                "default": "releases",
-                "serializer": "json",
-                "stores": {
-                    "releases": {"driver": "file", "path": str(self._cache_dir)},
-                    "packages": {"driver": "dict"},
-                    "matches": {"driver": "dict"},
-                },
-            }
-        )
-
-        self._authenticator = Authenticator(
-            config=config or Config(use_environment=True)
-        )
-        self._basic_auth = None
-        username, password = self._authenticator.get_credentials_for_url(self._url)
-        if username is not None and password is not None:
-            self._basic_auth = requests.auth.HTTPBasicAuth(username, password)
-
-        self._disable_cache = disable_cache
-
-    @property
-    def cert(self):  # type: () -> Optional[Path]
-        return self._cert
-
-    @property
-    def client_cert(self):  # type: () -> Optional[Path]
-        return self._client_cert
-
-    @property
-    def session(self):
-        session = self._authenticator.session
-
-        if self._basic_auth:
-            session.auth = self._basic_auth
-
-        if self._cert:
-            session.verify = str(self._cert)
-
-        if self._client_cert:
-            session.cert = str(self._client_cert)
-
-        return CacheControl(session, cache=FileCache(str(self._cache_dir / "_http")))
+        super().__init__(name, url.rstrip("/"), config, disable_cache, pool_size)
 
     @property
-    def authenticated_url(self):  # type: () -> str
-        if not self._basic_auth:
-            return self.url
-
-        parsed = urlparse.urlparse(self.url)
-
-        return "{scheme}://{username}:{password}@{netloc}{path}".format(
-            scheme=parsed.scheme,
-            username=quote(self._basic_auth.username, safe=""),
-            password=quote(self._basic_auth.password, safe=""),
-            netloc=parsed.netloc,
-            path=parsed.path,
-        )
-
-    def find_packages(self, dependency):
-        packages = []
-
-        constraint = dependency.constraint
-        if constraint is None:
-            constraint = "*"
-
-        if not isinstance(constraint, VersionConstraint):
-            constraint = parse_constraint(constraint)
-
-        allow_prereleases = dependency.allows_prereleases()
-        if isinstance(constraint, VersionRange):
-            if (
-                constraint.max is not None
-                and constraint.max.is_prerelease()
-                or constraint.min is not None
-                and constraint.min.is_prerelease()
-            ):
-                allow_prereleases = True
-
-        key = dependency.name
-        if not constraint.is_any():
-            key = "{}:{}".format(key, str(constraint))
-
-        ignored_pre_release_versions = []
-
-        if self._cache.store("matches").has(key):
-            versions = self._cache.store("matches").get(key)
-        else:
-            page = self._get("/{}/".format(dependency.name.replace(".", "-")))
-            if page is None:
-                return []
-
-            versions = []
-            for version in page.versions:
-                if version.is_prerelease() and not allow_prereleases:
-                    if constraint.is_any():
-                        # we need this when all versions of the package are pre-releases
-                        ignored_pre_release_versions.append(version)
-                    continue
-
-                if constraint.allows(version):
-                    versions.append(version)
-
-            self._cache.store("matches").put(key, versions, 5)
-
-        for package_versions in (versions, ignored_pre_release_versions):
-            for version in package_versions:
-                package = Package(
-                    dependency.name,
-                    version,
-                    source_type="legacy",
-                    source_reference=self.name,
-                    source_url=self._url,
-                )
-
-                packages.append(package)
-
-            self._log(
-                "{} packages found for {} {}".format(
-                    len(packages), dependency.name, str(constraint)
-                ),
-                level="debug",
-            )
-
-            if packages or not constraint.is_any():
-                # we have matching packages, or constraint is not (*)
-                break
-
-        return packages
-
-    def package(self, name, version, extras=None):  # type: (...) -> Package
+    def packages(self) -> list[Package]:
+        # LegacyRepository._packages is not populated, and other code
+        # implicitly relies on this property (e.g. Pool.search via
+        # LegacyRepository.search). To avoid special-casing Pool or changing
+        # behavior, we stub it and return an empty list.
+        #
+        # TODO: Rethink search behaviour and design.
+        # Ref: https://github.com/python-poetry/poetry/issues/2446 and
+        # https://github.com/python-poetry/poetry/pull/6669#discussion_r990874908.
+        return []
+
+    def package(
+        self, name: str, version: Version, extras: list[str] | None = None
+    ) -> Package:
         """
         Retrieve the release information.
 
@@ -316,95 +63,79 @@ def package(self, name, version, extras=None):  # type: (...) -> Package
         should be much faster.
         """
         try:
-            index = self._packages.index(Package(name, version, version))
+            index = self._packages.index(Package(name, version))
 
             return self._packages[index]
         except ValueError:
-            package = super(LegacyRepository, self).package(name, version, extras)
+            package = super().package(name, version, extras)
             package._source_type = "legacy"
             package._source_url = self._url
             package._source_reference = self.name
 
             return package
 
-    def find_links_for_package(self, package):
-        page = self._get("/{}/".format(package.name.replace(".", "-")))
-        if page is None:
+    def find_links_for_package(self, package: Package) -> list[Link]:
+        try:
+            page = self.get_page(package.name)
+        except PackageNotFound:
             return []
 
-        return list(page.links_for_version(package.version))
-
-    def _get_release_info(self, name, version):  # type: (str, str) -> dict
-        page = self._get("/{}/".format(canonicalize_name(name).replace(".", "-")))
-        if page is None:
-            raise PackageNotFound('No package named "{}"'.format(name))
-
-        data = PackageInfo(
-            name=name,
-            version=version,
-            summary="",
-            platform=None,
-            requires_dist=[],
-            requires_python=None,
-            files=[],
-            cache_version=str(self.CACHE_VERSION),
-        )
-
-        links = list(page.links_for_version(Version.parse(version)))
-        if not links:
-            raise PackageNotFound(
-                'No valid distribution links found for package: "{}" version: "{}"'.format(
-                    name, version
-                )
-            )
-        urls = defaultdict(list)
-        files = []
-        for link in links:
-            if link.is_wheel:
-                urls["bdist_wheel"].append(link.url)
-            elif link.filename.endswith(
-                (".tar.gz", ".zip", ".bz2", ".xz", ".Z", ".tar")
-            ):
-                urls["sdist"].append(link.url)
-
-            h = link.hash
-            if h:
-                h = link.hash_name + ":" + link.hash
-                files.append({"file": link.filename, "hash": h})
+        return list(page.links_for_version(package.name, package.version))
 
-        data.files = files
-
-        info = self._get_info_from_urls(urls)
-
-        data.summary = info.summary
-        data.requires_dist = info.requires_dist
-        data.requires_python = info.requires_python
-
-        return data.asdict()
-
-    def _get(self, endpoint):  # type: (str) -> Union[Page, None]
-        url = self._url + endpoint
+    def _find_packages(
+        self, name: NormalizedName, constraint: VersionConstraint
+    ) -> list[Package]:
+        """
+        Find packages on the remote server.
+        """
         try:
-            response = self.session.get(url)
-            if response.status_code == 404:
-                return
-            response.raise_for_status()
-        except requests.HTTPError as e:
-            raise RepositoryError(e)
-
-        if response.status_code in (401, 403):
-            self._log(
-                "Authorization error accessing {url}".format(url=response.url),
-                level="warn",
-            )
-            return
+            page = self.get_page(name)
+        except PackageNotFound:
+            self._log(f"No packages found for {name}", level="debug")
+            return []
 
-        if response.url != url:
-            self._log(
-                "Response URL {response_url} differs from request URL {url}".format(
-                    response_url=response.url, url=url
-                ),
-                level="debug",
+        versions = [
+            (version, page.yanked(name, version))
+            for version in page.versions(name)
+            if constraint.allows(version)
+        ]
+
+        return [
+            Package(
+                name,
+                version,
+                source_type="legacy",
+                source_reference=self.name,
+                source_url=self._url,
+                yanked=yanked,
             )
+            for version, yanked in versions
+        ]
+
+    def _get_release_info(
+        self, name: NormalizedName, version: Version
+    ) -> dict[str, Any]:
+        page = self.get_page(name)
+
+        links = list(page.links_for_version(name, version))
+        yanked = page.yanked(name, version)
+
+        return self._links_to_data(
+            links,
+            PackageInfo(
+                name=name,
+                version=version.text,
+                summary="",
+                requires_dist=[],
+                requires_python=None,
+                files=[],
+                yanked=yanked,
+                cache_version=str(self.CACHE_VERSION),
+            ),
+        )
 
-        return Page(response.url, response.content, response.headers)
+    def _get_page(self, name: NormalizedName) -> SimpleRepositoryPage:
+        response = self._get_response(f"/{name}/")
+        if not response:
+            raise PackageNotFound(f"Package [{name}] not found.")
+        return SimpleRepositoryPage(response.url, response.text)
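Both `_get_page` implementations boil down to one GET against the PEP 503 "simple" index. A plain-requests sketch of that lookup, without the authentication, caching, or 401/403 handling poetry's session adds:

```python
import requests


def fetch_simple_page(base_url: str, name: str) -> str | None:
    # PEP 503: project pages live at <base>/<normalized-name>/
    response = requests.get(f"{base_url}/{name}/", timeout=15)
    if response.status_code == 404:
        return None  # treated as PackageNotFound above
    response.raise_for_status()
    return response.text


html = fetch_simple_page("https://pypi.org/simple", "requests")
```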
diff --git a/conda_lock/_vendor/poetry/repositories/link_sources/__init__.py b/conda_lock/_vendor/poetry/repositories/link_sources/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/conda_lock/_vendor/poetry/repositories/link_sources/base.py b/conda_lock/_vendor/poetry/repositories/link_sources/base.py
new file mode 100644
index 00000000..2b3d0757
--- /dev/null
+++ b/conda_lock/_vendor/poetry/repositories/link_sources/base.py
@@ -0,0 +1,127 @@
+from __future__ import annotations
+
+import logging
+import re
+
+from functools import cached_property
+from typing import TYPE_CHECKING
+from typing import ClassVar
+from typing import DefaultDict
+from typing import List
+
+from conda_lock._vendor.poetry.core.constraints.version import Version
+from conda_lock._vendor.poetry.core.packages.package import Package
+from conda_lock._vendor.poetry.core.version.exceptions import InvalidVersion
+
+from conda_lock._vendor.poetry.utils.patterns import sdist_file_re
+from conda_lock._vendor.poetry.utils.patterns import wheel_file_re
+
+
+if TYPE_CHECKING:
+    from collections.abc import Iterator
+
+    from packaging.utils import NormalizedName
+    from conda_lock._vendor.poetry.core.packages.utils.link import Link
+
+    LinkCache = DefaultDict[NormalizedName, DefaultDict[Version, List[Link]]]
+
+
+logger = logging.getLogger(__name__)
+
+
+class LinkSource:
+    VERSION_REGEX = re.compile(r"(?i)([a-z0-9_\-.]+?)-(?=\d)([a-z0-9_.!+-]+)")
+    CLEAN_REGEX = re.compile(r"[^a-z0-9$&+,/:;=?@.#%_\\|-]", re.I)
+    SUPPORTED_FORMATS: ClassVar[list[str]] = [
+        ".tar.gz",
+        ".whl",
+        ".zip",
+        ".tar.bz2",
+        ".tar.xz",
+        ".tar.Z",
+        ".tar",
+    ]
+
+    def __init__(self, url: str) -> None:
+        self._url = url
+
+    @property
+    def url(self) -> str:
+        return self._url
+
+    def versions(self, name: NormalizedName) -> Iterator[Version]:
+        yield from self._link_cache[name]
+
+    @property
+    def packages(self) -> Iterator[Package]:
+        for link in self.links:
+            pkg = self.link_package_data(link)
+
+            if pkg:
+                yield pkg
+
+    @property
+    def links(self) -> Iterator[Link]:
+        for links_per_version in self._link_cache.values():
+            for links in links_per_version.values():
+                yield from links
+
+    @classmethod
+    def link_package_data(cls, link: Link) -> Package | None:
+        name: str | None = None
+        version_string: str | None = None
+        version: Version | None = None
+        m = wheel_file_re.match(link.filename) or sdist_file_re.match(link.filename)
+
+        if m:
+            name = m.group("name")
+            version_string = m.group("ver")
+        else:
+            info, ext = link.splitext()
+            match = cls.VERSION_REGEX.match(info)
+            if match:
+                name = match.group(1)
+                version_string = match.group(2)
+
+        if version_string:
+            try:
+                version = Version.parse(version_string)
+            except InvalidVersion:
+                logger.debug(
+                    "Skipping url (%s) due to invalid version (%s)",
+                    link.url,
+                    version_string,
+                )
+                return None
+
+        pkg = None
+        if name and version:
+            pkg = Package(name, version, source_url=link.url)
+        return pkg
+
+    def links_for_version(
+        self, name: NormalizedName, version: Version
+    ) -> Iterator[Link]:
+        yield from self._link_cache[name][version]
+
+    def clean_link(self, url: str) -> str:
+        """Makes sure a link is fully encoded.  That is, if a ' ' shows up in
+        the link, it will be rewritten to %20 (while not over-quoting
+        % or other characters)."""
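+        # e.g. (illustrative): clean_link("https://host/some pkg/")
+        # returns "https://host/some%20pkg/" since ord(" ") == 0x20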
+        return self.CLEAN_REGEX.sub(lambda match: f"%{ord(match.group(0)):02x}", url)
+
+    def yanked(self, name: NormalizedName, version: Version) -> str | bool:
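+        # Contract sketch: False if at least one file for the release is not
+        # yanked; otherwise the newline-joined, sorted reasons, or True when
+        # every file is yanked but no reason was recorded.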
+        reasons = set()
+        for link in self.links_for_version(name, version):
+            if link.yanked:
+                if link.yanked_reason:
+                    reasons.add(link.yanked_reason)
+            else:
+                # release is not yanked if at least one file is not yanked
+                return False
+        # if all files are yanked (or there are no files) the release is yanked
+        if reasons:
+            return "\n".join(sorted(reasons))
+        return True
+
+    @cached_property
+    def _link_cache(self) -> LinkCache:
+        raise NotImplementedError()
diff --git a/conda_lock/_vendor/poetry/repositories/link_sources/html.py b/conda_lock/_vendor/poetry/repositories/link_sources/html.py
new file mode 100644
index 00000000..4af97ad5
--- /dev/null
+++ b/conda_lock/_vendor/poetry/repositories/link_sources/html.py
@@ -0,0 +1,75 @@
+from __future__ import annotations
+
+import urllib.parse
+
+from collections import defaultdict
+from functools import cached_property
+from html import unescape
+from typing import TYPE_CHECKING
+
+from conda_lock._vendor.poetry.core.packages.utils.link import Link
+
+from conda_lock._vendor.poetry.repositories.link_sources.base import LinkSource
+from conda_lock._vendor.poetry.repositories.parsers.html_page_parser import HTMLPageParser
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.repositories.link_sources.base import LinkCache
+
+
+class HTMLPage(LinkSource):
+    def __init__(self, url: str, content: str) -> None:
+        super().__init__(url=url)
+
+        parser = HTMLPageParser()
+        parser.feed(content)
+        self._parsed = parser.anchors
+        self._base_url: str | None = parser.base_url
+
+    @cached_property
+    def _link_cache(self) -> LinkCache:
+        links: LinkCache = defaultdict(lambda: defaultdict(list))
+        for anchor in self._parsed:
+            if href := anchor.get("href"):
+                url = self.clean_link(
+                    urllib.parse.urljoin(self._base_url or self._url, href)
+                )
+                pyrequire = anchor.get("data-requires-python")
+                pyrequire = unescape(pyrequire) if pyrequire else None
+                yanked_value = anchor.get("data-yanked")
+                yanked: str | bool
+                if yanked_value:
+                    yanked = unescape(yanked_value)
+                else:
+                    yanked = "data-yanked" in anchor
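+                # a bare data-yanked attribute (present without a value) still
+                # marks the file as yanked, hence the key-presence fallback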
+
+                # see https://peps.python.org/pep-0714/#clients
+                # and https://peps.python.org/pep-0658/#specification
+                metadata: str | bool
+                for metadata_key in ("data-core-metadata", "data-dist-info-metadata"):
+                    metadata_value = anchor.get(metadata_key)
+                    if metadata_value:
+                        metadata = unescape(metadata_value)
+                    else:
+                        metadata = metadata_key in anchor
+                    if metadata:
+                        break
+                link = Link(
+                    url, requires_python=pyrequire, yanked=yanked, metadata=metadata
+                )
+
+                if link.ext not in self.SUPPORTED_FORMATS:
+                    continue
+
+                pkg = self.link_package_data(link)
+                if pkg:
+                    links[pkg.name][pkg.version].append(link)
+
+        return links
+
+
+class SimpleRepositoryPage(HTMLPage):
+    def __init__(self, url: str, content: str) -> None:
+        if not url.endswith("/"):
+            url += "/"
+        super().__init__(url=url, content=content)
diff --git a/conda_lock/_vendor/poetry/repositories/link_sources/json.py b/conda_lock/_vendor/poetry/repositories/link_sources/json.py
new file mode 100644
index 00000000..d7c50be1
--- /dev/null
+++ b/conda_lock/_vendor/poetry/repositories/link_sources/json.py
@@ -0,0 +1,55 @@
+from __future__ import annotations
+
+from collections import defaultdict
+from functools import cached_property
+from typing import TYPE_CHECKING
+from typing import Any
+
+from conda_lock._vendor.poetry.core.packages.utils.link import Link
+
+from conda_lock._vendor.poetry.repositories.link_sources.base import LinkSource
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.repositories.link_sources.base import LinkCache
+
+
+class SimpleJsonPage(LinkSource):
+    """Links as returned by PEP 691 compatible JSON-based Simple API."""
+
+    def __init__(self, url: str, content: dict[str, Any]) -> None:
+        super().__init__(url=url)
+        self.content = content
+
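+    # A minimal (illustrative) PEP 691 payload this page can consume:
+    #   {"files": [{"url": ".../demo-1.0-py3-none-any.whl",
+    #               "requires-python": ">=3.8",
+    #               "yanked": false,
+    #               "core-metadata": {"sha256": "..."}}]}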
+    @cached_property
+    def _link_cache(self) -> LinkCache:
+        links: LinkCache = defaultdict(lambda: defaultdict(list))
+        for file in self.content["files"]:
+            url = file["url"]
+            requires_python = file.get("requires-python")
+            yanked = file.get("yanked", False)
+
+            # see https://peps.python.org/pep-0714/#clients
+            # and https://peps.python.org/pep-0691/#project-detail
+            metadata: dict[str, str] | bool = False
+            for metadata_key in ("core-metadata", "dist-info-metadata"):
+                if metadata_key in file:
+                    metadata_value = file[metadata_key]
+                    if metadata_value and isinstance(metadata_value, dict):
+                        metadata = metadata_value
+                    else:
+                        metadata = bool(metadata_value)
+                    break
+
+            link = Link(
+                url, requires_python=requires_python, yanked=yanked, metadata=metadata
+            )
+
+            if link.ext not in self.SUPPORTED_FORMATS:
+                continue
+
+            pkg = self.link_package_data(link)
+            if pkg:
+                links[pkg.name][pkg.version].append(link)
+
+        return links
diff --git a/conda_lock/_vendor/poetry/repositories/lockfile_repository.py b/conda_lock/_vendor/poetry/repositories/lockfile_repository.py
new file mode 100644
index 00000000..bc192169
--- /dev/null
+++ b/conda_lock/_vendor/poetry/repositories/lockfile_repository.py
@@ -0,0 +1,32 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from conda_lock._vendor.poetry.repositories import Repository
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.core.packages.package import Package
+
+
+class LockfileRepository(Repository):
+    """
+    Special repository that distinguishes packages not only by name and version,
+    but also by source type, url, etc.
+    """
+
+    def __init__(self) -> None:
+        super().__init__("poetry-lockfile")
+
+    def has_package(self, package: Package) -> bool:
+        return any(p == package for p in self.packages)
+
+    def remove_package(self, package: Package) -> None:
+        index = None
+        for i, repo_package in enumerate(self.packages):
+            if repo_package == package:
+                index = i
+                break
+
+        if index is not None:
+            del self._packages[index]
diff --git a/conda_lock/_vendor/poetry/repositories/parsers/__init__.py b/conda_lock/_vendor/poetry/repositories/parsers/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/conda_lock/_vendor/poetry/repositories/parsers/html_page_parser.py b/conda_lock/_vendor/poetry/repositories/parsers/html_page_parser.py
new file mode 100644
index 00000000..f569ca5f
--- /dev/null
+++ b/conda_lock/_vendor/poetry/repositories/parsers/html_page_parser.py
@@ -0,0 +1,18 @@
+from __future__ import annotations
+
+from html.parser import HTMLParser
+
+
+class HTMLPageParser(HTMLParser):
+    def __init__(self) -> None:
+        super().__init__()
+        self.base_url: str | None = None
+        self.anchors: list[dict[str, str | None]] = []
+
+    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
+        if tag == "base" and self.base_url is None:
+            base_url = dict(attrs).get("href")
+            if base_url is not None:
+                self.base_url = base_url
+        elif tag == "a":
+            self.anchors.append(dict(attrs))
diff --git a/conda_lock/_vendor/poetry/repositories/parsers/pypi_search_parser.py b/conda_lock/_vendor/poetry/repositories/parsers/pypi_search_parser.py
new file mode 100644
index 00000000..1c52c418
--- /dev/null
+++ b/conda_lock/_vendor/poetry/repositories/parsers/pypi_search_parser.py
@@ -0,0 +1,70 @@
+from __future__ import annotations
+
+import functools
+
+from dataclasses import dataclass
+from html.parser import HTMLParser
+from typing import Callable
+
+
+# The following code was originally written for the PDM project
+# https://github.com/pdm-project/pdm/blob/1f4f48a35cdded064def85df117bebf713f7c17a/src/pdm/models/search.py
+# and was later adapted to fit Poetry's needs.
+
+
+@dataclass
+class Result:
+    name: str = ""
+    version: str = ""
+    description: str = ""
+
+
+class SearchResultParser(HTMLParser):
+    """A simple HTML parser for pypi.org search results."""
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.results: list[Result] = []
+        self._current: Result | None = None
+        self._nest_anchors = 0
+        self._data_callback: Callable[[str], None] | None = None
+
+    @staticmethod
+    def _match_class(attrs: list[tuple[str, str | None]], name: str) -> bool:
+        attrs_map = dict(attrs)
+        return name in (attrs_map.get("class") or "").split()
+
+    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
+        if not self._current:
+            if tag == "a" and self._match_class(attrs, "package-snippet"):
+                self._current = Result()
+                self._nest_anchors = 1
+        else:
+            if tag == "span" and self._match_class(attrs, "package-snippet__name"):
+                self._data_callback = functools.partial(setattr, self._current, "name")
+            elif tag == "span" and self._match_class(attrs, "package-snippet__version"):
+                self._data_callback = functools.partial(
+                    setattr, self._current, "version"
+                )
+            elif tag == "p" and self._match_class(
+                attrs, "package-snippet__description"
+            ):
+                self._data_callback = functools.partial(
+                    setattr, self._current, "description"
+                )
+            elif tag == "a":
+                self._nest_anchors += 1
+
+    def handle_data(self, data: str) -> None:
+        if self._data_callback is not None:
+            self._data_callback(data)
+            self._data_callback = None
+
+    def handle_endtag(self, tag: str) -> None:
+        if tag != "a" or self._current is None:
+            return
+        self._nest_anchors -= 1
+        if self._nest_anchors == 0:
+            if self._current.name and self._current.version:
+                self.results.append(self._current)
+            self._current = None
diff --git a/conda_lock/_vendor/poetry/repositories/pool.py b/conda_lock/_vendor/poetry/repositories/pool.py
deleted file mode 100755
index 6f5c64a1..00000000
--- a/conda_lock/_vendor/poetry/repositories/pool.py
+++ /dev/null
@@ -1,184 +0,0 @@
-from typing import TYPE_CHECKING
-from typing import Dict
-from typing import List
-from typing import Optional
-
-from .base_repository import BaseRepository
-from .exceptions import PackageNotFound
-from .repository import Repository
-
-
-if TYPE_CHECKING:
-    from conda_lock._vendor.poetry.core.packages import Package
-
-
-class Pool(BaseRepository):
-    def __init__(
-        self, repositories=None, ignore_repository_names=False
-    ):  # type: (Optional[List[Repository]], bool) -> None
-        if repositories is None:
-            repositories = []
-
-        self._lookup = {}  # type: Dict[str, int]
-        self._repositories = []  # type: List[Repository]
-        self._default = False
-        self._has_primary_repositories = False
-        self._secondary_start_idx = None
-
-        for repository in repositories:
-            self.add_repository(repository)
-
-        self._ignore_repository_names = ignore_repository_names
-
-        super(Pool, self).__init__()
-
-    @property
-    def repositories(self):  # type: () -> List[Repository]
-        return self._repositories
-
-    def has_default(self):  # type: () -> bool
-        return self._default
-
-    def has_primary_repositories(self):  # type: () -> bool
-        return self._has_primary_repositories
-
-    def has_repository(self, name):  # type: (str) -> bool
-        name = name.lower() if name is not None else None
-
-        return name in self._lookup
-
-    def repository(self, name):  # type: (str) -> Repository
-        if name is not None:
-            name = name.lower()
-
-        if name in self._lookup:
-            return self._repositories[self._lookup[name]]
-
-        raise ValueError('Repository "{}" does not exist.'.format(name))
-
-    def add_repository(
-        self, repository, default=False, secondary=False
-    ):  # type: (Repository, bool, bool) -> Pool
-        """
-        Adds a repository to the pool.
-        """
-        repository_name = (
-            repository.name.lower() if repository.name is not None else None
-        )
-        if default:
-            if self.has_default():
-                raise ValueError("Only one repository can be the default")
-
-            self._default = True
-            self._repositories.insert(0, repository)
-            for name in self._lookup:
-                self._lookup[name] += 1
-
-            if self._secondary_start_idx is not None:
-                self._secondary_start_idx += 1
-
-            self._lookup[repository_name] = 0
-        elif secondary:
-            if self._secondary_start_idx is None:
-                self._secondary_start_idx = len(self._repositories)
-
-            self._repositories.append(repository)
-            self._lookup[repository_name] = len(self._repositories) - 1
-        else:
-            self._has_primary_repositories = True
-            if self._secondary_start_idx is None:
-                self._repositories.append(repository)
-                self._lookup[repository_name] = len(self._repositories) - 1
-            else:
-                self._repositories.insert(self._secondary_start_idx, repository)
-
-                for name, idx in self._lookup.items():
-                    if idx < self._secondary_start_idx:
-                        continue
-
-                    self._lookup[name] += 1
-
-                self._lookup[repository_name] = self._secondary_start_idx
-                self._secondary_start_idx += 1
-
-        return self
-
-    def remove_repository(self, repository_name):  # type: (str) -> Pool
-        if repository_name is not None:
-            repository_name = repository_name.lower()
-
-        idx = self._lookup.get(repository_name)
-        if idx is not None:
-            del self._repositories[idx]
-
-        return self
-
-    def has_package(self, package):
-        raise NotImplementedError()
-
-    def package(
-        self, name, version, extras=None, repository=None
-    ):  # type: (str, str, List[str], str) -> Package
-        if repository is not None:
-            repository = repository.lower()
-
-        if (
-            repository is not None
-            and repository not in self._lookup
-            and not self._ignore_repository_names
-        ):
-            raise ValueError('Repository "{}" does not exist.'.format(repository))
-
-        if repository is not None and not self._ignore_repository_names:
-            try:
-                return self.repository(repository).package(name, version, extras=extras)
-            except PackageNotFound:
-                pass
-        else:
-            for idx, repo in enumerate(self._repositories):
-                try:
-                    package = repo.package(name, version, extras=extras)
-                except PackageNotFound:
-                    continue
-
-                if package:
-                    self._packages.append(package)
-
-                    return package
-
-        raise PackageNotFound("Package {} ({}) not found.".format(name, version))
-
-    def find_packages(
-        self, dependency,
-    ):
-        repository = dependency.source_name
-        if repository is not None:
-            repository = repository.lower()
-
-        if (
-            repository is not None
-            and repository not in self._lookup
-            and not self._ignore_repository_names
-        ):
-            raise ValueError('Repository "{}" does not exist.'.format(repository))
-
-        if repository is not None and not self._ignore_repository_names:
-            return self.repository(repository).find_packages(dependency)
-
-        packages = []
-        for repo in self._repositories:
-            packages += repo.find_packages(dependency)
-
-        return packages
-
-    def search(self, query):
-        from .legacy_repository import LegacyRepository
-
-        results = []
-        for repository in self._repositories:
-            if isinstance(repository, LegacyRepository):
-                continue
-
-            results += repository.search(query)
-
-        return results
diff --git a/conda_lock/_vendor/poetry/repositories/pypi_repository.py b/conda_lock/_vendor/poetry/repositories/pypi_repository.py
old mode 100755
new mode 100644
index 2eb42b47..d78e968c
--- a/conda_lock/_vendor/poetry/repositories/pypi_repository.py
+++ b/conda_lock/_vendor/poetry/repositories/pypi_repository.py
@@ -1,260 +1,138 @@
+from __future__ import annotations
+
 import logging
-import os
 
-from collections import defaultdict
-from typing import Dict
-from typing import List
-from typing import Union
+from typing import TYPE_CHECKING
+from typing import Any
 
 import requests
+import requests.adapters
 
-from cachecontrol import CacheControl
-from cachecontrol.caches.file_cache import FileCache
 from cachecontrol.controller import logger as cache_control_logger
-from cachy import CacheManager
-from html5lib.html5parser import parse
-
-from conda_lock._vendor.poetry.core.packages import Dependency
-from conda_lock._vendor.poetry.core.packages import Package
-from conda_lock._vendor.poetry.core.packages import dependency_from_pep_508
+from conda_lock._vendor.poetry.core.packages.package import Package
 from conda_lock._vendor.poetry.core.packages.utils.link import Link
-from conda_lock._vendor.poetry.core.semver import VersionConstraint
-from conda_lock._vendor.poetry.core.semver import VersionRange
-from conda_lock._vendor.poetry.core.semver import parse_constraint
-from conda_lock._vendor.poetry.core.semver.exceptions import ParseVersionError
-from conda_lock._vendor.poetry.core.version.markers import parse_marker
-from conda_lock._vendor.poetry.locations import REPOSITORY_CACHE_DIR
-from conda_lock._vendor.poetry.utils._compat import Path
-from conda_lock._vendor.poetry.utils._compat import to_str
-from conda_lock._vendor.poetry.utils.helpers import download_file
-from conda_lock._vendor.poetry.utils.helpers import temporary_directory
-from conda_lock._vendor.poetry.utils.patterns import wheel_file_re
-
-from ..inspection.info import PackageInfo
-from .exceptions import PackageNotFound
-from .remote_repository import RemoteRepository
+from conda_lock._vendor.poetry.core.version.exceptions import InvalidVersion
 
-
-try:
-    import urllib.parse as urlparse
-except ImportError:
-    import urlparse
+from conda_lock._vendor.poetry.repositories.exceptions import PackageNotFound
+from conda_lock._vendor.poetry.repositories.http_repository import HTTPRepository
+from conda_lock._vendor.poetry.repositories.link_sources.json import SimpleJsonPage
+from conda_lock._vendor.poetry.repositories.parsers.pypi_search_parser import SearchResultParser
+from conda_lock._vendor.poetry.utils.constants import REQUESTS_TIMEOUT
 
 
 cache_control_logger.setLevel(logging.ERROR)
 
 logger = logging.getLogger(__name__)
 
+if TYPE_CHECKING:
+    from packaging.utils import NormalizedName
+    from conda_lock._vendor.poetry.core.constraints.version import Version
+    from conda_lock._vendor.poetry.core.constraints.version import VersionConstraint
 
-class PyPiRepository(RemoteRepository):
+SUPPORTED_PACKAGE_TYPES = {"sdist", "bdist_wheel"}
 
-    CACHE_VERSION = parse_constraint("1.0.0")
 
-    def __init__(self, url="https://pypi.org/", disable_cache=False, fallback=True):
-        super(PyPiRepository, self).__init__(url.rstrip("/") + "/simple/")
+class PyPiRepository(HTTPRepository):
+    def __init__(
+        self,
+        url: str = "https://pypi.org/",
+        disable_cache: bool = False,
+        fallback: bool = True,
+        pool_size: int = requests.adapters.DEFAULT_POOLSIZE,
+    ) -> None:
+        super().__init__(
+            "PyPI",
+            url.rstrip("/") + "/simple/",
+            disable_cache=disable_cache,
+            pool_size=pool_size,
+        )
 
         self._base_url = url
-        self._disable_cache = disable_cache
         self._fallback = fallback
 
-        release_cache_dir = REPOSITORY_CACHE_DIR / "pypi"
-        self._cache = CacheManager(
-            {
-                "default": "releases",
-                "serializer": "json",
-                "stores": {
-                    "releases": {"driver": "file", "path": str(release_cache_dir)},
-                    "packages": {"driver": "dict"},
-                },
-            }
-        )
-
-        self._cache_control_cache = FileCache(str(release_cache_dir / "_http"))
-        self._name = "PyPI"
-
-    @property
-    def session(self):
-        return CacheControl(requests.session(), cache=self._cache_control_cache)
-
-    def find_packages(self, dependency):  # type: (Dependency) -> List[Package]
-        """
-        Find packages on the remote server.
-        """
-        constraint = dependency.constraint
-        if constraint is None:
-            constraint = "*"
-
-        if not isinstance(constraint, VersionConstraint):
-            constraint = parse_constraint(constraint)
-
-        allow_prereleases = dependency.allows_prereleases()
-        if isinstance(constraint, VersionRange):
-            if (
-                constraint.max is not None
-                and constraint.max.is_prerelease()
-                or constraint.min is not None
-                and constraint.min.is_prerelease()
-            ):
-                allow_prereleases = True
-
-        try:
-            info = self.get_package_info(dependency.name)
-        except PackageNotFound:
-            self._log(
-                "No packages found for {} {}".format(dependency.name, str(constraint)),
-                level="debug",
-            )
-            return []
-
-        packages = []
-        ignored_pre_release_packages = []
-
-        for version, release in info["releases"].items():
-            if not release:
-                # Bad release
-                self._log(
-                    "No release information found for {}-{}, skipping".format(
-                        dependency.name, version
-                    ),
-                    level="debug",
-                )
-                continue
-
-            try:
-                package = Package(info["info"]["name"], version)
-            except ParseVersionError:
-                self._log(
-                    'Unable to parse version "{}" for the {} package, skipping'.format(
-                        version, dependency.name
-                    ),
-                    level="debug",
-                )
-                continue
-
-            if package.is_prerelease() and not allow_prereleases:
-                if constraint.is_any():
-                    # we need this when all versions of the package are pre-releases
-                    ignored_pre_release_packages.append(package)
-                continue
-
-            if not constraint or (constraint and constraint.allows(package.version)):
-                packages.append(package)
-
-        self._log(
-            "{} packages found for {} {}".format(
-                len(packages), dependency.name, str(constraint)
-            ),
-            level="debug",
-        )
-
-        return packages or ignored_pre_release_packages
-
-    def package(
-        self,
-        name,  # type: str
-        version,  # type: str
-        extras=None,  # type: (Union[list, None])
-    ):  # type: (...) -> Package
-        return self.get_release_info(name, version).to_package(name=name, extras=extras)
-
-    def search(self, query):
+    def search(self, query: str) -> list[Package]:
         results = []
 
-        search = {"q": query}
-
-        response = requests.session().get(self._base_url + "search", params=search)
-        content = parse(response.content, namespaceHTMLElements=False)
-        for result in content.findall(".//*[@class='package-snippet']"):
-            name = result.find("h3/*[@class='package-snippet__name']").text
-            version = result.find("h3/*[@class='package-snippet__version']").text
-
-            if not name or not version:
-                continue
-
-            description = result.find("p[@class='package-snippet__description']").text
-            if not description:
-                description = ""
+        response = requests.get(
+            self._base_url + "search", params={"q": query}, timeout=REQUESTS_TIMEOUT
+        )
+        parser = SearchResultParser()
+        parser.feed(response.text)
 
+        for result in parser.results:
             try:
-                result = Package(name, version, description)
-                result.description = to_str(description.strip())
-                results.append(result)
-            except ParseVersionError:
+                package = Package(result.name, result.version)
+                package.description = result.description.strip()
+                results.append(package)
+            except InvalidVersion:
                 self._log(
-                    'Unable to parse version "{}" for the {} package, skipping'.format(
-                        version, name
-                    ),
+                    f'Unable to parse version "{result.version}" for the'
+                    f" {result.name} package, skipping",
                     level="debug",
                 )
 
         return results
 
-    def get_package_info(self, name):  # type: (str) -> dict
+    def get_package_info(self, name: NormalizedName) -> dict[str, Any]:
         """
         Return the package information given its name.
 
         The information is returned from the cache if it exists
         or retrieved from the remote server.
         """
-        if self._disable_cache:
-            return self._get_package_info(name)
-
-        return self._cache.store("packages").remember_forever(
-            name, lambda: self._get_package_info(name)
-        )
-
-    def _get_package_info(self, name):  # type: (str) -> dict
-        data = self._get("pypi/{}/json".format(name))
-        if data is None:
-            raise PackageNotFound("Package [{}] not found.".format(name))
-
-        return data
+        return self._get_package_info(name)
 
-    def get_release_info(self, name, version):  # type: (str, str) -> PackageInfo
+    def _find_packages(
+        self, name: NormalizedName, constraint: VersionConstraint
+    ) -> list[Package]:
         """
-        Return the release information given a package name and a version.
-
-        The information is returned from the cache if it exists
-        or retrieved from the remote server.
+        Find packages on the remote server.
         """
-        if self._disable_cache:
-            return PackageInfo.load(self._get_release_info(name, version))
+        try:
+            json_page = self.get_page(name)
+        except PackageNotFound:
+            self._log(f"No packages found for {name}", level="debug")
+            return []
 
-        cached = self._cache.remember_forever(
-            "{}:{}".format(name, version), lambda: self._get_release_info(name, version)
-        )
+        versions = [
+            (version, json_page.yanked(name, version))
+            for version in json_page.versions(name)
+            if constraint.allows(version)
+        ]
 
-        cache_version = cached.get("_cache_version", "0.0.0")
-        if parse_constraint(cache_version) != self.CACHE_VERSION:
-            # The cache must be updated
-            self._log(
-                "The cache for {} {} is outdated. Refreshing.".format(name, version),
-                level="debug",
-            )
-            cached = self._get_release_info(name, version)
+        return [Package(name, version, yanked=yanked) for version, yanked in versions]
 
-            self._cache.forever("{}:{}".format(name, version), cached)
+    def _get_package_info(self, name: NormalizedName) -> dict[str, Any]:
+        headers = {"Accept": "application/vnd.pypi.simple.v1+json"}
+        info = self._get(f"simple/{name}/", headers=headers)
+        if info is None:
+            raise PackageNotFound(f"Package [{name}] not found.")
 
-        return PackageInfo.load(cached)
+        return info
 
-    def find_links_for_package(self, package):
-        json_data = self._get("pypi/{}/{}/json".format(package.name, package.version))
+    def find_links_for_package(self, package: Package) -> list[Link]:
+        json_data = self._get(f"pypi/{package.name}/{package.version}/json")
         if json_data is None:
             return []
 
         links = []
         for url in json_data["urls"]:
-            h = "sha256={}".format(url["digests"]["sha256"])
-            links.append(Link(url["url"] + "#" + h))
+            if url["packagetype"] in SUPPORTED_PACKAGE_TYPES:
+                h = f"sha256={url['digests']['sha256']}"
+                links.append(Link(url["url"] + "#" + h, yanked=self._get_yanked(url)))
 
         return links
 
-    def _get_release_info(self, name, version):  # type: (str, str) -> dict
-        self._log("Getting info for {} ({}) from PyPI".format(name, version), "debug")
+    def _get_release_info(
+        self, name: NormalizedName, version: Version
+    ) -> dict[str, Any]:
+        from conda_lock._vendor.poetry.inspection.info import PackageInfo
+
+        self._log(f"Getting info for {name} ({version}) from PyPI", "debug")
 
-        json_data = self._get("pypi/{}/{}/json".format(name, version))
+        json_data = self._get(f"pypi/{name}/{version}/json")
         if json_data is None:
-            raise PackageNotFound("Package [{}] not found.".format(name))
+            raise PackageNotFound(f"Package [{name}] not found.")
 
         info = json_data["info"]
 
@@ -262,10 +140,9 @@ def _get_release_info(self, name, version):  # type: (str, str) -> dict
             name=info["name"],
             version=info["version"],
             summary=info["summary"],
-            platform=info["platform"],
             requires_dist=info["requires_dist"],
             requires_python=info["requires_python"],
-            files=info.get("files", []),
+            yanked=self._get_yanked(info),
             cache_version=str(self.CACHE_VERSION),
         )
 
@@ -274,34 +151,30 @@ def _get_release_info(self, name, version):  # type: (str, str) -> dict
         except KeyError:
             version_info = []
 
+        files = info.get("files", [])
         for file_info in version_info:
-            data.files.append(
-                {
-                    "file": file_info["filename"],
-                    "hash": "sha256:" + file_info["digests"]["sha256"],
-                }
-            )
+            if file_info["packagetype"] in SUPPORTED_PACKAGE_TYPES:
+                files.append(
+                    {
+                        "file": file_info["filename"],
+                        "hash": "sha256:" + file_info["digests"]["sha256"],
+                    }
+                )
+        data.files = files
 
         if self._fallback and data.requires_dist is None:
-            self._log("No dependencies found, downloading archives", level="debug")
+            self._log(
+                "No dependencies found, downloading metadata and/or archives",
+                level="debug",
+            )
             # No dependencies set (along with other information)
             # This might be due to the package genuinely having no dependencies
-            # or badly set metadata when uploading
+            # or badly set metadata when uploading.
             # So, we need to make sure there are actually no
-            # dependencies by introspecting packages
-            urls = defaultdict(list)
-            for url in json_data["urls"]:
-                # Only get sdist and wheels if they exist
-                dist_type = url["packagetype"]
-                if dist_type not in ["sdist", "bdist_wheel"]:
-                    continue
-
-                urls[dist_type].append(url["url"])
-
-            if not urls:
-                return data.asdict()
-
-            info = self._get_info_from_urls(urls)
+            # dependencies by introspecting packages.
+            page = self.get_page(name)
+            links = list(page.links_for_version(name, version))
+            info = self._get_info_from_links(links)
 
             data.requires_dist = info.requires_dist
 
@@ -310,144 +183,40 @@ def _get_release_info(self, name, version):  # type: (str, str) -> dict
 
         return data.asdict()
 
-    def _get(self, endpoint):  # type: (str) -> Union[dict, None]
+    def _get_page(self, name: NormalizedName) -> SimpleJsonPage:
+        source = self._base_url + f"simple/{name}/"
+        info = self.get_package_info(name)
+        return SimpleJsonPage(source, info)
+
+    def _get(
+        self, endpoint: str, headers: dict[str, str] | None = None
+    ) -> dict[str, Any] | None:
         try:
-            json_response = self.session.get(self._base_url + endpoint)
+            json_response = self.session.get(
+                self._base_url + endpoint,
+                raise_for_status=False,
+                timeout=REQUESTS_TIMEOUT,
+                headers=headers,
+            )
         except requests.exceptions.TooManyRedirects:
             # Cache control redirect loop.
             # We try to remove the cache and try again
-            self._cache_control_cache.delete(self._base_url + endpoint)
-            json_response = self.session.get(self._base_url + endpoint)
+            self.session.delete_cache(self._base_url + endpoint)
+            json_response = self.session.get(
+                self._base_url + endpoint,
+                raise_for_status=False,
+                timeout=REQUESTS_TIMEOUT,
+                headers=headers,
+            )
 
-        if json_response.status_code == 404:
+        if json_response.status_code != 200:
             return None
 
-        json_data = json_response.json()
-
-        return json_data
-
-    def _get_info_from_urls(self, urls):  # type: (Dict[str, List[str]]) -> PackageInfo
-        # Checking wheels first as they are more likely to hold
-        # the necessary information
-        if "bdist_wheel" in urls:
-            # Check for a universal wheel
-            wheels = urls["bdist_wheel"]
-
-            universal_wheel = None
-            universal_python2_wheel = None
-            universal_python3_wheel = None
-            platform_specific_wheels = []
-            for wheel in wheels:
-                link = Link(wheel)
-                m = wheel_file_re.match(link.filename)
-                if not m:
-                    continue
-
-                pyver = m.group("pyver")
-                abi = m.group("abi")
-                plat = m.group("plat")
-                if abi == "none" and plat == "any":
-                    # Universal wheel
-                    if pyver == "py2.py3":
-                        # Any Python
-                        universal_wheel = wheel
-                    elif pyver == "py2":
-                        universal_python2_wheel = wheel
-                    else:
-                        universal_python3_wheel = wheel
-                else:
-                    platform_specific_wheels.append(wheel)
-
-            if universal_wheel is not None:
-                return self._get_info_from_wheel(universal_wheel)
-
-            info = None
-            if universal_python2_wheel and universal_python3_wheel:
-                info = self._get_info_from_wheel(universal_python2_wheel)
-
-                py3_info = self._get_info_from_wheel(universal_python3_wheel)
-                if py3_info.requires_dist:
-                    if not info.requires_dist:
-                        info.requires_dist = py3_info.requires_dist
-
-                        return info
-
-                    py2_requires_dist = set(
-                        dependency_from_pep_508(r).to_pep_508()
-                        for r in info.requires_dist
-                    )
-                    py3_requires_dist = set(
-                        dependency_from_pep_508(r).to_pep_508()
-                        for r in py3_info.requires_dist
-                    )
-                    base_requires_dist = py2_requires_dist & py3_requires_dist
-                    py2_only_requires_dist = py2_requires_dist - py3_requires_dist
-                    py3_only_requires_dist = py3_requires_dist - py2_requires_dist
-
-                    # Normalizing requires_dist
-                    requires_dist = list(base_requires_dist)
-                    for requirement in py2_only_requires_dist:
-                        dep = dependency_from_pep_508(requirement)
-                        dep.marker = dep.marker.intersect(
-                            parse_marker("python_version == '2.7'")
-                        )
-                        requires_dist.append(dep.to_pep_508())
-
-                    for requirement in py3_only_requires_dist:
-                        dep = dependency_from_pep_508(requirement)
-                        dep.marker = dep.marker.intersect(
-                            parse_marker("python_version >= '3'")
-                        )
-                        requires_dist.append(dep.to_pep_508())
-
-                    info.requires_dist = sorted(list(set(requires_dist)))
-
-            if info:
-                return info
-
-            # Prefer non platform specific wheels
-            if universal_python3_wheel:
-                return self._get_info_from_wheel(universal_python3_wheel)
-
-            if universal_python2_wheel:
-                return self._get_info_from_wheel(universal_python2_wheel)
-
-            if platform_specific_wheels and "sdist" not in urls:
-                # Pick the first wheel available and hope for the best
-                return self._get_info_from_wheel(platform_specific_wheels[0])
-
-        return self._get_info_from_sdist(urls["sdist"][0])
-
-    def _get_info_from_wheel(self, url):  # type: (str) -> PackageInfo
-        self._log(
-            "Downloading wheel: {}".format(urlparse.urlparse(url).path.rsplit("/")[-1]),
-            level="debug",
-        )
-
-        filename = os.path.basename(urlparse.urlparse(url).path.rsplit("/")[-1])
-
-        with temporary_directory() as temp_dir:
-            filepath = Path(temp_dir) / filename
-            self._download(url, str(filepath))
-
-            return PackageInfo.from_wheel(filepath)
-
-    def _get_info_from_sdist(self, url):  # type: (str) -> PackageInfo
-        self._log(
-            "Downloading sdist: {}".format(urlparse.urlparse(url).path.rsplit("/")[-1]),
-            level="debug",
-        )
-
-        filename = os.path.basename(urlparse.urlparse(url).path)
-
-        with temporary_directory() as temp_dir:
-            filepath = Path(temp_dir) / filename
-            self._download(url, str(filepath))
-
-            return PackageInfo.from_sdist(filepath)
-
-    def _download(self, url, dest):  # type: (str, str) -> None
-        return download_file(url, dest, session=self.session)
+        json: dict[str, Any] = json_response.json()
+        return json
 
-    def _log(self, msg, level="info"):
-        getattr(logger, level)("{}: {}".format(self._name, msg))
+    @staticmethod
+    def _get_yanked(json_data: dict[str, Any]) -> str | bool:
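+        # behaviour sketch (illustrative values):
+        #   {"yanked": True, "yanked_reason": "broken"} -> "broken"
+        #   {"yanked": True}                            -> True
+        #   {"yanked": False}                           -> False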
+        if json_data.get("yanked", False):
+            return json_data.get("yanked_reason") or True
+        return False
diff --git a/conda_lock/_vendor/poetry/repositories/remote_repository.py b/conda_lock/_vendor/poetry/repositories/remote_repository.py
deleted file mode 100644
index 7717740d..00000000
--- a/conda_lock/_vendor/poetry/repositories/remote_repository.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from .repository import Repository
-
-
-class RemoteRepository(Repository):
-    def __init__(self, url):  # type: (str) -> None
-        self._url = url
-
-        super(RemoteRepository, self).__init__()
-
-    @property
-    def url(self):  # type: () -> str
-        return self._url
-
-    @property
-    def authenticated_url(self):  # type: () -> str
-        return self._url
diff --git a/conda_lock/_vendor/poetry/repositories/repository.py b/conda_lock/_vendor/poetry/repositories/repository.py
old mode 100755
new mode 100644
index a8a8e555..344301ef
--- a/conda_lock/_vendor/poetry/repositories/repository.py
+++ b/conda_lock/_vendor/poetry/repositories/repository.py
@@ -1,89 +1,75 @@
-from conda_lock._vendor.poetry.core.semver import VersionConstraint
-from conda_lock._vendor.poetry.core.semver import VersionRange
-from conda_lock._vendor.poetry.core.semver import parse_constraint
+from __future__ import annotations
 
-from .base_repository import BaseRepository
+import logging
 
+from typing import TYPE_CHECKING
 
-class Repository(BaseRepository):
-    def __init__(self, packages=None, name=None):
-        super(Repository, self).__init__()
+from packaging.utils import canonicalize_name
+from conda_lock._vendor.poetry.core.constraints.version import Version
 
-        self._name = name
+from conda_lock._vendor.poetry.repositories.abstract_repository import AbstractRepository
+from conda_lock._vendor.poetry.repositories.exceptions import PackageNotFound
 
-        if packages is None:
-            packages = []
 
-        for package in packages:
-            self.add_package(package)
+if TYPE_CHECKING:
+    from packaging.utils import NormalizedName
+    from conda_lock._vendor.poetry.core.constraints.version import VersionConstraint
+    from conda_lock._vendor.poetry.core.packages.dependency import Dependency
+    from conda_lock._vendor.poetry.core.packages.package import Package
+    from conda_lock._vendor.poetry.core.packages.utils.link import Link
 
-    @property
-    def name(self):
-        return self._name
 
-    def package(self, name, version, extras=None):
-        name = name.lower()
+class Repository(AbstractRepository):
+    def __init__(self, name: str, packages: list[Package] | None = None) -> None:
+        super().__init__(name)
+        self._packages: list[Package] = []
 
-        for package in self.packages:
-            if name == package.name and package.version.text == version:
-                return package.clone()
+        for package in packages or []:
+            self.add_package(package)
 
-    def find_packages(self, dependency):
-        constraint = dependency.constraint
+    @property
+    def packages(self) -> list[Package]:
+        return self._packages
+
+    def find_packages(self, dependency: Dependency) -> list[Package]:
         packages = []
         ignored_pre_release_packages = []
 
-        if constraint is None:
-            constraint = "*"
-
-        if not isinstance(constraint, VersionConstraint):
-            constraint = parse_constraint(constraint)
-
+        constraint = dependency.constraint
         allow_prereleases = dependency.allows_prereleases()
-        if isinstance(constraint, VersionRange):
+        for package in self._find_packages(dependency.name, constraint):
+            if package.yanked and not isinstance(constraint, Version):
+                # PEP 592: yanked files are always ignored, unless they are the only
+                # file that matches a version specifier that "pins" to an exact
+                # version
+                continue
             if (
-                constraint.max is not None
-                and constraint.max.is_prerelease()
-                or constraint.min is not None
-                and constraint.min.is_prerelease()
+                package.is_prerelease()
+                and not allow_prereleases
+                and not package.is_direct_origin()
             ):
-                allow_prereleases = True
+                ignored_pre_release_packages.append(package)
+                continue
 
-        for package in self.packages:
-            if dependency.name == package.name:
-                if (
-                    package.is_prerelease()
-                    and not allow_prereleases
-                    and not package.source_type
-                ):
-                    # If prereleases are not allowed and the package is a prerelease
-                    # and is a standard package then we skip it
-                    if constraint.is_any():
-                        # we need this when all versions of the package are pre-releases
-                        ignored_pre_release_packages.append(package)
-                    continue
-
-                if constraint.allows(package.version) or (
-                    package.is_prerelease()
-                    and constraint.allows(package.version.next_patch)
-                ):
-                    packages.append(package)
+            packages.append(package)
+
+        self._log(
+            f"{len(packages)} packages found for {dependency.name} {constraint!s}",
+            level="debug",
+        )
 
         return packages or ignored_pre_release_packages
 
-    def has_package(self, package):
+    def has_package(self, package: Package) -> bool:
         package_id = package.unique_name
+        return any(
+            package_id == repo_package.unique_name for repo_package in self.packages
+        )
 
-        for repo_package in self.packages:
-            if package_id == repo_package.unique_name:
-                return True
-
-        return False
-
-    def add_package(self, package):
+    def add_package(self, package: Package) -> None:
         self._packages.append(package)
 
-    def remove_package(self, package):
+    def remove_package(self, package: Package) -> None:
         package_id = package.unique_name
 
         index = None
@@ -95,11 +81,8 @@ def remove_package(self, package):
         if index is not None:
             del self._packages[index]
 
-    def find_links_for_package(self, package):
-        return []
-
-    def search(self, query):
-        results = []
+    def search(self, query: str) -> list[Package]:
+        results: list[Package] = []
 
         for package in self.packages:
             if query in package.name:
@@ -107,5 +90,31 @@ def search(self, query):
 
         return results
 
-    def __len__(self):
+    def _find_packages(
+        self, name: NormalizedName, constraint: VersionConstraint
+    ) -> list[Package]:
+        return [
+            package
+            for package in self._packages
+            if package.name == name and constraint.allows(package.version)
+        ]
+
+    def _log(self, msg: str, level: str = "info") -> None:
+        logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
+        getattr(logger, level)(f"Source ({self.name}): {msg}")
+
+    def __len__(self) -> int:
         return len(self._packages)
+
+    def find_links_for_package(self, package: Package) -> list[Link]:
+        return []
+
+    def package(
+        self, name: str, version: Version, extras: list[str] | None = None
+    ) -> Package:
+        canonicalized_name = canonicalize_name(name)
+        for package in self.packages:
+            if canonicalized_name == package.name and package.version == version:
+                return package
+
+        raise PackageNotFound(f"Package {name} ({version}) not found.")
diff --git a/conda_lock/_vendor/poetry/repositories/repository_pool.py b/conda_lock/_vendor/poetry/repositories/repository_pool.py
new file mode 100644
index 00000000..203fad60
--- /dev/null
+++ b/conda_lock/_vendor/poetry/repositories/repository_pool.py
@@ -0,0 +1,225 @@
+from __future__ import annotations
+
+import enum
+import warnings
+
+from collections import OrderedDict
+from dataclasses import dataclass
+from enum import IntEnum
+from typing import TYPE_CHECKING
+
+from conda_lock._vendor.poetry.config.config import Config
+from conda_lock._vendor.poetry.repositories.abstract_repository import AbstractRepository
+from conda_lock._vendor.poetry.repositories.exceptions import PackageNotFound
+from conda_lock._vendor.poetry.repositories.repository import Repository
+from conda_lock._vendor.poetry.utils.cache import ArtifactCache
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.core.constraints.version import Version
+    from conda_lock._vendor.poetry.core.packages.dependency import Dependency
+    from conda_lock._vendor.poetry.core.packages.package import Package
+
+_SENTINEL = object()
+
+
+class Priority(IntEnum):
+    # The order of the members below dictates the actual priority. The first member has
+    # top priority.
+    DEFAULT = enum.auto()
+    PRIMARY = enum.auto()
+    SECONDARY = enum.auto()
+    SUPPLEMENTAL = enum.auto()
+    EXPLICIT = enum.auto()
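+    # IntEnum plus auto() assigns 1..5 in declaration order, so e.g.
+    # Priority.DEFAULT < Priority.EXPLICIT holds and sorting by priority
+    # yields DEFAULT first.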
+
+
+@dataclass(frozen=True)
+class PrioritizedRepository:
+    repository: Repository
+    priority: Priority
+
+
+class RepositoryPool(AbstractRepository):
+    def __init__(
+        self,
+        repositories: list[Repository] | None = None,
+        ignore_repository_names: object = _SENTINEL,
+        *,
+        config: Config | None = None,
+    ) -> None:
+        super().__init__("poetry-repository-pool")
+        self._repositories: OrderedDict[str, PrioritizedRepository] = OrderedDict()
+
+        if repositories is None:
+            repositories = []
+        for repository in repositories:
+            self.add_repository(repository)
+
+        self._artifact_cache = ArtifactCache(
+            cache_dir=(config or Config.create()).artifacts_cache_directory
+        )
+
+        if ignore_repository_names is not _SENTINEL:
+            warnings.warn(
+                "The 'ignore_repository_names' argument to 'RepositoryPool.__init__' is"
+                " deprecated. It has no effect anymore and will be removed in a future"
+                " version.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+
+    @staticmethod
+    def from_packages(packages: list[Package], config: Config | None) -> RepositoryPool:
+        pool = RepositoryPool(config=config)
+        for package in packages:
+            if package.is_direct_origin():
+                continue
+
+            repo_name = package.source_reference or "PyPI"
+            try:
+                repo = pool.repository(repo_name)
+            except IndexError:
+                repo = Repository(repo_name)
+                pool.add_repository(repo)
+
+            if not repo.has_package(package):
+                repo.add_package(package)
+
+        return pool
+
+    @property
+    def repositories(self) -> list[Repository]:
+        """
+        Returns the repositories in the pool,
+        in the order they will be searched for packages.
+
+        ATTENTION: For backwards compatibility and practical reasons,
+                   repositories with priority EXPLICIT are NOT included,
+                   because they will not be searched.
+        """
+        sorted_repositories = self._sorted_repositories
+        return [
+            prio_repo.repository
+            for prio_repo in sorted_repositories
+            if prio_repo.priority is not Priority.EXPLICIT
+        ]
+
+    @property
+    def all_repositories(self) -> list[Repository]:
+        return [prio_repo.repository for prio_repo in self._sorted_repositories]
+
+    @property
+    def _sorted_repositories(self) -> list[PrioritizedRepository]:
+        return sorted(
+            self._repositories.values(), key=lambda prio_repo: prio_repo.priority
+        )
+
+    @property
+    def artifact_cache(self) -> ArtifactCache:
+        return self._artifact_cache
+
+    def has_default(self) -> bool:
+        return self._contains_priority(Priority.DEFAULT)
+
+    def has_primary_repositories(self) -> bool:
+        return self._contains_priority(Priority.PRIMARY)
+
+    def _contains_priority(self, priority: Priority) -> bool:
+        return any(
+            prio_repo.priority is priority for prio_repo in self._repositories.values()
+        )
+
+    def has_repository(self, name: str) -> bool:
+        return name.lower() in self._repositories
+
+    def repository(self, name: str) -> Repository:
+        return self._get_prioritized_repository(name).repository
+
+    def get_priority(self, name: str) -> Priority:
+        return self._get_prioritized_repository(name).priority
+
+    def _get_prioritized_repository(self, name: str) -> PrioritizedRepository:
+        name = name.lower()
+        if self.has_repository(name):
+            return self._repositories[name]
+        raise IndexError(f'Repository "{name}" does not exist.')
+
+    def add_repository(
+        self,
+        repository: Repository,
+        default: bool = False,
+        secondary: bool = False,
+        *,
+        priority: Priority = Priority.PRIMARY,
+    ) -> RepositoryPool:
+        """
+        Adds a repository to the pool.
+        """
+        repository_name = repository.name.lower()
+        if self.has_repository(repository_name):
+            raise ValueError(
+                f"A repository with name {repository_name} was already added."
+            )
+
+        if default or secondary:
+            warnings.warn(
+                "Parameters 'default' and 'secondary' to"
+                " 'RepositoryPool.add_repository' are deprecated. Please provide"
+                " the keyword-argument 'priority' instead.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+            priority = Priority.DEFAULT if default else Priority.SECONDARY
+
+        if priority is Priority.DEFAULT and self.has_default():
+            raise ValueError("Only one repository can be the default.")
+
+        self._repositories[repository_name] = PrioritizedRepository(
+            repository, priority
+        )
+        return self
+
+    def remove_repository(self, name: str) -> RepositoryPool:
+        if not self.has_repository(name):
+            raise IndexError(
+                f"RepositoryPool can not remove unknown repository '{name}'."
+            )
+        del self._repositories[name.lower()]
+        return self
+
+    def package(
+        self,
+        name: str,
+        version: Version,
+        extras: list[str] | None = None,
+        repository_name: str | None = None,
+    ) -> Package:
+        if repository_name:
+            return self.repository(repository_name).package(
+                name, version, extras=extras
+            )
+
+        for repo in self.repositories:
+            try:
+                return repo.package(name, version, extras=extras)
+            except PackageNotFound:
+                continue
+        raise PackageNotFound(f"Package {name} ({version}) not found.")
+
+    def find_packages(self, dependency: Dependency) -> list[Package]:
+        repository_name = dependency.source_name
+        if repository_name:
+            return self.repository(repository_name).find_packages(dependency)
+
+        packages: list[Package] = []
+        for repo in self.repositories:
+            if packages and self.get_priority(repo.name) is Priority.SUPPLEMENTAL:
+                break
+            packages += repo.find_packages(dependency)
+        return packages
+
+    def search(self, query: str) -> list[Package]:
+        results: list[Package] = []
+        for repo in self.repositories:
+            results += repo.search(query)
+        return results
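A minimal usage sketch of the pool API above (not part of the diff; import paths assume the vendored layout shown, and the repository names are invented):

    from conda_lock._vendor.poetry.repositories.repository import Repository
    from conda_lock._vendor.poetry.repositories.repository_pool import (
        Priority,
        RepositoryPool,
    )

    pool = RepositoryPool()
    pool.add_repository(Repository("main"))  # Priority.PRIMARY by default
    pool.add_repository(Repository("extras"), priority=Priority.SUPPLEMENTAL)

    assert pool.has_repository("MAIN")  # names are compared case-insensitively
    assert pool.get_priority("extras") is Priority.SUPPLEMENTAL
    # pool.package(...) walks pool.repositories in priority order and returns
    # the first match; EXPLICIT repositories are skipped unless named directly.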
diff --git a/conda_lock/_vendor/poetry/repositories/single_page_repository.py b/conda_lock/_vendor/poetry/repositories/single_page_repository.py
new file mode 100644
index 00000000..ce3cbae5
--- /dev/null
+++ b/conda_lock/_vendor/poetry/repositories/single_page_repository.py
@@ -0,0 +1,22 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from conda_lock._vendor.poetry.repositories.exceptions import PackageNotFound
+from conda_lock._vendor.poetry.repositories.legacy_repository import LegacyRepository
+from conda_lock._vendor.poetry.repositories.link_sources.html import SimpleRepositoryPage
+
+
+if TYPE_CHECKING:
+    from packaging.utils import NormalizedName
+
+
+class SinglePageRepository(LegacyRepository):
+    def _get_page(self, name: NormalizedName) -> SimpleRepositoryPage:
+        """
+        Single page repositories only have one page irrespective of endpoint.
+        """
+        response = self._get_response("")
+        if not response:
+            raise PackageNotFound(f"Package [{name}] not found.")
+        return SimpleRepositoryPage(response.url, response.text)
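A hypothetical sketch of using the class above; the name and URL are invented, and the constructor is inherited from LegacyRepository:

    from conda_lock._vendor.poetry.repositories.single_page_repository import (
        SinglePageRepository,
    )

    repo = SinglePageRepository("flat", url="https://example.com/packages/")
    # _get_page() ignores the requested package name and always fetches the
    # repository root, so every lookup is served from that single page.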
diff --git a/conda_lock/_vendor/poetry/toml/__init__.py b/conda_lock/_vendor/poetry/toml/__init__.py
new file mode 100644
index 00000000..16c6ad2d
--- /dev/null
+++ b/conda_lock/_vendor/poetry/toml/__init__.py
@@ -0,0 +1,7 @@
+from __future__ import annotations
+
+from conda_lock._vendor.poetry.toml.exceptions import TOMLError
+from conda_lock._vendor.poetry.toml.file import TOMLFile
+
+
+__all__ = ["TOMLError", "TOMLFile"]
diff --git a/conda_lock/_vendor/poetry/core/toml/exceptions.py b/conda_lock/_vendor/poetry/toml/exceptions.py
similarity index 83%
rename from conda_lock/_vendor/poetry/core/toml/exceptions.py
rename to conda_lock/_vendor/poetry/toml/exceptions.py
index 4352f48d..93e00002 100644
--- a/conda_lock/_vendor/poetry/core/toml/exceptions.py
+++ b/conda_lock/_vendor/poetry/toml/exceptions.py
@@ -1,6 +1,7 @@
-from tomlkit.exceptions import TOMLKitError
+from __future__ import annotations
 
 from conda_lock._vendor.poetry.core.exceptions import PoetryCoreException
+from tomlkit.exceptions import TOMLKitError
 
 
 class TOMLError(TOMLKitError, PoetryCoreException):
diff --git a/conda_lock/_vendor/poetry/toml/file.py b/conda_lock/_vendor/poetry/toml/file.py
new file mode 100644
index 00000000..15645fc8
--- /dev/null
+++ b/conda_lock/_vendor/poetry/toml/file.py
@@ -0,0 +1,50 @@
+from __future__ import annotations
+
+import warnings
+
+from typing import TYPE_CHECKING
+from typing import Any
+
+from tomlkit.toml_file import TOMLFile as BaseTOMLFile
+
+
+if TYPE_CHECKING:
+    from pathlib import Path
+
+    from tomlkit.toml_document import TOMLDocument
+
+
+class TOMLFile(BaseTOMLFile):
+    def __init__(self, path: Path) -> None:
+        super().__init__(path)
+        self.__path = path
+
+    @property
+    def path(self) -> Path:
+        return self.__path
+
+    def exists(self) -> bool:
+        return self.__path.exists()
+
+    def read(self) -> TOMLDocument:
+        from tomlkit.exceptions import TOMLKitError
+
+        from conda_lock._vendor.poetry.toml import TOMLError
+
+        try:
+            return super().read()
+        except (ValueError, TOMLKitError) as e:
+            raise TOMLError(f"Invalid TOML file {self.path.as_posix()}: {e}") from e
+
+    def __getattr__(self, item: str) -> Any:
+        warnings.warn(
+            "`__getattr__` will be removed from the `TOMLFile` in a future release."
+            "\n\nInstead of accessing properties of the underlying `Path` as "
+            "`tomlfile.whatever`, prefer `tomlfile.path.whatever`.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        return getattr(self.__path, item)
+
+    def __str__(self) -> str:
+        return self.__path.as_posix()
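A short sketch of the wrapper above; the file path is hypothetical:

    from pathlib import Path

    from conda_lock._vendor.poetry.toml import TOMLError, TOMLFile

    toml_file = TOMLFile(Path("pyproject.toml"))
    if toml_file.exists():
        try:
            document = toml_file.read()  # a tomlkit TOMLDocument
        except TOMLError as e:
            print(f"could not parse {toml_file}: {e}")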
diff --git a/conda_lock/_vendor/poetry/utils/_compat.py b/conda_lock/_vendor/poetry/utils/_compat.py
index 937f9b30..be1194c7 100644
--- a/conda_lock/_vendor/poetry/utils/_compat.py
+++ b/conda_lock/_vendor/poetry/utils/_compat.py
@@ -1,290 +1,59 @@
-import sys
-
-
-try:
-    from functools32 import lru_cache
-except ImportError:
-    from functools import lru_cache
-
-try:
-    from glob2 import glob
-except ImportError:
-    from glob import glob
-
-try:
-    import zipfile as zipp
-
-    from importlib import metadata
-except ImportError:
-    import importlib_metadata as metadata
-    import zipp
-
-try:
-    import urllib.parse as urlparse
-except ImportError:
-    import urlparse
-
-try:
-    from os import cpu_count
-except ImportError:  # Python 2
-    from multiprocessing import cpu_count
-
-try:  # Python 2
-    long = long
-    unicode = unicode
-    basestring = basestring
-except NameError:  # Python 3
-    long = int
-    unicode = str
-    basestring = str
-
-
-PY2 = sys.version_info[0] == 2
-PY34 = sys.version_info >= (3, 4)
-PY35 = sys.version_info >= (3, 5)
-PY36 = sys.version_info >= (3, 6)
+from __future__ import annotations
 
-WINDOWS = sys.platform == "win32"
-
-try:
-    from shlex import quote
-except ImportError:
-    # PY2
-    from pipes import quote  # noqa
-
-if PY34:
-    from importlib.machinery import EXTENSION_SUFFIXES
-else:
-    from imp import get_suffixes
+import sys
 
-    EXTENSION_SUFFIXES = [suffix[0] for suffix in get_suffixes()]
+from contextlib import suppress
 
 
-if PY35:
-    from pathlib import Path
-else:
-    from pathlib2 import Path
+# TODO: use try/except ImportError when
+# https://github.com/python/mypy/issues/1393 is fixed
 
-if not PY36:
-    from collections import OrderedDict
+if sys.version_info < (3, 11):
+    # compatibility for python <3.11
+    import tomli as tomllib
 else:
-    OrderedDict = dict
-
+    import tomllib  # nopycln: import
 
-if PY35:
-    import subprocess as subprocess
 
-    from subprocess import CalledProcessError
+if sys.version_info < (3, 10):
+    # compatibility for python <3.10
+    import importlib_metadata as metadata
 else:
-    import subprocess32 as subprocess
-
-    from subprocess32 import CalledProcessError
-
-
-if PY34:
-    # subprocess32 pass the calls directly to subprocess
-    # on Python 3.3+ but Python 3.4 does not provide run()
-    # so we backport it
-    import signal
-
-    from subprocess import PIPE
-    from subprocess import Popen
-    from subprocess import SubprocessError
-    from subprocess import TimeoutExpired
-
-    class CalledProcessError(SubprocessError):
-        """Raised when run() is called with check=True and the process
-        returns a non-zero exit status.
-
-        Attributes:
-          cmd, returncode, stdout, stderr, output
-        """
-
-        def __init__(self, returncode, cmd, output=None, stderr=None):
-            self.returncode = returncode
-            self.cmd = cmd
-            self.output = output
-            self.stderr = stderr
-
-        def __str__(self):
-            if self.returncode and self.returncode < 0:
-                try:
-                    return "Command '%s' died with %r." % (
-                        self.cmd,
-                        signal.Signals(-self.returncode),
-                    )
-                except ValueError:
-                    return "Command '%s' died with unknown signal %d." % (
-                        self.cmd,
-                        -self.returncode,
-                    )
-            else:
-                return "Command '%s' returned non-zero exit status %d." % (
-                    self.cmd,
-                    self.returncode,
-                )
-
-        @property
-        def stdout(self):
-            """Alias for output attribute, to match stderr"""
-            return self.output
-
-        @stdout.setter
-        def stdout(self, value):
-            # There's no obvious reason to set this, but allow it anyway so
-            # .stdout is a transparent alias for .output
-            self.output = value
-
-    class CompletedProcess(object):
-        """A process that has finished running.
-        This is returned by run().
-        Attributes:
-          args: The list or str args passed to run().
-          returncode: The exit code of the process, negative for signals.
-          stdout: The standard output (None if not captured).
-          stderr: The standard error (None if not captured).
-        """
-
-        def __init__(self, args, returncode, stdout=None, stderr=None):
-            self.args = args
-            self.returncode = returncode
-            self.stdout = stdout
-            self.stderr = stderr
-
-        def __repr__(self):
-            args = [
-                "args={!r}".format(self.args),
-                "returncode={!r}".format(self.returncode),
-            ]
-            if self.stdout is not None:
-                args.append("stdout={!r}".format(self.stdout))
-            if self.stderr is not None:
-                args.append("stderr={!r}".format(self.stderr))
-            return "{}({})".format(type(self).__name__, ", ".join(args))
-
-        def check_returncode(self):
-            """Raise CalledProcessError if the exit code is non-zero."""
-            if self.returncode:
-                raise CalledProcessError(
-                    self.returncode, self.args, self.stdout, self.stderr
-                )
-
-    def run(*popenargs, **kwargs):
-        """Run command with arguments and return a CompletedProcess instance.
-        The returned instance will have attributes args, returncode, stdout and
-        stderr. By default, stdout and stderr are not captured, and those attributes
-        will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them.
-        If check is True and the exit code was non-zero, it raises a
-        CalledProcessError. The CalledProcessError object will have the return code
-        in the returncode attribute, and output & stderr attributes if those streams
-        were captured.
-        If timeout is given, and the process takes too long, a TimeoutExpired
-        exception will be raised.
-        There is an optional argument "input", allowing you to
-        pass a string to the subprocess's stdin.  If you use this argument
-        you may not also use the Popen constructor's "stdin" argument, as
-        it will be used internally.
-        The other arguments are the same as for the Popen constructor.
-        If universal_newlines=True is passed, the "input" argument must be a
-        string and stdout/stderr in the returned object will be strings rather than
-        bytes.
-        """
-        input = kwargs.pop("input", None)
-        timeout = kwargs.pop("timeout", None)
-        check = kwargs.pop("check", False)
-        if input is not None:
-            if "stdin" in kwargs:
-                raise ValueError("stdin and input arguments may not both be used.")
-            kwargs["stdin"] = PIPE
-
-        process = Popen(*popenargs, **kwargs)
-        try:
-            process.__enter__()  # No-Op really... illustrate "with in 2.4"
-            try:
-                stdout, stderr = process.communicate(input, timeout=timeout)
-            except TimeoutExpired:
-                process.kill()
-                stdout, stderr = process.communicate()
-                raise TimeoutExpired(
-                    process.args, timeout, output=stdout, stderr=stderr
-                )
-            except:
-                process.kill()
-                process.wait()
-                raise
-            retcode = process.poll()
-            if check and retcode:
-                raise CalledProcessError(
-                    retcode, process.args, output=stdout, stderr=stderr
-                )
-        finally:
-            # None because our context manager __exit__ does not use them.
-            process.__exit__(None, None, None)
-
-        return CompletedProcess(process.args, retcode, stdout, stderr)
-
-    subprocess.run = run
-    subprocess.CalledProcessError = CalledProcessError
+    from importlib import metadata
 
+WINDOWS = sys.platform == "win32"
 
-def decode(string, encodings=None):
-    if not PY2 and not isinstance(string, bytes):
-        return string
 
-    if PY2 and isinstance(string, unicode):
+def decode(string: bytes | str, encodings: list[str] | None = None) -> str:
+    if not isinstance(string, bytes):
         return string
 
     encodings = encodings or ["utf-8", "latin1", "ascii"]
 
     for encoding in encodings:
-        try:
+        with suppress(UnicodeEncodeError, UnicodeDecodeError):
             return string.decode(encoding)
-        except (UnicodeEncodeError, UnicodeDecodeError):
-            pass
 
     return string.decode(encodings[0], errors="ignore")
 
 
-def encode(string, encodings=None):
-    if not PY2 and isinstance(string, bytes):
-        return string
-
-    if PY2 and isinstance(string, str):
+def encode(string: str, encodings: list[str] | None = None) -> bytes:
+    if isinstance(string, bytes):
         return string
 
     encodings = encodings or ["utf-8", "latin1", "ascii"]
 
     for encoding in encodings:
-        try:
+        with suppress(UnicodeEncodeError, UnicodeDecodeError):
             return string.encode(encoding)
-        except (UnicodeEncodeError, UnicodeDecodeError):
-            pass
 
     return string.encode(encodings[0], errors="ignore")
 
 
-def to_str(string):
-    if isinstance(string, str) or not isinstance(string, (unicode, bytes)):
-        return string
-
-    if PY2:
-        method = "encode"
-    else:
-        method = "decode"
-
-    encodings = ["utf-8", "latin1", "ascii"]
-
-    for encoding in encodings:
-        try:
-            return getattr(string, method)(encoding)
-        except (UnicodeEncodeError, UnicodeDecodeError):
-            pass
-
-    return getattr(string, method)(encodings[0], errors="ignore")
-
-
-def list_to_shell_command(cmd):
-    return " ".join(
-        '"{}"'.format(token) if " " in token and token[0] not in {"'", '"'} else token
-        for token in cmd
-    )
+__all__ = [
+    "WINDOWS",
+    "decode",
+    "encode",
+    "metadata",
+    "tomllib",
+]
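A quick sketch of the helpers that remain after this rewrite (assuming the vendored import path):

    from conda_lock._vendor.poetry.utils._compat import decode, encode, tomllib

    assert decode(b"caf\xc3\xa9") == "café"  # tries utf-8, then latin1, then ascii
    assert decode("already text") == "already text"  # str passes through unchanged
    assert encode("café") == b"caf\xc3\xa9"

    with open("pyproject.toml", "rb") as f:  # tomllib only accepts binary files
        data = tomllib.load(f)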
diff --git a/conda_lock/_vendor/poetry/utils/appdirs.py b/conda_lock/_vendor/poetry/utils/appdirs.py
deleted file mode 100644
index 5b9da0cd..00000000
--- a/conda_lock/_vendor/poetry/utils/appdirs.py
+++ /dev/null
@@ -1,252 +0,0 @@
-"""
-This code was taken from https://github.com/ActiveState/appdirs and modified
-to suit our purposes.
-"""
-import os
-import sys
-
-
-WINDOWS = sys.platform.startswith("win") or (sys.platform == "cli" and os.name == "nt")
-
-
-def expanduser(path):
-    """
-    Expand ~ and ~user constructions.
-
-    Includes a workaround for http://bugs.python.org/issue14768
-    """
-    expanded = os.path.expanduser(path)
-    if path.startswith("~/") and expanded.startswith("//"):
-        expanded = expanded[1:]
-    return expanded
-
-
-def user_cache_dir(appname):
-    r"""
-    Return full path to the user-specific cache dir for this application.
-
-        "appname" is the name of application.
-
-    Typical user cache directories are:
-        macOS:      ~/Library/Caches/<AppName>
-        Unix:       ~/.cache/<AppName> (XDG default)
-        Windows:    C:\Users\<username>\AppData\Local\<AppName>\Cache
-
-    On Windows the only suggestion in the MSDN docs is that local settings go
-    in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the
-    non-roaming app data dir (the default returned by `user_data_dir`). Apps
-    typically put cache data somewhere *under* the given dir here. Some
-    examples:
-        ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
-        ...\Acme\SuperApp\Cache\1.0
-
-    OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
-    """
-    if WINDOWS:
-        # Get the base path
-        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
-
-        # Add our app name and Cache directory to it
-        path = os.path.join(path, appname, "Cache")
-    elif sys.platform == "darwin":
-        # Get the base path
-        path = expanduser("~/Library/Caches")
-
-        # Add our app name to it
-        path = os.path.join(path, appname)
-    else:
-        # Get the base path
-        path = os.getenv("XDG_CACHE_HOME", expanduser("~/.cache"))
-
-        # Add our app name to it
-        path = os.path.join(path, appname)
-
-    return path
-
-
-def user_data_dir(appname, roaming=False):
-    r"""
-    Return full path to the user-specific data dir for this application.
-
-        "appname" is the name of application.
-            If None, just the system directory is returned.
-        "roaming" (boolean, default False) can be set True to use the Windows
-            roaming appdata directory. That means that for users on a Windows
-            network setup for roaming profiles, this user data will be
-            sync'd on login. See
-            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
-            for a discussion of issues.
-
-    Typical user data directories are:
-        macOS:                  ~/Library/Application Support/<AppName>
-        Unix:                   ~/.local/share/<AppName>    # or in
-                                $XDG_DATA_HOME, if defined
-        Win XP (not roaming):   C:\Documents and Settings\<username>\ ...
-                                ...Application Data\<AppName>
-        Win XP (roaming):       C:\Documents and Settings\<username>\Local ...
-                                ...Settings\Application Data\<AppName>
-        Win 7  (not roaming):   C:\Users\<username>\AppData\Local\<AppName>
-        Win 7  (roaming):       C:\Users\<username>\AppData\Roaming\<AppName>
-
-    For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
-    That means, by default "~/.local/share/<AppName>".
-    """
-    if WINDOWS:
-        const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
-        path = os.path.join(os.path.normpath(_get_win_folder(const)), appname)
-    elif sys.platform == "darwin":
-        path = os.path.join(expanduser("~/Library/Application Support/"), appname)
-    else:
-        path = os.path.join(
-            os.getenv("XDG_DATA_HOME", expanduser("~/.local/share")), appname
-        )
-
-    return path
-
-
-def user_config_dir(appname, roaming=True):
-    """Return full path to the user-specific config dir for this application.
-
-        "appname" is the name of application.
-            If None, just the system directory is returned.
-        "roaming" (boolean, default True) can be set False to not use the
-            Windows roaming appdata directory. That means that for users on a
-            Windows network setup for roaming profiles, this user data will be
-            sync'd on login. See
-            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
-            for a discussion of issues.
-
-    Typical user data directories are:
-        macOS:                  same as user_data_dir
-        Unix:                   ~/.config/<AppName>
-        Win *:                  same as user_data_dir
-
-    For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
-    That means, by default "~/.config/<AppName>".
-    """
-    if WINDOWS:
-        path = user_data_dir(appname, roaming=roaming)
-    elif sys.platform == "darwin":
-        path = user_data_dir(appname)
-    else:
-        path = os.getenv("XDG_CONFIG_HOME", expanduser("~/.config"))
-        path = os.path.join(path, appname)
-
-    return path
-
-
-# for the discussion regarding site_config_dirs locations
-# see <https://github.com/pypa/pip/issues/1733>
-def site_config_dirs(appname):
-    r"""Return a list of potential user-shared config dirs for this application.
-
-        "appname" is the name of application.
-
-    Typical user config directories are:
-        macOS:      /Library/Application Support/<AppName>/
-        Unix:       /etc or $XDG_CONFIG_DIRS[i]/<AppName>/ for each value in
-                    $XDG_CONFIG_DIRS
-        Win XP:     C:\Documents and Settings\All Users\Application ...
-                    ...Data\<AppName>\
-        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory
-                    on Vista.)
-        Win 7:      Hidden, but writeable on Win 7:
-                    C:\ProgramData\<AppName>\
-    """
-    if WINDOWS:
-        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
-        pathlist = [os.path.join(path, appname)]
-    elif sys.platform == "darwin":
-        pathlist = [os.path.join("/Library/Application Support", appname)]
-    else:
-        # try looking in $XDG_CONFIG_DIRS
-        xdg_config_dirs = os.getenv("XDG_CONFIG_DIRS", "/etc/xdg")
-        if xdg_config_dirs:
-            pathlist = [
-                os.path.join(expanduser(x), appname)
-                for x in xdg_config_dirs.split(os.pathsep)
-            ]
-        else:
-            pathlist = []
-
-        # always look in /etc directly as well
-        pathlist.append("/etc")
-
-    return pathlist
-
-
-# -- Windows support functions --
-
-
-def _get_win_folder_from_registry(csidl_name):
-    """
-    This is a fallback technique at best. I'm not sure if using the
-    registry for this guarantees us the correct answer for all CSIDL_*
-    names.
-    """
-    import _winreg
-
-    shell_folder_name = {
-        "CSIDL_APPDATA": "AppData",
-        "CSIDL_COMMON_APPDATA": "Common AppData",
-        "CSIDL_LOCAL_APPDATA": "Local AppData",
-    }[csidl_name]
-
-    key = _winreg.OpenKey(
-        _winreg.HKEY_CURRENT_USER,
-        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders",
-    )
-    directory, _type = _winreg.QueryValueEx(key, shell_folder_name)
-    return directory
-
-
-def _get_win_folder_with_ctypes(csidl_name):
-    csidl_const = {
-        "CSIDL_APPDATA": 26,
-        "CSIDL_COMMON_APPDATA": 35,
-        "CSIDL_LOCAL_APPDATA": 28,
-    }[csidl_name]
-
-    buf = ctypes.create_unicode_buffer(1024)
-    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
-
-    # Downgrade to short path name if have highbit chars. See
-    # <http://bugs.python.org/issue3426>.
-    has_high_char = False
-    for c in buf:
-        if ord(c) > 255:
-            has_high_char = True
-            break
-    if has_high_char:
-        buf2 = ctypes.create_unicode_buffer(1024)
-        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
-            buf = buf2
-
-    return buf.value
-
-
-if WINDOWS:
-    try:
-        import ctypes
-
-        _get_win_folder = _get_win_folder_with_ctypes
-    except ImportError:
-        _get_win_folder = _get_win_folder_from_registry
-
-
-def _win_path_to_bytes(path):
-    """Encode Windows paths to bytes. Only used on Python 2.
-
-    Motivation is to be consistent with other operating systems where paths
-    are also returned as bytes. This avoids problems mixing bytes and Unicode
-    elsewhere in the codebase. For more details and discussion see
-    .
-
-    If encoding using ASCII and MBCS fails, return the original Unicode path.
-    """
-    for encoding in ("ASCII", "MBCS"):
-        try:
-            return path.encode(encoding)
-        except (UnicodeEncodeError, LookupError):
-            pass
-    return path
diff --git a/conda_lock/_vendor/poetry/utils/authenticator.py b/conda_lock/_vendor/poetry/utils/authenticator.py
new file mode 100644
index 00000000..e33e27a9
--- /dev/null
+++ b/conda_lock/_vendor/poetry/utils/authenticator.py
@@ -0,0 +1,458 @@
+from __future__ import annotations
+
+import contextlib
+import dataclasses
+import functools
+import logging
+import time
+import urllib.parse
+
+from os.path import commonprefix
+from pathlib import Path
+from typing import TYPE_CHECKING
+from typing import Any
+
+import requests
+import requests.adapters
+import requests.auth
+import requests.exceptions
+
+from cachecontrol import CacheControlAdapter
+from cachecontrol.caches import FileCache
+from requests_toolbelt import user_agent
+
+from conda_lock._vendor.poetry.__version__ import __version__
+from conda_lock._vendor.poetry.config.config import Config
+from conda_lock._vendor.poetry.exceptions import PoetryException
+from conda_lock._vendor.poetry.utils.constants import REQUESTS_TIMEOUT
+from conda_lock._vendor.poetry.utils.constants import RETRY_AFTER_HEADER
+from conda_lock._vendor.poetry.utils.constants import STATUS_FORCELIST
+from conda_lock._vendor.poetry.utils.password_manager import HTTPAuthCredential
+from conda_lock._vendor.poetry.utils.password_manager import PasswordManager
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.cleo.io.io import IO
+
+
+logger = logging.getLogger(__name__)
+
+
+@dataclasses.dataclass(frozen=True)
+class RepositoryCertificateConfig:
+    cert: Path | None = dataclasses.field(default=None)
+    client_cert: Path | None = dataclasses.field(default=None)
+    verify: bool = dataclasses.field(default=True)
+
+    @classmethod
+    def create(
+        cls, repository: str, config: Config | None
+    ) -> RepositoryCertificateConfig:
+        config = config if config else Config.create()
+
+        verify: str | bool = config.get(
+            f"certificates.{repository}.verify",
+            config.get(f"certificates.{repository}.cert", True),
+        )
+        client_cert: str = config.get(f"certificates.{repository}.client-cert")
+
+        return cls(
+            cert=Path(verify) if isinstance(verify, str) else None,
+            client_cert=Path(client_cert) if client_cert else None,
+            verify=verify if isinstance(verify, bool) else True,
+        )
+
+
+@dataclasses.dataclass
+class AuthenticatorRepositoryConfig:
+    name: str
+    url: str
+    netloc: str = dataclasses.field(init=False)
+    path: str = dataclasses.field(init=False)
+
+    def __post_init__(self) -> None:
+        parsed_url = urllib.parse.urlsplit(self.url)
+        self.netloc = parsed_url.netloc
+        self.path = parsed_url.path
+
+    def certs(self, config: Config) -> RepositoryCertificateConfig:
+        return RepositoryCertificateConfig.create(self.name, config)
+
+    @property
+    def http_credential_keys(self) -> list[str]:
+        return [self.url, self.netloc, self.name]
+
+    def get_http_credentials(
+        self, password_manager: PasswordManager, username: str | None = None
+    ) -> HTTPAuthCredential:
+        # try with the repository name via the password manager
+        credential = HTTPAuthCredential(
+            **(password_manager.get_http_auth(self.name) or {})
+        )
+
+        if credential.password is not None:
+            return credential
+
+        if password_manager.use_keyring:
+            # fallback to url and netloc based keyring entries
+            credential = password_manager.get_credential(
+                self.url, self.netloc, username=credential.username
+            )
+
+        return credential
+
+
+class Authenticator:
+    def __init__(
+        self,
+        config: Config | None = None,
+        io: IO | None = None,
+        cache_id: str | None = None,
+        disable_cache: bool = False,
+        pool_size: int = requests.adapters.DEFAULT_POOLSIZE,
+    ) -> None:
+        self._config = config or Config.create()
+        self._io = io
+        self._sessions_for_netloc: dict[str, requests.Session] = {}
+        self._credentials: dict[str, HTTPAuthCredential] = {}
+        self._certs: dict[str, RepositoryCertificateConfig] = {}
+        self._configured_repositories: (
+            dict[str, AuthenticatorRepositoryConfig] | None
+        ) = None
+        self._password_manager = PasswordManager(self._config)
+        self._cache_control = (
+            FileCache(
+                self._config.repository_cache_directory
+                / (cache_id or "_default_cache")
+                / "_http"
+            )
+            if not disable_cache
+            else None
+        )
+        self.get_repository_config_for_url = functools.lru_cache(maxsize=None)(
+            self._get_repository_config_for_url
+        )
+        self._pool_size = pool_size
+        self._user_agent = user_agent("poetry", __version__)
+
+    def create_session(self) -> requests.Session:
+        session = requests.Session()
+        session.headers["User-Agent"] = self._user_agent
+
+        if self._cache_control is None:
+            return session
+
+        adapter = CacheControlAdapter(
+            cache=self._cache_control,
+            pool_maxsize=self._pool_size,
+        )
+        session.mount("http://", adapter)
+        session.mount("https://", adapter)
+
+        return session
+
+    def get_session(self, url: str | None = None) -> requests.Session:
+        if not url:
+            return self.create_session()
+
+        parsed_url = urllib.parse.urlsplit(url)
+        netloc = parsed_url.netloc
+
+        if netloc not in self._sessions_for_netloc:
+            logger.debug("Creating new session for %s", netloc)
+            self._sessions_for_netloc[netloc] = self.create_session()
+
+        return self._sessions_for_netloc[netloc]
+
+    def close(self) -> None:
+        for session in self._sessions_for_netloc.values():
+            if session is not None:
+                with contextlib.suppress(AttributeError):
+                    session.close()
+
+    def __del__(self) -> None:
+        self.close()
+
+    def delete_cache(self, url: str) -> None:
+        if self._cache_control is not None:
+            self._cache_control.delete(key=url)
+
+    def authenticated_url(self, url: str) -> str:
+        parsed = urllib.parse.urlparse(url)
+        credential = self.get_credentials_for_url(url)
+
+        if credential.username is not None and credential.password is not None:
+            username = urllib.parse.quote(credential.username, safe="")
+            password = urllib.parse.quote(credential.password, safe="")
+
+            return (
+                f"{parsed.scheme}://{username}:{password}@{parsed.netloc}{parsed.path}"
+            )
+
+        return url
+
+    def request(
+        self, method: str, url: str, raise_for_status: bool = True, **kwargs: Any
+    ) -> requests.Response:
+        headers = kwargs.get("headers")
+        request = requests.Request(method, url, headers=headers)
+        credential = self.get_credentials_for_url(url)
+
+        if credential.username is not None or credential.password is not None:
+            request = requests.auth.HTTPBasicAuth(
+                credential.username or "", credential.password or ""
+            )(request)
+
+        session = self.get_session(url=url)
+        prepared_request = session.prepare_request(request)
+
+        proxies: dict[str, str] = kwargs.get("proxies", {})
+        stream: bool | None = kwargs.get("stream")
+
+        certs = self.get_certs_for_url(url)
+        verify: bool | str | Path = kwargs.get("verify") or certs.cert or certs.verify
+        cert: str | Path | None = kwargs.get("cert") or certs.client_cert
+
+        if cert is not None:
+            cert = str(cert)
+
+        verify = str(verify) if isinstance(verify, Path) else verify
+
+        settings = session.merge_environment_settings(
+            prepared_request.url, proxies, stream, verify, cert
+        )
+
+        # Send the request.
+        send_kwargs = {
+            "timeout": kwargs.get("timeout", REQUESTS_TIMEOUT),
+            "allow_redirects": kwargs.get("allow_redirects", True),
+        }
+        send_kwargs.update(settings)
+
+        attempt = 0
+        resp = None
+
+        while True:
+            is_last_attempt = attempt >= 5
+            try:
+                resp = session.send(prepared_request, **send_kwargs)
+            except (requests.exceptions.ConnectionError, OSError) as e:
+                if is_last_attempt:
+                    raise e
+            else:
+                if resp.status_code not in STATUS_FORCELIST or is_last_attempt:
+                    if raise_for_status:
+                        resp.raise_for_status()
+                    return resp
+
+            if not is_last_attempt:
+                attempt += 1
+                delay = self._get_backoff(resp, attempt)
+                logger.debug("Retrying HTTP request in %s seconds.", delay)
+                time.sleep(delay)
+                continue
+
+        # this should never really be hit under any sane circumstance
+        raise PoetryException(f"Failed HTTP {method.upper()} request")
+
+    def _get_backoff(self, response: requests.Response | None, attempt: int) -> float:
+        if response is not None:
+            retry_after = response.headers.get(RETRY_AFTER_HEADER, "")
+            if retry_after:
+                return float(retry_after)
+
+        return 0.5 * attempt
+
+    def get(self, url: str, **kwargs: Any) -> requests.Response:
+        return self.request("get", url, **kwargs)
+
+    def head(self, url: str, **kwargs: Any) -> requests.Response:
+        kwargs.setdefault("allow_redirects", False)
+        return self.request("head", url, **kwargs)
+
+    def post(self, url: str, **kwargs: Any) -> requests.Response:
+        return self.request("post", url, **kwargs)
+
+    def _get_credentials_for_repository(
+        self, repository: AuthenticatorRepositoryConfig, username: str | None = None
+    ) -> HTTPAuthCredential:
+        # cache repository credentials by repository url to avoid multiple keyring
+        # backend queries when packages are being downloaded from the same source
+        key = f"{repository.url}#username={username or ''}"
+
+        if key not in self._credentials:
+            self._credentials[key] = repository.get_http_credentials(
+                password_manager=self._password_manager, username=username
+            )
+
+        return self._credentials[key]
+
+    def _get_credentials_for_url(
+        self, url: str, exact_match: bool = False
+    ) -> HTTPAuthCredential:
+        repository = self.get_repository_config_for_url(url, exact_match)
+
+        credential = (
+            self._get_credentials_for_repository(repository=repository)
+            if repository is not None
+            else HTTPAuthCredential()
+        )
+
+        if credential.password is None:
+            parsed_url = urllib.parse.urlsplit(url)
+            netloc = parsed_url.netloc
+            credential = self._password_manager.get_credential(
+                url, netloc, username=credential.username
+            )
+
+            return HTTPAuthCredential(
+                username=credential.username, password=credential.password
+            )
+
+        return credential
+
+    def get_credentials_for_git_url(self, url: str) -> HTTPAuthCredential:
+        parsed_url = urllib.parse.urlsplit(url)
+
+        if parsed_url.scheme not in {"http", "https"}:
+            return HTTPAuthCredential()
+
+        key = f"git+{url}"
+
+        if key not in self._credentials:
+            self._credentials[key] = self._get_credentials_for_url(url, True)
+
+        return self._credentials[key]
+
+    def get_credentials_for_url(self, url: str) -> HTTPAuthCredential:
+        parsed_url = urllib.parse.urlsplit(url)
+        netloc = parsed_url.netloc
+
+        if url not in self._credentials:
+            if "@" not in netloc:
+                # no credentials were provided in the url, try finding the
+                # best repository configuration
+                self._credentials[url] = self._get_credentials_for_url(url)
+            else:
+                # Split from the right because that's how urllib.parse.urlsplit()
+                # behaves if more than one @ is present (which can be checked using
+                # the password attribute of urlsplit()'s return value).
+                auth, netloc = netloc.rsplit("@", 1)
+                # Split from the left because that's how urllib.parse.urlsplit()
+                # behaves if more than one : is present (which again can be checked
+                # using the password attribute of the return value)
+                user, password = auth.split(":", 1) if ":" in auth else (auth, "")
+                self._credentials[url] = HTTPAuthCredential(
+                    urllib.parse.unquote(user),
+                    urllib.parse.unquote(password),
+                )
+
+        return self._credentials[url]
+
+    def get_pypi_token(self, name: str) -> str | None:
+        return self._password_manager.get_pypi_token(name)
+
+    def get_http_auth(
+        self, name: str, username: str | None = None
+    ) -> HTTPAuthCredential | None:
+        if name == "pypi":
+            repository = AuthenticatorRepositoryConfig(
+                name, "https://upload.pypi.org/legacy/"
+            )
+        else:
+            if name not in self.configured_repositories:
+                return None
+            repository = self.configured_repositories[name]
+
+        return self._get_credentials_for_repository(
+            repository=repository, username=username
+        )
+
+    def get_certs_for_repository(self, name: str) -> RepositoryCertificateConfig:
+        if name.lower() == "pypi" or name not in self.configured_repositories:
+            return RepositoryCertificateConfig()
+        return self.configured_repositories[name].certs(self._config)
+
+    @property
+    def configured_repositories(self) -> dict[str, AuthenticatorRepositoryConfig]:
+        if self._configured_repositories is None:
+            self._configured_repositories = {}
+            for repository_name in self._config.get("repositories", []):
+                url = self._config.get(f"repositories.{repository_name}.url")
+                self._configured_repositories[repository_name] = (
+                    AuthenticatorRepositoryConfig(repository_name, url)
+                )
+
+        return self._configured_repositories
+
+    def reset_credentials_cache(self) -> None:
+        self.get_repository_config_for_url.cache_clear()
+        self._credentials = {}
+
+    def add_repository(self, name: str, url: str) -> None:
+        self.configured_repositories[name] = AuthenticatorRepositoryConfig(name, url)
+        self.reset_credentials_cache()
+
+    def get_certs_for_url(self, url: str) -> RepositoryCertificateConfig:
+        if url not in self._certs:
+            self._certs[url] = self._get_certs_for_url(url)
+        return self._certs[url]
+
+    def _get_repository_config_for_url(
+        self, url: str, exact_match: bool = False
+    ) -> AuthenticatorRepositoryConfig | None:
+        parsed_url = urllib.parse.urlsplit(url)
+        candidates_netloc_only = []
+        candidates_path_match = []
+
+        for repository in self.configured_repositories.values():
+            if exact_match:
+                if parsed_url.path == repository.path:
+                    return repository
+                continue
+
+            if repository.netloc == parsed_url.netloc:
+                if parsed_url.path.startswith(repository.path) or commonprefix(
+                    (parsed_url.path, repository.path)
+                ):
+                    candidates_path_match.append(repository)
+                    continue
+                candidates_netloc_only.append(repository)
+
+        if candidates_path_match:
+            candidates = candidates_path_match
+        elif candidates_netloc_only:
+            candidates = candidates_netloc_only
+        else:
+            return None
+
+        if len(candidates) > 1:
+            logger.debug(
+                "Multiple source configurations found for %s - %s",
+                parsed_url.netloc,
+                ", ".join(c.name for c in candidates),
+            )
+            # prefer the more specific path
+            candidates.sort(
+                key=lambda c: len(commonprefix([parsed_url.path, c.path])), reverse=True
+            )
+
+        return candidates[0]
+
+    def _get_certs_for_url(self, url: str) -> RepositoryCertificateConfig:
+        selected = self.get_repository_config_for_url(url)
+        if selected:
+            return selected.certs(config=self._config)
+        return RepositoryCertificateConfig()
+
+
+_authenticator: Authenticator | None = None
+
+
+def get_default_authenticator() -> Authenticator:
+    global _authenticator
+
+    if _authenticator is None:
+        _authenticator = Authenticator()
+
+    return _authenticator
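An illustrative sketch of driving the authenticator directly (the URL is arbitrary; Config.create() supplies the defaults):

    from conda_lock._vendor.poetry.utils.authenticator import (
        get_default_authenticator,
    )

    auth = get_default_authenticator()  # process-wide singleton defined above
    response = auth.get("https://pypi.org/simple/")  # raises on HTTP errors
    # Sessions are cached per netloc, credentials come from repository config
    # or the keyring, and 429/5xx responses are retried with backoff.
    print(response.status_code)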
diff --git a/conda_lock/_vendor/poetry/utils/cache.py b/conda_lock/_vendor/poetry/utils/cache.py
new file mode 100644
index 00000000..951781b7
--- /dev/null
+++ b/conda_lock/_vendor/poetry/utils/cache.py
@@ -0,0 +1,342 @@
+from __future__ import annotations
+
+import dataclasses
+import hashlib
+import json
+import logging
+import shutil
+import threading
+import time
+
+from collections import defaultdict
+from pathlib import Path
+from typing import TYPE_CHECKING
+from typing import Any
+from typing import Generic
+from typing import TypeVar
+from typing import overload
+
+from conda_lock._vendor.poetry.utils._compat import decode
+from conda_lock._vendor.poetry.utils._compat import encode
+from conda_lock._vendor.poetry.utils.helpers import get_highest_priority_hash_type
+from conda_lock._vendor.poetry.utils.wheel import InvalidWheelName
+from conda_lock._vendor.poetry.utils.wheel import Wheel
+
+
+if TYPE_CHECKING:
+    from collections.abc import Callable
+
+    from conda_lock._vendor.poetry.core.packages.utils.link import Link
+
+    from conda_lock._vendor.poetry.utils.env import Env
+
+
+# Used by FileCache for items that do not expire.
+MAX_DATE = 9999999999
+T = TypeVar("T")
+
+logger = logging.getLogger(__name__)
+
+
+def _expiration(minutes: int) -> int:
+    """
+    Calculates the time in seconds since epoch that occurs 'minutes' from now.
+
+    :param minutes: The number of minutes to count forward
+    """
+    return round(time.time()) + minutes * 60
+
+
+_HASHES = {
+    "md5": (hashlib.md5, 2),
+    "sha1": (hashlib.sha1, 4),
+    "sha256": (hashlib.sha256, 8),
+}
+
+
+@dataclasses.dataclass(frozen=True)
+class CacheItem(Generic[T]):
+    """
+    Stores data and metadata for cache items.
+    """
+
+    data: T
+    expires: int | None = None
+
+    @property
+    def expired(self) -> bool:
+        """
+        Return true if the cache item has exceeded its expiration period.
+        """
+        return self.expires is not None and time.time() >= self.expires
+
+
+@dataclasses.dataclass(frozen=True)
+class FileCache(Generic[T]):
+    """
+    Cachy-compatible minimal file cache. Stores cached data on disk as JSON.
+
+    :param path: The path that the cache starts at.
+    :param hash_type: The hash to use for encoding keys/building directories.
+    """
+
+    path: Path
+    hash_type: str = "sha256"
+
+    def __post_init__(self) -> None:
+        if self.hash_type not in _HASHES:
+            raise ValueError(
+                f"FileCache.hash_type is unknown value: '{self.hash_type}'."
+            )
+
+    def get(self, key: str) -> T | None:
+        return self._get_payload(key)
+
+    def has(self, key: str) -> bool:
+        """
+        Determine if a file exists and has not expired in the cache.
+        :param key: The cache key
+        :returns: True if the key exists in the cache
+        """
+        return self.get(key) is not None
+
+    def put(self, key: str, value: Any, minutes: int | None = None) -> None:
+        """
+        Store an item in the cache.
+
+        :param key: The cache key
+        :param value: The cache value
+        :param minutes: The lifetime in minutes of the cached value
+        """
+        payload: CacheItem[Any] = CacheItem(
+            value, expires=_expiration(minutes) if minutes is not None else None
+        )
+        path = self._path(key)
+        path.parent.mkdir(parents=True, exist_ok=True)
+        with path.open("wb") as f:
+            f.write(self._serialize(payload))
+
+    def forget(self, key: str) -> None:
+        """
+        Remove an item from the cache.
+
+        :param key: The cache key
+        """
+        path = self._path(key)
+        if path.exists():
+            path.unlink()
+
+    def flush(self) -> None:
+        """
+        Clear the cache.
+        """
+        shutil.rmtree(self.path)
+
+    def remember(
+        self, key: str, callback: T | Callable[[], T], minutes: int | None = None
+    ) -> T:
+        """
+        Get an item from the cache, or use a default from callback.
+
+        :param key: The cache key
+        :param callback: Callback function providing default value
+        :param minutes: The lifetime in minutes of the cached value
+        """
+        value = self.get(key)
+        if value is None:
+            value = callback() if callable(callback) else callback
+            self.put(key, value, minutes)
+        return value
+
+    def _get_payload(self, key: str) -> T | None:
+        path = self._path(key)
+
+        if not path.exists():
+            return None
+
+        with path.open("rb") as f:
+            file_content = f.read()
+
+        try:
+            payload = self._deserialize(file_content)
+        except (json.JSONDecodeError, ValueError):
+            self.forget(key)
+            logger.warning("Corrupt cache file was detected and cleaned up.")
+            return None
+
+        if payload.expired:
+            self.forget(key)
+            return None
+        else:
+            return payload.data
+
+    def _path(self, key: str) -> Path:
+        hash_type, parts_count = _HASHES[self.hash_type]
+        h = hash_type(encode(key)).hexdigest()
+        parts = [h[i : i + 2] for i in range(0, len(h), 2)][:parts_count]
+        return Path(self.path, *parts, h)
+
+    def _serialize(self, payload: CacheItem[T]) -> bytes:
+        expires = payload.expires or MAX_DATE
+        data = json.dumps(payload.data)
+        return encode(f"{expires:010d}{data}")
+
+    def _deserialize(self, data_raw: bytes) -> CacheItem[T]:
+        data_str = decode(data_raw)
+        data = json.loads(data_str[10:])
+        expires = int(data_str[:10])
+        return CacheItem(data, expires)
+
+
+class ArtifactCache:
+    def __init__(self, *, cache_dir: Path) -> None:
+        self._cache_dir = cache_dir
+        self._archive_locks: defaultdict[Path, threading.Lock] = defaultdict(
+            threading.Lock
+        )
+
+    def get_cache_directory_for_link(self, link: Link) -> Path:
+        key_parts = {"url": link.url_without_fragment}
+
+        if hash_name := get_highest_priority_hash_type(
+            set(link.hashes.keys()), link.filename
+        ):
+            key_parts[hash_name] = link.hashes[hash_name]
+
+        if link.subdirectory_fragment:
+            key_parts["subdirectory"] = link.subdirectory_fragment
+
+        return self._get_directory_from_hash(key_parts)
+
+    def _get_directory_from_hash(self, key_parts: object) -> Path:
+        key = hashlib.sha256(
+            json.dumps(
+                key_parts, sort_keys=True, separators=(",", ":"), ensure_ascii=True
+            ).encode("ascii")
+        ).hexdigest()
+
+        split_key = [key[:2], key[2:4], key[4:6], key[6:]]
+        return self._cache_dir.joinpath(*split_key)
+
+    def get_cache_directory_for_git(
+        self, url: str, ref: str, subdirectory: str | None
+    ) -> Path:
+        key_parts = {"url": url, "ref": ref}
+        if subdirectory:
+            key_parts["subdirectory"] = subdirectory
+
+        return self._get_directory_from_hash(key_parts)
+
+    @overload
+    def get_cached_archive_for_link(
+        self,
+        link: Link,
+        *,
+        strict: bool,
+        env: Env | None = ...,
+        download_func: Callable[[str, Path], None],
+    ) -> Path: ...
+
+    @overload
+    def get_cached_archive_for_link(
+        self,
+        link: Link,
+        *,
+        strict: bool,
+        env: Env | None = ...,
+        download_func: None = ...,
+    ) -> Path | None: ...
+
+    def get_cached_archive_for_link(
+        self,
+        link: Link,
+        *,
+        strict: bool,
+        env: Env | None = None,
+        download_func: Callable[[str, Path], None] | None = None,
+    ) -> Path | None:
+        cache_dir = self.get_cache_directory_for_link(link)
+
+        cached_archive = self._get_cached_archive(
+            cache_dir, strict=strict, filename=link.filename, env=env
+        )
+        if cached_archive is None and strict and download_func is not None:
+            cached_archive = cache_dir / link.filename
+            with self._archive_locks[cached_archive]:
+                # Check again if the archive exists (under the lock) to avoid
+                # duplicate downloads because it may have already been downloaded
+                # by another thread in the meantime
+                if not cached_archive.exists():
+                    cache_dir.mkdir(parents=True, exist_ok=True)
+                    try:
+                        download_func(link.url, cached_archive)
+                    except BaseException:
+                        cached_archive.unlink(missing_ok=True)
+                        raise
+
+        return cached_archive
+
+    def get_cached_archive_for_git(
+        self, url: str, reference: str, subdirectory: str | None, env: Env
+    ) -> Path | None:
+        cache_dir = self.get_cache_directory_for_git(url, reference, subdirectory)
+
+        return self._get_cached_archive(cache_dir, strict=False, env=env)
+
+    def _get_cached_archive(
+        self,
+        cache_dir: Path,
+        *,
+        strict: bool,
+        filename: str | None = None,
+        env: Env | None = None,
+    ) -> Path | None:
+        # implication "not strict -> env must not be None"
+        assert strict or env is not None
+        # implication "strict -> filename must not be None"
+        assert not strict or filename is not None
+
+        archives = self._get_cached_archives(cache_dir)
+        if not archives:
+            return None
+
+        candidates: list[tuple[float | None, Path]] = []
+
+        for archive in archives:
+            if strict:
+                # in strict mode return the original cached archive instead of the
+                # prioritized archive type.
+                if filename == archive.name:
+                    return archive
+                continue
+
+            assert env is not None
+
+            if archive.suffix != ".whl":
+                candidates.append((float("inf"), archive))
+                continue
+
+            try:
+                wheel = Wheel(archive.name)
+            except InvalidWheelName:
+                continue
+
+            if not wheel.is_supported_by_environment(env):
+                continue
+
+            candidates.append(
+                (wheel.get_minimum_supported_index(env.supported_tags), archive),
+            )
+
+        if not candidates:
+            return None
+
+        return min(candidates)[1]
+
+    def _get_cached_archives(self, cache_dir: Path) -> list[Path]:
+        archive_types = ["whl", "tar.gz", "tar.bz2", "bz2", "zip"]
+        paths: list[Path] = []
+        for archive_type in archive_types:
+            paths += cache_dir.glob(f"*.{archive_type}")
+
+        return paths
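A minimal sketch of FileCache; the directory is hypothetical, and values must be JSON-serializable:

    from pathlib import Path

    from conda_lock._vendor.poetry.utils.cache import FileCache

    cache: FileCache[str] = FileCache(path=Path("/tmp/demo-cache"))
    cache.put("greeting", "hello", minutes=5)  # expires five minutes from now
    assert cache.get("greeting") == "hello"
    answer = cache.remember("answer", lambda: "42")  # computed once, then cached
    cache.forget("greeting")  # explicit removal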
diff --git a/conda_lock/_vendor/poetry/utils/constants.py b/conda_lock/_vendor/poetry/utils/constants.py
new file mode 100644
index 00000000..e8fe2918
--- /dev/null
+++ b/conda_lock/_vendor/poetry/utils/constants.py
@@ -0,0 +1,12 @@
+from __future__ import annotations
+
+import os
+
+
+# Timeout for HTTP requests using the requests library.
+REQUESTS_TIMEOUT = int(os.getenv("POETRY_REQUESTS_TIMEOUT", 15))
+
+RETRY_AFTER_HEADER = "retry-after"
+
+# Server response codes to retry requests on.
+STATUS_FORCELIST = [429, 500, 501, 502, 503, 504]
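Since REQUESTS_TIMEOUT is resolved once at import time, an override must be in the environment before the module is first imported; a hedged sketch:

    import os

    os.environ["POETRY_REQUESTS_TIMEOUT"] = "60"  # set before the first import

    from conda_lock._vendor.poetry.utils.constants import REQUESTS_TIMEOUT

    assert REQUESTS_TIMEOUT == 60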
diff --git a/conda_lock/_vendor/poetry/utils/dependency_specification.py b/conda_lock/_vendor/poetry/utils/dependency_specification.py
new file mode 100644
index 00000000..6a5a0e85
--- /dev/null
+++ b/conda_lock/_vendor/poetry/utils/dependency_specification.py
@@ -0,0 +1,232 @@
+from __future__ import annotations
+
+import contextlib
+import os
+import re
+import urllib.parse
+
+from pathlib import Path
+from typing import TYPE_CHECKING
+from typing import Dict
+from typing import List
+from typing import TypeVar
+from typing import Union
+from typing import cast
+
+from conda_lock._vendor.poetry.core.packages.dependency import Dependency
+from tomlkit.items import InlineTable
+
+from conda_lock._vendor.poetry.packages.direct_origin import DirectOrigin
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.core.packages.vcs_dependency import VCSDependency
+
+    from conda_lock._vendor.poetry.utils.cache import ArtifactCache
+    from conda_lock._vendor.poetry.utils.env import Env
+
+
+DependencySpec = Dict[str, Union[str, bool, Dict[str, Union[str, bool]], List[str]]]
+BaseSpec = TypeVar("BaseSpec", DependencySpec, InlineTable)
+
+GIT_URL_SCHEMES = {"git+http", "git+https", "git+ssh"}
+
+
+def dependency_to_specification(
+    dependency: Dependency, specification: BaseSpec
+) -> BaseSpec:
+    if dependency.is_vcs():
+        dependency = cast("VCSDependency", dependency)
+        assert dependency.source_url is not None
+        specification[dependency.vcs] = dependency.source_url
+        if dependency.reference:
+            specification["rev"] = dependency.reference
+    elif dependency.is_file() or dependency.is_directory():
+        assert dependency.source_url is not None
+        specification["path"] = dependency.source_url
+    elif dependency.is_url():
+        assert dependency.source_url is not None
+        specification["url"] = dependency.source_url
+    elif dependency.pretty_constraint != "*" and not dependency.constraint.is_empty():
+        specification["version"] = dependency.pretty_constraint
+
+    if not dependency.marker.is_any():
+        specification["markers"] = str(dependency.marker)
+
+    if dependency.extras:
+        specification["extras"] = sorted(dependency.extras)
+
+    return specification
+
+
+class RequirementsParser:
+    def __init__(
+        self,
+        *,
+        artifact_cache: ArtifactCache,
+        env: Env | None = None,
+        cwd: Path | None = None,
+    ) -> None:
+        self._direct_origin = DirectOrigin(artifact_cache)
+        self._env = env
+        self._cwd = cwd or Path.cwd()
+
+    def parse(self, requirement: str) -> DependencySpec:
+        requirement = requirement.strip()
+
+        specification = self._parse_pep508(requirement)
+
+        if specification is not None:
+            return specification
+
+        extras = []
+        extras_m = re.search(r"\[([\w\d,-_ ]+)\]$", requirement)
+        if extras_m:
+            extras = [e.strip() for e in extras_m.group(1).split(",")]
+            requirement, _ = requirement.split("[")
+
+        specification = (
+            self._parse_url(requirement)
+            or self._parse_path(requirement)
+            or self._parse_simple(requirement)
+        )
+
+        if specification:
+            if extras:
+                specification.setdefault("extras", extras)
+            return specification
+
+        raise ValueError(f"Invalid dependency specification: {requirement}")
+
+    def _parse_pep508(self, requirement: str) -> DependencySpec | None:
+        if " ; " not in requirement and re.search(r"@[\^~!=<>\d]", requirement):
+            # this is of the form package@<version constraint>, do not attempt to parse it
+            return None
+
+        with contextlib.suppress(ValueError):
+            dependency = Dependency.create_from_pep_508(requirement)
+            specification: DependencySpec = {}
+            specification = dependency_to_specification(dependency, specification)
+
+            if specification:
+                specification["name"] = dependency.name
+                return specification
+
+        return None
+
+    def _parse_git_url(self, requirement: str) -> DependencySpec | None:
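+        """
+        Build a spec from a git+ URL; illustratively, a URL such as
+        "git+https://github.com/org/repo.git@v1.2" maps to
+        {"git": "https://github.com/org/repo.git", "rev": "v1.2", ...},
+        with "name" taken from the fetched package.
+        """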
+        from conda_lock._vendor.poetry.core.vcs.git import Git
+        from conda_lock._vendor.poetry.core.vcs.git import ParsedUrl
+
+        parsed = ParsedUrl.parse(requirement)
+        url = Git.normalize_url(requirement)
+
+        pair = {"name": parsed.name, "git": url.url}
+
+        if parsed.rev:
+            pair["rev"] = url.revision
+
+        if parsed.subdirectory:
+            pair["subdirectory"] = parsed.subdirectory
+
+        source_root = self._env.path.joinpath("src") if self._env else None
+        package = self._direct_origin.get_package_from_vcs(
+            "git",
+            url=url.url,
+            rev=pair.get("rev"),
+            subdirectory=parsed.subdirectory,
+            source_root=source_root,
+        )
+        pair["name"] = package.name
+        return pair
+
+    def _parse_url(self, requirement: str) -> DependencySpec | None:
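+        """
+        Dispatch URL requirements: git+ schemes go through _parse_git_url,
+        while plain http(s) URLs are fetched so the package name can be
+        read from the artifact itself.
+        """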
+        url_parsed = urllib.parse.urlparse(requirement)
+        if not (url_parsed.scheme and url_parsed.netloc):
+            return None
+
+        if url_parsed.scheme in GIT_URL_SCHEMES:
+            return self._parse_git_url(requirement)
+
+        if url_parsed.scheme in ["http", "https"]:
+            package = self._direct_origin.get_package_from_url(requirement)
+            assert package.source_url is not None
+            return {"name": package.name, "url": package.source_url}
+
+        return None
+
+    def _parse_path(self, requirement: str) -> DependencySpec | None:
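+        """
+        Interpret the requirement as a local file or directory; it must
+        contain a path separator and exist (e.g. "./pkg" or
+        "dist/pkg-1.0.tar.gz"). Relative inputs stay relative to the
+        parser's cwd in the resulting "path" entry.
+        """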
+        if (os.path.sep in requirement or "/" in requirement) and (
+            self._cwd.joinpath(requirement).exists()
+            or (
+                Path(requirement).expanduser().exists()
+                and Path(requirement).expanduser().is_absolute()
+            )
+        ):
+            path = Path(requirement).expanduser()
+            is_absolute = path.is_absolute()
+
+            if not path.is_absolute():
+                path = self._cwd.joinpath(requirement)
+
+            if path.is_file():
+                package = self._direct_origin.get_package_from_file(path.resolve())
+            else:
+                package = self._direct_origin.get_package_from_directory(path.resolve())
+
+            return {
+                "name": package.name,
+                "path": (
+                    path.relative_to(self._cwd).as_posix()
+                    if not is_absolute
+                    else path.as_posix()
+                ),
+            }
+
+        return None
+
+    def _parse_simple(
+        self,
+        requirement: str,
+    ) -> DependencySpec | None:
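+        """
+        Handle bare names and "name version" / "name<op>constraint" forms,
+        e.g. "requests", "requests 2.25.0", "requests==2.25.0", or
+        "requests[socks]>=2.25"; a literal version of "latest" is dropped.
+        """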
+        extras: list[str] = []
+        pair = re.sub(
+            "^([^@=: ]+)(?:@|==|(?<![<>~!])=|:| )(.*)$", "\\1 \\2", requirement
+        )
+        pair = pair.strip()
+
+        require: DependencySpec = {}
+
+        if " " in pair:
+            name, version = pair.split(" ", 1)
+            extras_m = re.search(r"\[([\w\d,\-_]+)\]$", name)
+            if extras_m:
+                extras = [e.strip() for e in extras_m.group(1).split(",")]
+                name, _ = name.split("[")
+
+            require["name"] = name
+            if version != "latest":
+                require["version"] = version
+        else:
+            m = re.match(
+                r"^([^><=!: ]+)((?:>=|<=|>|<|!=|~=|~|\^).*)$", requirement.strip()
+            )
+            if m:
+                name, constraint = m.group(1), m.group(2)
+                extras_m = re.search(r"\[([\w\d,\-_]+)\]$", name)
+                if extras_m:
+                    extras = [e.strip() for e in extras_m.group(1).split(",")]
+                    name, _ = name.split("[")
+
+                require["name"] = name
+                require["version"] = constraint
+            else:
+                extras_m = re.search(r"\[([\w\d,\-_]+)\]$", pair)
+                if extras_m:
+                    extras = [e.strip() for e in extras_m.group(1).split(",")]
+                    pair, _ = pair.split("[")
+
+                require["name"] = pair
+
+        if extras:
+            require["extras"] = extras
+
+        return require
diff --git a/conda_lock/_vendor/poetry/utils/env.py b/conda_lock/_vendor/poetry/utils/env.py
deleted file mode 100644
index 9ca1e68e..00000000
--- a/conda_lock/_vendor/poetry/utils/env.py
+++ /dev/null
@@ -1,1664 +0,0 @@
-import base64
-import hashlib
-import json
-import os
-import platform
-import re
-import shutil
-import sys
-import sysconfig
-import textwrap
-
-from contextlib import contextmanager
-from copy import deepcopy
-from typing import Any
-from typing import Dict
-from typing import List
-from typing import Optional
-from typing import Tuple
-from typing import Union
-
-import packaging.tags
-import tomlkit
-import virtualenv
-
-from clikit.api.io import IO
-from packaging.tags import Tag
-from packaging.tags import interpreter_name
-from packaging.tags import interpreter_version
-from packaging.tags import sys_tags
-
-from conda_lock._vendor.poetry.core.semver import parse_constraint
-from conda_lock._vendor.poetry.core.semver.version import Version
-from conda_lock._vendor.poetry.core.toml.file import TOMLFile
-from conda_lock._vendor.poetry.core.version.markers import BaseMarker
-from conda_lock._vendor.poetry.locations import CACHE_DIR
-from conda_lock._vendor.poetry.poetry import Poetry
-from conda_lock._vendor.poetry.utils._compat import CalledProcessError
-from conda_lock._vendor.poetry.utils._compat import Path
-from conda_lock._vendor.poetry.utils._compat import decode
-from conda_lock._vendor.poetry.utils._compat import encode
-from conda_lock._vendor.poetry.utils._compat import list_to_shell_command
-from conda_lock._vendor.poetry.utils._compat import subprocess
-from conda_lock._vendor.poetry.utils.helpers import is_dir_writable
-from conda_lock._vendor.poetry.utils.helpers import paths_csv
-
-
-GET_ENVIRONMENT_INFO = """\
-import json
-import os
-import platform
-import sys
-import sysconfig
-
-INTERPRETER_SHORT_NAMES = {
-    "python": "py",
-    "cpython": "cp",
-    "pypy": "pp",
-    "ironpython": "ip",
-    "jython": "jy",
-}
-
-
-def interpreter_version():
-    version = sysconfig.get_config_var("interpreter_version")
-    if version:
-        version = str(version)
-    else:
-        version = _version_nodot(sys.version_info[:2])
-
-    return version
-
-
-def _version_nodot(version):
-    # type: (PythonVersion) -> str
-    if any(v >= 10 for v in version):
-        sep = "_"
-    else:
-        sep = ""
-
-    return sep.join(map(str, version))
-
-
-if hasattr(sys, "implementation"):
-    info = sys.implementation.version
-    iver = "{0.major}.{0.minor}.{0.micro}".format(info)
-    kind = info.releaselevel
-    if kind != "final":
-        iver += kind[0] + str(info.serial)
-
-    implementation_name = sys.implementation.name
-else:
-    iver = "0"
-    implementation_name = platform.python_implementation().lower()
-
-env = {
-    "implementation_name": implementation_name,
-    "implementation_version": iver,
-    "os_name": os.name,
-    "platform_machine": platform.machine(),
-    "platform_release": platform.release(),
-    "platform_system": platform.system(),
-    "platform_version": platform.version(),
-    "python_full_version": platform.python_version(),
-    "platform_python_implementation": platform.python_implementation(),
-    "python_version": ".".join(platform.python_version_tuple()[:2]),
-    "sys_platform": sys.platform,
-    "version_info": tuple(sys.version_info),
-    # Extra information
-    "interpreter_name": INTERPRETER_SHORT_NAMES.get(implementation_name, implementation_name),
-    "interpreter_version": interpreter_version(),
-}
-
-print(json.dumps(env))
-"""
-
-
-GET_BASE_PREFIX = """\
-import sys
-
-if hasattr(sys, "real_prefix"):
-    print(sys.real_prefix)
-elif hasattr(sys, "base_prefix"):
-    print(sys.base_prefix)
-else:
-    print(sys.prefix)
-"""
-
-GET_PYTHON_VERSION = """\
-import sys
-
-print('.'.join([str(s) for s in sys.version_info[:3]]))
-"""
-
-GET_SYS_PATH = """\
-import json
-import sys
-
-print(json.dumps(sys.path))
-"""
-
-GET_PATHS = """\
-import json
-import sysconfig
-
-print(json.dumps(sysconfig.get_paths()))
-"""
-
-GET_PATHS_FOR_GENERIC_ENVS = """\
-# We can't use sysconfig.get_paths() because
-# on some distributions it does not return the proper paths
-# (those used by pip for instance). We go through distutils
-# to get the proper ones.
-import json
-import site
-import sysconfig
-
-from distutils.command.install import SCHEME_KEYS  # noqa
-from distutils.core import Distribution
-
-d = Distribution()
-d.parse_config_files()
-obj = d.get_command_obj("install", create=True)
-obj.finalize_options()
-
-paths = sysconfig.get_paths().copy()
-for key in SCHEME_KEYS:
-    if key == "headers":
-        # headers is not a path returned by sysconfig.get_paths()
-        continue
-
-    paths[key] = getattr(obj, f"install_{key}")
-
-if site.check_enableusersite() and hasattr(obj, "install_usersite"):
-    paths["usersite"] = getattr(obj, "install_usersite")
-    paths["userbase"] = getattr(obj, "install_userbase")
-
-print(json.dumps(paths))
-"""
-
-
-class SitePackages:
-    def __init__(
-        self, path, fallbacks=None, skip_write_checks=False
-    ):  # type: (Path, Optional[List[Path]], bool) -> None
-        self._path = path
-        self._fallbacks = fallbacks or []
-        self._skip_write_checks = skip_write_checks
-        self._candidates = [self._path] + self._fallbacks
-        self._writable_candidates = None if not skip_write_checks else self._candidates
-
-    @property
-    def path(self):  # type: () -> Path
-        return self._path
-
-    @property
-    def candidates(self):  # type: () -> List[Path]
-        return self._candidates
-
-    @property
-    def writable_candidates(self):  # type: () -> List[Path]
-        if self._writable_candidates is not None:
-            return self._writable_candidates
-
-        self._writable_candidates = []
-        for candidate in self._candidates:
-            if not is_dir_writable(path=candidate, create=True):
-                continue
-            self._writable_candidates.append(candidate)
-
-        return self._writable_candidates
-
-    def make_candidates(
-        self, path, writable_only=False
-    ):  # type: (Path, bool) -> List[Path]
-        candidates = self._candidates if not writable_only else self.writable_candidates
-        if path.is_absolute():
-            for candidate in candidates:
-                try:
-                    path.relative_to(candidate)
-                    return [path]
-                except ValueError:
-                    pass
-            else:
-                raise ValueError(
-                    "{} is not relative to any discovered {}sites".format(
-                        path, "writable " if writable_only else ""
-                    )
-                )
-
-        return [candidate / path for candidate in candidates if candidate]
-
-    def _path_method_wrapper(
-        self, path, method, *args, **kwargs
-    ):  # type: (Path, str, *Any, **Any) -> Union[Tuple[Path, Any], List[Tuple[Path, Any]]]
-
-        # TODO: Move to parameters after dropping Python 2.7
-        return_first = kwargs.pop("return_first", True)
-        writable_only = kwargs.pop("writable_only", False)
-
-        candidates = self.make_candidates(path, writable_only=writable_only)
-
-        if not candidates:
-            raise RuntimeError(
-                'Unable to find a suitable destination for "{}" in {}'.format(
-                    str(path), paths_csv(self._candidates)
-                )
-            )
-
-        results = []
-
-        for candidate in candidates:
-            try:
-                result = candidate, getattr(candidate, method)(*args, **kwargs)
-                if return_first:
-                    return result
-                else:
-                    results.append(result)
-            except (IOError, OSError):
-                # TODO: Replace with PermissionError
-                pass
-
-        if results:
-            return results
-
-        raise OSError("Unable to access any of {}".format(paths_csv(candidates)))
-
-    def write_text(self, path, *args, **kwargs):  # type: (Path, *Any, **Any) -> Path
-        return self._path_method_wrapper(path, "write_text", *args, **kwargs)[0]
-
-    def mkdir(self, path, *args, **kwargs):  # type: (Path, *Any, **Any) -> Path
-        return self._path_method_wrapper(path, "mkdir", *args, **kwargs)[0]
-
-    def exists(self, path):  # type: (Path) -> bool
-        return any(
-            value[-1]
-            for value in self._path_method_wrapper(path, "exists", return_first=False)
-        )
-
-    def find(self, path, writable_only=False):  # type: (Path, bool) -> List[Path]
-        return [
-            value[0]
-            for value in self._path_method_wrapper(
-                path, "exists", return_first=False, writable_only=writable_only
-            )
-            if value[-1] is True
-        ]
-
-    def __getattr__(self, item):
-        try:
-            return super(SitePackages, self).__getattribute__(item)
-        except AttributeError:
-            return getattr(self.path, item)
-
-
-class EnvError(Exception):
-
-    pass
-
-
-class EnvCommandError(EnvError):
-    def __init__(self, e, input=None):  # type: (CalledProcessError) -> None
-        self.e = e
-
-        message = "Command {} errored with the following return code {}, and output: \n{}".format(
-            e.cmd, e.returncode, decode(e.output)
-        )
-        if input:
-            message += "input was : {}".format(input)
-        super(EnvCommandError, self).__init__(message)
-
-
-class NoCompatiblePythonVersionFound(EnvError):
-    def __init__(self, expected, given=None):
-        if given:
-            message = (
-                "The specified Python version ({}) "
-                "is not supported by the project ({}).\n"
-                "Please choose a compatible version "
-                "or loosen the python constraint specified "
-                "in the pyproject.toml file.".format(given, expected)
-            )
-        else:
-            message = (
-                "Poetry was unable to find a compatible version. "
-                "If you have one, you can explicitly use it "
-                'via the "env use" command.'
-            )
-
-        super(NoCompatiblePythonVersionFound, self).__init__(message)
-
-
-class EnvManager(object):
-    """
-    Environments manager
-    """
-
-    _env = None
-
-    ENVS_FILE = "envs.toml"
-
-    def __init__(self, poetry):  # type: (Poetry) -> None
-        self._poetry = poetry
-
-    def activate(self, python, io):  # type: (str, IO) -> Env
-        venv_path = self._poetry.config.get("virtualenvs.path")
-        if venv_path is None:
-            venv_path = Path(CACHE_DIR) / "virtualenvs"
-        else:
-            venv_path = Path(venv_path)
-
-        cwd = self._poetry.file.parent
-
-        envs_file = TOMLFile(venv_path / self.ENVS_FILE)
-
-        try:
-            python_version = Version.parse(python)
-            python = "python{}".format(python_version.major)
-            if python_version.precision > 1:
-                python += ".{}".format(python_version.minor)
-        except ValueError:
-            # Executable in PATH or full executable path
-            pass
-
-        try:
-            python_version = decode(
-                subprocess.check_output(
-                    list_to_shell_command(
-                        [
-                            python,
-                            "-c",
-                            "\"import sys; print('.'.join([str(s) for s in sys.version_info[:3]]))\"",
-                        ]
-                    ),
-                    shell=True,
-                )
-            )
-        except CalledProcessError as e:
-            raise EnvCommandError(e)
-
-        python_version = Version.parse(python_version.strip())
-        minor = "{}.{}".format(python_version.major, python_version.minor)
-        patch = python_version.text
-
-        create = False
-        is_root_venv = self._poetry.config.get("virtualenvs.in-project")
-        # If we are required to create the virtual environment in the root folder,
-        # create or recreate it if needed
-        if is_root_venv:
-            create = False
-            venv = self._poetry.file.parent / ".venv"
-            if venv.exists():
-                # We need to check if the patch version is correct
-                _venv = VirtualEnv(venv)
-                current_patch = ".".join(str(v) for v in _venv.version_info[:3])
-
-                if patch != current_patch:
-                    create = True
-
-            self.create_venv(io, executable=python, force=create)
-
-            return self.get(reload=True)
-
-        envs = tomlkit.document()
-        base_env_name = self.generate_env_name(self._poetry.package.name, str(cwd))
-        if envs_file.exists():
-            envs = envs_file.read()
-            current_env = envs.get(base_env_name)
-            if current_env is not None:
-                current_minor = current_env["minor"]
-                current_patch = current_env["patch"]
-
-                if current_minor == minor and current_patch != patch:
-                    # We need to recreate
-                    create = True
-
-        name = "{}-py{}".format(base_env_name, minor)
-        venv = venv_path / name
-
-        # Create if needed
-        if not venv.exists() or (venv.exists() and create):
-            in_venv = os.environ.get("VIRTUAL_ENV") is not None
-            if in_venv or not venv.exists():
-                create = True
-
-            if venv.exists():
-                # We need to check if the patch version is correct
-                _venv = VirtualEnv(venv)
-                current_patch = ".".join(str(v) for v in _venv.version_info[:3])
-
-                if patch != current_patch:
-                    create = True
-
-            self.create_venv(io, executable=python, force=create)
-
-        # Activate
-        envs[base_env_name] = {"minor": minor, "patch": patch}
-        envs_file.write(envs)
-
-        return self.get(reload=True)
-
-    def deactivate(self, io):  # type: (IO) -> None
-        venv_path = self._poetry.config.get("virtualenvs.path")
-        if venv_path is None:
-            venv_path = Path(CACHE_DIR) / "virtualenvs"
-        else:
-            venv_path = Path(venv_path)
-
-        name = self._poetry.package.name
-        name = self.generate_env_name(name, str(self._poetry.file.parent))
-
-        envs_file = TOMLFile(venv_path / self.ENVS_FILE)
-        if envs_file.exists():
-            envs = envs_file.read()
-            env = envs.get(name)
-            if env is not None:
-                io.write_line(
-                    "Deactivating virtualenv: {}".format(
-                        venv_path / (name + "-py{}".format(env["minor"]))
-                    )
-                )
-                del envs[name]
-
-                envs_file.write(envs)
-
-    def get(self, reload=False):  # type: (bool) -> Env
-        if self._env is not None and not reload:
-            return self._env
-
-        python_minor = ".".join([str(v) for v in sys.version_info[:2]])
-
-        venv_path = self._poetry.config.get("virtualenvs.path")
-        if venv_path is None:
-            venv_path = Path(CACHE_DIR) / "virtualenvs"
-        else:
-            venv_path = Path(venv_path)
-
-        cwd = self._poetry.file.parent
-        envs_file = TOMLFile(venv_path / self.ENVS_FILE)
-        env = None
-        base_env_name = self.generate_env_name(self._poetry.package.name, str(cwd))
-        if envs_file.exists():
-            envs = envs_file.read()
-            env = envs.get(base_env_name)
-            if env:
-                python_minor = env["minor"]
-
-        # Check if we are inside a virtualenv or not
-        # Conda sets CONDA_PREFIX in its envs, see
-        # https://github.com/conda/conda/issues/2764
-        env_prefix = os.environ.get("VIRTUAL_ENV", os.environ.get("CONDA_PREFIX"))
-        conda_env_name = os.environ.get("CONDA_DEFAULT_ENV")
-        # It's probably not a good idea to pollute Conda's global "base" env, since
-        # most users have it activated all the time.
-        in_venv = env_prefix is not None and conda_env_name != "base"
-
-        if not in_venv or env is not None:
-            # Checking if a local virtualenv exists
-            if self._poetry.config.get("virtualenvs.in-project") is not False:
-                if (cwd / ".venv").exists() and (cwd / ".venv").is_dir():
-                    venv = cwd / ".venv"
-
-                    return VirtualEnv(venv)
-
-            create_venv = self._poetry.config.get("virtualenvs.create", True)
-
-            if not create_venv:
-                return self.get_system_env()
-
-            venv_path = self._poetry.config.get("virtualenvs.path")
-            if venv_path is None:
-                venv_path = Path(CACHE_DIR) / "virtualenvs"
-            else:
-                venv_path = Path(venv_path)
-
-            name = "{}-py{}".format(base_env_name, python_minor.strip())
-
-            venv = venv_path / name
-
-            if not venv.exists():
-                return self.get_system_env()
-
-            return VirtualEnv(venv)
-
-        if env_prefix is not None:
-            prefix = Path(env_prefix)
-            base_prefix = None
-        else:
-            prefix = Path(sys.prefix)
-            base_prefix = self.get_base_prefix()
-
-        return VirtualEnv(prefix, base_prefix)
-
-    def list(self, name=None):  # type: (Optional[str]) -> List[VirtualEnv]
-        if name is None:
-            name = self._poetry.package.name
-
-        venv_name = self.generate_env_name(name, str(self._poetry.file.parent))
-
-        venv_path = self._poetry.config.get("virtualenvs.path")
-        if venv_path is None:
-            venv_path = Path(CACHE_DIR) / "virtualenvs"
-        else:
-            venv_path = Path(venv_path)
-
-        env_list = [
-            VirtualEnv(Path(p))
-            for p in sorted(venv_path.glob("{}-py*".format(venv_name)))
-        ]
-
-        venv = self._poetry.file.parent / ".venv"
-        if (
-            self._poetry.config.get("virtualenvs.in-project")
-            and venv.exists()
-            and venv.is_dir()
-        ):
-            env_list.insert(0, VirtualEnv(venv))
-        return env_list
-
-    def remove(self, python):  # type: (str) -> Env
-        venv_path = self._poetry.config.get("virtualenvs.path")
-        if venv_path is None:
-            venv_path = Path(CACHE_DIR) / "virtualenvs"
-        else:
-            venv_path = Path(venv_path)
-
-        cwd = self._poetry.file.parent
-        envs_file = TOMLFile(venv_path / self.ENVS_FILE)
-        base_env_name = self.generate_env_name(self._poetry.package.name, str(cwd))
-
-        if python.startswith(base_env_name):
-            venvs = self.list()
-            for venv in venvs:
-                if venv.path.name == python:
-                    # Exact virtualenv name
-                    if not envs_file.exists():
-                        self.remove_venv(venv.path)
-
-                        return venv
-
-                    venv_minor = ".".join(str(v) for v in venv.version_info[:2])
-                    base_env_name = self.generate_env_name(cwd.name, str(cwd))
-                    envs = envs_file.read()
-
-                    current_env = envs.get(base_env_name)
-                    if not current_env:
-                        self.remove_venv(venv.path)
-
-                        return venv
-
-                    if current_env["minor"] == venv_minor:
-                        del envs[base_env_name]
-                        envs_file.write(envs)
-
-                    self.remove_venv(venv.path)
-
-                    return venv
-
-            raise ValueError(
-                'Environment "{}" does not exist.'.format(python)
-            )
-
-        try:
-            python_version = Version.parse(python)
-            python = "python{}".format(python_version.major)
-            if python_version.precision > 1:
-                python += ".{}".format(python_version.minor)
-        except ValueError:
-            # Executable in PATH or full executable path
-            pass
-
-        try:
-            python_version = decode(
-                subprocess.check_output(
-                    list_to_shell_command(
-                        [
-                            python,
-                            "-c",
-                            "\"import sys; print('.'.join([str(s) for s in sys.version_info[:3]]))\"",
-                        ]
-                    ),
-                    shell=True,
-                )
-            )
-        except CalledProcessError as e:
-            raise EnvCommandError(e)
-
-        python_version = Version.parse(python_version.strip())
-        minor = "{}.{}".format(python_version.major, python_version.minor)
-
-        name = "{}-py{}".format(base_env_name, minor)
-        venv = venv_path / name
-
-        if not venv.exists():
-            raise ValueError(
-                'Environment "{}" does not exist.'.format(name)
-            )
-
-        if envs_file.exists():
-            envs = envs_file.read()
-            current_env = envs.get(base_env_name)
-            if current_env is not None:
-                current_minor = current_env["minor"]
-
-                if current_minor == minor:
-                    del envs[base_env_name]
-                    envs_file.write(envs)
-
-        self.remove_venv(venv)
-
-        return VirtualEnv(venv, venv)
-
-    def create_venv(
-        self, io, name=None, executable=None, force=False
-    ):  # type: (IO, Optional[str], Optional[str], bool) -> Env
-        if self._env is not None and not force:
-            return self._env
-
-        cwd = self._poetry.file.parent
-        env = self.get(reload=True)
-
-        if not env.is_sane():
-            force = True
-
-        if env.is_venv() and not force:
-            # Already inside a virtualenv.
-            return env
-
-        create_venv = self._poetry.config.get("virtualenvs.create")
-        root_venv = self._poetry.config.get("virtualenvs.in-project")
-
-        venv_path = self._poetry.config.get("virtualenvs.path")
-        if root_venv:
-            venv_path = cwd / ".venv"
-        elif venv_path is None:
-            venv_path = Path(CACHE_DIR) / "virtualenvs"
-        else:
-            venv_path = Path(venv_path)
-
-        if not name:
-            name = self._poetry.package.name
-
-        python_patch = ".".join([str(v) for v in sys.version_info[:3]])
-        python_minor = ".".join([str(v) for v in sys.version_info[:2]])
-        if executable:
-            python_patch = decode(
-                subprocess.check_output(
-                    list_to_shell_command(
-                        [
-                            executable,
-                            "-c",
-                            "\"import sys; print('.'.join([str(s) for s in sys.version_info[:3]]))\"",
-                        ]
-                    ),
-                    shell=True,
-                ).strip()
-            )
-            python_minor = ".".join(python_patch.split(".")[:2])
-
-        supported_python = self._poetry.package.python_constraint
-        if not supported_python.allows(Version.parse(python_patch)):
-            # The currently activated or chosen Python version
-            # is not compatible with the Python constraint specified
-            # for the project.
-            # If an executable has been specified, we stop there
-            # and notify the user of the incompatibility.
-            # Otherwise, we try to find a compatible Python version.
-            if executable:
-                raise NoCompatiblePythonVersionFound(
-                    self._poetry.package.python_versions, python_patch
-                )
-
-            io.write_line(
-                "The currently activated Python version {} "
-                "is not supported by the project ({}).\n"
-                "Trying to find and use a compatible version. ".format(
-                    python_patch, self._poetry.package.python_versions
-                )
-            )
-
-            for python_to_try in reversed(
-                sorted(
-                    self._poetry.package.AVAILABLE_PYTHONS,
-                    key=lambda v: (v.startswith("3"), -len(v), v),
-                )
-            ):
-                if len(python_to_try) == 1:
-                    if not parse_constraint("^{}.0".format(python_to_try)).allows_any(
-                        supported_python
-                    ):
-                        continue
-                elif not supported_python.allows_all(
-                    parse_constraint(python_to_try + ".*")
-                ):
-                    continue
-
-                python = "python" + python_to_try
-
-                if io.is_debug():
-                    io.write_line("Trying {}".format(python))
-
-                try:
-                    python_patch = decode(
-                        subprocess.check_output(
-                            list_to_shell_command(
-                                [
-                                    python,
-                                    "-c",
-                                    "\"import sys; print('.'.join([str(s) for s in sys.version_info[:3]]))\"",
-                                ]
-                            ),
-                            stderr=subprocess.STDOUT,
-                            shell=True,
-                        ).strip()
-                    )
-                except CalledProcessError:
-                    continue
-
-                if not python_patch:
-                    continue
-
-                if supported_python.allows(Version.parse(python_patch)):
-                    io.write_line("Using {} ({})".format(python, python_patch))
-                    executable = python
-                    python_minor = ".".join(python_patch.split(".")[:2])
-                    break
-
-            if not executable:
-                raise NoCompatiblePythonVersionFound(
-                    self._poetry.package.python_versions
-                )
-
-        if root_venv:
-            venv = venv_path
-        else:
-            name = self.generate_env_name(name, str(cwd))
-            name = "{}-py{}".format(name, python_minor.strip())
-            venv = venv_path / name
-
-        if not venv.exists():
-            if create_venv is False:
-                io.write_line(
-                    "Skipping virtualenv creation, "
-                    "as specified in config file."
-                )
-
-                return self.get_system_env()
-
-            io.write_line(
-                "Creating virtualenv {} in {}".format(name, str(venv_path))
-            )
-
-            self.build_venv(venv, executable=executable)
-        else:
-            if force:
-                if not env.is_sane():
-                    io.write_line(
-                        "The virtual environment found in {} seems to be broken.".format(
-                            env.path
-                        )
-                    )
-                io.write_line(
-                    "Recreating virtualenv {} in {}".format(name, str(venv))
-                )
-                self.remove_venv(venv)
-                self.build_venv(venv, executable=executable)
-            elif io.is_very_verbose():
-                io.write_line("Virtualenv {} already exists.".format(name))
-
-        # venv detection:
-        # stdlib venv may symlink sys.executable, so we can't use realpath.
-        # but others can symlink *to* the venv Python,
-        # so we can't just use sys.executable.
-        # So we just check every item in the symlink tree (generally <= 3)
-        p = os.path.normcase(sys.executable)
-        paths = [p]
-        while os.path.islink(p):
-            p = os.path.normcase(os.path.join(os.path.dirname(p), os.readlink(p)))
-            paths.append(p)
-
-        p_venv = os.path.normcase(str(venv))
-        if any(p.startswith(p_venv) for p in paths):
-            # Running properly in the virtualenv, don't need to do anything
-            return SystemEnv(Path(sys.prefix), Path(self.get_base_prefix()))
-
-        return VirtualEnv(venv)
-
-    @classmethod
-    def build_venv(
-        cls, path, executable=None
-    ):  # type: (Union[Path, str], Optional[Union[str, Path]]) -> virtualenv.run.session.Session
-        if isinstance(executable, Path):
-            executable = executable.resolve().as_posix()
-        return virtualenv.cli_run(
-            [
-                "--no-download",
-                "--no-periodic-update",
-                "--python",
-                executable or sys.executable,
-                str(path),
-            ]
-        )
-
-    @classmethod
-    def remove_venv(cls, path):  # type: (Union[Path, str]) -> None
-        if isinstance(path, str):
-            path = Path(path)
-        assert path.is_dir()
-        try:
-            shutil.rmtree(str(path))
-            return
-        except OSError as e:
-            # Continue only if e.errno == 16
-            if e.errno != 16:  # ERRNO 16: Device or resource busy
-                raise e
-
-        # Delete all files and folders but the toplevel one. This is because sometimes
-        # the venv folder is mounted by the OS, such as in a docker volume. In such
-        # cases, an attempt to delete the folder itself will result in an `OSError`.
-        # See https://github.com/python-poetry/poetry/pull/2064
-        for file_path in path.iterdir():
-            if file_path.is_file() or file_path.is_symlink():
-                file_path.unlink()
-            elif file_path.is_dir():
-                shutil.rmtree(str(file_path))
-
-    @classmethod
-    def get_system_env(
-        cls, naive=False
-    ):  # type: (bool) -> Union["SystemEnv", "GenericEnv"]
-        """
-        Retrieve the current Python environment.
-        This can be the base Python environment or an activated virtual environment.
-        This method also works around the issue that the virtual environment
-        used by Poetry internally (when installed via the custom installer)
-        is incorrectly detected as the system environment. Note that this workaround
-        happens only when `naive` is False since there are times where we actually
-        want to retrieve Poetry's custom virtual environment
-        (e.g. plugin installation or self update).
-        """
-        prefix, base_prefix = Path(sys.prefix), Path(cls.get_base_prefix())
-        env = SystemEnv(prefix)
-        if not naive:
-            if prefix.joinpath("poetry_env").exists():
-                env = GenericEnv(base_prefix, child_env=env)
-            else:
-                from conda_lock._vendor.poetry.locations import data_dir
-
-                try:
-                    prefix.relative_to(data_dir())
-                except ValueError:
-                    pass
-                else:
-                    env = GenericEnv(base_prefix, child_env=env)
-
-        return env
-
-    @classmethod
-    def get_base_prefix(cls):  # type: () -> str
-        if hasattr(sys, "real_prefix"):
-            return sys.real_prefix
-
-        if hasattr(sys, "base_prefix"):
-            return sys.base_prefix
-
-        return sys.prefix
-
-    @classmethod
-    def generate_env_name(cls, name, cwd):  # type: (str, str) -> str
-        name = name.lower()
-        sanitized_name = re.sub(r'[ $`!*@"\\\r\n\t]', "_", name)[:42]
-        h = hashlib.sha256(encode(cwd)).digest()
-        h = base64.urlsafe_b64encode(h).decode()[:8]
-
-        return "{}-{}".format(sanitized_name, h)
-
-
-class Env(object):
-    """
-    An abstract Python environment.
-    """
-
-    def __init__(self, path, base=None):  # type: (Path, Optional[Path]) -> None
-        self._is_windows = sys.platform == "win32"
-        self._is_mingw = sysconfig.get_platform().startswith("mingw")
-        self._is_conda = bool(os.environ.get("CONDA_DEFAULT_ENV"))
-
-        if not self._is_windows or self._is_mingw:
-            bin_dir = "bin"
-        else:
-            bin_dir = "Scripts"
-
-        self._path = path
-        self._bin_dir = self._path / bin_dir
-
-        self._base = base or path
-
-        self._executable = "python"
-        self._pip_executable = "pip"
-
-        self.find_executables()
-
-        self._marker_env = None
-        self._pip_version = None
-        self._site_packages = None
-        self._paths = None
-        self._supported_tags = None
-        self._purelib = None
-        self._platlib = None
-        self._script_dirs = None
-
-    @property
-    def path(self):  # type: () -> Path
-        return self._path
-
-    @property
-    def base(self):  # type: () -> Path
-        return self._base
-
-    @property
-    def version_info(self):  # type: () -> Tuple[int, ...]
-        return tuple(self.marker_env["version_info"])
-
-    @property
-    def python_implementation(self):  # type: () -> str
-        return self.marker_env["platform_python_implementation"]
-
-    @property
-    def python(self):  # type: () -> str
-        """
-        Path to current python executable
-        """
-        return self._bin(self._executable)
-
-    @property
-    def marker_env(self):
-        if self._marker_env is None:
-            self._marker_env = self.get_marker_env()
-
-        return self._marker_env
-
-    @property
-    def parent_env(self):  # type: () -> GenericEnv
-        return GenericEnv(self.base, child_env=self)
-
-    @property
-    def pip(self):  # type: () -> str
-        """
-        Path to current pip executable
-        """
-        return self._bin(self._pip_executable)
-
-    @property
-    def platform(self):  # type: () -> str
-        return sys.platform
-
-    @property
-    def os(self):  # type: () -> str
-        return os.name
-
-    @property
-    def pip_version(self):
-        if self._pip_version is None:
-            self._pip_version = self.get_pip_version()
-
-        return self._pip_version
-
-    @property
-    def site_packages(self):  # type: () -> SitePackages
-        if self._site_packages is None:
-            # we disable write checks if no user site exists
-            fallbacks = [self.usersite] if self.usersite else []
-            self._site_packages = SitePackages(
-                self.purelib, fallbacks, skip_write_checks=not fallbacks
-            )
-            )
-        return self._site_packages
-
-    @property
-    def usersite(self):  # type: () -> Optional[Path]
-        if "usersite" in self.paths:
-            return Path(self.paths["usersite"])
-
-    @property
-    def userbase(self):  # type: () -> Optional[Path]
-        if "userbase" in self.paths:
-            return Path(self.paths["userbase"])
-
-    @property
-    def purelib(self):  # type: () -> Path
-        if self._purelib is None:
-            self._purelib = Path(self.paths["purelib"])
-
-        return self._purelib
-
-    @property
-    def platlib(self):  # type: () -> Path
-        if self._platlib is None:
-            if "platlib" in self.paths:
-                self._platlib = Path(self.paths["platlib"])
-            else:
-                self._platlib = self.purelib
-
-        return self._platlib
-
-    def is_path_relative_to_lib(self, path):  # type: (Path) -> bool
-        for lib_path in [self.purelib, self.platlib]:
-            try:
-                path.relative_to(lib_path)
-                return True
-            except ValueError:
-                pass
-
-        return False
-
-    @property
-    def sys_path(self):  # type: () -> List[str]
-        raise NotImplementedError()
-
-    @property
-    def paths(self):  # type: () -> Dict[str, str]
-        if self._paths is None:
-            self._paths = self.get_paths()
-
-        return self._paths
-
-    @property
-    def supported_tags(self):  # type: () -> List[Tag]
-        if self._supported_tags is None:
-            self._supported_tags = self.get_supported_tags()
-
-        return self._supported_tags
-
-    @classmethod
-    def get_base_prefix(cls):  # type: () -> str
-        if hasattr(sys, "real_prefix"):
-            return sys.real_prefix
-
-        if hasattr(sys, "base_prefix"):
-            return sys.base_prefix
-
-        return sys.prefix
-
-    def _find_python_executable(self):  # type: () -> None
-        bin_dir = self._bin_dir
-
-        if self._is_windows and self._is_conda:
-            bin_dir = self._path
-
-        python_executables = sorted(
-            p.name
-            for p in bin_dir.glob("python*")
-            if re.match(r"python(?:\d+(?:\.\d+)?)?(?:\.exe)?$", p.name)
-        )
-        if python_executables:
-            executable = python_executables[0]
-            if executable.endswith(".exe"):
-                executable = executable[:-4]
-
-            self._executable = executable
-
-    def _find_pip_executable(self):  # type: () -> None
-        pip_executables = sorted(
-            p.name
-            for p in self._bin_dir.glob("pip*")
-            if re.match(r"pip(?:\d+(?:\.\d+)?)?(?:\.exe)?$", p.name)
-        )
-        if pip_executables:
-            pip_executable = pip_executables[0]
-            if pip_executable.endswith(".exe"):
-                pip_executable = pip_executable[:-4]
-
-            self._pip_executable = pip_executable
-
-    def find_executables(self):  # type: () -> None
-        self._find_python_executable()
-        self._find_pip_executable()
-
-    def get_version_info(self):  # type: () -> Tuple[int, ...]
-        raise NotImplementedError()
-
-    def get_python_implementation(self):  # type: () -> str
-        raise NotImplementedError()
-
-    def get_marker_env(self):  # type: () -> Dict[str, Any]
-        raise NotImplementedError()
-
-    def get_pip_command(self):  # type: () -> List[str]
-        raise NotImplementedError()
-
-    def get_supported_tags(self):  # type: () -> List[Tag]
-        raise NotImplementedError()
-
-    def get_pip_version(self):  # type: () -> Version
-        raise NotImplementedError()
-
-    def get_paths(self):  # type: () -> Dict[str, str]
-        raise NotImplementedError()
-
-    def is_valid_for_marker(self, marker):  # type: (BaseMarker) -> bool
-        return marker.validate(self.marker_env)
-
-    def is_sane(self):  # type: () -> bool
-        """
-        Checks whether the current environment is sane or not.
-        """
-        return True
-
-    def run(self, bin, *args, **kwargs):
-        bin = self._bin(bin)
-        cmd = [bin] + list(args)
-        return self._run(cmd, **kwargs)
-
-    def run_python(self, *args, **kwargs):
-        return self.run(self._executable, *args, **kwargs)
-
-    def run_pip(self, *args, **kwargs):
-        pip = self.get_pip_command()
-        cmd = pip + list(args)
-        return self._run(cmd, **kwargs)
-
-    def run_python_script(self, content, **kwargs):  # type: (str, Any) -> str
-        return self.run(self._executable, "-W", "ignore", "-", input_=content, **kwargs)
-
-    def _run(self, cmd, **kwargs):
-        """
-        Run a command inside the Python environment.
-        """
-        call = kwargs.pop("call", False)
-        input_ = kwargs.pop("input_", None)
-
-        try:
-            if self._is_windows:
-                kwargs["shell"] = True
-
-            if kwargs.get("shell", False):
-                cmd = list_to_shell_command(cmd)
-
-            if input_:
-                output = subprocess.run(
-                    cmd,
-                    stdout=subprocess.PIPE,
-                    stderr=subprocess.STDOUT,
-                    input=encode(input_),
-                    check=True,
-                    **kwargs
-                ).stdout
-            elif call:
-                return subprocess.call(cmd, stderr=subprocess.STDOUT, **kwargs)
-            else:
-                output = subprocess.check_output(
-                    cmd, stderr=subprocess.STDOUT, **kwargs
-                )
-        except CalledProcessError as e:
-            raise EnvCommandError(e, input=input_)
-
-        return decode(output)
-
-    def execute(self, bin, *args, **kwargs):
-        bin = self._bin(bin)
-        env = kwargs.pop("env", {k: v for k, v in os.environ.items()})
-
-        if not self._is_windows:
-            args = [bin] + list(args)
-            return os.execvpe(bin, args, env=env)
-        else:
-            exe = subprocess.Popen([bin] + list(args), env=env, **kwargs)
-            exe.communicate()
-            return exe.returncode
-
-    def is_venv(self):  # type: () -> bool
-        raise NotImplementedError()
-
-    @property
-    def script_dirs(self):  # type: () -> List[Path]
-        if self._script_dirs is None:
-            self._script_dirs = (
-                [Path(self.paths["scripts"])]
-                if "scripts" in self.paths
-                else [self._bin_dir]
-            )
-            if self.userbase:
-                self._script_dirs.append(self.userbase / self._script_dirs[0].name)
-        return self._script_dirs
-
-    def _bin(self, bin):  # type: (str) -> str
-        """
-        Return path to the given executable.
-        """
-        if self._is_windows and not bin.endswith(".exe"):
-            bin_path = self._bin_dir / (bin + ".exe")
-        else:
-            bin_path = self._bin_dir / bin
-
-        if not bin_path.exists():
-            # On Windows, some executables can be in the base path
-            # This is especially true when installing Python with
-            # the official installer, where python.exe will be at
-            # the root of the env path.
-            # This is an edge case and should not be encountered
-            # in normal uses but this happens in the sonnet script
-            # that creates a fake virtual environment pointing to
-            # a base Python install.
-            if self._is_windows:
-                if not bin.endswith(".exe"):
-                    bin_path = self._path / (bin + ".exe")
-                else:
-                    bin_path = self._path / bin
-
-                if bin_path.exists():
-                    return str(bin_path)
-
-            return bin
-
-        return str(bin_path)
-
-    def __eq__(self, other):  # type: (Env) -> bool
-        return other.__class__ == self.__class__ and other.path == self.path
-
-    def __repr__(self):
-        return '{}("{}")'.format(self.__class__.__name__, self._path)
-
-
-class SystemEnv(Env):
-    """
-    A system (i.e. not a virtualenv) Python environment.
-    """
-
-    @property
-    def python(self):  # type: () -> str
-        return sys.executable
-
-    @property
-    def sys_path(self):  # type: () -> List[str]
-        return sys.path
-
-    def get_version_info(self):  # type: () -> Tuple[int, ...]
-        return sys.version_info
-
-    def get_python_implementation(self):  # type: () -> str
-        return platform.python_implementation()
-
-    def get_pip_command(self):  # type: () -> List[str]
-        # If we're not in a venv, assume the interpreter we're running on
-        # has a pip and use that
-        return [sys.executable, "-m", "pip"]
-
-    def get_paths(self):  # type: () -> Dict[str, str]
-        # We can't use sysconfig.get_paths() because
-        # on some distributions it does not return the proper paths
-        # (those used by pip for instance). We go through distutils
-        # to get the proper ones.
-        import site
-
-        from distutils.command.install import SCHEME_KEYS  # noqa
-        from distutils.core import Distribution
-
-        d = Distribution()
-        d.parse_config_files()
-        obj = d.get_command_obj("install", create=True)
-        obj.finalize_options()
-
-        paths = sysconfig.get_paths().copy()
-        for key in SCHEME_KEYS:
-            if key == "headers":
-                # headers is not a path returned by sysconfig.get_paths()
-                continue
-
-            paths[key] = getattr(obj, "install_{}".format(key))
-
-        if site.check_enableusersite() and hasattr(obj, "install_usersite"):
-            paths["usersite"] = getattr(obj, "install_usersite")
-            paths["userbase"] = getattr(obj, "install_userbase")
-
-        return paths
-
-    def get_supported_tags(self):  # type: () -> List[Tag]
-        return list(sys_tags())
-
-    def get_marker_env(self):  # type: () -> Dict[str, Any]
-        if hasattr(sys, "implementation"):
-            info = sys.implementation.version
-            iver = "{0.major}.{0.minor}.{0.micro}".format(info)
-            kind = info.releaselevel
-            if kind != "final":
-                iver += kind[0] + str(info.serial)
-
-            implementation_name = sys.implementation.name
-        else:
-            iver = "0"
-            implementation_name = ""
-
-        return {
-            "implementation_name": implementation_name,
-            "implementation_version": iver,
-            "os_name": os.name,
-            "platform_machine": platform.machine(),
-            "platform_release": platform.release(),
-            "platform_system": platform.system(),
-            "platform_version": platform.version(),
-            "python_full_version": platform.python_version(),
-            "platform_python_implementation": platform.python_implementation(),
-            "python_version": ".".join(
-                v for v in platform.python_version().split(".")[:2]
-            ),
-            "sys_platform": sys.platform,
-            "version_info": sys.version_info,
-            # Extra information
-            "interpreter_name": interpreter_name(),
-            "interpreter_version": interpreter_version(),
-        }
-
-    def get_pip_version(self):  # type: () -> Version
-        from pip import __version__
-
-        return Version.parse(__version__)
-
-    def is_venv(self):  # type: () -> bool
-        return self._path != self._base
-
-
-class VirtualEnv(Env):
-    """
-    A virtual Python environment.
-    """
-
-    def __init__(self, path, base=None):  # type: (Path, Optional[Path]) -> None
-        super(VirtualEnv, self).__init__(path, base)
-
-        # If base is None, it probably means this is
-        # a virtualenv created from VIRTUAL_ENV.
-        # In this case we need to get sys.base_prefix
-        # from inside the virtualenv.
-        if base is None:
-            self._base = Path(self.run_python_script(GET_BASE_PREFIX).strip())
-
-    @property
-    def sys_path(self):  # type: () -> List[str]
-        output = self.run_python_script(GET_SYS_PATH)
-
-        return json.loads(output)
-
-    def get_version_info(self):  # type: () -> Tuple[int, ...]
-        output = self.run_python_script(GET_PYTHON_VERSION)
-
-        return tuple([int(s) for s in output.strip().split(".")])
-
-    def get_python_implementation(self):  # type: () -> str
-        return self.marker_env["platform_python_implementation"]
-
-    def get_pip_command(self):  # type: () -> List[str]
-        # We're in a virtualenv that is known to be sane,
-        # so assume that we have a functional pip
-        return [self._bin(self._pip_executable)]
-
-    def get_supported_tags(self):  # type: () -> List[Tag]
-        file_path = Path(packaging.tags.__file__)
-        if file_path.suffix == ".pyc":
-            # Python 2
-            file_path = file_path.with_suffix(".py")
-
-        with file_path.open(encoding="utf-8") as f:
-            script = decode(f.read())
-
-        script = script.replace(
-            "from ._typing import TYPE_CHECKING, cast",
-            "TYPE_CHECKING = False\ncast = lambda type_, value: value",
-        )
-        script = script.replace(
-            "from ._typing import MYPY_CHECK_RUNNING, cast",
-            "MYPY_CHECK_RUNNING = False\ncast = lambda type_, value: value",
-        )
-
-        script += textwrap.dedent(
-            """
-            import json
-
-            print(json.dumps([(t.interpreter, t.abi, t.platform) for t in sys_tags()]))
-            """
-        )
-
-        output = self.run_python_script(script)
-
-        return [Tag(*t) for t in json.loads(output)]
-
-    def get_marker_env(self):  # type: () -> Dict[str, Any]
-        output = self.run(self._executable, "-", input_=GET_ENVIRONMENT_INFO)
-
-        return json.loads(output)
-
-    def get_pip_version(self):  # type: () -> Version
-        output = self.run_pip("--version").strip()
-        m = re.match("pip (.+?)(?: from .+)?$", output)
-        if not m:
-            return Version.parse("0.0")
-
-        return Version.parse(m.group(1))
-
-    def get_paths(self):  # type: () -> Dict[str, str]
-        output = self.run_python_script(GET_PATHS)
-
-        return json.loads(output)
-
-    def is_venv(self):  # type: () -> bool
-        return True
-
-    def is_sane(self):
-        # A virtualenv is considered sane if both "python" and "pip" exist.
-        return os.path.exists(self.python) and os.path.exists(self._bin("pip"))
-
-    def _run(self, cmd, **kwargs):
-        kwargs["env"] = self.get_temp_environ(environ=kwargs.get("env"))
-        return super(VirtualEnv, self)._run(cmd, **kwargs)
-
-    def get_temp_environ(
-        self, environ=None, exclude=None, **kwargs
-    ):  # type: (Optional[Dict[str, str]], Optional[List[str]], **str) -> Dict[str, str]
-        exclude = exclude or []
-        exclude.extend(["PYTHONHOME", "__PYVENV_LAUNCHER__"])
-
-        if environ:
-            environ = deepcopy(environ)
-            for key in exclude:
-                environ.pop(key, None)
-        else:
-            environ = {k: v for k, v in os.environ.items() if k not in exclude}
-
-        environ.update(kwargs)
-
-        environ["PATH"] = self._updated_path()
-        environ["VIRTUAL_ENV"] = str(self._path)
-
-        return environ
-
-    def execute(self, bin, *args, **kwargs):
-        kwargs["env"] = self.get_temp_environ(environ=kwargs.get("env"))
-        return super(VirtualEnv, self).execute(bin, *args, **kwargs)
-
-    @contextmanager
-    def temp_environ(self):
-        environ = dict(os.environ)
-        try:
-            yield
-        finally:
-            os.environ.clear()
-            os.environ.update(environ)
-
-    def _updated_path(self):
-        return os.pathsep.join([str(self._bin_dir), os.environ.get("PATH", "")])
-
-
-class GenericEnv(VirtualEnv):
-    def __init__(
-        self, path, base=None, child_env=None
-    ):  # type: (Path, Optional[Path], Optional[Env]) -> None
-        self._child_env = child_env
-
-        super(GenericEnv, self).__init__(path, base=base)
-
-    def find_executables(self):  # type: () -> None
-        patterns = [("python*", "pip*")]
-
-        if self._child_env:
-            minor_version = "{}.{}".format(
-                self._child_env.version_info[0], self._child_env.version_info[1]
-            )
-            major_version = "{}".format(self._child_env.version_info[0])
-            patterns = [
-                ("python{}".format(minor_version), "pip{}".format(minor_version)),
-                ("python{}".format(major_version), "pip{}".format(major_version)),
-            ]
-
-        python_executable = None
-        pip_executable = None
-
-        for python_pattern, pip_pattern in patterns:
-            if python_executable and pip_executable:
-                break
-
-            if not python_executable:
-                python_executables = sorted(
-                    [
-                        p.name
-                        for p in self._bin_dir.glob(python_pattern)
-                        if re.match(r"python(?:\d+(?:\.\d+)?)?(?:\.exe)?$", p.name)
-                    ]
-                )
-
-                if python_executables:
-                    executable = python_executables[0]
-                    if executable.endswith(".exe"):
-                        executable = executable[:-4]
-
-                    python_executable = executable
-
-            if not pip_executable:
-                pip_executables = sorted(
-                    [
-                        p.name
-                        for p in self._bin_dir.glob(pip_pattern)
-                        if re.match(r"pip(?:\d+(?:\.\d+)?)?(?:\.exe)?$", p.name)
-                    ]
-                )
-                if pip_executables:
-                    pip_executable = pip_executables[0]
-                    if pip_executable.endswith(".exe"):
-                        pip_executable = pip_executable[:-4]
-
-                    pip_executable = pip_executable
-
-            if python_executable:
-                self._executable = python_executable
-
-            if pip_executable:
-                self._pip_executable = pip_executable
-
-    def get_paths(self):  # type: () -> Dict[str, str]
-        output = self.run_python_script(GET_PATHS_FOR_GENERIC_ENVS)
-
-        return json.loads(output)
-
-    def execute(self, bin, *args, **kwargs):  # type: (str, str, Any) -> Optional[int]
-        return super(VirtualEnv, self).execute(bin, *args, **kwargs)
-
-    def _run(self, cmd, **kwargs):  # type: (List[str], Any) -> Optional[int]
-        return super(VirtualEnv, self)._run(cmd, **kwargs)
-
-    def is_venv(self):  # type: () -> bool
-        return self._path != self._base
-
-
-class NullEnv(SystemEnv):
-    def __init__(self, path=None, base=None, execute=False):
-        if path is None:
-            path = Path(sys.prefix)
-
-        super(NullEnv, self).__init__(path, base=base)
-
-        self._execute = execute
-        self.executed = []
-
-    def get_pip_command(self):  # type: () -> List[str]
-        return [self._bin("python"), "-m", "pip"]
-
-    def _run(self, cmd, **kwargs):
-        self.executed.append(cmd)
-
-        if self._execute:
-            return super(NullEnv, self)._run(cmd, **kwargs)
-
-    def execute(self, bin, *args, **kwargs):
-        self.executed.append([bin] + list(args))
-
-        if self._execute:
-            return super(NullEnv, self).execute(bin, *args, **kwargs)
-
-    def _bin(self, bin):
-        return bin
-
-
-class MockEnv(NullEnv):
-    def __init__(
-        self,
-        version_info=(3, 7, 0),
-        python_implementation="CPython",
-        platform="darwin",
-        os_name="posix",
-        is_venv=False,
-        pip_version="19.1",
-        sys_path=None,
-        marker_env=None,
-        supported_tags=None,
-        **kwargs
-    ):
-        super(MockEnv, self).__init__(**kwargs)
-
-        self._version_info = version_info
-        self._python_implementation = python_implementation
-        self._platform = platform
-        self._os_name = os_name
-        self._is_venv = is_venv
-        self._pip_version = Version.parse(pip_version)
-        self._sys_path = sys_path
-        self._mock_marker_env = marker_env
-        self._supported_tags = supported_tags
-
-    @property
-    def platform(self):  # type: () -> str
-        return self._platform
-
-    @property
-    def os(self):  # type: () -> str
-        return self._os_name
-
-    @property
-    def pip_version(self):
-        return self._pip_version
-
-    @property
-    def sys_path(self):
-        if self._sys_path is None:
-            return super(MockEnv, self).sys_path
-
-        return self._sys_path
-
-    def get_marker_env(self):  # type: () -> Dict[str, Any]
-        if self._mock_marker_env is not None:
-            return self._mock_marker_env
-
-        marker_env = super(MockEnv, self).get_marker_env()
-        marker_env["python_implementation"] = self._python_implementation
-        marker_env["version_info"] = self._version_info
-        marker_env["python_version"] = ".".join(str(v) for v in self._version_info[:2])
-        marker_env["python_full_version"] = ".".join(str(v) for v in self._version_info)
-        marker_env["sys_platform"] = self._platform
-        marker_env["interpreter_name"] = self._python_implementation.lower()
-        marker_env["interpreter_version"] = "cp" + "".join(
-            str(v) for v in self._version_info[:2]
-        )
-
-        return marker_env
-
-    def is_venv(self):  # type: () -> bool
-        return self._is_venv
diff --git a/conda_lock/_vendor/poetry/utils/env/__init__.py b/conda_lock/_vendor/poetry/utils/env/__init__.py
new file mode 100644
index 00000000..2cf81241
--- /dev/null
+++ b/conda_lock/_vendor/poetry/utils/env/__init__.py
@@ -0,0 +1,123 @@
+from __future__ import annotations
+
+from contextlib import contextmanager
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+from conda_lock._vendor.poetry.core.utils.helpers import temporary_directory
+
+from conda_lock._vendor.poetry.utils.env.base_env import Env
+from conda_lock._vendor.poetry.utils.env.env_manager import EnvManager
+from conda_lock._vendor.poetry.utils.env.exceptions import EnvCommandError
+from conda_lock._vendor.poetry.utils.env.exceptions import EnvError
+from conda_lock._vendor.poetry.utils.env.exceptions import IncorrectEnvError
+from conda_lock._vendor.poetry.utils.env.exceptions import InvalidCurrentPythonVersionError
+from conda_lock._vendor.poetry.utils.env.exceptions import NoCompatiblePythonVersionFound
+from conda_lock._vendor.poetry.utils.env.exceptions import PythonVersionNotFound
+from conda_lock._vendor.poetry.utils.env.generic_env import GenericEnv
+from conda_lock._vendor.poetry.utils.env.mock_env import MockEnv
+from conda_lock._vendor.poetry.utils.env.null_env import NullEnv
+from conda_lock._vendor.poetry.utils.env.script_strings import GET_BASE_PREFIX
+from conda_lock._vendor.poetry.utils.env.script_strings import GET_ENV_PATH_ONELINER
+from conda_lock._vendor.poetry.utils.env.script_strings import GET_ENVIRONMENT_INFO
+from conda_lock._vendor.poetry.utils.env.script_strings import GET_PATHS
+from conda_lock._vendor.poetry.utils.env.script_strings import GET_PATHS_FOR_GENERIC_ENVS
+from conda_lock._vendor.poetry.utils.env.script_strings import GET_PYTHON_VERSION
+from conda_lock._vendor.poetry.utils.env.script_strings import GET_PYTHON_VERSION_ONELINER
+from conda_lock._vendor.poetry.utils.env.script_strings import GET_SYS_PATH
+from conda_lock._vendor.poetry.utils.env.script_strings import GET_SYS_TAGS
+from conda_lock._vendor.poetry.utils.env.site_packages import SitePackages
+from conda_lock._vendor.poetry.utils.env.system_env import SystemEnv
+from conda_lock._vendor.poetry.utils.env.virtual_env import VirtualEnv
+
+
+if TYPE_CHECKING:
+    from collections.abc import Iterator
+
+    from conda_lock._vendor.cleo.io.io import IO
+    from conda_lock._vendor.poetry.core.poetry import Poetry as CorePoetry
+
+
+@contextmanager
+def ephemeral_environment(
+    executable: Path | None = None,
+    flags: dict[str, str | bool] | None = None,
+) -> Iterator[VirtualEnv]:
+    with temporary_directory() as tmp_dir:
+        # TODO: cache PEP 517 build environment corresponding to each project venv
+        venv_dir = Path(tmp_dir) / ".venv"
+        EnvManager.build_venv(
+            path=venv_dir,
+            executable=executable,
+            flags=flags,
+        )
+        yield VirtualEnv(venv_dir, venv_dir)
+
+
+@contextmanager
+def build_environment(
+    poetry: CorePoetry, env: Env | None = None, io: IO | None = None
+) -> Iterator[Env]:
+    """
+    If a build script is specified for the project, there could be additional
+    build-time dependencies, e.g. cython, setuptools, etc. In these cases, we
+    create an ephemeral build environment with all requirements specified under
+    `build-system.requires` and return it. Otherwise, the given default project
+    environment is returned.
+    """
+    if not env or poetry.package.build_script:
+        with ephemeral_environment(executable=env.python if env else None) as venv:
+            if io:
+                requires = [
+                    f"{requirement}"
+                    for requirement in poetry.pyproject.build_system.requires
+                ]
+
+                io.write_error_line(
+                    "Preparing build environment with build-system requirements"
+                    f" {', '.join(requires)}"
+                )
+
+            output = venv.run_pip(
+                "install",
+                "--disable-pip-version-check",
+                "--ignore-installed",
+                "--no-input",
+                *poetry.pyproject.build_system.requires,
+            )
+
+            if io and io.is_debug() and output:
+                io.write_error(output)
+
+            yield venv
+    else:
+        yield env
+
+
+__all__ = [
+    "GET_BASE_PREFIX",
+    "GET_ENVIRONMENT_INFO",
+    "GET_PATHS",
+    "GET_PYTHON_VERSION",
+    "GET_SYS_PATH",
+    "GET_SYS_TAGS",
+    "GET_ENV_PATH_ONELINER",
+    "GET_PYTHON_VERSION_ONELINER",
+    "GET_PATHS_FOR_GENERIC_ENVS",
+    "EnvError",
+    "EnvCommandError",
+    "IncorrectEnvError",
+    "InvalidCurrentPythonVersionError",
+    "NoCompatiblePythonVersionFound",
+    "PythonVersionNotFound",
+    "Env",
+    "EnvManager",
+    "GenericEnv",
+    "MockEnv",
+    "NullEnv",
+    "SystemEnv",
+    "VirtualEnv",
+    "SitePackages",
+    "build_environment",
+    "ephemeral_environment",
+]
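
A minimal usage sketch for the `ephemeral_environment` helper above (a sketch, not a definitive API guarantee; it assumes the vendored package is importable and that the current interpreter can seed a virtualenv):

    from conda_lock._vendor.poetry.utils.env import ephemeral_environment

    with ephemeral_environment() as venv:
        # The venv lives under a temporary directory and disappears on exit.
        print(venv.path)
        print(venv.run_python_script("import sys; print(sys.version)"))
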
diff --git a/conda_lock/_vendor/poetry/utils/env/base_env.py b/conda_lock/_vendor/poetry/utils/env/base_env.py
new file mode 100644
index 00000000..80dad62c
--- /dev/null
+++ b/conda_lock/_vendor/poetry/utils/env/base_env.py
@@ -0,0 +1,406 @@
+from __future__ import annotations
+
+import contextlib
+import os
+import re
+import subprocess
+import sys
+import sysconfig
+
+from pathlib import Path
+from subprocess import CalledProcessError
+from typing import TYPE_CHECKING
+from typing import Any
+
+from virtualenv.seed.wheels.embed import get_embed_wheel
+
+from conda_lock._vendor.poetry.utils.env.exceptions import EnvCommandError
+from conda_lock._vendor.poetry.utils.env.site_packages import SitePackages
+from conda_lock._vendor.poetry.utils.helpers import get_real_windows_path
+
+
+if TYPE_CHECKING:
+    from packaging.tags import Tag
+    from conda_lock._vendor.poetry.core.version.markers import BaseMarker
+    from virtualenv.seed.wheels.util import Wheel
+
+    from conda_lock._vendor.poetry.utils.env.generic_env import GenericEnv
+
+
+class Env:
+    """
+    An abstract Python environment.
+    """
+
+    def __init__(self, path: Path, base: Path | None = None) -> None:
+        self._is_windows = sys.platform == "win32"
+        self._is_mingw = sysconfig.get_platform().startswith("mingw")
+        self._is_conda = bool(os.environ.get("CONDA_DEFAULT_ENV"))
+
+        if self._is_windows:
+            path = get_real_windows_path(path)
+            base = get_real_windows_path(base) if base else None
+
+        bin_dir = "bin" if not self._is_windows or self._is_mingw else "Scripts"
+        self._path = path
+        self._bin_dir = self._path / bin_dir
+
+        self._executable = "python"
+        self._pip_executable = "pip"
+
+        self.find_executables()
+
+        self._base = base or path
+
+        self._marker_env: dict[str, Any] | None = None
+        self._site_packages: SitePackages | None = None
+        self._paths: dict[str, str] | None = None
+        self._supported_tags: list[Tag] | None = None
+        self._purelib: Path | None = None
+        self._platlib: Path | None = None
+        self._script_dirs: list[Path] | None = None
+
+        self._embedded_pip_path: Path | None = None
+
+    @property
+    def path(self) -> Path:
+        return self._path
+
+    @property
+    def base(self) -> Path:
+        return self._base
+
+    @property
+    def version_info(self) -> tuple[int, int, int, str, int]:
+        version_info: tuple[int, int, int, str, int] = self.marker_env["version_info"]
+        return version_info
+
+    @property
+    def python_implementation(self) -> str:
+        implementation: str = self.marker_env["platform_python_implementation"]
+        return implementation
+
+    @property
+    def python(self) -> Path:
+        """
+        Path to current python executable
+        """
+        return Path(self._bin(self._executable))
+
+    @property
+    def marker_env(self) -> dict[str, Any]:
+        if self._marker_env is None:
+            self._marker_env = self.get_marker_env()
+
+        return self._marker_env
+
+    @property
+    def parent_env(self) -> GenericEnv:
+        from conda_lock._vendor.poetry.utils.env.generic_env import GenericEnv
+
+        return GenericEnv(self.base, child_env=self)
+
+    def _find_python_executable(self) -> None:
+        bin_dir = self._bin_dir
+
+        if self._is_windows and self._is_conda:
+            bin_dir = self._path
+
+        python_executables = sorted(
+            p.name
+            for p in bin_dir.glob("python*")
+            if re.match(r"python(?:\d+(?:\.\d+)?)?(?:\.exe)?$", p.name)
+        )
+        if python_executables:
+            executable = python_executables[0]
+            if executable.endswith(".exe"):
+                executable = executable[:-4]
+
+            self._executable = executable
+
+    def _find_pip_executable(self) -> None:
+        pip_executables = sorted(
+            p.name
+            for p in self._bin_dir.glob("pip*")
+            if re.match(r"pip(?:\d+(?:\.\d+)?)?(?:\.exe)?$", p.name)
+        )
+        if pip_executables:
+            pip_executable = pip_executables[0]
+            if pip_executable.endswith(".exe"):
+                pip_executable = pip_executable[:-4]
+
+            self._pip_executable = pip_executable
+
+    def find_executables(self) -> None:
+        self._find_python_executable()
+        self._find_pip_executable()
+
+    def get_embedded_wheel(self, distribution: str) -> Path:
+        wheel: Wheel = get_embed_wheel(
+            distribution, f"{self.version_info[0]}.{self.version_info[1]}"
+        )
+        path: Path = wheel.path
+        return path
+
+    @property
+    def pip_embedded(self) -> Path:
+        if self._embedded_pip_path is None:
+            self._embedded_pip_path = self.get_embedded_wheel("pip") / "pip"
+        return self._embedded_pip_path
+
+    @property
+    def pip(self) -> Path:
+        """
+        Path to current pip executable
+        """
+        # we do not use as_posix() here due to issues with windows pathlib2
+        # implementation
+        path = Path(self._bin(self._pip_executable))
+        if not path.exists():
+            return self.pip_embedded
+        return path
+
+    @property
+    def platform(self) -> str:
+        return sys.platform
+
+    @property
+    def os(self) -> str:
+        return os.name
+
+    @property
+    def site_packages(self) -> SitePackages:
+        if self._site_packages is None:
+            # we disable write checks if no user site exists
+            fallbacks = [self.usersite] if self.usersite else []
+            self._site_packages = SitePackages(
+                self.purelib,
+                self.platlib,
+                fallbacks,
+                skip_write_checks=not fallbacks,
+            )
+        return self._site_packages
+
+    @property
+    def usersite(self) -> Path | None:
+        if "usersite" in self.paths:
+            return Path(self.paths["usersite"])
+        return None
+
+    @property
+    def userbase(self) -> Path | None:
+        if "userbase" in self.paths:
+            return Path(self.paths["userbase"])
+        return None
+
+    @property
+    def purelib(self) -> Path:
+        if self._purelib is None:
+            self._purelib = Path(self.paths["purelib"])
+
+        return self._purelib
+
+    @property
+    def platlib(self) -> Path:
+        if self._platlib is None:
+            if "platlib" in self.paths:
+                self._platlib = Path(self.paths["platlib"])
+            else:
+                self._platlib = self.purelib
+
+        return self._platlib
+
+    def _get_lib_dirs(self) -> list[Path]:
+        return [self.purelib, self.platlib]
+
+    def is_path_relative_to_lib(self, path: Path) -> bool:
+        for lib_path in self._get_lib_dirs():
+            with contextlib.suppress(ValueError):
+                path.relative_to(lib_path)
+                return True
+
+        return False
+
+    @property
+    def sys_path(self) -> list[str]:
+        raise NotImplementedError()
+
+    @property
+    def paths(self) -> dict[str, str]:
+        if self._paths is None:
+            self._paths = self.get_paths()
+
+            if self.is_venv():
+                # We copy pip's logic here for the `include` path
+                self._paths["include"] = str(
+                    self.path.joinpath(
+                        "include",
+                        "site",
+                        f"python{self.version_info[0]}.{self.version_info[1]}",
+                    )
+                )
+
+        return self._paths
+
+    @property
+    def supported_tags(self) -> list[Tag]:
+        if self._supported_tags is None:
+            self._supported_tags = self.get_supported_tags()
+
+        return self._supported_tags
+
+    @classmethod
+    def get_base_prefix(cls) -> Path:
+        real_prefix = getattr(sys, "real_prefix", None)
+        if real_prefix is not None:
+            return Path(real_prefix)
+
+        base_prefix = getattr(sys, "base_prefix", None)
+        if base_prefix is not None:
+            return Path(base_prefix)
+
+        return Path(sys.prefix)
+
+    def get_version_info(self) -> tuple[Any, ...]:
+        raise NotImplementedError()
+
+    def get_python_implementation(self) -> str:
+        raise NotImplementedError()
+
+    def get_marker_env(self) -> dict[str, Any]:
+        raise NotImplementedError()
+
+    def get_pip_command(self, embedded: bool = False) -> list[str]:
+        if embedded or not Path(self._bin(self._pip_executable)).exists():
+            return [str(self.python), str(self.pip_embedded)]
+        # run as module so that pip can update itself on Windows
+        return [str(self.python), "-m", "pip"]
+
+    def get_supported_tags(self) -> list[Tag]:
+        raise NotImplementedError()
+
+    def get_paths(self) -> dict[str, str]:
+        raise NotImplementedError()
+
+    def is_valid_for_marker(self, marker: BaseMarker) -> bool:
+        valid: bool = marker.validate(self.marker_env)
+        return valid
+
+    def is_sane(self) -> bool:
+        """
+        Checks whether the current environment is sane or not.
+        """
+        return True
+
+    def get_command_from_bin(self, bin: str) -> list[str]:
+        if bin == "pip":
+            # when pip is required we need to ensure that we fall back to
+            # embedded pip when pip is not available in the environment
+            return self.get_pip_command()
+
+        return [self._bin(bin)]
+
+    def run(self, bin: str, *args: str, **kwargs: Any) -> str:
+        cmd = self.get_command_from_bin(bin) + list(args)
+        return self._run(cmd, **kwargs)
+
+    def run_pip(self, *args: str, **kwargs: Any) -> str:
+        pip = self.get_pip_command()
+        cmd = pip + list(args)
+        return self._run(cmd, **kwargs)
+
+    def run_python_script(self, content: str, **kwargs: Any) -> str:
+        return self.run(
+            self._executable,
+            "-I",
+            "-W",
+            "ignore",
+            "-c",
+            content,
+            stderr=subprocess.PIPE,
+            **kwargs,
+        )
+
+    def _run(self, cmd: list[str], **kwargs: Any) -> str:
+        """
+        Run a command inside the Python environment.
+        """
+        call = kwargs.pop("call", False)
+        env = kwargs.pop("env", dict(os.environ))
+        stderr = kwargs.pop("stderr", subprocess.STDOUT)
+
+        try:
+            if call:
+                assert stderr != subprocess.PIPE
+                subprocess.check_call(cmd, stderr=stderr, env=env, **kwargs)
+                output = ""
+            else:
+                output = subprocess.check_output(
+                    cmd, stderr=stderr, env=env, text=True, **kwargs
+                )
+        except CalledProcessError as e:
+            raise EnvCommandError(e)
+
+        return output
+
+    def execute(self, bin: str, *args: str, **kwargs: Any) -> int:
+        command = self.get_command_from_bin(bin) + list(args)
+        env = kwargs.pop("env", dict(os.environ))
+
+        if not self._is_windows:
+            return os.execvpe(command[0], command, env=env)
+
+        kwargs["shell"] = True
+        exe = subprocess.Popen(command, env=env, **kwargs)
+        exe.communicate()
+        return exe.returncode
+
+    def is_venv(self) -> bool:
+        raise NotImplementedError()
+
+    @property
+    def script_dirs(self) -> list[Path]:
+        if self._script_dirs is None:
+            scripts = self.paths.get("scripts")
+            self._script_dirs = [
+                Path(scripts) if scripts is not None else self._bin_dir
+            ]
+            if self.userbase:
+                self._script_dirs.append(self.userbase / self._script_dirs[0].name)
+        return self._script_dirs
+
+    def _bin(self, bin: str) -> str:
+        """
+        Return path to the given executable.
+        """
+        if self._is_windows and not bin.endswith(".exe"):
+            bin_path = self._bin_dir / (bin + ".exe")
+        else:
+            bin_path = self._bin_dir / bin
+
+        if not bin_path.exists():
+            # On Windows, some executables can be in the base path
+            # This is especially true when installing Python with
+            # the official installer, where python.exe will be at
+            # the root of the env path.
+            if self._is_windows:
+                if not bin.endswith(".exe"):
+                    bin_path = self._path / (bin + ".exe")
+                else:
+                    bin_path = self._path / bin
+
+                if bin_path.exists():
+                    return str(bin_path)
+
+            return bin
+
+        return str(bin_path)
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, Env):
+            return False
+
+        return other.__class__ == self.__class__ and other.path == self.path
+
+    def __repr__(self) -> str:
+        return f'{self.__class__.__name__}("{self._path}")'
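
The `Env` base class above caches executable discovery, marker data, and install paths on first access. A minimal sketch, assuming `SystemEnv` keeps the `(path, base)` constructor used by its subclasses in this package:

    import sys
    from pathlib import Path

    from conda_lock._vendor.poetry.utils.env import SystemEnv

    env = SystemEnv(Path(sys.prefix))
    print(env.python)             # resolved from the bin dir's "python*" entries
    print(env.version_info[:3])   # computed once via the marker_env property
    print(env.get_pip_command())  # [<python>, "-m", "pip"], or the embedded pip
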
diff --git a/conda_lock/_vendor/poetry/utils/env/env_manager.py b/conda_lock/_vendor/poetry/utils/env/env_manager.py
new file mode 100644
index 00000000..c53e285a
--- /dev/null
+++ b/conda_lock/_vendor/poetry/utils/env/env_manager.py
@@ -0,0 +1,755 @@
+from __future__ import annotations
+
+import base64
+import hashlib
+import os
+import plistlib
+import re
+import shutil
+import subprocess
+import sys
+
+from functools import cached_property
+from pathlib import Path
+from subprocess import CalledProcessError
+from typing import TYPE_CHECKING
+
+import tomlkit
+import virtualenv
+
+from conda_lock._vendor.cleo.io.null_io import NullIO
+from conda_lock._vendor.cleo.io.outputs.output import Verbosity
+from conda_lock._vendor.poetry.core.constraints.version import Version
+from conda_lock._vendor.poetry.core.constraints.version import parse_constraint
+
+from conda_lock._vendor.poetry.toml.file import TOMLFile
+from conda_lock._vendor.poetry.utils._compat import WINDOWS
+from conda_lock._vendor.poetry.utils._compat import encode
+from conda_lock._vendor.poetry.utils.env.exceptions import EnvCommandError
+from conda_lock._vendor.poetry.utils.env.exceptions import IncorrectEnvError
+from conda_lock._vendor.poetry.utils.env.exceptions import InvalidCurrentPythonVersionError
+from conda_lock._vendor.poetry.utils.env.exceptions import NoCompatiblePythonVersionFound
+from conda_lock._vendor.poetry.utils.env.exceptions import PythonVersionNotFound
+from conda_lock._vendor.poetry.utils.env.generic_env import GenericEnv
+from conda_lock._vendor.poetry.utils.env.script_strings import GET_ENV_PATH_ONELINER
+from conda_lock._vendor.poetry.utils.env.script_strings import GET_PYTHON_VERSION_ONELINER
+from conda_lock._vendor.poetry.utils.env.system_env import SystemEnv
+from conda_lock._vendor.poetry.utils.env.virtual_env import VirtualEnv
+from conda_lock._vendor.poetry.utils.helpers import get_real_windows_path
+from conda_lock._vendor.poetry.utils.helpers import remove_directory
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.cleo.io.io import IO
+
+    from conda_lock._vendor.poetry.poetry import Poetry
+    from conda_lock._vendor.poetry.utils.env.base_env import Env
+
+
+class EnvsFile(TOMLFile):
+    """
+    This file contains one section per project with the project's base env name
+    as section name. Each section contains the minor and patch version of the
+    python executable used to create the currently active virtualenv.
+
+    Example:
+
+    [poetry-QRErDmmj]
+    minor = "3.9"
+    patch = "3.9.13"
+
+    [poetry-core-m5r7DkRA]
+    minor = "3.11"
+    patch = "3.11.6"
+    """
+
+    def remove_section(self, name: str, minor: str | None = None) -> str | None:
+        """
+        Remove a section from the envs file.
+
+        If "minor" is given, the section is only removed if its minor value
+        matches "minor".
+
+        Returns the "minor" value of the removed section.
+        """
+        envs = self.read()
+        current_env = envs.get(name)
+        if current_env is not None and (not minor or current_env["minor"] == minor):
+            del envs[name]
+            self.write(envs)
+            minor = current_env["minor"]
+            assert isinstance(minor, str)
+            return minor
+
+        return None
+
+
+class EnvManager:
+    """
+    Environments manager
+    """
+
+    _env = None
+
+    ENVS_FILE = "envs.toml"
+
+    def __init__(self, poetry: Poetry, io: None | IO = None) -> None:
+        self._poetry = poetry
+        self._io = io or NullIO()
+
+    @staticmethod
+    def _full_python_path(python: str) -> Path | None:
+        # e.g. first find pythonXY.bat on Windows.
+        path_python = shutil.which(python)
+        if path_python is None:
+            return None
+
+        try:
+            executable = subprocess.check_output(
+                [path_python, "-c", "import sys; print(sys.executable)"], text=True
+            ).strip()
+            return Path(executable)
+
+        except CalledProcessError:
+            return None
+
+    @staticmethod
+    def _detect_active_python(io: None | IO = None) -> Path | None:
+        io = io or NullIO()
+        io.write_error_line(
+            "Trying to detect current active python executable as specified in"
+            " the config.",
+            verbosity=Verbosity.VERBOSE,
+        )
+
+        executable = EnvManager._full_python_path("python")
+
+        if executable is not None:
+            io.write_error_line(f"Found: {executable}", verbosity=Verbosity.VERBOSE)
+        else:
+            io.write_error_line(
+                "Unable to detect the current active python executable. Falling"
+                " back to default.",
+                verbosity=Verbosity.VERBOSE,
+            )
+
+        return executable
+
+    @staticmethod
+    def get_python_version(
+        precision: int = 3,
+        prefer_active_python: bool = False,
+        io: None | IO = None,
+    ) -> Version:
+        version = ".".join(str(v) for v in sys.version_info[:precision])
+
+        if prefer_active_python:
+            executable = EnvManager._detect_active_python(io)
+
+            if executable:
+                python_patch = subprocess.check_output(
+                    [executable, "-c", GET_PYTHON_VERSION_ONELINER], text=True
+                ).strip()
+
+                version = ".".join(str(v) for v in python_patch.split(".")[:precision])
+
+        return Version.parse(version)
+
+    @property
+    def in_project_venv(self) -> Path:
+        venv: Path = self._poetry.file.path.parent / ".venv"
+        return venv
+
+    @cached_property
+    def envs_file(self) -> EnvsFile:
+        return EnvsFile(self._poetry.config.virtualenvs_path / self.ENVS_FILE)
+
+    @cached_property
+    def base_env_name(self) -> str:
+        return self.generate_env_name(
+            self._poetry.package.name,
+            str(self._poetry.file.path.parent),
+        )
+
+    def activate(self, python: str) -> Env:
+        venv_path = self._poetry.config.virtualenvs_path
+
+        try:
+            python_version = Version.parse(python)
+            python = f"python{python_version.major}"
+            if python_version.precision > 1:
+                python += f".{python_version.minor}"
+        except ValueError:
+            # Executable in PATH or full executable path
+            pass
+
+        python_path = self._full_python_path(python)
+        if python_path is None:
+            raise PythonVersionNotFound(python)
+
+        try:
+            python_version_string = subprocess.check_output(
+                [python_path, "-c", GET_PYTHON_VERSION_ONELINER], text=True
+            )
+        except CalledProcessError as e:
+            raise EnvCommandError(e)
+
+        python_version = Version.parse(python_version_string.strip())
+        minor = f"{python_version.major}.{python_version.minor}"
+        patch = python_version.text
+
+        create = False
+        # If we are required to create the virtual environment in the project directory,
+        # create or recreate it if needed
+        if self.use_in_project_venv():
+            create = False
+            venv = self.in_project_venv
+            if venv.exists():
+                # We need to check if the patch version is correct
+                _venv = VirtualEnv(venv)
+                current_patch = ".".join(str(v) for v in _venv.version_info[:3])
+
+                if patch != current_patch:
+                    create = True
+
+            self.create_venv(executable=python_path, force=create)
+
+            return self.get(reload=True)
+
+        envs = tomlkit.document()
+        if self.envs_file.exists():
+            envs = self.envs_file.read()
+            current_env = envs.get(self.base_env_name)
+            if current_env is not None:
+                current_minor = current_env["minor"]
+                current_patch = current_env["patch"]
+
+                if current_minor == minor and current_patch != patch:
+                    # We need to recreate
+                    create = True
+
+        name = f"{self.base_env_name}-py{minor}"
+        venv = venv_path / name
+
+        # Create if needed
+        if not venv.exists() or venv.exists() and create:
+            in_venv = os.environ.get("VIRTUAL_ENV") is not None
+            if in_venv or not venv.exists():
+                create = True
+
+            if venv.exists():
+                # We need to check if the patch version is correct
+                _venv = VirtualEnv(venv)
+                current_patch = ".".join(str(v) for v in _venv.version_info[:3])
+
+                if patch != current_patch:
+                    create = True
+
+            self.create_venv(executable=python_path, force=create)
+
+        # Activate
+        envs[self.base_env_name] = {"minor": minor, "patch": patch}
+        self.envs_file.write(envs)
+
+        return self.get(reload=True)
+
+    def deactivate(self) -> None:
+        venv_path = self._poetry.config.virtualenvs_path
+
+        if self.envs_file.exists() and (
+            minor := self.envs_file.remove_section(self.base_env_name)
+        ):
+            venv = venv_path / f"{self.base_env_name}-py{minor}"
+            self._io.write_error_line(
+                f"Deactivating virtualenv: {venv}"
+            )
+
+    def get(self, reload: bool = False) -> Env:
+        if self._env is not None and not reload:
+            return self._env
+
+        prefer_active_python = self._poetry.config.get(
+            "virtualenvs.prefer-active-python"
+        )
+        python_minor = self.get_python_version(
+            precision=2, prefer_active_python=prefer_active_python, io=self._io
+        ).to_string()
+
+        env = None
+        if self.envs_file.exists():
+            envs = self.envs_file.read()
+            env = envs.get(self.base_env_name)
+            if env:
+                python_minor = env["minor"]
+
+        # Check if we are inside a virtualenv or not
+        # Conda sets CONDA_PREFIX in its envs, see
+        # https://github.com/conda/conda/issues/2764
+        env_prefix = os.environ.get("VIRTUAL_ENV", os.environ.get("CONDA_PREFIX"))
+        conda_env_name = os.environ.get("CONDA_DEFAULT_ENV")
+        # It's probably not a good idea to pollute Conda's global "base" env, since
+        # most users have it activated all the time.
+        in_venv = env_prefix is not None and conda_env_name != "base"
+
+        if not in_venv or env is not None:
+            # Checking if a local virtualenv exists
+            if self.in_project_venv_exists():
+                venv = self.in_project_venv
+
+                return VirtualEnv(venv)
+
+            create_venv = self._poetry.config.get("virtualenvs.create", True)
+
+            if not create_venv:
+                return self.get_system_env()
+
+            venv_path = self._poetry.config.virtualenvs_path
+
+            name = f"{self.base_env_name}-py{python_minor.strip()}"
+
+            venv = venv_path / name
+
+            if not venv.exists():
+                return self.get_system_env()
+
+            return VirtualEnv(venv)
+
+        if env_prefix is not None:
+            prefix = Path(env_prefix)
+            base_prefix = None
+        else:
+            prefix = Path(sys.prefix)
+            base_prefix = self.get_base_prefix()
+
+        return VirtualEnv(prefix, base_prefix)
+
+    def list(self, name: str | None = None) -> list[VirtualEnv]:
+        if name is None:
+            name = self._poetry.package.name
+
+        venv_name = self.generate_env_name(name, str(self._poetry.file.path.parent))
+        venv_path = self._poetry.config.virtualenvs_path
+        env_list = [VirtualEnv(p) for p in sorted(venv_path.glob(f"{venv_name}-py*"))]
+
+        if self.in_project_venv_exists():
+            venv = self.in_project_venv
+            env_list.insert(0, VirtualEnv(venv))
+        return env_list
+
+    @staticmethod
+    def check_env_is_for_current_project(env: str, base_env_name: str) -> bool:
+        """
+        Check if env name starts with projects name.
+
+        This is done to prevent action on other project's envs.
+        """
+        return env.startswith(base_env_name)
+
+    def remove(self, python: str) -> Env:
+        python_path = Path(python)
+        if python_path.is_file():
+            # Validate env name if provided env is a full path to python
+            try:
+                env_dir = subprocess.check_output(
+                    [python, "-c", GET_ENV_PATH_ONELINER], text=True
+                ).strip("\n")
+                env_name = Path(env_dir).name
+                if not self.check_env_is_for_current_project(
+                    env_name, self.base_env_name
+                ):
+                    raise IncorrectEnvError(env_name)
+            except CalledProcessError as e:
+                raise EnvCommandError(e)
+
+        if self.check_env_is_for_current_project(python, self.base_env_name):
+            venvs = self.list()
+            for venv in venvs:
+                if venv.path.name == python:
+                    # Exact virtualenv name
+                    if self.envs_file.exists():
+                        venv_minor = ".".join(str(v) for v in venv.version_info[:2])
+                        self.envs_file.remove_section(self.base_env_name, venv_minor)
+
+                    self.remove_venv(venv.path)
+
+                    return venv
+
+            raise ValueError(
+                f'Environment "{python}" does not exist.'
+            )
+        else:
+            venv_path = self._poetry.config.virtualenvs_path
+            # Get all the poetry envs, even for other projects
+            env_names = [p.name for p in sorted(venv_path.glob("*-*-py*"))]
+            if python in env_names:
+                raise IncorrectEnvError(python)
+
+        try:
+            python_version = Version.parse(python)
+            python = f"python{python_version.major}"
+            if python_version.precision > 1:
+                python += f".{python_version.minor}"
+        except ValueError:
+            # Executable in PATH or full executable path
+            pass
+
+        try:
+            python_version_string = subprocess.check_output(
+                [python, "-c", GET_PYTHON_VERSION_ONELINER], text=True
+            )
+        except CalledProcessError as e:
+            raise EnvCommandError(e)
+
+        python_version = Version.parse(python_version_string.strip())
+        minor = f"{python_version.major}.{python_version.minor}"
+
+        name = f"{self.base_env_name}-py{minor}"
+        venv_path = venv_path / name
+
+        if not venv_path.exists():
+            raise ValueError(f'Environment "{name}" does not exist.')
+
+        if self.envs_file.exists():
+            self.envs_file.remove_section(self.base_env_name, minor)
+
+        self.remove_venv(venv_path)
+
+        return VirtualEnv(venv_path, venv_path)
+
+    def use_in_project_venv(self) -> bool:
+        in_project: bool | None = self._poetry.config.get("virtualenvs.in-project")
+        if in_project is not None:
+            return in_project
+
+        return self.in_project_venv.is_dir()
+
+    def in_project_venv_exists(self) -> bool:
+        in_project: bool | None = self._poetry.config.get("virtualenvs.in-project")
+        if in_project is False:
+            return False
+
+        return self.in_project_venv.is_dir()
+
+    def create_venv(
+        self,
+        name: str | None = None,
+        executable: Path | None = None,
+        force: bool = False,
+    ) -> Env:
+        if self._env is not None and not force:
+            return self._env
+
+        cwd = self._poetry.file.path.parent
+        env = self.get(reload=True)
+
+        if not env.is_sane():
+            force = True
+
+        if env.is_venv() and not force:
+            # Already inside a virtualenv.
+            current_python = Version.parse(
+                ".".join(str(c) for c in env.version_info[:3])
+            )
+            if not self._poetry.package.python_constraint.allows(current_python):
+                raise InvalidCurrentPythonVersionError(
+                    self._poetry.package.python_versions, str(current_python)
+                )
+            return env
+
+        create_venv = self._poetry.config.get("virtualenvs.create")
+        in_project_venv = self.use_in_project_venv()
+        prefer_active_python = self._poetry.config.get(
+            "virtualenvs.prefer-active-python"
+        )
+        venv_prompt = self._poetry.config.get("virtualenvs.prompt")
+
+        if not executable and prefer_active_python:
+            executable = self._detect_active_python()
+
+        venv_path = (
+            self.in_project_venv
+            if in_project_venv
+            else self._poetry.config.virtualenvs_path
+        )
+        if not name:
+            name = self._poetry.package.name
+
+        python_patch = ".".join([str(v) for v in sys.version_info[:3]])
+        python_minor = ".".join([str(v) for v in sys.version_info[:2]])
+        if executable:
+            python_patch = subprocess.check_output(
+                [executable, "-c", GET_PYTHON_VERSION_ONELINER], text=True
+            ).strip()
+            python_minor = ".".join(python_patch.split(".")[:2])
+
+        supported_python = self._poetry.package.python_constraint
+        if not supported_python.allows(Version.parse(python_patch)):
+            # The currently activated or chosen Python version
+            # is not compatible with the Python constraint specified
+            # for the project.
+            # If an executable has been specified, we stop there
+            # and notify the user of the incompatibility.
+            # Otherwise, we try to find a compatible Python version.
+            if executable and not prefer_active_python:
+                raise NoCompatiblePythonVersionFound(
+                    self._poetry.package.python_versions, python_patch
+                )
+
+            self._io.write_error_line(
+                f"The currently activated Python version {python_patch} is not"
+                f" supported by the project ({self._poetry.package.python_versions}).\n"
+                "Trying to find and use a compatible version. "
+            )
+
+            for suffix in sorted(
+                self._poetry.package.AVAILABLE_PYTHONS,
+                key=lambda v: (v.startswith("3"), -len(v), v),
+                reverse=True,
+            ):
+                if len(suffix) == 1:
+                    if not parse_constraint(f"^{suffix}.0").allows_any(
+                        supported_python
+                    ):
+                        continue
+                elif not supported_python.allows_any(parse_constraint(suffix + ".*")):
+                    continue
+
+                python_name = f"python{suffix}"
+                if self._io.is_debug():
+                    self._io.write_error_line(f"Trying {python_name}")
+
+                python = self._full_python_path(python_name)
+                if python is None:
+                    continue
+
+                try:
+                    python_patch = subprocess.check_output(
+                        [python, "-c", GET_PYTHON_VERSION_ONELINER],
+                        stderr=subprocess.STDOUT,
+                        text=True,
+                    ).strip()
+                except CalledProcessError:
+                    continue
+
+                if supported_python.allows(Version.parse(python_patch)):
+                    self._io.write_error_line(
+                        f"Using {python_name} ({python_patch})"
+                    )
+                    executable = python
+                    python_minor = ".".join(python_patch.split(".")[:2])
+                    break
+
+            if not executable:
+                raise NoCompatiblePythonVersionFound(
+                    self._poetry.package.python_versions
+                )
+
+        if in_project_venv:
+            venv = venv_path
+        else:
+            name = self.generate_env_name(name, str(cwd))
+            name = f"{name}-py{python_minor.strip()}"
+            venv = venv_path / name
+
+        if venv_prompt is not None:
+            venv_prompt = venv_prompt.format(
+                project_name=self._poetry.package.name or "virtualenv",
+                python_version=python_minor,
+            )
+
+        if not venv.exists():
+            if create_venv is False:
+                self._io.write_error_line(
+                    ""
+                    "Skipping virtualenv creation, "
+                    "as specified in config file."
+                    ""
+                )
+
+                return self.get_system_env()
+
+            self._io.write_error_line(
+                f"Creating virtualenv {name} in"
+                f" {venv_path if not WINDOWS else get_real_windows_path(venv_path)!s}"
+            )
+        else:
+            create_venv = False
+            if force:
+                if not env.is_sane():
+                    self._io.write_error_line(
+                        f"The virtual environment found in {env.path} seems to"
+                        " be broken."
+                    )
+                self._io.write_error_line(
+                    f"Recreating virtualenv {name} in {venv!s}"
+                )
+                self.remove_venv(venv)
+                create_venv = True
+            elif self._io.is_very_verbose():
+                self._io.write_error_line(f"Virtualenv {name} already exists.")
+
+        if create_venv:
+            self.build_venv(
+                venv,
+                executable=executable,
+                flags=self._poetry.config.get("virtualenvs.options"),
+                prompt=venv_prompt,
+            )
+
+        # venv detection:
+        # stdlib venv may symlink sys.executable, so we can't use realpath.
+        # but others can symlink *to* the venv Python,
+        # so we can't just use sys.executable.
+        # So we just check every item in the symlink tree (generally <= 3)
+        p = os.path.normcase(sys.executable)
+        paths = [p]
+        while os.path.islink(p):
+            p = os.path.normcase(os.path.join(os.path.dirname(p), os.readlink(p)))
+            paths.append(p)
+
+        p_venv = os.path.normcase(str(venv))
+        if any(p.startswith(p_venv) for p in paths):
+            # Running properly in the virtualenv, don't need to do anything
+            return self.get_system_env()
+
+        return VirtualEnv(venv)
+
+    @classmethod
+    def build_venv(
+        cls,
+        path: Path,
+        executable: Path | None = None,
+        flags: dict[str, str | bool] | None = None,
+        with_pip: bool | None = None,
+        with_wheel: bool | None = None,
+        with_setuptools: bool | None = None,
+        prompt: str | None = None,
+    ) -> virtualenv.run.session.Session:
+        flags = flags or {}
+
+        if with_pip is not None:
+            flags["no-pip"] = not with_pip
+
+        if with_wheel is not None:
+            wheel_flags: dict[str, str | bool] = (
+                {"wheel": "bundle"} if with_wheel else {"no-wheel": True}
+            )
+            flags.update(wheel_flags)
+
+        if with_setuptools is not None:
+            setuptools_flags: dict[str, str | bool] = (
+                {"setuptools": "bundle"} if with_setuptools else {"no-setuptools": True}
+            )
+            flags.update(setuptools_flags)
+
+        flags.setdefault("no-pip", True)
+
+        if "setuptools" not in flags and "no-setuptools" not in flags:
+            flags["no-setuptools"] = True
+
+        if "wheel" not in flags and "no-wheel" not in flags:
+            flags["no-wheel"] = True
+
+        if WINDOWS:
+            path = get_real_windows_path(path)
+            executable = get_real_windows_path(executable) if executable else None
+
+        executable_str = None if executable is None else executable.resolve().as_posix()
+
+        args = [
+            "--no-download",
+            "--no-periodic-update",
+            "--python",
+            executable_str or sys.executable,
+        ]
+
+        if prompt is not None:
+            args.extend(["--prompt", prompt])
+
+        for flag, value in flags.items():
+            if value is True:
+                args.append(f"--{flag}")
+
+            elif value is not False:
+                args.append(f"--{flag}={value}")
+
+        args.append(str(path))
+
+        cli_result = virtualenv.cli_run(args, setup_logging=False)
+
+        # Exclude the venv folder from macOS Time Machine backups
+        # TODO: Add backup-ignore markers for other platforms too
+        if sys.platform == "darwin":
+            import xattr
+
+            xattr.setxattr(
+                str(path),
+                "com.apple.metadata:com_apple_backup_excludeItem",
+                plistlib.dumps("com.apple.backupd", fmt=plistlib.FMT_BINARY),
+            )
+
+        return cli_result
+
+    @classmethod
+    def remove_venv(cls, path: Path) -> None:
+        assert path.is_dir()
+        try:
+            remove_directory(path)
+            return
+        except OSError as e:
+            # Continue only if e.errno == 16
+            if e.errno != 16:  # ERRNO 16: Device or resource busy
+                raise e
+
+        # Delete all files and folders but the toplevel one. This is because sometimes
+        # the venv folder is mounted by the OS, such as in a docker volume. In such
+        # cases, an attempt to delete the folder itself will result in an `OSError`.
+        # See https://github.com/python-poetry/poetry/pull/2064
+        for file_path in path.iterdir():
+            if file_path.is_file() or file_path.is_symlink():
+                file_path.unlink()
+            elif file_path.is_dir():
+                remove_directory(file_path, force=True)
+
+    @classmethod
+    def get_system_env(cls, naive: bool = False) -> Env:
+        """
+        Retrieve the current Python environment.
+
+        This can be the base Python environment or an activated virtual environment.
+
+        This method also works around the issue that the virtual environment
+        used by Poetry internally (when installed via the custom installer)
+        is incorrectly detected as the system environment. Note that this
+        workaround applies only when `naive` is False, since there are times
+        when we actually want to retrieve Poetry's custom virtual environment
+        (e.g. plugin installation or self update).
+        """
+        prefix, base_prefix = Path(sys.prefix), Path(cls.get_base_prefix())
+        env: Env = SystemEnv(prefix)
+        if not naive:
+            env = GenericEnv(base_prefix, child_env=env)
+
+        return env
+
+    @classmethod
+    def get_base_prefix(cls) -> Path:
+        real_prefix = getattr(sys, "real_prefix", None)
+        if real_prefix is not None:
+            return Path(real_prefix)
+
+        base_prefix = getattr(sys, "base_prefix", None)
+        if base_prefix is not None:
+            return Path(base_prefix)
+
+        return Path(sys.prefix)
+
+    @classmethod
+    def generate_env_name(cls, name: str, cwd: str) -> str:
+        name = name.lower()
+        sanitized_name = re.sub(r'[ $`!*@"\\\r\n\t]', "_", name)[:42]
+        normalized_cwd = os.path.normcase(os.path.realpath(cwd))
+        h_bytes = hashlib.sha256(encode(normalized_cwd)).digest()
+        h_str = base64.urlsafe_b64encode(h_bytes).decode()[:8]
+
+        return f"{sanitized_name}-{h_str}"
diff --git a/conda_lock/_vendor/poetry/utils/env/exceptions.py b/conda_lock/_vendor/poetry/utils/env/exceptions.py
new file mode 100644
index 00000000..b3d3b56e
--- /dev/null
+++ b/conda_lock/_vendor/poetry/utils/env/exceptions.py
@@ -0,0 +1,69 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from conda_lock._vendor.poetry.utils._compat import decode
+
+
+if TYPE_CHECKING:
+    from subprocess import CalledProcessError
+
+
+class EnvError(Exception):
+    pass
+
+
+class IncorrectEnvError(EnvError):
+    def __init__(self, env_name: str) -> None:
+        message = f"Env {env_name} doesn't belong to this project."
+        super().__init__(message)
+
+
+class EnvCommandError(EnvError):
+    def __init__(self, e: CalledProcessError) -> None:
+        self.e = e
+
+        message_parts = [
+            f"Command {e.cmd} errored with the following return code {e.returncode}"
+        ]
+        if e.output:
+            message_parts.append(f"Output:\n{decode(e.output)}")
+        if e.stderr:
+            message_parts.append(f"Error output:\n{decode(e.stderr)}")
+        super().__init__("\n\n".join(message_parts))
+
+
+class PythonVersionNotFound(EnvError):
+    def __init__(self, expected: str) -> None:
+        super().__init__(f"Could not find the python executable {expected}")
+
+
+class NoCompatiblePythonVersionFound(EnvError):
+    def __init__(self, expected: str, given: str | None = None) -> None:
+        if given:
+            message = (
+                f"The specified Python version ({given}) "
+                f"is not supported by the project ({expected}).\n"
+                "Please choose a compatible version "
+                "or loosen the python constraint specified "
+                "in the pyproject.toml file."
+            )
+        else:
+            message = (
+                "Poetry was unable to find a compatible version. "
+                "If you have one, you can explicitly use it "
+                'via the "env use" command.'
+            )
+
+        super().__init__(message)
+
+
+class InvalidCurrentPythonVersionError(EnvError):
+    def __init__(self, expected: str, given: str) -> None:
+        message = (
+            f"Current Python version ({given}) "
+            f"is not allowed by the project ({expected}).\n"
+            'Please change python executable via the "env use" command.'
+        )
+
+        super().__init__(message)
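
`EnvCommandError` above folds the failing command, its return code, and any captured streams into a single message. A quick sketch, assuming the vendored module is importable:

    from subprocess import CalledProcessError

    from conda_lock._vendor.poetry.utils.env import EnvCommandError

    cpe = CalledProcessError(
        returncode=1,
        cmd=["python", "-c", "raise SystemExit(1)"],
        output="partial stdout",
        stderr="traceback text",
    )
    print(EnvCommandError(cpe))  # command + return code, then Output / Error output
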
diff --git a/conda_lock/_vendor/poetry/utils/env/generic_env.py b/conda_lock/_vendor/poetry/utils/env/generic_env.py
new file mode 100644
index 00000000..1bd6e523
--- /dev/null
+++ b/conda_lock/_vendor/poetry/utils/env/generic_env.py
@@ -0,0 +1,102 @@
+from __future__ import annotations
+
+import json
+import os
+import re
+import subprocess
+
+from typing import TYPE_CHECKING
+from typing import Any
+
+from conda_lock._vendor.poetry.utils.env.script_strings import GET_PATHS_FOR_GENERIC_ENVS
+from conda_lock._vendor.poetry.utils.env.virtual_env import VirtualEnv
+
+
+if TYPE_CHECKING:
+    from pathlib import Path
+
+    from conda_lock._vendor.poetry.utils.env.base_env import Env
+
+
+class GenericEnv(VirtualEnv):
+    def __init__(
+        self, path: Path, base: Path | None = None, child_env: Env | None = None
+    ) -> None:
+        self._child_env = child_env
+
+        super().__init__(path, base=base)
+
+    def find_executables(self) -> None:
+        patterns = [("python*", "pip*")]
+
+        if self._child_env:
+            minor_version = (
+                f"{self._child_env.version_info[0]}.{self._child_env.version_info[1]}"
+            )
+            major_version = f"{self._child_env.version_info[0]}"
+            patterns = [
+                (f"python{minor_version}", f"pip{minor_version}"),
+                (f"python{major_version}", f"pip{major_version}"),
+            ]
+
+        python_executable = None
+        pip_executable = None
+
+        for python_pattern, pip_pattern in patterns:
+            if python_executable and pip_executable:
+                break
+
+            if not python_executable:
+                python_executables = sorted(
+                    p.name
+                    for p in self._bin_dir.glob(python_pattern)
+                    if re.match(r"python(?:\d+(?:\.\d+)?)?(?:\.exe)?$", p.name)
+                )
+
+                if python_executables:
+                    executable = python_executables[0]
+                    if executable.endswith(".exe"):
+                        executable = executable[:-4]
+
+                    python_executable = executable
+
+            if not pip_executable:
+                pip_executables = sorted(
+                    p.name
+                    for p in self._bin_dir.glob(pip_pattern)
+                    if re.match(r"pip(?:\d+(?:\.\d+)?)?(?:\.exe)?$", p.name)
+                )
+                if pip_executables:
+                    pip_executable = pip_executables[0]
+                    if pip_executable.endswith(".exe"):
+                        pip_executable = pip_executable[:-4]
+
+            if python_executable:
+                self._executable = python_executable
+
+            if pip_executable:
+                self._pip_executable = pip_executable
+
+    def get_paths(self) -> dict[str, str]:
+        output = self.run_python_script(GET_PATHS_FOR_GENERIC_ENVS)
+
+        paths: dict[str, str] = json.loads(output)
+        return paths
+
+    def execute(self, bin: str, *args: str, **kwargs: Any) -> int:
+        command = self.get_command_from_bin(bin) + list(args)
+        env = kwargs.pop("env", dict(os.environ))
+
+        if not self._is_windows:
+            return os.execvpe(command[0], command, env=env)
+
+        exe = subprocess.Popen(command, env=env, **kwargs)
+        exe.communicate()
+
+        return exe.returncode
+
+    def _run(self, cmd: list[str], **kwargs: Any) -> str:
+        return super(VirtualEnv, self)._run(cmd, **kwargs)
+
+    def is_venv(self) -> bool:
+        return self._path != self._base
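
The executable discovery above only keeps glob matches that also pass a strict regex, so names like "pythonw" are filtered out while bare, versioned, and ".exe" interpreter names are accepted. The pattern can be checked in isolation:

    import re

    PATTERN = re.compile(r"python(?:\d+(?:\.\d+)?)?(?:\.exe)?$")

    for candidate in ["python", "python3", "python3.11", "python3.exe", "pythonw"]:
        print(candidate, bool(PATTERN.match(candidate)))
    # "pythonw" -> False; the other four all match
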
diff --git a/conda_lock/_vendor/poetry/utils/env/mock_env.py b/conda_lock/_vendor/poetry/utils/env/mock_env.py
new file mode 100644
index 00000000..b8dcc769
--- /dev/null
+++ b/conda_lock/_vendor/poetry/utils/env/mock_env.py
@@ -0,0 +1,78 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+from typing import Any
+
+from conda_lock._vendor.poetry.utils.env.null_env import NullEnv
+
+
+if TYPE_CHECKING:
+    from packaging.tags import Tag
+
+
+class MockEnv(NullEnv):
+    def __init__(
+        self,
+        version_info: tuple[int, int, int] = (3, 7, 0),
+        *,
+        python_implementation: str = "CPython",
+        platform: str = "darwin",
+        platform_machine: str = "amd64",
+        os_name: str = "posix",
+        is_venv: bool = False,
+        sys_path: list[str] | None = None,
+        marker_env: dict[str, Any] | None = None,
+        supported_tags: list[Tag] | None = None,
+        **kwargs: Any,
+    ) -> None:
+        super().__init__(**kwargs)
+
+        self._version_info = version_info
+        self._python_implementation = python_implementation
+        self._platform = platform
+        self._platform_machine = platform_machine
+        self._os_name = os_name
+        self._is_venv = is_venv
+        self._sys_path = sys_path
+        self._mock_marker_env = marker_env
+        self._supported_tags = supported_tags
+
+    @property
+    def platform(self) -> str:
+        return self._platform
+
+    @property
+    def platform_machine(self) -> str:
+        return self._platform_machine
+
+    @property
+    def os(self) -> str:
+        return self._os_name
+
+    @property
+    def sys_path(self) -> list[str]:
+        if self._sys_path is None:
+            return super().sys_path
+
+        return self._sys_path
+
+    def get_marker_env(self) -> dict[str, Any]:
+        if self._mock_marker_env is not None:
+            return self._mock_marker_env
+
+        marker_env = super().get_marker_env()
+        marker_env["python_implementation"] = self._python_implementation
+        marker_env["version_info"] = self._version_info
+        marker_env["python_version"] = ".".join(str(v) for v in self._version_info[:2])
+        marker_env["python_full_version"] = ".".join(str(v) for v in self._version_info)
+        marker_env["sys_platform"] = self._platform
+        marker_env["platform_machine"] = self._platform_machine
+        marker_env["interpreter_name"] = self._python_implementation.lower()
+        marker_env["interpreter_version"] = "cp" + "".join(
+            str(v) for v in self._version_info[:2]
+        )
+
+        return marker_env
+
+    def is_venv(self) -> bool:
+        return self._is_venv
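+
+# Illustrative usage sketch (editor's note, not part of the upstream source):
+# MockEnv lets tests fake a target interpreter. The values are hypothetical.
+#
+#     >>> env = MockEnv(version_info=(3, 11, 2), platform="linux")
+#     >>> env.get_marker_env()["python_version"]
+#     '3.11'
+#     >>> env.is_venv()
+#     False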
diff --git a/conda_lock/_vendor/poetry/utils/env/null_env.py b/conda_lock/_vendor/poetry/utils/env/null_env.py
new file mode 100644
index 00000000..2e4132af
--- /dev/null
+++ b/conda_lock/_vendor/poetry/utils/env/null_env.py
@@ -0,0 +1,49 @@
+from __future__ import annotations
+
+import sys
+
+from pathlib import Path
+from typing import Any
+
+from conda_lock._vendor.poetry.utils.env.system_env import SystemEnv
+
+
+class NullEnv(SystemEnv):
+    def __init__(
+        self, path: Path | None = None, base: Path | None = None, execute: bool = False
+    ) -> None:
+        if path is None:
+            path = Path(sys.prefix)
+
+        super().__init__(path, base=base)
+
+        self._execute = execute
+        self.executed: list[list[str]] = []
+
+    @property
+    def paths(self) -> dict[str, str]:
+        if self._paths is None:
+            self._paths = self.get_paths()
+            self._paths["platlib"] = str(self._path / "platlib")
+            self._paths["purelib"] = str(self._path / "purelib")
+            self._paths["scripts"] = str(self._path / "scripts")
+            self._paths["data"] = str(self._path / "data")
+
+        return self._paths
+
+    def _run(self, cmd: list[str], **kwargs: Any) -> str:
+        self.executed.append(cmd)
+
+        if self._execute:
+            return super()._run(cmd, **kwargs)
+        return ""
+
+    def execute(self, bin: str, *args: str, **kwargs: Any) -> int:
+        self.executed.append([bin, *list(args)])
+
+        if self._execute:
+            return super().execute(bin, *args, **kwargs)
+        return 0
+
+    def _bin(self, bin: str) -> str:
+        return bin
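+
+# Illustrative usage sketch (editor's note, not part of the upstream source):
+# with the default execute=False, NullEnv records commands instead of running
+# them, which makes it handy for dry runs and tests.
+#
+#     >>> env = NullEnv()
+#     >>> env.execute("pip", "install", "foo")
+#     0
+#     >>> env.executed
+#     [['pip', 'install', 'foo']]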
diff --git a/conda_lock/_vendor/poetry/utils/env/script_strings.py b/conda_lock/_vendor/poetry/utils/env/script_strings.py
new file mode 100644
index 00000000..3e663e3e
--- /dev/null
+++ b/conda_lock/_vendor/poetry/utils/env/script_strings.py
@@ -0,0 +1,148 @@
+from __future__ import annotations
+
+import packaging.tags
+
+
+GET_SYS_TAGS = f"""
+import importlib.util
+import json
+import sys
+
+from pathlib import Path
+
+spec = importlib.util.spec_from_file_location(
+    "packaging", Path(r"{packaging.__file__}")
+)
+packaging = importlib.util.module_from_spec(spec)
+sys.modules[spec.name] = packaging
+
+spec = importlib.util.spec_from_file_location(
+    "packaging.tags", Path(r"{packaging.tags.__file__}")
+)
+packaging_tags = importlib.util.module_from_spec(spec)
+spec.loader.exec_module(packaging_tags)
+
+print(
+    json.dumps([(t.interpreter, t.abi, t.platform) for t in packaging_tags.sys_tags()])
+)
+"""
+
+GET_ENVIRONMENT_INFO = """\
+import json
+import os
+import platform
+import sys
+import sysconfig
+
+INTERPRETER_SHORT_NAMES = {
+    "python": "py",
+    "cpython": "cp",
+    "pypy": "pp",
+    "ironpython": "ip",
+    "jython": "jy",
+}
+
+
+def interpreter_version():
+    version = sysconfig.get_config_var("interpreter_version")
+    if version:
+        version = str(version)
+    else:
+        version = _version_nodot(sys.version_info[:2])
+
+    return version
+
+
+def _version_nodot(version):
+    if any(v >= 10 for v in version):
+        sep = "_"
+    else:
+        sep = ""
+
+    return sep.join(map(str, version))
+
+
+if hasattr(sys, "implementation"):
+    info = sys.implementation.version
+    iver = "{0.major}.{0.minor}.{0.micro}".format(info)
+    kind = info.releaselevel
+    if kind != "final":
+        iver += kind[0] + str(info.serial)
+
+    implementation_name = sys.implementation.name
+else:
+    iver = "0"
+    implementation_name = platform.python_implementation().lower()
+
+env = {
+    "implementation_name": implementation_name,
+    "implementation_version": iver,
+    "os_name": os.name,
+    "platform_machine": platform.machine(),
+    "platform_release": platform.release(),
+    "platform_system": platform.system(),
+    "platform_version": platform.version(),
+    "python_full_version": platform.python_version().rstrip("+"),
+    "platform_python_implementation": platform.python_implementation(),
+    "python_version": ".".join(platform.python_version_tuple()[:2]),
+    "sys_platform": sys.platform,
+    "version_info": tuple(sys.version_info),
+    # Extra information
+    "interpreter_name": INTERPRETER_SHORT_NAMES.get(
+        implementation_name, implementation_name
+    ),
+    "interpreter_version": interpreter_version(),
+}
+
+print(json.dumps(env))
+"""
+
+GET_BASE_PREFIX = """\
+import sys
+
+if hasattr(sys, "real_prefix"):
+    print(sys.real_prefix)
+elif hasattr(sys, "base_prefix"):
+    print(sys.base_prefix)
+else:
+    print(sys.prefix)
+"""
+
+GET_PYTHON_VERSION = """\
+import sys
+
+print('.'.join([str(s) for s in sys.version_info[:3]]))
+"""
+
+GET_PYTHON_VERSION_ONELINER = (
+    "import sys; print('.'.join([str(s) for s in sys.version_info[:3]]))"
+)
+GET_ENV_PATH_ONELINER = "import sys; print(sys.prefix)"
+
+GET_SYS_PATH = """\
+import json
+import sys
+
+print(json.dumps(sys.path))
+"""
+
+GET_PATHS = """\
+import json
+import sysconfig
+
+print(json.dumps(sysconfig.get_paths()))
+"""
+
+GET_PATHS_FOR_GENERIC_ENVS = """\
+import json
+import site
+import sysconfig
+
+paths = sysconfig.get_paths().copy()
+
+if site.check_enableusersite():
+    paths["usersite"] = site.getusersitepackages()
+    paths["userbase"] = site.getuserbase()
+
+print(json.dumps(paths))
+"""
diff --git a/conda_lock/_vendor/poetry/utils/env/site_packages.py b/conda_lock/_vendor/poetry/utils/env/site_packages.py
new file mode 100644
index 00000000..461342b3
--- /dev/null
+++ b/conda_lock/_vendor/poetry/utils/env/site_packages.py
@@ -0,0 +1,227 @@
+from __future__ import annotations
+
+import contextlib
+import itertools
+
+from pathlib import Path
+from typing import TYPE_CHECKING
+from typing import Any
+
+from conda_lock._vendor.poetry.utils._compat import metadata
+from conda_lock._vendor.poetry.utils.helpers import is_dir_writable
+from conda_lock._vendor.poetry.utils.helpers import paths_csv
+from conda_lock._vendor.poetry.utils.helpers import remove_directory
+
+
+if TYPE_CHECKING:
+    from collections.abc import Iterable
+
+
+class SitePackages:
+    def __init__(
+        self,
+        purelib: Path,
+        platlib: Path | None = None,
+        fallbacks: list[Path] | None = None,
+        skip_write_checks: bool = False,
+    ) -> None:
+        self._purelib = purelib
+        self._platlib = platlib or purelib
+
+        if platlib and platlib.resolve() == purelib.resolve():
+            self._platlib = purelib
+
+        self._fallbacks = fallbacks or []
+        self._skip_write_checks = skip_write_checks
+
+        self._candidates: list[Path] = []
+        for path in itertools.chain([self._purelib, self._platlib], self._fallbacks):
+            if path not in self._candidates:
+                self._candidates.append(path)
+
+        self._writable_candidates = None if not skip_write_checks else self._candidates
+
+    @property
+    def path(self) -> Path:
+        return self._purelib
+
+    @property
+    def purelib(self) -> Path:
+        return self._purelib
+
+    @property
+    def platlib(self) -> Path:
+        return self._platlib
+
+    @property
+    def candidates(self) -> list[Path]:
+        return self._candidates
+
+    @property
+    def writable_candidates(self) -> list[Path]:
+        if self._writable_candidates is not None:
+            return self._writable_candidates
+
+        self._writable_candidates = []
+        for candidate in self._candidates:
+            if not is_dir_writable(path=candidate, create=True):
+                continue
+            self._writable_candidates.append(candidate)
+
+        return self._writable_candidates
+
+    def make_candidates(
+        self, path: Path, writable_only: bool = False, strict: bool = False
+    ) -> list[Path]:
+        candidates = self._candidates if not writable_only else self.writable_candidates
+        if path.is_absolute():
+            for candidate in candidates:
+                with contextlib.suppress(ValueError):
+                    path.relative_to(candidate)
+                    return [path]
+            site_type = "writable " if writable_only else ""
+            raise ValueError(
+                f"{path} is not relative to any discovered {site_type}sites"
+            )
+
+        results = [candidate / path for candidate in candidates]
+
+        if not results and strict:
+            raise RuntimeError(
+                f'Unable to find a suitable destination for "{path}" in'
+                f" {paths_csv(self._candidates)}"
+            )
+
+        return results
+
+    def distributions(
+        self, name: str | None = None, writable_only: bool = False
+    ) -> Iterable[metadata.Distribution]:
+        path = list(
+            map(
+                str, self._candidates if not writable_only else self.writable_candidates
+            )
+        )
+
+        yield from metadata.PathDistribution.discover(name=name, path=path)
+
+    def find_distribution(
+        self, name: str, writable_only: bool = False
+    ) -> metadata.Distribution | None:
+        for distribution in self.distributions(name=name, writable_only=writable_only):
+            return distribution
+        return None
+
+    def find_distribution_files_with_suffix(
+        self, distribution_name: str, suffix: str, writable_only: bool = False
+    ) -> Iterable[Path]:
+        for distribution in self.distributions(
+            name=distribution_name, writable_only=writable_only
+        ):
+            files = [] if distribution.files is None else distribution.files
+            for file in files:
+                if file.name.endswith(suffix):
+                    path = distribution.locate_file(file)
+                    assert isinstance(path, Path)
+                    yield path
+
+    def find_distribution_files_with_name(
+        self, distribution_name: str, name: str, writable_only: bool = False
+    ) -> Iterable[Path]:
+        for distribution in self.distributions(
+            name=distribution_name, writable_only=writable_only
+        ):
+            files = [] if distribution.files is None else distribution.files
+            for file in files:
+                if file.name == name:
+                    path = distribution.locate_file(file)
+                    assert isinstance(path, Path)
+                    yield path
+
+    def find_distribution_direct_url_json_files(
+        self, distribution_name: str, writable_only: bool = False
+    ) -> Iterable[Path]:
+        return self.find_distribution_files_with_name(
+            distribution_name=distribution_name,
+            name="direct_url.json",
+            writable_only=writable_only,
+        )
+
+    def remove_distribution_files(self, distribution_name: str) -> list[Path]:
+        paths = []
+
+        for distribution in self.distributions(
+            name=distribution_name, writable_only=True
+        ):
+            files = [] if distribution.files is None else distribution.files
+            for file in files:
+                path = distribution.locate_file(file)
+                assert isinstance(path, Path)
+                path.unlink(missing_ok=True)
+
+            distribution_path: Path = distribution._path  # type: ignore[attr-defined]
+            if distribution_path.exists():
+                remove_directory(distribution_path, force=True)
+
+            paths.append(distribution_path)
+
+        return paths
+
+    def _path_method_wrapper(
+        self,
+        path: Path,
+        method: str,
+        *args: Any,
+        return_first: bool = True,
+        writable_only: bool = False,
+        **kwargs: Any,
+    ) -> tuple[Path, Any] | list[tuple[Path, Any]]:
+        candidates = self.make_candidates(
+            path, writable_only=writable_only, strict=True
+        )
+
+        results = []
+
+        for candidate in candidates:
+            try:
+                result = candidate, getattr(candidate, method)(*args, **kwargs)
+                if return_first:
+                    return result
+                results.append(result)
+            except OSError:
+                # TODO: Replace with PermissionError
+                pass
+
+        if results:
+            return results
+
+        raise OSError(f"Unable to access any of {paths_csv(candidates)}")
+
+    def write_text(self, path: Path, *args: Any, **kwargs: Any) -> Path:
+        paths = self._path_method_wrapper(path, "write_text", *args, **kwargs)
+        assert isinstance(paths, tuple)
+        return paths[0]
+
+    def mkdir(self, path: Path, *args: Any, **kwargs: Any) -> Path:
+        paths = self._path_method_wrapper(path, "mkdir", *args, **kwargs)
+        assert isinstance(paths, tuple)
+        return paths[0]
+
+    def exists(self, path: Path) -> bool:
+        return any(
+            value[-1]
+            for value in self._path_method_wrapper(path, "exists", return_first=False)
+        )
+
+    def find(
+        self,
+        path: Path,
+        writable_only: bool = False,
+    ) -> list[Path]:
+        return [
+            value[0]
+            for value in self._path_method_wrapper(
+                path, "exists", return_first=False, writable_only=writable_only
+            )
+            if value[-1] is True
+        ]
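+
+# Illustrative usage sketch (editor's note, not part of the upstream source):
+# SitePackages fans filesystem operations out over the candidate site
+# directories, preferring the first (writable) match. The path is hypothetical.
+#
+#     >>> sp = SitePackages(purelib=Path("/env/lib/python3.11/site-packages"))
+#     >>> sp.candidates
+#     [PosixPath('/env/lib/python3.11/site-packages')]
+#     >>> sp.find_distribution("requests")  # None if not installed there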
diff --git a/conda_lock/_vendor/poetry/utils/env/system_env.py b/conda_lock/_vendor/poetry/utils/env/system_env.py
new file mode 100644
index 00000000..1f2fb904
--- /dev/null
+++ b/conda_lock/_vendor/poetry/utils/env/system_env.py
@@ -0,0 +1,88 @@
+from __future__ import annotations
+
+import os
+import platform
+import site
+import sys
+import sysconfig
+
+from pathlib import Path
+from typing import Any
+
+from packaging.tags import Tag
+from packaging.tags import interpreter_name
+from packaging.tags import interpreter_version
+from packaging.tags import sys_tags
+
+from conda_lock._vendor.poetry.utils.env.base_env import Env
+
+
+class SystemEnv(Env):
+    """
+    A system (i.e. not a virtualenv) Python environment.
+    """
+
+    @property
+    def python(self) -> Path:
+        return Path(sys.executable)
+
+    @property
+    def sys_path(self) -> list[str]:
+        return sys.path
+
+    def get_version_info(self) -> tuple[Any, ...]:
+        return tuple(sys.version_info)
+
+    def get_python_implementation(self) -> str:
+        return platform.python_implementation()
+
+    def get_paths(self) -> dict[str, str]:
+        paths = sysconfig.get_paths().copy()
+
+        if site.check_enableusersite():
+            paths["usersite"] = site.getusersitepackages()
+            paths["userbase"] = site.getuserbase()
+
+        return paths
+
+    def get_supported_tags(self) -> list[Tag]:
+        return list(sys_tags())
+
+    def get_marker_env(self) -> dict[str, Any]:
+        if hasattr(sys, "implementation"):
+            info = sys.implementation.version
+            iver = f"{info.major}.{info.minor}.{info.micro}"
+            kind = info.releaselevel
+            if kind != "final":
+                iver += kind[0] + str(info.serial)
+
+            implementation_name = sys.implementation.name
+        else:
+            iver = "0"
+            implementation_name = ""
+
+        return {
+            "implementation_name": implementation_name,
+            "implementation_version": iver,
+            "os_name": os.name,
+            "platform_machine": platform.machine(),
+            "platform_release": platform.release(),
+            "platform_system": platform.system(),
+            "platform_version": platform.version(),
+            # Workaround for https://github.com/python/cpython/issues/99968
+            "python_full_version": platform.python_version().rstrip("+"),
+            "platform_python_implementation": platform.python_implementation(),
+            "python_version": ".".join(platform.python_version().split(".")[:2]),
+            "sys_platform": sys.platform,
+            "version_info": sys.version_info,
+            "interpreter_name": interpreter_name(),
+            "interpreter_version": interpreter_version(),
+        }
+
+    def is_venv(self) -> bool:
+        return self._path != self._base
+
+    def _get_lib_dirs(self) -> list[Path]:
+        return super()._get_lib_dirs() + [Path(d) for d in site.getsitepackages()]
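+
+# Illustrative usage sketch (editor's note, not part of the upstream source):
+# SystemEnv answers every query from the interpreter running this process, so
+# no subprocess round-trip is needed.
+#
+#     >>> env = SystemEnv(Path(sys.prefix))
+#     >>> env.get_marker_env()["sys_platform"] == sys.platform
+#     True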
diff --git a/conda_lock/_vendor/poetry/utils/env/virtual_env.py b/conda_lock/_vendor/poetry/utils/env/virtual_env.py
new file mode 100644
index 00000000..b93b1071
--- /dev/null
+++ b/conda_lock/_vendor/poetry/utils/env/virtual_env.py
@@ -0,0 +1,145 @@
+from __future__ import annotations
+
+import json
+import os
+import re
+import sys
+
+from contextlib import contextmanager
+from copy import deepcopy
+from functools import cached_property
+from pathlib import Path
+from typing import TYPE_CHECKING
+from typing import Any
+
+from packaging.tags import Tag
+
+from conda_lock._vendor.poetry.utils.env.base_env import Env
+from conda_lock._vendor.poetry.utils.env.script_strings import GET_BASE_PREFIX
+from conda_lock._vendor.poetry.utils.env.script_strings import GET_ENVIRONMENT_INFO
+from conda_lock._vendor.poetry.utils.env.script_strings import GET_PATHS
+from conda_lock._vendor.poetry.utils.env.script_strings import GET_PYTHON_VERSION
+from conda_lock._vendor.poetry.utils.env.script_strings import GET_SYS_PATH
+from conda_lock._vendor.poetry.utils.env.script_strings import GET_SYS_TAGS
+from conda_lock._vendor.poetry.utils.env.system_env import SystemEnv
+
+
+if TYPE_CHECKING:
+    from collections.abc import Iterator
+
+
+class VirtualEnv(Env):
+    """
+    A virtual Python environment.
+    """
+
+    def __init__(self, path: Path, base: Path | None = None) -> None:
+        super().__init__(path, base)
+
+        # If base is None, it probably means this is
+        # a virtualenv created from VIRTUAL_ENV.
+        # In this case we need to get sys.base_prefix
+        # from inside the virtualenv.
+        if base is None:
+            output = self.run_python_script(GET_BASE_PREFIX)
+            self._base = Path(output.strip())
+
+    @property
+    def sys_path(self) -> list[str]:
+        output = self.run_python_script(GET_SYS_PATH)
+        paths: list[str] = json.loads(output)
+        return paths
+
+    def get_version_info(self) -> tuple[Any, ...]:
+        output = self.run_python_script(GET_PYTHON_VERSION)
+        assert isinstance(output, str)
+
+        return tuple(int(s) for s in output.strip().split("."))
+
+    def get_python_implementation(self) -> str:
+        implementation: str = self.marker_env["platform_python_implementation"]
+        return implementation
+
+    def get_supported_tags(self) -> list[Tag]:
+        output = self.run_python_script(GET_SYS_TAGS)
+
+        return [Tag(*t) for t in json.loads(output)]
+
+    def get_marker_env(self) -> dict[str, Any]:
+        output = self.run_python_script(GET_ENVIRONMENT_INFO)
+
+        env: dict[str, Any] = json.loads(output)
+        return env
+
+    def get_paths(self) -> dict[str, str]:
+        output = self.run_python_script(GET_PATHS)
+        paths: dict[str, str] = json.loads(output)
+        return paths
+
+    def is_venv(self) -> bool:
+        return True
+
+    def is_sane(self) -> bool:
+        # A virtualenv is considered sane if "python" exists.
+        return os.path.exists(self.python)
+
+    def _run(self, cmd: list[str], **kwargs: Any) -> str:
+        kwargs["env"] = self.get_temp_environ(environ=kwargs.get("env"))
+        return super()._run(cmd, **kwargs)
+
+    def get_temp_environ(
+        self,
+        environ: dict[str, str] | None = None,
+        exclude: list[str] | None = None,
+        **kwargs: str,
+    ) -> dict[str, str]:
+        exclude = exclude or []
+        exclude.extend(["PYTHONHOME", "__PYVENV_LAUNCHER__"])
+
+        if environ:
+            environ = deepcopy(environ)
+            for key in exclude:
+                environ.pop(key, None)
+        else:
+            environ = {k: v for k, v in os.environ.items() if k not in exclude}
+
+        environ.update(kwargs)
+
+        environ["PATH"] = self._updated_path()
+        environ["VIRTUAL_ENV"] = str(self._path)
+
+        return environ
+
+    def execute(self, bin: str, *args: str, **kwargs: Any) -> int:
+        kwargs["env"] = self.get_temp_environ(environ=kwargs.get("env"))
+        return super().execute(bin, *args, **kwargs)
+
+    @contextmanager
+    def temp_environ(self) -> Iterator[None]:
+        environ = dict(os.environ)
+        try:
+            yield
+        finally:
+            os.environ.clear()
+            os.environ.update(environ)
+
+    def _updated_path(self) -> str:
+        return os.pathsep.join([str(self._bin_dir), os.environ.get("PATH", "")])
+
+    @cached_property
+    def includes_system_site_packages(self) -> bool:
+        pyvenv_cfg = self._path / "pyvenv.cfg"
+        return pyvenv_cfg.exists() and (
+            re.search(
+                r"^\s*include-system-site-packages\s*=\s*true\s*$",
+                pyvenv_cfg.read_text(),
+                re.IGNORECASE | re.MULTILINE,
+            )
+            is not None
+        )
+
+    def is_path_relative_to_lib(self, path: Path) -> bool:
+        return super().is_path_relative_to_lib(path) or (
+            self.includes_system_site_packages
+            and SystemEnv(Path(sys.prefix)).is_path_relative_to_lib(path)
+        )
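+
+# Illustrative usage sketch (editor's note, not part of the upstream source):
+# a VirtualEnv introspects the interpreter at `path` by running the script
+# strings above inside it. The path is hypothetical; with base=None the
+# constructor runs that interpreter once to find its base prefix.
+#
+#     >>> venv = VirtualEnv(Path("/project/.venv"))
+#     >>> venv.get_version_info()               # e.g. (3, 11, 2)
+#     >>> venv.includes_system_site_packages    # parsed from pyvenv.cfg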
diff --git a/conda_lock/_vendor/poetry/utils/exporter.py b/conda_lock/_vendor/poetry/utils/exporter.py
deleted file mode 100644
index 1554d716..00000000
--- a/conda_lock/_vendor/poetry/utils/exporter.py
+++ /dev/null
@@ -1,169 +0,0 @@
-from typing import Optional
-from typing import Sequence
-from typing import Union
-
-from clikit.api.io import IO
-
-from conda_lock._vendor.poetry.core.packages.utils.utils import path_to_url
-from conda_lock._vendor.poetry.poetry import Poetry
-from conda_lock._vendor.poetry.utils._compat import Path
-from conda_lock._vendor.poetry.utils._compat import decode
-from conda_lock._vendor.poetry.utils._compat import urlparse
-
-
-class Exporter(object):
-    """
-    Exporter class to export a lock file to alternative formats.
-    """
-
-    FORMAT_REQUIREMENTS_TXT = "requirements.txt"
-    #: The names of the supported export formats.
-    ACCEPTED_FORMATS = (FORMAT_REQUIREMENTS_TXT,)
-    ALLOWED_HASH_ALGORITHMS = ("sha256", "sha384", "sha512")
-
-    def __init__(self, poetry):  # type: (Poetry) -> None
-        self._poetry = poetry
-
-    def export(
-        self,
-        fmt,
-        cwd,
-        output,
-        with_hashes=True,
-        dev=False,
-        extras=None,
-        with_credentials=False,
-    ):  # type: (str, Path, Union[IO, str], bool, bool, Optional[Union[bool, Sequence[str]]], bool) -> None
-        if fmt not in self.ACCEPTED_FORMATS:
-            raise ValueError("Invalid export format: {}".format(fmt))
-
-        getattr(self, "_export_{}".format(fmt.replace(".", "_")))(
-            cwd,
-            output,
-            with_hashes=with_hashes,
-            dev=dev,
-            extras=extras,
-            with_credentials=with_credentials,
-        )
-
-    def _export_requirements_txt(
-        self,
-        cwd,
-        output,
-        with_hashes=True,
-        dev=False,
-        extras=None,
-        with_credentials=False,
-    ):  # type: (Path, Union[IO, str], bool, bool, Optional[Union[bool, Sequence[str]]], bool) -> None
-        indexes = set()
-        content = ""
-        dependency_lines = set()
-
-        for dependency_package in self._poetry.locker.get_project_dependency_packages(
-            project_requires=self._poetry.package.all_requires, dev=dev, extras=extras
-        ):
-            line = ""
-
-            dependency = dependency_package.dependency
-            package = dependency_package.package
-
-            if package.develop:
-                line += "-e "
-
-            requirement = dependency.to_pep_508(with_extras=False)
-            is_direct_local_reference = (
-                dependency.is_file() or dependency.is_directory()
-            )
-            is_direct_remote_reference = dependency.is_vcs() or dependency.is_url()
-
-            if is_direct_remote_reference:
-                line = requirement
-            elif is_direct_local_reference:
-                dependency_uri = path_to_url(dependency.source_url)
-                line = "{} @ {}".format(dependency.name, dependency_uri)
-            else:
-                line = "{}=={}".format(package.name, package.version)
-
-            if not is_direct_remote_reference:
-                if ";" in requirement:
-                    markers = requirement.split(";", 1)[1].strip()
-                    if markers:
-                        line += "; {}".format(markers)
-
-            if (
-                not is_direct_remote_reference
-                and not is_direct_local_reference
-                and package.source_url
-            ):
-                indexes.add(package.source_url)
-
-            if package.files and with_hashes:
-                hashes = []
-                for f in package.files:
-                    h = f["hash"]
-                    algorithm = "sha256"
-                    if ":" in h:
-                        algorithm, h = h.split(":")
-
-                        if algorithm not in self.ALLOWED_HASH_ALGORITHMS:
-                            continue
-
-                    hashes.append("{}:{}".format(algorithm, h))
-
-                if hashes:
-                    line += " \\\n"
-                    for i, h in enumerate(hashes):
-                        line += "    --hash={}{}".format(
-                            h, " \\\n" if i < len(hashes) - 1 else ""
-                        )
-            dependency_lines.add(line)
-
-        content += "\n".join(sorted(dependency_lines))
-        content += "\n"
-
-        if indexes:
-            # If we have extra indexes, we add them to the beginning of the output
-            indexes_header = ""
-            for index in sorted(indexes):
-                repositories = [
-                    r
-                    for r in self._poetry.pool.repositories
-                    if r.url == index.rstrip("/")
-                ]
-                if not repositories:
-                    continue
-                repository = repositories[0]
-                if (
-                    self._poetry.pool.has_default()
-                    and repository is self._poetry.pool.repositories[0]
-                ):
-                    url = (
-                        repository.authenticated_url
-                        if with_credentials
-                        else repository.url
-                    )
-                    indexes_header = "--index-url {}\n".format(url)
-                    continue
-
-                url = (
-                    repository.authenticated_url if with_credentials else repository.url
-                )
-                parsed_url = urlparse.urlsplit(url)
-                if parsed_url.scheme == "http":
-                    indexes_header += "--trusted-host {}\n".format(parsed_url.netloc)
-                indexes_header += "--extra-index-url {}\n".format(url)
-
-            content = indexes_header + "\n" + content
-
-        self._output(content, cwd, output)
-
-    def _output(
-        self, content, cwd, output
-    ):  # type: (str, Path, Union[IO, str]) -> None
-        decoded = decode(content)
-        try:
-            output.write(decoded)
-        except AttributeError:
-            filepath = cwd / output
-            with filepath.open("w", encoding="utf-8") as f:
-                f.write(decoded)
diff --git a/conda_lock/_vendor/poetry/utils/extras.py b/conda_lock/_vendor/poetry/utils/extras.py
index 6dca56eb..cfccf8f7 100644
--- a/conda_lock/_vendor/poetry/utils/extras.py
+++ b/conda_lock/_vendor/poetry/utils/extras.py
@@ -1,17 +1,22 @@
-from typing import Iterator
-from typing import List
-from typing import Mapping
-from typing import Sequence
+from __future__ import annotations
 
-from conda_lock._vendor.poetry.core.packages import Package
-from conda_lock._vendor.poetry.utils.helpers import canonicalize_name
+from typing import TYPE_CHECKING
+
+
+if TYPE_CHECKING:
+    from collections.abc import Collection
+    from collections.abc import Iterable
+    from collections.abc import Mapping
+
+    from packaging.utils import NormalizedName
+    from conda_lock._vendor.poetry.core.packages.package import Package
 
 
 def get_extra_package_names(
-    packages,  # type: Sequence[Package]
-    extras,  # type: Mapping[str, List[str]]
-    extra_names,  # type: Sequence[str]
-):  # type: (...) -> Iterator[str]
+    packages: Iterable[Package],
+    extras: Mapping[NormalizedName, Iterable[NormalizedName]],
+    extra_names: Collection[NormalizedName],
+) -> set[NormalizedName]:
     """
     Returns all package names required by the given extras.
 
@@ -20,40 +25,33 @@ def get_extra_package_names(
         in the `extras` section of `poetry.lock`.
     :param extra_names: A list of strings specifying names of extra groups to resolve.
     """
+    from packaging.utils import canonicalize_name
+
     if not extra_names:
-        return []
+        return set()
 
     # lookup for packages by name, faster than looping over packages repeatedly
     packages_by_name = {package.name: package for package in packages}
 
-    # get and flatten names of packages we've opted into as extras
-    extra_package_names = [
+    # Depth-first search, with our entry points being the packages directly required by
+    # extras.
+    seen_package_names = set()
+    stack = [
         canonicalize_name(extra_package_name)
         for extra_name in extra_names
         for extra_package_name in extras.get(extra_name, ())
     ]
 
-    # keep record of packages seen during recursion in order to avoid recursion error
-    seen_package_names = set()
+    while stack:
+        package_name = stack.pop()
+
+        # We expect to find all packages, but can just carry on if we don't.
+        package = packages_by_name.get(package_name)
+        if package is None or package.name in seen_package_names:
+            continue
+
+        seen_package_names.add(package.name)
+
+        stack += [dependency.name for dependency in package.requires]
 
-    def _extra_packages(package_names):
-        """Recursively find dependencies for packages names"""
-        # for each extra pacakge name
-        for package_name in package_names:
-            # Find the actual Package object. A missing key indicates an implicit
-            # dependency (like setuptools), which should be ignored
-            package = packages_by_name.get(canonicalize_name(package_name))
-            if package:
-                if package.name not in seen_package_names:
-                    seen_package_names.add(package.name)
-                    yield package.name
-                # Recurse for dependencies
-                for dependency_package_name in _extra_packages(
-                    dependency.name
-                    for dependency in package.requires
-                    if dependency.name not in seen_package_names
-                ):
-                    seen_package_names.add(dependency_package_name)
-                    yield dependency_package_name
-
-    return _extra_packages(extra_package_names)
+    return seen_package_names
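+
+# Illustrative usage sketch (editor's note, not part of the upstream source):
+# the iterative depth-first search resolves the transitive dependencies of the
+# requested extras. The package names are hypothetical; `packages` would be
+# Package objects whose .name and .requires form this graph.
+#
+#     extras = {"s3": ["boto3"]}  # and boto3's requires contain botocore
+#     get_extra_package_names(packages, extras, ["s3"])
+#     # -> {"boto3", "botocore"}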
diff --git a/conda_lock/_vendor/poetry/utils/helpers.py b/conda_lock/_vendor/poetry/utils/helpers.py
index cd01c464..a43fc0ae 100644
--- a/conda_lock/_vendor/poetry/utils/helpers.py
+++ b/conda_lock/_vendor/poetry/utils/helpers.py
@@ -1,73 +1,93 @@
+from __future__ import annotations
+
+import hashlib
+import io
+import logging
 import os
-import re
 import shutil
 import stat
+import sys
+import tarfile
 import tempfile
+import zipfile
 
+from collections.abc import Mapping
 from contextlib import contextmanager
-from typing import List
-from typing import Optional
+from contextlib import suppress
+from functools import cached_property
+from pathlib import Path
+from typing import TYPE_CHECKING
+from typing import Any
+from typing import overload
 
 import requests
 
-from conda_lock._vendor.poetry.config.config import Config
-from conda_lock._vendor.poetry.core.packages.package import Package
-from conda_lock._vendor.poetry.core.version import Version
-from conda_lock._vendor.poetry.utils._compat import Path
-
-
-try:
-    from collections.abc import Mapping
-except ImportError:
-    from collections import Mapping
-
-
-_canonicalize_regex = re.compile("[-_]+")
-
-
-def canonicalize_name(name):  # type: (str) -> str
-    return _canonicalize_regex.sub("-", name).lower()
-
-
-def module_name(name):  # type: (str) -> str
-    return canonicalize_name(name).replace(".", "_").replace("-", "_")
-
-
-def normalize_version(version):  # type: (str) -> str
-    return str(Version(version))
-
-
-def _del_ro(action, name, exc):
-    os.chmod(name, stat.S_IWRITE)
-    os.remove(name)
+from requests.utils import atomic_open
+
+from conda_lock._vendor.poetry.utils.constants import REQUESTS_TIMEOUT
+
+
+if TYPE_CHECKING:
+    from collections.abc import Callable
+    from collections.abc import Iterator
+    from types import TracebackType
+
+    from conda_lock._vendor.poetry.core.packages.package import Package
+    from requests import Session
+
+    from conda_lock._vendor.poetry.utils.authenticator import Authenticator
+
+logger = logging.getLogger(__name__)
+
+prioritised_hash_types: tuple[str, ...] = tuple(
+    t
+    for t in [
+        "sha3_512",
+        "sha3_384",
+        "sha3_256",
+        "sha3_224",
+        "sha512",
+        "sha384",
+        "sha256",
+        "sha224",
+        "shake_256",
+        "shake_128",
+        "blake2s",
+        "blake2b",
+    ]
+    if t in hashlib.algorithms_available
+)
+non_prioritised_available_hash_types: frozenset[str] = frozenset(
+    set(hashlib.algorithms_available).difference(prioritised_hash_types)
+)
 
 
 @contextmanager
-def temporary_directory(*args, **kwargs):
-    name = tempfile.mkdtemp(*args, **kwargs)
+def directory(path: Path) -> Iterator[Path]:
+    cwd = Path.cwd()
+    try:
+        os.chdir(path)
+        yield path
+    finally:
+        os.chdir(cwd)
 
-    yield name
 
-    shutil.rmtree(name, onerror=_del_ro)
+# Correct type signature when used as `shutil.rmtree(..., onexc=_on_rm_error)`.
+@overload
+def _on_rm_error(
+    func: Callable[[str], None], path: str, exc_info: Exception
+) -> None: ...
 
 
-def get_cert(config, repository_name):  # type: (Config, str) -> Optional[Path]
-    cert = config.get("certificates.{}.cert".format(repository_name))
-    if cert:
-        return Path(cert)
-    else:
-        return None
+# Correct type signature when used as `shutil.rmtree(..., onerror=_on_rm_error)`.
+@overload
+def _on_rm_error(
+    func: Callable[[str], None],
+    path: str,
+    exc_info: tuple[type[BaseException], BaseException, TracebackType],
+) -> None: ...
 
 
-def get_client_cert(config, repository_name):  # type: (Config, str) -> Optional[Path]
-    client_cert = config.get("certificates.{}.client-cert".format(repository_name))
-    if client_cert:
-        return Path(client_cert)
-    else:
-        return None
-
-
-def _on_rm_error(func, path, exc_info):
+def _on_rm_error(func: Callable[[str], None], path: str, exc_info: Any) -> None:
     if not os.path.exists(path):
         return
 
@@ -75,52 +95,129 @@ def _on_rm_error(func, path, exc_info):
     func(path)
 
 
-def safe_rmtree(path):
-    if Path(path).is_symlink():
-        return os.unlink(str(path))
+def remove_directory(path: Path, force: bool = False) -> None:
+    """
+    Helper function that handles safe removal, and optionally forces stubborn file removal.
+    This is particularly useful when dist files are read-only or git writes read-only
+    files on Windows.
+
+    Internally, all arguments are passed to `shutil.rmtree`.
+    """
+    if path.is_symlink():
+        return os.unlink(path)
 
-    shutil.rmtree(path, onerror=_on_rm_error)
+    kwargs: dict[str, Any] = {}
+    if force:
+        onexc = "onexc" if sys.version_info >= (3, 12) else "onerror"
+        kwargs[onexc] = _on_rm_error
 
+    shutil.rmtree(path, **kwargs)
 
-def merge_dicts(d1, d2):
-    for k, v in d2.items():
+
+def merge_dicts(d1: dict[str, Any], d2: dict[str, Any]) -> None:
+    for k in d2:
         if k in d1 and isinstance(d1[k], dict) and isinstance(d2[k], Mapping):
             merge_dicts(d1[k], d2[k])
         else:
             d1[k] = d2[k]
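+
+# Illustrative usage sketch (editor's note, not part of the upstream source):
+# merge_dicts mutates d1 in place, recursing into values that are dicts.
+#
+#     >>> d1 = {"a": {"x": 1}, "b": 2}
+#     >>> merge_dicts(d1, {"a": {"y": 3}, "c": 4})
+#     >>> d1
+#     {'a': {'x': 1, 'y': 3}, 'b': 2, 'c': 4}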
 
 
-def download_file(
-    url, dest, session=None, chunk_size=1024
-):  # type: (str, str, Optional[requests.Session], int) -> None
-    get = requests.get if not session else session.get
+class HTTPRangeRequestSupported(Exception):
+    """Raised when server unexpectedly supports byte ranges."""
 
-    with get(url, stream=True) as response:
-        response.raise_for_status()
 
-        with open(dest, "wb") as f:
-            for chunk in response.iter_content(chunk_size=chunk_size):
+def download_file(
+    url: str,
+    dest: Path,
+    *,
+    session: Authenticator | Session | None = None,
+    chunk_size: int = 1024,
+    raise_accepts_ranges: bool = False,
+) -> None:
+    from conda_lock._vendor.poetry.puzzle.provider import Indicator
+
+    downloader = Downloader(url, dest, session)
+
+    if raise_accepts_ranges and downloader.accepts_ranges:
+        raise HTTPRangeRequestSupported(f"URL {url} supports range requests.")
+
+    set_indicator = False
+    with Indicator.context() as update_context:
+        update_context(f"Downloading {url}")
+
+        total_size = downloader.total_size
+        if total_size > 0:
+            fetched_size = 0
+            last_percent = 0
+
+            # if the total size is less than 1MB, we simply show that we're
+            # downloading but skip the percentage updates
+            set_indicator = total_size > 1024 * 1024
+
+        for fetched_size in downloader.download_with_progress(chunk_size):
+            if set_indicator:
+                percent = (fetched_size * 100) // total_size
+                if percent > last_percent:
+                    last_percent = percent
+                    update_context(f"Downloading {url} {percent:3}%")
+
+
+class Downloader:
+    def __init__(
+        self,
+        url: str,
+        dest: Path,
+        session: Authenticator | Session | None = None,
+    ):
+        self._dest = dest
+
+        get = requests.get if not session else session.get
+        headers = {"Accept-Encoding": "Identity"}
+
+        self._response = get(
+            url, stream=True, headers=headers, timeout=REQUESTS_TIMEOUT
+        )
+        self._response.raise_for_status()
+
+    @cached_property
+    def accepts_ranges(self) -> bool:
+        return self._response.headers.get("Accept-Ranges") == "bytes"
+
+    @cached_property
+    def total_size(self) -> int:
+        total_size = 0
+        if "Content-Length" in self._response.headers:
+            with suppress(ValueError):
+                total_size = int(self._response.headers["Content-Length"])
+        return total_size
+
+    def download_with_progress(self, chunk_size: int = 1024) -> Iterator[int]:
+        fetched_size = 0
+        with atomic_open(self._dest) as f:
+            for chunk in self._response.iter_content(chunk_size=chunk_size):
                 if chunk:
                     f.write(chunk)
+                    fetched_size += len(chunk)
+                    yield fetched_size
 
 
 def get_package_version_display_string(
-    package, root=None
-):  # type: (Package, Optional[Path]) -> str
+    package: Package, root: Path | None = None
+) -> str:
     if package.source_type in ["file", "directory"] and root:
-        return "{} {}".format(
-            package.version,
-            Path(os.path.relpath(package.source_url, root.as_posix())).as_posix(),
-        )
+        assert package.source_url is not None
+        path = Path(os.path.relpath(package.source_url, root)).as_posix()
+        return f"{package.version} {path}"
 
-    return package.full_pretty_version
+    pretty_version: str = package.full_pretty_version
+    return pretty_version
 
 
-def paths_csv(paths):  # type: (List[Path]) -> str
-    return ", ".join('"{}"'.format(str(c)) for c in paths)
+def paths_csv(paths: list[Path]) -> str:
+    return ", ".join(f'"{c!s}"' for c in paths)
 
 
-def is_dir_writable(path, create=False):  # type: (Path, bool) -> bool
+def is_dir_writable(path: Path, create: bool = False) -> bool:
     try:
         if not path.exists():
             if not create:
@@ -129,7 +226,148 @@ def is_dir_writable(path, create=False):  # type: (Path, bool) -> bool
 
         with tempfile.TemporaryFile(dir=str(path)):
             pass
-    except (IOError, OSError):
+    except OSError:
         return False
     else:
         return True
+
+
+def pluralize(count: int, word: str = "") -> str:
+    if count == 1:
+        return word
+    return word + "s"
+
+
+def _get_win_folder_from_registry(csidl_name: str) -> str:
+    if sys.platform != "win32":
+        raise RuntimeError("Method can only be called on Windows.")
+
+    import winreg as _winreg
+
+    shell_folder_name = {
+        "CSIDL_APPDATA": "AppData",
+        "CSIDL_COMMON_APPDATA": "Common AppData",
+        "CSIDL_LOCAL_APPDATA": "Local AppData",
+        "CSIDL_PROGRAM_FILES": "Program Files",
+    }[csidl_name]
+
+    key = _winreg.OpenKey(
+        _winreg.HKEY_CURRENT_USER,
+        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders",
+    )
+    dir, type = _winreg.QueryValueEx(key, shell_folder_name)
+
+    assert isinstance(dir, str)
+    return dir
+
+
+def _get_win_folder_with_ctypes(csidl_name: str) -> str:
+    if sys.platform != "win32":
+        raise RuntimeError("Method can only be called on Windows.")
+
+    import ctypes
+
+    csidl_const = {
+        "CSIDL_APPDATA": 26,
+        "CSIDL_COMMON_APPDATA": 35,
+        "CSIDL_LOCAL_APPDATA": 28,
+        "CSIDL_PROGRAM_FILES": 38,
+    }[csidl_name]
+
+    buf = ctypes.create_unicode_buffer(1024)
+    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
+
+    # Downgrade to the short path name if it has high-bit chars. See
+    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+    has_high_char = False
+    for c in buf:
+        if ord(c) > 255:
+            has_high_char = True
+            break
+    if has_high_char:
+        buf2 = ctypes.create_unicode_buffer(1024)
+        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
+            buf = buf2
+
+    return buf.value
+
+
+def get_win_folder(csidl_name: str) -> Path:
+    if sys.platform == "win32":
+        try:
+            from ctypes import windll  # noqa: F401
+
+            _get_win_folder = _get_win_folder_with_ctypes
+        except ImportError:
+            _get_win_folder = _get_win_folder_from_registry
+
+        return Path(_get_win_folder(csidl_name))
+
+    raise RuntimeError("Method can only be called on Windows.")
+
+
+def get_real_windows_path(path: Path) -> Path:
+    program_files = get_win_folder("CSIDL_PROGRAM_FILES")
+    local_appdata = get_win_folder("CSIDL_LOCAL_APPDATA")
+
+    path = Path(
+        str(path).replace(
+            str(program_files / "WindowsApps"),
+            str(local_appdata / "Microsoft/WindowsApps"),
+        )
+    )
+
+    if path.as_posix().startswith(local_appdata.as_posix()):
+        path = path.resolve()
+
+    return path
+
+
+def get_file_hash(path: Path, hash_name: str = "sha256") -> str:
+    h = hashlib.new(hash_name)
+    with path.open("rb") as fp:
+        for content in iter(lambda: fp.read(io.DEFAULT_BUFFER_SIZE), b""):
+            h.update(content)
+
+    return h.hexdigest()
+
+
+def get_highest_priority_hash_type(
+    hash_types: set[str], archive_name: str
+) -> str | None:
+    if not hash_types:
+        return None
+
+    for prioritised_hash_type in prioritised_hash_types:
+        if prioritised_hash_type in hash_types:
+            return prioritised_hash_type
+
+    logger.debug(
+        f"There are no known hash types for {archive_name} that are prioritised (known"
+        f" hash types: {hash_types!s})"
+    )
+
+    for available_hash_type in non_prioritised_available_hash_types:
+        if available_hash_type in hash_types:
+            return available_hash_type
+
+    return None
+
+
+def extractall(source: Path, dest: Path, zip: bool) -> None:
+    """Extract all members from either a zip or tar archive."""
+    if zip:
+        with zipfile.ZipFile(source) as archive:
+            archive.extractall(dest)
+    else:
+        # These versions of python shipped with a broken tarfile data_filter, per
+        # https://github.com/python/cpython/issues/107845.
+        broken_tarfile_filter = {(3, 8, 17), (3, 9, 17), (3, 10, 12), (3, 11, 4)}
+        with tarfile.open(source) as archive:
+            if (
+                hasattr(tarfile, "data_filter")
+                and sys.version_info[:3] not in broken_tarfile_filter
+            ):
+                archive.extractall(dest, filter="data")
+            else:
+                archive.extractall(dest)
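+
+# Illustrative usage sketch (editor's note, not part of the upstream source):
+# choosing the strongest locally available hash type for an archive, then
+# hashing the file with it. The file name is hypothetical.
+#
+#     >>> get_highest_priority_hash_type({"md5", "sha256"}, "pkg-1.0.tar.gz")
+#     'sha256'
+#     >>> get_file_hash(Path("pkg-1.0.tar.gz"), "sha256")  # hex digest string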
diff --git a/conda_lock/_vendor/poetry/utils/password_manager.py b/conda_lock/_vendor/poetry/utils/password_manager.py
index 24a615a4..4766ec73 100644
--- a/conda_lock/_vendor/poetry/utils/password_manager.py
+++ b/conda_lock/_vendor/poetry/utils/password_manager.py
@@ -1,33 +1,64 @@
+from __future__ import annotations
+
+import dataclasses
+import functools
 import logging
 
+from contextlib import suppress
+from typing import TYPE_CHECKING
+
+
+if TYPE_CHECKING:
+    from keyring.backend import KeyringBackend
+
+    from conda_lock._vendor.poetry.config.config import Config
 
 logger = logging.getLogger(__name__)
 
 
 class PasswordManagerError(Exception):
-
     pass
 
 
-class KeyRingError(Exception):
-
+class PoetryKeyringError(Exception):
     pass
 
 
-class KeyRing:
-    def __init__(self, namespace):
+@dataclasses.dataclass
+class HTTPAuthCredential:
+    username: str | None = dataclasses.field(default=None)
+    password: str | None = dataclasses.field(default=None)
+
+
+class PoetryKeyring:
+    def __init__(self, namespace: str) -> None:
         self._namespace = namespace
-        self._is_available = True
 
-        self._check()
+    def get_credential(
+        self, *names: str, username: str | None = None
+    ) -> HTTPAuthCredential:
+        import keyring
 
-    def is_available(self):
-        return self._is_available
+        from keyring.errors import KeyringError
+        from keyring.errors import KeyringLocked
 
-    def get_password(self, name, username):
-        if not self.is_available():
-            return
+        for name in names:
+            credential = None
+            try:
+                credential = keyring.get_credential(name, username)
+            except KeyringLocked:
+                logger.debug("Keyring %s is locked", name)
+            except (KeyringError, RuntimeError):
+                logger.debug("Accessing keyring %s failed", name, exc_info=True)
+
+            if credential:
+                return HTTPAuthCredential(
+                    username=credential.username, password=credential.password
+                )
+
+        return HTTPAuthCredential(username=username, password=None)
 
+    def get_password(self, name: str, username: str) -> str | None:
         import keyring
         import keyring.errors
 
@@ -36,14 +67,11 @@ def get_password(self, name, username):
         try:
             return keyring.get_password(name, username)
         except (RuntimeError, keyring.errors.KeyringError):
-            raise KeyRingError(
-                "Unable to retrieve the password for {} from the key ring".format(name)
+            raise PoetryKeyringError(
+                f"Unable to retrieve the password for {name} from the key ring"
             )
 
-    def set_password(self, name, username, password):
-        if not self.is_available():
-            return
-
+    def set_password(self, name: str, username: str, password: str) -> None:
         import keyring
         import keyring.errors
 
@@ -52,17 +80,11 @@ def set_password(self, name, username, password):
         try:
             keyring.set_password(name, username, password)
         except (RuntimeError, keyring.errors.KeyringError) as e:
-            raise KeyRingError(
-                "Unable to store the password for {} in the key ring: {}".format(
-                    name, str(e)
-                )
+            raise PoetryKeyringError(
+                f"Unable to store the password for {name} in the key ring: {e}"
             )
 
-    def delete_password(self, name, username):
-        if not self.is_available():
-            return
-
-        import keyring
+    def delete_password(self, name: str, username: str) -> None:
         import keyring.errors
 
         name = self.get_entry_name(name)
@@ -70,123 +92,156 @@ def delete_password(self, name, username):
         try:
             keyring.delete_password(name, username)
         except (RuntimeError, keyring.errors.KeyringError):
-            raise KeyRingError(
-                "Unable to delete the password for {} from the key ring".format(name)
+            raise PoetryKeyringError(
+                f"Unable to delete the password for {name} from the key ring"
             )
 
-    def get_entry_name(self, name):
-        return "{}-{}".format(self._namespace, name)
+    def get_entry_name(self, name: str) -> str:
+        return f"{self._namespace}-{name}"
 
-    def _check(self):
+    @classmethod
+    def is_available(cls) -> bool:
+        logger.debug("Checking if keyring is available")
         try:
             import keyring
-        except Exception as e:
-            logger.debug("An error occurred while importing keyring: {}".format(str(e)))
-            self._is_available = False
-
-            return
+            import keyring.backend
+        except ImportError as e:
+            logger.debug("An error occurred while importing keyring: %s", e)
+            return False
+
+        def backend_name(backend: KeyringBackend) -> str:
+            name: str = backend.name
+            return name.split(" ")[0]
+
+        def backend_is_valid(backend: KeyringBackend) -> bool:
+            name = backend_name(backend)
+            if name in ("chainer", "fail", "null"):
+                logger.debug(f"Backend {backend.name!r} is not suitable")
+                return False
+            elif "plaintext" in backend.name.lower():
+                logger.debug(f"Not using plaintext keyring backend {backend.name!r}")
+                return False
+
+            return True
 
         backend = keyring.get_keyring()
-        name = backend.name.split(" ")[0]
-        if name == "fail":
-            logger.debug("No suitable keyring backend found")
-            self._is_available = False
-        elif "plaintext" in backend.name.lower():
-            logger.debug("Only a plaintext keyring backend is available. Not using it.")
-            self._is_available = False
-        elif name == "chainer":
-            try:
-                import keyring.backend
-
-                backends = keyring.backend.get_all_keyring()
-
-                self._is_available = any(
-                    [
-                        b.name.split(" ")[0] not in ["chainer", "fail"]
-                        and "plaintext" not in b.name.lower()
-                        for b in backends
-                    ]
-                )
-            except Exception:
-                self._is_available = False
+        if backend_name(backend) == "chainer":
+            backends = keyring.backend.get_all_keyring()
+            valid_backend = next((b for b in backends if backend_is_valid(b)), None)
+        else:
+            valid_backend = backend if backend_is_valid(backend) else None
 
-        if not self._is_available:
-            logger.warning("No suitable keyring backends were found")
+        if valid_backend is None:
+            logger.debug("No valid keyring backend was found")
+            return False
+        else:
+            logger.debug(f"Using keyring backend {backend.name!r}")
+            return True
 
 
 class PasswordManager:
-    def __init__(self, config):
+    def __init__(self, config: Config) -> None:
         self._config = config
-        self._keyring = None
-
-    @property
-    def keyring(self):
-        if self._keyring is None:
-            self._keyring = KeyRing("poetry-repository")
-            if not self._keyring.is_available():
-                logger.warning(
-                    "Using a plaintext file to store and retrieve credentials"
-                )
 
-        return self._keyring
+    @functools.cached_property
+    def use_keyring(self) -> bool:
+        return self._config.get("keyring.enabled") and PoetryKeyring.is_available()
 
-    def set_pypi_token(self, name, token):
-        if not self.keyring.is_available():
+    @functools.cached_property
+    def keyring(self) -> PoetryKeyring:
+        if not self.use_keyring:
+            raise PoetryKeyringError(
+                "Access to keyring was requested, but it is not available"
+            )
+
+        return PoetryKeyring("poetry-repository")
+
+    @staticmethod
+    def warn_plaintext_credentials_stored() -> None:
+        logger.warning("Using a plaintext file to store credentials")
+
+    def set_pypi_token(self, repo_name: str, token: str) -> None:
+        if not self.use_keyring:
+            self.warn_plaintext_credentials_stored()
             self._config.auth_config_source.add_property(
-                "pypi-token.{}".format(name), token
+                f"pypi-token.{repo_name}", token
             )
         else:
-            self.keyring.set_password(name, "__token__", token)
+            self.keyring.set_password(repo_name, "__token__", token)
+
+    def get_pypi_token(self, repo_name: str) -> str | None:
+        """Get PyPi token.
 
-    def get_pypi_token(self, name):
-        if not self.keyring.is_available():
-            return self._config.get("pypi-token.{}".format(name))
+        First checks the configuration (including environment
+        variables) for a token, then falls back to the
+        available keyring.
 
-        return self.keyring.get_password(name, "__token__")
+        :param repo_name: Name of the repository.
+        :return: The token as a string if found, otherwise None.
+        """
+        token: str | None = self._config.get(f"pypi-token.{repo_name}")
+        if token:
+            return token
 
-    def delete_pypi_token(self, name):
-        if not self.keyring.is_available():
+        if self.use_keyring:
+            return self.keyring.get_password(repo_name, "__token__")
+        else:
+            return None
+
+    def delete_pypi_token(self, repo_name: str) -> None:
+        if not self.use_keyring:
             return self._config.auth_config_source.remove_property(
-                "pypi-token.{}".format(name)
+                f"pypi-token.{repo_name}"
             )
 
-        self.keyring.delete_password(name, "__token__")
+        self.keyring.delete_password(repo_name, "__token__")
 
-    def get_http_auth(self, name):
-        auth = self._config.get("http-basic.{}".format(name))
-        if not auth:
-            username = self._config.get("http-basic.{}.username".format(name))
-            password = self._config.get("http-basic.{}.password".format(name))
-            if not username and not password:
+    def get_http_auth(self, repo_name: str) -> dict[str, str | None] | None:
+        username = self._config.get(f"http-basic.{repo_name}.username")
+        password = self._config.get(f"http-basic.{repo_name}.password")
+        if not username and not password:
+            return None
+
+        if not password:
+            if self.use_keyring:
+                password = self.keyring.get_password(repo_name, username)
+            else:
                 return None
-        else:
-            username, password = auth["username"], auth.get("password")
-            if password is None:
-                password = self.keyring.get_password(name, username)
 
         return {
             "username": username,
             "password": password,
         }
 
-    def set_http_password(self, name, username, password):
+    def set_http_password(self, repo_name: str, username: str, password: str) -> None:
         auth = {"username": username}
 
-        if not self.keyring.is_available():
+        if not self.use_keyring:
+            self.warn_plaintext_credentials_stored()
             auth["password"] = password
         else:
-            self.keyring.set_password(name, username, password)
+            self.keyring.set_password(repo_name, username, password)
 
-        self._config.auth_config_source.add_property("http-basic.{}".format(name), auth)
+        self._config.auth_config_source.add_property(f"http-basic.{repo_name}", auth)
 
-    def delete_http_password(self, name):
-        auth = self.get_http_auth(name)
-        if not auth or "username" not in auth:
+    def delete_http_password(self, repo_name: str) -> None:
+        auth = self.get_http_auth(repo_name)
+        if not auth:
             return
 
-        try:
-            self.keyring.delete_password(name, auth["username"])
-        except KeyRingError:
-            pass
+        username = auth.get("username")
+        if username is None:
+            return
+
+        with suppress(PoetryKeyringError):
+            self.keyring.delete_password(repo_name, username)
 
-        self._config.auth_config_source.remove_property("http-basic.{}".format(name))
+        self._config.auth_config_source.remove_property(f"http-basic.{repo_name}")
+
+    def get_credential(
+        self, *names: str, username: str | None = None
+    ) -> HTTPAuthCredential:
+        if self.use_keyring:
+            return self.keyring.get_credential(*names, username=username)
+        else:
+            return HTTPAuthCredential(username=username, password=None)
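A minimal sketch of the token lookup order get_pypi_token implements: a
configured token always wins, and the keyring is consulted only as a fallback
when it is enabled. The dict-based config and keyring stand-ins are
illustrative, not the vendored classes:

    from typing import Optional

    def get_pypi_token_sketch(
        config: dict,         # illustrative stand-in for Config
        keyring_store: dict,  # illustrative stand-in for PoetryKeyring
        repo_name: str,
        use_keyring: bool,
    ) -> Optional[str]:
        # Config first: a stored token short-circuits the keyring entirely.
        token = config.get(f"pypi-token.{repo_name}")
        if token:
            return token
        # Keyring only when enabled and available.
        if use_keyring:
            return keyring_store.get((repo_name, "__token__"))
        return None

    assert get_pypi_token_sketch({"pypi-token.foo": "abc"}, {}, "foo", True) == "abc"
    assert get_pypi_token_sketch({}, {("foo", "__token__"): "t"}, "foo", True) == "t"
    assert get_pypi_token_sketch({}, {("foo", "__token__"): "t"}, "foo", False) is None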
diff --git a/conda_lock/_vendor/poetry/utils/patterns.py b/conda_lock/_vendor/poetry/utils/patterns.py
index ec6c53d7..bf88e51b 100644
--- a/conda_lock/_vendor/poetry/utils/patterns.py
+++ b/conda_lock/_vendor/poetry/utils/patterns.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import re
 
 
@@ -10,3 +12,8 @@
     r"\.whl|\.dist-info$",
     re.VERBOSE,
 )
+
+sdist_file_re = re.compile(
+    r"^(?P(?P.+?)-(?P\d.*?))"
+    r"(\.sdist)?\.(?P(zip|tar(\.(gz|bz2|xz|Z))?))$"
+)
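The named groups in sdist_file_re make the name/version/format split explicit.
A quick check against a typical sdist filename (the filename is illustrative):

    import re

    sdist_file_re = re.compile(
        r"^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))"
        r"(\.sdist)?\.(?P<format>(zip|tar(\.(gz|bz2|xz|Z))?))$"
    )

    m = sdist_file_re.match("requests-2.31.0.tar.gz")
    assert m is not None
    assert m.group("name") == "requests"
    assert m.group("ver") == "2.31.0"
    assert m.group("format") == "tar.gz"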
diff --git a/conda_lock/_vendor/poetry/utils/pip.py b/conda_lock/_vendor/poetry/utils/pip.py
new file mode 100644
index 00000000..59163b43
--- /dev/null
+++ b/conda_lock/_vendor/poetry/utils/pip.py
@@ -0,0 +1,58 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from conda_lock._vendor.poetry.exceptions import PoetryException
+from conda_lock._vendor.poetry.utils.env import EnvCommandError
+
+
+if TYPE_CHECKING:
+    from pathlib import Path
+
+    from conda_lock._vendor.poetry.utils.env import Env
+
+
+def pip_install(
+    path: Path,
+    environment: Env,
+    editable: bool = False,
+    deps: bool = False,
+    upgrade: bool = False,
+) -> str:
+    is_wheel = path.suffix == ".whl"
+
+    # We disable the version check here as we are already pinning to the version
+    # available in either the virtual environment or the virtualenv package's
+    # embedded wheel. Version checks are wasteful network calls that add a lot of
+    # wait time when installing many packages.
+    args = [
+        "install",
+        "--disable-pip-version-check",
+        "--isolated",
+        "--no-input",
+        "--prefix",
+        str(environment.path),
+    ]
+
+    if not is_wheel and not editable:
+        args.insert(1, "--use-pep517")
+
+    if upgrade:
+        args.append("--upgrade")
+
+    if not deps:
+        args.append("--no-deps")
+
+    if editable:
+        if not path.is_dir():
+            raise PoetryException(
+                "Cannot install non directory dependencies in editable mode"
+            )
+        args.append("-e")
+
+    args.append(str(path))
+
+    try:
+        return environment.run_pip(*args)
+    except EnvCommandError as e:
+        raise PoetryException(f"Failed to install {path}") from e
diff --git a/conda_lock/_vendor/poetry/utils/setup_reader.py b/conda_lock/_vendor/poetry/utils/setup_reader.py
index 8cd2ebd6..f50d8c2f 100644
--- a/conda_lock/_vendor/poetry/utils/setup_reader.py
+++ b/conda_lock/_vendor/poetry/utils/setup_reader.py
@@ -1,120 +1,100 @@
+from __future__ import annotations
+
 import ast
 
+from configparser import ConfigParser
+from typing import TYPE_CHECKING
 from typing import Any
-from typing import Dict
-from typing import Iterable
-from typing import List
-from typing import Optional
-from typing import Tuple
-from typing import Union
+from typing import ClassVar
+
+from conda_lock._vendor.poetry.core.constraints.version import Version
 
-from conda_lock._vendor.poetry.core.semver import Version
 
-from ._compat import PY35
-from ._compat import Path
-from ._compat import basestring
+if TYPE_CHECKING:
+    from pathlib import Path
 
 
-try:
-    from configparser import ConfigParser
-except ImportError:
-    from ConfigParser import ConfigParser
+class SetupReaderError(Exception):
+    pass
 
 
-class SetupReader(object):
+class SetupReader:
     """
     Class that reads a setup.py file without executing it.
     """
 
-    DEFAULT = {
+    DEFAULT: ClassVar[dict[str, Any]] = {
         "name": None,
         "version": None,
+        "description": None,
         "install_requires": [],
         "extras_require": {},
         "python_requires": None,
     }
 
-    FILES = ["setup.py", "setup.cfg"]
+    FILES: ClassVar[list[str]] = ["setup.py", "setup.cfg"]
 
     @classmethod
-    def read_from_directory(
-        cls, directory
-    ):  # type: (Union[basestring, Path]) -> Dict[str, Union[List, Dict]]
-        if isinstance(directory, basestring):
-            directory = Path(directory)
-
+    def read_from_directory(cls, directory: Path) -> dict[str, Any]:
         result = cls.DEFAULT.copy()
         for filename in cls.FILES:
             filepath = directory / filename
             if not filepath.exists():
                 continue
 
-            new_result = getattr(cls(), "read_{}".format(filename.replace(".", "_")))(
-                filepath
-            )
+            read_file_func = getattr(cls(), "read_" + filename.replace(".", "_"))
+            new_result = read_file_func(filepath)
 
-            for key in result.keys():
+            for key in result:
                 if new_result[key]:
                     result[key] = new_result[key]
 
         return result
 
-    @classmethod
-    def _is_empty_result(cls, result):  # type: (Dict[str, Any]) -> bool
-        return (
-            not result["install_requires"]
-            and not result["extras_require"]
-            and not result["python_requires"]
-        )
-
-    def read_setup_py(
-        self, filepath
-    ):  # type: (Union[basestring, Path]) -> Dict[str, Union[List, Dict]]
-        if not PY35:
-            return self.DEFAULT
-
-        if isinstance(filepath, basestring):
-            filepath = Path(filepath)
-
+    def read_setup_py(self, filepath: Path) -> dict[str, Any]:
         with filepath.open(encoding="utf-8") as f:
             content = f.read()
 
-        result = {}
+        result: dict[str, Any] = {}
 
         body = ast.parse(content).body
 
-        setup_call, body = self._find_setup_call(body)
-        if not setup_call:
+        setup_call = self._find_setup_call(body)
+        if setup_call is None:
             return self.DEFAULT
 
         # Inspecting keyword arguments
-        result["name"] = self._find_single_string(setup_call, body, "name")
-        result["version"] = self._find_single_string(setup_call, body, "version")
-        result["install_requires"] = self._find_install_requires(setup_call, body)
-        result["extras_require"] = self._find_extras_require(setup_call, body)
+        call, body = setup_call
+        result["name"] = self._find_single_string(call, body, "name")
+        result["version"] = self._find_single_string(call, body, "version")
+        result["description"] = self._find_single_string(call, body, "description")
+        result["install_requires"] = self._find_install_requires(call, body)
+        result["extras_require"] = self._find_extras_require(call, body)
         result["python_requires"] = self._find_single_string(
-            setup_call, body, "python_requires"
+            call, body, "python_requires"
         )
 
         return result
 
-    def read_setup_cfg(
-        self, filepath
-    ):  # type: (Union[basestring, Path]) -> Dict[str, Union[List, Dict]]
+    def read_setup_cfg(self, filepath: Path) -> dict[str, Any]:
         parser = ConfigParser()
 
         parser.read(str(filepath))
 
         name = None
         version = None
+        description = None
         if parser.has_option("metadata", "name"):
             name = parser.get("metadata", "name")
 
         if parser.has_option("metadata", "version"):
             version = Version.parse(parser.get("metadata", "version")).text
 
+        if parser.has_option("metadata", "description"):
+            description = parser.get("metadata", "description")
+
         install_requires = []
-        extras_require = {}
+        extras_require: dict[str, list[str]] = {}
         python_requires = None
         if parser.has_section("options"):
             if parser.has_option("options", "install_requires"):
@@ -142,15 +122,16 @@ def read_setup_cfg(
         return {
             "name": name,
             "version": version,
+            "description": description,
             "install_requires": install_requires,
             "extras_require": extras_require,
             "python_requires": python_requires,
         }
 
     def _find_setup_call(
-        self, elements
-    ):  # type: (List[Any]) -> Tuple[Optional[ast.Call], Optional[List[Any]]]
-        funcdefs = []
+        self, elements: list[ast.stmt]
+    ) -> tuple[ast.Call, list[ast.stmt]] | None:
+        funcdefs: list[ast.stmt] = []
         for i, element in enumerate(elements):
             if isinstance(element, ast.If) and i == len(elements) - 1:
                 # Checking if the last element is an if statement
@@ -167,11 +148,13 @@ def _find_setup_call(
                 if left.id != "__name__":
                     continue
 
-                setup_call, body = self._find_sub_setup_call([element])
-                if not setup_call:
+                setup_call = self._find_sub_setup_call([element])
+                if setup_call is None:
                     continue
 
-                return setup_call, body + elements
+                call, body = setup_call
+                return call, body + elements
+
             if not isinstance(element, ast.Expr):
                 if isinstance(element, ast.FunctionDef):
                     funcdefs.append(element)
@@ -185,8 +168,7 @@ def _find_setup_call(
             func = value.func
             if not (isinstance(func, ast.Name) and func.id == "setup") and not (
                 isinstance(func, ast.Attribute)
-                and hasattr(func.value, "id")
-                and func.value.id == "setuptools"
+                and getattr(func.value, "id", None) == "setuptools"
                 and func.attr == "setup"
             ):
                 continue
@@ -197,159 +179,163 @@ def _find_setup_call(
         return self._find_sub_setup_call(funcdefs)
 
     def _find_sub_setup_call(
-        self, elements
-    ):  # type: (List[Any]) -> Tuple[Optional[ast.Call], Optional[List[Any]]]
+        self, elements: list[ast.stmt]
+    ) -> tuple[ast.Call, list[ast.stmt]] | None:
         for element in elements:
             if not isinstance(element, (ast.FunctionDef, ast.If)):
                 continue
 
             setup_call = self._find_setup_call(element.body)
-            if setup_call != (None, None):
-                setup_call, body = setup_call
+            if setup_call is not None:
+                sub_call, body = setup_call
 
                 body = elements + body
 
-                return setup_call, body
+                return sub_call, body
 
-        return None, None
+        return None
 
-    def _find_install_requires(
-        self, call, body
-    ):  # type: (ast.Call, Iterable[Any]) -> List[str]
-        install_requires = []
+    def _find_install_requires(self, call: ast.Call, body: list[ast.stmt]) -> list[str]:
         value = self._find_in_call(call, "install_requires")
         if value is None:
             # Trying to find in kwargs
             kwargs = self._find_call_kwargs(call)
 
             if kwargs is None or not isinstance(kwargs, ast.Name):
-                return install_requires
+                return []
 
             variable = self._find_variable_in_body(body, kwargs.id)
-            if not isinstance(variable, (ast.Dict, ast.Call)):
-                return install_requires
-
-            if isinstance(variable, ast.Call):
-                if not isinstance(variable.func, ast.Name):
-                    return install_requires
 
-                if variable.func.id != "dict":
-                    return install_requires
+            if isinstance(variable, ast.Dict):
+                value = self._find_in_dict(variable, "install_requires")
 
+            elif (
+                isinstance(variable, ast.Call)
+                and isinstance(variable.func, ast.Name)
+                and variable.func.id == "dict"
+            ):
                 value = self._find_in_call(variable, "install_requires")
+
             else:
-                value = self._find_in_dict(variable, "install_requires")
+                raise SetupReaderError(f"Cannot handle variable {variable}")
 
         if value is None:
-            return install_requires
+            return []
 
-        if isinstance(value, ast.List):
-            for el in value.elts:
-                install_requires.append(el.s)
-        elif isinstance(value, ast.Name):
-            variable = self._find_variable_in_body(body, value.id)
+        if isinstance(value, ast.Name):
+            value = self._find_variable_in_body(body, value.id)
 
-            if variable is not None and isinstance(variable, ast.List):
-                for el in variable.elts:
-                    install_requires.append(el.s)
+        if isinstance(value, ast.Constant) and value.value is None:
+            return []
 
-        return install_requires
+        if isinstance(value, ast.List):
+            return string_list_values(value)
+
+        raise SetupReaderError(f"Cannot handle value of type {type(value)}")
 
     def _find_extras_require(
-        self, call, body
-    ):  # type: (ast.Call, Iterable[Any]) -> Dict[str, List]
-        extras_require = {}
+        self, call: ast.Call, body: list[ast.stmt]
+    ) -> dict[str, list[str]]:
         value = self._find_in_call(call, "extras_require")
         if value is None:
             # Trying to find in kwargs
             kwargs = self._find_call_kwargs(call)
 
             if kwargs is None or not isinstance(kwargs, ast.Name):
-                return extras_require
+                return {}
 
             variable = self._find_variable_in_body(body, kwargs.id)
-            if not isinstance(variable, (ast.Dict, ast.Call)):
-                return extras_require
-
-            if isinstance(variable, ast.Call):
-                if not isinstance(variable.func, ast.Name):
-                    return extras_require
-
-                if variable.func.id != "dict":
-                    return extras_require
+            if isinstance(variable, ast.Dict):
+                value = self._find_in_dict(variable, "extras_require")
 
+            elif (
+                isinstance(variable, ast.Call)
+                and isinstance(variable.func, ast.Name)
+                and variable.func.id == "dict"
+            ):
                 value = self._find_in_call(variable, "extras_require")
+
             else:
-                value = self._find_in_dict(variable, "extras_require")
+                raise SetupReaderError(f"Cannot handle variable {variable}")
 
         if value is None:
-            return extras_require
+            return {}
+
+        if isinstance(value, ast.Name):
+            value = self._find_variable_in_body(body, value.id)
+
+        if isinstance(value, ast.Constant) and value.value is None:
+            return {}
 
         if isinstance(value, ast.Dict):
+            extras_require: dict[str, list[str]] = {}
+            val: ast.expr | None
             for key, val in zip(value.keys, value.values):
+                if not isinstance(key, ast.Constant) or not isinstance(key.value, str):
+                    raise SetupReaderError(f"Cannot handle key {key}")
+
                 if isinstance(val, ast.Name):
                     val = self._find_variable_in_body(body, val.id)
 
-                if isinstance(val, ast.List):
-                    extras_require[key.s] = [e.s for e in val.elts]
-        elif isinstance(value, ast.Name):
-            variable = self._find_variable_in_body(body, value.id)
-
-            if variable is None or not isinstance(variable, ast.Dict):
-                return extras_require
+                if not isinstance(val, ast.List):
+                    raise SetupReaderError(f"Cannot handle value of type {type(val)}")
 
-            for key, val in zip(variable.keys, variable.values):
-                if isinstance(val, ast.Name):
-                    val = self._find_variable_in_body(body, val.id)
+                extras_require[key.value] = string_list_values(val)
 
-                if isinstance(val, ast.List):
-                    extras_require[key.s] = [e.s for e in val.elts]
+            return extras_require
 
-        return extras_require
+        raise SetupReaderError(f"Cannot handle value of type {type(value)}")
 
     def _find_single_string(
-        self, call, body, name
-    ):  # type: (ast.Call, List[Any], str) -> Optional[str]
+        self, call: ast.Call, body: list[ast.stmt], name: str
+    ) -> str | None:
         value = self._find_in_call(call, name)
         if value is None:
             # Trying to find in kwargs
             kwargs = self._find_call_kwargs(call)
 
             if kwargs is None or not isinstance(kwargs, ast.Name):
-                return
+                return None
 
             variable = self._find_variable_in_body(body, kwargs.id)
             if not isinstance(variable, (ast.Dict, ast.Call)):
-                return
+                return None
 
             if isinstance(variable, ast.Call):
                 if not isinstance(variable.func, ast.Name):
-                    return
+                    return None
 
                 if variable.func.id != "dict":
-                    return
+                    return None
 
                 value = self._find_in_call(variable, name)
             else:
                 value = self._find_in_dict(variable, name)
 
         if value is None:
-            return
+            return None
 
-        if isinstance(value, ast.Str):
-            return value.s
+        if isinstance(value, ast.Constant) and isinstance(value.value, str):
+            return value.value
         elif isinstance(value, ast.Name):
             variable = self._find_variable_in_body(body, value.id)
 
-            if variable is not None and isinstance(variable, ast.Str):
-                return variable.s
+            if (
+                variable is not None
+                and isinstance(variable, ast.Constant)
+                and isinstance(variable.value, str)
+            ):
+                return variable.value
+
+        return None
 
-    def _find_in_call(self, call, name):  # type: (ast.Call, str) -> Optional[Any]
+    def _find_in_call(self, call: ast.Call, name: str) -> Any | None:
         for keyword in call.keywords:
             if keyword.arg == name:
                 return keyword.value
+        return None
 
-    def _find_call_kwargs(self, call):  # type: (ast.Call) -> Optional[Any]
+    def _find_call_kwargs(self, call: ast.Call) -> Any | None:
         kwargs = None
         for keyword in call.keywords:
             if keyword.arg is None:
@@ -358,13 +344,9 @@ def _find_call_kwargs(self, call):  # type: (ast.Call) -> Optional[Any]
         return kwargs
 
     def _find_variable_in_body(
-        self, body, name
-    ):  # type: (Iterable[Any], str) -> Optional[Any]
-        found = None
+        self, body: list[ast.stmt], name: str
+    ) -> ast.expr | None:
         for elem in body:
-            if found:
-                break
-
             if not isinstance(elem, ast.Assign):
                 continue
 
@@ -375,7 +357,27 @@ def _find_variable_in_body(
                 if target.id == name:
                     return elem.value
 
-    def _find_in_dict(self, dict_, name):  # type: (ast.Call, str) -> Optional[Any]
+        return None
+
+    def _find_in_dict(self, dict_: ast.Dict, name: str) -> ast.expr | None:
         for key, val in zip(dict_.keys, dict_.values):
-            if isinstance(key, ast.Str) and key.s == name:
+            if (
+                isinstance(key, ast.Constant)
+                and isinstance(key.value, str)
+                and key.value == name
+            ):
                 return val
+
+        return None
+
+
+def string_list_values(value: ast.List) -> list[str]:
+    strings = []
+    for element in value.elts:
+        if isinstance(element, ast.Constant) and isinstance(element.value, str):
+            strings.append(element.value)
+
+        else:
+            raise SetupReaderError("Found non-string element in list")
+
+    return strings
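SetupReader's core trick is reading setup.py as an AST instead of executing it.
A simplified stand-in (not the vendored class) showing how a literal keyword
argument of setup() can be recovered from source text:

    import ast

    source = (
        "from setuptools import setup\n"
        'setup(name="demo", version="1.0", install_requires=["requests>=2"])\n'
    )

    def find_setup_kwarg(source: str, name: str):
        # Walk the parsed module, find the setup(...) call, and return the
        # literal value of the requested keyword -- nothing is executed.
        for node in ast.walk(ast.parse(source)):
            if (isinstance(node, ast.Call)
                    and isinstance(node.func, ast.Name)
                    and node.func.id == "setup"):
                for kw in node.keywords:
                    if kw.arg == name:
                        return ast.literal_eval(kw.value)
        return None

    assert find_setup_kwarg(source, "name") == "demo"
    assert find_setup_kwarg(source, "install_requires") == ["requests>=2"]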
diff --git a/conda_lock/_vendor/poetry/utils/source.py b/conda_lock/_vendor/poetry/utils/source.py
new file mode 100644
index 00000000..3fa6fb3d
--- /dev/null
+++ b/conda_lock/_vendor/poetry/utils/source.py
@@ -0,0 +1,20 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+
+if TYPE_CHECKING:
+    from tomlkit.items import Table
+
+    from conda_lock._vendor.poetry.config.source import Source
+
+
+def source_to_table(source: Source) -> Table:
+    from tomlkit import nl
+    from tomlkit import table
+
+    source_table: Table = table()
+    for key, value in source.to_dict().items():
+        source_table.add(key, value)
+    source_table.add(nl())
+    return source_table
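source_to_table builds a tomlkit Table so the result can be embedded into a
TOML document. A quick illustration with tomlkit directly (the source values
are made up):

    from tomlkit import document, nl, table

    source_table = table()
    for key, value in {"name": "internal", "url": "https://pypi.internal/simple"}.items():
        source_table.add(key, value)
    source_table.add(nl())  # trailing newline, as in source_to_table

    doc = document()
    doc.add("source", source_table)
    print(doc.as_string())  # renders a [source] table with the name/url keys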
diff --git a/conda_lock/_vendor/poetry/utils/wheel.py b/conda_lock/_vendor/poetry/utils/wheel.py
new file mode 100644
index 00000000..ae2fb6f7
--- /dev/null
+++ b/conda_lock/_vendor/poetry/utils/wheel.py
@@ -0,0 +1,47 @@
+from __future__ import annotations
+
+import logging
+
+from typing import TYPE_CHECKING
+
+from packaging.tags import Tag
+
+from conda_lock._vendor.poetry.utils.patterns import wheel_file_re
+
+
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.utils.env import Env
+
+
+logger = logging.getLogger(__name__)
+
+
+class InvalidWheelName(Exception):
+    pass
+
+
+class Wheel:
+    def __init__(self, filename: str) -> None:
+        wheel_info = wheel_file_re.match(filename)
+        if not wheel_info:
+            raise InvalidWheelName(f"{filename} is not a valid wheel filename.")
+
+        self.filename = filename
+        self.name = wheel_info.group("name").replace("_", "-")
+        self.version = wheel_info.group("ver").replace("_", "-")
+        self.build_tag = wheel_info.group("build")
+        self.pyversions = wheel_info.group("pyver").split(".")
+        self.abis = wheel_info.group("abi").split(".")
+        self.plats = wheel_info.group("plat").split(".")
+
+        self.tags = {
+            Tag(x, y, z) for x in self.pyversions for y in self.abis for z in self.plats
+        }
+
+    def get_minimum_supported_index(self, tags: list[Tag]) -> int | None:
+        indexes = [tags.index(t) for t in self.tags if t in tags]
+
+        return min(indexes) if indexes else None
+
+    def is_supported_by_environment(self, env: Env) -> bool:
+        return bool(set(env.supported_tags).intersection(self.tags))
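Wheel filenames can carry compressed tag sets ("py2.py3", multiple platforms,
and so on); Wheel.__init__ expands them into the full cross product using
packaging.tags.Tag. The expansion in isolation:

    from packaging.tags import Tag

    pyversions, abis, plats = ["py2", "py3"], ["none"], ["any"]
    tags = {Tag(x, y, z) for x in pyversions for y in abis for z in plats}
    assert Tag("py3", "none", "any") in tags
    assert len(tags) == 2  # one Tag per (interpreter, abi, platform) combination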
diff --git a/conda_lock/_vendor/poetry/vcs/__init__.py b/conda_lock/_vendor/poetry/vcs/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/conda_lock/_vendor/poetry/vcs/git/__init__.py b/conda_lock/_vendor/poetry/vcs/git/__init__.py
new file mode 100644
index 00000000..2dc28b9e
--- /dev/null
+++ b/conda_lock/_vendor/poetry/vcs/git/__init__.py
@@ -0,0 +1,6 @@
+from __future__ import annotations
+
+from conda_lock._vendor.poetry.vcs.git.backend import Git
+
+
+__all__ = ["Git"]
diff --git a/conda_lock/_vendor/poetry/vcs/git/backend.py b/conda_lock/_vendor/poetry/vcs/git/backend.py
new file mode 100644
index 00000000..fad37d6e
--- /dev/null
+++ b/conda_lock/_vendor/poetry/vcs/git/backend.py
@@ -0,0 +1,501 @@
+from __future__ import annotations
+
+import dataclasses
+import logging
+import re
+
+from pathlib import Path
+from subprocess import CalledProcessError
+from typing import TYPE_CHECKING
+from urllib.parse import urljoin
+from urllib.parse import urlparse
+from urllib.parse import urlunparse
+
+from dulwich import porcelain
+from dulwich.client import HTTPUnauthorized
+from dulwich.client import get_transport_and_path
+from dulwich.config import ConfigFile
+from dulwich.config import parse_submodules
+from dulwich.errors import NotGitRepository
+from dulwich.index import IndexEntry
+from dulwich.refs import ANNOTATED_TAG_SUFFIX
+from dulwich.repo import Repo
+
+from conda_lock._vendor.poetry.console.exceptions import PoetryConsoleError
+from conda_lock._vendor.poetry.utils.authenticator import get_default_authenticator
+from conda_lock._vendor.poetry.utils.helpers import remove_directory
+
+
+if TYPE_CHECKING:
+    from dulwich.client import FetchPackResult
+    from dulwich.client import GitClient
+
+
+logger = logging.getLogger(__name__)
+
+# A relative URL by definition starts with ../ or ./
+RELATIVE_SUBMODULE_REGEX = re.compile(r"^\.{1,2}/")
+
+
+def is_revision_sha(revision: str | None) -> bool:
+    return re.match(r"^\b[0-9a-f]{5,40}\b$", revision or "") is not None
+
+
+def annotated_tag(ref: str | bytes) -> bytes:
+    if isinstance(ref, str):
+        ref = ref.encode("utf-8")
+    return ref + ANNOTATED_TAG_SUFFIX
+
+
+@dataclasses.dataclass
+class GitRefSpec:
+    branch: str | None = None
+    revision: str | None = None
+    tag: str | None = None
+    ref: bytes = dataclasses.field(default_factory=lambda: b"HEAD")
+
+    def resolve(self, remote_refs: FetchPackResult) -> None:
+        """
+        Resolve the ref using the provided remote refs.
+        """
+        self._normalise(remote_refs=remote_refs)
+        self._set_head(remote_refs=remote_refs)
+
+    def _normalise(self, remote_refs: FetchPackResult) -> None:
+        """
+        Internal helper method to determine whether the given revision is
+            1. a branch or tag; if so, set the corresponding properties.
+            2. a short sha; if so, resolve the full sha and set it as the revision.
+        """
+        if self.revision:
+            ref = f"refs/tags/{self.revision}".encode()
+            if ref in remote_refs.refs or annotated_tag(ref) in remote_refs.refs:
+                # this is a tag incorrectly specified as a revision; tags take priority
+                self.tag = self.revision
+                self.revision = None
+            elif (
+                self.revision.encode("utf-8") in remote_refs.refs
+                or f"refs/heads/{self.revision}".encode() in remote_refs.refs
+            ):
+                # this is most likely a ref spec or a branch incorrectly specified
+                self.branch = self.revision
+                self.revision = None
+        elif (
+            self.branch
+            and f"refs/heads/{self.branch}".encode() not in remote_refs.refs
+            and (
+                f"refs/tags/{self.branch}".encode() in remote_refs.refs
+                or annotated_tag(f"refs/tags/{self.branch}") in remote_refs.refs
+            )
+        ):
+            # this is a tag incorrectly specified as a branch
+            self.tag = self.branch
+            self.branch = None
+
+        if self.revision and self.is_sha_short:
+            # revision is a short sha, resolve to full sha
+            short_sha = self.revision.encode("utf-8")
+            for sha in remote_refs.refs.values():
+                if sha.startswith(short_sha):
+                    self.revision = sha.decode("utf-8")
+                    break
+
+    def _set_head(self, remote_refs: FetchPackResult) -> None:
+        """
+        Internal helper method to populate ref and set its sha as the remote's head
+        and default ref.
+        """
+        self.ref = remote_refs.symrefs[b"HEAD"]
+
+        if self.revision:
+            head = self.revision.encode("utf-8")
+        else:
+            if self.tag:
+                ref = f"refs/tags/{self.tag}".encode()
+                annotated = annotated_tag(ref)
+                self.ref = annotated if annotated in remote_refs.refs else ref
+            elif self.branch:
+                self.ref = (
+                    self.branch.encode("utf-8")
+                    if self.is_ref
+                    else f"refs/heads/{self.branch}".encode()
+                )
+            head = remote_refs.refs[self.ref]
+
+        remote_refs.refs[self.ref] = remote_refs.refs[b"HEAD"] = head
+
+    @property
+    def key(self) -> str:
+        return self.revision or self.branch or self.tag or self.ref.decode("utf-8")
+
+    @property
+    def is_sha(self) -> bool:
+        return is_revision_sha(revision=self.revision)
+
+    @property
+    def is_ref(self) -> bool:
+        return self.branch is not None and self.branch.startswith("refs/")
+
+    @property
+    def is_sha_short(self) -> bool:
+        return self.revision is not None and self.is_sha and len(self.revision) < 40
+
+
+@dataclasses.dataclass
+class GitRepoLocalInfo:
+    repo: dataclasses.InitVar[Repo | Path]
+    origin: str = dataclasses.field(init=False)
+    revision: str = dataclasses.field(init=False)
+
+    def __post_init__(self, repo: Repo | Path) -> None:
+        repo = Git.as_repo(repo=repo) if not isinstance(repo, Repo) else repo
+        self.origin = Git.get_remote_url(repo=repo, remote="origin")
+        self.revision = Git.get_revision(repo=repo)
+
+
+class Git:
+    @staticmethod
+    def as_repo(repo: Path) -> Repo:
+        return Repo(str(repo))
+
+    @staticmethod
+    def get_remote_url(repo: Repo, remote: str = "origin") -> str:
+        with repo:
+            config = repo.get_config()
+            section = (b"remote", remote.encode("utf-8"))
+
+            url = ""
+            if config.has_section(section):
+                value = config.get(section, b"url")
+                url = value.decode("utf-8")
+
+            return url
+
+    @staticmethod
+    def get_revision(repo: Repo) -> str:
+        with repo:
+            return repo.head().decode("utf-8")
+
+    @classmethod
+    def info(cls, repo: Repo | Path) -> GitRepoLocalInfo:
+        return GitRepoLocalInfo(repo=repo)
+
+    @staticmethod
+    def get_name_from_source_url(url: str) -> str:
+        return re.sub(r"(.git)?$", "", url.rstrip("/").rsplit("/", 1)[-1])
+
+    @classmethod
+    def _fetch_remote_refs(cls, url: str, local: Repo) -> FetchPackResult:
+        """
+        Helper method to fetch remote refs.
+        """
+        client: GitClient
+        path: str
+
+        kwargs: dict[str, str] = {}
+        credentials = get_default_authenticator().get_credentials_for_git_url(url=url)
+
+        if credentials.password and credentials.username:
+            # we do this conditionally, as otherwise dulwich might complain if these
+            # parameters are passed in for an ssh url
+            kwargs["username"] = credentials.username
+            kwargs["password"] = credentials.password
+
+        config = local.get_config_stack()
+        client, path = get_transport_and_path(url, config=config, **kwargs)
+
+        with local:
+            result: FetchPackResult = client.fetch(
+                path,
+                local,
+                determine_wants=local.object_store.determine_wants_all,
+            )
+            return result
+
+    @staticmethod
+    def _clone_legacy(url: str, refspec: GitRefSpec, target: Path) -> Repo:
+        """
+        Helper method to facilitate falling back to the system-provided git client
+        via subprocess calls.
+        """
+        from conda_lock._vendor.poetry.vcs.git.system import SystemGit
+
+        logger.debug("Cloning '%s' using system git client", url)
+
+        if target.exists():
+            remove_directory(path=target, force=True)
+
+        revision = refspec.tag or refspec.branch or refspec.revision or "HEAD"
+
+        try:
+            SystemGit.clone(url, target)
+        except CalledProcessError:
+            raise PoetryConsoleError(
+                f"Failed to clone {url}, check your git configuration and permissions"
+                " for this repository."
+            )
+
+        if revision:
+            revision = revision.replace("refs/heads/", "")
+            revision = revision.replace("refs/tags/", "")
+
+        try:
+            SystemGit.checkout(revision, target)
+        except CalledProcessError:
+            raise PoetryConsoleError(f"Failed to checkout {url} at '{revision}'")
+
+        repo = Repo(str(target))
+        return repo
+
+    @classmethod
+    def _clone(cls, url: str, refspec: GitRefSpec, target: Path) -> Repo:
+        """
+        Helper method to clone a remote repository from the given `url` at the
+        specified ref spec.
+        """
+        local: Repo
+        if not target.exists():
+            local = Repo.init(str(target), mkdir=True)
+            porcelain.remote_add(local, "origin", url)
+        else:
+            local = Repo(str(target))
+
+        remote_refs = cls._fetch_remote_refs(url=url, local=local)
+
+        logger.debug(
+            "Cloning %s at '%s' to %s", url, refspec.key, target
+        )
+
+        try:
+            refspec.resolve(remote_refs=remote_refs)
+        except KeyError:  # branch / ref does not exist
+            raise PoetryConsoleError(
+                f"Failed to clone {url} at '{refspec.key}', verify ref exists on"
+                " remote."
+            )
+
+        # ensure local HEAD matches remote
+        local.refs[b"HEAD"] = remote_refs.refs[b"HEAD"]
+
+        if refspec.is_ref:
+            # set ref to current HEAD
+            local.refs[refspec.ref] = local.refs[b"HEAD"]
+
+        for base, prefix in {
+            (b"refs/remotes/origin", b"refs/heads/"),
+            (b"refs/tags", b"refs/tags"),
+        }:
+            local.refs.import_refs(
+                base=base,
+                other={
+                    n[len(prefix) :]: v
+                    for (n, v) in remote_refs.refs.items()
+                    if n.startswith(prefix) and not n.endswith(ANNOTATED_TAG_SUFFIX)
+                },
+            )
+
+        try:
+            with local:
+                local.reset_index()
+        except (AssertionError, KeyError) as e:
+            # this implies the ref we need does not exist or is invalid
+            if isinstance(e, KeyError):
+                # the local copy is in a bad state; let's remove it
+                logger.debug(
+                    "Removing local clone (%s) of repository as it is in a"
+                    " broken state.",
+                    local.path,
+                )
+                remove_directory(Path(local.path), force=True)
+
+            if isinstance(e, AssertionError) and "Invalid object name" not in str(e):
+                raise
+
+            logger.debug(
+                "\nRequested ref (%s) was not fetched to local copy and"
+                " cannot be used. The following error was"
+                " raised:\n\n\t%s",
+                refspec.key,
+                e,
+            )
+
+            raise PoetryConsoleError(
+                f"Failed to clone {url} at '{refspec.key}', verify ref exists on"
+                " remote."
+            )
+
+        return local
+
+    @classmethod
+    def _clone_submodules(cls, repo: Repo) -> None:
+        """
+        Helper method to identify configured submodules and clone them recursively.
+        """
+        repo_root = Path(repo.path)
+        for submodule in cls._get_submodules(repo):
+            path_absolute = repo_root / submodule.path
+            source_root = path_absolute.parent
+            source_root.mkdir(parents=True, exist_ok=True)
+            cls.clone(
+                url=submodule.url,
+                source_root=source_root,
+                name=path_absolute.name,
+                revision=submodule.revision,
+                clean=path_absolute.exists()
+                and not path_absolute.joinpath(".git").is_dir(),
+            )
+
+    @classmethod
+    def _get_submodules(cls, repo: Repo) -> list[SubmoduleInfo]:
+        modules_config = Path(repo.path, ".gitmodules")
+
+        if not modules_config.exists():
+            return []
+
+        config = ConfigFile.from_path(str(modules_config))
+
+        submodules: list[SubmoduleInfo] = []
+        for path, url, name in parse_submodules(config):
+            url_str = url.decode("utf-8")
+            path_str = path.decode("utf-8")
+            name_str = name.decode("utf-8")
+
+            if RELATIVE_SUBMODULE_REGEX.search(url_str):
+                url_str = urlpathjoin(f"{cls.get_remote_url(repo)}/", url_str)
+
+            with repo:
+                index = repo.open_index()
+
+                try:
+                    entry = index[path]
+                except KeyError:
+                    logger.debug(
+                        "Skip submodule %s in %s, path %s not found",
+                        name,
+                        repo.path,
+                        path,
+                    )
+                    continue
+
+                assert isinstance(entry, IndexEntry)
+                revision = entry.sha.decode("utf-8")
+
+            submodules.append(
+                SubmoduleInfo(
+                    path=path_str,
+                    url=url_str,
+                    name=name_str,
+                    revision=revision,
+                )
+            )
+
+        return submodules
+
+    @staticmethod
+    def is_using_legacy_client() -> bool:
+        from conda_lock._vendor.poetry.config.config import Config
+
+        legacy_client: bool = Config.create().get(
+            "experimental.system-git-client", False
+        )
+        return legacy_client
+
+    @staticmethod
+    def get_default_source_root() -> Path:
+        from conda_lock._vendor.poetry.config.config import Config
+
+        return Path(Config.create().get("cache-dir")) / "src"
+
+    @classmethod
+    def clone(
+        cls,
+        url: str,
+        name: str | None = None,
+        branch: str | None = None,
+        tag: str | None = None,
+        revision: str | None = None,
+        source_root: Path | None = None,
+        clean: bool = False,
+    ) -> Repo:
+        source_root = source_root or cls.get_default_source_root()
+        source_root.mkdir(parents=True, exist_ok=True)
+
+        name = name or cls.get_name_from_source_url(url=url)
+        target = source_root / name
+        refspec = GitRefSpec(branch=branch, revision=revision, tag=tag)
+
+        if target.exists():
+            if clean:
+                # force clean the local copy if it exists, do not reuse
+                remove_directory(target, force=True)
+            else:
+                # check if the current local copy matches the requested ref spec
+                try:
+                    current_repo = Repo(str(target))
+
+                    with current_repo:
+                        current_sha = current_repo.head().decode("utf-8")
+                except (NotGitRepository, AssertionError, KeyError):
+                    # something is wrong with the current checkout, clean it
+                    remove_directory(target, force=True)
+                else:
+                    if not is_revision_sha(revision=current_sha):
+                        # head is not a sha; this will cause issues later, let's reset
+                        remove_directory(target, force=True)
+                    elif (
+                        refspec.is_sha
+                        and refspec.revision is not None
+                        and current_sha.startswith(refspec.revision)
+                    ):
+                        # the requested revision already matches local HEAD; skip the remote fetch
+                        return current_repo
+
+        try:
+            if not cls.is_using_legacy_client():
+                local = cls._clone(url=url, refspec=refspec, target=target)
+                cls._clone_submodules(repo=local)
+                return local
+        except HTTPUnauthorized:
+            # we do this here to handle http authenticated repositories as dulwich
+            # does not currently support using credentials from git-credential helpers.
+            # upstream issue: https://github.com/jelmer/dulwich/issues/873
+            #
+            # this is a little inefficient; however, it is preferred as it works
+            # transparently without additional configuration or changes for existing
+            # projects that use http basic auth credentials.
+            logger.debug(
+                "Unable to fetch from private repository '%s', falling back to"
+                " system git",
+                url,
+            )
+
+        # fallback to legacy git client
+        return cls._clone_legacy(url=url, refspec=refspec, target=target)
+
+
+def urlpathjoin(base: str, path: str) -> str:
+    """
+    Allow any URL to be joined with a path
+
+    This works around an issue with urllib.parse.urljoin where it only handles
+    relative URLs for protocols contained in urllib.parse.uses_relative. As it
+    happens, common protocols used with git, like ssh or git+ssh, are not in that
+    list.
+
+    Thus we need to implement our own version of urljoin that handles all URL
+    protocols. This is accomplished by using urlparse and urlunparse to split
+    the URL into its components, join the path, and then reassemble the URL.
+
+    See: https://github.com/python-poetry/poetry/issues/6499#issuecomment-1564712609
+    """
+    parsed_base = urlparse(base)
+    new = parsed_base._replace(path=urljoin(parsed_base.path, path))
+    return urlunparse(new)
+
+
+@dataclasses.dataclass
+class SubmoduleInfo:
+    path: str
+    url: str
+    name: str
+    revision: str
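urlpathjoin works around urljoin's scheme allowlist (urllib.parse.uses_relative):
for ssh-style git URLs, plain urljoin discards the base entirely. A runnable
comparison using the same function body as above (the URLs are illustrative):

    from urllib.parse import urljoin, urlparse, urlunparse

    def urlpathjoin(base: str, path: str) -> str:
        parsed_base = urlparse(base)
        new = parsed_base._replace(path=urljoin(parsed_base.path, path))
        return urlunparse(new)

    base = "ssh://git@example.com/org/repo/"
    # urljoin bails out for schemes outside uses_relative and returns the path as-is:
    assert urljoin(base, "../other.git") == "../other.git"
    # urlpathjoin keeps the scheme and host and resolves only the path component:
    assert urlpathjoin(base, "../other.git") == "ssh://git@example.com/org/other.git"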
diff --git a/conda_lock/_vendor/poetry/vcs/git/system.py b/conda_lock/_vendor/poetry/vcs/git/system.py
new file mode 100644
index 00000000..36610510
--- /dev/null
+++ b/conda_lock/_vendor/poetry/vcs/git/system.py
@@ -0,0 +1,57 @@
+from __future__ import annotations
+
+import os
+import subprocess
+
+from typing import TYPE_CHECKING
+
+from dulwich.client import find_git_command
+
+
+if TYPE_CHECKING:
+    from pathlib import Path
+    from typing import Any
+
+
+class SystemGit:
+    @classmethod
+    def clone(cls, repository: str, dest: Path) -> None:
+        cls._check_parameter(repository)
+
+        cls.run("clone", "--recurse-submodules", "--", repository, str(dest))
+
+    @classmethod
+    def checkout(cls, rev: str, target: Path | None = None) -> None:
+        cls._check_parameter(rev)
+        cls.run("checkout", rev, folder=target)
+
+    @staticmethod
+    def run(*args: Any, **kwargs: Any) -> None:
+        folder = kwargs.pop("folder", None)
+        if folder:
+            args = (
+                "--git-dir",
+                (folder / ".git").as_posix(),
+                "--work-tree",
+                folder.as_posix(),
+                *args,
+            )
+
+        git_command = find_git_command()
+        env = os.environ.copy()
+        env["GIT_TERMINAL_PROMPT"] = "0"
+        subprocess.check_call(
+            git_command + list(args),
+            stderr=subprocess.DEVNULL,
+            stdout=subprocess.DEVNULL,
+            env=env,
+            text=True,
+        )
+
+    @staticmethod
+    def _check_parameter(parameter: str) -> None:
+        """
+        Checks a git parameter to avoid unwanted code execution.
+        """
+        if parameter.strip().startswith("-"):
+            raise RuntimeError(f"Invalid Git parameter: {parameter}")
diff --git a/conda_lock/_vendor/poetry/version/version_selector.py b/conda_lock/_vendor/poetry/version/version_selector.py
index 08c9013c..22d960d5 100644
--- a/conda_lock/_vendor/poetry/version/version_selector.py
+++ b/conda_lock/_vendor/poetry/version/version_selector.py
@@ -1,20 +1,25 @@
-from typing import Union
+from __future__ import annotations
 
-from conda_lock._vendor.poetry.core.packages import Package
-from conda_lock._vendor.poetry.core.semver import Version
+from typing import TYPE_CHECKING
 
 
-class VersionSelector(object):
-    def __init__(self, pool):
+if TYPE_CHECKING:
+    from conda_lock._vendor.poetry.core.packages.package import Package
+
+    from conda_lock._vendor.poetry.repositories import RepositoryPool
+
+
+class VersionSelector:
+    def __init__(self, pool: RepositoryPool) -> None:
         self._pool = pool
 
     def find_best_candidate(
         self,
-        package_name,  # type: str
-        target_package_version=None,  # type:  Union[str, None]
-        allow_prereleases=False,  # type: bool
-        source=None,  # type: str
-    ):  # type: (...) -> Union[Package, bool]
+        package_name: str,
+        target_package_version: str | None = None,
+        allow_prereleases: bool = False,
+        source: str | None = None,
+    ) -> Package | None:
         """
         Given a package name and optional version,
         returns the latest Package that matches
@@ -25,15 +30,15 @@ def find_best_candidate(
             package_name,
             {
                 "version": target_package_version or "*",
-                "allow_prereleases": allow_prereleases,
+                "allow-prereleases": allow_prereleases,
                 "source": source,
             },
         )
         candidates = self._pool.find_packages(dependency)
-        only_prereleases = all([c.version.is_prerelease() for c in candidates])
+        only_prereleases = all(c.version.is_unstable() for c in candidates)
 
         if not candidates:
-            return False
+            return None
 
         package = None
         for candidate in candidates:
@@ -48,30 +53,4 @@ def find_best_candidate(
             if package is None or package.version < candidate.version:
                 package = candidate
 
-        if package is None:
-            return False
         return package
-
-    def find_recommended_require_version(self, package):
-        version = package.version
-
-        return self._transform_version(version.text, package.pretty_version)
-
-    def _transform_version(self, version, pretty_version):
-        try:
-            parsed = Version.parse(version)
-            parts = [parsed.major, parsed.minor, parsed.patch]
-        except ValueError:
-            return pretty_version
-
-        parts = parts[: parsed.precision]
-
-        # check to see if we have a semver-looking version
-        if len(parts) < 3:
-            version = pretty_version
-        else:
-            version = ".".join(str(p) for p in parts)
-            if parsed.is_prerelease():
-                version += "-{}".format(".".join(str(p) for p in parsed.prerelease))
-
-        return "^{}".format(version)
diff --git a/conda_lock/_vendor/poetry-core.LICENSE b/conda_lock/_vendor/poetry_core.LICENSE
similarity index 100%
rename from conda_lock/_vendor/poetry-core.LICENSE
rename to conda_lock/_vendor/poetry_core.LICENSE
diff --git a/conda_lock/_vendor/poetry_core.LICENSE.APACHE b/conda_lock/_vendor/poetry_core.LICENSE.APACHE
new file mode 100644
index 00000000..f433b1a5
--- /dev/null
+++ b/conda_lock/_vendor/poetry_core.LICENSE.APACHE
@@ -0,0 +1,177 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
diff --git a/conda_lock/_vendor/poetry_core.LICENSE.BSD b/conda_lock/_vendor/poetry_core.LICENSE.BSD
new file mode 100644
index 00000000..42ce7b75
--- /dev/null
+++ b/conda_lock/_vendor/poetry_core.LICENSE.BSD
@@ -0,0 +1,23 @@
+Copyright (c) Donald Stufft and individual contributors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+    1. Redistributions of source code must retain the above copyright notice,
+       this list of conditions and the following disclaimer.
+
+    2. Redistributions in binary form must reproduce the above copyright
+       notice, this list of conditions and the following disclaimer in the
+       documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.