diff --git a/CHANGES.md b/CHANGES.md index bbbdc9c9..1201794a 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -10,6 +10,7 @@ * Update filelock 3.12.4 (c1163ae) to 3.14.0 (8556141) * Update idna library 3.4 (cab054c) to 3.7 (1d365e1) * Update imdbpie 5.6.4 (f695e87) to 5.6.5 (f8ed7a0) +* Update package resource API 68.1.2 (1ef36f2) to 68.2.2 (8ad627d) * Update profilehooks module 1.12.1 (c3fc078) to 1.13.0.dev0 (99f8a31) * Update pytz 2023.3/2023c (488d3eb) to 2024.1/2024a (3680953) * Update Rarfile 4.1a1 (8a72967) to 4.2 (db1df33) diff --git a/lib/pkg_resources/__init__.py b/lib/pkg_resources/__init__.py index 3baa1f3c..c10a88e5 100644 --- a/lib/pkg_resources/__init__.py +++ b/lib/pkg_resources/__init__.py @@ -1,3 +1,6 @@ +# TODO: Add Generic type annotations to initialized collections. +# For now we'd simply use implicit Any/Unknown which would add redundant annotations +# mypy: disable-error-code="var-annotated" """ Package resource API -------------------- @@ -18,11 +21,35 @@ This module is deprecated. Users are directed to :mod:`importlib.resources`, """ import sys + +if sys.version_info < (3, 8): + raise RuntimeError("Python 3.8 or later is required") + import os import io import time import re import types +from typing import ( + Any, + Mapping, + MutableSequence, + NamedTuple, + NoReturn, + Sequence, + Set, + Tuple, + Type, + Union, + TYPE_CHECKING, + List, + Protocol, + Callable, + Dict, + Iterable, + Optional, + TypeVar, +) import zipfile import zipimport import warnings @@ -41,21 +68,16 @@ import inspect import ntpath import posixpath import importlib +import importlib.abc +import importlib.machinery from pkgutil import get_importer -try: - import _imp -except ImportError: - # Python 3.2 compatibility - import imp as _imp - -try: - FileExistsError -except NameError: - FileExistsError = OSError +import _imp # capture these to bypass sandboxing from os import utime +from os import open as os_open +from os.path import isdir, split try: from os import mkdir, rename, unlink @@ -65,55 +87,19 @@ except ImportError: # no write support, probably under GAE WRITE_SUPPORT = False -from os import open as os_open -from os.path import isdir, split - -try: - import importlib.machinery as importlib_machinery - - # access attribute to force import under delayed import mechanisms. - importlib_machinery.__name__ -except ImportError: - importlib_machinery = None - from pkg_resources.extern.jaraco.text import ( yield_lines, drop_comment, join_continuation, ) +from pkg_resources.extern.packaging import markers as _packaging_markers +from pkg_resources.extern.packaging import requirements as _packaging_requirements +from pkg_resources.extern.packaging import utils as _packaging_utils +from pkg_resources.extern.packaging import version as _packaging_version +from pkg_resources.extern.platformdirs import user_cache_dir as _user_cache_dir -from pkg_resources.extern import platformdirs -from pkg_resources.extern import packaging - -__import__('pkg_resources.extern.packaging.version') -__import__('pkg_resources.extern.packaging.specifiers') -__import__('pkg_resources.extern.packaging.requirements') -__import__('pkg_resources.extern.packaging.markers') -__import__('pkg_resources.extern.packaging.utils') - -if sys.version_info < (3, 5): - raise RuntimeError("Python 3.5 or later is required") - -# declare some globals that will be defined later to -# satisfy the linters. 
-require = None
-working_set = None
-add_activation_listener = None
-resources_stream = None
-cleanup_resources = None
-resource_dir = None
-resource_stream = None
-set_extraction_path = None
-resource_isdir = None
-resource_string = None
-iter_entry_points = None
-resource_listdir = None
-resource_filename = None
-resource_exists = None
-_distribution_finders = None
-_namespace_handlers = None
-_namespace_packages = None
-
+if TYPE_CHECKING:
+    from _typeshed import StrPath

 warnings.warn(
     "pkg_resources is deprecated as an API. "
@@ -123,6 +109,28 @@ warnings.warn(
 )


+_T = TypeVar("_T")
+# Type aliases
+_NestedStr = Union[str, Iterable[Union[str, Iterable["_NestedStr"]]]]
+_InstallerType = Callable[["Requirement"], Optional["Distribution"]]
+_PkgReqType = Union[str, "Requirement"]
+_EPDistType = Union["Distribution", _PkgReqType]
+_MetadataType = Optional["IResourceProvider"]
+# Any object works, but let's indicate we expect something like a module (optionally has __loader__ or __file__)
+_ModuleLike = Union[object, types.ModuleType]
+_ProviderFactoryType = Callable[[_ModuleLike], "IResourceProvider"]
+_DistFinderType = Callable[[_T, str, bool], Iterable["Distribution"]]
+_NSHandlerType = Callable[[_T, str, str, types.ModuleType], Optional[str]]
+_AdapterT = TypeVar(
+    "_AdapterT", _DistFinderType[Any], _ProviderFactoryType, _NSHandlerType[Any]
+)
+
+
+# Use _typeshed.importlib.LoaderProtocol once available https://github.com/python/typeshed/pull/11890
+class _LoaderProtocol(Protocol):
+    def load_module(self, fullname: str, /) -> types.ModuleType: ...
+
+
 _PEP440_FALLBACK = re.compile(r"^v?(?P<safe>(?:[0-9]+!)?[0-9]+(?:\.[0-9]+)*)", re.I)


@@ -133,15 +141,15 @@ class PEP440Warning(RuntimeWarning):
     """


-parse_version = packaging.version.Version
+parse_version = _packaging_version.Version


-_state_vars = {}
+_state_vars: Dict[str, str] = {}


-def _declare_state(vartype, **kw):
-    globals().update(kw)
-    _state_vars.update(dict.fromkeys(kw, vartype))
+def _declare_state(vartype: str, varname: str, initial_value: _T) -> _T:
+    _state_vars[varname] = vartype
+    return initial_value


 def __getstate__():
@@ -317,7 +325,7 @@ class VersionConflict(ResolutionError):
     def report(self):
         return self._template.format(**locals())

-    def with_context(self, required_by):
+    def with_context(self, required_by: Set[Union["Distribution", str]]):
         """
         If required_by is non-empty, return a version of self that is a
         ContextualVersionConflict.
@@ -374,7 +382,7 @@ class UnknownExtra(ResolutionError): """Distribution doesn't have an "extra feature" of the given name""" -_provider_factories = {} +_provider_factories: Dict[Type[_ModuleLike], _ProviderFactoryType] = {} PY_MAJOR = '{}.{}'.format(*sys.version_info) EGG_DIST = 3 @@ -384,7 +392,9 @@ CHECKOUT_DIST = 0 DEVELOP_DIST = -1 -def register_loader_type(loader_type, provider_factory): +def register_loader_type( + loader_type: Type[_ModuleLike], provider_factory: _ProviderFactoryType +): """Register `provider_factory` to make providers for `loader_type` `loader_type` is the type or class of a PEP 302 ``module.__loader__``, @@ -394,7 +404,7 @@ def register_loader_type(loader_type, provider_factory): _provider_factories[loader_type] = provider_factory -def get_provider(moduleOrReq): +def get_provider(moduleOrReq: Union[str, "Requirement"]): """Return an IResourceProvider for the named module or requirement""" if isinstance(moduleOrReq, Requirement): return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] @@ -407,20 +417,18 @@ def get_provider(moduleOrReq): return _find_adapter(_provider_factories, loader)(module) -def _macos_vers(_cache=[]): - if not _cache: - version = platform.mac_ver()[0] - # fallback for MacPorts - if version == '': - plist = '/System/Library/CoreServices/SystemVersion.plist' - if os.path.exists(plist): - if hasattr(plistlib, 'readPlist'): - plist_content = plistlib.readPlist(plist) - if 'ProductVersion' in plist_content: - version = plist_content['ProductVersion'] - - _cache.append(version.split('.')) - return _cache[0] +@functools.lru_cache(maxsize=None) +def _macos_vers(): + version = platform.mac_ver()[0] + # fallback for MacPorts + if version == '': + plist = '/System/Library/CoreServices/SystemVersion.plist' + if os.path.exists(plist): + with open(plist, 'rb') as fh: + plist_content = plistlib.load(fh) + if 'ProductVersion' in plist_content: + version = plist_content['ProductVersion'] + return version.split('.') def _macos_arch(machine): @@ -458,7 +466,7 @@ darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)") get_platform = get_build_platform -def compatible_platforms(provided, required): +def compatible_platforms(provided: Optional[str], required: Optional[str]): """Can code for the `provided` platform run on the `required` platform? Returns true if either platform is ``None``, or the platforms are equal. 
@@ -507,20 +515,7 @@ def compatible_platforms(provided, required): return False -def run_script(dist_spec, script_name): - """Locate distribution `dist_spec` and run its `script_name` script""" - ns = sys._getframe(1).f_globals - name = ns['__name__'] - ns.clear() - ns['__name__'] = name - require(dist_spec)[0].run_script(script_name, ns) - - -# backward compatibility -run_main = run_script - - -def get_distribution(dist): +def get_distribution(dist: _EPDistType): """Return a current distribution object for a Requirement or string""" if isinstance(dist, str): dist = Requirement.parse(dist) @@ -531,78 +526,80 @@ def get_distribution(dist): return dist -def load_entry_point(dist, group, name): +def load_entry_point(dist: _EPDistType, group: str, name: str): """Return `name` entry point of `group` for `dist` or raise ImportError""" return get_distribution(dist).load_entry_point(group, name) -def get_entry_map(dist, group=None): +def get_entry_map(dist: _EPDistType, group: Optional[str] = None): """Return the entry point map for `group`, or the full entry map""" return get_distribution(dist).get_entry_map(group) -def get_entry_info(dist, group, name): +def get_entry_info(dist: _EPDistType, group: str, name: str): """Return the EntryPoint object for `group`+`name`, or ``None``""" return get_distribution(dist).get_entry_info(group, name) -class IMetadataProvider: - def has_metadata(name): +class IMetadataProvider(Protocol): + def has_metadata(self, name: str) -> bool: """Does the package's distribution contain the named metadata?""" - def get_metadata(name): + def get_metadata(self, name: str): """The named metadata resource as a string""" - def get_metadata_lines(name): + def get_metadata_lines(self, name: str): """Yield named metadata resource as list of non-blank non-comment lines Leading and trailing whitespace is stripped from each line, and lines with ``#`` as the first non-blank character are omitted.""" - def metadata_isdir(name): + def metadata_isdir(self, name: str) -> bool: """Is the named metadata a directory? 
(like ``os.path.isdir()``)""" - def metadata_listdir(name): + def metadata_listdir(self, name: str): """List of metadata names in the directory (like ``os.listdir()``)""" - def run_script(script_name, namespace): + def run_script(self, script_name: str, namespace: Dict[str, Any]): """Execute the named script in the supplied namespace dictionary""" -class IResourceProvider(IMetadataProvider): +class IResourceProvider(IMetadataProvider, Protocol): """An object that provides access to package resources""" - def get_resource_filename(manager, resource_name): + def get_resource_filename(self, manager: "ResourceManager", resource_name: str): """Return a true filesystem path for `resource_name` - `manager` must be an ``IResourceManager``""" + `manager` must be a ``ResourceManager``""" - def get_resource_stream(manager, resource_name): + def get_resource_stream(self, manager: "ResourceManager", resource_name: str): """Return a readable file-like object for `resource_name` - `manager` must be an ``IResourceManager``""" + `manager` must be a ``ResourceManager``""" - def get_resource_string(manager, resource_name): - """Return a string containing the contents of `resource_name` + def get_resource_string( + self, manager: "ResourceManager", resource_name: str + ) -> bytes: + """Return the contents of `resource_name` as :obj:`bytes` - `manager` must be an ``IResourceManager``""" + `manager` must be a ``ResourceManager``""" - def has_resource(resource_name): + def has_resource(self, resource_name: str): """Does the package contain the named resource?""" - def resource_isdir(resource_name): + def resource_isdir(self, resource_name: str): """Is the named resource a directory? (like ``os.path.isdir()``)""" - def resource_listdir(resource_name): + def resource_listdir(self, resource_name: str): """List of resource names in the directory (like ``os.listdir()``)""" class WorkingSet: """A collection of active distributions on sys.path (or a similar list)""" - def __init__(self, entries=None): + def __init__(self, entries: Optional[Iterable[str]] = None): """Create working set from list of path entries (default=sys.path)""" - self.entries = [] + self.entries: List[str] = [] self.entry_keys = {} self.by_key = {} self.normalized_to_canonical_keys = {} @@ -656,7 +653,7 @@ class WorkingSet: sys.path[:] = ws.entries return ws - def add_entry(self, entry): + def add_entry(self, entry: str): """Add a path item to ``.entries``, finding any distributions on it ``find_distributions(entry, True)`` is used to find distributions @@ -671,11 +668,11 @@ class WorkingSet: for dist in find_distributions(entry, True): self.add(dist, entry, False) - def __contains__(self, dist): + def __contains__(self, dist: "Distribution"): """True if `dist` is the active distribution for its project""" return self.by_key.get(dist.key) == dist - def find(self, req): + def find(self, req: "Requirement"): """Find a distribution matching requirement `req` If there is an active distribution for the requested project, this @@ -699,7 +696,7 @@ class WorkingSet: raise VersionConflict(dist, req) return dist - def iter_entry_points(self, group, name=None): + def iter_entry_points(self, group: str, name: Optional[str] = None): """Yield entry point objects from `group` matching `name` If `name` is None, yields all entry points in `group` from all @@ -713,7 +710,7 @@ class WorkingSet: if name is None or name == entry.name ) - def run_script(self, requires, script_name): + def run_script(self, requires: str, script_name: str): """Locate distribution for 
`requires` and run `script_name` script""" ns = sys._getframe(1).f_globals name = ns['__name__'] @@ -738,7 +735,13 @@ class WorkingSet: seen[key] = 1 yield self.by_key[key] - def add(self, dist, entry=None, insert=True, replace=False): + def add( + self, + dist: "Distribution", + entry: Optional[str] = None, + insert: bool = True, + replace: bool = False, + ): """Add `dist` to working set, associated with `entry` If `entry` is unspecified, it defaults to the ``.location`` of `dist`. @@ -762,7 +765,7 @@ class WorkingSet: return self.by_key[dist.key] = dist - normalized_name = packaging.utils.canonicalize_name(dist.key) + normalized_name = _packaging_utils.canonicalize_name(dist.key) self.normalized_to_canonical_keys[normalized_name] = dist.key if dist.key not in keys: keys.append(dist.key) @@ -772,11 +775,11 @@ class WorkingSet: def resolve( self, - requirements, - env=None, - installer=None, - replace_conflicting=False, - extras=None, + requirements: Iterable["Requirement"], + env: Optional["Environment"] = None, + installer: Optional[_InstallerType] = None, + replace_conflicting: bool = False, + extras: Optional[Tuple[str, ...]] = None, ): """List all distributions needed to (recursively) meet `requirements` @@ -846,7 +849,7 @@ class WorkingSet: def _resolve_dist( self, req, best, replace_conflicting, env, installer, required_by, to_activate - ): + ) -> "Distribution": dist = best.get(req.key) if dist is None: # Find the best distribution and add it to the map @@ -875,7 +878,13 @@ class WorkingSet: raise VersionConflict(dist, req).with_context(dependent_req) return dist - def find_plugins(self, plugin_env, full_env=None, installer=None, fallback=True): + def find_plugins( + self, + plugin_env: "Environment", + full_env: Optional["Environment"] = None, + installer: Optional[_InstallerType] = None, + fallback: bool = True, + ): """Find all activatable distributions in `plugin_env` Example usage:: @@ -951,12 +960,12 @@ class WorkingSet: # success, no need to try any more versions of this project break - distributions = list(distributions) - distributions.sort() + sorted_distributions = list(distributions) + sorted_distributions.sort() - return distributions, error_info + return sorted_distributions, error_info - def require(self, *requirements): + def require(self, *requirements: _NestedStr): """Ensure that distributions matching `requirements` are activated `requirements` must be a string or a (possibly-nested) sequence @@ -972,7 +981,9 @@ class WorkingSet: return needed - def subscribe(self, callback, existing=True): + def subscribe( + self, callback: Callable[["Distribution"], object], existing: bool = True + ): """Invoke `callback` for all distributions If `existing=True` (default), @@ -1008,12 +1019,14 @@ class WorkingSet: self.callbacks = callbacks[:] -class _ReqExtras(dict): +class _ReqExtras(Dict["Requirement", Tuple[str, ...]]): """ Map each requirement to the extras that demanded it. """ - def markers_pass(self, req, extras=None): + def markers_pass( + self, req: "Requirement", extras: Optional[Tuple[str, ...]] = None + ): """ Evaluate markers for req against each extra that demanded it. 
@@ -1032,7 +1045,10 @@ class Environment: """Searchable snapshot of distributions on a search path""" def __init__( - self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR + self, + search_path: Optional[Sequence[str]] = None, + platform: Optional[str] = get_supported_platform(), + python: Optional[str] = PY_MAJOR, ): """Snapshot distributions available on a search path @@ -1055,7 +1071,7 @@ class Environment: self.python = python self.scan(search_path) - def can_add(self, dist): + def can_add(self, dist: "Distribution"): """Is distribution `dist` acceptable for this environment? The distribution must match the platform and python version @@ -1069,11 +1085,11 @@ class Environment: ) return py_compat and compatible_platforms(dist.platform, self.platform) - def remove(self, dist): + def remove(self, dist: "Distribution"): """Remove `dist` from the environment""" self._distmap[dist.key].remove(dist) - def scan(self, search_path=None): + def scan(self, search_path: Optional[Sequence[str]] = None): """Scan `search_path` for distributions usable in this environment Any distributions found are added to the environment. @@ -1088,7 +1104,7 @@ class Environment: for dist in find_distributions(item): self.add(dist) - def __getitem__(self, project_name): + def __getitem__(self, project_name: str): """Return a newest-to-oldest list of distributions for `project_name` Uses case-insensitive `project_name` comparison, assuming all the @@ -1099,7 +1115,7 @@ class Environment: distribution_key = project_name.lower() return self._distmap.get(distribution_key, []) - def add(self, dist): + def add(self, dist: "Distribution"): """Add `dist` if we ``can_add()`` it and it has not already been added""" if self.can_add(dist) and dist.has_version(): dists = self._distmap.setdefault(dist.key, []) @@ -1107,7 +1123,13 @@ class Environment: dists.append(dist) dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) - def best_match(self, req, working_set, installer=None, replace_conflicting=False): + def best_match( + self, + req: "Requirement", + working_set: WorkingSet, + installer: Optional[Callable[["Requirement"], Any]] = None, + replace_conflicting: bool = False, + ): """Find distribution best matching `req` and usable on `working_set` This calls the ``find(req)`` method of the `working_set` to see if a @@ -1134,7 +1156,11 @@ class Environment: # try to download/install return self.obtain(req, installer) - def obtain(self, requirement, installer=None): + def obtain( + self, + requirement: "Requirement", + installer: Optional[Callable[["Requirement"], Any]] = None, + ): """Obtain a distribution matching `requirement` (e.g. via download) Obtain a distro that matches requirement (e.g. via download). In the @@ -1143,8 +1169,7 @@ class Environment: None is returned instead. 
This method is a hook that allows subclasses to attempt other ways of obtaining a distribution before falling back to the `installer` argument.""" - if installer is not None: - return installer(requirement) + return installer(requirement) if installer else None def __iter__(self): """Yield the unique project names of the available distributions""" @@ -1152,7 +1177,7 @@ class Environment: if self[key]: yield key - def __iadd__(self, other): + def __iadd__(self, other: Union["Distribution", "Environment"]): """In-place addition of a distribution or environment""" if isinstance(other, Distribution): self.add(other) @@ -1164,7 +1189,7 @@ class Environment: raise TypeError("Can't add %r to environment" % (other,)) return self - def __add__(self, other): + def __add__(self, other: Union["Distribution", "Environment"]): """Add an environment or distribution to an environment""" new = self.__class__([], platform=None, python=None) for env in self, other: @@ -1191,46 +1216,54 @@ class ExtractionError(RuntimeError): The exception instance that caused extraction to fail """ + manager: "ResourceManager" + cache_path: str + original_error: Optional[BaseException] + class ResourceManager: """Manage resource extraction and packages""" - extraction_path = None + extraction_path: Optional[str] = None def __init__(self): self.cached_files = {} - def resource_exists(self, package_or_requirement, resource_name): + def resource_exists(self, package_or_requirement: _PkgReqType, resource_name: str): """Does the named resource exist?""" return get_provider(package_or_requirement).has_resource(resource_name) - def resource_isdir(self, package_or_requirement, resource_name): + def resource_isdir(self, package_or_requirement: _PkgReqType, resource_name: str): """Is the named resource an existing directory?""" return get_provider(package_or_requirement).resource_isdir(resource_name) - def resource_filename(self, package_or_requirement, resource_name): + def resource_filename( + self, package_or_requirement: _PkgReqType, resource_name: str + ): """Return a true filesystem path for specified resource""" return get_provider(package_or_requirement).get_resource_filename( self, resource_name ) - def resource_stream(self, package_or_requirement, resource_name): + def resource_stream(self, package_or_requirement: _PkgReqType, resource_name: str): """Return a readable file-like object for specified resource""" return get_provider(package_or_requirement).get_resource_stream( self, resource_name ) - def resource_string(self, package_or_requirement, resource_name): - """Return specified resource as a string""" + def resource_string( + self, package_or_requirement: _PkgReqType, resource_name: str + ) -> bytes: + """Return specified resource as :obj:`bytes`""" return get_provider(package_or_requirement).get_resource_string( self, resource_name ) - def resource_listdir(self, package_or_requirement, resource_name): + def resource_listdir(self, package_or_requirement: _PkgReqType, resource_name: str): """List the contents of the named resource directory""" return get_provider(package_or_requirement).resource_listdir(resource_name) - def extraction_error(self): + def extraction_error(self) -> NoReturn: """Give an error message for problems extracting file(s)""" old_exc = sys.exc_info()[1] @@ -1260,7 +1293,7 @@ class ResourceManager: err.original_error = old_exc raise err - def get_cache_path(self, archive_name, names=()): + def get_cache_path(self, archive_name: str, names: Iterable[str] = ()): """Return absolute location in cache 
for `archive_name` and `names` The parent directory of the resulting path will be created if it does @@ -1312,7 +1345,7 @@ class ResourceManager: ).format(**locals()) warnings.warn(msg, UserWarning) - def postprocess(self, tempname, filename): + def postprocess(self, tempname: str, filename: str): """Perform any platform-specific postprocessing of `tempname` This is where Mac header rewrites should be done; other platforms don't @@ -1332,7 +1365,7 @@ class ResourceManager: mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777 os.chmod(tempname, mode) - def set_extraction_path(self, path): + def set_extraction_path(self, path: str): """Set the base path where resources will be extracted to, if needed. If you do not call this routine before any extractions take place, the @@ -1356,7 +1389,7 @@ class ResourceManager: self.extraction_path = path - def cleanup_resources(self, force=False): + def cleanup_resources(self, force: bool = False) -> List[str]: """ Delete all extracted resource files and directories, returning a list of the file and directory names that could not be successfully removed. @@ -1368,6 +1401,7 @@ class ResourceManager: directory used for extractions. """ # XXX + return [] def get_default_cache(): @@ -1376,12 +1410,10 @@ def get_default_cache(): or a platform-relevant user cache dir for an app named "Python-Eggs". """ - return os.environ.get('PYTHON_EGG_CACHE') or platformdirs.user_cache_dir( - appname='Python-Eggs' - ) + return os.environ.get('PYTHON_EGG_CACHE') or _user_cache_dir(appname='Python-Eggs') -def safe_name(name): +def safe_name(name: str): """Convert an arbitrary string to a standard distribution name Any runs of non-alphanumeric/. characters are replaced with a single '-'. @@ -1389,14 +1421,14 @@ def safe_name(name): return re.sub('[^A-Za-z0-9.]+', '-', name) -def safe_version(version): +def safe_version(version: str): """ Convert an arbitrary string to a standard version string """ try: # normalize the version - return str(packaging.version.Version(version)) - except packaging.version.InvalidVersion: + return str(_packaging_version.Version(version)) + except _packaging_version.InvalidVersion: version = version.replace(' ', '.') return re.sub('[^A-Za-z0-9.]+', '-', version) @@ -1433,7 +1465,7 @@ def _safe_segment(segment): return re.sub(r'\.[^A-Za-z0-9]+', '.', segment).strip(".-") -def safe_extra(extra): +def safe_extra(extra: str): """Convert an arbitrary string to a standard 'extra' name Any runs of non-alphanumeric characters are replaced with a single '_', @@ -1442,7 +1474,7 @@ def safe_extra(extra): return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower() -def to_filename(name): +def to_filename(name: str): """Convert a project or version name to its filename-escaped form Any '-' characters are currently replaced with '_'. @@ -1450,7 +1482,7 @@ def to_filename(name): return name.replace('-', '_') -def invalid_marker(text): +def invalid_marker(text: str): """ Validate text as a PEP 508 environment marker; return an exception if invalid or False otherwise. @@ -1464,7 +1496,7 @@ def invalid_marker(text): return False -def evaluate_marker(text, extra=None): +def evaluate_marker(text: str, extra: Optional[str] = None): """ Evaluate a PEP 508 environment marker. Return a boolean indicating the marker result in this environment. @@ -1473,46 +1505,49 @@ def evaluate_marker(text, extra=None): This implementation uses the 'pyparsing' module. 
""" try: - marker = packaging.markers.Marker(text) + marker = _packaging_markers.Marker(text) return marker.evaluate() - except packaging.markers.InvalidMarker as e: + except _packaging_markers.InvalidMarker as e: raise SyntaxError(e) from e class NullProvider: """Try to implement resources and metadata for arbitrary PEP 302 loaders""" - egg_name = None - egg_info = None - loader = None + egg_name: Optional[str] = None + egg_info: Optional[str] = None + loader: Optional[_LoaderProtocol] = None + module_path: Optional[str] # Some subclasses can have a None module_path - def __init__(self, module): + def __init__(self, module: _ModuleLike): self.loader = getattr(module, '__loader__', None) self.module_path = os.path.dirname(getattr(module, '__file__', '')) - def get_resource_filename(self, manager, resource_name): + def get_resource_filename(self, manager: ResourceManager, resource_name: str): return self._fn(self.module_path, resource_name) - def get_resource_stream(self, manager, resource_name): + def get_resource_stream(self, manager: ResourceManager, resource_name: str): return io.BytesIO(self.get_resource_string(manager, resource_name)) - def get_resource_string(self, manager, resource_name): + def get_resource_string( + self, manager: ResourceManager, resource_name: str + ) -> bytes: return self._get(self._fn(self.module_path, resource_name)) - def has_resource(self, resource_name): + def has_resource(self, resource_name: str): return self._has(self._fn(self.module_path, resource_name)) def _get_metadata_path(self, name): return self._fn(self.egg_info, name) - def has_metadata(self, name): + def has_metadata(self, name: str) -> bool: if not self.egg_info: - return self.egg_info + return False path = self._get_metadata_path(name) return self._has(path) - def get_metadata(self, name): + def get_metadata(self, name: str): if not self.egg_info: return "" path = self._get_metadata_path(name) @@ -1525,24 +1560,24 @@ class NullProvider: exc.reason += ' in {} file at path: {}'.format(name, path) raise - def get_metadata_lines(self, name): + def get_metadata_lines(self, name: str): return yield_lines(self.get_metadata(name)) - def resource_isdir(self, resource_name): + def resource_isdir(self, resource_name: str): return self._isdir(self._fn(self.module_path, resource_name)) - def metadata_isdir(self, name): - return self.egg_info and self._isdir(self._fn(self.egg_info, name)) + def metadata_isdir(self, name: str) -> bool: + return bool(self.egg_info and self._isdir(self._fn(self.egg_info, name))) - def resource_listdir(self, resource_name): + def resource_listdir(self, resource_name: str): return self._listdir(self._fn(self.module_path, resource_name)) - def metadata_listdir(self, name): + def metadata_listdir(self, name: str): if self.egg_info: return self._listdir(self._fn(self.egg_info, name)) return [] - def run_script(self, script_name, namespace): + def run_script(self, script_name: str, namespace: Dict[str, Any]): script = 'scripts/' + script_name if not self.has_metadata(script): raise ResolutionError( @@ -1555,8 +1590,7 @@ class NullProvider: script_filename = self._fn(self.egg_info, script) namespace['__file__'] = script_filename if os.path.exists(script_filename): - with open(script_filename) as fid: - source = fid.read() + source = _read_utf8_with_fallback(script_filename) code = compile(source, script_filename, 'exec') exec(code, namespace, namespace) else: @@ -1571,12 +1605,12 @@ class NullProvider: script_code = compile(script_text, script_filename, 'exec') exec(script_code, 
namespace, namespace) - def _has(self, path): + def _has(self, path) -> bool: raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) - def _isdir(self, path): + def _isdir(self, path) -> bool: raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) @@ -1586,7 +1620,7 @@ class NullProvider: "Can't perform this operation for unregistered loader type" ) - def _fn(self, base, resource_name): + def _fn(self, base, resource_name: str): self._validate_resource_path(resource_name) if resource_name: return os.path.join(base, *resource_name.split('/')) @@ -1649,6 +1683,7 @@ is not allowed. os.path.pardir in path.split(posixpath.sep) or posixpath.isabs(path) or ntpath.isabs(path) + or path.startswith("\\") ) if not invalid: return @@ -1656,7 +1691,7 @@ is not allowed. msg = "Use of .. or absolute path in a resource path is not allowed." # Aggressively disallow Windows absolute paths - if ntpath.isabs(path) and not posixpath.isabs(path): + if (path.startswith("\\") or ntpath.isabs(path)) and not posixpath.isabs(path): raise ValueError(msg) # for compatibility, warn; in future @@ -1666,9 +1701,10 @@ is not allowed. DeprecationWarning, ) - def _get(self, path): - if hasattr(self.loader, 'get_data'): - return self.loader.get_data(path) + def _get(self, path) -> bytes: + if hasattr(self.loader, 'get_data') and self.loader: + # Already checked get_data exists + return self.loader.get_data(path) # type: ignore[attr-defined] raise NotImplementedError( "Can't perform this operation for loaders without 'get_data()'" ) @@ -1691,7 +1727,7 @@ def _parents(path): class EggProvider(NullProvider): """Provider based on a virtual filesystem""" - def __init__(self, module): + def __init__(self, module: _ModuleLike): super().__init__(module) self._setup_prefix() @@ -1702,7 +1738,7 @@ class EggProvider(NullProvider): egg = next(eggs, None) egg and self._set_egg(egg) - def _set_egg(self, path): + def _set_egg(self, path: str): self.egg_name = os.path.basename(path) self.egg_info = os.path.join(path, 'EGG-INFO') self.egg_root = path @@ -1711,19 +1747,19 @@ class EggProvider(NullProvider): class DefaultProvider(EggProvider): """Provides access to package resources in the filesystem""" - def _has(self, path): + def _has(self, path) -> bool: return os.path.exists(path) - def _isdir(self, path): + def _isdir(self, path) -> bool: return os.path.isdir(path) def _listdir(self, path): return os.listdir(path) - def get_resource_stream(self, manager, resource_name): + def get_resource_stream(self, manager: object, resource_name: str): return open(self._fn(self.module_path, resource_name), 'rb') - def _get(self, path): + def _get(self, path) -> bytes: with open(path, 'rb') as stream: return stream.read() @@ -1734,7 +1770,7 @@ class DefaultProvider(EggProvider): 'SourcelessFileLoader', ) for name in loader_names: - loader_cls = getattr(importlib_machinery, name, type(None)) + loader_cls = getattr(importlib.machinery, name, type(None)) register_loader_type(loader_cls, cls) @@ -1748,8 +1784,8 @@ class EmptyProvider(NullProvider): _isdir = _has = lambda self, path: False - def _get(self, path): - return '' + def _get(self, path) -> bytes: + return b'' def _listdir(self, path): return [] @@ -1761,13 +1797,14 @@ class EmptyProvider(NullProvider): empty_provider = EmptyProvider() -class ZipManifests(dict): +class ZipManifests(Dict[str, "MemoizedZipManifests.manifest_mod"]): """ zip manifest builder """ + # `path` could be `Union["StrPath", IO[bytes]]` but that violates the 
LSP for `MemoizedZipManifests.load` @classmethod - def build(cls, path): + def build(cls, path: str): """ Build a dictionary similar to the zipimport directory caches, except instead of tuples, store ZipInfo objects. @@ -1793,9 +1830,11 @@ class MemoizedZipManifests(ZipManifests): Memoized zipfile manifests. """ - manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime') + class manifest_mod(NamedTuple): + manifest: Dict[str, zipfile.ZipInfo] + mtime: float - def load(self, path): + def load(self, path: str): # type: ignore[override] # ZipManifests.load is a classmethod """ Load a manifest at path or return a suitable manifest already loaded. """ @@ -1812,10 +1851,12 @@ class MemoizedZipManifests(ZipManifests): class ZipProvider(EggProvider): """Resource support for zips and eggs""" - eagers = None + eagers: Optional[List[str]] = None _zip_manifests = MemoizedZipManifests() + # ZipProvider's loader should always be a zipimporter or equivalent + loader: zipimport.zipimporter - def __init__(self, module): + def __init__(self, module: _ModuleLike): super().__init__(module) self.zip_pre = self.loader.archive + os.sep @@ -1841,7 +1882,7 @@ class ZipProvider(EggProvider): def zipinfo(self): return self._zip_manifests.load(self.loader.archive) - def get_resource_filename(self, manager, resource_name): + def get_resource_filename(self, manager: ResourceManager, resource_name: str): if not self.egg_name: raise NotImplementedError( "resource_filename() only supported for .egg, not .zip" @@ -1864,7 +1905,7 @@ class ZipProvider(EggProvider): return timestamp, size # FIXME: 'ZipProvider._extract_resource' is too complex (12) - def _extract_resource(self, manager, zip_path): # noqa: C901 + def _extract_resource(self, manager: ResourceManager, zip_path): # noqa: C901 if zip_path in self._index(): for name in self._index()[zip_path]: last = self._extract_resource(manager, os.path.join(zip_path, name)) @@ -1874,10 +1915,14 @@ class ZipProvider(EggProvider): timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) if not WRITE_SUPPORT: - raise IOError( + raise OSError( '"os.rename" and "os.unlink" are not supported ' 'on this platform' ) try: + if not self.egg_name: + raise OSError( + '"egg_name" is empty. This likely means no egg could be found from the "module_path".' 
+ ) real_path = manager.get_cache_path(self.egg_name, self._parts(zip_path)) if self._is_current(real_path, zip_path): @@ -1895,7 +1940,7 @@ class ZipProvider(EggProvider): try: rename(tmpnam, real_path) - except os.error: + except OSError: if os.path.isfile(real_path): if self._is_current(real_path, zip_path): # the file became current since it was checked above, @@ -1908,7 +1953,7 @@ class ZipProvider(EggProvider): return real_path raise - except os.error: + except OSError: # report a user-friendly error manager.extraction_error() @@ -1956,20 +2001,20 @@ class ZipProvider(EggProvider): self._dirindex = ind return ind - def _has(self, fspath): + def _has(self, fspath) -> bool: zip_path = self._zipinfo_name(fspath) return zip_path in self.zipinfo or zip_path in self._index() - def _isdir(self, fspath): + def _isdir(self, fspath) -> bool: return self._zipinfo_name(fspath) in self._index() def _listdir(self, fspath): return list(self._index().get(self._zipinfo_name(fspath), ())) - def _eager_to_zip(self, resource_name): + def _eager_to_zip(self, resource_name: str): return self._zipinfo_name(self._fn(self.egg_root, resource_name)) - def _resource_to_zip(self, resource_name): + def _resource_to_zip(self, resource_name: str): return self._zipinfo_name(self._fn(self.module_path, resource_name)) @@ -1988,20 +2033,20 @@ class FileMetadata(EmptyProvider): the provided location. """ - def __init__(self, path): + def __init__(self, path: "StrPath"): self.path = path def _get_metadata_path(self, name): return self.path - def has_metadata(self, name): + def has_metadata(self, name: str) -> bool: return name == 'PKG-INFO' and os.path.isfile(self.path) def get_metadata(self, name): if name != 'PKG-INFO': raise KeyError("No metadata except PKG-INFO is available") - with io.open(self.path, encoding='utf-8', errors="replace") as f: + with open(self.path, encoding='utf-8', errors="replace") as f: metadata = f.read() self._warn_on_replacement(metadata) return metadata @@ -2037,7 +2082,7 @@ class PathMetadata(DefaultProvider): dist = Distribution.from_filename(egg_path, metadata=metadata) """ - def __init__(self, path, egg_info): + def __init__(self, path: str, egg_info: str): self.module_path = path self.egg_info = egg_info @@ -2045,7 +2090,7 @@ class PathMetadata(DefaultProvider): class EggMetadata(ZipProvider): """Metadata provider for .egg files""" - def __init__(self, importer): + def __init__(self, importer: zipimport.zipimporter): """Create a metadata provider from a zipimporter""" self.zip_pre = importer.archive + os.sep @@ -2057,10 +2102,12 @@ class EggMetadata(ZipProvider): self._setup_prefix() -_declare_state('dict', _distribution_finders={}) +_distribution_finders: Dict[type, _DistFinderType[Any]] = _declare_state( + 'dict', '_distribution_finders', {} +) -def register_finder(importer_type, distribution_finder): +def register_finder(importer_type: Type[_T], distribution_finder: _DistFinderType[_T]): """Register `distribution_finder` to find distributions in sys.path items `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item @@ -2070,14 +2117,16 @@ def register_finder(importer_type, distribution_finder): _distribution_finders[importer_type] = distribution_finder -def find_distributions(path_item, only=False): +def find_distributions(path_item: str, only: bool = False): """Yield distributions accessible via `path_item`""" importer = get_importer(path_item) finder = _find_adapter(_distribution_finders, importer) return finder(importer, path_item, only) -def 
find_eggs_in_zip(importer, path_item, only=False): +def find_eggs_in_zip( + importer: zipimport.zipimporter, path_item: str, only: bool = False +): """ Find eggs in zip files; possibly multiple nested eggs. """ @@ -2095,8 +2144,7 @@ def find_eggs_in_zip(importer, path_item, only=False): if _is_egg_path(subitem): subpath = os.path.join(path_item, subitem) dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath) - for dist in dists: - yield dist + yield from dists elif subitem.lower().endswith(('.dist-info', '.egg-info')): subpath = os.path.join(path_item, subitem) submeta = EggMetadata(zipimport.zipimporter(subpath)) @@ -2107,14 +2155,16 @@ def find_eggs_in_zip(importer, path_item, only=False): register_finder(zipimport.zipimporter, find_eggs_in_zip) -def find_nothing(importer, path_item, only=False): +def find_nothing( + importer: Optional[object], path_item: Optional[str], only: Optional[bool] = False +): return () register_finder(object, find_nothing) -def find_on_path(importer, path_item, only=False): +def find_on_path(importer: Optional[object], path_item, only=False): """Yield distributions accessible on a sys.path directory""" path_item = _normalize_cached(path_item) @@ -2131,8 +2181,7 @@ def find_on_path(importer, path_item, only=False): for entry in sorted(entries): fullpath = os.path.join(path_item, entry) factory = dist_factory(path_item, entry, only) - for dist in factory(fullpath): - yield dist + yield from factory(fullpath) def dist_factory(path_item, entry, only): @@ -2208,11 +2257,10 @@ def non_empty_lines(path): """ Yield non-empty lines from file at path """ - with open(path) as f: - for line in f: - line = line.strip() - if line: - yield line + for line in _read_utf8_with_fallback(path).splitlines(): + line = line.strip() + if line: + yield line def resolve_egg_link(path): @@ -2231,13 +2279,19 @@ def resolve_egg_link(path): if hasattr(pkgutil, 'ImpImporter'): register_finder(pkgutil.ImpImporter, find_on_path) -register_finder(importlib_machinery.FileFinder, find_on_path) +register_finder(importlib.machinery.FileFinder, find_on_path) -_declare_state('dict', _namespace_handlers={}) -_declare_state('dict', _namespace_packages={}) +_namespace_handlers: Dict[type, _NSHandlerType[Any]] = _declare_state( + 'dict', '_namespace_handlers', {} +) +_namespace_packages: Dict[Optional[str], List[str]] = _declare_state( + 'dict', '_namespace_packages', {} +) -def register_namespace_handler(importer_type, namespace_handler): +def register_namespace_handler( + importer_type: Type[_T], namespace_handler: _NSHandlerType[_T] +): """Register `namespace_handler` to declare namespace packages `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item @@ -2292,7 +2346,7 @@ def _handle_ns(packageName, path_item): return subpath -def _rebuild_mod_path(orig_path, package_name, module): +def _rebuild_mod_path(orig_path, package_name, module: types.ModuleType): """ Rebuild module.__path__ ensuring that all entries are ordered corresponding to their sys.path order @@ -2326,7 +2380,7 @@ def _rebuild_mod_path(orig_path, package_name, module): module.__path__ = new_path -def declare_namespace(packageName): +def declare_namespace(packageName: str): """Declare that package 'packageName' is a namespace package""" msg = ( @@ -2343,7 +2397,7 @@ def declare_namespace(packageName): if packageName in _namespace_packages: return - path = sys.path + path: MutableSequence[str] = sys.path parent, _, _ = packageName.rpartition('.') if parent: @@ -2369,7 +2423,7 @@ def 
declare_namespace(packageName): _imp.release_lock() -def fixup_namespace_packages(path_item, parent=None): +def fixup_namespace_packages(path_item: str, parent: Optional[str] = None): """Ensure that previously-declared namespace packages include path_item""" _imp.acquire_lock() try: @@ -2381,7 +2435,12 @@ def fixup_namespace_packages(path_item, parent=None): _imp.release_lock() -def file_ns_handler(importer, path_item, packageName, module): +def file_ns_handler( + importer: object, + path_item: "StrPath", + packageName: str, + module: types.ModuleType, +): """Compute an ns-package subpath for a filesystem or zipfile importer""" subpath = os.path.join(path_item, packageName.split('.')[-1]) @@ -2398,22 +2457,27 @@ if hasattr(pkgutil, 'ImpImporter'): register_namespace_handler(pkgutil.ImpImporter, file_ns_handler) register_namespace_handler(zipimport.zipimporter, file_ns_handler) -register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler) +register_namespace_handler(importlib.machinery.FileFinder, file_ns_handler) -def null_ns_handler(importer, path_item, packageName, module): +def null_ns_handler( + importer: object, + path_item: Optional[str], + packageName: Optional[str], + module: Optional[_ModuleLike], +): return None register_namespace_handler(object, null_ns_handler) -def normalize_path(filename): +def normalize_path(filename: "StrPath"): """Normalize a file/dir name for comparison purposes""" return os.path.normcase(os.path.realpath(os.path.normpath(_cygwin_patch(filename)))) -def _cygwin_patch(filename): # pragma: nocover +def _cygwin_patch(filename: "StrPath"): # pragma: nocover """ Contrary to POSIX 2008, on Cygwin, getcwd (3) contains symlink components. Using @@ -2424,12 +2488,9 @@ def _cygwin_patch(filename): # pragma: nocover return os.path.abspath(filename) if sys.platform == 'cygwin' else filename -def _normalize_cached(filename, _cache={}): - try: - return _cache[filename] - except KeyError: - _cache[filename] = result = normalize_path(filename) - return result +@functools.lru_cache(maxsize=None) +def _normalize_cached(filename): + return normalize_path(filename) def _is_egg_path(path): @@ -2482,7 +2543,14 @@ EGG_NAME = re.compile( class EntryPoint: """Object representing an advertised importable object""" - def __init__(self, name, module_name, attrs=(), extras=(), dist=None): + def __init__( + self, + name: str, + module_name: str, + attrs: Iterable[str] = (), + extras: Iterable[str] = (), + dist: Optional["Distribution"] = None, + ): if not MODULE(module_name): raise ValueError("Invalid module name", module_name) self.name = name @@ -2502,7 +2570,12 @@ class EntryPoint: def __repr__(self): return "EntryPoint.parse(%r)" % str(self) - def load(self, require=True, *args, **kwargs): + def load( + self, + require: bool = True, + *args: Optional[Union[Environment, _InstallerType]], + **kwargs: Optional[Union[Environment, _InstallerType]], + ): """ Require packages for this EntryPoint, then resolve it. 
""" @@ -2514,7 +2587,9 @@ class EntryPoint: stacklevel=2, ) if require: - self.require(*args, **kwargs) + # We could pass `env` and `installer` directly, + # but keeping `*args` and `**kwargs` for backwards compatibility + self.require(*args, **kwargs) # type: ignore return self.resolve() def resolve(self): @@ -2527,9 +2602,14 @@ class EntryPoint: except AttributeError as exc: raise ImportError(str(exc)) from exc - def require(self, env=None, installer=None): - if self.extras and not self.dist: - raise UnknownExtra("Can't require() without a distribution", self) + def require( + self, + env: Optional[Environment] = None, + installer: Optional[_InstallerType] = None, + ): + if not self.dist: + error_cls = UnknownExtra if self.extras else AttributeError + raise error_cls("Can't require() without a distribution", self) # Get the requirements for this entry point with all its extras and # then resolve them. We have to pass `extras` along when resolving so @@ -2550,7 +2630,7 @@ class EntryPoint: ) @classmethod - def parse(cls, src, dist=None): + def parse(cls, src: str, dist: Optional["Distribution"] = None): """Parse a single entry point from string `src` Entry point syntax follows the form:: @@ -2579,7 +2659,12 @@ class EntryPoint: return req.extras @classmethod - def parse_group(cls, group, lines, dist=None): + def parse_group( + cls, + group: str, + lines: _NestedStr, + dist: Optional["Distribution"] = None, + ): """Parse an entry point group""" if not MODULE(group): raise ValueError("Invalid group name", group) @@ -2592,14 +2677,18 @@ class EntryPoint: return this @classmethod - def parse_map(cls, data, dist=None): + def parse_map( + cls, + data: Union[str, Iterable[str], Dict[str, Union[str, Iterable[str]]]], + dist: Optional["Distribution"] = None, + ): """Parse a map of entry point groups""" if isinstance(data, dict): - data = data.items() + _data = data.items() else: - data = split_sections(data) - maps = {} - for group, lines in data: + _data = split_sections(data) + maps: Dict[str, Dict[str, EntryPoint]] = {} + for group, lines in _data: if group is None: if not lines: continue @@ -2633,13 +2722,13 @@ class Distribution: def __init__( self, - location=None, - metadata=None, - project_name=None, - version=None, - py_version=PY_MAJOR, - platform=None, - precedence=EGG_DIST, + location: Optional[str] = None, + metadata: _MetadataType = None, + project_name: Optional[str] = None, + version: Optional[str] = None, + py_version: Optional[str] = PY_MAJOR, + platform: Optional[str] = None, + precedence: int = EGG_DIST, ): self.project_name = safe_name(project_name or 'Unknown') if version is not None: @@ -2651,7 +2740,13 @@ class Distribution: self._provider = metadata or empty_provider @classmethod - def from_location(cls, location, basename, metadata=None, **kw): + def from_location( + cls, + location: str, + basename: str, + metadata: _MetadataType = None, + **kw: int, # We could set `precedence` explicitly, but keeping this as `**kw` for full backwards and subclassing compatibility + ): project_name, version, py_version, platform = [None] * 4 basename, ext = os.path.splitext(basename) if ext.lower() in _distributionImpl: @@ -2689,25 +2784,25 @@ class Distribution: def __hash__(self): return hash(self.hashcmp) - def __lt__(self, other): + def __lt__(self, other: "Distribution"): return self.hashcmp < other.hashcmp - def __le__(self, other): + def __le__(self, other: "Distribution"): return self.hashcmp <= other.hashcmp - def __gt__(self, other): + def __gt__(self, other: "Distribution"): 
return self.hashcmp > other.hashcmp - def __ge__(self, other): + def __ge__(self, other: "Distribution"): return self.hashcmp >= other.hashcmp - def __eq__(self, other): + def __eq__(self, other: object): if not isinstance(other, self.__class__): # It's not a Distribution, so they are not equal return False return self.hashcmp == other.hashcmp - def __ne__(self, other): + def __ne__(self, other: object): return not self == other # These properties have to be lazy so that we don't have to load any @@ -2727,12 +2822,12 @@ class Distribution: if not hasattr(self, "_parsed_version"): try: self._parsed_version = parse_version(self.version) - except packaging.version.InvalidVersion as ex: + except _packaging_version.InvalidVersion as ex: info = f"(package: {self.project_name})" if hasattr(ex, "add_note"): ex.add_note(info) # PEP 678 raise - raise packaging.version.InvalidVersion(f"{str(ex)} {info}") from None + raise _packaging_version.InvalidVersion(f"{str(ex)} {info}") from None return self._parsed_version @@ -2740,7 +2835,7 @@ class Distribution: def _forgiving_parsed_version(self): try: return self.parsed_version - except packaging.version.InvalidVersion as ex: + except _packaging_version.InvalidVersion as ex: self._parsed_version = parse_version(_forgiving_version(self.version)) notes = "\n".join(getattr(ex, "__notes__", [])) # PEP 678 @@ -2817,7 +2912,7 @@ class Distribution: dm.setdefault(extra, []).extend(parse_requirements(reqs)) return dm - def requires(self, extras=()): + def requires(self, extras: Iterable[str] = ()): """List of Requirements needed for this distro if `extras` are used""" dm = self._dep_map deps = [] @@ -2850,21 +2945,18 @@ class Distribution: def _get_metadata(self, name): if self.has_metadata(name): - for line in self.get_metadata_lines(name): - yield line + yield from self.get_metadata_lines(name) def _get_version(self): lines = self._get_metadata(self.PKG_INFO) - version = _version_from_file(lines) + return _version_from_file(lines) - return version - - def activate(self, path=None, replace=False): + def activate(self, path: Optional[List[str]] = None, replace: bool = False): """Ensure distribution is importable on `path` (default=sys.path)""" if path is None: path = sys.path self.insert_on(path, replace=replace) - if path is sys.path: + if path is sys.path and self.location is not None: fixup_namespace_packages(self.location) for pkg in self._get_metadata('namespace_packages.txt'): if pkg in sys.modules: @@ -2904,50 +2996,58 @@ class Distribution: def __dir__(self): return list( - set(super(Distribution, self).__dir__()) + set(super().__dir__()) | set(attr for attr in self._provider.__dir__() if not attr.startswith('_')) ) @classmethod - def from_filename(cls, filename, metadata=None, **kw): + def from_filename( + cls, + filename: str, + metadata: _MetadataType = None, + **kw: int, # We could set `precedence` explicitly, but keeping this as `**kw` for full backwards and subclassing compatibility + ): return cls.from_location( _normalize_cached(filename), os.path.basename(filename), metadata, **kw ) def as_requirement(self): """Return a ``Requirement`` that matches this distribution exactly""" - if isinstance(self.parsed_version, packaging.version.Version): + if isinstance(self.parsed_version, _packaging_version.Version): spec = "%s==%s" % (self.project_name, self.parsed_version) else: spec = "%s===%s" % (self.project_name, self.parsed_version) return Requirement.parse(spec) - def load_entry_point(self, group, name): + def load_entry_point(self, group: str, name: 
str): """Return the `name` entry point of `group` or raise ImportError""" ep = self.get_entry_info(group, name) if ep is None: raise ImportError("Entry point %r not found" % ((group, name),)) return ep.load() - def get_entry_map(self, group=None): + def get_entry_map(self, group: Optional[str] = None): """Return the entry point map for `group`, or the full entry map""" - try: - ep_map = self._ep_map - except AttributeError: - ep_map = self._ep_map = EntryPoint.parse_map( + if not hasattr(self, "_ep_map"): + self._ep_map = EntryPoint.parse_map( self._get_metadata('entry_points.txt'), self ) if group is not None: - return ep_map.get(group, {}) - return ep_map + return self._ep_map.get(group, {}) + return self._ep_map - def get_entry_info(self, group, name): + def get_entry_info(self, group: str, name: str): """Return the EntryPoint object for `group`+`name`, or ``None``""" return self.get_entry_map(group).get(name) # FIXME: 'Distribution.insert_on' is too complex (13) - def insert_on(self, path, loc=None, replace=False): # noqa: C901 + def insert_on( # noqa: C901 + self, + path: List[str], + loc=None, + replace: bool = False, + ): """Ensure self.location is on path If replace=False (default): @@ -3052,13 +3152,14 @@ class Distribution: return False return True - def clone(self, **kw): + def clone(self, **kw: Optional[Union[str, int, IResourceProvider]]): """Copy this distribution, substituting in any changed keyword args""" names = 'project_name version py_version platform location precedence' for attr in names.split(): kw.setdefault(attr, getattr(self, attr, None)) kw.setdefault('metadata', self._provider) - return self.__class__(**kw) + # Unsafely unpacking. But keeping **kw for backwards and subclassing compatibility + return self.__class__(**kw) # type:ignore[arg-type] @property def extras(self): @@ -3155,7 +3256,7 @@ def issue_warning(*args, **kw): warnings.warn(stacklevel=level + 1, *args, **kw) -def parse_requirements(strs): +def parse_requirements(strs: _NestedStr): """ Yield ``Requirement`` objects for each specification in `strs`. @@ -3164,19 +3265,20 @@ def parse_requirements(strs): return map(Requirement, join_continuation(map(drop_comment, yield_lines(strs)))) -class RequirementParseError(packaging.requirements.InvalidRequirement): +class RequirementParseError(_packaging_requirements.InvalidRequirement): "Compatibility wrapper for InvalidRequirement" -class Requirement(packaging.requirements.Requirement): - def __init__(self, requirement_string): +class Requirement(_packaging_requirements.Requirement): + def __init__(self, requirement_string: str): """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!""" - super(Requirement, self).__init__(requirement_string) + super().__init__(requirement_string) self.unsafe_name = self.name project_name = safe_name(self.name) self.project_name, self.key = project_name, project_name.lower() self.specs = [(spec.operator, spec.version) for spec in self.specifier] - self.extras = tuple(map(safe_extra, self.extras)) + # packaging.requirements.Requirement uses a set for its extras. 
We use a variable-length tuple
+        self.extras: Tuple[str, ...] = tuple(map(safe_extra, self.extras))
         self.hashCmp = (
             self.key,
             self.url,
@@ -3186,13 +3288,13 @@ class Requirement(packaging.requirements.Requirement):
         )
         self.__hash = hash(self.hashCmp)

-    def __eq__(self, other):
+    def __eq__(self, other: object):
         return isinstance(other, Requirement) and self.hashCmp == other.hashCmp

     def __ne__(self, other):
         return not self == other

-    def __contains__(self, item):
+    def __contains__(self, item: Union[Distribution, str, Tuple[str, ...]]):
         if isinstance(item, Distribution):
             if item.key != self.key:
                 return False
@@ -3211,7 +3313,7 @@ class Requirement(packaging.requirements.Requirement):
         return "Requirement.parse(%r)" % str(self)

     @staticmethod
-    def parse(s):
+    def parse(s: Union[str, Iterable[str]]):
         (req,) = parse_requirements(s)
         return req

@@ -3226,15 +3328,18 @@ def _always_object(classes):
     return classes


-def _find_adapter(registry, ob):
+def _find_adapter(registry: Mapping[type, _AdapterT], ob: object) -> _AdapterT:
     """Return an adapter factory for `ob` from `registry`"""
     types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob))))
     for t in types:
         if t in registry:
             return registry[t]
+    # _find_adapter would previously return None, and immediately be called.
+    # So we're raising a TypeError to keep backward compatibility if anyone depended on that behaviour.
+    raise TypeError(f"Could not find adapter for {registry} and {ob}")


-def ensure_directory(path):
+def ensure_directory(path: str):
     """Ensure that the parent directory of `path` exists"""
     dirname = os.path.dirname(path)
     os.makedirs(dirname, exist_ok=True)
@@ -3243,7 +3348,7 @@ def _bypass_ensure_directory(path):
     """Sandbox-bypassing version of ensure_directory()"""
     if not WRITE_SUPPORT:
-        raise IOError('"os.mkdir" not supported on this platform.')
+        raise OSError('"os.mkdir" not supported on this platform.')
     dirname, filename = split(path)
     if dirname and filename and not isdir(dirname):
         _bypass_ensure_directory(dirname)
@@ -3253,7 +3358,7 @@ def _bypass_ensure_directory(path):
         pass


-def split_sections(s):
+def split_sections(s: _NestedStr):
     """Split a string or iterable thereof into (section, content) pairs

     Each ``section`` is a stripped version of the section header ("[section]")
@@ -3297,6 +3402,15 @@ def _mkstemp(*args, **kw):
 warnings.filterwarnings("ignore", category=PEP440Warning, append=True)


+class PkgResourcesDeprecationWarning(Warning):
+    """
+    Base class for warning about deprecations in ``pkg_resources``
+
+    This class is not derived from ``DeprecationWarning``, and as such is
+    visible by default.
+    """
+
+
 # from jaraco.functools 1.3
 def _call_aside(f, *args, **kwargs):
     f(*args, **kwargs)
@@ -3315,15 +3429,6 @@ def _initialize(g=globals()):
     )


-class PkgResourcesDeprecationWarning(Warning):
-    """
-    Base class for warning about deprecations in ``pkg_resources``
-
-    This class is not derived from ``DeprecationWarning``, and as such is
-    visible by default.
-    """
-
-
 @_call_aside
 def _initialize_master_working_set():
     """
@@ -3337,8 +3442,7 @@ def _initialize_master_working_set():
     Invocation by other packages is unsupported and done
     at their own risk.
""" - working_set = WorkingSet._build_master() - _declare_state('object', working_set=working_set) + working_set = _declare_state('object', 'working_set', WorkingSet._build_master()) require = working_set.require iter_entry_points = working_set.iter_entry_points @@ -3359,3 +3463,55 @@ def _initialize_master_working_set(): # match order list(map(working_set.add_entry, sys.path)) globals().update(locals()) + + +if TYPE_CHECKING: + # All of these are set by the @_call_aside methods above + __resource_manager = ResourceManager() # Won't exist at runtime + resource_exists = __resource_manager.resource_exists + resource_isdir = __resource_manager.resource_isdir + resource_filename = __resource_manager.resource_filename + resource_stream = __resource_manager.resource_stream + resource_string = __resource_manager.resource_string + resource_listdir = __resource_manager.resource_listdir + set_extraction_path = __resource_manager.set_extraction_path + cleanup_resources = __resource_manager.cleanup_resources + + working_set = WorkingSet() + require = working_set.require + iter_entry_points = working_set.iter_entry_points + add_activation_listener = working_set.subscribe + run_script = working_set.run_script + run_main = run_script + + +# ---- Ported from ``setuptools`` to avoid introducing an import inter-dependency ---- +LOCALE_ENCODING = "locale" if sys.version_info >= (3, 10) else None + + +def _read_utf8_with_fallback(file: str, fallback_encoding=LOCALE_ENCODING) -> str: + """See setuptools.unicode_utils._read_utf8_with_fallback""" + try: + with open(file, "r", encoding="utf-8") as f: + return f.read() + except UnicodeDecodeError: # pragma: no cover + msg = f"""\ + ******************************************************************************** + `encoding="utf-8"` fails with {file!r}, trying `encoding={fallback_encoding!r}`. + + This fallback behaviour is considered **deprecated** and future versions of + `setuptools/pkg_resources` may not implement it. + + Please encode {file!r} with "utf-8" to ensure future builds will succeed. + + If this file was produced by `setuptools` itself, cleaning up the cached files + and re-building/re-installing the package with a newer version of `setuptools` + (e.g. by updating `build-system.requires` in its `pyproject.toml`) + might solve the problem. + ******************************************************************************** + """ + # TODO: Add a deadline? + # See comment in setuptools.unicode_utils._Utf8EncodingNeeded + warnings.warn(msg, PkgResourcesDeprecationWarning, stacklevel=2) + with open(file, "r", encoding=fallback_encoding) as f: + return f.read() diff --git a/lib/pkg_resources/_vendor/backports/__init__.py b/lib/pkg_resources/_vendor/backports/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/lib/pkg_resources/_vendor/backports/tarfile.py b/lib/pkg_resources/_vendor/backports/tarfile.py new file mode 100644 index 00000000..a7a9a6e7 --- /dev/null +++ b/lib/pkg_resources/_vendor/backports/tarfile.py @@ -0,0 +1,2900 @@ +#!/usr/bin/env python3 +#------------------------------------------------------------------- +# tarfile.py +#------------------------------------------------------------------- +# Copyright (C) 2002 Lars Gustaebel +# All rights reserved. 
+# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation +# files (the "Software"), to deal in the Software without +# restriction, including without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following +# conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +"""Read from and write to tar format archives. +""" + +version = "0.9.0" +__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)" +__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend." + +#--------- +# Imports +#--------- +from builtins import open as bltn_open +import sys +import os +import io +import shutil +import stat +import time +import struct +import copy +import re +import warnings + +try: + import pwd +except ImportError: + pwd = None +try: + import grp +except ImportError: + grp = None + +# os.symlink on Windows prior to 6.0 raises NotImplementedError +# OSError (winerror=1314) will be raised if the caller does not hold the +# SeCreateSymbolicLinkPrivilege privilege +symlink_exception = (AttributeError, NotImplementedError, OSError) + +# from tarfile import * +__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError", "ReadError", + "CompressionError", "StreamError", "ExtractError", "HeaderError", + "ENCODING", "USTAR_FORMAT", "GNU_FORMAT", "PAX_FORMAT", + "DEFAULT_FORMAT", "open","fully_trusted_filter", "data_filter", + "tar_filter", "FilterError", "AbsoluteLinkError", + "OutsideDestinationError", "SpecialFileError", "AbsolutePathError", + "LinkOutsideDestinationError"] + + +#--------------------------------------------------------- +# tar constants +#--------------------------------------------------------- +NUL = b"\0" # the null character +BLOCKSIZE = 512 # length of processing blocks +RECORDSIZE = BLOCKSIZE * 20 # length of records +GNU_MAGIC = b"ustar \0" # magic gnu tar string +POSIX_MAGIC = b"ustar\x0000" # magic posix tar string + +LENGTH_NAME = 100 # maximum length of a filename +LENGTH_LINK = 100 # maximum length of a linkname +LENGTH_PREFIX = 155 # maximum length of the prefix field + +REGTYPE = b"0" # regular file +AREGTYPE = b"\0" # regular file +LNKTYPE = b"1" # link (inside tarfile) +SYMTYPE = b"2" # symbolic link +CHRTYPE = b"3" # character special device +BLKTYPE = b"4" # block special device +DIRTYPE = b"5" # directory +FIFOTYPE = b"6" # fifo special device +CONTTYPE = b"7" # contiguous file + +GNUTYPE_LONGNAME = b"L" # GNU tar longname +GNUTYPE_LONGLINK = b"K" # GNU tar longlink +GNUTYPE_SPARSE = b"S" # GNU tar sparse file + +XHDTYPE = b"x" # POSIX.1-2001 extended header +XGLTYPE = b"g" # POSIX.1-2001 global header +SOLARIS_XHDTYPE = b"X" # Solaris extended header + +USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format +GNU_FORMAT = 1 # GNU tar format +PAX_FORMAT = 2 # POSIX.1-2001 
(pax) format +DEFAULT_FORMAT = PAX_FORMAT + +#--------------------------------------------------------- +# tarfile constants +#--------------------------------------------------------- +# File types that tarfile supports: +SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE, + SYMTYPE, DIRTYPE, FIFOTYPE, + CONTTYPE, CHRTYPE, BLKTYPE, + GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, + GNUTYPE_SPARSE) + +# File types that will be treated as a regular file. +REGULAR_TYPES = (REGTYPE, AREGTYPE, + CONTTYPE, GNUTYPE_SPARSE) + +# File types that are part of the GNU tar format. +GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, + GNUTYPE_SPARSE) + +# Fields from a pax header that override a TarInfo attribute. +PAX_FIELDS = ("path", "linkpath", "size", "mtime", + "uid", "gid", "uname", "gname") + +# Fields from a pax header that are affected by hdrcharset. +PAX_NAME_FIELDS = {"path", "linkpath", "uname", "gname"} + +# Fields in a pax header that are numbers, all other fields +# are treated as strings. +PAX_NUMBER_FIELDS = { + "atime": float, + "ctime": float, + "mtime": float, + "uid": int, + "gid": int, + "size": int +} + +#--------------------------------------------------------- +# initialization +#--------------------------------------------------------- +if os.name == "nt": + ENCODING = "utf-8" +else: + ENCODING = sys.getfilesystemencoding() + +#--------------------------------------------------------- +# Some useful functions +#--------------------------------------------------------- + +def stn(s, length, encoding, errors): + """Convert a string to a null-terminated bytes object. + """ + if s is None: + raise ValueError("metadata cannot contain None") + s = s.encode(encoding, errors) + return s[:length] + (length - len(s)) * NUL + +def nts(s, encoding, errors): + """Convert a null-terminated bytes object to a string. + """ + p = s.find(b"\0") + if p != -1: + s = s[:p] + return s.decode(encoding, errors) + +def nti(s): + """Convert a number field to a python number. + """ + # There are two possible encodings for a number field, see + # itn() below. + if s[0] in (0o200, 0o377): + n = 0 + for i in range(len(s) - 1): + n <<= 8 + n += s[i + 1] + if s[0] == 0o377: + n = -(256 ** (len(s) - 1) - n) + else: + try: + s = nts(s, "ascii", "strict") + n = int(s.strip() or "0", 8) + except ValueError: + raise InvalidHeaderError("invalid header") + return n + +def itn(n, digits=8, format=DEFAULT_FORMAT): + """Convert a python number to a number field. + """ + # POSIX 1003.1-1988 requires numbers to be encoded as a string of + # octal digits followed by a null-byte, this allows values up to + # (8**(digits-1))-1. GNU tar allows storing numbers greater than + # that if necessary. A leading 0o200 or 0o377 byte indicate this + # particular encoding, the following digits-1 bytes are a big-endian + # base-256 representation. This allows values up to (256**(digits-1))-1. + # A 0o200 byte indicates a positive number, a 0o377 byte a negative + # number. 
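+    # Editor's note -- an illustrative sketch of the two encodings described
+    # above (assuming these module-level helpers; not part of the upstream
+    # file):
+    #     >>> itn(511)                        # fits in 7 octal digits + NUL
+    #     b'0000777\x00'
+    #     >>> bytes(itn(-1, 8, GNU_FORMAT))   # 0o377 marker, base-256 body
+    #     b'\xff\xff\xff\xff\xff\xff\xff\xff'
+    #     >>> nti(itn(-1, 8, GNU_FORMAT))     # nti() inverts both encodings
+    #     -1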
+ original_n = n + n = int(n) + if 0 <= n < 8 ** (digits - 1): + s = bytes("%0*o" % (digits - 1, n), "ascii") + NUL + elif format == GNU_FORMAT and -256 ** (digits - 1) <= n < 256 ** (digits - 1): + if n >= 0: + s = bytearray([0o200]) + else: + s = bytearray([0o377]) + n = 256 ** digits + n + + for i in range(digits - 1): + s.insert(1, n & 0o377) + n >>= 8 + else: + raise ValueError("overflow in number field") + + return s + +def calc_chksums(buf): + """Calculate the checksum for a member's header by summing up all + characters except for the chksum field which is treated as if + it was filled with spaces. According to the GNU tar sources, + some tars (Sun and NeXT) calculate chksum with signed char, + which will be different if there are chars in the buffer with + the high bit set. So we calculate two checksums, unsigned and + signed. + """ + unsigned_chksum = 256 + sum(struct.unpack_from("148B8x356B", buf)) + signed_chksum = 256 + sum(struct.unpack_from("148b8x356b", buf)) + return unsigned_chksum, signed_chksum + +def copyfileobj(src, dst, length=None, exception=OSError, bufsize=None): + """Copy length bytes from fileobj src to fileobj dst. + If length is None, copy the entire content. + """ + bufsize = bufsize or 16 * 1024 + if length == 0: + return + if length is None: + shutil.copyfileobj(src, dst, bufsize) + return + + blocks, remainder = divmod(length, bufsize) + for b in range(blocks): + buf = src.read(bufsize) + if len(buf) < bufsize: + raise exception("unexpected end of data") + dst.write(buf) + + if remainder != 0: + buf = src.read(remainder) + if len(buf) < remainder: + raise exception("unexpected end of data") + dst.write(buf) + return + +def _safe_print(s): + encoding = getattr(sys.stdout, 'encoding', None) + if encoding is not None: + s = s.encode(encoding, 'backslashreplace').decode(encoding) + print(s, end=' ') + + +class TarError(Exception): + """Base exception.""" + pass +class ExtractError(TarError): + """General exception for extract errors.""" + pass +class ReadError(TarError): + """Exception for unreadable tar archives.""" + pass +class CompressionError(TarError): + """Exception for unavailable compression methods.""" + pass +class StreamError(TarError): + """Exception for unsupported operations on stream-like TarFiles.""" + pass +class HeaderError(TarError): + """Base exception for header errors.""" + pass +class EmptyHeaderError(HeaderError): + """Exception for empty headers.""" + pass +class TruncatedHeaderError(HeaderError): + """Exception for truncated headers.""" + pass +class EOFHeaderError(HeaderError): + """Exception for end of file headers.""" + pass +class InvalidHeaderError(HeaderError): + """Exception for invalid headers.""" + pass +class SubsequentHeaderError(HeaderError): + """Exception for missing and invalid extended headers.""" + pass + +#--------------------------- +# internal stream interface +#--------------------------- +class _LowLevelFile: + """Low-level file object. Supports reading and writing. + It is used instead of a regular file object for streaming + access. + """ + + def __init__(self, name, mode): + mode = { + "r": os.O_RDONLY, + "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC, + }[mode] + if hasattr(os, "O_BINARY"): + mode |= os.O_BINARY + self.fd = os.open(name, mode, 0o666) + + def close(self): + os.close(self.fd) + + def read(self, size): + return os.read(self.fd, size) + + def write(self, s): + os.write(self.fd, s) + +class _Stream: + """Class that serves as an adapter between TarFile and + a stream-like object. 
The stream-like object only
+       needs to have a read() or write() method that works with bytes,
+       and the method is accessed blockwise.
+       Use of gzip or bzip2 compression is possible.
+       A stream-like object could be for example: sys.stdin.buffer,
+       sys.stdout.buffer, a socket, a tape device etc.
+
+       _Stream is intended to be used only internally.
+    """
+
+    def __init__(self, name, mode, comptype, fileobj, bufsize,
+                 compresslevel):
+        """Construct a _Stream object.
+        """
+        self._extfileobj = True
+        if fileobj is None:
+            fileobj = _LowLevelFile(name, mode)
+            self._extfileobj = False
+
+        if comptype == '*':
+            # Enable transparent compression detection for the
+            # stream interface
+            fileobj = _StreamProxy(fileobj)
+            comptype = fileobj.getcomptype()
+
+        self.name = name or ""
+        self.mode = mode
+        self.comptype = comptype
+        self.fileobj = fileobj
+        self.bufsize = bufsize
+        self.buf = b""
+        self.pos = 0
+        self.closed = False
+
+        try:
+            if comptype == "gz":
+                try:
+                    import zlib
+                except ImportError:
+                    raise CompressionError("zlib module is not available") from None
+                self.zlib = zlib
+                self.crc = zlib.crc32(b"")
+                if mode == "r":
+                    self.exception = zlib.error
+                    self._init_read_gz()
+                else:
+                    self._init_write_gz(compresslevel)
+
+            elif comptype == "bz2":
+                try:
+                    import bz2
+                except ImportError:
+                    raise CompressionError("bz2 module is not available") from None
+                if mode == "r":
+                    self.dbuf = b""
+                    self.cmp = bz2.BZ2Decompressor()
+                    self.exception = OSError
+                else:
+                    self.cmp = bz2.BZ2Compressor(compresslevel)
+
+            elif comptype == "xz":
+                try:
+                    import lzma
+                except ImportError:
+                    raise CompressionError("lzma module is not available") from None
+                if mode == "r":
+                    self.dbuf = b""
+                    self.cmp = lzma.LZMADecompressor()
+                    self.exception = lzma.LZMAError
+                else:
+                    self.cmp = lzma.LZMACompressor()
+
+            elif comptype != "tar":
+                raise CompressionError("unknown compression type %r" % comptype)
+
+        except:
+            if not self._extfileobj:
+                self.fileobj.close()
+            self.closed = True
+            raise
+
+    def __del__(self):
+        if hasattr(self, "closed") and not self.closed:
+            self.close()
+
+    def _init_write_gz(self, compresslevel):
+        """Initialize for writing with gzip compression.
+        """
+        self.cmp = self.zlib.compressobj(compresslevel,
+                                         self.zlib.DEFLATED,
+                                         -self.zlib.MAX_WBITS,
+                                         self.zlib.DEF_MEM_LEVEL,
+                                         0)
+        timestamp = struct.pack("<L", int(time.time()))
+        self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
+        if self.name.endswith(".gz"):
+            self.name = self.name[:-3]
+        # Honor "directory components removed" from RFC1952
+        self.name = os.path.basename(self.name)
+        # RFC1952 says we must use ISO-8859-1 for the FNAME field.
+        self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
+
+    def write(self, s):
+        """Write string s to the stream.
+        """
+        if self.comptype == "gz":
+            self.crc = self.zlib.crc32(s, self.crc)
+        self.pos += len(s)
+        if self.comptype != "tar":
+            s = self.cmp.compress(s)
+        self.__write(s)
+
+    def __write(self, s):
+        """Write string s to the stream if a whole new block
+           is ready to be written.
+        """
+        self.buf += s
+        while len(self.buf) > self.bufsize:
+            self.fileobj.write(self.buf[:self.bufsize])
+            self.buf = self.buf[self.bufsize:]
+
+    def close(self):
+        """Close the _Stream object. No operation should be
+           done on it afterwards.
+        """
+        if self.closed:
+            return
+
+        self.closed = True
+        try:
+            if self.mode == "w" and self.comptype != "tar":
+                self.buf += self.cmp.flush()
+
+            if self.mode == "w" and self.buf:
+                self.fileobj.write(self.buf)
+                self.buf = b""
+                if self.comptype == "gz":
+                    self.fileobj.write(struct.pack("<L", self.crc))
+                    self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
+        finally:
+            if not self._extfileobj:
+                self.fileobj.close()
+
+    def _init_read_gz(self):
+        """Initialize for reading a gzip compressed fileobj.
+        """
+        self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
+        self.dbuf = b""
+
+        # taken from gzip.GzipFile with some alterations
+        if self.__read(2) != b"\037\213":
+            raise ReadError("not a gzip file")
+        if self.__read(1) != b"\010":
+            raise CompressionError("unsupported compression method")
+
+        flag = ord(self.__read(1))
+        self.__read(6)
+
+        if flag & 4:
+            xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
+            self.read(xlen)
+        if flag & 8:
+            while True:
+                s = self.__read(1)
+                if not s or s == NUL:
+                    break
+        if flag & 16:
+            while True:
+                s = self.__read(1)
+                if not s or s == NUL:
+                    break
+        if flag & 2:
+            self.__read(2)
+
+    def tell(self):
+        """Return the stream's file pointer position.
+        """
+        return self.pos
+
+    def seek(self, pos=0):
+        """Set the stream's file pointer to pos. Negative seeking
+           is forbidden.
+        """
+        if pos - self.pos >= 0:
+            blocks, remainder = divmod(pos - self.pos, self.bufsize)
+            for i in range(blocks):
+                self.read(self.bufsize)
+            self.read(remainder)
+        else:
+            raise StreamError("seeking backwards is not allowed")
+        return self.pos
+
+    def read(self, size):
+        """Return the next size number of bytes from the stream."""
+        assert size is not None
+        buf = self._read(size)
+        self.pos += len(buf)
+        return buf
+
+    def _read(self, size):
+        """Return size bytes from the stream.
+        """
+        if self.comptype == "tar":
+            return self.__read(size)
+
+        c = len(self.dbuf)
+        t = [self.dbuf]
+        while c < size:
+            # Skip underlying buffer to avoid unaligned double buffering.
+ if self.buf: + buf = self.buf + self.buf = b"" + else: + buf = self.fileobj.read(self.bufsize) + if not buf: + break + try: + buf = self.cmp.decompress(buf) + except self.exception as e: + raise ReadError("invalid compressed data") from e + t.append(buf) + c += len(buf) + t = b"".join(t) + self.dbuf = t[size:] + return t[:size] + + def __read(self, size): + """Return size bytes from stream. If internal buffer is empty, + read another block from the stream. + """ + c = len(self.buf) + t = [self.buf] + while c < size: + buf = self.fileobj.read(self.bufsize) + if not buf: + break + t.append(buf) + c += len(buf) + t = b"".join(t) + self.buf = t[size:] + return t[:size] +# class _Stream + +class _StreamProxy(object): + """Small proxy class that enables transparent compression + detection for the Stream interface (mode 'r|*'). + """ + + def __init__(self, fileobj): + self.fileobj = fileobj + self.buf = self.fileobj.read(BLOCKSIZE) + + def read(self, size): + self.read = self.fileobj.read + return self.buf + + def getcomptype(self): + if self.buf.startswith(b"\x1f\x8b\x08"): + return "gz" + elif self.buf[0:3] == b"BZh" and self.buf[4:10] == b"1AY&SY": + return "bz2" + elif self.buf.startswith((b"\x5d\x00\x00\x80", b"\xfd7zXZ")): + return "xz" + else: + return "tar" + + def close(self): + self.fileobj.close() +# class StreamProxy + +#------------------------ +# Extraction file object +#------------------------ +class _FileInFile(object): + """A thin wrapper around an existing file object that + provides a part of its data as an individual file + object. + """ + + def __init__(self, fileobj, offset, size, name, blockinfo=None): + self.fileobj = fileobj + self.offset = offset + self.size = size + self.position = 0 + self.name = name + self.closed = False + + if blockinfo is None: + blockinfo = [(0, size)] + + # Construct a map with data and zero blocks. + self.map_index = 0 + self.map = [] + lastpos = 0 + realpos = self.offset + for offset, size in blockinfo: + if offset > lastpos: + self.map.append((False, lastpos, offset, None)) + self.map.append((True, offset, offset + size, realpos)) + realpos += size + lastpos = offset + size + if lastpos < self.size: + self.map.append((False, lastpos, self.size, None)) + + def flush(self): + pass + + def readable(self): + return True + + def writable(self): + return False + + def seekable(self): + return self.fileobj.seekable() + + def tell(self): + """Return the current file position. + """ + return self.position + + def seek(self, position, whence=io.SEEK_SET): + """Seek to a position in the file. + """ + if whence == io.SEEK_SET: + self.position = min(max(position, 0), self.size) + elif whence == io.SEEK_CUR: + if position < 0: + self.position = max(self.position + position, 0) + else: + self.position = min(self.position + position, self.size) + elif whence == io.SEEK_END: + self.position = max(min(self.size + position, self.size), 0) + else: + raise ValueError("Invalid argument") + return self.position + + def read(self, size=None): + """Read data from the file. 
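+
+        (Editor's note, not upstream text: reads are served through the
+        data/hole map built in __init__, so the zero blocks of a sparse
+        member come back as NUL bytes without touching the file.)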
+ """ + if size is None: + size = self.size - self.position + else: + size = min(size, self.size - self.position) + + buf = b"" + while size > 0: + while True: + data, start, stop, offset = self.map[self.map_index] + if start <= self.position < stop: + break + else: + self.map_index += 1 + if self.map_index == len(self.map): + self.map_index = 0 + length = min(size, stop - self.position) + if data: + self.fileobj.seek(offset + (self.position - start)) + b = self.fileobj.read(length) + if len(b) != length: + raise ReadError("unexpected end of data") + buf += b + else: + buf += NUL * length + size -= length + self.position += length + return buf + + def readinto(self, b): + buf = self.read(len(b)) + b[:len(buf)] = buf + return len(buf) + + def close(self): + self.closed = True +#class _FileInFile + +class ExFileObject(io.BufferedReader): + + def __init__(self, tarfile, tarinfo): + fileobj = _FileInFile(tarfile.fileobj, tarinfo.offset_data, + tarinfo.size, tarinfo.name, tarinfo.sparse) + super().__init__(fileobj) +#class ExFileObject + + +#----------------------------- +# extraction filters (PEP 706) +#----------------------------- + +class FilterError(TarError): + pass + +class AbsolutePathError(FilterError): + def __init__(self, tarinfo): + self.tarinfo = tarinfo + super().__init__(f'member {tarinfo.name!r} has an absolute path') + +class OutsideDestinationError(FilterError): + def __init__(self, tarinfo, path): + self.tarinfo = tarinfo + self._path = path + super().__init__(f'{tarinfo.name!r} would be extracted to {path!r}, ' + + 'which is outside the destination') + +class SpecialFileError(FilterError): + def __init__(self, tarinfo): + self.tarinfo = tarinfo + super().__init__(f'{tarinfo.name!r} is a special file') + +class AbsoluteLinkError(FilterError): + def __init__(self, tarinfo): + self.tarinfo = tarinfo + super().__init__(f'{tarinfo.name!r} is a link to an absolute path') + +class LinkOutsideDestinationError(FilterError): + def __init__(self, tarinfo, path): + self.tarinfo = tarinfo + self._path = path + super().__init__(f'{tarinfo.name!r} would link to {path!r}, ' + + 'which is outside the destination') + +def _get_filtered_attrs(member, dest_path, for_data=True): + new_attrs = {} + name = member.name + dest_path = os.path.realpath(dest_path) + # Strip leading / (tar's directory separator) from filenames. + # Include os.sep (target OS directory separator) as well. + if name.startswith(('/', os.sep)): + name = new_attrs['name'] = member.path.lstrip('/' + os.sep) + if os.path.isabs(name): + # Path is absolute even after stripping. + # For example, 'C:/foo' on Windows. 
+ raise AbsolutePathError(member) + # Ensure we stay in the destination + target_path = os.path.realpath(os.path.join(dest_path, name)) + if os.path.commonpath([target_path, dest_path]) != dest_path: + raise OutsideDestinationError(member, target_path) + # Limit permissions (no high bits, and go-w) + mode = member.mode + if mode is not None: + # Strip high bits & group/other write bits + mode = mode & 0o755 + if for_data: + # For data, handle permissions & file types + if member.isreg() or member.islnk(): + if not mode & 0o100: + # Clear executable bits if not executable by user + mode &= ~0o111 + # Ensure owner can read & write + mode |= 0o600 + elif member.isdir() or member.issym(): + # Ignore mode for directories & symlinks + mode = None + else: + # Reject special files + raise SpecialFileError(member) + if mode != member.mode: + new_attrs['mode'] = mode + if for_data: + # Ignore ownership for 'data' + if member.uid is not None: + new_attrs['uid'] = None + if member.gid is not None: + new_attrs['gid'] = None + if member.uname is not None: + new_attrs['uname'] = None + if member.gname is not None: + new_attrs['gname'] = None + # Check link destination for 'data' + if member.islnk() or member.issym(): + if os.path.isabs(member.linkname): + raise AbsoluteLinkError(member) + if member.issym(): + target_path = os.path.join(dest_path, + os.path.dirname(name), + member.linkname) + else: + target_path = os.path.join(dest_path, + member.linkname) + target_path = os.path.realpath(target_path) + if os.path.commonpath([target_path, dest_path]) != dest_path: + raise LinkOutsideDestinationError(member, target_path) + return new_attrs + +def fully_trusted_filter(member, dest_path): + return member + +def tar_filter(member, dest_path): + new_attrs = _get_filtered_attrs(member, dest_path, False) + if new_attrs: + return member.replace(**new_attrs, deep=False) + return member + +def data_filter(member, dest_path): + new_attrs = _get_filtered_attrs(member, dest_path, True) + if new_attrs: + return member.replace(**new_attrs, deep=False) + return member + +_NAMED_FILTERS = { + "fully_trusted": fully_trusted_filter, + "tar": tar_filter, + "data": data_filter, +} + +#------------------ +# Exported Classes +#------------------ + +# Sentinel for replace() defaults, meaning "don't change the attribute" +_KEEP = object() + +class TarInfo(object): + """Informational class which holds the details about an + archive member given by a tar header block. + TarInfo objects are returned by TarFile.getmember(), + TarFile.getmembers() and TarFile.gettarinfo() and are + usually created internally. + """ + + __slots__ = dict( + name = 'Name of the archive member.', + mode = 'Permission bits.', + uid = 'User ID of the user who originally stored this member.', + gid = 'Group ID of the user who originally stored this member.', + size = 'Size in bytes.', + mtime = 'Time of last modification.', + chksum = 'Header checksum.', + type = ('File type. 
type is usually one of these constants: ' + 'REGTYPE, AREGTYPE, LNKTYPE, SYMTYPE, DIRTYPE, FIFOTYPE, ' + 'CONTTYPE, CHRTYPE, BLKTYPE, GNUTYPE_SPARSE.'), + linkname = ('Name of the target file name, which is only present ' + 'in TarInfo objects of type LNKTYPE and SYMTYPE.'), + uname = 'User name.', + gname = 'Group name.', + devmajor = 'Device major number.', + devminor = 'Device minor number.', + offset = 'The tar header starts here.', + offset_data = "The file's data starts here.", + pax_headers = ('A dictionary containing key-value pairs of an ' + 'associated pax extended header.'), + sparse = 'Sparse member information.', + tarfile = None, + _sparse_structs = None, + _link_target = None, + ) + + def __init__(self, name=""): + """Construct a TarInfo object. name is the optional name + of the member. + """ + self.name = name # member name + self.mode = 0o644 # file permissions + self.uid = 0 # user id + self.gid = 0 # group id + self.size = 0 # file size + self.mtime = 0 # modification time + self.chksum = 0 # header checksum + self.type = REGTYPE # member type + self.linkname = "" # link name + self.uname = "" # user name + self.gname = "" # group name + self.devmajor = 0 # device major number + self.devminor = 0 # device minor number + + self.offset = 0 # the tar header starts here + self.offset_data = 0 # the file's data starts here + + self.sparse = None # sparse member information + self.pax_headers = {} # pax header information + + @property + def path(self): + 'In pax headers, "name" is called "path".' + return self.name + + @path.setter + def path(self, name): + self.name = name + + @property + def linkpath(self): + 'In pax headers, "linkname" is called "linkpath".' + return self.linkname + + @linkpath.setter + def linkpath(self, linkname): + self.linkname = linkname + + def __repr__(self): + return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self)) + + def replace(self, *, + name=_KEEP, mtime=_KEEP, mode=_KEEP, linkname=_KEEP, + uid=_KEEP, gid=_KEEP, uname=_KEEP, gname=_KEEP, + deep=True, _KEEP=_KEEP): + """Return a deep copy of self with the given attributes replaced. + """ + if deep: + result = copy.deepcopy(self) + else: + result = copy.copy(self) + if name is not _KEEP: + result.name = name + if mtime is not _KEEP: + result.mtime = mtime + if mode is not _KEEP: + result.mode = mode + if linkname is not _KEEP: + result.linkname = linkname + if uid is not _KEEP: + result.uid = uid + if gid is not _KEEP: + result.gid = gid + if uname is not _KEEP: + result.uname = uname + if gname is not _KEEP: + result.gname = gname + return result + + def get_info(self): + """Return the TarInfo's attributes as a dictionary. + """ + if self.mode is None: + mode = None + else: + mode = self.mode & 0o7777 + info = { + "name": self.name, + "mode": mode, + "uid": self.uid, + "gid": self.gid, + "size": self.size, + "mtime": self.mtime, + "chksum": self.chksum, + "type": self.type, + "linkname": self.linkname, + "uname": self.uname, + "gname": self.gname, + "devmajor": self.devmajor, + "devminor": self.devminor + } + + if info["type"] == DIRTYPE and not info["name"].endswith("/"): + info["name"] += "/" + + return info + + def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"): + """Return a tar header as a string of 512 byte blocks. 
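+
+        (Editor's note, not upstream text: the result is always a multiple of
+        BLOCKSIZE -- GNU long names/links and pax records are emitted as
+        extra pseudo-member blocks ahead of the real 512-byte header.)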
+ """ + info = self.get_info() + for name, value in info.items(): + if value is None: + raise ValueError("%s may not be None" % name) + + if format == USTAR_FORMAT: + return self.create_ustar_header(info, encoding, errors) + elif format == GNU_FORMAT: + return self.create_gnu_header(info, encoding, errors) + elif format == PAX_FORMAT: + return self.create_pax_header(info, encoding) + else: + raise ValueError("invalid format") + + def create_ustar_header(self, info, encoding, errors): + """Return the object as a ustar header block. + """ + info["magic"] = POSIX_MAGIC + + if len(info["linkname"].encode(encoding, errors)) > LENGTH_LINK: + raise ValueError("linkname is too long") + + if len(info["name"].encode(encoding, errors)) > LENGTH_NAME: + info["prefix"], info["name"] = self._posix_split_name(info["name"], encoding, errors) + + return self._create_header(info, USTAR_FORMAT, encoding, errors) + + def create_gnu_header(self, info, encoding, errors): + """Return the object as a GNU header block sequence. + """ + info["magic"] = GNU_MAGIC + + buf = b"" + if len(info["linkname"].encode(encoding, errors)) > LENGTH_LINK: + buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors) + + if len(info["name"].encode(encoding, errors)) > LENGTH_NAME: + buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors) + + return buf + self._create_header(info, GNU_FORMAT, encoding, errors) + + def create_pax_header(self, info, encoding): + """Return the object as a ustar header block. If it cannot be + represented this way, prepend a pax extended header sequence + with supplement information. + """ + info["magic"] = POSIX_MAGIC + pax_headers = self.pax_headers.copy() + + # Test string fields for values that exceed the field length or cannot + # be represented in ASCII encoding. + for name, hname, length in ( + ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK), + ("uname", "uname", 32), ("gname", "gname", 32)): + + if hname in pax_headers: + # The pax header has priority. + continue + + # Try to encode the string as ASCII. + try: + info[name].encode("ascii", "strict") + except UnicodeEncodeError: + pax_headers[hname] = info[name] + continue + + if len(info[name]) > length: + pax_headers[hname] = info[name] + + # Test number fields for values that exceed the field limit or values + # that like to be stored as float. + for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)): + needs_pax = False + + val = info[name] + val_is_float = isinstance(val, float) + val_int = round(val) if val_is_float else val + if not 0 <= val_int < 8 ** (digits - 1): + # Avoid overflow. + info[name] = 0 + needs_pax = True + elif val_is_float: + # Put rounded value in ustar header, and full + # precision value in pax header. + info[name] = val_int + needs_pax = True + + # The existing pax header has priority. + if needs_pax and name not in pax_headers: + pax_headers[name] = str(val) + + # Create a pax extended header if necessary. + if pax_headers: + buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding) + else: + buf = b"" + + return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace") + + @classmethod + def create_pax_global_header(cls, pax_headers): + """Return the object as a pax global header block sequence. 
+ """ + return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf-8") + + def _posix_split_name(self, name, encoding, errors): + """Split a name longer than 100 chars into a prefix + and a name part. + """ + components = name.split("/") + for i in range(1, len(components)): + prefix = "/".join(components[:i]) + name = "/".join(components[i:]) + if len(prefix.encode(encoding, errors)) <= LENGTH_PREFIX and \ + len(name.encode(encoding, errors)) <= LENGTH_NAME: + break + else: + raise ValueError("name is too long") + + return prefix, name + + @staticmethod + def _create_header(info, format, encoding, errors): + """Return a header block. info is a dictionary with file + information, format must be one of the *_FORMAT constants. + """ + has_device_fields = info.get("type") in (CHRTYPE, BLKTYPE) + if has_device_fields: + devmajor = itn(info.get("devmajor", 0), 8, format) + devminor = itn(info.get("devminor", 0), 8, format) + else: + devmajor = stn("", 8, encoding, errors) + devminor = stn("", 8, encoding, errors) + + # None values in metadata should cause ValueError. + # itn()/stn() do this for all fields except type. + filetype = info.get("type", REGTYPE) + if filetype is None: + raise ValueError("TarInfo.type must not be None") + + parts = [ + stn(info.get("name", ""), 100, encoding, errors), + itn(info.get("mode", 0) & 0o7777, 8, format), + itn(info.get("uid", 0), 8, format), + itn(info.get("gid", 0), 8, format), + itn(info.get("size", 0), 12, format), + itn(info.get("mtime", 0), 12, format), + b" ", # checksum field + filetype, + stn(info.get("linkname", ""), 100, encoding, errors), + info.get("magic", POSIX_MAGIC), + stn(info.get("uname", ""), 32, encoding, errors), + stn(info.get("gname", ""), 32, encoding, errors), + devmajor, + devminor, + stn(info.get("prefix", ""), 155, encoding, errors) + ] + + buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts)) + chksum = calc_chksums(buf[-BLOCKSIZE:])[0] + buf = buf[:-364] + bytes("%06o\0" % chksum, "ascii") + buf[-357:] + return buf + + @staticmethod + def _create_payload(payload): + """Return the string payload filled with zero bytes + up to the next 512 byte border. + """ + blocks, remainder = divmod(len(payload), BLOCKSIZE) + if remainder > 0: + payload += (BLOCKSIZE - remainder) * NUL + return payload + + @classmethod + def _create_gnu_long_header(cls, name, type, encoding, errors): + """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence + for name. + """ + name = name.encode(encoding, errors) + NUL + + info = {} + info["name"] = "././@LongLink" + info["type"] = type + info["size"] = len(name) + info["magic"] = GNU_MAGIC + + # create extended header + name blocks. + return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \ + cls._create_payload(name) + + @classmethod + def _create_pax_generic_header(cls, pax_headers, type, encoding): + """Return a POSIX.1-2008 extended or global header sequence + that contains a list of keyword, value pairs. The values + must be strings. + """ + # Check if one of the fields contains surrogate characters and thereby + # forces hdrcharset=BINARY, see _proc_pax() for more information. + binary = False + for keyword, value in pax_headers.items(): + try: + value.encode("utf-8", "strict") + except UnicodeEncodeError: + binary = True + break + + records = b"" + if binary: + # Put the hdrcharset field at the beginning of the header. 
+ records += b"21 hdrcharset=BINARY\n" + + for keyword, value in pax_headers.items(): + keyword = keyword.encode("utf-8") + if binary: + # Try to restore the original byte representation of `value'. + # Needless to say, that the encoding must match the string. + value = value.encode(encoding, "surrogateescape") + else: + value = value.encode("utf-8") + + l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n' + n = p = 0 + while True: + n = l + len(str(p)) + if n == p: + break + p = n + records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n" + + # We use a hardcoded "././@PaxHeader" name like star does + # instead of the one that POSIX recommends. + info = {} + info["name"] = "././@PaxHeader" + info["type"] = type + info["size"] = len(records) + info["magic"] = POSIX_MAGIC + + # Create pax header + record blocks. + return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \ + cls._create_payload(records) + + @classmethod + def frombuf(cls, buf, encoding, errors): + """Construct a TarInfo object from a 512 byte bytes object. + """ + if len(buf) == 0: + raise EmptyHeaderError("empty header") + if len(buf) != BLOCKSIZE: + raise TruncatedHeaderError("truncated header") + if buf.count(NUL) == BLOCKSIZE: + raise EOFHeaderError("end of file header") + + chksum = nti(buf[148:156]) + if chksum not in calc_chksums(buf): + raise InvalidHeaderError("bad checksum") + + obj = cls() + obj.name = nts(buf[0:100], encoding, errors) + obj.mode = nti(buf[100:108]) + obj.uid = nti(buf[108:116]) + obj.gid = nti(buf[116:124]) + obj.size = nti(buf[124:136]) + obj.mtime = nti(buf[136:148]) + obj.chksum = chksum + obj.type = buf[156:157] + obj.linkname = nts(buf[157:257], encoding, errors) + obj.uname = nts(buf[265:297], encoding, errors) + obj.gname = nts(buf[297:329], encoding, errors) + obj.devmajor = nti(buf[329:337]) + obj.devminor = nti(buf[337:345]) + prefix = nts(buf[345:500], encoding, errors) + + # Old V7 tar format represents a directory as a regular + # file with a trailing slash. + if obj.type == AREGTYPE and obj.name.endswith("/"): + obj.type = DIRTYPE + + # The old GNU sparse format occupies some of the unused + # space in the buffer for up to 4 sparse structures. + # Save them for later processing in _proc_sparse(). + if obj.type == GNUTYPE_SPARSE: + pos = 386 + structs = [] + for i in range(4): + try: + offset = nti(buf[pos:pos + 12]) + numbytes = nti(buf[pos + 12:pos + 24]) + except ValueError: + break + structs.append((offset, numbytes)) + pos += 24 + isextended = bool(buf[482]) + origsize = nti(buf[483:495]) + obj._sparse_structs = (structs, isextended, origsize) + + # Remove redundant slashes from directories. + if obj.isdir(): + obj.name = obj.name.rstrip("/") + + # Reconstruct a ustar longname. + if prefix and obj.type not in GNU_TYPES: + obj.name = prefix + "/" + obj.name + return obj + + @classmethod + def fromtarfile(cls, tarfile): + """Return the next TarInfo object from TarFile object + tarfile. + """ + buf = tarfile.fileobj.read(BLOCKSIZE) + obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors) + obj.offset = tarfile.fileobj.tell() - BLOCKSIZE + return obj._proc_member(tarfile) + + #-------------------------------------------------------------------------- + # The following are methods that are called depending on the type of a + # member. The entry point is _proc_member() which can be overridden in a + # subclass to add custom _proc_*() methods. A _proc_*() method MUST + # implement the following + # operations: + # 1. 
Set self.offset_data to the position where the data blocks begin, + # if there is data that follows. + # 2. Set tarfile.offset to the position where the next member's header will + # begin. + # 3. Return self or another valid TarInfo object. + def _proc_member(self, tarfile): + """Choose the right processing method depending on + the type and call it. + """ + if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK): + return self._proc_gnulong(tarfile) + elif self.type == GNUTYPE_SPARSE: + return self._proc_sparse(tarfile) + elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE): + return self._proc_pax(tarfile) + else: + return self._proc_builtin(tarfile) + + def _proc_builtin(self, tarfile): + """Process a builtin type or an unknown type which + will be treated as a regular file. + """ + self.offset_data = tarfile.fileobj.tell() + offset = self.offset_data + if self.isreg() or self.type not in SUPPORTED_TYPES: + # Skip the following data blocks. + offset += self._block(self.size) + tarfile.offset = offset + + # Patch the TarInfo object with saved global + # header information. + self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors) + + # Remove redundant slashes from directories. This is to be consistent + # with frombuf(). + if self.isdir(): + self.name = self.name.rstrip("/") + + return self + + def _proc_gnulong(self, tarfile): + """Process the blocks that hold a GNU longname + or longlink member. + """ + buf = tarfile.fileobj.read(self._block(self.size)) + + # Fetch the next header and process it. + try: + next = self.fromtarfile(tarfile) + except HeaderError as e: + raise SubsequentHeaderError(str(e)) from None + + # Patch the TarInfo object from the next header with + # the longname information. + next.offset = self.offset + if self.type == GNUTYPE_LONGNAME: + next.name = nts(buf, tarfile.encoding, tarfile.errors) + elif self.type == GNUTYPE_LONGLINK: + next.linkname = nts(buf, tarfile.encoding, tarfile.errors) + + # Remove redundant slashes from directories. This is to be consistent + # with frombuf(). + if next.isdir(): + next.name = next.name.removesuffix("/") + + return next + + def _proc_sparse(self, tarfile): + """Process a GNU sparse header plus extra headers. + """ + # We already collected some sparse structures in frombuf(). + structs, isextended, origsize = self._sparse_structs + del self._sparse_structs + + # Collect sparse structures from extended header blocks. + while isextended: + buf = tarfile.fileobj.read(BLOCKSIZE) + pos = 0 + for i in range(21): + try: + offset = nti(buf[pos:pos + 12]) + numbytes = nti(buf[pos + 12:pos + 24]) + except ValueError: + break + if offset and numbytes: + structs.append((offset, numbytes)) + pos += 24 + isextended = bool(buf[504]) + self.sparse = structs + + self.offset_data = tarfile.fileobj.tell() + tarfile.offset = self.offset_data + self._block(self.size) + self.size = origsize + return self + + def _proc_pax(self, tarfile): + """Process an extended or global header as described in + POSIX.1-2008. + """ + # Read the header information. + buf = tarfile.fileobj.read(self._block(self.size)) + + # A pax header stores supplemental information for either + # the following file (extended) or all following files + # (global). + if self.type == XGLTYPE: + pax_headers = tarfile.pax_headers + else: + pax_headers = tarfile.pax_headers.copy() + + # Check if the pax header contains a hdrcharset field. This tells us + # the encoding of the path, linkpath, uname and gname fields. 
Normally, + # these fields are UTF-8 encoded but since POSIX.1-2008 tar + # implementations are allowed to store them as raw binary strings if + # the translation to UTF-8 fails. + match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf) + if match is not None: + pax_headers["hdrcharset"] = match.group(1).decode("utf-8") + + # For the time being, we don't care about anything other than "BINARY". + # The only other value that is currently allowed by the standard is + # "ISO-IR 10646 2000 UTF-8" in other words UTF-8. + hdrcharset = pax_headers.get("hdrcharset") + if hdrcharset == "BINARY": + encoding = tarfile.encoding + else: + encoding = "utf-8" + + # Parse pax header information. A record looks like that: + # "%d %s=%s\n" % (length, keyword, value). length is the size + # of the complete record including the length field itself and + # the newline. keyword and value are both UTF-8 encoded strings. + regex = re.compile(br"(\d+) ([^=]+)=") + pos = 0 + while match := regex.match(buf, pos): + length, keyword = match.groups() + length = int(length) + if length == 0: + raise InvalidHeaderError("invalid header") + value = buf[match.end(2) + 1:match.start(1) + length - 1] + + # Normally, we could just use "utf-8" as the encoding and "strict" + # as the error handler, but we better not take the risk. For + # example, GNU tar <= 1.23 is known to store filenames it cannot + # translate to UTF-8 as raw strings (unfortunately without a + # hdrcharset=BINARY header). + # We first try the strict standard encoding, and if that fails we + # fall back on the user's encoding and error handler. + keyword = self._decode_pax_field(keyword, "utf-8", "utf-8", + tarfile.errors) + if keyword in PAX_NAME_FIELDS: + value = self._decode_pax_field(value, encoding, tarfile.encoding, + tarfile.errors) + else: + value = self._decode_pax_field(value, "utf-8", "utf-8", + tarfile.errors) + + pax_headers[keyword] = value + pos += length + + # Fetch the next header. + try: + next = self.fromtarfile(tarfile) + except HeaderError as e: + raise SubsequentHeaderError(str(e)) from None + + # Process GNU sparse information. + if "GNU.sparse.map" in pax_headers: + # GNU extended sparse format version 0.1. + self._proc_gnusparse_01(next, pax_headers) + + elif "GNU.sparse.size" in pax_headers: + # GNU extended sparse format version 0.0. + self._proc_gnusparse_00(next, pax_headers, buf) + + elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0": + # GNU extended sparse format version 1.0. + self._proc_gnusparse_10(next, pax_headers, tarfile) + + if self.type in (XHDTYPE, SOLARIS_XHDTYPE): + # Patch the TarInfo object with the extended header info. + next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors) + next.offset = self.offset + + if "size" in pax_headers: + # If the extended header replaces the size field, + # we need to recalculate the offset where the next + # header starts. + offset = next.offset_data + if next.isreg() or next.type not in SUPPORTED_TYPES: + offset += next._block(next.size) + tarfile.offset = offset + + return next + + def _proc_gnusparse_00(self, next, pax_headers, buf): + """Process a GNU tar extended sparse header, version 0.0. 
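+
+        (Editor's note: version 0.0 repeats the GNU.sparse.offset and
+        GNU.sparse.numbytes keywords once per data block, which is why the
+        pairs are re-scanned from the raw header buffer here.)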
+ """ + offsets = [] + for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf): + offsets.append(int(match.group(1))) + numbytes = [] + for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf): + numbytes.append(int(match.group(1))) + next.sparse = list(zip(offsets, numbytes)) + + def _proc_gnusparse_01(self, next, pax_headers): + """Process a GNU tar extended sparse header, version 0.1. + """ + sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")] + next.sparse = list(zip(sparse[::2], sparse[1::2])) + + def _proc_gnusparse_10(self, next, pax_headers, tarfile): + """Process a GNU tar extended sparse header, version 1.0. + """ + fields = None + sparse = [] + buf = tarfile.fileobj.read(BLOCKSIZE) + fields, buf = buf.split(b"\n", 1) + fields = int(fields) + while len(sparse) < fields * 2: + if b"\n" not in buf: + buf += tarfile.fileobj.read(BLOCKSIZE) + number, buf = buf.split(b"\n", 1) + sparse.append(int(number)) + next.offset_data = tarfile.fileobj.tell() + next.sparse = list(zip(sparse[::2], sparse[1::2])) + + def _apply_pax_info(self, pax_headers, encoding, errors): + """Replace fields with supplemental information from a previous + pax extended or global header. + """ + for keyword, value in pax_headers.items(): + if keyword == "GNU.sparse.name": + setattr(self, "path", value) + elif keyword == "GNU.sparse.size": + setattr(self, "size", int(value)) + elif keyword == "GNU.sparse.realsize": + setattr(self, "size", int(value)) + elif keyword in PAX_FIELDS: + if keyword in PAX_NUMBER_FIELDS: + try: + value = PAX_NUMBER_FIELDS[keyword](value) + except ValueError: + value = 0 + if keyword == "path": + value = value.rstrip("/") + setattr(self, keyword, value) + + self.pax_headers = pax_headers.copy() + + def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors): + """Decode a single field from a pax record. + """ + try: + return value.decode(encoding, "strict") + except UnicodeDecodeError: + return value.decode(fallback_encoding, fallback_errors) + + def _block(self, count): + """Round up a byte count by BLOCKSIZE and return it, + e.g. _block(834) => 1024. + """ + blocks, remainder = divmod(count, BLOCKSIZE) + if remainder: + blocks += 1 + return blocks * BLOCKSIZE + + def isreg(self): + 'Return True if the Tarinfo object is a regular file.' + return self.type in REGULAR_TYPES + + def isfile(self): + 'Return True if the Tarinfo object is a regular file.' + return self.isreg() + + def isdir(self): + 'Return True if it is a directory.' + return self.type == DIRTYPE + + def issym(self): + 'Return True if it is a symbolic link.' + return self.type == SYMTYPE + + def islnk(self): + 'Return True if it is a hard link.' + return self.type == LNKTYPE + + def ischr(self): + 'Return True if it is a character device.' + return self.type == CHRTYPE + + def isblk(self): + 'Return True if it is a block device.' + return self.type == BLKTYPE + + def isfifo(self): + 'Return True if it is a FIFO.' + return self.type == FIFOTYPE + + def issparse(self): + return self.sparse is not None + + def isdev(self): + 'Return True if it is one of character device, block device or FIFO.' + return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE) +# class TarInfo + +class TarFile(object): + """The TarFile Class provides an interface to tar archives. + """ + + debug = 0 # May be set from 0 (no msgs) to 3 (all msgs) + + dereference = False # If true, add content of linked file to the + # tar file, else the link. 
+ + ignore_zeros = False # If true, skips empty or invalid blocks and + # continues processing. + + errorlevel = 1 # If 0, fatal errors only appear in debug + # messages (if debug >= 0). If > 0, errors + # are passed to the caller as exceptions. + + format = DEFAULT_FORMAT # The format to use when creating an archive. + + encoding = ENCODING # Encoding for 8-bit character strings. + + errors = None # Error handler for unicode conversion. + + tarinfo = TarInfo # The default TarInfo class to use. + + fileobject = ExFileObject # The file-object for extractfile(). + + extraction_filter = None # The default filter for extraction. + + def __init__(self, name=None, mode="r", fileobj=None, format=None, + tarinfo=None, dereference=None, ignore_zeros=None, encoding=None, + errors="surrogateescape", pax_headers=None, debug=None, + errorlevel=None, copybufsize=None): + """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to + read from an existing archive, 'a' to append data to an existing + file or 'w' to create a new file overwriting an existing one. `mode' + defaults to 'r'. + If `fileobj' is given, it is used for reading or writing data. If it + can be determined, `mode' is overridden by `fileobj's mode. + `fileobj' is not closed, when TarFile is closed. + """ + modes = {"r": "rb", "a": "r+b", "w": "wb", "x": "xb"} + if mode not in modes: + raise ValueError("mode must be 'r', 'a', 'w' or 'x'") + self.mode = mode + self._mode = modes[mode] + + if not fileobj: + if self.mode == "a" and not os.path.exists(name): + # Create nonexistent files in append mode. + self.mode = "w" + self._mode = "wb" + fileobj = bltn_open(name, self._mode) + self._extfileobj = False + else: + if (name is None and hasattr(fileobj, "name") and + isinstance(fileobj.name, (str, bytes))): + name = fileobj.name + if hasattr(fileobj, "mode"): + self._mode = fileobj.mode + self._extfileobj = True + self.name = os.path.abspath(name) if name else None + self.fileobj = fileobj + + # Init attributes. + if format is not None: + self.format = format + if tarinfo is not None: + self.tarinfo = tarinfo + if dereference is not None: + self.dereference = dereference + if ignore_zeros is not None: + self.ignore_zeros = ignore_zeros + if encoding is not None: + self.encoding = encoding + self.errors = errors + + if pax_headers is not None and self.format == PAX_FORMAT: + self.pax_headers = pax_headers + else: + self.pax_headers = {} + + if debug is not None: + self.debug = debug + if errorlevel is not None: + self.errorlevel = errorlevel + + # Init datastructures. + self.copybufsize = copybufsize + self.closed = False + self.members = [] # list of members as TarInfo objects + self._loaded = False # flag if all members have been read + self.offset = self.fileobj.tell() + # current position in the archive file + self.inodes = {} # dictionary caching the inodes of + # archive members already added + + try: + if self.mode == "r": + self.firstmember = None + self.firstmember = self.next() + + if self.mode == "a": + # Move to the end of the archive, + # before the first empty block. 
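+                # (Editor's note: each fromtarfile() call below advances
+                #  self.offset past one member; on EOFHeaderError we seek back
+                #  so new members overwrite the terminating zero blocks.)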
+ while True: + self.fileobj.seek(self.offset) + try: + tarinfo = self.tarinfo.fromtarfile(self) + self.members.append(tarinfo) + except EOFHeaderError: + self.fileobj.seek(self.offset) + break + except HeaderError as e: + raise ReadError(str(e)) from None + + if self.mode in ("a", "w", "x"): + self._loaded = True + + if self.pax_headers: + buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy()) + self.fileobj.write(buf) + self.offset += len(buf) + except: + if not self._extfileobj: + self.fileobj.close() + self.closed = True + raise + + #-------------------------------------------------------------------------- + # Below are the classmethods which act as alternate constructors to the + # TarFile class. The open() method is the only one that is needed for + # public use; it is the "super"-constructor and is able to select an + # adequate "sub"-constructor for a particular compression using the mapping + # from OPEN_METH. + # + # This concept allows one to subclass TarFile without losing the comfort of + # the super-constructor. A sub-constructor is registered and made available + # by adding it to the mapping in OPEN_METH. + + @classmethod + def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs): + r"""Open a tar archive for reading, writing or appending. Return + an appropriate TarFile class. + + mode: + 'r' or 'r:\*' open for reading with transparent compression + 'r:' open for reading exclusively uncompressed + 'r:gz' open for reading with gzip compression + 'r:bz2' open for reading with bzip2 compression + 'r:xz' open for reading with lzma compression + 'a' or 'a:' open for appending, creating the file if necessary + 'w' or 'w:' open for writing without compression + 'w:gz' open for writing with gzip compression + 'w:bz2' open for writing with bzip2 compression + 'w:xz' open for writing with lzma compression + + 'x' or 'x:' create a tarfile exclusively without compression, raise + an exception if the file is already created + 'x:gz' create a gzip compressed tarfile, raise an exception + if the file is already created + 'x:bz2' create a bzip2 compressed tarfile, raise an exception + if the file is already created + 'x:xz' create an lzma compressed tarfile, raise an exception + if the file is already created + + 'r|\*' open a stream of tar blocks with transparent compression + 'r|' open an uncompressed stream of tar blocks for reading + 'r|gz' open a gzip compressed stream of tar blocks + 'r|bz2' open a bzip2 compressed stream of tar blocks + 'r|xz' open an lzma compressed stream of tar blocks + 'w|' open an uncompressed stream for writing + 'w|gz' open a gzip compressed stream for writing + 'w|bz2' open a bzip2 compressed stream for writing + 'w|xz' open an lzma compressed stream for writing + """ + + if not name and not fileobj: + raise ValueError("nothing to open") + + if mode in ("r", "r:*"): + # Find out which *open() is appropriate for opening the file. 
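+            # (Editor's note: the False-before-True sort key below makes the
+            #  loop try the compressed openers first and fall back to plain
+            #  taropen last.)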
+ def not_compressed(comptype): + return cls.OPEN_METH[comptype] == 'taropen' + error_msgs = [] + for comptype in sorted(cls.OPEN_METH, key=not_compressed): + func = getattr(cls, cls.OPEN_METH[comptype]) + if fileobj is not None: + saved_pos = fileobj.tell() + try: + return func(name, "r", fileobj, **kwargs) + except (ReadError, CompressionError) as e: + error_msgs.append(f'- method {comptype}: {e!r}') + if fileobj is not None: + fileobj.seek(saved_pos) + continue + error_msgs_summary = '\n'.join(error_msgs) + raise ReadError(f"file could not be opened successfully:\n{error_msgs_summary}") + + elif ":" in mode: + filemode, comptype = mode.split(":", 1) + filemode = filemode or "r" + comptype = comptype or "tar" + + # Select the *open() function according to + # given compression. + if comptype in cls.OPEN_METH: + func = getattr(cls, cls.OPEN_METH[comptype]) + else: + raise CompressionError("unknown compression type %r" % comptype) + return func(name, filemode, fileobj, **kwargs) + + elif "|" in mode: + filemode, comptype = mode.split("|", 1) + filemode = filemode or "r" + comptype = comptype or "tar" + + if filemode not in ("r", "w"): + raise ValueError("mode must be 'r' or 'w'") + + compresslevel = kwargs.pop("compresslevel", 9) + stream = _Stream(name, filemode, comptype, fileobj, bufsize, + compresslevel) + try: + t = cls(name, filemode, stream, **kwargs) + except: + stream.close() + raise + t._extfileobj = False + return t + + elif mode in ("a", "w", "x"): + return cls.taropen(name, mode, fileobj, **kwargs) + + raise ValueError("undiscernible mode") + + @classmethod + def taropen(cls, name, mode="r", fileobj=None, **kwargs): + """Open uncompressed tar archive name for reading or writing. + """ + if mode not in ("r", "a", "w", "x"): + raise ValueError("mode must be 'r', 'a', 'w' or 'x'") + return cls(name, mode, fileobj, **kwargs) + + @classmethod + def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): + """Open gzip compressed tar archive name for reading or writing. + Appending is not allowed. + """ + if mode not in ("r", "w", "x"): + raise ValueError("mode must be 'r', 'w' or 'x'") + + try: + from gzip import GzipFile + except ImportError: + raise CompressionError("gzip module is not available") from None + + try: + fileobj = GzipFile(name, mode + "b", compresslevel, fileobj) + except OSError as e: + if fileobj is not None and mode == 'r': + raise ReadError("not a gzip file") from e + raise + + try: + t = cls.taropen(name, mode, fileobj, **kwargs) + except OSError as e: + fileobj.close() + if mode == 'r': + raise ReadError("not a gzip file") from e + raise + except: + fileobj.close() + raise + t._extfileobj = False + return t + + @classmethod + def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): + """Open bzip2 compressed tar archive name for reading or writing. + Appending is not allowed. 
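+
+        (Editor's note: BZ2File raises no error on a bad archive at open
+        time, so "not a bzip2 file" is only detected once taropen reads the
+        first header, via the OSError/EOFError handler below.)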
+ """ + if mode not in ("r", "w", "x"): + raise ValueError("mode must be 'r', 'w' or 'x'") + + try: + from bz2 import BZ2File + except ImportError: + raise CompressionError("bz2 module is not available") from None + + fileobj = BZ2File(fileobj or name, mode, compresslevel=compresslevel) + + try: + t = cls.taropen(name, mode, fileobj, **kwargs) + except (OSError, EOFError) as e: + fileobj.close() + if mode == 'r': + raise ReadError("not a bzip2 file") from e + raise + except: + fileobj.close() + raise + t._extfileobj = False + return t + + @classmethod + def xzopen(cls, name, mode="r", fileobj=None, preset=None, **kwargs): + """Open lzma compressed tar archive name for reading or writing. + Appending is not allowed. + """ + if mode not in ("r", "w", "x"): + raise ValueError("mode must be 'r', 'w' or 'x'") + + try: + from lzma import LZMAFile, LZMAError + except ImportError: + raise CompressionError("lzma module is not available") from None + + fileobj = LZMAFile(fileobj or name, mode, preset=preset) + + try: + t = cls.taropen(name, mode, fileobj, **kwargs) + except (LZMAError, EOFError) as e: + fileobj.close() + if mode == 'r': + raise ReadError("not an lzma file") from e + raise + except: + fileobj.close() + raise + t._extfileobj = False + return t + + # All *open() methods are registered here. + OPEN_METH = { + "tar": "taropen", # uncompressed tar + "gz": "gzopen", # gzip compressed tar + "bz2": "bz2open", # bzip2 compressed tar + "xz": "xzopen" # lzma compressed tar + } + + #-------------------------------------------------------------------------- + # The public methods which TarFile provides: + + def close(self): + """Close the TarFile. In write-mode, two finishing zero blocks are + appended to the archive. + """ + if self.closed: + return + + self.closed = True + try: + if self.mode in ("a", "w", "x"): + self.fileobj.write(NUL * (BLOCKSIZE * 2)) + self.offset += (BLOCKSIZE * 2) + # fill up the end with zero-blocks + # (like option -b20 for tar does) + blocks, remainder = divmod(self.offset, RECORDSIZE) + if remainder > 0: + self.fileobj.write(NUL * (RECORDSIZE - remainder)) + finally: + if not self._extfileobj: + self.fileobj.close() + + def getmember(self, name): + """Return a TarInfo object for member ``name``. If ``name`` can not be + found in the archive, KeyError is raised. If a member occurs more + than once in the archive, its last occurrence is assumed to be the + most up-to-date version. + """ + tarinfo = self._getmember(name.rstrip('/')) + if tarinfo is None: + raise KeyError("filename %r not found" % name) + return tarinfo + + def getmembers(self): + """Return the members of the archive as a list of TarInfo objects. The + list has the same order as the members in the archive. + """ + self._check() + if not self._loaded: # if we want to obtain a list of + self._load() # all members, we first have to + # scan the whole archive. + return self.members + + def getnames(self): + """Return the members of the archive as a list of their names. It has + the same order as the list returned by getmembers(). + """ + return [tarinfo.name for tarinfo in self.getmembers()] + + def gettarinfo(self, name=None, arcname=None, fileobj=None): + """Create a TarInfo object from the result of os.stat or equivalent + on an existing file. The file is either named by ``name``, or + specified as a file object ``fileobj`` with a file descriptor. 
If + given, ``arcname`` specifies an alternative name for the file in the + archive, otherwise, the name is taken from the 'name' attribute of + 'fileobj', or the 'name' argument. The name should be a text + string. + """ + self._check("awx") + + # When fileobj is given, replace name by + # fileobj's real name. + if fileobj is not None: + name = fileobj.name + + # Building the name of the member in the archive. + # Backward slashes are converted to forward slashes, + # Absolute paths are turned to relative paths. + if arcname is None: + arcname = name + drv, arcname = os.path.splitdrive(arcname) + arcname = arcname.replace(os.sep, "/") + arcname = arcname.lstrip("/") + + # Now, fill the TarInfo object with + # information specific for the file. + tarinfo = self.tarinfo() + tarinfo.tarfile = self # Not needed + + # Use os.stat or os.lstat, depending on if symlinks shall be resolved. + if fileobj is None: + if not self.dereference: + statres = os.lstat(name) + else: + statres = os.stat(name) + else: + statres = os.fstat(fileobj.fileno()) + linkname = "" + + stmd = statres.st_mode + if stat.S_ISREG(stmd): + inode = (statres.st_ino, statres.st_dev) + if not self.dereference and statres.st_nlink > 1 and \ + inode in self.inodes and arcname != self.inodes[inode]: + # Is it a hardlink to an already + # archived file? + type = LNKTYPE + linkname = self.inodes[inode] + else: + # The inode is added only if its valid. + # For win32 it is always 0. + type = REGTYPE + if inode[0]: + self.inodes[inode] = arcname + elif stat.S_ISDIR(stmd): + type = DIRTYPE + elif stat.S_ISFIFO(stmd): + type = FIFOTYPE + elif stat.S_ISLNK(stmd): + type = SYMTYPE + linkname = os.readlink(name) + elif stat.S_ISCHR(stmd): + type = CHRTYPE + elif stat.S_ISBLK(stmd): + type = BLKTYPE + else: + return None + + # Fill the TarInfo object with all + # information we can get. + tarinfo.name = arcname + tarinfo.mode = stmd + tarinfo.uid = statres.st_uid + tarinfo.gid = statres.st_gid + if type == REGTYPE: + tarinfo.size = statres.st_size + else: + tarinfo.size = 0 + tarinfo.mtime = statres.st_mtime + tarinfo.type = type + tarinfo.linkname = linkname + if pwd: + try: + tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0] + except KeyError: + pass + if grp: + try: + tarinfo.gname = grp.getgrgid(tarinfo.gid)[0] + except KeyError: + pass + + if type in (CHRTYPE, BLKTYPE): + if hasattr(os, "major") and hasattr(os, "minor"): + tarinfo.devmajor = os.major(statres.st_rdev) + tarinfo.devminor = os.minor(statres.st_rdev) + return tarinfo + + def list(self, verbose=True, *, members=None): + """Print a table of contents to sys.stdout. If ``verbose`` is False, only + the names of the members are printed. If it is True, an `ls -l'-like + output is produced. ``members`` is optional and must be a subset of the + list returned by getmembers(). + """ + self._check() + + if members is None: + members = self + for tarinfo in members: + if verbose: + if tarinfo.mode is None: + _safe_print("??????????") + else: + _safe_print(stat.filemode(tarinfo.mode)) + _safe_print("%s/%s" % (tarinfo.uname or tarinfo.uid, + tarinfo.gname or tarinfo.gid)) + if tarinfo.ischr() or tarinfo.isblk(): + _safe_print("%10s" % + ("%d,%d" % (tarinfo.devmajor, tarinfo.devminor))) + else: + _safe_print("%10d" % tarinfo.size) + if tarinfo.mtime is None: + _safe_print("????-??-?? 
??:??:??") + else: + _safe_print("%d-%02d-%02d %02d:%02d:%02d" \ + % time.localtime(tarinfo.mtime)[:6]) + + _safe_print(tarinfo.name + ("/" if tarinfo.isdir() else "")) + + if verbose: + if tarinfo.issym(): + _safe_print("-> " + tarinfo.linkname) + if tarinfo.islnk(): + _safe_print("link to " + tarinfo.linkname) + print() + + def add(self, name, arcname=None, recursive=True, *, filter=None): + """Add the file ``name`` to the archive. ``name`` may be any type of file + (directory, fifo, symbolic link, etc.). If given, ``arcname`` + specifies an alternative name for the file in the archive. + Directories are added recursively by default. This can be avoided by + setting ``recursive`` to False. ``filter`` is a function + that expects a TarInfo object argument and returns the changed + TarInfo object, if it returns None the TarInfo object will be + excluded from the archive. + """ + self._check("awx") + + if arcname is None: + arcname = name + + # Skip if somebody tries to archive the archive... + if self.name is not None and os.path.abspath(name) == self.name: + self._dbg(2, "tarfile: Skipped %r" % name) + return + + self._dbg(1, name) + + # Create a TarInfo object from the file. + tarinfo = self.gettarinfo(name, arcname) + + if tarinfo is None: + self._dbg(1, "tarfile: Unsupported type %r" % name) + return + + # Change or exclude the TarInfo object. + if filter is not None: + tarinfo = filter(tarinfo) + if tarinfo is None: + self._dbg(2, "tarfile: Excluded %r" % name) + return + + # Append the tar header and data to the archive. + if tarinfo.isreg(): + with bltn_open(name, "rb") as f: + self.addfile(tarinfo, f) + + elif tarinfo.isdir(): + self.addfile(tarinfo) + if recursive: + for f in sorted(os.listdir(name)): + self.add(os.path.join(name, f), os.path.join(arcname, f), + recursive, filter=filter) + + else: + self.addfile(tarinfo) + + def addfile(self, tarinfo, fileobj=None): + """Add the TarInfo object ``tarinfo`` to the archive. If ``fileobj`` is + given, it should be a binary file, and tarinfo.size bytes are read + from it and added to the archive. You can create TarInfo objects + directly, or by using gettarinfo(). + """ + self._check("awx") + + tarinfo = copy.copy(tarinfo) + + buf = tarinfo.tobuf(self.format, self.encoding, self.errors) + self.fileobj.write(buf) + self.offset += len(buf) + bufsize=self.copybufsize + # If there's data to follow, append it. + if fileobj is not None: + copyfileobj(fileobj, self.fileobj, tarinfo.size, bufsize=bufsize) + blocks, remainder = divmod(tarinfo.size, BLOCKSIZE) + if remainder > 0: + self.fileobj.write(NUL * (BLOCKSIZE - remainder)) + blocks += 1 + self.offset += blocks * BLOCKSIZE + + self.members.append(tarinfo) + + def _get_filter_function(self, filter): + if filter is None: + filter = self.extraction_filter + if filter is None: + warnings.warn( + 'Python 3.14 will, by default, filter extracted tar ' + + 'archives and reject files or modify their metadata. ' + + 'Use the filter argument to control this behavior.', + DeprecationWarning) + return fully_trusted_filter + if isinstance(filter, str): + raise TypeError( + 'String names are not supported for ' + + 'TarFile.extraction_filter. 
Use a function such as ' + + 'tarfile.data_filter directly.') + return filter + if callable(filter): + return filter + try: + return _NAMED_FILTERS[filter] + except KeyError: + raise ValueError(f"filter {filter!r} not found") from None + + def extractall(self, path=".", members=None, *, numeric_owner=False, + filter=None): + """Extract all members from the archive to the current working + directory and set owner, modification time and permissions on + directories afterwards. `path' specifies a different directory + to extract to. `members' is optional and must be a subset of the + list returned by getmembers(). If `numeric_owner` is True, only + the numbers for user/group names are used and not the names. + + The `filter` function will be called on each member just + before extraction. + It can return a changed TarInfo or None to skip the member. + String names of common filters are accepted. + """ + directories = [] + + filter_function = self._get_filter_function(filter) + if members is None: + members = self + + for member in members: + tarinfo = self._get_extract_tarinfo(member, filter_function, path) + if tarinfo is None: + continue + if tarinfo.isdir(): + # For directories, delay setting attributes until later, + # since permissions can interfere with extraction and + # extracting contents can reset mtime. + directories.append(tarinfo) + self._extract_one(tarinfo, path, set_attrs=not tarinfo.isdir(), + numeric_owner=numeric_owner) + + # Reverse sort directories. + directories.sort(key=lambda a: a.name, reverse=True) + + # Set correct owner, mtime and filemode on directories. + for tarinfo in directories: + dirpath = os.path.join(path, tarinfo.name) + try: + self.chown(tarinfo, dirpath, numeric_owner=numeric_owner) + self.utime(tarinfo, dirpath) + self.chmod(tarinfo, dirpath) + except ExtractError as e: + self._handle_nonfatal_error(e) + + def extract(self, member, path="", set_attrs=True, *, numeric_owner=False, + filter=None): + """Extract a member from the archive to the current working directory, + using its full name. Its file information is extracted as accurately + as possible. `member' may be a filename or a TarInfo object. You can + specify a different directory using `path'. File attributes (owner, + mtime, mode) are set unless `set_attrs' is False. If `numeric_owner` + is True, only the numbers for user/group names are used and not + the names. + + The `filter` function will be called before extraction. + It can return a changed TarInfo or None to skip the member. + String names of common filters are accepted. + """ + filter_function = self._get_filter_function(filter) + tarinfo = self._get_extract_tarinfo(member, filter_function, path) + if tarinfo is not None: + self._extract_one(tarinfo, path, set_attrs, numeric_owner) + + def _get_extract_tarinfo(self, member, filter_function, path): + """Get filtered TarInfo (or None) from member, which might be a str""" + if isinstance(member, str): + tarinfo = self.getmember(member) + else: + tarinfo = member + + unfiltered = tarinfo + try: + tarinfo = filter_function(tarinfo, path) + except (OSError, FilterError) as e: + self._handle_fatal_error(e) + except ExtractError as e: + self._handle_nonfatal_error(e) + if tarinfo is None: + self._dbg(2, "tarfile: Excluded %r" % unfiltered.name) + return None + # Prepare the link target for makelink(). 
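+ # A hard link's linkname is a path relative to the archive root, so
+ # it is resolved against the extraction path here; the member is
+ # copied first so the cached entry in self.members stays unmodified.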
+ if tarinfo.islnk(): + tarinfo = copy.copy(tarinfo) + tarinfo._link_target = os.path.join(path, tarinfo.linkname) + return tarinfo + + def _extract_one(self, tarinfo, path, set_attrs, numeric_owner): + """Extract from filtered tarinfo to disk""" + self._check("r") + + try: + self._extract_member(tarinfo, os.path.join(path, tarinfo.name), + set_attrs=set_attrs, + numeric_owner=numeric_owner) + except OSError as e: + self._handle_fatal_error(e) + except ExtractError as e: + self._handle_nonfatal_error(e) + + def _handle_nonfatal_error(self, e): + """Handle non-fatal error (ExtractError) according to errorlevel""" + if self.errorlevel > 1: + raise + else: + self._dbg(1, "tarfile: %s" % e) + + def _handle_fatal_error(self, e): + """Handle "fatal" error according to self.errorlevel""" + if self.errorlevel > 0: + raise + elif isinstance(e, OSError): + if e.filename is None: + self._dbg(1, "tarfile: %s" % e.strerror) + else: + self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename)) + else: + self._dbg(1, "tarfile: %s %s" % (type(e).__name__, e)) + + def extractfile(self, member): + """Extract a member from the archive as a file object. ``member`` may be + a filename or a TarInfo object. If ``member`` is a regular file or + a link, an io.BufferedReader object is returned. For all other + existing members, None is returned. If ``member`` does not appear + in the archive, KeyError is raised. + """ + self._check("r") + + if isinstance(member, str): + tarinfo = self.getmember(member) + else: + tarinfo = member + + if tarinfo.isreg() or tarinfo.type not in SUPPORTED_TYPES: + # Members with unknown types are treated as regular files. + return self.fileobject(self, tarinfo) + + elif tarinfo.islnk() or tarinfo.issym(): + if isinstance(self.fileobj, _Stream): + # A small but ugly workaround for the case that someone tries + # to extract a (sym)link as a file-object from a non-seekable + # stream of tar blocks. + raise StreamError("cannot extract (sym)link as file object") + else: + # A (sym)link's file object is its target's file object. + return self.extractfile(self._find_link_target(tarinfo)) + else: + # If there's no data associated with the member (directory, chrdev, + # blkdev, etc.), return None instead of a file object. + return None + + def _extract_member(self, tarinfo, targetpath, set_attrs=True, + numeric_owner=False): + """Extract the TarInfo object tarinfo to a physical + file called targetpath. + """ + # Fetch the TarInfo object for the given name + # and build the destination pathname, replacing + # forward slashes to platform specific separators. + targetpath = targetpath.rstrip("/") + targetpath = targetpath.replace("/", os.sep) + + # Create all upper directories. + upperdirs = os.path.dirname(targetpath) + if upperdirs and not os.path.exists(upperdirs): + # Create directories that are not part of the archive with + # default permissions. 
+ os.makedirs(upperdirs) + + if tarinfo.islnk() or tarinfo.issym(): + self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname)) + else: + self._dbg(1, tarinfo.name) + + if tarinfo.isreg(): + self.makefile(tarinfo, targetpath) + elif tarinfo.isdir(): + self.makedir(tarinfo, targetpath) + elif tarinfo.isfifo(): + self.makefifo(tarinfo, targetpath) + elif tarinfo.ischr() or tarinfo.isblk(): + self.makedev(tarinfo, targetpath) + elif tarinfo.islnk() or tarinfo.issym(): + self.makelink(tarinfo, targetpath) + elif tarinfo.type not in SUPPORTED_TYPES: + self.makeunknown(tarinfo, targetpath) + else: + self.makefile(tarinfo, targetpath) + + if set_attrs: + self.chown(tarinfo, targetpath, numeric_owner) + if not tarinfo.issym(): + self.chmod(tarinfo, targetpath) + self.utime(tarinfo, targetpath) + + #-------------------------------------------------------------------------- + # Below are the different file methods. They are called via + # _extract_member() when extract() is called. They can be replaced in a + # subclass to implement other functionality. + + def makedir(self, tarinfo, targetpath): + """Make a directory called targetpath. + """ + try: + if tarinfo.mode is None: + # Use the system's default mode + os.mkdir(targetpath) + else: + # Use a safe mode for the directory, the real mode is set + # later in _extract_member(). + os.mkdir(targetpath, 0o700) + except FileExistsError: + if not os.path.isdir(targetpath): + raise + + def makefile(self, tarinfo, targetpath): + """Make a file called targetpath. + """ + source = self.fileobj + source.seek(tarinfo.offset_data) + bufsize = self.copybufsize + with bltn_open(targetpath, "wb") as target: + if tarinfo.sparse is not None: + for offset, size in tarinfo.sparse: + target.seek(offset) + copyfileobj(source, target, size, ReadError, bufsize) + target.seek(tarinfo.size) + target.truncate() + else: + copyfileobj(source, target, tarinfo.size, ReadError, bufsize) + + def makeunknown(self, tarinfo, targetpath): + """Make a file from a TarInfo object with an unknown type + at targetpath. + """ + self.makefile(tarinfo, targetpath) + self._dbg(1, "tarfile: Unknown file type %r, " \ + "extracted as regular file." % tarinfo.type) + + def makefifo(self, tarinfo, targetpath): + """Make a fifo called targetpath. + """ + if hasattr(os, "mkfifo"): + os.mkfifo(targetpath) + else: + raise ExtractError("fifo not supported by system") + + def makedev(self, tarinfo, targetpath): + """Make a character or block device called targetpath. + """ + if not hasattr(os, "mknod") or not hasattr(os, "makedev"): + raise ExtractError("special devices not supported by system") + + mode = tarinfo.mode + if mode is None: + # Use mknod's default + mode = 0o600 + if tarinfo.isblk(): + mode |= stat.S_IFBLK + else: + mode |= stat.S_IFCHR + + os.mknod(targetpath, mode, + os.makedev(tarinfo.devmajor, tarinfo.devminor)) + + def makelink(self, tarinfo, targetpath): + """Make a (symbolic) link called targetpath. If it cannot be created + (platform limitation), we try to make a copy of the referenced file + instead of a link. + """ + try: + # For systems that support symbolic and hard links. + if tarinfo.issym(): + if os.path.lexists(targetpath): + # Avoid FileExistsError on following os.symlink. 
+ os.unlink(targetpath) + os.symlink(tarinfo.linkname, targetpath) + else: + if os.path.exists(tarinfo._link_target): + os.link(tarinfo._link_target, targetpath) + else: + self._extract_member(self._find_link_target(tarinfo), + targetpath) + except symlink_exception: + try: + self._extract_member(self._find_link_target(tarinfo), + targetpath) + except KeyError: + raise ExtractError("unable to resolve link inside archive") from None + + def chown(self, tarinfo, targetpath, numeric_owner): + """Set owner of targetpath according to tarinfo. If numeric_owner + is True, use .gid/.uid instead of .gname/.uname. If numeric_owner + is False, fall back to .gid/.uid when the search based on name + fails. + """ + if hasattr(os, "geteuid") and os.geteuid() == 0: + # We have to be root to do so. + g = tarinfo.gid + u = tarinfo.uid + if not numeric_owner: + try: + if grp and tarinfo.gname: + g = grp.getgrnam(tarinfo.gname)[2] + except KeyError: + pass + try: + if pwd and tarinfo.uname: + u = pwd.getpwnam(tarinfo.uname)[2] + except KeyError: + pass + if g is None: + g = -1 + if u is None: + u = -1 + try: + if tarinfo.issym() and hasattr(os, "lchown"): + os.lchown(targetpath, u, g) + else: + os.chown(targetpath, u, g) + except OSError as e: + raise ExtractError("could not change owner") from e + + def chmod(self, tarinfo, targetpath): + """Set file permissions of targetpath according to tarinfo. + """ + if tarinfo.mode is None: + return + try: + os.chmod(targetpath, tarinfo.mode) + except OSError as e: + raise ExtractError("could not change mode") from e + + def utime(self, tarinfo, targetpath): + """Set modification time of targetpath according to tarinfo. + """ + mtime = tarinfo.mtime + if mtime is None: + return + if not hasattr(os, 'utime'): + return + try: + os.utime(targetpath, (mtime, mtime)) + except OSError as e: + raise ExtractError("could not change modification time") from e + + #-------------------------------------------------------------------------- + def next(self): + """Return the next member of the archive as a TarInfo object, when + TarFile is opened for reading. Return None if there is no more + available. + """ + self._check("ra") + if self.firstmember is not None: + m = self.firstmember + self.firstmember = None + return m + + # Advance the file pointer. + if self.offset != self.fileobj.tell(): + if self.offset == 0: + return None + self.fileobj.seek(self.offset - 1) + if not self.fileobj.read(1): + raise ReadError("unexpected end of data") + + # Read the next block. 
+ tarinfo = None + while True: + try: + tarinfo = self.tarinfo.fromtarfile(self) + except EOFHeaderError as e: + if self.ignore_zeros: + self._dbg(2, "0x%X: %s" % (self.offset, e)) + self.offset += BLOCKSIZE + continue + except InvalidHeaderError as e: + if self.ignore_zeros: + self._dbg(2, "0x%X: %s" % (self.offset, e)) + self.offset += BLOCKSIZE + continue + elif self.offset == 0: + raise ReadError(str(e)) from None + except EmptyHeaderError: + if self.offset == 0: + raise ReadError("empty file") from None + except TruncatedHeaderError as e: + if self.offset == 0: + raise ReadError(str(e)) from None + except SubsequentHeaderError as e: + raise ReadError(str(e)) from None + except Exception as e: + try: + import zlib + if isinstance(e, zlib.error): + raise ReadError(f'zlib error: {e}') from None + else: + raise e + except ImportError: + raise e + break + + if tarinfo is not None: + self.members.append(tarinfo) + else: + self._loaded = True + + return tarinfo + + #-------------------------------------------------------------------------- + # Little helper methods: + + def _getmember(self, name, tarinfo=None, normalize=False): + """Find an archive member by name from bottom to top. + If tarinfo is given, it is used as the starting point. + """ + # Ensure that all members have been loaded. + members = self.getmembers() + + # Limit the member search list up to tarinfo. + skipping = False + if tarinfo is not None: + try: + index = members.index(tarinfo) + except ValueError: + # The given starting point might be a (modified) copy. + # We'll later skip members until we find an equivalent. + skipping = True + else: + # Happy fast path + members = members[:index] + + if normalize: + name = os.path.normpath(name) + + for member in reversed(members): + if skipping: + if tarinfo.offset == member.offset: + skipping = False + continue + if normalize: + member_name = os.path.normpath(member.name) + else: + member_name = member.name + + if name == member_name: + return member + + if skipping: + # Starting point was not found + raise ValueError(tarinfo) + + def _load(self): + """Read through the entire archive file and look for readable + members. + """ + while self.next() is not None: + pass + self._loaded = True + + def _check(self, mode=None): + """Check if TarFile is still open, and if the operation's mode + corresponds to TarFile's mode. + """ + if self.closed: + raise OSError("%s is closed" % self.__class__.__name__) + if mode is not None and self.mode not in mode: + raise OSError("bad operation for mode %r" % self.mode) + + def _find_link_target(self, tarinfo): + """Find the target member of a symlink or hardlink member in the + archive. + """ + if tarinfo.issym(): + # Always search the entire archive. + linkname = "/".join(filter(None, (os.path.dirname(tarinfo.name), tarinfo.linkname))) + limit = None + else: + # Search the archive before the link, because a hard link is + # just a reference to an already archived file. + linkname = tarinfo.linkname + limit = tarinfo + + member = self._getmember(linkname, tarinfo=limit, normalize=True) + if member is None: + raise KeyError("linkname %r not found" % linkname) + return member + + def __iter__(self): + """Provide an iterator object. + """ + if self._loaded: + yield from self.members + return + + # Yield items using TarFile's next() method. + # When all members have been read, set TarFile as _loaded. 
+ index = 0 + # Fix for SF #1100429: Under rare circumstances it can + # happen that getmembers() is called during iteration, + # which will have already exhausted the next() method. + if self.firstmember is not None: + tarinfo = self.next() + index += 1 + yield tarinfo + + while True: + if index < len(self.members): + tarinfo = self.members[index] + elif not self._loaded: + tarinfo = self.next() + if not tarinfo: + self._loaded = True + return + else: + return + index += 1 + yield tarinfo + + def _dbg(self, level, msg): + """Write debugging output to sys.stderr. + """ + if level <= self.debug: + print(msg, file=sys.stderr) + + def __enter__(self): + self._check() + return self + + def __exit__(self, type, value, traceback): + if type is None: + self.close() + else: + # An exception occurred. We must not call close() because + # it would try to write end-of-archive blocks and padding. + if not self._extfileobj: + self.fileobj.close() + self.closed = True + +#-------------------- +# exported functions +#-------------------- + +def is_tarfile(name): + """Return True if name points to a tar archive that we + are able to handle, else return False. + + 'name' should be a string, file, or file-like object. + """ + try: + if hasattr(name, "read"): + pos = name.tell() + t = open(fileobj=name) + name.seek(pos) + else: + t = open(name) + t.close() + return True + except TarError: + return False + +open = TarFile.open + + +def main(): + import argparse + + description = 'A simple command-line interface for tarfile module.' + parser = argparse.ArgumentParser(description=description) + parser.add_argument('-v', '--verbose', action='store_true', default=False, + help='Verbose output') + parser.add_argument('--filter', metavar='', + choices=_NAMED_FILTERS, + help='Filter for extraction') + + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument('-l', '--list', metavar='', + help='Show listing of a tarfile') + group.add_argument('-e', '--extract', nargs='+', + metavar=('', ''), + help='Extract tarfile into target dir') + group.add_argument('-c', '--create', nargs='+', + metavar=('', ''), + help='Create tarfile from sources') + group.add_argument('-t', '--test', metavar='', + help='Test if a tarfile is valid') + + args = parser.parse_args() + + if args.filter and args.extract is None: + parser.exit(1, '--filter is only valid for extraction\n') + + if args.test is not None: + src = args.test + if is_tarfile(src): + with open(src, 'r') as tar: + tar.getmembers() + print(tar.getmembers(), file=sys.stderr) + if args.verbose: + print('{!r} is a tar archive.'.format(src)) + else: + parser.exit(1, '{!r} is not a tar archive.\n'.format(src)) + + elif args.list is not None: + src = args.list + if is_tarfile(src): + with TarFile.open(src, 'r:*') as tf: + tf.list(verbose=args.verbose) + else: + parser.exit(1, '{!r} is not a tar archive.\n'.format(src)) + + elif args.extract is not None: + if len(args.extract) == 1: + src = args.extract[0] + curdir = os.curdir + elif len(args.extract) == 2: + src, curdir = args.extract + else: + parser.exit(1, parser.format_help()) + + if is_tarfile(src): + with TarFile.open(src, 'r:*') as tf: + tf.extractall(path=curdir, filter=args.filter) + if args.verbose: + if curdir == '.': + msg = '{!r} file is extracted.'.format(src) + else: + msg = ('{!r} file is extracted ' + 'into {!r} directory.').format(src, curdir) + print(msg) + else: + parser.exit(1, '{!r} is not a tar archive.\n'.format(src)) + + elif args.create is not None: + tar_name = args.create.pop(0) + 
_, ext = os.path.splitext(tar_name) + compressions = { + # gz + '.gz': 'gz', + '.tgz': 'gz', + # xz + '.xz': 'xz', + '.txz': 'xz', + # bz2 + '.bz2': 'bz2', + '.tbz': 'bz2', + '.tbz2': 'bz2', + '.tb2': 'bz2', + } + tar_mode = 'w:' + compressions[ext] if ext in compressions else 'w' + tar_files = args.create + + with TarFile.open(tar_name, tar_mode) as tf: + for file_name in tar_files: + tf.add(file_name) + + if args.verbose: + print('{!r} file created.'.format(tar_name)) + +if __name__ == '__main__': + main() diff --git a/lib/pkg_resources/_vendor/jaraco/context.py b/lib/pkg_resources/_vendor/jaraco/context.py index b0d1ef37..c42f6135 100644 --- a/lib/pkg_resources/_vendor/jaraco/context.py +++ b/lib/pkg_resources/_vendor/jaraco/context.py @@ -1,15 +1,26 @@ -import os -import subprocess +from __future__ import annotations + import contextlib import functools -import tempfile -import shutil import operator +import os +import shutil +import subprocess +import sys +import tempfile +import urllib.request import warnings +from typing import Iterator + + +if sys.version_info < (3, 12): + from pkg_resources.extern.backports import tarfile +else: + import tarfile @contextlib.contextmanager -def pushd(dir): +def pushd(dir: str | os.PathLike) -> Iterator[str | os.PathLike]: """ >>> tmp_path = getfixture('tmp_path') >>> with pushd(tmp_path): @@ -26,33 +37,88 @@ def pushd(dir): @contextlib.contextmanager -def tarball_context(url, target_dir=None, runner=None, pushd=pushd): +def tarball( + url, target_dir: str | os.PathLike | None = None +) -> Iterator[str | os.PathLike]: """ - Get a tarball, extract it, change to that directory, yield, then - clean up. - `runner` is the function to invoke commands. - `pushd` is a context manager for changing the directory. + Get a tarball, extract it, yield, then clean up. + + >>> import urllib.request + >>> url = getfixture('tarfile_served') + >>> target = getfixture('tmp_path') / 'out' + >>> tb = tarball(url, target_dir=target) + >>> import pathlib + >>> with tb as extracted: + ... contents = pathlib.Path(extracted, 'contents.txt').read_text(encoding='utf-8') + >>> assert not os.path.exists(extracted) """ if target_dir is None: target_dir = os.path.basename(url).replace('.tar.gz', '').replace('.tgz', '') - if runner is None: - runner = functools.partial(subprocess.check_call, shell=True) - else: - warnings.warn("runner parameter is deprecated", DeprecationWarning) # In the tar command, use --strip-components=1 to strip the first path and # then # use -C to cause the files to be extracted to {target_dir}. This ensures # that we always know where the files were extracted. - runner('mkdir {target_dir}'.format(**vars())) + os.mkdir(target_dir) try: - getter = 'wget {url} -O -' - extract = 'tar x{compression} --strip-components=1 -C {target_dir}' - cmd = ' | '.join((getter, extract)) - runner(cmd.format(compression=infer_compression(url), **vars())) - with pushd(target_dir): - yield target_dir + req = urllib.request.urlopen(url) + with tarfile.open(fileobj=req, mode='r|*') as tf: + tf.extractall(path=target_dir, filter=strip_first_component) + yield target_dir finally: - runner('rm -Rf {target_dir}'.format(**vars())) + shutil.rmtree(target_dir) + + +def strip_first_component( + member: tarfile.TarInfo, + path, +) -> tarfile.TarInfo: + _, member.name = member.name.split('/', 1) + return member + + +def _compose(*cmgrs): + """ + Compose any number of dependent context managers into a single one. 
+ + The last, innermost context manager may take arbitrary arguments, but + each successive context manager should accept the result from the + previous as a single parameter. + + Like :func:`jaraco.functools.compose`, behavior works from right to + left, so the context manager should be indicated from outermost to + innermost. + + Example, to create a context manager to change to a temporary + directory: + + >>> temp_dir_as_cwd = _compose(pushd, temp_dir) + >>> with temp_dir_as_cwd() as dir: + ... assert os.path.samefile(os.getcwd(), dir) + """ + + def compose_two(inner, outer): + def composed(*args, **kwargs): + with inner(*args, **kwargs) as saved, outer(saved) as res: + yield res + + return contextlib.contextmanager(composed) + + return functools.reduce(compose_two, reversed(cmgrs)) + + +tarball_cwd = _compose(pushd, tarball) + + +@contextlib.contextmanager +def tarball_context(*args, **kwargs): + warnings.warn( + "tarball_context is deprecated. Use tarball or tarball_cwd instead.", + DeprecationWarning, + stacklevel=2, + ) + pushd_ctx = kwargs.pop('pushd', pushd) + with tarball(*args, **kwargs) as tball, pushd_ctx(tball) as dir: + yield dir def infer_compression(url): @@ -68,6 +134,11 @@ def infer_compression(url): >>> infer_compression('file.xz') 'J' """ + warnings.warn( + "infer_compression is deprecated with no replacement", + DeprecationWarning, + stacklevel=2, + ) # cheat and just assume it's the last two characters compression_indicator = url[-2:] mapping = dict(gz='z', bz='j', xz='J') @@ -84,7 +155,7 @@ def temp_dir(remover=shutil.rmtree): >>> import pathlib >>> with temp_dir() as the_dir: ... assert os.path.isdir(the_dir) - ... _ = pathlib.Path(the_dir).joinpath('somefile').write_text('contents') + ... _ = pathlib.Path(the_dir).joinpath('somefile').write_text('contents', encoding='utf-8') >>> assert not os.path.exists(the_dir) """ temp_dir = tempfile.mkdtemp() @@ -113,15 +184,23 @@ def repo_context(url, branch=None, quiet=True, dest_ctx=temp_dir): yield repo_dir -@contextlib.contextmanager def null(): """ A null context suitable to stand in for a meaningful context. >>> with null() as value: ... assert value is None + + This context is most useful when dealing with two or more code + branches but only some need a context. Wrap the others in a null + context to provide symmetry across all options. """ - yield + warnings.warn( + "null is deprecated. Use contextlib.nullcontext", + DeprecationWarning, + stacklevel=2, + ) + return contextlib.nullcontext() class ExceptionTrap: @@ -267,13 +346,7 @@ class on_interrupt(contextlib.ContextDecorator): ... 
on_interrupt('ignore')(do_interrupt)() """ - def __init__( - self, - action='error', - # py3.7 compat - # /, - code=1, - ): + def __init__(self, action='error', /, code=1): self.action = action self.code = code diff --git a/lib/pkg_resources/_vendor/jaraco/functools.py b/lib/pkg_resources/_vendor/jaraco/functools/__init__.py similarity index 79% rename from lib/pkg_resources/_vendor/jaraco/functools.py rename to lib/pkg_resources/_vendor/jaraco/functools/__init__.py index 67aeadc3..f523099c 100644 --- a/lib/pkg_resources/_vendor/jaraco/functools.py +++ b/lib/pkg_resources/_vendor/jaraco/functools/__init__.py @@ -1,18 +1,14 @@ +import collections.abc import functools -import time import inspect -import collections -import types import itertools +import operator +import time +import types import warnings import pkg_resources.extern.more_itertools -from typing import Callable, TypeVar - - -CallableT = TypeVar("CallableT", bound=Callable[..., object]) - def compose(*funcs): """ @@ -38,24 +34,6 @@ def compose(*funcs): return functools.reduce(compose_two, funcs) -def method_caller(method_name, *args, **kwargs): - """ - Return a function that will call a named method on the - target object with optional positional and keyword - arguments. - - >>> lower = method_caller('lower') - >>> lower('MyString') - 'mystring' - """ - - def call_method(target): - func = getattr(target, method_name) - return func(*args, **kwargs) - - return call_method - - def once(func): """ Decorate func so it's only ever called the first time. @@ -98,12 +76,7 @@ def once(func): return wrapper -def method_cache( - method: CallableT, - cache_wrapper: Callable[ - [CallableT], CallableT - ] = functools.lru_cache(), # type: ignore[assignment] -) -> CallableT: +def method_cache(method, cache_wrapper=functools.lru_cache()): """ Wrap lru_cache to support storing the cache data in the object instances. @@ -171,21 +144,17 @@ def method_cache( for another implementation and additional justification. """ - def wrapper(self: object, *args: object, **kwargs: object) -> object: + def wrapper(self, *args, **kwargs): # it's the first call, replace the method with a cached, bound method - bound_method: CallableT = types.MethodType( # type: ignore[assignment] - method, self - ) + bound_method = types.MethodType(method, self) cached_method = cache_wrapper(bound_method) setattr(self, method.__name__, cached_method) return cached_method(*args, **kwargs) # Support cache clear even before cache has been created. - wrapper.cache_clear = lambda: None # type: ignore[attr-defined] + wrapper.cache_clear = lambda: None - return ( # type: ignore[return-value] - _special_method_cache(method, cache_wrapper) or wrapper - ) + return _special_method_cache(method, cache_wrapper) or wrapper def _special_method_cache(method, cache_wrapper): @@ -201,12 +170,13 @@ def _special_method_cache(method, cache_wrapper): """ name = method.__name__ special_names = '__getattr__', '__getitem__' + if name not in special_names: - return + return None wrapper_name = '__cached' + name - def proxy(self, *args, **kwargs): + def proxy(self, /, *args, **kwargs): if wrapper_name not in vars(self): bound = types.MethodType(method, self) cache = cache_wrapper(bound) @@ -243,7 +213,7 @@ def result_invoke(action): r""" Decorate a function with an action function that is invoked on the results returned from the decorated - function (for its side-effect), then return the original + function (for its side effect), then return the original result. 
>>> @result_invoke(print) @@ -267,7 +237,7 @@ def result_invoke(action): return wrap -def invoke(f, *args, **kwargs): +def invoke(f, /, *args, **kwargs): """ Call a function for its side effect after initialization. @@ -302,25 +272,15 @@ def invoke(f, *args, **kwargs): Use functools.partial to pass parameters to the initial call >>> @functools.partial(invoke, name='bingo') - ... def func(name): print("called with", name) + ... def func(name): print('called with', name) called with bingo """ f(*args, **kwargs) return f -def call_aside(*args, **kwargs): - """ - Deprecated name for invoke. - """ - warnings.warn("call_aside is deprecated, use invoke", DeprecationWarning) - return invoke(*args, **kwargs) - - class Throttler: - """ - Rate-limit a function (or other callable) - """ + """Rate-limit a function (or other callable).""" def __init__(self, func, max_rate=float('Inf')): if isinstance(func, Throttler): @@ -337,20 +297,20 @@ class Throttler: return self.func(*args, **kwargs) def _wait(self): - "ensure at least 1/max_rate seconds from last call" + """Ensure at least 1/max_rate seconds from last call.""" elapsed = time.time() - self.last_called must_wait = 1 / self.max_rate - elapsed time.sleep(max(0, must_wait)) self.last_called = time.time() - def __get__(self, obj, type=None): + def __get__(self, obj, owner=None): return first_invoke(self._wait, functools.partial(self.func, obj)) def first_invoke(func1, func2): """ Return a function that when invoked will invoke func1 without - any parameters (for its side-effect) and then invoke func2 + any parameters (for its side effect) and then invoke func2 with whatever parameters were passed, returning its result. """ @@ -361,6 +321,17 @@ def first_invoke(func1, func2): return wrapper +method_caller = first_invoke( + lambda: warnings.warn( + '`jaraco.functools.method_caller` is deprecated, ' + 'use `operator.methodcaller` instead', + DeprecationWarning, + stacklevel=3, + ), + operator.methodcaller, +) + + def retry_call(func, cleanup=lambda: None, retries=0, trap=()): """ Given a callable func, trap the indicated exceptions @@ -369,7 +340,7 @@ def retry_call(func, cleanup=lambda: None, retries=0, trap=()): to propagate. """ attempts = itertools.count() if retries == float('inf') else range(retries) - for attempt in attempts: + for _ in attempts: try: return func() except trap: @@ -406,7 +377,7 @@ def retry(*r_args, **r_kwargs): def print_yielded(func): """ - Convert a generator into a function that prints all yielded elements + Convert a generator into a function that prints all yielded elements. >>> @print_yielded ... def x(): @@ -422,7 +393,7 @@ def print_yielded(func): def pass_none(func): """ - Wrap func so it's not called if its first param is None + Wrap func so it's not called if its first param is None. 
>>> print_text = pass_none(print) >>> print_text('text') @@ -431,9 +402,10 @@ def pass_none(func): """ @functools.wraps(func) - def wrapper(param, *args, **kwargs): + def wrapper(param, /, *args, **kwargs): if param is not None: return func(param, *args, **kwargs) + return None return wrapper @@ -507,7 +479,7 @@ def save_method_args(method): args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs') @functools.wraps(method) - def wrapper(self, *args, **kwargs): + def wrapper(self, /, *args, **kwargs): attr_name = '_saved_' + method.__name__ attr = args_and_kwargs(args, kwargs) setattr(self, attr_name, attr) @@ -554,3 +526,108 @@ def except_(*exceptions, replace=None, use=None): return wrapper return decorate + + +def identity(x): + """ + Return the argument. + + >>> o = object() + >>> identity(o) is o + True + """ + return x + + +def bypass_when(check, *, _op=identity): + """ + Decorate a function to return its parameter when ``check``. + + >>> bypassed = [] # False + + >>> @bypass_when(bypassed) + ... def double(x): + ... return x * 2 + >>> double(2) + 4 + >>> bypassed[:] = [object()] # True + >>> double(2) + 2 + """ + + def decorate(func): + @functools.wraps(func) + def wrapper(param, /): + return param if _op(check) else func(param) + + return wrapper + + return decorate + + +def bypass_unless(check): + """ + Decorate a function to return its parameter unless ``check``. + + >>> enabled = [object()] # True + + >>> @bypass_unless(enabled) + ... def double(x): + ... return x * 2 + >>> double(2) + 4 + >>> del enabled[:] # False + >>> double(2) + 2 + """ + return bypass_when(check, _op=operator.not_) + + +@functools.singledispatch +def _splat_inner(args, func): + """Splat args to func.""" + return func(*args) + + +@_splat_inner.register +def _(args: collections.abc.Mapping, func): + """Splat kargs to func as kwargs.""" + return func(**args) + + +def splat(func): + """ + Wrap func to expect its parameters to be passed positionally in a tuple. + + Has a similar effect to that of ``itertools.starmap`` over + simple ``map``. + + >>> pairs = [(-1, 1), (0, 2)] + >>> pkg_resources.extern.more_itertools.consume(itertools.starmap(print, pairs)) + -1 1 + 0 2 + >>> pkg_resources.extern.more_itertools.consume(map(splat(print), pairs)) + -1 1 + 0 2 + + The approach generalizes to other iterators that don't have a "star" + equivalent, such as a "starfilter". + + >>> list(filter(splat(operator.add), pairs)) + [(0, 2)] + + Splat also accepts a mapping argument. + + >>> def is_nice(msg, code): + ... return "smile" in msg or code == 0 + >>> msgs = [ + ... dict(msg='smile!', code=20), + ... dict(msg='error :(', code=1), + ... dict(msg='unknown', code=0), + ... ] + >>> for msg in filter(splat(is_nice), msgs): + ... 
print(msg) + {'msg': 'smile!', 'code': 20} + {'msg': 'unknown', 'code': 0} + """ + return functools.wraps(func)(functools.partial(_splat_inner, func=func)) diff --git a/lib/pkg_resources/_vendor/jaraco/functools/__init__.pyi b/lib/pkg_resources/_vendor/jaraco/functools/__init__.pyi new file mode 100644 index 00000000..c2b9ab17 --- /dev/null +++ b/lib/pkg_resources/_vendor/jaraco/functools/__init__.pyi @@ -0,0 +1,128 @@ +from collections.abc import Callable, Hashable, Iterator +from functools import partial +from operator import methodcaller +import sys +from typing import ( + Any, + Generic, + Protocol, + TypeVar, + overload, +) + +if sys.version_info >= (3, 10): + from typing import Concatenate, ParamSpec +else: + from typing_extensions import Concatenate, ParamSpec + +_P = ParamSpec('_P') +_R = TypeVar('_R') +_T = TypeVar('_T') +_R1 = TypeVar('_R1') +_R2 = TypeVar('_R2') +_V = TypeVar('_V') +_S = TypeVar('_S') +_R_co = TypeVar('_R_co', covariant=True) + +class _OnceCallable(Protocol[_P, _R]): + saved_result: _R + reset: Callable[[], None] + def __call__(self, *args: _P.args, **kwargs: _P.kwargs) -> _R: ... + +class _ProxyMethodCacheWrapper(Protocol[_R_co]): + cache_clear: Callable[[], None] + def __call__(self, *args: Hashable, **kwargs: Hashable) -> _R_co: ... + +class _MethodCacheWrapper(Protocol[_R_co]): + def cache_clear(self) -> None: ... + def __call__(self, *args: Hashable, **kwargs: Hashable) -> _R_co: ... + +# `compose()` overloads below will cover most use cases. + +@overload +def compose( + __func1: Callable[[_R], _T], + __func2: Callable[_P, _R], + /, +) -> Callable[_P, _T]: ... +@overload +def compose( + __func1: Callable[[_R], _T], + __func2: Callable[[_R1], _R], + __func3: Callable[_P, _R1], + /, +) -> Callable[_P, _T]: ... +@overload +def compose( + __func1: Callable[[_R], _T], + __func2: Callable[[_R2], _R], + __func3: Callable[[_R1], _R2], + __func4: Callable[_P, _R1], + /, +) -> Callable[_P, _T]: ... +def once(func: Callable[_P, _R]) -> _OnceCallable[_P, _R]: ... +def method_cache( + method: Callable[..., _R], + cache_wrapper: Callable[[Callable[..., _R]], _MethodCacheWrapper[_R]] = ..., +) -> _MethodCacheWrapper[_R] | _ProxyMethodCacheWrapper[_R]: ... +def apply( + transform: Callable[[_R], _T] +) -> Callable[[Callable[_P, _R]], Callable[_P, _T]]: ... +def result_invoke( + action: Callable[[_R], Any] +) -> Callable[[Callable[_P, _R]], Callable[_P, _R]]: ... +def invoke( + f: Callable[_P, _R], /, *args: _P.args, **kwargs: _P.kwargs +) -> Callable[_P, _R]: ... +def call_aside( + f: Callable[_P, _R], *args: _P.args, **kwargs: _P.kwargs +) -> Callable[_P, _R]: ... + +class Throttler(Generic[_R]): + last_called: float + func: Callable[..., _R] + max_rate: float + def __init__( + self, func: Callable[..., _R] | Throttler[_R], max_rate: float = ... + ) -> None: ... + def reset(self) -> None: ... + def __call__(self, *args: Any, **kwargs: Any) -> _R: ... + def __get__(self, obj: Any, owner: type[Any] | None = ...) -> Callable[..., _R]: ... + +def first_invoke( + func1: Callable[..., Any], func2: Callable[_P, _R] +) -> Callable[_P, _R]: ... + +method_caller: Callable[..., methodcaller] + +def retry_call( + func: Callable[..., _R], + cleanup: Callable[..., None] = ..., + retries: int | float = ..., + trap: type[BaseException] | tuple[type[BaseException], ...] = ..., +) -> _R: ... +def retry( + cleanup: Callable[..., None] = ..., + retries: int | float = ..., + trap: type[BaseException] | tuple[type[BaseException], ...] 
= ..., +) -> Callable[[Callable[..., _R]], Callable[..., _R]]: ... +def print_yielded(func: Callable[_P, Iterator[Any]]) -> Callable[_P, None]: ... +def pass_none( + func: Callable[Concatenate[_T, _P], _R] +) -> Callable[Concatenate[_T, _P], _R]: ... +def assign_params( + func: Callable[..., _R], namespace: dict[str, Any] +) -> partial[_R]: ... +def save_method_args( + method: Callable[Concatenate[_S, _P], _R] +) -> Callable[Concatenate[_S, _P], _R]: ... +def except_( + *exceptions: type[BaseException], replace: Any = ..., use: Any = ... +) -> Callable[[Callable[_P, Any]], Callable[_P, Any]]: ... +def identity(x: _T) -> _T: ... +def bypass_when( + check: _V, *, _op: Callable[[_V], Any] = ... +) -> Callable[[Callable[[_T], _R]], Callable[[_T], _T | _R]]: ... +def bypass_unless( + check: Any, +) -> Callable[[Callable[[_T], _R]], Callable[[_T], _T | _R]]: ... diff --git a/lib/pkg_resources/_vendor/jaraco/functools/py.typed b/lib/pkg_resources/_vendor/jaraco/functools/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/lib/pkg_resources/_vendor/more_itertools/__init__.py b/lib/pkg_resources/_vendor/more_itertools/__init__.py index 66443971..aff94a9a 100644 --- a/lib/pkg_resources/_vendor/more_itertools/__init__.py +++ b/lib/pkg_resources/_vendor/more_itertools/__init__.py @@ -3,4 +3,4 @@ from .more import * # noqa from .recipes import * # noqa -__version__ = '9.1.0' +__version__ = '10.2.0' diff --git a/lib/pkg_resources/_vendor/more_itertools/more.py b/lib/pkg_resources/_vendor/more_itertools/more.py index e0e2d3de..d0957681 100644 --- a/lib/pkg_resources/_vendor/more_itertools/more.py +++ b/lib/pkg_resources/_vendor/more_itertools/more.py @@ -2,7 +2,7 @@ import warnings from collections import Counter, defaultdict, deque, abc from collections.abc import Sequence -from functools import partial, reduce, wraps +from functools import cached_property, partial, reduce, wraps from heapq import heapify, heapreplace, heappop from itertools import ( chain, @@ -17,8 +17,9 @@ from itertools import ( takewhile, tee, zip_longest, + product, ) -from math import exp, factorial, floor, log +from math import exp, factorial, floor, log, perm, comb from queue import Empty, Queue from random import random, randrange, uniform from operator import itemgetter, mul, sub, gt, lt, ge, le @@ -36,6 +37,7 @@ from .recipes import ( take, unique_everseen, all_equal, + batched, ) __all__ = [ @@ -53,6 +55,7 @@ __all__ = [ 'circular_shifts', 'collapse', 'combination_index', + 'combination_with_replacement_index', 'consecutive_groups', 'constrained_batches', 'consumer', @@ -65,8 +68,10 @@ __all__ = [ 'divide', 'duplicates_everseen', 'duplicates_justseen', + 'classify_unique', 'exactly_n', 'filter_except', + 'filter_map', 'first', 'gray_product', 'groupby_transform', @@ -80,6 +85,7 @@ __all__ = [ 'is_sorted', 'islice_extended', 'iterate', + 'iter_suppress', 'last', 'locate', 'longest_common_prefix', @@ -93,10 +99,13 @@ __all__ = [ 'nth_or_last', 'nth_permutation', 'nth_product', + 'nth_combination_with_replacement', 'numeric_range', 'one', 'only', + 'outer_product', 'padded', + 'partial_product', 'partitions', 'peekable', 'permutation_index', @@ -125,6 +134,7 @@ __all__ = [ 'strictly_n', 'substrings', 'substrings_indexes', + 'takewhile_inclusive', 'time_limited', 'unique_in_window', 'unique_to_each', @@ -191,15 +201,14 @@ def first(iterable, default=_marker): ``next(iter(iterable), default)``. 
""" - try: - return next(iter(iterable)) - except StopIteration as e: - if default is _marker: - raise ValueError( - 'first() was called on an empty iterable, and no ' - 'default value was provided.' - ) from e - return default + for item in iterable: + return item + if default is _marker: + raise ValueError( + 'first() was called on an empty iterable, and no ' + 'default value was provided.' + ) + return default def last(iterable, default=_marker): @@ -472,7 +481,10 @@ def iterate(func, start): """ while True: yield start - start = func(start) + try: + start = func(start) + except StopIteration: + break def with_iter(context_manager): @@ -572,6 +584,9 @@ def strictly_n(iterable, n, too_short=None, too_long=None): >>> list(strictly_n(iterable, n)) ['a', 'b', 'c', 'd'] + Note that the returned iterable must be consumed in order for the check to + be made. + By default, *too_short* and *too_long* are functions that raise ``ValueError``. @@ -909,7 +924,7 @@ def substrings_indexes(seq, reverse=False): class bucket: - """Wrap *iterable* and return an object that buckets it iterable into + """Wrap *iterable* and return an object that buckets the iterable into child iterables based on a *key* function. >>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3'] @@ -2069,7 +2084,6 @@ class numeric_range(abc.Sequence, abc.Hashable): if self._step == self._zero: raise ValueError('numeric_range() arg 3 must not be zero') self._growing = self._step > self._zero - self._init_len() def __bool__(self): if self._growing: @@ -2145,7 +2159,8 @@ class numeric_range(abc.Sequence, abc.Hashable): def __len__(self): return self._len - def _init_len(self): + @cached_property + def _len(self): if self._growing: start = self._start stop = self._stop @@ -2156,10 +2171,10 @@ class numeric_range(abc.Sequence, abc.Hashable): step = -self._step distance = stop - start if distance <= self._zero: - self._len = 0 + return 0 else: # distance > 0 and step > 0: regular euclidean division q, r = divmod(distance, step) - self._len = int(q) + int(r != self._zero) + return int(q) + int(r != self._zero) def __reduce__(self): return numeric_range, (self._start, self._stop, self._step) @@ -2699,6 +2714,9 @@ class seekable: >>> it.seek(10) >>> next(it) '10' + >>> it.relative_seek(-2) # Seeking relative to the current position + >>> next(it) + '9' >>> it.seek(20) # Seeking past the end of the source isn't a problem >>> list(it) [] @@ -2812,6 +2830,10 @@ class seekable: if remainder > 0: consume(self, remainder) + def relative_seek(self, count): + index = len(self._cache) + self.seek(max(index + count, 0)) + class run_length: """ @@ -3205,6 +3227,8 @@ class time_limited: stops if the time elapsed is greater than *limit_seconds*. If your time limit is 1 second, but it takes 2 seconds to generate the first item from the iterable, the function will run for 2 seconds and not yield anything. + As a special case, when *limit_seconds* is zero, the iterator never + returns anything. """ @@ -3220,6 +3244,9 @@ class time_limited: return self def __next__(self): + if self.limit_seconds == 0: + self.timed_out = True + raise StopIteration item = next(self._iterable) if monotonic() - self._start_time > self.limit_seconds: self.timed_out = True @@ -3339,7 +3366,7 @@ def iequals(*iterables): >>> iequals("abc", "acb") False - Not to be confused with :func:`all_equals`, which checks whether all + Not to be confused with :func:`all_equal`, which checks whether all elements of iterable are equal to each other. 
""" @@ -3835,7 +3862,7 @@ def nth_permutation(iterable, r, index): elif not 0 <= r < n: raise ValueError else: - c = factorial(n) // factorial(n - r) + c = perm(n, r) if index < 0: index += c @@ -3858,6 +3885,52 @@ def nth_permutation(iterable, r, index): return tuple(map(pool.pop, result)) +def nth_combination_with_replacement(iterable, r, index): + """Equivalent to + ``list(combinations_with_replacement(iterable, r))[index]``. + + + The subsequences with repetition of *iterable* that are of length *r* can + be ordered lexicographically. :func:`nth_combination_with_replacement` + computes the subsequence at sort position *index* directly, without + computing the previous subsequences with replacement. + + >>> nth_combination_with_replacement(range(5), 3, 5) + (0, 1, 1) + + ``ValueError`` will be raised If *r* is negative or greater than the length + of *iterable*. + ``IndexError`` will be raised if the given *index* is invalid. + """ + pool = tuple(iterable) + n = len(pool) + if (r < 0) or (r > n): + raise ValueError + + c = comb(n + r - 1, r) + + if index < 0: + index += c + + if (index < 0) or (index >= c): + raise IndexError + + result = [] + i = 0 + while r: + r -= 1 + while n >= 0: + num_combs = comb(n + r - 1, r) + if index < num_combs: + break + n -= 1 + i += 1 + index -= num_combs + result.append(pool[i]) + + return tuple(result) + + def value_chain(*args): """Yield all arguments passed to the function in the same order in which they were passed. If an argument itself is iterable then iterate over its @@ -3949,9 +4022,66 @@ def combination_index(element, iterable): for i, j in enumerate(reversed(indexes), start=1): j = n - j if i <= j: - index += factorial(j) // (factorial(i) * factorial(j - i)) + index += comb(j, i) - return factorial(n + 1) // (factorial(k + 1) * factorial(n - k)) - index + return comb(n + 1, k + 1) - index + + +def combination_with_replacement_index(element, iterable): + """Equivalent to + ``list(combinations_with_replacement(iterable, r)).index(element)`` + + The subsequences with repetition of *iterable* that are of length *r* can + be ordered lexicographically. :func:`combination_with_replacement_index` + computes the index of the first *element*, without computing the previous + combinations with replacement. + + >>> combination_with_replacement_index('adf', 'abcdefg') + 20 + + ``ValueError`` will be raised if the given *element* isn't one of the + combinations with replacement of *iterable*. 
+ """ + element = tuple(element) + l = len(element) + element = enumerate(element) + + k, y = next(element, (None, None)) + if k is None: + return 0 + + indexes = [] + pool = tuple(iterable) + for n, x in enumerate(pool): + while x == y: + indexes.append(n) + tmp, y = next(element, (None, None)) + if tmp is None: + break + else: + k = tmp + if y is None: + break + else: + raise ValueError( + 'element is not a combination with replacement of iterable' + ) + + n = len(pool) + occupations = [0] * n + for p in indexes: + occupations[p] += 1 + + index = 0 + cumulative_sum = 0 + for k in range(1, n): + cumulative_sum += occupations[k - 1] + j = l + n - 1 - k - cumulative_sum + i = n - k + if i <= j: + index += comb(j, i) + + return index def permutation_index(element, iterable): @@ -4056,26 +4186,20 @@ def _chunked_even_finite(iterable, N, n): num_full = N - partial_size * num_lists num_partial = num_lists - num_full - buffer = [] - iterator = iter(iterable) - # Yield num_full lists of full_size - for x in iterator: - buffer.append(x) - if len(buffer) == full_size: - yield buffer - buffer = [] - num_full -= 1 - if num_full <= 0: - break + partial_start_idx = num_full * full_size + if full_size > 0: + for i in range(0, partial_start_idx, full_size): + yield list(islice(iterable, i, i + full_size)) # Yield num_partial lists of partial_size - for x in iterator: - buffer.append(x) - if len(buffer) == partial_size: - yield buffer - buffer = [] - num_partial -= 1 + if partial_size > 0: + for i in range( + partial_start_idx, + partial_start_idx + (num_partial * partial_size), + partial_size, + ): + yield list(islice(iterable, i, i + partial_size)) def zip_broadcast(*objects, scalar_types=(str, bytes), strict=False): @@ -4114,30 +4238,23 @@ def zip_broadcast(*objects, scalar_types=(str, bytes), strict=False): if not size: return + new_item = [None] * size iterables, iterable_positions = [], [] - scalars, scalar_positions = [], [] for i, obj in enumerate(objects): if is_scalar(obj): - scalars.append(obj) - scalar_positions.append(i) + new_item[i] = obj else: iterables.append(iter(obj)) iterable_positions.append(i) - if len(scalars) == size: + if not iterables: yield tuple(objects) return zipper = _zip_equal if strict else zip for item in zipper(*iterables): - new_item = [None] * size - - for i, elem in zip(iterable_positions, item): - new_item[i] = elem - - for i, elem in zip(scalar_positions, scalars): - new_item[i] = elem - + for i, new_item[i] in zip(iterable_positions, item): + pass yield tuple(new_item) @@ -4162,22 +4279,23 @@ def unique_in_window(iterable, n, key=None): raise ValueError('n must be greater than 0') window = deque(maxlen=n) - uniques = set() + counts = defaultdict(int) use_key = key is not None for item in iterable: + if len(window) == n: + to_discard = window[0] + if counts[to_discard] == 1: + del counts[to_discard] + else: + counts[to_discard] -= 1 + k = key(item) if use_key else item - if k in uniques: - continue - - if len(uniques) == n: - uniques.discard(window[0]) - - uniques.add(k) + if k not in counts: + yield item + counts[k] += 1 window.append(k) - yield item - def duplicates_everseen(iterable, key=None): """Yield duplicate elements after their first appearance. 
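The ``unique_in_window`` rewrite above swaps the auxiliary set for per-key counts, so duplicate keys inside the window are tracked correctly as old entries slide out. Its documented behavior, as a sketch under the same standalone-install assumption as the other examples:

    >>> from more_itertools import unique_in_window
    >>> list(unique_in_window([0, 1, 0, 2, 3, 0], 3))
    [0, 1, 2, 3, 0]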
@@ -4187,7 +4305,7 @@ def duplicates_everseen(iterable, key=None): >>> list(duplicates_everseen('AaaBbbCccAaa', str.lower)) ['a', 'a', 'b', 'b', 'c', 'c', 'A', 'a', 'a'] - This function is analagous to :func:`unique_everseen` and is subject to + This function is analogous to :func:`unique_everseen` and is subject to the same performance considerations. """ @@ -4217,15 +4335,52 @@ def duplicates_justseen(iterable, key=None): >>> list(duplicates_justseen('AaaBbbCccAaa', str.lower)) ['a', 'a', 'b', 'b', 'c', 'c', 'a', 'a'] - This function is analagous to :func:`unique_justseen`. + This function is analogous to :func:`unique_justseen`. """ - return flatten( - map( - lambda group_tuple: islice_extended(group_tuple[1])[1:], - groupby(iterable, key), - ) - ) + return flatten(g for _, g in groupby(iterable, key) for _ in g) + + +def classify_unique(iterable, key=None): + """Classify each element in terms of its uniqueness. + + For each element in the input iterable, return a 3-tuple consisting of: + + 1. The element itself + 2. ``False`` if the element is equal to the one preceding it in the input, + ``True`` otherwise (i.e. the equivalent of :func:`unique_justseen`) + 3. ``False`` if this element has been seen anywhere in the input before, + ``True`` otherwise (i.e. the equivalent of :func:`unique_everseen`) + + >>> list(classify_unique('otto')) # doctest: +NORMALIZE_WHITESPACE + [('o', True, True), + ('t', True, True), + ('t', False, False), + ('o', True, False)] + + This function is analogous to :func:`unique_everseen` and is subject to + the same performance considerations. + + """ + seen_set = set() + seen_list = [] + use_key = key is not None + previous = None + + for i, element in enumerate(iterable): + k = key(element) if use_key else element + is_unique_justseen = not i or previous != k + previous = k + is_unique_everseen = False + try: + if k not in seen_set: + seen_set.add(k) + is_unique_everseen = True + except TypeError: + if k not in seen_list: + seen_list.append(k) + is_unique_everseen = True + yield element, is_unique_justseen, is_unique_everseen def minmax(iterable_or_value, *others, key=None, default=_marker): @@ -4389,3 +4544,112 @@ def gray_product(*iterables): o[j] = -o[j] f[j] = f[j + 1] f[j + 1] = j + 1 + + +def partial_product(*iterables): + """Yields tuples containing one item from each iterator, with subsequent + tuples changing a single item at a time by advancing each iterator until it + is exhausted. This sequence guarantees every value in each iterable is + output at least once without generating all possible combinations. + + This may be useful, for example, when testing an expensive function. + + >>> list(partial_product('AB', 'C', 'DEF')) + [('A', 'C', 'D'), ('B', 'C', 'D'), ('B', 'C', 'E'), ('B', 'C', 'F')] + """ + + iterators = list(map(iter, iterables)) + + try: + prod = [next(it) for it in iterators] + except StopIteration: + return + yield tuple(prod) + + for i, it in enumerate(iterators): + for prod[i] in it: + yield tuple(prod) + + +def takewhile_inclusive(predicate, iterable): + """A variant of :func:`takewhile` that yields one additional element. + + >>> list(takewhile_inclusive(lambda x: x < 5, [1, 4, 6, 4, 1])) + [1, 4, 6] + + :func:`takewhile` would return ``[1, 4]``. + """ + for x in iterable: + yield x + if not predicate(x): + break + + +def outer_product(func, xs, ys, *args, **kwargs): + """A generalized outer product that applies a binary function to all + pairs of items. Returns a 2D matrix with ``len(xs)`` rows and ``len(ys)`` + columns. 
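The new ``classify_unique`` above effectively runs the ``unique_justseen`` and ``unique_everseen`` checks in a single pass, falling back to a list-based seen check for unhashable keys. A short sketch:

    >>> from more_itertools import classify_unique
    >>> list(classify_unique('AABA'))
    [('A', True, True), ('A', False, False), ('B', True, True), ('A', True, False)]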
+ Also accepts ``*args`` and ``**kwargs`` that are passed to ``func``. + + Multiplication table: + + >>> list(outer_product(mul, range(1, 4), range(1, 6))) + [(1, 2, 3, 4, 5), (2, 4, 6, 8, 10), (3, 6, 9, 12, 15)] + + Cross tabulation: + + >>> xs = ['A', 'B', 'A', 'A', 'B', 'B', 'A', 'A', 'B', 'B'] + >>> ys = ['X', 'X', 'X', 'Y', 'Z', 'Z', 'Y', 'Y', 'Z', 'Z'] + >>> rows = list(zip(xs, ys)) + >>> count_rows = lambda x, y: rows.count((x, y)) + >>> list(outer_product(count_rows, sorted(set(xs)), sorted(set(ys)))) + [(2, 3, 0), (1, 0, 4)] + + Usage with ``*args`` and ``**kwargs``: + + >>> animals = ['cat', 'wolf', 'mouse'] + >>> list(outer_product(min, animals, animals, key=len)) + [('cat', 'cat', 'cat'), ('cat', 'wolf', 'wolf'), ('cat', 'wolf', 'mouse')] + """ + ys = tuple(ys) + return batched( + starmap(lambda x, y: func(x, y, *args, **kwargs), product(xs, ys)), + n=len(ys), + ) + + +def iter_suppress(iterable, *exceptions): + """Yield each of the items from *iterable*. If the iteration raises one of + the specified *exceptions*, that exception will be suppressed and iteration + will stop. + + >>> from itertools import chain + >>> def breaks_at_five(x): + ... while True: + ... if x >= 5: + ... raise RuntimeError + ... yield x + ... x += 1 + >>> it_1 = iter_suppress(breaks_at_five(1), RuntimeError) + >>> it_2 = iter_suppress(breaks_at_five(2), RuntimeError) + >>> list(chain(it_1, it_2)) + [1, 2, 3, 4, 2, 3, 4] + """ + try: + yield from iterable + except exceptions: + return + + +def filter_map(func, iterable): + """Apply *func* to every element of *iterable*, yielding only those which + are not ``None``. + + >>> elems = ['1', 'a', '2', 'b', '3'] + >>> list(filter_map(lambda s: int(s) if s.isnumeric() else None, elems)) + [1, 2, 3] + """ + for x in iterable: + y = func(x) + if y is not None: + yield y diff --git a/lib/pkg_resources/_vendor/more_itertools/more.pyi b/lib/pkg_resources/_vendor/more_itertools/more.pyi index 75c5232c..9a5fc911 100644 --- a/lib/pkg_resources/_vendor/more_itertools/more.pyi +++ b/lib/pkg_resources/_vendor/more_itertools/more.pyi @@ -29,7 +29,7 @@ _U = TypeVar('_U') _V = TypeVar('_V') _W = TypeVar('_W') _T_co = TypeVar('_T_co', covariant=True) -_GenFn = TypeVar('_GenFn', bound=Callable[..., Iterator[object]]) +_GenFn = TypeVar('_GenFn', bound=Callable[..., Iterator[Any]]) _Raisable = BaseException | Type[BaseException] @type_check_only @@ -74,7 +74,7 @@ class peekable(Generic[_T], Iterator[_T]): def __getitem__(self, index: slice) -> list[_T]: ... def consumer(func: _GenFn) -> _GenFn: ... -def ilen(iterable: Iterable[object]) -> int: ... +def ilen(iterable: Iterable[_T]) -> int: ... def iterate(func: Callable[[_T], _T], start: _T) -> Iterator[_T]: ... def with_iter( context_manager: ContextManager[Iterable[_T]], @@ -116,7 +116,7 @@ class bucket(Generic[_T, _U], Container[_U]): self, iterable: Iterable[_T], key: Callable[[_T], _U], - validator: Callable[[object], object] | None = ..., + validator: Callable[[_U], object] | None = ..., ) -> None: ... def __contains__(self, value: object) -> bool: ... def __iter__(self) -> Iterator[_U]: ... @@ -383,7 +383,7 @@ def mark_ends( iterable: Iterable[_T], ) -> Iterable[tuple[bool, bool, _T]]: ... def locate( - iterable: Iterable[object], + iterable: Iterable[_T], pred: Callable[..., Any] = ..., window_size: int | None = ..., ) -> Iterator[int]: ... @@ -440,6 +440,7 @@ class seekable(Generic[_T], Iterator[_T]): def peek(self, default: _U) -> _T | _U: ... def elements(self) -> SequenceView[_T]: ... 
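``filter_map`` and ``iter_suppress``, added above, cover the two common failure styles for a per-item transform: return ``None`` to drop an item, or raise to end the stream. A sketch in which the ``parse_int`` helper is illustrative rather than part of the library:

    >>> from more_itertools import filter_map
    >>> def parse_int(s):
    ...     try:
    ...         return int(s)
    ...     except ValueError:
    ...         return None  # None results are dropped by filter_map
    >>> list(filter_map(parse_int, ['1', 'x', '3']))
    [1, 3]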
def seek(self, index: int) -> None: ... + def relative_seek(self, count: int) -> None: ... class run_length: @staticmethod @@ -578,6 +579,9 @@ def all_unique( iterable: Iterable[_T], key: Callable[[_T], _U] | None = ... ) -> bool: ... def nth_product(index: int, *args: Iterable[_T]) -> tuple[_T, ...]: ... +def nth_combination_with_replacement( + iterable: Iterable[_T], r: int, index: int +) -> tuple[_T, ...]: ... def nth_permutation( iterable: Iterable[_T], r: int, index: int ) -> tuple[_T, ...]: ... @@ -586,6 +590,9 @@ def product_index(element: Iterable[_T], *args: Iterable[_T]) -> int: ... def combination_index( element: Iterable[_T], iterable: Iterable[_T] ) -> int: ... +def combination_with_replacement_index( + element: Iterable[_T], iterable: Iterable[_T] +) -> int: ... def permutation_index( element: Iterable[_T], iterable: Iterable[_T] ) -> int: ... @@ -611,6 +618,9 @@ def duplicates_everseen( def duplicates_justseen( iterable: Iterable[_T], key: Callable[[_T], _U] | None = ... ) -> Iterator[_T]: ... +def classify_unique( + iterable: Iterable[_T], key: Callable[[_T], _U] | None = ... +) -> Iterator[tuple[_T, bool, bool]]: ... class _SupportsLessThan(Protocol): def __lt__(self, __other: Any) -> bool: ... @@ -655,12 +665,31 @@ def minmax( def longest_common_prefix( iterables: Iterable[Iterable[_T]], ) -> Iterator[_T]: ... -def iequals(*iterables: Iterable[object]) -> bool: ... +def iequals(*iterables: Iterable[Any]) -> bool: ... def constrained_batches( - iterable: Iterable[object], + iterable: Iterable[_T], max_size: int, max_count: int | None = ..., get_len: Callable[[_T], object] = ..., strict: bool = ..., ) -> Iterator[tuple[_T]]: ... def gray_product(*iterables: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ... +def partial_product(*iterables: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ... +def takewhile_inclusive( + predicate: Callable[[_T], bool], iterable: Iterable[_T] +) -> Iterator[_T]: ... +def outer_product( + func: Callable[[_T, _U], _V], + xs: Iterable[_T], + ys: Iterable[_U], + *args: Any, + **kwargs: Any, +) -> Iterator[tuple[_V, ...]]: ... +def iter_suppress( + iterable: Iterable[_T], + *exceptions: Type[BaseException], +) -> Iterator[_T]: ... +def filter_map( + func: Callable[[_T], _V | None], + iterable: Iterable[_T], +) -> Iterator[_V]: ... diff --git a/lib/pkg_resources/_vendor/more_itertools/recipes.py b/lib/pkg_resources/_vendor/more_itertools/recipes.py index 3facc2e3..145e3cb5 100644 --- a/lib/pkg_resources/_vendor/more_itertools/recipes.py +++ b/lib/pkg_resources/_vendor/more_itertools/recipes.py @@ -9,11 +9,10 @@ Some backward-compatible usability improvements have been made. 
""" import math import operator -import warnings from collections import deque from collections.abc import Sized -from functools import reduce +from functools import partial, reduce from itertools import ( chain, combinations, @@ -52,10 +51,13 @@ __all__ = [ 'pad_none', 'pairwise', 'partition', + 'polynomial_eval', 'polynomial_from_roots', + 'polynomial_derivative', 'powerset', 'prepend', 'quantify', + 'reshape', 'random_combination_with_replacement', 'random_combination', 'random_permutation', @@ -65,9 +67,11 @@ __all__ = [ 'sieve', 'sliding_window', 'subslices', + 'sum_of_squares', 'tabulate', 'tail', 'take', + 'totient', 'transpose', 'triplewise', 'unique_everseen', @@ -77,6 +81,18 @@ __all__ = [ _marker = object() +# zip with strict is available for Python 3.10+ +try: + zip(strict=True) +except TypeError: + _zip_strict = zip +else: + _zip_strict = partial(zip, strict=True) + +# math.sumprod is available for Python 3.12+ +_sumprod = getattr(math, 'sumprod', lambda x, y: dotproduct(x, y)) + + def take(n, iterable): """Return first *n* items of the iterable as a list. @@ -293,7 +309,7 @@ def _pairwise(iterable): """ a, b = tee(iterable) next(b, None) - yield from zip(a, b) + return zip(a, b) try: @@ -303,7 +319,7 @@ except ImportError: else: def pairwise(iterable): - yield from itertools_pairwise(iterable) + return itertools_pairwise(iterable) pairwise.__doc__ = _pairwise.__doc__ @@ -334,13 +350,9 @@ def _zip_equal(*iterables): for i, it in enumerate(iterables[1:], 1): size = len(it) if size != first_size: - break - else: - # If we didn't break out, we can use the built-in zip. - return zip(*iterables) - - # If we did break out, there was a mismatch. - raise UnequalIterablesError(details=(first_size, i, size)) + raise UnequalIterablesError(details=(first_size, i, size)) + # All sizes are equal, we can use the built-in zip. + return zip(*iterables) # If any one of the iterables didn't have a length, start reading # them until one runs out. except TypeError: @@ -433,12 +445,9 @@ def partition(pred, iterable): if pred is None: pred = bool - evaluations = ((pred(x), x) for x in iterable) - t1, t2 = tee(evaluations) - return ( - (x for (cond, x) in t1 if not cond), - (x for (cond, x) in t2 if cond), - ) + t1, t2, p = tee(iterable, 3) + p1, p2 = tee(map(pred, p)) + return (compress(t1, map(operator.not_, p1)), compress(t2, p2)) def powerset(iterable): @@ -486,7 +495,7 @@ def unique_everseen(iterable, key=None): >>> list(unique_everseen(iterable, key=tuple)) # Faster [[1, 2], [2, 3]] - Similary, you may want to convert unhashable ``set`` objects with + Similarly, you may want to convert unhashable ``set`` objects with ``key=frozenset``. For ``dict`` objects, ``key=lambda x: frozenset(x.items())`` can be used. @@ -518,6 +527,9 @@ def unique_justseen(iterable, key=None): ['A', 'B', 'C', 'A', 'D'] """ + if key is None: + return map(operator.itemgetter(0), groupby(iterable)) + return map(next, map(operator.itemgetter(1), groupby(iterable, key))) @@ -712,12 +724,14 @@ def convolve(signal, kernel): is immediately consumed and stored. """ + # This implementation intentionally doesn't match the one in the itertools + # documentation. kernel = tuple(kernel)[::-1] n = len(kernel) window = deque([0], maxlen=n) * n for x in chain(signal, repeat(0, n - 1)): window.append(x) - yield sum(map(operator.mul, kernel, window)) + yield _sumprod(kernel, window) def before_and_after(predicate, it): @@ -778,9 +792,7 @@ def sliding_window(iterable, n): For a variant with more features, see :func:`windowed`. 
""" it = iter(iterable) - window = deque(islice(it, n), maxlen=n) - if len(window) == n: - yield tuple(window) + window = deque(islice(it, n - 1), maxlen=n) for x in it: window.append(x) yield tuple(window) @@ -807,39 +819,38 @@ def polynomial_from_roots(roots): >>> polynomial_from_roots(roots) # x^3 - 4 * x^2 - 17 * x + 60 [1, -4, -17, 60] """ - # Use math.prod for Python 3.8+, - prod = getattr(math, 'prod', lambda x: reduce(operator.mul, x, 1)) - roots = list(map(operator.neg, roots)) - return [ - sum(map(prod, combinations(roots, k))) for k in range(len(roots) + 1) - ] + factors = zip(repeat(1), map(operator.neg, roots)) + return list(reduce(convolve, factors, [1])) -def iter_index(iterable, value, start=0): +def iter_index(iterable, value, start=0, stop=None): """Yield the index of each place in *iterable* that *value* occurs, - beginning with index *start*. + beginning with index *start* and ending before index *stop*. See :func:`locate` for a more general means of finding the indexes associated with particular values. >>> list(iter_index('AABCADEAF', 'A')) [0, 1, 4, 7] + >>> list(iter_index('AABCADEAF', 'A', 1)) # start index is inclusive + [1, 4, 7] + >>> list(iter_index('AABCADEAF', 'A', 1, 7)) # stop index is not inclusive + [1, 4] """ - try: - seq_index = iterable.index - except AttributeError: + seq_index = getattr(iterable, 'index', None) + if seq_index is None: # Slow path for general iterables - it = islice(iterable, start, None) + it = islice(iterable, start, stop) for i, element in enumerate(it, start): if element is value or element == value: yield i else: # Fast path for sequences + stop = len(iterable) if stop is None else stop i = start - 1 try: while True: - i = seq_index(value, i + 1) - yield i + yield (i := seq_index(value, i + 1, stop)) except ValueError: pass @@ -850,81 +861,152 @@ def sieve(n): >>> list(sieve(30)) [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] """ - isqrt = getattr(math, 'isqrt', lambda x: int(math.sqrt(x))) + if n > 2: + yield 2 + start = 3 data = bytearray((0, 1)) * (n // 2) - data[:3] = 0, 0, 0 - limit = isqrt(n) + 1 - for p in compress(range(limit), data): + limit = math.isqrt(n) + 1 + for p in iter_index(data, 1, start, limit): + yield from iter_index(data, 1, start, p * p) data[p * p : n : p + p] = bytes(len(range(p * p, n, p + p))) - data[2] = 1 - return iter_index(data, 1) if n > 2 else iter([]) + start = p * p + yield from iter_index(data, 1, start) -def batched(iterable, n): - """Batch data into lists of length *n*. The last batch may be shorter. +def _batched(iterable, n, *, strict=False): + """Batch data into tuples of length *n*. If the number of items in + *iterable* is not divisible by *n*: + * The last batch will be shorter if *strict* is ``False``. + * :exc:`ValueError` will be raised if *strict* is ``True``. >>> list(batched('ABCDEFG', 3)) - [['A', 'B', 'C'], ['D', 'E', 'F'], ['G']] + [('A', 'B', 'C'), ('D', 'E', 'F'), ('G',)] - This recipe is from the ``itertools`` docs. This library also provides - :func:`chunked`, which has a different implementation. + On Python 3.13 and above, this is an alias for :func:`itertools.batched`. """ - if hexversion >= 0x30C00A0: # Python 3.12.0a0 - warnings.warn( - ( - 'batched will be removed in a future version of ' - 'more-itertools. 
Use the standard library ' - 'itertools.batched function instead' - ), - DeprecationWarning, - ) - + if n < 1: + raise ValueError('n must be at least one') it = iter(iterable) - while True: - batch = list(islice(it, n)) - if not batch: - break + while batch := tuple(islice(it, n)): + if strict and len(batch) != n: + raise ValueError('batched(): incomplete batch') yield batch +if hexversion >= 0x30D00A2: + from itertools import batched as itertools_batched + + def batched(iterable, n, *, strict=False): + return itertools_batched(iterable, n, strict=strict) + +else: + batched = _batched + + batched.__doc__ = _batched.__doc__ + + def transpose(it): - """Swap the rows and columns of the input. + """Swap the rows and columns of the input matrix. >>> list(transpose([(1, 2, 3), (11, 22, 33)])) [(1, 11), (2, 22), (3, 33)] The caller should ensure that the dimensions of the input are compatible. + If the input is empty, no output will be produced. """ - # TODO: when 3.9 goes end-of-life, add stric=True to this. - return zip(*it) + return _zip_strict(*it) + + +def reshape(matrix, cols): + """Reshape the 2-D input *matrix* to have a column count given by *cols*. + + >>> matrix = [(0, 1), (2, 3), (4, 5)] + >>> cols = 3 + >>> list(reshape(matrix, cols)) + [(0, 1, 2), (3, 4, 5)] + """ + return batched(chain.from_iterable(matrix), cols) def matmul(m1, m2): """Multiply two matrices. + >>> list(matmul([(7, 5), (3, 5)], [(2, 5), (7, 9)])) - [[49, 80], [41, 60]] + [(49, 80), (41, 60)] The caller should ensure that the dimensions of the input matrices are compatible with each other. """ n = len(m2[0]) - return batched(starmap(dotproduct, product(m1, transpose(m2))), n) + return batched(starmap(_sumprod, product(m1, transpose(m2))), n) def factor(n): """Yield the prime factors of n. + >>> list(factor(360)) [2, 2, 2, 3, 3, 5] """ - isqrt = getattr(math, 'isqrt', lambda x: int(math.sqrt(x))) - for prime in sieve(isqrt(n) + 1): - while True: - quotient, remainder = divmod(n, prime) - if remainder: - break + for prime in sieve(math.isqrt(n) + 1): + while not n % prime: yield prime - n = quotient + n //= prime if n == 1: return - if n >= 2: + if n > 1: yield n + + +def polynomial_eval(coefficients, x): + """Evaluate a polynomial at a specific value. + + Example: evaluating x^3 - 4 * x^2 - 17 * x + 60 at x = 2.5: + + >>> coefficients = [1, -4, -17, 60] + >>> x = 2.5 + >>> polynomial_eval(coefficients, x) + 8.125 + """ + n = len(coefficients) + if n == 0: + return x * 0 # coerce zero to the type of x + powers = map(pow, repeat(x), reversed(range(n))) + return _sumprod(coefficients, powers) + + +def sum_of_squares(it): + """Return the sum of the squares of the input values. + + >>> sum_of_squares([10, 20, 30]) + 1400 + """ + return _sumprod(*tee(it)) + + +def polynomial_derivative(coefficients): + """Compute the first derivative of a polynomial. + + Example: evaluating the derivative of x^3 - 4 * x^2 - 17 * x + 60 + + >>> coefficients = [1, -4, -17, 60] + >>> derivative_coefficients = polynomial_derivative(coefficients) + >>> derivative_coefficients + [3, -8, -17] + """ + n = len(coefficients) + powers = reversed(range(1, n)) + return list(map(operator.mul, coefficients, powers)) + + +def totient(n): + """Return the count of natural numbers up to *n* that are coprime with *n*. 
+ + >>> totient(9) + 6 + >>> totient(12) + 4 + """ + for p in unique_justseen(factor(n)): + n = n // p * (p - 1) + + return n diff --git a/lib/pkg_resources/_vendor/more_itertools/recipes.pyi b/lib/pkg_resources/_vendor/more_itertools/recipes.pyi index 0267ed56..ed4c19db 100644 --- a/lib/pkg_resources/_vendor/more_itertools/recipes.pyi +++ b/lib/pkg_resources/_vendor/more_itertools/recipes.pyi @@ -14,6 +14,8 @@ from typing import ( # Type and type variable definitions _T = TypeVar('_T') +_T1 = TypeVar('_T1') +_T2 = TypeVar('_T2') _U = TypeVar('_U') def take(n: int, iterable: Iterable[_T]) -> list[_T]: ... @@ -21,19 +23,19 @@ def tabulate( function: Callable[[int], _T], start: int = ... ) -> Iterator[_T]: ... def tail(n: int, iterable: Iterable[_T]) -> Iterator[_T]: ... -def consume(iterator: Iterable[object], n: int | None = ...) -> None: ... +def consume(iterator: Iterable[_T], n: int | None = ...) -> None: ... @overload def nth(iterable: Iterable[_T], n: int) -> _T | None: ... @overload def nth(iterable: Iterable[_T], n: int, default: _U) -> _T | _U: ... -def all_equal(iterable: Iterable[object]) -> bool: ... +def all_equal(iterable: Iterable[_T]) -> bool: ... def quantify( iterable: Iterable[_T], pred: Callable[[_T], bool] = ... ) -> int: ... def pad_none(iterable: Iterable[_T]) -> Iterator[_T | None]: ... def padnone(iterable: Iterable[_T]) -> Iterator[_T | None]: ... def ncycles(iterable: Iterable[_T], n: int) -> Iterator[_T]: ... -def dotproduct(vec1: Iterable[object], vec2: Iterable[object]) -> object: ... +def dotproduct(vec1: Iterable[_T1], vec2: Iterable[_T2]) -> Any: ... def flatten(listOfLists: Iterable[Iterable[_T]]) -> Iterator[_T]: ... def repeatfunc( func: Callable[..., _U], times: int | None = ..., *args: Any @@ -101,19 +103,26 @@ def sliding_window( iterable: Iterable[_T], n: int ) -> Iterator[tuple[_T, ...]]: ... def subslices(iterable: Iterable[_T]) -> Iterator[list[_T]]: ... -def polynomial_from_roots(roots: Sequence[int]) -> list[int]: ... +def polynomial_from_roots(roots: Sequence[_T]) -> list[_T]: ... def iter_index( - iterable: Iterable[object], + iterable: Iterable[_T], value: Any, start: int | None = ..., + stop: int | None = ..., ) -> Iterator[int]: ... def sieve(n: int) -> Iterator[int]: ... def batched( - iterable: Iterable[_T], - n: int, -) -> Iterator[list[_T]]: ... + iterable: Iterable[_T], n: int, *, strict: bool = False +) -> Iterator[tuple[_T]]: ... def transpose( it: Iterable[Iterable[_T]], -) -> tuple[Iterator[_T], ...]: ... -def matmul(m1: Sequence[_T], m2: Sequence[_T]) -> Iterator[list[_T]]: ... +) -> Iterator[tuple[_T, ...]]: ... +def reshape( + matrix: Iterable[Iterable[_T]], cols: int +) -> Iterator[tuple[_T, ...]]: ... +def matmul(m1: Sequence[_T], m2: Sequence[_T]) -> Iterator[tuple[_T]]: ... def factor(n: int) -> Iterator[int]: ... +def polynomial_eval(coefficients: Sequence[_T], x: _U) -> _U: ... +def sum_of_squares(it: Iterable[_T]) -> _T: ... +def polynomial_derivative(coefficients: Sequence[_T]) -> list[_T]: ... +def totient(n: int) -> int: ... 
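Several of the recipes added above are thin wrappers over the shared ``batched``/``_sumprod`` machinery. Their observable behavior, assuming a standalone more-itertools of this vintage (10.2-era APIs):

    >>> from more_itertools import batched, reshape, polynomial_eval, totient
    >>> list(batched('ABCDEFG', 3))
    [('A', 'B', 'C'), ('D', 'E', 'F'), ('G',)]
    >>> list(batched('ABCDEFG', 3, strict=True))
    Traceback (most recent call last):
    ...
    ValueError: batched(): incomplete batch
    >>> list(reshape([(0, 1), (2, 3), (4, 5)], 3))
    [(0, 1, 2), (3, 4, 5)]
    >>> polynomial_eval([1, -4, -17, 60], 2.5)
    8.125
    >>> totient(12)
    4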
diff --git a/lib/pkg_resources/_vendor/packaging/__init__.py b/lib/pkg_resources/_vendor/packaging/__init__.py index 13cadc7f..e7c0aa12 100644 --- a/lib/pkg_resources/_vendor/packaging/__init__.py +++ b/lib/pkg_resources/_vendor/packaging/__init__.py @@ -6,10 +6,10 @@ __title__ = "packaging" __summary__ = "Core utilities for Python packages" __uri__ = "https://github.com/pypa/packaging" -__version__ = "23.1" +__version__ = "24.0" __author__ = "Donald Stufft and individual contributors" __email__ = "donald@stufft.io" __license__ = "BSD-2-Clause or Apache-2.0" -__copyright__ = "2014-2019 %s" % __author__ +__copyright__ = "2014 %s" % __author__ diff --git a/lib/pkg_resources/_vendor/packaging/_manylinux.py b/lib/pkg_resources/_vendor/packaging/_manylinux.py index 449c655b..ad62505f 100644 --- a/lib/pkg_resources/_vendor/packaging/_manylinux.py +++ b/lib/pkg_resources/_vendor/packaging/_manylinux.py @@ -5,7 +5,7 @@ import os import re import sys import warnings -from typing import Dict, Generator, Iterator, NamedTuple, Optional, Tuple +from typing import Dict, Generator, Iterator, NamedTuple, Optional, Sequence, Tuple from ._elffile import EIClass, EIData, ELFFile, EMachine @@ -50,12 +50,21 @@ def _is_linux_i686(executable: str) -> bool: ) -def _have_compatible_abi(executable: str, arch: str) -> bool: - if arch == "armv7l": +def _have_compatible_abi(executable: str, archs: Sequence[str]) -> bool: + if "armv7l" in archs: return _is_linux_armhf(executable) - if arch == "i686": + if "i686" in archs: return _is_linux_i686(executable) - return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"} + allowed_archs = { + "x86_64", + "aarch64", + "ppc64", + "ppc64le", + "s390x", + "loongarch64", + "riscv64", + } + return any(arch in allowed_archs for arch in archs) # If glibc ever changes its major version, we need to know what the last @@ -81,7 +90,7 @@ def _glibc_version_string_confstr() -> Optional[str]: # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183 try: # Should be a string like "glibc 2.17". - version_string: str = getattr(os, "confstr")("CS_GNU_LIBC_VERSION") + version_string: Optional[str] = os.confstr("CS_GNU_LIBC_VERSION") assert version_string is not None _, version = version_string.rsplit() except (AssertionError, AttributeError, OSError, ValueError): @@ -167,13 +176,13 @@ def _get_glibc_version() -> Tuple[int, int]: # From PEP 513, PEP 600 -def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool: +def _is_compatible(arch: str, version: _GLibCVersion) -> bool: sys_glibc = _get_glibc_version() if sys_glibc < version: return False # Check for presence of _manylinux module. try: - import _manylinux # noqa + import _manylinux except ImportError: return True if hasattr(_manylinux, "manylinux_compatible"): @@ -203,12 +212,22 @@ _LEGACY_MANYLINUX_MAP = { } -def platform_tags(linux: str, arch: str) -> Iterator[str]: - if not _have_compatible_abi(sys.executable, arch): +def platform_tags(archs: Sequence[str]) -> Iterator[str]: + """Generate manylinux tags compatible to the current platform. + + :param archs: Sequence of compatible architectures. + The first one shall be the closest to the actual architecture and be the part of + platform tag after the ``linux_`` prefix, e.g. ``x86_64``. + The ``linux_`` prefix is assumed as a prerequisite for the current platform to + be manylinux-compatible. + + :returns: An iterator of compatible manylinux tags. 
+ """ + if not _have_compatible_abi(sys.executable, archs): return # Oldest glibc to be supported regardless of architecture is (2, 17). too_old_glibc2 = _GLibCVersion(2, 16) - if arch in {"x86_64", "i686"}: + if set(archs) & {"x86_64", "i686"}: # On x86/i686 also oldest glibc to be supported is (2, 5). too_old_glibc2 = _GLibCVersion(2, 4) current_glibc = _GLibCVersion(*_get_glibc_version()) @@ -222,19 +241,20 @@ def platform_tags(linux: str, arch: str) -> Iterator[str]: for glibc_major in range(current_glibc.major - 1, 1, -1): glibc_minor = _LAST_GLIBC_MINOR[glibc_major] glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor)) - for glibc_max in glibc_max_list: - if glibc_max.major == too_old_glibc2.major: - min_minor = too_old_glibc2.minor - else: - # For other glibc major versions oldest supported is (x, 0). - min_minor = -1 - for glibc_minor in range(glibc_max.minor, min_minor, -1): - glibc_version = _GLibCVersion(glibc_max.major, glibc_minor) - tag = "manylinux_{}_{}".format(*glibc_version) - if _is_compatible(tag, arch, glibc_version): - yield linux.replace("linux", tag) - # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags. - if glibc_version in _LEGACY_MANYLINUX_MAP: - legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version] - if _is_compatible(legacy_tag, arch, glibc_version): - yield linux.replace("linux", legacy_tag) + for arch in archs: + for glibc_max in glibc_max_list: + if glibc_max.major == too_old_glibc2.major: + min_minor = too_old_glibc2.minor + else: + # For other glibc major versions oldest supported is (x, 0). + min_minor = -1 + for glibc_minor in range(glibc_max.minor, min_minor, -1): + glibc_version = _GLibCVersion(glibc_max.major, glibc_minor) + tag = "manylinux_{}_{}".format(*glibc_version) + if _is_compatible(arch, glibc_version): + yield f"{tag}_{arch}" + # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags. + if glibc_version in _LEGACY_MANYLINUX_MAP: + legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version] + if _is_compatible(arch, glibc_version): + yield f"{legacy_tag}_{arch}" diff --git a/lib/pkg_resources/_vendor/packaging/_musllinux.py b/lib/pkg_resources/_vendor/packaging/_musllinux.py index 706ba600..86419df9 100644 --- a/lib/pkg_resources/_vendor/packaging/_musllinux.py +++ b/lib/pkg_resources/_vendor/packaging/_musllinux.py @@ -8,7 +8,7 @@ import functools import re import subprocess import sys -from typing import Iterator, NamedTuple, Optional +from typing import Iterator, NamedTuple, Optional, Sequence from ._elffile import ELFFile @@ -47,24 +47,27 @@ def _get_musl_version(executable: str) -> Optional[_MuslVersion]: return None if ld is None or "musl" not in ld: return None - proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True) + proc = subprocess.run([ld], stderr=subprocess.PIPE, text=True) return _parse_musl_version(proc.stderr) -def platform_tags(arch: str) -> Iterator[str]: +def platform_tags(archs: Sequence[str]) -> Iterator[str]: """Generate musllinux tags compatible to the current platform. - :param arch: Should be the part of platform tag after the ``linux_`` - prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a - prerequisite for the current platform to be musllinux-compatible. + :param archs: Sequence of compatible architectures. + The first one shall be the closest to the actual architecture and be the part of + platform tag after the ``linux_`` prefix, e.g. ``x86_64``. + The ``linux_`` prefix is assumed as a prerequisite for the current platform to + be musllinux-compatible. 
:returns: An iterator of compatible musllinux tags. """ sys_musl = _get_musl_version(sys.executable) if sys_musl is None: # Python not dynamically linked against musl. return - for minor in range(sys_musl.minor, -1, -1): - yield f"musllinux_{sys_musl.major}_{minor}_{arch}" + for arch in archs: + for minor in range(sys_musl.minor, -1, -1): + yield f"musllinux_{sys_musl.major}_{minor}_{arch}" if __name__ == "__main__": # pragma: no cover diff --git a/lib/pkg_resources/_vendor/packaging/_parser.py b/lib/pkg_resources/_vendor/packaging/_parser.py index 5a18b758..684df754 100644 --- a/lib/pkg_resources/_vendor/packaging/_parser.py +++ b/lib/pkg_resources/_vendor/packaging/_parser.py @@ -252,7 +252,13 @@ def _parse_version_many(tokenizer: Tokenizer) -> str: # Recursive descent parser for marker expression # -------------------------------------------------------------------------------------- def parse_marker(source: str) -> MarkerList: - return _parse_marker(Tokenizer(source, rules=DEFAULT_RULES)) + return _parse_full_marker(Tokenizer(source, rules=DEFAULT_RULES)) + + +def _parse_full_marker(tokenizer: Tokenizer) -> MarkerList: + retval = _parse_marker(tokenizer) + tokenizer.expect("END", expected="end of marker expression") + return retval def _parse_marker(tokenizer: Tokenizer) -> MarkerList: @@ -318,10 +324,7 @@ def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar: def process_env_var(env_var: str) -> Variable: - if ( - env_var == "platform_python_implementation" - or env_var == "python_implementation" - ): + if env_var in ("platform_python_implementation", "python_implementation"): return Variable("platform_python_implementation") else: return Variable(env_var) diff --git a/lib/pkg_resources/_vendor/packaging/metadata.py b/lib/pkg_resources/_vendor/packaging/metadata.py index e76a60c3..fb274930 100644 --- a/lib/pkg_resources/_vendor/packaging/metadata.py +++ b/lib/pkg_resources/_vendor/packaging/metadata.py @@ -5,23 +5,77 @@ import email.parser import email.policy import sys import typing -from typing import Dict, List, Optional, Tuple, Union, cast +from typing import ( + Any, + Callable, + Dict, + Generic, + List, + Optional, + Tuple, + Type, + Union, + cast, +) -if sys.version_info >= (3, 8): # pragma: no cover - from typing import TypedDict +from . import requirements, specifiers, utils, version as version_module + +T = typing.TypeVar("T") +if sys.version_info[:2] >= (3, 8): # pragma: no cover + from typing import Literal, TypedDict else: # pragma: no cover if typing.TYPE_CHECKING: - from typing_extensions import TypedDict + from typing_extensions import Literal, TypedDict else: try: - from typing_extensions import TypedDict + from typing_extensions import Literal, TypedDict except ImportError: + class Literal: + def __init_subclass__(*_args, **_kwargs): + pass + class TypedDict: def __init_subclass__(*_args, **_kwargs): pass +try: + ExceptionGroup +except NameError: # pragma: no cover + + class ExceptionGroup(Exception): # noqa: N818 + """A minimal implementation of :external:exc:`ExceptionGroup` from Python 3.11. + + If :external:exc:`ExceptionGroup` is already defined by Python itself, + that version is used instead. 
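One consequence of the ``_parse_full_marker`` change above: a standalone marker must now consume its entire source string, so trailing tokens that the old parser silently ignored surface as ``InvalidMarker``. A sketch against packaging 24.0 as a standalone install:

    >>> from packaging.markers import InvalidMarker, Marker
    >>> try:
    ...     Marker('python_version >= "3.8" junk')
    ... except InvalidMarker:
    ...     print('rejected')
    rejected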
+ """ + + message: str + exceptions: List[Exception] + + def __init__(self, message: str, exceptions: List[Exception]) -> None: + self.message = message + self.exceptions = exceptions + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.message!r}, {self.exceptions!r})" + +else: # pragma: no cover + ExceptionGroup = ExceptionGroup + + +class InvalidMetadata(ValueError): + """A metadata field contains invalid data.""" + + field: str + """The name of the field that contains invalid data.""" + + def __init__(self, field: str, message: str) -> None: + self.field = field + super().__init__(message) + + # The RawMetadata class attempts to make as few assumptions about the underlying # serialization formats as possible. The idea is that as long as a serialization # formats offer some very basic primitives in *some* way then we can support @@ -33,7 +87,8 @@ class RawMetadata(TypedDict, total=False): provided). The key is lower-case and underscores are used instead of dashes compared to the equivalent core metadata field. Any core metadata field that can be specified multiple times or can hold multiple values in a single - field have a key with a plural name. + field have a key with a plural name. See :class:`Metadata` whose attributes + match the keys of this dictionary. Core metadata fields that can be specified multiple times are stored as a list or dict depending on which is appropriate for the field. Any fields @@ -77,7 +132,7 @@ class RawMetadata(TypedDict, total=False): # but got stuck without ever being able to build consensus on # it and ultimately ended up withdrawn. # - # However, a number of tools had started emiting METADATA with + # However, a number of tools had started emitting METADATA with # `2.0` Metadata-Version, so for historical reasons, this version # was skipped. @@ -110,7 +165,7 @@ _STRING_FIELDS = { "version", } -_LIST_STRING_FIELDS = { +_LIST_FIELDS = { "classifiers", "dynamic", "obsoletes", @@ -125,6 +180,10 @@ _LIST_STRING_FIELDS = { "supported_platforms", } +_DICT_FIELDS = { + "project_urls", +} + def _parse_keywords(data: str) -> List[str]: """Split a string of comma-separate keyboards into a list of keywords.""" @@ -230,10 +289,11 @@ _EMAIL_TO_RAW_MAPPING = { "supported-platform": "supported_platforms", "version": "version", } +_RAW_TO_EMAIL_MAPPING = {raw: email for email, raw in _EMAIL_TO_RAW_MAPPING.items()} def parse_email(data: Union[bytes, str]) -> Tuple[RawMetadata, Dict[str, List[str]]]: - """Parse a distribution's metadata. + """Parse a distribution's metadata stored as email headers (e.g. from ``METADATA``). This function returns a two-item tuple of dicts. The first dict is of recognized fields from the core metadata specification. Fields that can be @@ -267,7 +327,7 @@ def parse_email(data: Union[bytes, str]) -> Tuple[RawMetadata, Dict[str, List[st # We use get_all() here, even for fields that aren't multiple use, # because otherwise someone could have e.g. two Name fields, and we # would just silently ignore it rather than doing something about it. - headers = parsed.get_all(name) + headers = parsed.get_all(name) or [] # The way the email module works when parsing bytes is that it # unconditionally decodes the bytes as ascii using the surrogateescape @@ -349,7 +409,7 @@ def parse_email(data: Union[bytes, str]) -> Tuple[RawMetadata, Dict[str, List[st # If this is one of our list of string fields, then we can just assign # the value, since email *only* has strings, and our get_all() call # above ensures that this is a list. 
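``parse_email`` above intentionally returns two mappings so callers can tell cleanly parsed fields apart from leftovers. A sketch on a tiny metadata blob (the ``Mystery`` header is an invented unknown field):

    >>> from packaging.metadata import parse_email
    >>> raw, unparsed = parse_email(
    ...     'Metadata-Version: 2.1\nName: sample\nVersion: 1.0\nMystery: x\n'
    ... )
    >>> raw['name'], raw['version']
    ('sample', '1.0')
    >>> unparsed
    {'mystery': ['x']}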
- elif raw_name in _LIST_STRING_FIELDS: + elif raw_name in _LIST_FIELDS: raw[raw_name] = value # Special Case: Keywords # The keywords field is implemented in the metadata spec as a str, @@ -406,3 +466,360 @@ def parse_email(data: Union[bytes, str]) -> Tuple[RawMetadata, Dict[str, List[st # way this function is implemented, our `TypedDict` can only have valid key # names. return cast(RawMetadata, raw), unparsed + + +_NOT_FOUND = object() + + +# Keep the two values in sync. +_VALID_METADATA_VERSIONS = ["1.0", "1.1", "1.2", "2.1", "2.2", "2.3"] +_MetadataVersion = Literal["1.0", "1.1", "1.2", "2.1", "2.2", "2.3"] + +_REQUIRED_ATTRS = frozenset(["metadata_version", "name", "version"]) + + +class _Validator(Generic[T]): + """Validate a metadata field. + + All _process_*() methods correspond to a core metadata field. The method is + called with the field's raw value. If the raw value is valid it is returned + in its "enriched" form (e.g. ``version.Version`` for the ``Version`` field). + If the raw value is invalid, :exc:`InvalidMetadata` is raised (with a cause + as appropriate). + """ + + name: str + raw_name: str + added: _MetadataVersion + + def __init__( + self, + *, + added: _MetadataVersion = "1.0", + ) -> None: + self.added = added + + def __set_name__(self, _owner: "Metadata", name: str) -> None: + self.name = name + self.raw_name = _RAW_TO_EMAIL_MAPPING[name] + + def __get__(self, instance: "Metadata", _owner: Type["Metadata"]) -> T: + # With Python 3.8, the caching can be replaced with functools.cached_property(). + # No need to check the cache as attribute lookup will resolve into the + # instance's __dict__ before __get__ is called. + cache = instance.__dict__ + value = instance._raw.get(self.name) + + # To make the _process_* methods easier, we'll check if the value is None + # and if this field is NOT a required attribute, and if both of those + # things are true, we'll skip the the converter. This will mean that the + # converters never have to deal with the None union. + if self.name in _REQUIRED_ATTRS or value is not None: + try: + converter: Callable[[Any], T] = getattr(self, f"_process_{self.name}") + except AttributeError: + pass + else: + value = converter(value) + + cache[self.name] = value + try: + del instance._raw[self.name] # type: ignore[misc] + except KeyError: + pass + + return cast(T, value) + + def _invalid_metadata( + self, msg: str, cause: Optional[Exception] = None + ) -> InvalidMetadata: + exc = InvalidMetadata( + self.raw_name, msg.format_map({"field": repr(self.raw_name)}) + ) + exc.__cause__ = cause + return exc + + def _process_metadata_version(self, value: str) -> _MetadataVersion: + # Implicitly makes Metadata-Version required. + if value not in _VALID_METADATA_VERSIONS: + raise self._invalid_metadata(f"{value!r} is not a valid metadata version") + return cast(_MetadataVersion, value) + + def _process_name(self, value: str) -> str: + if not value: + raise self._invalid_metadata("{field} is a required field") + # Validate the name as a side-effect. 
+ try: + utils.canonicalize_name(value, validate=True) + except utils.InvalidName as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) + else: + return value + + def _process_version(self, value: str) -> version_module.Version: + if not value: + raise self._invalid_metadata("{field} is a required field") + try: + return version_module.parse(value) + except version_module.InvalidVersion as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) + + def _process_summary(self, value: str) -> str: + """Check the field contains no newlines.""" + if "\n" in value: + raise self._invalid_metadata("{field} must be a single line") + return value + + def _process_description_content_type(self, value: str) -> str: + content_types = {"text/plain", "text/x-rst", "text/markdown"} + message = email.message.EmailMessage() + message["content-type"] = value + + content_type, parameters = ( + # Defaults to `text/plain` if parsing failed. + message.get_content_type().lower(), + message["content-type"].params, + ) + # Check if content-type is valid or defaulted to `text/plain` and thus was + # not parseable. + if content_type not in content_types or content_type not in value.lower(): + raise self._invalid_metadata( + f"{{field}} must be one of {list(content_types)}, not {value!r}" + ) + + charset = parameters.get("charset", "UTF-8") + if charset != "UTF-8": + raise self._invalid_metadata( + f"{{field}} can only specify the UTF-8 charset, not {list(charset)}" + ) + + markdown_variants = {"GFM", "CommonMark"} + variant = parameters.get("variant", "GFM") # Use an acceptable default. + if content_type == "text/markdown" and variant not in markdown_variants: + raise self._invalid_metadata( + f"valid Markdown variants for {{field}} are {list(markdown_variants)}, " + f"not {variant!r}", + ) + return value + + def _process_dynamic(self, value: List[str]) -> List[str]: + for dynamic_field in map(str.lower, value): + if dynamic_field in {"name", "version", "metadata-version"}: + raise self._invalid_metadata( + f"{value!r} is not allowed as a dynamic field" + ) + elif dynamic_field not in _EMAIL_TO_RAW_MAPPING: + raise self._invalid_metadata(f"{value!r} is not a valid dynamic field") + return list(map(str.lower, value)) + + def _process_provides_extra( + self, + value: List[str], + ) -> List[utils.NormalizedName]: + normalized_names = [] + try: + for name in value: + normalized_names.append(utils.canonicalize_name(name, validate=True)) + except utils.InvalidName as exc: + raise self._invalid_metadata( + f"{name!r} is invalid for {{field}}", cause=exc + ) + else: + return normalized_names + + def _process_requires_python(self, value: str) -> specifiers.SpecifierSet: + try: + return specifiers.SpecifierSet(value) + except specifiers.InvalidSpecifier as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) + + def _process_requires_dist( + self, + value: List[str], + ) -> List[requirements.Requirement]: + reqs = [] + try: + for req in value: + reqs.append(requirements.Requirement(req)) + except requirements.InvalidRequirement as exc: + raise self._invalid_metadata(f"{req!r} is invalid for {{field}}", cause=exc) + else: + return reqs + + +class Metadata: + """Representation of distribution metadata. + + Compared to :class:`RawMetadata`, this class provides objects representing + metadata fields instead of only using built-in types. 
Any invalid metadata + will cause :exc:`InvalidMetadata` to be raised (with a + :py:attr:`~BaseException.__cause__` attribute as appropriate). + """ + + _raw: RawMetadata + + @classmethod + def from_raw(cls, data: RawMetadata, *, validate: bool = True) -> "Metadata": + """Create an instance from :class:`RawMetadata`. + + If *validate* is true, all metadata will be validated. All exceptions + related to validation will be gathered and raised as an :class:`ExceptionGroup`. + """ + ins = cls() + ins._raw = data.copy() # Mutations occur due to caching enriched values. + + if validate: + exceptions: List[Exception] = [] + try: + metadata_version = ins.metadata_version + metadata_age = _VALID_METADATA_VERSIONS.index(metadata_version) + except InvalidMetadata as metadata_version_exc: + exceptions.append(metadata_version_exc) + metadata_version = None + + # Make sure to check for the fields that are present, the required + # fields (so their absence can be reported). + fields_to_check = frozenset(ins._raw) | _REQUIRED_ATTRS + # Remove fields that have already been checked. + fields_to_check -= {"metadata_version"} + + for key in fields_to_check: + try: + if metadata_version: + # Can't use getattr() as that triggers descriptor protocol which + # will fail due to no value for the instance argument. + try: + field_metadata_version = cls.__dict__[key].added + except KeyError: + exc = InvalidMetadata(key, f"unrecognized field: {key!r}") + exceptions.append(exc) + continue + field_age = _VALID_METADATA_VERSIONS.index( + field_metadata_version + ) + if field_age > metadata_age: + field = _RAW_TO_EMAIL_MAPPING[key] + exc = InvalidMetadata( + field, + "{field} introduced in metadata version " + "{field_metadata_version}, not {metadata_version}", + ) + exceptions.append(exc) + continue + getattr(ins, key) + except InvalidMetadata as exc: + exceptions.append(exc) + + if exceptions: + raise ExceptionGroup("invalid metadata", exceptions) + + return ins + + @classmethod + def from_email( + cls, data: Union[bytes, str], *, validate: bool = True + ) -> "Metadata": + """Parse metadata from email headers. + + If *validate* is true, the metadata will be validated. All exceptions + related to validation will be gathered and raised as an :class:`ExceptionGroup`. 
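Taken together, the validator descriptors below let ``Metadata.from_email`` parse and validate in one step, collecting every field error into a single ``ExceptionGroup`` rather than stopping at the first failure. A happy-path sketch, again assuming standalone packaging 24.0:

    >>> from packaging.metadata import Metadata
    >>> md = Metadata.from_email('Metadata-Version: 2.1\nName: Sample\nVersion: 1.0\n')
    >>> md.name, str(md.version), md.metadata_version
    ('Sample', '1.0', '2.1')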
+ """ + raw, unparsed = parse_email(data) + + if validate: + exceptions: list[Exception] = [] + for unparsed_key in unparsed: + if unparsed_key in _EMAIL_TO_RAW_MAPPING: + message = f"{unparsed_key!r} has invalid data" + else: + message = f"unrecognized field: {unparsed_key!r}" + exceptions.append(InvalidMetadata(unparsed_key, message)) + + if exceptions: + raise ExceptionGroup("unparsed", exceptions) + + try: + return cls.from_raw(raw, validate=validate) + except ExceptionGroup as exc_group: + raise ExceptionGroup( + "invalid or unparsed metadata", exc_group.exceptions + ) from None + + metadata_version: _Validator[_MetadataVersion] = _Validator() + """:external:ref:`core-metadata-metadata-version` + (required; validated to be a valid metadata version)""" + name: _Validator[str] = _Validator() + """:external:ref:`core-metadata-name` + (required; validated using :func:`~packaging.utils.canonicalize_name` and its + *validate* parameter)""" + version: _Validator[version_module.Version] = _Validator() + """:external:ref:`core-metadata-version` (required)""" + dynamic: _Validator[Optional[List[str]]] = _Validator( + added="2.2", + ) + """:external:ref:`core-metadata-dynamic` + (validated against core metadata field names and lowercased)""" + platforms: _Validator[Optional[List[str]]] = _Validator() + """:external:ref:`core-metadata-platform`""" + supported_platforms: _Validator[Optional[List[str]]] = _Validator(added="1.1") + """:external:ref:`core-metadata-supported-platform`""" + summary: _Validator[Optional[str]] = _Validator() + """:external:ref:`core-metadata-summary` (validated to contain no newlines)""" + description: _Validator[Optional[str]] = _Validator() # TODO 2.1: can be in body + """:external:ref:`core-metadata-description`""" + description_content_type: _Validator[Optional[str]] = _Validator(added="2.1") + """:external:ref:`core-metadata-description-content-type` (validated)""" + keywords: _Validator[Optional[List[str]]] = _Validator() + """:external:ref:`core-metadata-keywords`""" + home_page: _Validator[Optional[str]] = _Validator() + """:external:ref:`core-metadata-home-page`""" + download_url: _Validator[Optional[str]] = _Validator(added="1.1") + """:external:ref:`core-metadata-download-url`""" + author: _Validator[Optional[str]] = _Validator() + """:external:ref:`core-metadata-author`""" + author_email: _Validator[Optional[str]] = _Validator() + """:external:ref:`core-metadata-author-email`""" + maintainer: _Validator[Optional[str]] = _Validator(added="1.2") + """:external:ref:`core-metadata-maintainer`""" + maintainer_email: _Validator[Optional[str]] = _Validator(added="1.2") + """:external:ref:`core-metadata-maintainer-email`""" + license: _Validator[Optional[str]] = _Validator() + """:external:ref:`core-metadata-license`""" + classifiers: _Validator[Optional[List[str]]] = _Validator(added="1.1") + """:external:ref:`core-metadata-classifier`""" + requires_dist: _Validator[Optional[List[requirements.Requirement]]] = _Validator( + added="1.2" + ) + """:external:ref:`core-metadata-requires-dist`""" + requires_python: _Validator[Optional[specifiers.SpecifierSet]] = _Validator( + added="1.2" + ) + """:external:ref:`core-metadata-requires-python`""" + # Because `Requires-External` allows for non-PEP 440 version specifiers, we + # don't do any processing on the values. 
+ requires_external: _Validator[Optional[List[str]]] = _Validator(added="1.2") + """:external:ref:`core-metadata-requires-external`""" + project_urls: _Validator[Optional[Dict[str, str]]] = _Validator(added="1.2") + """:external:ref:`core-metadata-project-url`""" + # PEP 685 lets us raise an error if an extra doesn't pass `Name` validation + # regardless of metadata version. + provides_extra: _Validator[Optional[List[utils.NormalizedName]]] = _Validator( + added="2.1", + ) + """:external:ref:`core-metadata-provides-extra`""" + provides_dist: _Validator[Optional[List[str]]] = _Validator(added="1.2") + """:external:ref:`core-metadata-provides-dist`""" + obsoletes_dist: _Validator[Optional[List[str]]] = _Validator(added="1.2") + """:external:ref:`core-metadata-obsoletes-dist`""" + requires: _Validator[Optional[List[str]]] = _Validator(added="1.1") + """``Requires`` (deprecated)""" + provides: _Validator[Optional[List[str]]] = _Validator(added="1.1") + """``Provides`` (deprecated)""" + obsoletes: _Validator[Optional[List[str]]] = _Validator(added="1.1") + """``Obsoletes`` (deprecated)""" diff --git a/lib/pkg_resources/_vendor/packaging/requirements.py b/lib/pkg_resources/_vendor/packaging/requirements.py index f34bfa85..bdc43a7e 100644 --- a/lib/pkg_resources/_vendor/packaging/requirements.py +++ b/lib/pkg_resources/_vendor/packaging/requirements.py @@ -2,13 +2,13 @@ # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. -import urllib.parse -from typing import Any, List, Optional, Set +from typing import Any, Iterator, Optional, Set from ._parser import parse_requirement as _parse_requirement from ._tokenizer import ParserSyntaxError from .markers import Marker, _normalize_extra_values from .specifiers import SpecifierSet +from .utils import canonicalize_name class InvalidRequirement(ValueError): @@ -37,57 +37,52 @@ class Requirement: raise InvalidRequirement(str(e)) from e self.name: str = parsed.name - if parsed.url: - parsed_url = urllib.parse.urlparse(parsed.url) - if parsed_url.scheme == "file": - if urllib.parse.urlunparse(parsed_url) != parsed.url: - raise InvalidRequirement("Invalid URL given") - elif not (parsed_url.scheme and parsed_url.netloc) or ( - not parsed_url.scheme and not parsed_url.netloc - ): - raise InvalidRequirement(f"Invalid URL: {parsed.url}") - self.url: Optional[str] = parsed.url - else: - self.url = None - self.extras: Set[str] = set(parsed.extras if parsed.extras else []) + self.url: Optional[str] = parsed.url or None + self.extras: Set[str] = set(parsed.extras or []) self.specifier: SpecifierSet = SpecifierSet(parsed.specifier) self.marker: Optional[Marker] = None if parsed.marker is not None: self.marker = Marker.__new__(Marker) self.marker._markers = _normalize_extra_values(parsed.marker) - def __str__(self) -> str: - parts: List[str] = [self.name] + def _iter_parts(self, name: str) -> Iterator[str]: + yield name if self.extras: formatted_extras = ",".join(sorted(self.extras)) - parts.append(f"[{formatted_extras}]") + yield f"[{formatted_extras}]" if self.specifier: - parts.append(str(self.specifier)) + yield str(self.specifier) if self.url: - parts.append(f"@ {self.url}") + yield f"@ {self.url}" if self.marker: - parts.append(" ") + yield " " if self.marker: - parts.append(f"; {self.marker}") + yield f"; {self.marker}" - return "".join(parts) + def __str__(self) -> str: + return "".join(self._iter_parts(self.name)) def __repr__(self) -> str: return f"" def __hash__(self) -> int: - return 
hash((self.__class__.__name__, str(self))) + return hash( + ( + self.__class__.__name__, + *self._iter_parts(canonicalize_name(self.name)), + ) + ) def __eq__(self, other: Any) -> bool: if not isinstance(other, Requirement): return NotImplemented return ( - self.name == other.name + canonicalize_name(self.name) == canonicalize_name(other.name) and self.extras == other.extras and self.specifier == other.specifier and self.url == other.url diff --git a/lib/pkg_resources/_vendor/packaging/specifiers.py b/lib/pkg_resources/_vendor/packaging/specifiers.py index ba8fe37b..2d015bab 100644 --- a/lib/pkg_resources/_vendor/packaging/specifiers.py +++ b/lib/pkg_resources/_vendor/packaging/specifiers.py @@ -11,17 +11,7 @@ import abc import itertools import re -from typing import ( - Callable, - Iterable, - Iterator, - List, - Optional, - Set, - Tuple, - TypeVar, - Union, -) +from typing import Callable, Iterable, Iterator, List, Optional, Tuple, TypeVar, Union from .utils import canonicalize_version from .version import Version @@ -383,7 +373,7 @@ class Specifier(BaseSpecifier): # We want everything but the last item in the version, but we want to # ignore suffix segments. - prefix = ".".join( + prefix = _version_join( list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1] ) @@ -404,13 +394,13 @@ class Specifier(BaseSpecifier): ) # Get the normalized version string ignoring the trailing .* normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False) - # Split the spec out by dots, and pretend that there is an implicit - # dot in between a release segment and a pre-release segment. + # Split the spec out by bangs and dots, and pretend that there is + # an implicit dot in between a release segment and a pre-release segment. split_spec = _version_split(normalized_spec) - # Split the prospective version out by dots, and pretend that there - # is an implicit dot in between a release segment and a pre-release - # segment. + # Split the prospective version out by bangs and dots, and pretend + # that there is an implicit dot in between a release segment and + # a pre-release segment. split_prospective = _version_split(normalized_prospective) # 0-pad the prospective version before shortening it to get the correct @@ -644,8 +634,19 @@ _prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") def _version_split(version: str) -> List[str]: + """Split version into components. + + The split components are intended for version comparison. The logic does + not attempt to retain the original version string, so joining the + components back with :func:`_version_join` may not produce the original + version string. + """ result: List[str] = [] - for item in version.split("."): + + epoch, _, rest = version.rpartition("!") + result.append(epoch or "0") + + for item in rest.split("."): match = _prefix_regex.search(item) if match: result.extend(match.groups()) @@ -654,6 +655,17 @@ def _version_split(version: str) -> List[str]: return result +def _version_join(components: List[str]) -> str: + """Join split version components into a version string. + + This function assumes the input came from :func:`_version_split`, where the + first component must be the epoch (either empty or numeric), and all other + components numeric. 
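With ``Requirement.__eq__`` and ``__hash__`` above now built on ``canonicalize_name``, requirements that differ only in name spelling compare and hash as equal. A sketch:

    >>> from packaging.requirements import Requirement
    >>> Requirement('Foo.Bar>=1.0') == Requirement('foo-bar>=1.0')
    True
    >>> len({Requirement('Foo'), Requirement('foo')})
    1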
+ """ + epoch, *rest = components + return f"{epoch}!{'.'.join(rest)}" + + def _is_not_suffix(segment: str) -> bool: return not any( segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post") @@ -675,7 +687,10 @@ def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0]))) right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0]))) - return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split))) + return ( + list(itertools.chain.from_iterable(left_split)), + list(itertools.chain.from_iterable(right_split)), + ) class SpecifierSet(BaseSpecifier): @@ -707,14 +722,8 @@ class SpecifierSet(BaseSpecifier): # strip each item to remove leading/trailing whitespace. split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] - # Parsed each individual specifier, attempting first to make it a - # Specifier. - parsed: Set[Specifier] = set() - for specifier in split_specifiers: - parsed.add(Specifier(specifier)) - - # Turn our parsed specifiers into a frozen set and save them for later. - self._specs = frozenset(parsed) + # Make each individual specifier a Specifier and save in a frozen set for later. + self._specs = frozenset(map(Specifier, split_specifiers)) # Store our prereleases value so we can use it later to determine if # we accept prereleases or not. diff --git a/lib/pkg_resources/_vendor/packaging/tags.py b/lib/pkg_resources/_vendor/packaging/tags.py index 76d24341..89f19261 100644 --- a/lib/pkg_resources/_vendor/packaging/tags.py +++ b/lib/pkg_resources/_vendor/packaging/tags.py @@ -4,6 +4,8 @@ import logging import platform +import re +import struct import subprocess import sys import sysconfig @@ -37,7 +39,7 @@ INTERPRETER_SHORT_NAMES: Dict[str, str] = { } -_32_BIT_INTERPRETER = sys.maxsize <= 2**32 +_32_BIT_INTERPRETER = struct.calcsize("P") == 4 class Tag: @@ -123,20 +125,37 @@ def _normalize_string(string: str) -> str: return string.replace(".", "_").replace("-", "_").replace(" ", "_") -def _abi3_applies(python_version: PythonVersion) -> bool: +def _is_threaded_cpython(abis: List[str]) -> bool: + """ + Determine if the ABI corresponds to a threaded (`--disable-gil`) build. + + The threaded builds are indicated by a "t" in the abiflags. + """ + if len(abis) == 0: + return False + # expect e.g., cp313 + m = re.match(r"cp\d+(.*)", abis[0]) + if not m: + return False + abiflags = m.group(1) + return "t" in abiflags + + +def _abi3_applies(python_version: PythonVersion, threading: bool) -> bool: """ Determine if the Python version supports abi3. - PEP 384 was first implemented in Python 3.2. + PEP 384 was first implemented in Python 3.2. The threaded (`--disable-gil`) + builds do not support abi3. """ - return len(python_version) > 1 and tuple(python_version) >= (3, 2) + return len(python_version) > 1 and tuple(python_version) >= (3, 2) and not threading def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]: py_version = tuple(py_version) # To allow for version comparison. 
abis = [] version = _version_nodot(py_version[:2]) - debug = pymalloc = ucs4 = "" + threading = debug = pymalloc = ucs4 = "" with_debug = _get_config_var("Py_DEBUG", warn) has_refcount = hasattr(sys, "gettotalrefcount") # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled @@ -145,6 +164,8 @@ def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]: has_ext = "_d.pyd" in EXTENSION_SUFFIXES if with_debug or (with_debug is None and (has_refcount or has_ext)): debug = "d" + if py_version >= (3, 13) and _get_config_var("Py_GIL_DISABLED", warn): + threading = "t" if py_version < (3, 8): with_pymalloc = _get_config_var("WITH_PYMALLOC", warn) if with_pymalloc or with_pymalloc is None: @@ -158,13 +179,8 @@ def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]: elif debug: # Debug builds can also load "normal" extension modules. # We can also assume no UCS-4 or pymalloc requirement. - abis.append(f"cp{version}") - abis.insert( - 0, - "cp{version}{debug}{pymalloc}{ucs4}".format( - version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4 - ), - ) + abis.append(f"cp{version}{threading}") + abis.insert(0, f"cp{version}{threading}{debug}{pymalloc}{ucs4}") return abis @@ -212,11 +228,14 @@ def cpython_tags( for abi in abis: for platform_ in platforms: yield Tag(interpreter, abi, platform_) - if _abi3_applies(python_version): + + threading = _is_threaded_cpython(abis) + use_abi3 = _abi3_applies(python_version, threading) + if use_abi3: yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms) yield from (Tag(interpreter, "none", platform_) for platform_ in platforms) - if _abi3_applies(python_version): + if use_abi3: for minor_version in range(python_version[1] - 1, 1, -1): for platform_ in platforms: interpreter = "cp{version}".format( @@ -406,7 +425,7 @@ def mac_platforms( check=True, env={"SYSTEM_VERSION_COMPAT": "0"}, stdout=subprocess.PIPE, - universal_newlines=True, + text=True, ).stdout version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2]))) else: @@ -469,15 +488,21 @@ def mac_platforms( def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]: linux = _normalize_string(sysconfig.get_platform()) + if not linux.startswith("linux_"): + # we should never be here, just yield the sysconfig one and return + yield linux + return if is_32bit: if linux == "linux_x86_64": linux = "linux_i686" elif linux == "linux_aarch64": - linux = "linux_armv7l" + linux = "linux_armv8l" _, arch = linux.split("_", 1) - yield from _manylinux.platform_tags(linux, arch) - yield from _musllinux.platform_tags(arch) - yield linux + archs = {"armv8l": ["armv8l", "armv7l"]}.get(arch, [arch]) + yield from _manylinux.platform_tags(archs) + yield from _musllinux.platform_tags(archs) + for arch in archs: + yield f"linux_{arch}" def _generic_platforms() -> Iterator[str]: diff --git a/lib/pkg_resources/_vendor/packaging/utils.py b/lib/pkg_resources/_vendor/packaging/utils.py index 33c613b7..c2c2f75a 100644 --- a/lib/pkg_resources/_vendor/packaging/utils.py +++ b/lib/pkg_resources/_vendor/packaging/utils.py @@ -12,6 +12,12 @@ BuildTag = Union[Tuple[()], Tuple[int, str]] NormalizedName = NewType("NormalizedName", str) +class InvalidName(ValueError): + """ + An invalid distribution name; users should refer to the packaging user guide. + """ + + class InvalidWheelFilename(ValueError): """ An invalid wheel filename was found, users should refer to PEP 427. 
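The hunk above introduces `InvalidName`, and the following hunk wires it into `canonicalize_name` behind a new keyword-only `validate` flag. A minimal sketch of the resulting behavior, using the public `packaging.utils` API rather than the vendored copy (the input names are made up for illustration):

```python
from packaging.utils import InvalidName, canonicalize_name

# Normal PEP 503 folding: runs of ".", "-", "_" collapse to "-", lowercased.
print(canonicalize_name("Foo.Bar_baz"))  # foo-bar-baz

# With validate=True the *raw* name must satisfy the core-metadata Name rule
# (start and end with an ASCII letter or digit); validation happens before
# canonicalization.
try:
    canonicalize_name("-leading-hyphen", validate=True)
except InvalidName as exc:
    print(exc)  # name is invalid: '-leading-hyphen'
```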
@@ -24,17 +30,28 @@ class InvalidSdistFilename(ValueError):
     """
 
 
+# Core metadata spec for `Name`
+_validate_regex = re.compile(
+    r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE
+)
 _canonicalize_regex = re.compile(r"[-_.]+")
+_normalized_regex = re.compile(r"^([a-z0-9]|[a-z0-9]([a-z0-9-](?!--))*[a-z0-9])$")
 # PEP 427: The build number must start with a digit.
 _build_tag_regex = re.compile(r"(\d+)(.*)")
 
 
-def canonicalize_name(name: str) -> NormalizedName:
+def canonicalize_name(name: str, *, validate: bool = False) -> NormalizedName:
+    if validate and not _validate_regex.match(name):
+        raise InvalidName(f"name is invalid: {name!r}")
     # This is taken from PEP 503.
     value = _canonicalize_regex.sub("-", name).lower()
     return cast(NormalizedName, value)
 
 
+def is_normalized_name(name: str) -> bool:
+    return _normalized_regex.match(name) is not None
+
+
 def canonicalize_version(
     version: Union[Version, str], *, strip_trailing_zero: bool = True
 ) -> str:
@@ -100,11 +117,18 @@ def parse_wheel_filename(
     parts = filename.split("-", dashes - 2)
     name_part = parts[0]
-    # See PEP 427 for the rules on escaping the project name
+    # See PEP 427 for the rules on escaping the project name.
     if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:
         raise InvalidWheelFilename(f"Invalid project name: {filename}")
     name = canonicalize_name(name_part)
-    version = Version(parts[1])
+
+    try:
+        version = Version(parts[1])
+    except InvalidVersion as e:
+        raise InvalidWheelFilename(
+            f"Invalid wheel filename (invalid version): {filename}"
+        ) from e
+
     if dashes == 5:
         build_part = parts[2]
         build_match = _build_tag_regex.match(build_part)
@@ -137,5 +161,12 @@ def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]:
         raise InvalidSdistFilename(f"Invalid sdist filename: {filename}")
 
     name = canonicalize_name(name_part)
-    version = Version(version_part)
+
+    try:
+        version = Version(version_part)
+    except InvalidVersion as e:
+        raise InvalidSdistFilename(
+            f"Invalid sdist filename (invalid version): {filename}"
+        ) from e
+
     return (name, version)
diff --git a/lib/pkg_resources/_vendor/packaging/version.py b/lib/pkg_resources/_vendor/packaging/version.py
index b30e8cbf..5faab9bd 100644
--- a/lib/pkg_resources/_vendor/packaging/version.py
+++ b/lib/pkg_resources/_vendor/packaging/version.py
@@ -7,37 +7,39 @@
     from packaging.version import parse, Version
 """
 
-import collections
 import itertools
 import re
-from typing import Any, Callable, Optional, SupportsInt, Tuple, Union
+from typing import Any, Callable, NamedTuple, Optional, SupportsInt, Tuple, Union
 
 from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
 
 __all__ = ["VERSION_PATTERN", "parse", "Version", "InvalidVersion"]
 
-InfiniteTypes = Union[InfinityType, NegativeInfinityType]
-PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
-SubLocalType = Union[InfiniteTypes, int, str]
-LocalType = Union[
+LocalType = Tuple[Union[int, str], ...]
+
+CmpPrePostDevType = Union[InfinityType, NegativeInfinityType, Tuple[str, int]]
+CmpLocalType = Union[
     NegativeInfinityType,
-    Tuple[
-        Union[
-            SubLocalType,
-            Tuple[SubLocalType, str],
-            Tuple[NegativeInfinityType, SubLocalType],
-        ],
-        ...,
-    ],
+    Tuple[Union[Tuple[int, str], Tuple[NegativeInfinityType, Union[int, str]]], ...],
 ]
 CmpKey = Tuple[
-    int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
+    int,
+    Tuple[int, ...],
+    CmpPrePostDevType,
+    CmpPrePostDevType,
+    CmpPrePostDevType,
+    CmpLocalType,
 ]
 VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool]
 
-_Version = collections.namedtuple(
-    "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
-)
+
+class _Version(NamedTuple):
+    epoch: int
+    release: Tuple[int, ...]
+    dev: Optional[Tuple[str, int]]
+    pre: Optional[Tuple[str, int]]
+    post: Optional[Tuple[str, int]]
+    local: Optional[LocalType]
 
 
 def parse(version: str) -> "Version":
@@ -117,7 +119,7 @@ _VERSION_PATTERN = r"""
         (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
         (?P<pre>                                          # pre-release
             [-_\.]?
-            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            (?P<pre_l>alpha|a|beta|b|preview|pre|c|rc)
             [-_\.]?
             (?P<pre_n>[0-9]+)?
         )?
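The reordering of the pre-release spellings above is not cosmetic: Python's `re` alternation is leftmost-first rather than longest-match, so with the short spellings listed first the group can capture `pre` inside `preview` and leave the rest of the pattern to backtrack. A standalone illustration (not part of the patch):

```python
import re

# Alternation picks the first branch that lets the overall match succeed,
# so the old ordering captures "pre" out of "preview" ...
print(re.match(r"(a|b|c|rc|alpha|beta|pre|preview)", "preview").group(1))  # pre
# ... while the new longest-first ordering captures the full spelling.
print(re.match(r"(alpha|a|beta|b|preview|pre|c|rc)", "preview").group(1))  # preview
```

In the full `VERSION_PATTERN` both orderings ultimately accept strings like `1.0preview1` because the engine backtracks, but the new order captures the whole spelling on the first attempt.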
@@ -269,8 +271,7 @@ class Version(_BaseVersion):
         >>> Version("1!2.0.0").epoch
         1
         """
-        _epoch: int = self._version.epoch
-        return _epoch
+        return self._version.epoch
 
     @property
     def release(self) -> Tuple[int, ...]:
@@ -286,8 +287,7 @@ class Version(_BaseVersion):
         Includes trailing zeroes but not the epoch or any pre-release / development /
         post-release suffixes.
         """
-        _release: Tuple[int, ...] = self._version.release
-        return _release
+        return self._version.release
 
     @property
     def pre(self) -> Optional[Tuple[str, int]]:
@@ -302,8 +302,7 @@ class Version(_BaseVersion):
         >>> Version("1.2.3rc1").pre
         ('rc', 1)
         """
-        _pre: Optional[Tuple[str, int]] = self._version.pre
-        return _pre
+        return self._version.pre
 
     @property
     def post(self) -> Optional[int]:
@@ -451,7 +450,7 @@ class Version(_BaseVersion):
 
 
 def _parse_letter_version(
-    letter: str, number: Union[str, bytes, SupportsInt]
+    letter: Optional[str], number: Union[str, bytes, SupportsInt, None]
 ) -> Optional[Tuple[str, int]]:
 
     if letter:
@@ -489,7 +488,7 @@ def _parse_letter_version(
 _local_version_separators = re.compile(r"[\._-]")
 
 
-def _parse_local_version(local: str) -> Optional[LocalType]:
+def _parse_local_version(local: Optional[str]) -> Optional[LocalType]:
     """
     Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
     """
@@ -507,7 +506,7 @@ def _cmpkey(
     pre: Optional[Tuple[str, int]],
     post: Optional[Tuple[str, int]],
     dev: Optional[Tuple[str, int]],
-    local: Optional[Tuple[SubLocalType]],
+    local: Optional[LocalType],
 ) -> CmpKey:
 
     # When we compare a release version, we want to compare it with all of the
@@ -524,7 +523,7 @@ def _cmpkey(
     # if there is not a pre or a post segment. If we have one of those then
     # the normal sorting rules will handle this case correctly.
     if pre is None and post is None and dev is not None:
-        _pre: PrePostDevType = NegativeInfinity
+        _pre: CmpPrePostDevType = NegativeInfinity
     # Versions without a pre-release (except as noted above) should sort after
     # those with one.
     elif pre is None:
@@ -534,21 +533,21 @@ def _cmpkey(
 
     # Versions without a post segment should sort before those with one.
     if post is None:
-        _post: PrePostDevType = NegativeInfinity
+        _post: CmpPrePostDevType = NegativeInfinity
 
     else:
         _post = post
 
     # Versions without a development segment should sort after those with one.
     if dev is None:
-        _dev: PrePostDevType = Infinity
+        _dev: CmpPrePostDevType = Infinity
 
     else:
         _dev = dev
 
     if local is None:
         # Versions without a local segment should sort before those with one.
-        _local: LocalType = NegativeInfinity
+        _local: CmpLocalType = NegativeInfinity
     else:
         # Versions with a local segment need that segment parsed to implement
         # the sorting rules in PEP440.
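The `_cmpkey` hunks above keep the sentinel logic intact while tightening its types: `Infinity`/`NegativeInfinity` stand in for absent pre, post, dev, and local segments so that, for the same release, dev-only versions sort first and post-releases last, with a local segment sorting just above the bare version. A quick check against the public `packaging` API (version strings chosen for illustration):

```python
from packaging.version import Version

ordered = sorted(map(Version, ["1.0.post1", "1.0", "1.0a1", "1.0.dev1", "1.0+local"]))
print([str(v) for v in ordered])
# ['1.0.dev1', '1.0a1', '1.0', '1.0+local', '1.0.post1']
```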
diff --git a/lib/pkg_resources/_vendor/ruff.toml b/lib/pkg_resources/_vendor/ruff.toml
new file mode 100644
index 00000000..00fee625
--- /dev/null
+++ b/lib/pkg_resources/_vendor/ruff.toml
@@ -0,0 +1 @@
+exclude = ["*"]
diff --git a/lib/pkg_resources/_vendor/typing_extensions.py b/lib/pkg_resources/_vendor/typing_extensions.py
deleted file mode 100644
index ef42417c..00000000
--- a/lib/pkg_resources/_vendor/typing_extensions.py
+++ /dev/null
@@ -1,2209 +0,0 @@
-import abc
-import collections
-import collections.abc
-import functools
-import operator
-import sys
-import types as _types
-import typing
-
-
-__all__ = [
-    # Super-special typing primitives.
-    'Any',
-    'ClassVar',
-    'Concatenate',
-    'Final',
-    'LiteralString',
-    'ParamSpec',
-    'ParamSpecArgs',
-    'ParamSpecKwargs',
-    'Self',
-    'Type',
-    'TypeVar',
-    'TypeVarTuple',
-    'Unpack',
-
-    # ABCs (from collections.abc).
-    'Awaitable',
-    'AsyncIterator',
-    'AsyncIterable',
-    'Coroutine',
-    'AsyncGenerator',
-    'AsyncContextManager',
-    'ChainMap',
-
-    # Concrete collection types.
-    'ContextManager',
-    'Counter',
-    'Deque',
-    'DefaultDict',
-    'NamedTuple',
-    'OrderedDict',
-    'TypedDict',
-
-    # Structural checks, a.k.a. protocols.
-    'SupportsIndex',
-
-    # One-off things.
-    'Annotated',
-    'assert_never',
-    'assert_type',
-    'clear_overloads',
-    'dataclass_transform',
-    'get_overloads',
-    'final',
-    'get_args',
-    'get_origin',
-    'get_type_hints',
-    'IntVar',
-    'is_typeddict',
-    'Literal',
-    'NewType',
-    'overload',
-    'override',
-    'Protocol',
-    'reveal_type',
-    'runtime',
-    'runtime_checkable',
-    'Text',
-    'TypeAlias',
-    'TypeGuard',
-    'TYPE_CHECKING',
-    'Never',
-    'NoReturn',
-    'Required',
-    'NotRequired',
-]
-
-# for backward compatibility
-PEP_560 = True
-GenericMeta = type
-
-# The functions below are modified copies of typing internal helpers.
-# They are needed by _ProtocolMeta and they provide support for PEP 646.
-
-_marker = object()
-
-
-def _check_generic(cls, parameters, elen=_marker):
-    """Check correct count for parameters of a generic cls (internal helper).
-    This gives a nice error message in case of count mismatch.
-    """
-    if not elen:
-        raise TypeError(f"{cls} is not a generic class")
-    if elen is _marker:
-        if not hasattr(cls, "__parameters__") or not cls.__parameters__:
-            raise TypeError(f"{cls} is not a generic class")
-        elen = len(cls.__parameters__)
-    alen = len(parameters)
-    if alen != elen:
-        if hasattr(cls, "__parameters__"):
-            parameters = [p for p in cls.__parameters__ if not _is_unpack(p)]
-            num_tv_tuples = sum(isinstance(p, TypeVarTuple) for p in parameters)
-            if (num_tv_tuples > 0) and (alen >= elen - num_tv_tuples):
-                return
-        raise TypeError(f"Too {'many' if alen > elen else 'few'} parameters for {cls};"
-                        f" actual {alen}, expected {elen}")
-
-
-if sys.version_info >= (3, 10):
-    def _should_collect_from_parameters(t):
-        return isinstance(
-            t, (typing._GenericAlias, _types.GenericAlias, _types.UnionType)
-        )
-elif sys.version_info >= (3, 9):
-    def _should_collect_from_parameters(t):
-        return isinstance(t, (typing._GenericAlias, _types.GenericAlias))
-else:
-    def _should_collect_from_parameters(t):
-        return isinstance(t, typing._GenericAlias) and not t._special
-
-
-def _collect_type_vars(types, typevar_types=None):
-    """Collect all type variable contained in types in order of
-    first appearance (lexicographic order). For example::
-
-        _collect_type_vars((T, List[S, T])) == (T, S)
-    """
-    if typevar_types is None:
-        typevar_types = typing.TypeVar
-    tvars = []
-    for t in types:
-        if (
-            isinstance(t, typevar_types) and
-            t not in tvars and
-            not _is_unpack(t)
-        ):
-            tvars.append(t)
-        if _should_collect_from_parameters(t):
-            tvars.extend([t for t in t.__parameters__ if t not in tvars])
-    return tuple(tvars)
-
-
-NoReturn = typing.NoReturn
-
-# Some unconstrained type variables.  These are used by the container types.
-# (These are not for export.)
-T = typing.TypeVar('T')  # Any type.
-KT = typing.TypeVar('KT')  # Key type.
-VT = typing.TypeVar('VT')  # Value type.
-T_co = typing.TypeVar('T_co', covariant=True)  # Any type covariant containers.
-T_contra = typing.TypeVar('T_contra', contravariant=True)  # Ditto contravariant.
-
-
-if sys.version_info >= (3, 11):
-    from typing import Any
-else:
-
-    class _AnyMeta(type):
-        def __instancecheck__(self, obj):
-            if self is Any:
-                raise TypeError("typing_extensions.Any cannot be used with isinstance()")
-            return super().__instancecheck__(obj)
-
-        def __repr__(self):
-            if self is Any:
-                return "typing_extensions.Any"
-            return super().__repr__()
-
-    class Any(metaclass=_AnyMeta):
-        """Special type indicating an unconstrained type.
-        - Any is compatible with every type.
-        - Any assumed to have all methods.
-        - All values assumed to be instances of Any.
-        Note that all the above statements are true from the point of view of
-        static type checkers. At runtime, Any should not be used with instance
-        checks.
-        """
-        def __new__(cls, *args, **kwargs):
-            if cls is Any:
-                raise TypeError("Any cannot be instantiated")
-            return super().__new__(cls, *args, **kwargs)
-
-
-ClassVar = typing.ClassVar
-
-# On older versions of typing there is an internal class named "Final".
-# 3.8+
-if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7):
-    Final = typing.Final
-# 3.7
-else:
-    class _FinalForm(typing._SpecialForm, _root=True):
-
-        def __repr__(self):
-            return 'typing_extensions.' + self._name
-
-        def __getitem__(self, parameters):
-            item = typing._type_check(parameters,
-                                      f'{self._name} accepts only a single type.')
-            return typing._GenericAlias(self, (item,))
-
-    Final = _FinalForm('Final',
-                       doc="""A special typing construct to indicate that a name
-                       cannot be re-assigned or overridden in a subclass.
-                       For example:
-
-                           MAX_SIZE: Final = 9000
-                           MAX_SIZE += 1  # Error reported by type checker
-
-                           class Connection:
-                               TIMEOUT: Final[int] = 10
-                           class FastConnector(Connection):
-                               TIMEOUT = 1  # Error reported by type checker
-
-                       There is no runtime checking of these properties.""")
-
-if sys.version_info >= (3, 11):
-    final = typing.final
-else:
-    # @final exists in 3.8+, but we backport it for all versions
-    # before 3.11 to keep support for the __final__ attribute.
-    # See https://bugs.python.org/issue46342
-    def final(f):
-        """This decorator can be used to indicate to type checkers that
-        the decorated method cannot be overridden, and decorated class
-        cannot be subclassed. For example:
-
-            class Base:
-                @final
-                def done(self) -> None:
-                    ...
-            class Sub(Base):
-                def done(self) -> None:  # Error reported by type checker
-                    ...
-            @final
-            class Leaf:
-                ...
-            class Other(Leaf):  # Error reported by type checker
-                ...
-
-        There is no runtime checking of these properties. The decorator
-        sets the ``__final__`` attribute to ``True`` on the decorated object
-        to allow runtime introspection.
-        """
-        try:
-            f.__final__ = True
-        except (AttributeError, TypeError):
-            # Skip the attribute silently if it is not writable.
-            # AttributeError happens if the object has __slots__ or a
-            # read-only property, TypeError if it's a builtin class.
-            pass
-        return f
-
-
-def IntVar(name):
-    return typing.TypeVar(name)
-
-
-# 3.8+:
-if hasattr(typing, 'Literal'):
-    Literal = typing.Literal
-# 3.7:
-else:
-    class _LiteralForm(typing._SpecialForm, _root=True):
-
-        def __repr__(self):
-            return 'typing_extensions.' + self._name
-
-        def __getitem__(self, parameters):
-            return typing._GenericAlias(self, parameters)
-
-    Literal = _LiteralForm('Literal',
-                           doc="""A type that can be used to indicate to type checkers
-                           that the corresponding value has a value literally equivalent
-                           to the provided parameter. For example:
-
-                               var: Literal[4] = 4
-
-                           The type checker understands that 'var' is literally equal to
-                           the value 4 and no other value.
-
-                           Literal[...] cannot be subclassed. There is no runtime
-                           checking verifying that the parameter is actually a value
-                           instead of a type.""")
-
-
-_overload_dummy = typing._overload_dummy  # noqa
-
-
-if hasattr(typing, "get_overloads"):  # 3.11+
-    overload = typing.overload
-    get_overloads = typing.get_overloads
-    clear_overloads = typing.clear_overloads
-else:
-    # {module: {qualname: {firstlineno: func}}}
-    _overload_registry = collections.defaultdict(
-        functools.partial(collections.defaultdict, dict)
-    )
-
-    def overload(func):
-        """Decorator for overloaded functions/methods.
-
-        In a stub file, place two or more stub definitions for the same
-        function in a row, each decorated with @overload.  For example:
-
-        @overload
-        def utf8(value: None) -> None: ...
-        @overload
-        def utf8(value: bytes) -> bytes: ...
-        @overload
-        def utf8(value: str) -> bytes: ...
-
-        In a non-stub file (i.e. a regular .py file), do the same but
-        follow it with an implementation.  The implementation should *not*
-        be decorated with @overload.  For example:
-
-        @overload
-        def utf8(value: None) -> None: ...
-        @overload
-        def utf8(value: bytes) -> bytes: ...
-        @overload
-        def utf8(value: str) -> bytes: ...
-        def utf8(value):
-            # implementation goes here
-
-        The overloads for a function can be retrieved at runtime using the
-        get_overloads() function.
-        """
-        # classmethod and staticmethod
-        f = getattr(func, "__func__", func)
-        try:
-            _overload_registry[f.__module__][f.__qualname__][
-                f.__code__.co_firstlineno
-            ] = func
-        except AttributeError:
-            # Not a normal function; ignore.
-            pass
-        return _overload_dummy
-
-    def get_overloads(func):
-        """Return all defined overloads for *func* as a sequence."""
-        # classmethod and staticmethod
-        f = getattr(func, "__func__", func)
-        if f.__module__ not in _overload_registry:
-            return []
-        mod_dict = _overload_registry[f.__module__]
-        if f.__qualname__ not in mod_dict:
-            return []
-        return list(mod_dict[f.__qualname__].values())
-
-    def clear_overloads():
-        """Clear all overloads in the registry."""
-        _overload_registry.clear()
-
-
-# This is not a real generic class.  Don't use outside annotations.
-Type = typing.Type
-
-# Various ABCs mimicking those in collections.abc.
-# A few are simply re-exported for completeness.
-
-
-Awaitable = typing.Awaitable
-Coroutine = typing.Coroutine
-AsyncIterable = typing.AsyncIterable
-AsyncIterator = typing.AsyncIterator
-Deque = typing.Deque
-ContextManager = typing.ContextManager
-AsyncContextManager = typing.AsyncContextManager
-DefaultDict = typing.DefaultDict
-
-# 3.7.2+
-if hasattr(typing, 'OrderedDict'):
-    OrderedDict = typing.OrderedDict
-# 3.7.0-3.7.2
-else:
-    OrderedDict = typing._alias(collections.OrderedDict, (KT, VT))
-
-Counter = typing.Counter
-ChainMap = typing.ChainMap
-AsyncGenerator = typing.AsyncGenerator
-NewType = typing.NewType
-Text = typing.Text
-TYPE_CHECKING = typing.TYPE_CHECKING
-
-
-_PROTO_WHITELIST = ['Callable', 'Awaitable',
-                    'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator',
-                    'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
-                    'ContextManager', 'AsyncContextManager']
-
-
-def _get_protocol_attrs(cls):
-    attrs = set()
-    for base in cls.__mro__[:-1]:  # without object
-        if base.__name__ in ('Protocol', 'Generic'):
-            continue
-        annotations = getattr(base, '__annotations__', {})
-        for attr in list(base.__dict__.keys()) + list(annotations.keys()):
-            if (not attr.startswith('_abc_') and attr not in (
-                    '__abstractmethods__', '__annotations__', '__weakref__',
-                    '_is_protocol', '_is_runtime_protocol', '__dict__',
-                    '__args__', '__slots__',
-                    '__next_in_mro__', '__parameters__', '__origin__',
-                    '__orig_bases__', '__extra__', '__tree_hash__',
-                    '__doc__', '__subclasshook__', '__init__', '__new__',
-                    '__module__', '_MutableMapping__marker', '_gorg')):
-                attrs.add(attr)
-    return attrs
-
-
-def _is_callable_members_only(cls):
-    return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls))
-
-
-def _maybe_adjust_parameters(cls):
-    """Helper function used in Protocol.__init_subclass__ and _TypedDictMeta.__new__.
-
-    The contents of this function are very similar
-    to logic found in typing.Generic.__init_subclass__
-    on the CPython main branch.
-    """
-    tvars = []
-    if '__orig_bases__' in cls.__dict__:
-        tvars = typing._collect_type_vars(cls.__orig_bases__)
-        # Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn].
-        # If found, tvars must be a subset of it.
-        # If not found, tvars is it.
-        # Also check for and reject plain Generic,
-        # and reject multiple Generic[...] and/or Protocol[...].
-        gvars = None
-        for base in cls.__orig_bases__:
-            if (isinstance(base, typing._GenericAlias) and
-                    base.__origin__ in (typing.Generic, Protocol)):
-                # for error messages
-                the_base = base.__origin__.__name__
-                if gvars is not None:
-                    raise TypeError(
-                        "Cannot inherit from Generic[...]"
-                        " and/or Protocol[...] multiple types.")
-                gvars = base.__parameters__
-        if gvars is None:
-            gvars = tvars
-        else:
-            tvarset = set(tvars)
-            gvarset = set(gvars)
-            if not tvarset <= gvarset:
-                s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
-                s_args = ', '.join(str(g) for g in gvars)
-                raise TypeError(f"Some type variables ({s_vars}) are"
-                                f" not listed in {the_base}[{s_args}]")
-            tvars = gvars
-    cls.__parameters__ = tuple(tvars)
-
-
-# 3.8+
-if hasattr(typing, 'Protocol'):
-    Protocol = typing.Protocol
-# 3.7
-else:
-
-    def _no_init(self, *args, **kwargs):
-        if type(self)._is_protocol:
-            raise TypeError('Protocols cannot be instantiated')
-
-    class _ProtocolMeta(abc.ABCMeta):  # noqa: B024
-        # This metaclass is a bit unfortunate and exists only because of the lack
-        # of __instancehook__.
-        def __instancecheck__(cls, instance):
-            # We need this method for situations where attributes are
-            # assigned in __init__.
-            if ((not getattr(cls, '_is_protocol', False) or
-                 _is_callable_members_only(cls)) and
-                    issubclass(instance.__class__, cls)):
-                return True
-            if cls._is_protocol:
-                if all(hasattr(instance, attr) and
-                       (not callable(getattr(cls, attr, None)) or
-                        getattr(instance, attr) is not None)
-                       for attr in _get_protocol_attrs(cls)):
-                    return True
-            return super().__instancecheck__(instance)
-
-    class Protocol(metaclass=_ProtocolMeta):
-        # There is quite a lot of overlapping code with typing.Generic.
-        # Unfortunately it is hard to avoid this while these live in two different
-        # modules. The duplicated code will be removed when Protocol is moved to typing.
-        """Base class for protocol classes. Protocol classes are defined as::
-
-            class Proto(Protocol):
-                def meth(self) -> int:
-                    ...
-
-        Such classes are primarily used with static type checkers that recognize
-        structural subtyping (static duck-typing), for example::
-
-            class C:
-                def meth(self) -> int:
-                    return 0
-
-            def func(x: Proto) -> int:
-                return x.meth()
-
-            func(C())  # Passes static type check
-
-        See PEP 544 for details. Protocol classes decorated with
-        @typing_extensions.runtime act as simple-minded runtime protocol that checks
-        only the presence of given attributes, ignoring their type signatures.
-
-        Protocol classes can be generic, they are defined as::
-
-            class GenProto(Protocol[T]):
-                def meth(self) -> T:
-                    ...
-        """
-        __slots__ = ()
-        _is_protocol = True
-
-        def __new__(cls, *args, **kwds):
-            if cls is Protocol:
-                raise TypeError("Type Protocol cannot be instantiated; "
-                                "it can only be used as a base class")
-            return super().__new__(cls)
-
-        @typing._tp_cache
-        def __class_getitem__(cls, params):
-            if not isinstance(params, tuple):
-                params = (params,)
-            if not params and cls is not typing.Tuple:
-                raise TypeError(
-                    f"Parameter list to {cls.__qualname__}[...] cannot be empty")
-            msg = "Parameters to generic types must be types."
-            params = tuple(typing._type_check(p, msg) for p in params)  # noqa
-            if cls is Protocol:
-                # Generic can only be subscripted with unique type variables.
-                if not all(isinstance(p, typing.TypeVar) for p in params):
-                    i = 0
-                    while isinstance(params[i], typing.TypeVar):
-                        i += 1
-                    raise TypeError(
-                        "Parameters to Protocol[...] must all be type variables."
-                        f" Parameter {i + 1} is {params[i]}")
-                if len(set(params)) != len(params):
-                    raise TypeError(
-                        "Parameters to Protocol[...] must all be unique")
-            else:
-                # Subscripting a regular Generic subclass.
-                _check_generic(cls, params, len(cls.__parameters__))
-            return typing._GenericAlias(cls, params)
-
-        def __init_subclass__(cls, *args, **kwargs):
-            if '__orig_bases__' in cls.__dict__:
-                error = typing.Generic in cls.__orig_bases__
-            else:
-                error = typing.Generic in cls.__bases__
-            if error:
-                raise TypeError("Cannot inherit from plain Generic")
-            _maybe_adjust_parameters(cls)
-
-            # Determine if this is a protocol or a concrete subclass.
-            if not cls.__dict__.get('_is_protocol', None):
-                cls._is_protocol = any(b is Protocol for b in cls.__bases__)
-
-            # Set (or override) the protocol subclass hook.
-            def _proto_hook(other):
-                if not cls.__dict__.get('_is_protocol', None):
-                    return NotImplemented
-                if not getattr(cls, '_is_runtime_protocol', False):
-                    if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
-                        return NotImplemented
-                    raise TypeError("Instance and class checks can only be used with"
-                                    " @runtime protocols")
-                if not _is_callable_members_only(cls):
-                    if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
-                        return NotImplemented
-                    raise TypeError("Protocols with non-method members"
-                                    " don't support issubclass()")
-                if not isinstance(other, type):
-                    # Same error as for issubclass(1, int)
-                    raise TypeError('issubclass() arg 1 must be a class')
-                for attr in _get_protocol_attrs(cls):
-                    for base in other.__mro__:
-                        if attr in base.__dict__:
-                            if base.__dict__[attr] is None:
-                                return NotImplemented
-                            break
-                        annotations = getattr(base, '__annotations__', {})
-                        if (isinstance(annotations, typing.Mapping) and
-                                attr in annotations and
-                                isinstance(other, _ProtocolMeta) and
-                                other._is_protocol):
-                            break
-                    else:
-                        return NotImplemented
-                return True
-            if '__subclasshook__' not in cls.__dict__:
-                cls.__subclasshook__ = _proto_hook
-
-            # We have nothing more to do for non-protocols.
-            if not cls._is_protocol:
-                return
-
-            # Check consistency of bases.
-            for base in cls.__bases__:
-                if not (base in (object, typing.Generic) or
-                        base.__module__ == 'collections.abc' and
-                        base.__name__ in _PROTO_WHITELIST or
-                        isinstance(base, _ProtocolMeta) and base._is_protocol):
-                    raise TypeError('Protocols can only inherit from other'
-                                    f' protocols, got {repr(base)}')
-            cls.__init__ = _no_init
-
-
-# 3.8+
-if hasattr(typing, 'runtime_checkable'):
-    runtime_checkable = typing.runtime_checkable
-# 3.7
-else:
-    def runtime_checkable(cls):
-        """Mark a protocol class as a runtime protocol, so that it
-        can be used with isinstance() and issubclass(). Raise TypeError
-        if applied to a non-protocol class.
-
-        This allows a simple-minded structural check very similar to the
-        one-offs in collections.abc such as Hashable.
-        """
-        if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol:
-            raise TypeError('@runtime_checkable can be only applied to protocol classes,'
-                            f' got {cls!r}')
-        cls._is_runtime_protocol = True
-        return cls
-
-
-# Exists for backwards compatibility.
-runtime = runtime_checkable
-
-
-# 3.8+
-if hasattr(typing, 'SupportsIndex'):
-    SupportsIndex = typing.SupportsIndex
-# 3.7
-else:
-    @runtime_checkable
-    class SupportsIndex(Protocol):
-        __slots__ = ()
-
-        @abc.abstractmethod
-        def __index__(self) -> int:
-            pass
-
-
-if hasattr(typing, "Required"):
-    # The standard library TypedDict in Python 3.8 does not store runtime information
-    # about which (if any) keys are optional.  See https://bugs.python.org/issue38834
-    # The standard library TypedDict in Python 3.9.0/1 does not honour the "total"
-    # keyword with old-style TypedDict().  See https://bugs.python.org/issue42059
-    # The standard library TypedDict below Python 3.11 does not store runtime
-    # information about optional and required keys when using Required or NotRequired.
-    # Generic TypedDicts are also impossible using typing.TypedDict on Python <3.11.
-    TypedDict = typing.TypedDict
-    _TypedDictMeta = typing._TypedDictMeta
-    is_typeddict = typing.is_typeddict
-else:
-    def _check_fails(cls, other):
-        try:
-            if sys._getframe(1).f_globals['__name__'] not in ['abc',
-                                                              'functools',
-                                                              'typing']:
-                # Typed dicts are only for static structural subtyping.
-                raise TypeError('TypedDict does not support instance and class checks')
-        except (AttributeError, ValueError):
-            pass
-        return False
-
-    def _dict_new(*args, **kwargs):
-        if not args:
-            raise TypeError('TypedDict.__new__(): not enough arguments')
-        _, args = args[0], args[1:]  # allow the "cls" keyword be passed
-        return dict(*args, **kwargs)
-
-    _dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)'
-
-    def _typeddict_new(*args, total=True, **kwargs):
-        if not args:
-            raise TypeError('TypedDict.__new__(): not enough arguments')
-        _, args = args[0], args[1:]  # allow the "cls" keyword be passed
-        if args:
-            typename, args = args[0], args[1:]  # allow the "_typename" keyword be passed
-        elif '_typename' in kwargs:
-            typename = kwargs.pop('_typename')
-            import warnings
-            warnings.warn("Passing '_typename' as keyword argument is deprecated",
-                          DeprecationWarning, stacklevel=2)
-        else:
-            raise TypeError("TypedDict.__new__() missing 1 required positional "
-                            "argument: '_typename'")
-        if args:
-            try:
-                fields, = args  # allow the "_fields" keyword be passed
-            except ValueError:
-                raise TypeError('TypedDict.__new__() takes from 2 to 3 '
-                                f'positional arguments but {len(args) + 2} '
-                                'were given')
-        elif '_fields' in kwargs and len(kwargs) == 1:
-            fields = kwargs.pop('_fields')
-            import warnings
-            warnings.warn("Passing '_fields' as keyword argument is deprecated",
-                          DeprecationWarning, stacklevel=2)
-        else:
-            fields = None
-
-        if fields is None:
-            fields = kwargs
-        elif kwargs:
-            raise TypeError("TypedDict takes either a dict or keyword arguments,"
-                            " but not both")
-
-        ns = {'__annotations__': dict(fields)}
-        try:
-            # Setting correct module is necessary to make typed dict classes pickleable.
-            ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__')
-        except (AttributeError, ValueError):
-            pass
-
-        return _TypedDictMeta(typename, (), ns, total=total)
-
-    _typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,'
-                                         ' /, *, total=True, **kwargs)')
-
-    class _TypedDictMeta(type):
-        def __init__(cls, name, bases, ns, total=True):
-            super().__init__(name, bases, ns)
-
-        def __new__(cls, name, bases, ns, total=True):
-            # Create new typed dict class object.
-            # This method is called directly when TypedDict is subclassed,
-            # or via _typeddict_new when TypedDict is instantiated. This way
-            # TypedDict supports all three syntaxes described in its docstring.
-            # Subclasses and instances of TypedDict return actual dictionaries
-            # via _dict_new.
-            ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new
-            # Don't insert typing.Generic into __bases__ here,
-            # or Generic.__init_subclass__ will raise TypeError
-            # in the super().__new__() call.
-            # Instead, monkey-patch __bases__ onto the class after it's been created.
-            tp_dict = super().__new__(cls, name, (dict,), ns)
-
-            if any(issubclass(base, typing.Generic) for base in bases):
-                tp_dict.__bases__ = (typing.Generic, dict)
-                _maybe_adjust_parameters(tp_dict)
-
-            annotations = {}
-            own_annotations = ns.get('__annotations__', {})
-            msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
-            own_annotations = {
-                n: typing._type_check(tp, msg) for n, tp in own_annotations.items()
-            }
-            required_keys = set()
-            optional_keys = set()
-
-            for base in bases:
-                annotations.update(base.__dict__.get('__annotations__', {}))
-                required_keys.update(base.__dict__.get('__required_keys__', ()))
-                optional_keys.update(base.__dict__.get('__optional_keys__', ()))
-
-            annotations.update(own_annotations)
-            for annotation_key, annotation_type in own_annotations.items():
-                annotation_origin = get_origin(annotation_type)
-                if annotation_origin is Annotated:
-                    annotation_args = get_args(annotation_type)
-                    if annotation_args:
-                        annotation_type = annotation_args[0]
-                        annotation_origin = get_origin(annotation_type)
-
-                if annotation_origin is Required:
-                    required_keys.add(annotation_key)
-                elif annotation_origin is NotRequired:
-                    optional_keys.add(annotation_key)
-                elif total:
-                    required_keys.add(annotation_key)
-                else:
-                    optional_keys.add(annotation_key)
-
-            tp_dict.__annotations__ = annotations
-            tp_dict.__required_keys__ = frozenset(required_keys)
-            tp_dict.__optional_keys__ = frozenset(optional_keys)
-            if not hasattr(tp_dict, '__total__'):
-                tp_dict.__total__ = total
-            return tp_dict
-
-        __instancecheck__ = __subclasscheck__ = _check_fails
-
-    TypedDict = _TypedDictMeta('TypedDict', (dict,), {})
-    TypedDict.__module__ = __name__
-    TypedDict.__doc__ = \
-        """A simple typed name space. At runtime it is equivalent to a plain dict.
-
-        TypedDict creates a dictionary type that expects all of its
-        instances to have a certain set of keys, with each key
-        associated with a value of a consistent type. This expectation
-        is not checked at runtime but is only enforced by type checkers.
-        Usage::
-
-            class Point2D(TypedDict):
-                x: int
-                y: int
-                label: str
-
-            a: Point2D = {'x': 1, 'y': 2, 'label': 'good'}  # OK
-            b: Point2D = {'z': 3, 'label': 'bad'}           # Fails type check
-
-            assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
-
-        The type info can be accessed via the Point2D.__annotations__ dict, and
-        the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
-        TypedDict supports two additional equivalent forms::
-
-            Point2D = TypedDict('Point2D', x=int, y=int, label=str)
-            Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
-
-        The class syntax is only supported in Python 3.6+, while two other
-        syntax forms work for Python 2.7 and 3.2+
-        """
-
-    if hasattr(typing, "_TypedDictMeta"):
-        _TYPEDDICT_TYPES = (typing._TypedDictMeta, _TypedDictMeta)
-    else:
-        _TYPEDDICT_TYPES = (_TypedDictMeta,)
-
-    def is_typeddict(tp):
-        """Check if an annotation is a TypedDict class
-
-        For example::
-            class Film(TypedDict):
-                title: str
-                year: int
-
-            is_typeddict(Film)  # => True
-            is_typeddict(Union[list, str])  # => False
-        """
-        return isinstance(tp, tuple(_TYPEDDICT_TYPES))
-
-
-if hasattr(typing, "assert_type"):
-    assert_type = typing.assert_type
-
-else:
-    def assert_type(__val, __typ):
-        """Assert (to the type checker) that the value is of the given type.
-
-        When the type checker encounters a call to assert_type(), it
-        emits an error if the value is not of the specified type::
-
-            def greet(name: str) -> None:
-                assert_type(name, str)  # ok
-                assert_type(name, int)  # type checker error
-
-        At runtime this returns the first argument unchanged and otherwise
-        does nothing.
-        """
-        return __val
-
-
-if hasattr(typing, "Required"):
-    get_type_hints = typing.get_type_hints
-else:
-    import functools
-    import types
-
-    # replaces _strip_annotations()
-    def _strip_extras(t):
-        """Strips Annotated, Required and NotRequired from a given type."""
-        if isinstance(t, _AnnotatedAlias):
-            return _strip_extras(t.__origin__)
-        if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired):
-            return _strip_extras(t.__args__[0])
-        if isinstance(t, typing._GenericAlias):
-            stripped_args = tuple(_strip_extras(a) for a in t.__args__)
-            if stripped_args == t.__args__:
-                return t
-            return t.copy_with(stripped_args)
-        if hasattr(types, "GenericAlias") and isinstance(t, types.GenericAlias):
-            stripped_args = tuple(_strip_extras(a) for a in t.__args__)
-            if stripped_args == t.__args__:
-                return t
-            return types.GenericAlias(t.__origin__, stripped_args)
-        if hasattr(types, "UnionType") and isinstance(t, types.UnionType):
-            stripped_args = tuple(_strip_extras(a) for a in t.__args__)
-            if stripped_args == t.__args__:
-                return t
-            return functools.reduce(operator.or_, stripped_args)
-
-        return t
-
-    def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
-        """Return type hints for an object.
-
-        This is often the same as obj.__annotations__, but it handles
-        forward references encoded as string literals, adds Optional[t] if a
-        default value equal to None is set and recursively replaces all
-        'Annotated[T, ...]', 'Required[T]' or 'NotRequired[T]' with 'T'
-        (unless 'include_extras=True').
-
-        The argument may be a module, class, method, or function. The annotations
-        are returned as a dictionary. For classes, annotations include also
-        inherited members.
-
-        TypeError is raised if the argument is not of a type that can contain
-        annotations, and an empty dictionary is returned if no annotations are
-        present.
-
-        BEWARE -- the behavior of globalns and localns is counterintuitive
-        (unless you are familiar with how eval() and exec() work).  The
-        search order is locals first, then globals.
-
-        - If no dict arguments are passed, an attempt is made to use the
-          globals from obj (or the respective module's globals for classes),
-          and these are also used as the locals.  If the object does not appear
-          to have globals, an empty dictionary is used.
-
-        - If one dict argument is passed, it is used for both globals and
-          locals.
-
-        - If two dict arguments are passed, they specify globals and
-          locals, respectively.
-        """
-        if hasattr(typing, "Annotated"):
-            hint = typing.get_type_hints(
-                obj, globalns=globalns, localns=localns, include_extras=True
-            )
-        else:
-            hint = typing.get_type_hints(obj, globalns=globalns, localns=localns)
-        if include_extras:
-            return hint
-        return {k: _strip_extras(t) for k, t in hint.items()}
-
-
-# Python 3.9+ has PEP 593 (Annotated)
-if hasattr(typing, 'Annotated'):
-    Annotated = typing.Annotated
-    # Not exported and not a public API, but needed for get_origin() and get_args()
-    # to work.
-    _AnnotatedAlias = typing._AnnotatedAlias
-# 3.7-3.8
-else:
-    class _AnnotatedAlias(typing._GenericAlias, _root=True):
-        """Runtime representation of an annotated type.
-
-        At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
-        with extra annotations. The alias behaves like a normal typing alias,
-        instantiating is the same as instantiating the underlying type, binding
-        it to types is also the same.
-        """
-        def __init__(self, origin, metadata):
-            if isinstance(origin, _AnnotatedAlias):
-                metadata = origin.__metadata__ + metadata
-                origin = origin.__origin__
-            super().__init__(origin, origin)
-            self.__metadata__ = metadata
-
-        def copy_with(self, params):
-            assert len(params) == 1
-            new_type = params[0]
-            return _AnnotatedAlias(new_type, self.__metadata__)
-
-        def __repr__(self):
-            return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, "
-                    f"{', '.join(repr(a) for a in self.__metadata__)}]")
-
-        def __reduce__(self):
-            return operator.getitem, (
-                Annotated, (self.__origin__,) + self.__metadata__
-            )
-
-        def __eq__(self, other):
-            if not isinstance(other, _AnnotatedAlias):
-                return NotImplemented
-            if self.__origin__ != other.__origin__:
-                return False
-            return self.__metadata__ == other.__metadata__
-
-        def __hash__(self):
-            return hash((self.__origin__, self.__metadata__))
-
-    class Annotated:
-        """Add context specific metadata to a type.
-
-        Example: Annotated[int, runtime_check.Unsigned] indicates to the
-        hypothetical runtime_check module that this type is an unsigned int.
-        Every other consumer of this type can ignore this metadata and treat
-        this type as int.
-
-        The first argument to Annotated must be a valid type (and will be in
-        the __origin__ field), the remaining arguments are kept as a tuple in
-        the __extra__ field.
-
-        Details:
-
-        - It's an error to call `Annotated` with less than two arguments.
-        - Nested Annotated are flattened::
-
-            Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
-
-        - Instantiating an annotated type is equivalent to instantiating the
-        underlying type::
-
-            Annotated[C, Ann1](5) == C(5)
-
-        - Annotated can be used as a generic type alias::
-
-            Optimized = Annotated[T, runtime.Optimize()]
-            Optimized[int] == Annotated[int, runtime.Optimize()]
-
-            OptimizedList = Annotated[List[T], runtime.Optimize()]
-            OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
-        """
-
-        __slots__ = ()
-
-        def __new__(cls, *args, **kwargs):
-            raise TypeError("Type Annotated cannot be instantiated.")
-
-        @typing._tp_cache
-        def __class_getitem__(cls, params):
-            if not isinstance(params, tuple) or len(params) < 2:
-                raise TypeError("Annotated[...] should be used "
-                                "with at least two arguments (a type and an "
-                                "annotation).")
-            allowed_special_forms = (ClassVar, Final)
-            if get_origin(params[0]) in allowed_special_forms:
-                origin = params[0]
-            else:
-                msg = "Annotated[t, ...]: t must be a type."
-                origin = typing._type_check(params[0], msg)
-            metadata = tuple(params[1:])
-            return _AnnotatedAlias(origin, metadata)
-
-        def __init_subclass__(cls, *args, **kwargs):
-            raise TypeError(
-                f"Cannot subclass {cls.__module__}.Annotated"
-            )
-
-# Python 3.8 has get_origin() and get_args() but those implementations aren't
-# Annotated-aware, so we can't use those. Python 3.9's versions don't support
-# ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do.
-if sys.version_info[:2] >= (3, 10):
-    get_origin = typing.get_origin
-    get_args = typing.get_args
-# 3.7-3.9
-else:
-    try:
-        # 3.9+
-        from typing import _BaseGenericAlias
-    except ImportError:
-        _BaseGenericAlias = typing._GenericAlias
-    try:
-        # 3.9+
-        from typing import GenericAlias as _typing_GenericAlias
-    except ImportError:
-        _typing_GenericAlias = typing._GenericAlias
-
-    def get_origin(tp):
-        """Get the unsubscripted version of a type.
-
-        This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
-        and Annotated. Return None for unsupported types. Examples::
-
-            get_origin(Literal[42]) is Literal
-            get_origin(int) is None
-            get_origin(ClassVar[int]) is ClassVar
-            get_origin(Generic) is Generic
-            get_origin(Generic[T]) is Generic
-            get_origin(Union[T, int]) is Union
-            get_origin(List[Tuple[T, T]][int]) == list
-            get_origin(P.args) is P
-        """
-        if isinstance(tp, _AnnotatedAlias):
-            return Annotated
-        if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias, _BaseGenericAlias,
-                           ParamSpecArgs, ParamSpecKwargs)):
-            return tp.__origin__
-        if tp is typing.Generic:
-            return typing.Generic
-        return None
-
-    def get_args(tp):
-        """Get type arguments with all substitutions performed.
-
-        For unions, basic simplifications used by Union constructor are performed.
-        Examples::
-            get_args(Dict[str, int]) == (str, int)
-            get_args(int) == ()
-            get_args(Union[int, Union[T, int], str][int]) == (int, str)
-            get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
-            get_args(Callable[[], T][int]) == ([], int)
-        """
-        if isinstance(tp, _AnnotatedAlias):
-            return (tp.__origin__,) + tp.__metadata__
-        if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias)):
-            if getattr(tp, "_special", False):
-                return ()
-            res = tp.__args__
-            if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
-                res = (list(res[:-1]), res[-1])
-            return res
-        return ()
-
-
-# 3.10+
-if hasattr(typing, 'TypeAlias'):
-    TypeAlias = typing.TypeAlias
-# 3.9
-elif sys.version_info[:2] >= (3, 9):
-    class _TypeAliasForm(typing._SpecialForm, _root=True):
-        def __repr__(self):
-            return 'typing_extensions.' + self._name
-
-    @_TypeAliasForm
-    def TypeAlias(self, parameters):
-        """Special marker indicating that an assignment should
-        be recognized as a proper type alias definition by type
-        checkers.
-
-        For example::
-
-            Predicate: TypeAlias = Callable[..., bool]
-
-        It's invalid when used anywhere except as in the example above.
-        """
-        raise TypeError(f"{self} is not subscriptable")
-# 3.7-3.8
-else:
-    class _TypeAliasForm(typing._SpecialForm, _root=True):
-        def __repr__(self):
-            return 'typing_extensions.' + self._name
-
-    TypeAlias = _TypeAliasForm('TypeAlias',
-                               doc="""Special marker indicating that an assignment should
-                               be recognized as a proper type alias definition by type
-                               checkers.
-
-                               For example::
-
-                                   Predicate: TypeAlias = Callable[..., bool]
-
-                               It's invalid when used anywhere except as in the example
-                               above.""")
-
-
-class _DefaultMixin:
-    """Mixin for TypeVarLike defaults."""
-
-    __slots__ = ()
-
-    def __init__(self, default):
-        if isinstance(default, (tuple, list)):
-            self.__default__ = tuple((typing._type_check(d, "Default must be a type")
-                                      for d in default))
-        elif default:
-            self.__default__ = typing._type_check(default, "Default must be a type")
-        else:
-            self.__default__ = None
-
-
-# Add default and infer_variance parameters from PEP 696 and 695
-class TypeVar(typing.TypeVar, _DefaultMixin, _root=True):
-    """Type variable."""
-
-    __module__ = 'typing'
-
-    def __init__(self, name, *constraints, bound=None,
-                 covariant=False, contravariant=False,
-                 default=None, infer_variance=False):
-        super().__init__(name, *constraints, bound=bound, covariant=covariant,
-                         contravariant=contravariant)
-        _DefaultMixin.__init__(self, default)
-        self.__infer_variance__ = infer_variance
-
-        # for pickling:
-        try:
-            def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
-        except (AttributeError, ValueError):
-            def_mod = None
-        if def_mod != 'typing_extensions':
-            self.__module__ = def_mod
-
-
-# Python 3.10+ has PEP 612
-if hasattr(typing, 'ParamSpecArgs'):
-    ParamSpecArgs = typing.ParamSpecArgs
-    ParamSpecKwargs = typing.ParamSpecKwargs
-# 3.7-3.9
-else:
-    class _Immutable:
-        """Mixin to indicate that object should not be copied."""
-        __slots__ = ()
-
-        def __copy__(self):
-            return self
-
-        def __deepcopy__(self, memo):
-            return self
-
-    class ParamSpecArgs(_Immutable):
-        """The args for a ParamSpec object.
-
-        Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
-
-        ParamSpecArgs objects have a reference back to their ParamSpec:
-
-        P.args.__origin__ is P
-
-        This type is meant for runtime introspection and has no special meaning to
-        static type checkers.
-        """
-        def __init__(self, origin):
-            self.__origin__ = origin
-
-        def __repr__(self):
-            return f"{self.__origin__.__name__}.args"
-
-        def __eq__(self, other):
-            if not isinstance(other, ParamSpecArgs):
-                return NotImplemented
-            return self.__origin__ == other.__origin__
-
-    class ParamSpecKwargs(_Immutable):
-        """The kwargs for a ParamSpec object.
-
-        Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
-
-        ParamSpecKwargs objects have a reference back to their ParamSpec:
-
-        P.kwargs.__origin__ is P
-
-        This type is meant for runtime introspection and has no special meaning to
-        static type checkers.
-        """
-        def __init__(self, origin):
-            self.__origin__ = origin
-
-        def __repr__(self):
-            return f"{self.__origin__.__name__}.kwargs"
-
-        def __eq__(self, other):
-            if not isinstance(other, ParamSpecKwargs):
-                return NotImplemented
-            return self.__origin__ == other.__origin__
-
-# 3.10+
-if hasattr(typing, 'ParamSpec'):
-
-    # Add default Parameter - PEP 696
-    class ParamSpec(typing.ParamSpec, _DefaultMixin, _root=True):
-        """Parameter specification variable."""
-
-        __module__ = 'typing'
-
-        def __init__(self, name, *, bound=None, covariant=False, contravariant=False,
-                     default=None):
-            super().__init__(name, bound=bound, covariant=covariant,
-                             contravariant=contravariant)
-            _DefaultMixin.__init__(self, default)
-
-            # for pickling:
-            try:
-                def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
-            except (AttributeError, ValueError):
-                def_mod = None
-            if def_mod != 'typing_extensions':
-                self.__module__ = def_mod
-
-# 3.7-3.9
-else:
-
-    # Inherits from list as a workaround for Callable checks in Python < 3.9.2.
-    class ParamSpec(list, _DefaultMixin):
-        """Parameter specification variable.
-
-        Usage::
-
-           P = ParamSpec('P')
-
-        Parameter specification variables exist primarily for the benefit of static
-        type checkers.  They are used to forward the parameter types of one
-        callable to another callable, a pattern commonly found in higher order
-        functions and decorators.  They are only valid when used in ``Concatenate``,
-        or as the first argument to ``Callable``. In Python 3.10 and higher,
-        they are also supported in user-defined Generics at runtime.
-        See class Generic for more information on generic types.  An
-        example for annotating a decorator::
-
-           T = TypeVar('T')
-           P = ParamSpec('P')
-
-           def add_logging(f: Callable[P, T]) -> Callable[P, T]:
-               '''A type-safe decorator to add logging to a function.'''
-               def inner(*args: P.args, **kwargs: P.kwargs) -> T:
-                   logging.info(f'{f.__name__} was called')
-                   return f(*args, **kwargs)
-               return inner
-
-           @add_logging
-           def add_two(x: float, y: float) -> float:
-               '''Add two numbers together.'''
-               return x + y
-
-        Parameter specification variables defined with covariant=True or
-        contravariant=True can be used to declare covariant or contravariant
-        generic types.  These keyword arguments are valid, but their actual semantics
-        are yet to be decided.  See PEP 612 for details.
-
-        Parameter specification variables can be introspected. e.g.:
-
-           P.__name__ == 'P'
-           P.__bound__ == None
-           P.__covariant__ == False
-           P.__contravariant__ == False
-
-        Note that only parameter specification variables defined in global scope can
-        be pickled.
-        """
-
-        # Trick Generic __parameters__.
-        __class__ = typing.TypeVar
-
-        @property
-        def args(self):
-            return ParamSpecArgs(self)
-
-        @property
-        def kwargs(self):
-            return ParamSpecKwargs(self)
-
-        def __init__(self, name, *, bound=None, covariant=False, contravariant=False,
-                     default=None):
-            super().__init__([self])
-            self.__name__ = name
-            self.__covariant__ = bool(covariant)
-            self.__contravariant__ = bool(contravariant)
-            if bound:
-                self.__bound__ = typing._type_check(bound, 'Bound must be a type.')
-            else:
-                self.__bound__ = None
-            _DefaultMixin.__init__(self, default)
-
-            # for pickling:
-            try:
-                def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
-            except (AttributeError, ValueError):
-                def_mod = None
-            if def_mod != 'typing_extensions':
-                self.__module__ = def_mod
-
-        def __repr__(self):
-            if self.__covariant__:
-                prefix = '+'
-            elif self.__contravariant__:
-                prefix = '-'
-            else:
-                prefix = '~'
-            return prefix + self.__name__
-
-        def __hash__(self):
-            return object.__hash__(self)
-
-        def __eq__(self, other):
-            return self is other
-
-        def __reduce__(self):
-            return self.__name__
-
-        # Hack to get typing._type_check to pass.
-        def __call__(self, *args, **kwargs):
-            pass
-
-
-# 3.7-3.9
-if not hasattr(typing, 'Concatenate'):
-    # Inherits from list as a workaround for Callable checks in Python < 3.9.2.
-    class _ConcatenateGenericAlias(list):
-
-        # Trick Generic into looking into this for __parameters__.
-        __class__ = typing._GenericAlias
-
-        # Flag in 3.8.
-        _special = False
-
-        def __init__(self, origin, args):
-            super().__init__(args)
-            self.__origin__ = origin
-            self.__args__ = args
-
-        def __repr__(self):
-            _type_repr = typing._type_repr
-            return (f'{_type_repr(self.__origin__)}'
-                    f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]')
-
-        def __hash__(self):
-            return hash((self.__origin__, self.__args__))
-
-        # Hack to get typing._type_check to pass in Generic.
-        def __call__(self, *args, **kwargs):
-            pass
-
-        @property
-        def __parameters__(self):
-            return tuple(
-                tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec))
-            )
-
-
-# 3.7-3.9
-@typing._tp_cache
-def _concatenate_getitem(self, parameters):
-    if parameters == ():
-        raise TypeError("Cannot take a Concatenate of no types.")
-    if not isinstance(parameters, tuple):
-        parameters = (parameters,)
-    if not isinstance(parameters[-1], ParamSpec):
-        raise TypeError("The last parameter to Concatenate should be a "
-                        "ParamSpec variable.")
-    msg = "Concatenate[arg, ...]: each arg must be a type."
-    parameters = tuple(typing._type_check(p, msg) for p in parameters)
-    return _ConcatenateGenericAlias(self, parameters)
-
-
-# 3.10+
-if hasattr(typing, 'Concatenate'):
-    Concatenate = typing.Concatenate
-    _ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa
-# 3.9
-elif sys.version_info[:2] >= (3, 9):
-    @_TypeAliasForm
-    def Concatenate(self, parameters):
-        """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
-        higher order function which adds, removes or transforms parameters of a
-        callable.
-
-        For example::
-
-           Callable[Concatenate[int, P], int]
-
-        See PEP 612 for detailed information.
-        """
-        return _concatenate_getitem(self, parameters)
-# 3.7-3.8
-else:
-    class _ConcatenateForm(typing._SpecialForm, _root=True):
-        def __repr__(self):
-            return 'typing_extensions.' + self._name
-
-        def __getitem__(self, parameters):
-            return _concatenate_getitem(self, parameters)
-
-    Concatenate = _ConcatenateForm(
-        'Concatenate',
-        doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
-        higher order function which adds, removes or transforms parameters of a
-        callable.
-
-        For example::
-
-           Callable[Concatenate[int, P], int]
-
-        See PEP 612 for detailed information.
-        """)
-
-# 3.10+
-if hasattr(typing, 'TypeGuard'):
-    TypeGuard = typing.TypeGuard
-# 3.9
-elif sys.version_info[:2] >= (3, 9):
-    class _TypeGuardForm(typing._SpecialForm, _root=True):
-        def __repr__(self):
-            return 'typing_extensions.' + self._name
-
-    @_TypeGuardForm
-    def TypeGuard(self, parameters):
-        """Special typing form used to annotate the return type of a user-defined
-        type guard function.  ``TypeGuard`` only accepts a single type argument.
-        At runtime, functions marked this way should return a boolean.
-
-        ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
-        type checkers to determine a more precise type of an expression within a
-        program's code flow.  Usually type narrowing is done by analyzing
-        conditional code flow and applying the narrowing to a block of code.  The
-        conditional expression here is sometimes referred to as a "type guard".
-
-        Sometimes it would be convenient to use a user-defined boolean function
-        as a type guard.  Such a function should use ``TypeGuard[...]`` as its
-        return type to alert static type checkers to this intention.
-
-        Using ``-> TypeGuard`` tells the static type checker that for a given
-        function:
-
-        1. The return value is a boolean.
-        2. If the return value is ``True``, the type of its argument
-        is the type inside ``TypeGuard``.
-
-        For example::
-
-            def is_str(val: Union[str, float]):
-                # "isinstance" type guard
-                if isinstance(val, str):
-                    # Type of ``val`` is narrowed to ``str``
-                    ...
-                else:
-                    # Else, type of ``val`` is narrowed to ``float``.
-                    ...
-
-        Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
-        form of ``TypeA`` (it can even be a wider form) and this may lead to
-        type-unsafe results.  The main reason is to allow for things like
-        narrowing ``List[object]`` to ``List[str]`` even though the latter is not
-        a subtype of the former, since ``List`` is invariant.  The responsibility of
-        writing type-safe type guards is left to the user.
-
-        ``TypeGuard`` also works with type variables.  For more information, see
-        PEP 647 (User-Defined Type Guards).
-        """
-        item = typing._type_check(parameters, f'{self} accepts only a single type.')
-        return typing._GenericAlias(self, (item,))
-# 3.7-3.8
-else:
-    class _TypeGuardForm(typing._SpecialForm, _root=True):
-
-        def __repr__(self):
-            return 'typing_extensions.' + self._name
-
-        def __getitem__(self, parameters):
-            item = typing._type_check(parameters,
-                                      f'{self._name} accepts only a single type')
-            return typing._GenericAlias(self, (item,))
-
-    TypeGuard = _TypeGuardForm(
-        'TypeGuard',
-        doc="""Special typing form used to annotate the return type of a user-defined
-        type guard function.  ``TypeGuard`` only accepts a single type argument.
-        At runtime, functions marked this way should return a boolean.
-
-        ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
-        type checkers to determine a more precise type of an expression within a
-        program's code flow.  Usually type narrowing is done by analyzing
-        conditional code flow and applying the narrowing to a block of code.  The
-        conditional expression here is sometimes referred to as a "type guard".
-
-        Sometimes it would be convenient to use a user-defined boolean function
-        as a type guard.  Such a function should use ``TypeGuard[...]`` as its
-        return type to alert static type checkers to this intention.
-
-        Using ``-> TypeGuard`` tells the static type checker that for a given
-        function:
-
-        1. The return value is a boolean.
-        2. If the return value is ``True``, the type of its argument
-        is the type inside ``TypeGuard``.
-
-        For example::
-
-            def is_str(val: Union[str, float]):
-                # "isinstance" type guard
-                if isinstance(val, str):
-                    # Type of ``val`` is narrowed to ``str``
-                    ...
-                else:
-                    # Else, type of ``val`` is narrowed to ``float``.
-                    ...
-
-        Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
-        form of ``TypeA`` (it can even be a wider form) and this may lead to
-        type-unsafe results.  The main reason is to allow for things like
-        narrowing ``List[object]`` to ``List[str]`` even though the latter is not
-        a subtype of the former, since ``List`` is invariant.  The responsibility of
-        writing type-safe type guards is left to the user.
-
-        ``TypeGuard`` also works with type variables.  For more information, see
-        PEP 647 (User-Defined Type Guards).
-        """)
-
-
-# Vendored from cpython typing._SpecialForm
-class _SpecialForm(typing._Final, _root=True):
-    __slots__ = ('_name', '__doc__', '_getitem')
-
-    def __init__(self, getitem):
-        self._getitem = getitem
-        self._name = getitem.__name__
-        self.__doc__ = getitem.__doc__
-
-    def __getattr__(self, item):
-        if item in {'__name__', '__qualname__'}:
-            return self._name
-
-        raise AttributeError(item)
-
-    def __mro_entries__(self, bases):
-        raise TypeError(f"Cannot subclass {self!r}")
-
-    def __repr__(self):
-        return f'typing_extensions.{self._name}'
-
-    def __reduce__(self):
-        return self._name
-
-    def __call__(self, *args, **kwds):
-        raise TypeError(f"Cannot instantiate {self!r}")
-
-    def __or__(self, other):
-        return typing.Union[self, other]
-
-    def __ror__(self, other):
-        return typing.Union[other, self]
-
-    def __instancecheck__(self, obj):
-        raise TypeError(f"{self} cannot be used with isinstance()")
-
-    def __subclasscheck__(self, cls):
-        raise TypeError(f"{self} cannot be used with issubclass()")
-
-    @typing._tp_cache
-    def __getitem__(self, parameters):
-        return self._getitem(self, parameters)
-
-
-if hasattr(typing, "LiteralString"):
-    LiteralString = typing.LiteralString
-else:
-    @_SpecialForm
-    def LiteralString(self, params):
-        """Represents an arbitrary literal string.
-
-        Example::
-
-          from typing_extensions import LiteralString
-
-          def query(sql: LiteralString) -> ...:
-              ...
-
-          query("SELECT * FROM table")  # ok
-          query(f"SELECT * FROM {input()}")  # not ok
-
-        See PEP 675 for details.
-
-        """
-        raise TypeError(f"{self} is not subscriptable")
-
-
-if hasattr(typing, "Self"):
-    Self = typing.Self
-else:
-    @_SpecialForm
-    def Self(self, params):
-        """Used to spell the type of "self" in classes.
-
-        Example::
-
-          from typing import Self
-
-          class ReturnsSelf:
-              def parse(self, data: bytes) -> Self:
-                  ...
-                  return self
-
-        """
-
-        raise TypeError(f"{self} is not subscriptable")
-
-
-if hasattr(typing, "Never"):
-    Never = typing.Never
-else:
-    @_SpecialForm
-    def Never(self, params):
-        """The bottom type, a type that has no members.
-
-        This can be used to define a function that should never be
-        called, or a function that never returns::
-
-            from typing_extensions import Never
-
-            def never_call_me(arg: Never) -> None:
-                pass
-
-            def int_or_str(arg: int | str) -> None:
-                never_call_me(arg)  # type checker error
-                match arg:
-                    case int():
-                        print("It's an int")
-                    case str():
-                        print("It's a str")
-                    case _:
-                        never_call_me(arg)  # ok, arg is of type Never
-
-        """
-
-        raise TypeError(f"{self} is not subscriptable")
-
-
-if hasattr(typing, 'Required'):
-    Required = typing.Required
-    NotRequired = typing.NotRequired
-elif sys.version_info[:2] >= (3, 9):
-    class _ExtensionsSpecialForm(typing._SpecialForm, _root=True):
-        def __repr__(self):
-            return 'typing_extensions.' + self._name
-
-    @_ExtensionsSpecialForm
-    def Required(self, parameters):
-        """A special typing construct to mark a key of a total=False TypedDict
-        as required. For example:
-
-            class Movie(TypedDict, total=False):
-                title: Required[str]
-                year: int
-
-            m = Movie(
-                title='The Matrix',  # typechecker error if key is omitted
-                year=1999,
-            )
-
-        There is no runtime checking that a required key is actually provided
-        when instantiating a related TypedDict.
-        """
-        item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
-        return typing._GenericAlias(self, (item,))
-
-    @_ExtensionsSpecialForm
-    def NotRequired(self, parameters):
-        """A special typing construct to mark a key of a TypedDict as
-        potentially missing. For example:
-
-            class Movie(TypedDict):
-                title: str
-                year: NotRequired[int]
-
-            m = Movie(
-                title='The Matrix',  # typechecker error if key is omitted
-                year=1999,
-            )
-        """
-        item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
-        return typing._GenericAlias(self, (item,))
-
-else:
-    class _RequiredForm(typing._SpecialForm, _root=True):
-        def __repr__(self):
-            return 'typing_extensions.' + self._name
-
-        def __getitem__(self, parameters):
-            item = typing._type_check(parameters,
-                                      f'{self._name} accepts only a single type.')
-            return typing._GenericAlias(self, (item,))
-
-    Required = _RequiredForm(
-        'Required',
-        doc="""A special typing construct to mark a key of a total=False TypedDict
-        as required. For example:
-
-            class Movie(TypedDict, total=False):
-                title: Required[str]
-                year: int
-
-            m = Movie(
-                title='The Matrix',  # typechecker error if key is omitted
-                year=1999,
-            )
-
-        There is no runtime checking that a required key is actually provided
-        when instantiating a related TypedDict.
-        """)
-    NotRequired = _RequiredForm(
-        'NotRequired',
-        doc="""A special typing construct to mark a key of a TypedDict as
-        potentially missing. For example:
-
-            class Movie(TypedDict):
-                title: str
-                year: NotRequired[int]
-
-            m = Movie(
-                title='The Matrix',  # typechecker error if key is omitted
-                year=1999,
-            )
-        """)
-
-
-if hasattr(typing, "Unpack"):  # 3.11+
-    Unpack = typing.Unpack
-elif sys.version_info[:2] >= (3, 9):
-    class _UnpackSpecialForm(typing._SpecialForm, _root=True):
-        def __repr__(self):
-            return 'typing_extensions.' + self._name
-
-    class _UnpackAlias(typing._GenericAlias, _root=True):
-        __class__ = typing.TypeVar
-
-    @_UnpackSpecialForm
-    def Unpack(self, parameters):
-        """A special typing construct to unpack a variadic type. For example:
-
-            Shape = TypeVarTuple('Shape')
-            Batch = NewType('Batch', int)
-
-            def add_batch_axis(
-                x: Array[Unpack[Shape]]
-            ) -> Array[Batch, Unpack[Shape]]: ...
-
-        """
-        item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
-        return _UnpackAlias(self, (item,))
-
-    def _is_unpack(obj):
-        return isinstance(obj, _UnpackAlias)
-
-else:
-    class _UnpackAlias(typing._GenericAlias, _root=True):
-        __class__ = typing.TypeVar
-
-    class _UnpackForm(typing._SpecialForm, _root=True):
-        def __repr__(self):
-            return 'typing_extensions.' + self._name
-
-        def __getitem__(self, parameters):
-            item = typing._type_check(parameters,
-                                      f'{self._name} accepts only a single type.')
-            return _UnpackAlias(self, (item,))
-
-    Unpack = _UnpackForm(
-        'Unpack',
-        doc="""A special typing construct to unpack a variadic type. For example:
-
-            Shape = TypeVarTuple('Shape')
-            Batch = NewType('Batch', int)
-
-            def add_batch_axis(
-                x: Array[Unpack[Shape]]
-            ) -> Array[Batch, Unpack[Shape]]: ...
-
-        """)
-
-    def _is_unpack(obj):
-        return isinstance(obj, _UnpackAlias)
-
-
-if hasattr(typing, "TypeVarTuple"):  # 3.11+
-
-    # Add default Parameter - PEP 696
-    class TypeVarTuple(typing.TypeVarTuple, _DefaultMixin, _root=True):
-        """Type variable tuple."""
-
-        def __init__(self, name, *, default=None):
-            super().__init__(name)
-            _DefaultMixin.__init__(self, default)
-
-            # for pickling:
-            try:
-                def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
-            except (AttributeError, ValueError):
-                def_mod = None
-            if def_mod != 'typing_extensions':
-                self.__module__ = def_mod
-
-else:
-    class TypeVarTuple(_DefaultMixin):
-        """Type variable tuple.
-
-        Usage::
-
-            Ts = TypeVarTuple('Ts')
-
-        In the same way that a normal type variable is a stand-in for a single
-        type such as ``int``, a type variable *tuple* is a stand-in for a *tuple*
-        type such as ``Tuple[int, str]``.
-
-        Type variable tuples can be used in ``Generic`` declarations.
-        Consider the following example::
-
-            class Array(Generic[*Ts]): ...
-
-        The ``Ts`` type variable tuple here behaves like ``tuple[T1, T2]``,
-        where ``T1`` and ``T2`` are type variables. To use these type variables
-        as type parameters of ``Array``, we must *unpack* the type variable tuple using
-        the star operator: ``*Ts``. The signature of ``Array`` then behaves
-        as if we had simply written ``class Array(Generic[T1, T2]): ...``.
-        In contrast to ``Generic[T1, T2]``, however, ``Generic[*Ts]`` allows
-        us to parameterise the class with an *arbitrary* number of type parameters.
-
-        Type variable tuples can be used anywhere a normal ``TypeVar`` can.
-        This includes class definitions, as shown above, as well as function
-        signatures and variable annotations::
-
-            class Array(Generic[*Ts]):
-
-                def __init__(self, shape: Tuple[*Ts]):
-                    self._shape: Tuple[*Ts] = shape
-
-                def get_shape(self) -> Tuple[*Ts]:
-                    return self._shape
-
-            shape = (Height(480), Width(640))
-            x: Array[Height, Width] = Array(shape)
-            y = abs(x)  # Inferred type is Array[Height, Width]
-            z = x + x   #        ...    is Array[Height, Width]
-            x.get_shape()  #     ...    is tuple[Height, Width]
-
-        """
-
-        # Trick Generic __parameters__.
-        __class__ = typing.TypeVar
-
-        def __iter__(self):
-            yield self.__unpacked__
-
-        def __init__(self, name, *, default=None):
-            self.__name__ = name
-            _DefaultMixin.__init__(self, default)
-
-            # for pickling:
-            try:
-                def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
-            except (AttributeError, ValueError):
-                def_mod = None
-            if def_mod != 'typing_extensions':
-                self.__module__ = def_mod
-
-            self.__unpacked__ = Unpack[self]
-
-        def __repr__(self):
-            return self.__name__
-
-        def __hash__(self):
-            return object.__hash__(self)
-
-        def __eq__(self, other):
-            return self is other
-
-        def __reduce__(self):
-            return self.__name__
-
-        def __init_subclass__(self, *args, **kwds):
-            if '_root' not in kwds:
-                raise TypeError("Cannot subclass special typing classes")
-
-
-if hasattr(typing, "reveal_type"):
-    reveal_type = typing.reveal_type
-else:
-    def reveal_type(__obj: T) -> T:
-        """Reveal the inferred type of a variable.
-
-        When a static type checker encounters a call to ``reveal_type()``,
-        it will emit the inferred type of the argument::
-
-            x: int = 1
-            reveal_type(x)
-
-        Running a static type checker (e.g., ``mypy``) on this example
-        will produce output similar to 'Revealed type is "builtins.int"'.
-
-        At runtime, the function prints the runtime type of the
-        argument and returns it unchanged.
-
-        """
-        print(f"Runtime type is {type(__obj).__name__!r}", file=sys.stderr)
-        return __obj
-
-
-if hasattr(typing, "assert_never"):
-    assert_never = typing.assert_never
-else:
-    def assert_never(__arg: Never) -> Never:
-        """Assert to the type checker that a line of code is unreachable.
-
-        Example::
-
-            def int_or_str(arg: int | str) -> None:
-                match arg:
-                    case int():
-                        print("It's an int")
-                    case str():
-                        print("It's a str")
-                    case _:
-                        assert_never(arg)
-
-        If a type checker finds that a call to assert_never() is
-        reachable, it will emit an error.
-
-        At runtime, this throws an exception when called.
-
-        """
-        raise AssertionError("Expected code to be unreachable")
-
-
-if hasattr(typing, 'dataclass_transform'):
-    dataclass_transform = typing.dataclass_transform
-else:
-    def dataclass_transform(
-        *,
-        eq_default: bool = True,
-        order_default: bool = False,
-        kw_only_default: bool = False,
-        field_specifiers: typing.Tuple[
-            typing.Union[typing.Type[typing.Any], typing.Callable[..., typing.Any]],
-            ...
-        ] = (),
-        **kwargs: typing.Any,
-    ) -> typing.Callable[[T], T]:
-        """Decorator that marks a function, class, or metaclass as providing
-        dataclass-like behavior.
-
-        Example:
-
-            from typing_extensions import dataclass_transform
-
-            _T = TypeVar("_T")
-
-            # Used on a decorator function
-            @dataclass_transform()
-            def create_model(cls: type[_T]) -> type[_T]:
-                ...
-                return cls
-
-            @create_model
-            class CustomerModel:
-                id: int
-                name: str
-
-            # Used on a base class
-            @dataclass_transform()
-            class ModelBase: ...
-
-            class CustomerModel(ModelBase):
-                id: int
-                name: str
-
-            # Used on a metaclass
-            @dataclass_transform()
-            class ModelMeta(type): ...
-
-            class ModelBase(metaclass=ModelMeta): ...
-
-            class CustomerModel(ModelBase):
-                id: int
-                name: str
-
-        Each of the ``CustomerModel`` classes defined in this example will now
-        behave similarly to a dataclass created with the ``@dataclasses.dataclass``
-        decorator. For example, the type checker will synthesize an ``__init__``
-        method.
-
-        The arguments to this decorator can be used to customize this behavior:
-        - ``eq_default`` indicates whether the ``eq`` parameter is assumed to be
-          True or False if it is omitted by the caller.
-        - ``order_default`` indicates whether the ``order`` parameter is
-          assumed to be True or False if it is omitted by the caller.
-        - ``kw_only_default`` indicates whether the ``kw_only`` parameter is
-          assumed to be True or False if it is omitted by the caller.
-        - ``field_specifiers`` specifies a static list of supported classes
-          or functions that describe fields, similar to ``dataclasses.field()``.
-
-        At runtime, this decorator records its arguments in the
-        ``__dataclass_transform__`` attribute on the decorated object.
-
-        See PEP 681 for details.
-
-        """
-        def decorator(cls_or_fn):
-            cls_or_fn.__dataclass_transform__ = {
-                "eq_default": eq_default,
-                "order_default": order_default,
-                "kw_only_default": kw_only_default,
-                "field_specifiers": field_specifiers,
-                "kwargs": kwargs,
-            }
-            return cls_or_fn
-        return decorator
-
-
-if hasattr(typing, "override"):
-    override = typing.override
-else:
-    _F = typing.TypeVar("_F", bound=typing.Callable[..., typing.Any])
-
-    def override(__arg: _F) -> _F:
-        """Indicate that a method is intended to override a method in a base class.
-
-        Usage:
-
-            class Base:
-                def method(self) -> None:
-                    pass
-
-            class Child(Base):
-                @override
-                def method(self) -> None:
-                    super().method()
-
-        When this decorator is applied to a method, the type checker will
-        validate that it overrides a method with the same name on a base class.
-        This helps prevent bugs that may occur when a base class is changed
-        without an equivalent change to a child class.
-
-        See PEP 698 for details.
-
-        """
-        return __arg
-
-
-# We have to do some monkey patching to deal with the dual nature of
-# Unpack/TypeVarTuple:
-# - We want Unpack to be a kind of TypeVar so it gets accepted in
-#   Generic[Unpack[Ts]]
-# - We want it to *not* be treated as a TypeVar for the purposes of
-#   counting generic parameters, so that when we subscript a generic,
-#   the runtime doesn't try to substitute the Unpack with the subscripted type.
-if not hasattr(typing, "TypeVarTuple"):
-    typing._collect_type_vars = _collect_type_vars
-    typing._check_generic = _check_generic
-
-
-# Backport typing.NamedTuple as it exists in Python 3.11.
-# In 3.11, support for defining generic `NamedTuple`s was added.
-# This was explicitly disallowed in 3.9-3.10, and only half-worked in <=3.8.
-if sys.version_info >= (3, 11):
-    NamedTuple = typing.NamedTuple
-else:
-    def _caller():
-        try:
-            return sys._getframe(2).f_globals.get('__name__', '__main__')
-        except (AttributeError, ValueError):  # For platforms without _getframe()
-            return None
-
-    def _make_nmtuple(name, types, module, defaults=()):
-        fields = [n for n, t in types]
-        annotations = {n: typing._type_check(t, f"field {n} annotation must be a type")
-                       for n, t in types}
-        nm_tpl = collections.namedtuple(name, fields,
-                                        defaults=defaults, module=module)
-        nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = annotations
-        # The `_field_types` attribute was removed in 3.9;
-        # in earlier versions, it is the same as the `__annotations__` attribute
-        if sys.version_info < (3, 9):
-            nm_tpl._field_types = annotations
-        return nm_tpl
-
-    _prohibited_namedtuple_fields = typing._prohibited
-    _special_namedtuple_fields = frozenset({'__module__', '__name__', '__annotations__'})
-
-    class _NamedTupleMeta(type):
-        def __new__(cls, typename, bases, ns):
-            assert _NamedTuple in bases
-            for base in bases:
-                if base is not _NamedTuple and base is not typing.Generic:
-                    raise TypeError(
-                        'can only inherit from a NamedTuple type and Generic')
-            bases = tuple(tuple if base is _NamedTuple else base for base in bases)
-            types = ns.get('__annotations__', {})
-            default_names = []
-            for field_name in types:
-                if field_name in ns:
-                    default_names.append(field_name)
-                elif default_names:
-                    raise TypeError(f"Non-default namedtuple field {field_name} "
-                                    f"cannot follow default field"
-                                    f"{'s' if len(default_names) > 1 else ''} "
-                                    f"{', '.join(default_names)}")
-            nm_tpl = _make_nmtuple(
-                typename, types.items(),
-                defaults=[ns[n] for n in default_names],
-                module=ns['__module__']
-            )
-            nm_tpl.__bases__ = bases
-            if typing.Generic in bases:
-                class_getitem = typing.Generic.__class_getitem__.__func__
-                nm_tpl.__class_getitem__ = classmethod(class_getitem)
-            # update from user namespace without overriding special namedtuple attributes
-            for key in ns:
-                if key in _prohibited_namedtuple_fields:
-                    raise AttributeError("Cannot overwrite NamedTuple attribute " + key)
-                elif key not in _special_namedtuple_fields and key not in nm_tpl._fields:
-                    setattr(nm_tpl, key, ns[key])
-            if typing.Generic in bases:
-                nm_tpl.__init_subclass__()
-            return nm_tpl
-
-    def NamedTuple(__typename, __fields=None, **kwargs):
-        if __fields is None:
-            __fields = kwargs.items()
-        elif kwargs:
-            raise TypeError("Either list of fields or keywords"
-                            " can be provided to NamedTuple, not both")
-        return _make_nmtuple(__typename, __fields, module=_caller())
-
-    NamedTuple.__doc__ = typing.NamedTuple.__doc__
-    _NamedTuple = type.__new__(_NamedTupleMeta, 'NamedTuple', (), {})
-
-    # On 3.8+, alter the signature so that it matches typing.NamedTuple.
-    # The signature of typing.NamedTuple on >=3.8 is invalid syntax in Python 3.7,
-    # so just leave the signature as it is on 3.7.
-    if sys.version_info >= (3, 8):
-        NamedTuple.__text_signature__ = '(typename, fields=None, /, **kwargs)'
-
-    def _namedtuple_mro_entries(bases):
-        assert NamedTuple in bases
-        return (_NamedTuple,)
-
-    NamedTuple.__mro_entries__ = _namedtuple_mro_entries
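The whole vendored `typing_extensions` module deleted above is built from one repeated feature-detection idiom: re-export the stdlib object when the running interpreter already provides it, otherwise define a local backport. With Python < 3.8 support dropped, its only in-tree consumer goes away (see the `platformdirs` comment removed from `vendored.txt` below), so the shim can be deleted wholesale. A minimal sketch of the idiom, with an illustrative placeholder where the real shim constructed a `_SpecialForm`:

```python
import typing

# Prefer the stdlib implementation where it exists (TypeGuard landed in
# Python 3.10); otherwise fall back to a locally defined backport.
if hasattr(typing, "TypeGuard"):
    TypeGuard = typing.TypeGuard
else:
    TypeGuard = None  # placeholder; the deleted shim built a _SpecialForm here

print(TypeGuard)
```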
diff --git a/lib/pkg_resources/_vendor/vendored.txt b/lib/pkg_resources/_vendor/vendored.txt
index 4cd4ab8c..0c8fdc38 100644
--- a/lib/pkg_resources/_vendor/vendored.txt
+++ b/lib/pkg_resources/_vendor/vendored.txt
@@ -1,11 +1,13 @@
-packaging==23.1
+packaging==24
 
 platformdirs==2.6.2
-# required for platformdirs on Python < 3.8
-typing_extensions==4.4.0
 
 jaraco.text==3.7.0
 # required for jaraco.text on older Pythons
 importlib_resources==5.10.2
 # required for importlib_resources on older Pythons
 zipp==3.7.0
+# required for jaraco.functools
+more_itertools==10.2.0
+# required for jaraco.context on older Pythons
+backports.tarfile
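`vendored.txt` is the pin list from which the `_vendor` tree is rebuilt (the cog hook below imports from `tools.vendored`, where the actual tooling lives). A hedged sketch of what a refresh step might look like; the paths and the use of `pip --target` are assumptions, not the project's exact procedure:

```python
import subprocess
import sys

# Reinstall the pinned set into the vendored directory rather than
# site-packages; --target keeps the tree self-contained.
subprocess.run(
    [
        sys.executable, "-m", "pip", "install",
        "-r", "lib/pkg_resources/_vendor/vendored.txt",
        "--target", "lib/pkg_resources/_vendor",
    ],
    check=True,
)
```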
diff --git a/lib/pkg_resources/extern/__init__.py b/lib/pkg_resources/extern/__init__.py
index 948bcc60..a1b7490d 100644
--- a/lib/pkg_resources/extern/__init__.py
+++ b/lib/pkg_resources/extern/__init__.py
@@ -1,5 +1,8 @@
+from importlib.machinery import ModuleSpec
 import importlib.util
 import sys
+from types import ModuleType
+from typing import Iterable, Optional, Sequence
 
 
 class VendorImporter:
@@ -8,7 +11,12 @@ class VendorImporter:
     or otherwise naturally-installed packages from root_name.
     """
 
-    def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
+    def __init__(
+        self,
+        root_name: str,
+        vendored_names: Iterable[str] = (),
+        vendor_pkg: Optional[str] = None,
+    ):
         self.root_name = root_name
         self.vendored_names = set(vendored_names)
         self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
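The default `vendor_pkg` falls out of a plain string substitution on the root package name, which a short sketch makes concrete:

```python
# Mirrors the fallback in __init__ above: without an explicit vendor_pkg,
# 'extern' in the root name is swapped for '_vendor'.
root_name = "pkg_resources.extern"
vendor_pkg = None
resolved = vendor_pkg or root_name.replace("extern", "_vendor")
assert resolved == "pkg_resources._vendor"
```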
@@ -26,7 +34,7 @@ class VendorImporter:
         root, base, target = fullname.partition(self.root_name + '.')
         return not root and any(map(target.startswith, self.vendored_names))
 
-    def load_module(self, fullname):
+    def load_module(self, fullname: str):
         """
         Iterate over the search path to locate and load fullname.
         """
@@ -48,16 +56,22 @@ class VendorImporter:
                 "distribution.".format(**locals())
             )
 
-    def create_module(self, spec):
+    def create_module(self, spec: ModuleSpec):
         return self.load_module(spec.name)
 
-    def exec_module(self, module):
+    def exec_module(self, module: ModuleType):
         pass
 
-    def find_spec(self, fullname, path=None, target=None):
+    def find_spec(
+        self,
+        fullname: str,
+        path: Optional[Sequence[str]] = None,
+        target: Optional[ModuleType] = None,
+    ):
         """Return a module spec for vendored names."""
         return (
-            importlib.util.spec_from_loader(fullname, self)
+            # This should fix itself in the next mypy release: https://github.com/python/typeshed/pull/11890
+            importlib.util.spec_from_loader(fullname, self)  # type: ignore[arg-type]
             if self._module_matches_namespace(fullname)
             else None
         )
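`find_spec()` is what plugs this importer into the PEP 451 protocol: it returns a `ModuleSpec` that names the importer itself as loader, so the import machinery later calls `create_module()`/`exec_module()` on the same object. A standalone illustration of that round trip with a toy loader (not the vendored one):

```python
import importlib.util

class ToyLoader:
    def create_module(self, spec):
        return None  # None tells the machinery to create a default module

    def exec_module(self, module):
        module.answer = 42  # populate the freshly created module

spec = importlib.util.spec_from_loader("toy", ToyLoader())
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
print(mod.answer)  # 42
```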
@@ -70,11 +84,20 @@ class VendorImporter:
             sys.meta_path.append(self)
 
 
+# [[[cog
+# import cog
+# from tools.vendored import yield_top_level
+# names = "\n".join(f"    {x!r}," for x in yield_top_level('pkg_resources'))
+# cog.outl(f"names = (\n{names}\n)")
+# ]]]
 names = (
+    'backports',
+    'importlib_resources',
+    'jaraco',
+    'more_itertools',
     'packaging',
     'platformdirs',
-    'jaraco',
-    'importlib_resources',
-    'more_itertools',
+    'zipp',
 )
+# [[[end]]]
 VendorImporter(__name__, names).install()
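The `names` tuple between the `[[[cog` markers is generated by running cog with `tools.vendored.yield_top_level` rather than edited by hand, and the final `install()` call registers the importer on `sys.meta_path` at most once. A rough sanity check of the installed hook, assuming `pkg_resources` is importable in the current environment:

```python
import sys

from pkg_resources.extern import packaging  # resolved via the meta-path hook

# install() appends the importer to sys.meta_path only if it isn't there yet.
assert any(type(finder).__name__ == "VendorImporter" for finder in sys.meta_path)
print(packaging.__version__)
```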