Mirror of https://github.com/SickGear/SickGear.git (synced 2024-12-18 08:43:37 +00:00)

Commit b68ce0378a (parent 2ea74f3cce)
Update package resource API 67.5.1 (f51eccd) to 68.1.2 (1ef36f2).

19 changed files with 847 additions and 226 deletions
@@ -3,6 +3,7 @@
 * Update Beautiful Soup 4.11.1 (r642) to 4.12.2
 * Update certifi 2023.05.07 to 2023.07.22
 * Update feedparser 6.0.10 (859ac57) to 6.0.10 (9865dec)
+* Update package resource API 67.5.1 (f51eccd) to 68.1.2 (1ef36f2)
 * Update soupsieve 2.3.2.post1 (792d566) to 2.4.1 (2e66beb)
 * Update Tornado Web Server 6.3.2 (e3aa6c5) to 6.3.3 (e4d6984)
 * Fix regex that was not using py312 notation

@@ -13,11 +13,8 @@ The package resource API is designed to work with normal filesystem packages,
 .zip files and with custom PEP 302 loaders that support the ``get_data()``
 method.

-This module is deprecated. Users are directed to
-`importlib.resources <https://docs.python.org/3/library/importlib.resources.html>`_
-and
-`importlib.metadata <https://docs.python.org/3/library/importlib.metadata.html>`_
-instead.
+This module is deprecated. Users are directed to :mod:`importlib.resources`,
+:mod:`importlib.metadata` and :pypi:`packaging` instead.
 """

 import sys
@@ -118,7 +115,12 @@ _namespace_handlers = None
 _namespace_packages = None


-warnings.warn("pkg_resources is deprecated as an API", DeprecationWarning)
+warnings.warn(
+    "pkg_resources is deprecated as an API. "
+    "See https://setuptools.pypa.io/en/latest/pkg_resources.html",
+    DeprecationWarning,
+    stacklevel=2,
+)


 _PEP440_FALLBACK = re.compile(r"^v?(?P<safe>(?:[0-9]+!)?[0-9]+(?:\.[0-9]+)*)", re.I)
@@ -1416,7 +1418,7 @@ def _forgiving_version(version):
     match = _PEP440_FALLBACK.search(version)
     if match:
         safe = match["safe"]
-        rest = version[len(safe):]
+        rest = version[len(safe) :]
     else:
         safe = "0"
         rest = version
@@ -1659,10 +1661,9 @@ is not allowed.

         # for compatibility, warn; in future
         # raise ValueError(msg)
-        warnings.warn(
+        issue_warning(
             msg[:-1] + " and will raise exceptions in a future release.",
             DeprecationWarning,
-            stacklevel=4,
         )

     def _get(self, path):
@@ -3046,6 +3047,9 @@ class Distribution:
         except ValueError:
             issue_warning("Unbuilt egg for " + repr(self))
             return False
+        except SystemError:
+            # TODO: remove this except clause when python/cpython#103632 is fixed.
+            return False
         return True

     def clone(self, **kw):
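
The broadened message above is emitted once, at import time. A minimal sketch of how a downstream consumer can keep its logs quiet while migrating (standard-library calls only):

import warnings

with warnings.catch_warnings():
    warnings.filterwarnings(
        "ignore",
        message="pkg_resources is deprecated as an API",
        category=DeprecationWarning,
    )
    import pkg_resources  # the warning fires here, and is suppressed
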
@@ -4,6 +4,7 @@ import inspect
 import collections
 import types
 import itertools
+import warnings

 import pkg_resources.extern.more_itertools

@@ -266,11 +267,33 @@ def result_invoke(action):
     return wrap


-def call_aside(f, *args, **kwargs):
+def invoke(f, *args, **kwargs):
     """
     Call a function for its side effect after initialization.

-    >>> @call_aside
+    The benefit of using the decorator instead of simply invoking a function
+    after defining it is that it makes explicit the author's intent for the
+    function to be called immediately. Whereas if one simply calls the
+    function immediately, it's less obvious if that was intentional or
+    incidental. It also avoids repeating the name - the two actions, defining
+    the function and calling it immediately are modeled separately, but linked
+    by the decorator construct.
+
+    The benefit of having a function construct (opposed to just invoking some
+    behavior inline) is to serve as a scope in which the behavior occurs. It
+    avoids polluting the global namespace with local variables, provides an
+    anchor on which to attach documentation (docstring), keeps the behavior
+    logically separated (instead of conceptually separated or not separated at
+    all), and provides potential to re-use the behavior for testing or other
+    purposes.
+
+    This function is named as a pithy way to communicate, "call this function
+    primarily for its side effect", or "while defining this function, also
+    take it aside and call it". It exists because there's no Python construct
+    for "define and call" (nor should there be, as decorators serve this need
+    just fine). The behavior happens immediately and synchronously.
+
+    >>> @invoke
     ... def func(): print("called")
     called
     >>> func()
@@ -278,7 +301,7 @@ def call_aside(f, *args, **kwargs):

     Use functools.partial to pass parameters to the initial call

-    >>> @functools.partial(call_aside, name='bingo')
+    >>> @functools.partial(invoke, name='bingo')
     ... def func(name): print("called with", name)
     called with bingo
     """
@@ -286,6 +309,14 @@ def call_aside(f, *args, **kwargs):
     return f


+def call_aside(*args, **kwargs):
+    """
+    Deprecated name for invoke.
+    """
+    warnings.warn("call_aside is deprecated, use invoke", DeprecationWarning)
+    return invoke(*args, **kwargs)
+
+
 class Throttler:
     """
     Rate-limit a function (or other callable)
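
A quick sketch of the renamed decorator (the vendored import path is an assumption): invoke runs the function once as soon as it is defined, and the retained call_aside shim now raises a DeprecationWarning first.

import functools

from pkg_resources._vendor.jaraco.functools import invoke  # path assumed

@functools.partial(invoke, name="bingo")
def announce(name):
    print("called with", name)  # runs once, immediately: "called with bingo"
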
@@ -3,4 +3,4 @@
 from .more import *  # noqa
 from .recipes import *  # noqa

-__version__ = '9.0.0'
+__version__ = '9.1.0'

@@ -68,6 +68,7 @@ __all__ = [
     'exactly_n',
     'filter_except',
     'first',
+    'gray_product',
     'groupby_transform',
     'ichunked',
     'iequals',
@@ -658,6 +659,7 @@ def distinct_permutations(iterable, r=None):
     [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]

     """
+
     # Algorithm: https://w.wiki/Qai
     def _full(A):
         while True:
@@ -1301,7 +1303,7 @@ def split_at(iterable, pred, maxsplit=-1, keep_separator=False):
     [[0], [2], [4, 5, 6, 7, 8, 9]]

     By default, the delimiting items are not included in the output.
-    The include them, set *keep_separator* to ``True``.
+    To include them, set *keep_separator* to ``True``.

         >>> list(split_at('abcdcba', lambda x: x == 'b', keep_separator=True))
         [['a'], ['b'], ['c', 'd', 'c'], ['b'], ['a']]
@@ -1391,7 +1393,9 @@ def split_after(iterable, pred, maxsplit=-1):
         if pred(item) and buf:
             yield buf
             if maxsplit == 1:
-                yield list(it)
+                buf = list(it)
+                if buf:
+                    yield buf
                 return
             buf = []
             maxsplit -= 1
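
A short sketch of the behavior this hunk fixes: once *maxsplit* is exhausted, the remainder is only emitted when it is non-empty, so a trailing empty group no longer appears when the separator is the last item.

from more_itertools import split_after

# 9.0.0 yielded a spurious trailing group: [['a', '1'], []]
print(list(split_after("a1", str.isdigit, maxsplit=1)))  # 9.1.0: [['a', '1']]
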
@@ -2914,6 +2918,7 @@ def make_decorator(wrapping_func, result_index=0):
     '7'

     """
+
     # See https://sites.google.com/site/bbayles/index/decorator_factory for
     # notes on how this works.
     def decorator(*wrapping_args, **wrapping_kwargs):
@@ -3464,7 +3469,6 @@ def _sample_unweighted(iterable, k):
         next_index = k + floor(log(random()) / log(1 - W))

     for index, element in enumerate(iterable, k):
-
         if index == next_index:
             reservoir[randrange(k)] = element
             # The new W is the largest in a sample of k U(0, `old_W`) numbers
@@ -4283,7 +4287,6 @@ def minmax(iterable_or_value, *others, key=None, default=_marker):
         lo_key = hi_key = key(lo)

     for x, y in zip_longest(it, it, fillvalue=lo):
-
         x_key, y_key = key(x), key(y)

         if y_key < x_key:
@@ -4344,3 +4347,45 @@ def constrained_batches(

     if batch:
         yield tuple(batch)
+
+
+def gray_product(*iterables):
+    """Like :func:`itertools.product`, but return tuples in an order such
+    that only one element in the generated tuple changes from one iteration
+    to the next.
+
+        >>> list(gray_product('AB','CD'))
+        [('A', 'C'), ('B', 'C'), ('B', 'D'), ('A', 'D')]
+
+    This function consumes all of the input iterables before producing output.
+    If any of the input iterables have fewer than two items, ``ValueError``
+    is raised.
+
+    For information on the algorithm, see
+    `this section <https://www-cs-faculty.stanford.edu/~knuth/fasc2a.ps.gz>`__
+    of Donald Knuth's *The Art of Computer Programming*.
+    """
+    all_iterables = tuple(tuple(x) for x in iterables)
+    iterable_count = len(all_iterables)
+    for iterable in all_iterables:
+        if len(iterable) < 2:
+            raise ValueError("each iterable must have two or more items")
+
+    # This is based on "Algorithm H" from section 7.2.1.1, page 20.
+    # a holds the indexes of the source iterables for the n-tuple to be yielded
+    # f is the array of "focus pointers"
+    # o is the array of "directions"
+    a = [0] * iterable_count
+    f = list(range(iterable_count + 1))
+    o = [1] * iterable_count
+    while True:
+        yield tuple(all_iterables[i][a[i]] for i in range(iterable_count))
+        j = f[0]
+        f[0] = 0
+        if j == iterable_count:
+            break
+        a[j] = a[j] + o[j]
+        if a[j] == 0 or a[j] == len(all_iterables[j]) - 1:
+            o[j] = -o[j]
+        f[j] = f[j + 1]
+        f[j + 1] = j + 1
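
The Gray-code ordering is easiest to see with more than two inputs; each tuple differs from its predecessor in exactly one position:

from more_itertools import gray_product

for t in gray_product("AB", "CD", "EF"):
    print(t)  # ('A','C','E'), ('B','C','E'), ('B','D','E'), ('A','D','E'), ...
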
@@ -1,26 +1,25 @@
 """Stubs for more_itertools.more"""
+from __future__ import annotations

+from types import TracebackType
 from typing import (
     Any,
     Callable,
     Container,
-    Dict,
+    ContextManager,
     Generic,
     Hashable,
     Iterable,
     Iterator,
-    List,
-    Optional,
+    overload,
     Reversible,
     Sequence,
     Sized,
-    Tuple,
-    Union,
+    Type,
     TypeVar,
     type_check_only,
 )
-from types import TracebackType
-from typing_extensions import ContextManager, Protocol, Type, overload
+from typing_extensions import Protocol

 # Type and type variable definitions
 _T = TypeVar('_T')
@@ -31,7 +30,7 @@ _V = TypeVar('_V')
 _W = TypeVar('_W')
 _T_co = TypeVar('_T_co', covariant=True)
 _GenFn = TypeVar('_GenFn', bound=Callable[..., Iterator[object]])
-_Raisable = Union[BaseException, 'Type[BaseException]']
+_Raisable = BaseException | Type[BaseException]

 @type_check_only
 class _SizedIterable(Protocol[_T_co], Sized, Iterable[_T_co]): ...
@@ -39,23 +38,25 @@ class _SizedIterable(Protocol[_T_co], Sized, Iterable[_T_co]): ...
 @type_check_only
 class _SizedReversible(Protocol[_T_co], Sized, Reversible[_T_co]): ...

+@type_check_only
+class _SupportsSlicing(Protocol[_T_co]):
+    def __getitem__(self, __k: slice) -> _T_co: ...
+
 def chunked(
-    iterable: Iterable[_T], n: Optional[int], strict: bool = ...
-) -> Iterator[List[_T]]: ...
+    iterable: Iterable[_T], n: int | None, strict: bool = ...
+) -> Iterator[list[_T]]: ...
 @overload
 def first(iterable: Iterable[_T]) -> _T: ...
 @overload
-def first(iterable: Iterable[_T], default: _U) -> Union[_T, _U]: ...
+def first(iterable: Iterable[_T], default: _U) -> _T | _U: ...
 @overload
 def last(iterable: Iterable[_T]) -> _T: ...
 @overload
-def last(iterable: Iterable[_T], default: _U) -> Union[_T, _U]: ...
+def last(iterable: Iterable[_T], default: _U) -> _T | _U: ...
 @overload
 def nth_or_last(iterable: Iterable[_T], n: int) -> _T: ...
 @overload
-def nth_or_last(
-    iterable: Iterable[_T], n: int, default: _U
-) -> Union[_T, _U]: ...
+def nth_or_last(iterable: Iterable[_T], n: int, default: _U) -> _T | _U: ...

 class peekable(Generic[_T], Iterator[_T]):
     def __init__(self, iterable: Iterable[_T]) -> None: ...
@@ -64,13 +65,13 @@ class peekable(Generic[_T], Iterator[_T]):
     @overload
     def peek(self) -> _T: ...
     @overload
-    def peek(self, default: _U) -> Union[_T, _U]: ...
+    def peek(self, default: _U) -> _T | _U: ...
     def prepend(self, *items: _T) -> None: ...
     def __next__(self) -> _T: ...
     @overload
     def __getitem__(self, index: int) -> _T: ...
     @overload
-    def __getitem__(self, index: slice) -> List[_T]: ...
+    def __getitem__(self, index: slice) -> list[_T]: ...

 def consumer(func: _GenFn) -> _GenFn: ...
 def ilen(iterable: Iterable[object]) -> int: ...
@@ -80,42 +81,42 @@ def with_iter(
 ) -> Iterator[_T]: ...
 def one(
     iterable: Iterable[_T],
-    too_short: Optional[_Raisable] = ...,
-    too_long: Optional[_Raisable] = ...,
+    too_short: _Raisable | None = ...,
+    too_long: _Raisable | None = ...,
 ) -> _T: ...
 def raise_(exception: _Raisable, *args: Any) -> None: ...
 def strictly_n(
     iterable: Iterable[_T],
     n: int,
-    too_short: Optional[_GenFn] = ...,
-    too_long: Optional[_GenFn] = ...,
-) -> List[_T]: ...
+    too_short: _GenFn | None = ...,
+    too_long: _GenFn | None = ...,
+) -> list[_T]: ...
 def distinct_permutations(
-    iterable: Iterable[_T], r: Optional[int] = ...
-) -> Iterator[Tuple[_T, ...]]: ...
+    iterable: Iterable[_T], r: int | None = ...
+) -> Iterator[tuple[_T, ...]]: ...
 def intersperse(
     e: _U, iterable: Iterable[_T], n: int = ...
-) -> Iterator[Union[_T, _U]]: ...
-def unique_to_each(*iterables: Iterable[_T]) -> List[List[_T]]: ...
+) -> Iterator[_T | _U]: ...
+def unique_to_each(*iterables: Iterable[_T]) -> list[list[_T]]: ...
 @overload
 def windowed(
     seq: Iterable[_T], n: int, *, step: int = ...
-) -> Iterator[Tuple[Optional[_T], ...]]: ...
+) -> Iterator[tuple[_T | None, ...]]: ...
 @overload
 def windowed(
     seq: Iterable[_T], n: int, fillvalue: _U, step: int = ...
-) -> Iterator[Tuple[Union[_T, _U], ...]]: ...
-def substrings(iterable: Iterable[_T]) -> Iterator[Tuple[_T, ...]]: ...
+) -> Iterator[tuple[_T | _U, ...]]: ...
+def substrings(iterable: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ...
 def substrings_indexes(
     seq: Sequence[_T], reverse: bool = ...
-) -> Iterator[Tuple[Sequence[_T], int, int]]: ...
+) -> Iterator[tuple[Sequence[_T], int, int]]: ...

 class bucket(Generic[_T, _U], Container[_U]):
     def __init__(
         self,
         iterable: Iterable[_T],
         key: Callable[[_T], _U],
-        validator: Optional[Callable[[object], object]] = ...,
+        validator: Callable[[object], object] | None = ...,
     ) -> None: ...
     def __contains__(self, value: object) -> bool: ...
     def __iter__(self) -> Iterator[_U]: ...
@@ -123,109 +124,105 @@ class bucket(Generic[_T, _U], Container[_U]):

 def spy(
     iterable: Iterable[_T], n: int = ...
-) -> Tuple[List[_T], Iterator[_T]]: ...
+) -> tuple[list[_T], Iterator[_T]]: ...
 def interleave(*iterables: Iterable[_T]) -> Iterator[_T]: ...
 def interleave_longest(*iterables: Iterable[_T]) -> Iterator[_T]: ...
 def interleave_evenly(
-    iterables: List[Iterable[_T]], lengths: Optional[List[int]] = ...
+    iterables: list[Iterable[_T]], lengths: list[int] | None = ...
 ) -> Iterator[_T]: ...
 def collapse(
     iterable: Iterable[Any],
-    base_type: Optional[type] = ...,
-    levels: Optional[int] = ...,
+    base_type: type | None = ...,
+    levels: int | None = ...,
 ) -> Iterator[Any]: ...
 @overload
 def side_effect(
     func: Callable[[_T], object],
     iterable: Iterable[_T],
     chunk_size: None = ...,
-    before: Optional[Callable[[], object]] = ...,
-    after: Optional[Callable[[], object]] = ...,
+    before: Callable[[], object] | None = ...,
+    after: Callable[[], object] | None = ...,
 ) -> Iterator[_T]: ...
 @overload
 def side_effect(
-    func: Callable[[List[_T]], object],
+    func: Callable[[list[_T]], object],
     iterable: Iterable[_T],
     chunk_size: int,
-    before: Optional[Callable[[], object]] = ...,
-    after: Optional[Callable[[], object]] = ...,
+    before: Callable[[], object] | None = ...,
+    after: Callable[[], object] | None = ...,
 ) -> Iterator[_T]: ...
 def sliced(
-    seq: Sequence[_T], n: int, strict: bool = ...
-) -> Iterator[Sequence[_T]]: ...
+    seq: _SupportsSlicing[_T], n: int, strict: bool = ...
+) -> Iterator[_T]: ...
 def split_at(
     iterable: Iterable[_T],
     pred: Callable[[_T], object],
     maxsplit: int = ...,
     keep_separator: bool = ...,
-) -> Iterator[List[_T]]: ...
+) -> Iterator[list[_T]]: ...
 def split_before(
     iterable: Iterable[_T], pred: Callable[[_T], object], maxsplit: int = ...
-) -> Iterator[List[_T]]: ...
+) -> Iterator[list[_T]]: ...
 def split_after(
     iterable: Iterable[_T], pred: Callable[[_T], object], maxsplit: int = ...
-) -> Iterator[List[_T]]: ...
+) -> Iterator[list[_T]]: ...
 def split_when(
     iterable: Iterable[_T],
     pred: Callable[[_T, _T], object],
     maxsplit: int = ...,
-) -> Iterator[List[_T]]: ...
+) -> Iterator[list[_T]]: ...
 def split_into(
-    iterable: Iterable[_T], sizes: Iterable[Optional[int]]
-) -> Iterator[List[_T]]: ...
+    iterable: Iterable[_T], sizes: Iterable[int | None]
+) -> Iterator[list[_T]]: ...
 @overload
 def padded(
     iterable: Iterable[_T],
     *,
-    n: Optional[int] = ...,
+    n: int | None = ...,
     next_multiple: bool = ...,
-) -> Iterator[Optional[_T]]: ...
+) -> Iterator[_T | None]: ...
 @overload
 def padded(
     iterable: Iterable[_T],
     fillvalue: _U,
-    n: Optional[int] = ...,
+    n: int | None = ...,
     next_multiple: bool = ...,
-) -> Iterator[Union[_T, _U]]: ...
+) -> Iterator[_T | _U]: ...
 @overload
 def repeat_last(iterable: Iterable[_T]) -> Iterator[_T]: ...
 @overload
-def repeat_last(
-    iterable: Iterable[_T], default: _U
-) -> Iterator[Union[_T, _U]]: ...
-def distribute(n: int, iterable: Iterable[_T]) -> List[Iterator[_T]]: ...
+def repeat_last(iterable: Iterable[_T], default: _U) -> Iterator[_T | _U]: ...
+def distribute(n: int, iterable: Iterable[_T]) -> list[Iterator[_T]]: ...
 @overload
 def stagger(
     iterable: Iterable[_T],
     offsets: _SizedIterable[int] = ...,
     longest: bool = ...,
-) -> Iterator[Tuple[Optional[_T], ...]]: ...
+) -> Iterator[tuple[_T | None, ...]]: ...
 @overload
 def stagger(
     iterable: Iterable[_T],
     offsets: _SizedIterable[int] = ...,
     longest: bool = ...,
     fillvalue: _U = ...,
-) -> Iterator[Tuple[Union[_T, _U], ...]]: ...
+) -> Iterator[tuple[_T | _U, ...]]: ...

 class UnequalIterablesError(ValueError):
-    def __init__(
-        self, details: Optional[Tuple[int, int, int]] = ...
-    ) -> None: ...
+    def __init__(self, details: tuple[int, int, int] | None = ...) -> None: ...

 @overload
-def zip_equal(__iter1: Iterable[_T1]) -> Iterator[Tuple[_T1]]: ...
+def zip_equal(__iter1: Iterable[_T1]) -> Iterator[tuple[_T1]]: ...
 @overload
 def zip_equal(
     __iter1: Iterable[_T1], __iter2: Iterable[_T2]
-) -> Iterator[Tuple[_T1, _T2]]: ...
+) -> Iterator[tuple[_T1, _T2]]: ...
 @overload
 def zip_equal(
     __iter1: Iterable[_T],
     __iter2: Iterable[_T],
     __iter3: Iterable[_T],
     *iterables: Iterable[_T],
-) -> Iterator[Tuple[_T, ...]]: ...
+) -> Iterator[tuple[_T, ...]]: ...
 @overload
 def zip_offset(
     __iter1: Iterable[_T1],
@@ -233,7 +230,7 @@ def zip_offset(
     offsets: _SizedIterable[int],
     longest: bool = ...,
     fillvalue: None = None,
-) -> Iterator[Tuple[Optional[_T1]]]: ...
+) -> Iterator[tuple[_T1 | None]]: ...
 @overload
 def zip_offset(
     __iter1: Iterable[_T1],
@@ -242,7 +239,7 @@ def zip_offset(
     offsets: _SizedIterable[int],
     longest: bool = ...,
     fillvalue: None = None,
-) -> Iterator[Tuple[Optional[_T1], Optional[_T2]]]: ...
+) -> Iterator[tuple[_T1 | None, _T2 | None]]: ...
 @overload
 def zip_offset(
     __iter1: Iterable[_T],
@@ -252,7 +249,7 @@ def zip_offset(
     offsets: _SizedIterable[int],
     longest: bool = ...,
     fillvalue: None = None,
-) -> Iterator[Tuple[Optional[_T], ...]]: ...
+) -> Iterator[tuple[_T | None, ...]]: ...
 @overload
 def zip_offset(
     __iter1: Iterable[_T1],
@@ -260,7 +257,7 @@ def zip_offset(
     offsets: _SizedIterable[int],
     longest: bool = ...,
     fillvalue: _U,
-) -> Iterator[Tuple[Union[_T1, _U]]]: ...
+) -> Iterator[tuple[_T1 | _U]]: ...
 @overload
 def zip_offset(
     __iter1: Iterable[_T1],
@@ -269,7 +266,7 @@ def zip_offset(
     offsets: _SizedIterable[int],
     longest: bool = ...,
     fillvalue: _U,
-) -> Iterator[Tuple[Union[_T1, _U], Union[_T2, _U]]]: ...
+) -> Iterator[tuple[_T1 | _U, _T2 | _U]]: ...
 @overload
 def zip_offset(
     __iter1: Iterable[_T],
@@ -279,82 +276,80 @@ def zip_offset(
     offsets: _SizedIterable[int],
     longest: bool = ...,
     fillvalue: _U,
-) -> Iterator[Tuple[Union[_T, _U], ...]]: ...
+) -> Iterator[tuple[_T | _U, ...]]: ...
 def sort_together(
     iterables: Iterable[Iterable[_T]],
     key_list: Iterable[int] = ...,
-    key: Optional[Callable[..., Any]] = ...,
+    key: Callable[..., Any] | None = ...,
     reverse: bool = ...,
-) -> List[Tuple[_T, ...]]: ...
-def unzip(iterable: Iterable[Sequence[_T]]) -> Tuple[Iterator[_T], ...]: ...
-def divide(n: int, iterable: Iterable[_T]) -> List[Iterator[_T]]: ...
+) -> list[tuple[_T, ...]]: ...
+def unzip(iterable: Iterable[Sequence[_T]]) -> tuple[Iterator[_T], ...]: ...
+def divide(n: int, iterable: Iterable[_T]) -> list[Iterator[_T]]: ...
 def always_iterable(
     obj: object,
-    base_type: Union[
-        type, Tuple[Union[type, Tuple[Any, ...]], ...], None
-    ] = ...,
+    base_type: type | tuple[type | tuple[Any, ...], ...] | None = ...,
 ) -> Iterator[Any]: ...
 def adjacent(
     predicate: Callable[[_T], bool],
     iterable: Iterable[_T],
     distance: int = ...,
-) -> Iterator[Tuple[bool, _T]]: ...
+) -> Iterator[tuple[bool, _T]]: ...
 @overload
 def groupby_transform(
     iterable: Iterable[_T],
     keyfunc: None = None,
     valuefunc: None = None,
     reducefunc: None = None,
-) -> Iterator[Tuple[_T, Iterator[_T]]]: ...
+) -> Iterator[tuple[_T, Iterator[_T]]]: ...
 @overload
 def groupby_transform(
     iterable: Iterable[_T],
     keyfunc: Callable[[_T], _U],
     valuefunc: None,
     reducefunc: None,
-) -> Iterator[Tuple[_U, Iterator[_T]]]: ...
+) -> Iterator[tuple[_U, Iterator[_T]]]: ...
 @overload
 def groupby_transform(
     iterable: Iterable[_T],
     keyfunc: None,
     valuefunc: Callable[[_T], _V],
     reducefunc: None,
-) -> Iterable[Tuple[_T, Iterable[_V]]]: ...
+) -> Iterable[tuple[_T, Iterable[_V]]]: ...
 @overload
 def groupby_transform(
     iterable: Iterable[_T],
     keyfunc: Callable[[_T], _U],
     valuefunc: Callable[[_T], _V],
     reducefunc: None,
-) -> Iterable[Tuple[_U, Iterator[_V]]]: ...
+) -> Iterable[tuple[_U, Iterator[_V]]]: ...
 @overload
 def groupby_transform(
     iterable: Iterable[_T],
     keyfunc: None,
     valuefunc: None,
     reducefunc: Callable[[Iterator[_T]], _W],
-) -> Iterable[Tuple[_T, _W]]: ...
+) -> Iterable[tuple[_T, _W]]: ...
 @overload
 def groupby_transform(
     iterable: Iterable[_T],
     keyfunc: Callable[[_T], _U],
     valuefunc: None,
     reducefunc: Callable[[Iterator[_T]], _W],
-) -> Iterable[Tuple[_U, _W]]: ...
+) -> Iterable[tuple[_U, _W]]: ...
 @overload
 def groupby_transform(
     iterable: Iterable[_T],
     keyfunc: None,
     valuefunc: Callable[[_T], _V],
     reducefunc: Callable[[Iterable[_V]], _W],
-) -> Iterable[Tuple[_T, _W]]: ...
+) -> Iterable[tuple[_T, _W]]: ...
 @overload
 def groupby_transform(
     iterable: Iterable[_T],
     keyfunc: Callable[[_T], _U],
     valuefunc: Callable[[_T], _V],
     reducefunc: Callable[[Iterable[_V]], _W],
-) -> Iterable[Tuple[_U, _W]]: ...
+) -> Iterable[tuple[_U, _W]]: ...

 class numeric_range(Generic[_T, _U], Sequence[_T], Hashable, Reversible[_T]):
     @overload
@@ -375,22 +370,22 @@ class numeric_range(Generic[_T, _U], Sequence[_T], Hashable, Reversible[_T]):
     def __len__(self) -> int: ...
     def __reduce__(
         self,
-    ) -> Tuple[Type[numeric_range[_T, _U]], Tuple[_T, _T, _U]]: ...
+    ) -> tuple[Type[numeric_range[_T, _U]], tuple[_T, _T, _U]]: ...
     def __repr__(self) -> str: ...
     def __reversed__(self) -> Iterator[_T]: ...
     def count(self, value: _T) -> int: ...
     def index(self, value: _T) -> int: ...  # type: ignore

 def count_cycle(
-    iterable: Iterable[_T], n: Optional[int] = ...
-) -> Iterable[Tuple[int, _T]]: ...
+    iterable: Iterable[_T], n: int | None = ...
+) -> Iterable[tuple[int, _T]]: ...
 def mark_ends(
     iterable: Iterable[_T],
-) -> Iterable[Tuple[bool, bool, _T]]: ...
+) -> Iterable[tuple[bool, bool, _T]]: ...
 def locate(
     iterable: Iterable[object],
     pred: Callable[..., Any] = ...,
-    window_size: Optional[int] = ...,
+    window_size: int | None = ...,
 ) -> Iterator[int]: ...
 def lstrip(
     iterable: Iterable[_T], pred: Callable[[_T], object]
@@ -403,9 +398,7 @@ def strip(
 ) -> Iterator[_T]: ...

 class islice_extended(Generic[_T], Iterator[_T]):
-    def __init__(
-        self, iterable: Iterable[_T], *args: Optional[int]
-    ) -> None: ...
+    def __init__(self, iterable: Iterable[_T], *args: int | None) -> None: ...
     def __iter__(self) -> islice_extended[_T]: ...
     def __next__(self) -> _T: ...
     def __getitem__(self, index: slice) -> islice_extended[_T]: ...
@@ -420,7 +413,7 @@ def difference(
     func: Callable[[_T, _T], _U] = ...,
     *,
     initial: None = ...,
-) -> Iterator[Union[_T, _U]]: ...
+) -> Iterator[_T | _U]: ...
 @overload
 def difference(
     iterable: Iterable[_T], func: Callable[[_T, _T], _U] = ..., *, initial: _U
@@ -436,7 +429,7 @@ class SequenceView(Generic[_T], Sequence[_T]):

 class seekable(Generic[_T], Iterator[_T]):
     def __init__(
-        self, iterable: Iterable[_T], maxlen: Optional[int] = ...
+        self, iterable: Iterable[_T], maxlen: int | None = ...
     ) -> None: ...
     def __iter__(self) -> seekable[_T]: ...
     def __next__(self) -> _T: ...
@@ -444,20 +437,20 @@ class seekable(Generic[_T], Iterator[_T]):
     @overload
     def peek(self) -> _T: ...
     @overload
-    def peek(self, default: _U) -> Union[_T, _U]: ...
+    def peek(self, default: _U) -> _T | _U: ...
     def elements(self) -> SequenceView[_T]: ...
     def seek(self, index: int) -> None: ...

 class run_length:
     @staticmethod
-    def encode(iterable: Iterable[_T]) -> Iterator[Tuple[_T, int]]: ...
+    def encode(iterable: Iterable[_T]) -> Iterator[tuple[_T, int]]: ...
     @staticmethod
-    def decode(iterable: Iterable[Tuple[_T, int]]) -> Iterator[_T]: ...
+    def decode(iterable: Iterable[tuple[_T, int]]) -> Iterator[_T]: ...

 def exactly_n(
     iterable: Iterable[_T], n: int, predicate: Callable[[_T], object] = ...
 ) -> bool: ...
-def circular_shifts(iterable: Iterable[_T]) -> List[Tuple[_T, ...]]: ...
+def circular_shifts(iterable: Iterable[_T]) -> list[tuple[_T, ...]]: ...
 def make_decorator(
     wrapping_func: Callable[..., _U], result_index: int = ...
 ) -> Callable[..., Callable[[Callable[..., Any]], Callable[..., _U]]]: ...
@@ -467,44 +460,44 @@ def map_reduce(
     keyfunc: Callable[[_T], _U],
     valuefunc: None = ...,
     reducefunc: None = ...,
-) -> Dict[_U, List[_T]]: ...
+) -> dict[_U, list[_T]]: ...
 @overload
 def map_reduce(
     iterable: Iterable[_T],
     keyfunc: Callable[[_T], _U],
     valuefunc: Callable[[_T], _V],
     reducefunc: None = ...,
-) -> Dict[_U, List[_V]]: ...
+) -> dict[_U, list[_V]]: ...
 @overload
 def map_reduce(
     iterable: Iterable[_T],
     keyfunc: Callable[[_T], _U],
     valuefunc: None = ...,
-    reducefunc: Callable[[List[_T]], _W] = ...,
-) -> Dict[_U, _W]: ...
+    reducefunc: Callable[[list[_T]], _W] = ...,
+) -> dict[_U, _W]: ...
 @overload
 def map_reduce(
     iterable: Iterable[_T],
     keyfunc: Callable[[_T], _U],
     valuefunc: Callable[[_T], _V],
-    reducefunc: Callable[[List[_V]], _W],
-) -> Dict[_U, _W]: ...
+    reducefunc: Callable[[list[_V]], _W],
+) -> dict[_U, _W]: ...
 def rlocate(
     iterable: Iterable[_T],
     pred: Callable[..., object] = ...,
-    window_size: Optional[int] = ...,
+    window_size: int | None = ...,
 ) -> Iterator[int]: ...
 def replace(
     iterable: Iterable[_T],
     pred: Callable[..., object],
     substitutes: Iterable[_U],
-    count: Optional[int] = ...,
+    count: int | None = ...,
     window_size: int = ...,
-) -> Iterator[Union[_T, _U]]: ...
-def partitions(iterable: Iterable[_T]) -> Iterator[List[List[_T]]]: ...
+) -> Iterator[_T | _U]: ...
+def partitions(iterable: Iterable[_T]) -> Iterator[list[list[_T]]]: ...
 def set_partitions(
-    iterable: Iterable[_T], k: Optional[int] = ...
-) -> Iterator[List[List[_T]]]: ...
+    iterable: Iterable[_T], k: int | None = ...
+) -> Iterator[list[list[_T]]]: ...

 class time_limited(Generic[_T], Iterator[_T]):
     def __init__(
@@ -515,16 +508,16 @@ class time_limited(Generic[_T], Iterator[_T]):

 @overload
 def only(
-    iterable: Iterable[_T], *, too_long: Optional[_Raisable] = ...
-) -> Optional[_T]: ...
+    iterable: Iterable[_T], *, too_long: _Raisable | None = ...
+) -> _T | None: ...
 @overload
 def only(
-    iterable: Iterable[_T], default: _U, too_long: Optional[_Raisable] = ...
-) -> Union[_T, _U]: ...
+    iterable: Iterable[_T], default: _U, too_long: _Raisable | None = ...
+) -> _T | _U: ...
 def ichunked(iterable: Iterable[_T], n: int) -> Iterator[Iterator[_T]]: ...
 def distinct_combinations(
     iterable: Iterable[_T], r: int
-) -> Iterator[Tuple[_T, ...]]: ...
+) -> Iterator[tuple[_T, ...]]: ...
 def filter_except(
     validator: Callable[[Any], object],
     iterable: Iterable[_T],
@@ -539,16 +532,16 @@ def map_if(
     iterable: Iterable[Any],
     pred: Callable[[Any], bool],
     func: Callable[[Any], Any],
-    func_else: Optional[Callable[[Any], Any]] = ...,
+    func_else: Callable[[Any], Any] | None = ...,
 ) -> Iterator[Any]: ...
 def sample(
     iterable: Iterable[_T],
     k: int,
-    weights: Optional[Iterable[float]] = ...,
-) -> List[_T]: ...
+    weights: Iterable[float] | None = ...,
+) -> list[_T]: ...
 def is_sorted(
     iterable: Iterable[_T],
-    key: Optional[Callable[[_T], _U]] = ...,
+    key: Callable[[_T], _U] | None = ...,
     reverse: bool = False,
     strict: bool = False,
 ) -> bool: ...
@@ -566,10 +559,10 @@ class callback_iter(Generic[_T], Iterator[_T]):
     def __enter__(self) -> callback_iter[_T]: ...
     def __exit__(
         self,
-        exc_type: Optional[Type[BaseException]],
-        exc_value: Optional[BaseException],
-        traceback: Optional[TracebackType],
-    ) -> Optional[bool]: ...
+        exc_type: Type[BaseException] | None,
+        exc_value: BaseException | None,
+        traceback: TracebackType | None,
+    ) -> bool | None: ...
     def __iter__(self) -> callback_iter[_T]: ...
     def __next__(self) -> _T: ...
     def _reader(self) -> Iterator[_T]: ...
@@ -580,15 +573,15 @@ class callback_iter(Generic[_T], Iterator[_T]):

 def windowed_complete(
     iterable: Iterable[_T], n: int
-) -> Iterator[Tuple[_T, ...]]: ...
+) -> Iterator[tuple[_T, ...]]: ...
 def all_unique(
-    iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = ...
+    iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
 ) -> bool: ...
-def nth_product(index: int, *args: Iterable[_T]) -> Tuple[_T, ...]: ...
+def nth_product(index: int, *args: Iterable[_T]) -> tuple[_T, ...]: ...
 def nth_permutation(
     iterable: Iterable[_T], r: int, index: int
-) -> Tuple[_T, ...]: ...
-def value_chain(*args: Union[_T, Iterable[_T]]) -> Iterable[_T]: ...
+) -> tuple[_T, ...]: ...
+def value_chain(*args: _T | Iterable[_T]) -> Iterable[_T]: ...
 def product_index(element: Iterable[_T], *args: Iterable[_T]) -> int: ...
 def combination_index(
     element: Iterable[_T], iterable: Iterable[_T]
@@ -603,22 +596,20 @@ class countable(Generic[_T], Iterator[_T]):
     def __iter__(self) -> countable[_T]: ...
     def __next__(self) -> _T: ...

-def chunked_even(iterable: Iterable[_T], n: int) -> Iterator[List[_T]]: ...
+def chunked_even(iterable: Iterable[_T], n: int) -> Iterator[list[_T]]: ...
 def zip_broadcast(
-    *objects: Union[_T, Iterable[_T]],
-    scalar_types: Union[
-        type, Tuple[Union[type, Tuple[Any, ...]], ...], None
-    ] = ...,
+    *objects: _T | Iterable[_T],
+    scalar_types: type | tuple[type | tuple[Any, ...], ...] | None = ...,
     strict: bool = ...,
-) -> Iterable[Tuple[_T, ...]]: ...
+) -> Iterable[tuple[_T, ...]]: ...
 def unique_in_window(
-    iterable: Iterable[_T], n: int, key: Optional[Callable[[_T], _U]] = ...
+    iterable: Iterable[_T], n: int, key: Callable[[_T], _U] | None = ...
 ) -> Iterator[_T]: ...
 def duplicates_everseen(
-    iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = ...
+    iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
 ) -> Iterator[_T]: ...
 def duplicates_justseen(
-    iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = ...
+    iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
 ) -> Iterator[_T]: ...

 class _SupportsLessThan(Protocol):
@@ -629,38 +620,38 @@ _SupportsLessThanT = TypeVar("_SupportsLessThanT", bound=_SupportsLessThan)
 @overload
 def minmax(
     iterable_or_value: Iterable[_SupportsLessThanT], *, key: None = None
-) -> Tuple[_SupportsLessThanT, _SupportsLessThanT]: ...
+) -> tuple[_SupportsLessThanT, _SupportsLessThanT]: ...
 @overload
 def minmax(
     iterable_or_value: Iterable[_T], *, key: Callable[[_T], _SupportsLessThan]
-) -> Tuple[_T, _T]: ...
+) -> tuple[_T, _T]: ...
 @overload
 def minmax(
     iterable_or_value: Iterable[_SupportsLessThanT],
     *,
     key: None = None,
     default: _U,
-) -> Union[_U, Tuple[_SupportsLessThanT, _SupportsLessThanT]]: ...
+) -> _U | tuple[_SupportsLessThanT, _SupportsLessThanT]: ...
 @overload
 def minmax(
     iterable_or_value: Iterable[_T],
     *,
     key: Callable[[_T], _SupportsLessThan],
     default: _U,
-) -> Union[_U, Tuple[_T, _T]]: ...
+) -> _U | tuple[_T, _T]: ...
 @overload
 def minmax(
     iterable_or_value: _SupportsLessThanT,
     __other: _SupportsLessThanT,
     *others: _SupportsLessThanT,
-) -> Tuple[_SupportsLessThanT, _SupportsLessThanT]: ...
+) -> tuple[_SupportsLessThanT, _SupportsLessThanT]: ...
 @overload
 def minmax(
     iterable_or_value: _T,
     __other: _T,
     *others: _T,
     key: Callable[[_T], _SupportsLessThan],
-) -> Tuple[_T, _T]: ...
+) -> tuple[_T, _T]: ...
 def longest_common_prefix(
     iterables: Iterable[Iterable[_T]],
 ) -> Iterator[_T]: ...
@@ -668,7 +659,8 @@ def iequals(*iterables: Iterable[object]) -> bool: ...
 def constrained_batches(
     iterable: Iterable[object],
     max_size: int,
-    max_count: Optional[int] = ...,
+    max_count: int | None = ...,
     get_len: Callable[[_T], object] = ...,
     strict: bool = ...,
-) -> Iterator[Tuple[_T]]: ...
+) -> Iterator[tuple[_T]]: ...
+def gray_product(*iterables: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ...
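
The stub churn above is enabled by the new `from __future__ import annotations` line: with annotation evaluation deferred, the PEP 604/585 spellings parse on every supported interpreter even though `int | None` would fail to evaluate at runtime before Python 3.10. A tiny sketch:

from __future__ import annotations  # annotations are no longer evaluated


def first_or_none(items: list[int]) -> int | None:  # fine on Python 3.7+
    return items[0] if items else None
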
@@ -9,6 +9,7 @@ Some backward-compatible usability improvements have been made.
 """
 import math
 import operator
+import warnings

 from collections import deque
 from collections.abc import Sized
@@ -21,12 +22,14 @@ from itertools import (
     cycle,
     groupby,
     islice,
+    product,
     repeat,
     starmap,
     tee,
     zip_longest,
 )
 from random import randrange, sample, choice
+from sys import hexversion

 __all__ = [
     'all_equal',
@@ -36,9 +39,12 @@ __all__ = [
     'convolve',
     'dotproduct',
     'first_true',
+    'factor',
     'flatten',
     'grouper',
     'iter_except',
+    'iter_index',
+    'matmul',
     'ncycles',
     'nth',
     'nth_combination',
@@ -62,6 +68,7 @@ __all__ = [
     'tabulate',
     'tail',
     'take',
+    'transpose',
     'triplewise',
     'unique_everseen',
     'unique_justseen',
@@ -808,6 +815,35 @@ def polynomial_from_roots(roots):
     ]


+def iter_index(iterable, value, start=0):
+    """Yield the index of each place in *iterable* that *value* occurs,
+    beginning with index *start*.
+
+    See :func:`locate` for a more general means of finding the indexes
+    associated with particular values.
+
+    >>> list(iter_index('AABCADEAF', 'A'))
+    [0, 1, 4, 7]
+    """
+    try:
+        seq_index = iterable.index
+    except AttributeError:
+        # Slow path for general iterables
+        it = islice(iterable, start, None)
+        for i, element in enumerate(it, start):
+            if element is value or element == value:
+                yield i
+    else:
+        # Fast path for sequences
+        i = start - 1
+        try:
+            while True:
+                i = seq_index(value, i + 1)
+                yield i
+        except ValueError:
+            pass
+
+
 def sieve(n):
     """Yield the primes less than n.

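
A quick sketch of the two paths in iter_index: sequences use the C-speed .index fast path, while anything without an index method falls back to the enumerate loop; both produce the same result.

from more_itertools import iter_index

print(list(iter_index("AABCADEAF", "A")))        # fast path: [0, 1, 4, 7]
print(list(iter_index(iter("AABCADEAF"), "A")))  # slow path: [0, 1, 4, 7]
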
@@ -815,13 +851,13 @@ def sieve(n):
     [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
     """
     isqrt = getattr(math, 'isqrt', lambda x: int(math.sqrt(x)))
-    data = bytearray([1]) * n
-    data[:2] = 0, 0
+    data = bytearray((0, 1)) * (n // 2)
+    data[:3] = 0, 0, 0
     limit = isqrt(n) + 1
     for p in compress(range(limit), data):
-        data[p + p : n : p] = bytearray(len(range(p + p, n, p)))
-
-    return compress(count(), data)
+        data[p * p : n : p + p] = bytes(len(range(p * p, n, p + p)))
+    data[2] = 1
+    return iter_index(data, 1) if n > 2 else iter([])


 def batched(iterable, n):
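
The rewritten sieve pre-marks only odd candidates via the repeating (0, 1) byte pattern, crosses out composites with the p * p start and p + p stride, re-marks 2 with data[2] = 1, and then reuses the new iter_index to emit the prime positions:

from more_itertools import sieve

print(list(sieve(30)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
print(list(sieve(2)))   # [] -- the n > 2 guard returns an empty iterator
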
@@ -833,9 +869,62 @@ def batched(iterable, n):
     This recipe is from the ``itertools`` docs. This library also provides
     :func:`chunked`, which has a different implementation.
     """
+    if hexversion >= 0x30C00A0:  # Python 3.12.0a0
+        warnings.warn(
+            (
+                'batched will be removed in a future version of '
+                'more-itertools. Use the standard library '
+                'itertools.batched function instead'
+            ),
+            DeprecationWarning,
+        )
+
     it = iter(iterable)
     while True:
         batch = list(islice(it, n))
         if not batch:
             break
         yield batch
+
+
+def transpose(it):
+    """Swap the rows and columns of the input.
+
+    >>> list(transpose([(1, 2, 3), (11, 22, 33)]))
+    [(1, 11), (2, 22), (3, 33)]
+
+    The caller should ensure that the dimensions of the input are compatible.
+    """
+    # TODO: when 3.9 goes end-of-life, add strict=True to this.
+    return zip(*it)
+
+
+def matmul(m1, m2):
+    """Multiply two matrices.
+    >>> list(matmul([(7, 5), (3, 5)], [(2, 5), (7, 9)]))
+    [[49, 80], [41, 60]]
+
+    The caller should ensure that the dimensions of the input matrices are
+    compatible with each other.
+    """
+    n = len(m2[0])
+    return batched(starmap(dotproduct, product(m1, transpose(m2))), n)
+
+
+def factor(n):
+    """Yield the prime factors of n.
+    >>> list(factor(360))
+    [2, 2, 2, 3, 3, 5]
+    """
+    isqrt = getattr(math, 'isqrt', lambda x: int(math.sqrt(x)))
+    for prime in sieve(isqrt(n) + 1):
+        while True:
+            quotient, remainder = divmod(n, prime)
+            if remainder:
+                break
+            yield prime
+            n = quotient
+            if n == 1:
+                return
+    if n >= 2:
+        yield n
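
factor leans on the sieve above: it trial-divides by primes up to the square root of n and yields whatever remains (itself prime) at the end.

from more_itertools import factor

print(list(factor(360)))  # [2, 2, 2, 3, 3, 5]
print(list(factor(97)))   # [97] -- a prime survives the loop and is yielded last
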
@@ -1,110 +1,119 @@
 """Stubs for more_itertools.recipes"""
+from __future__ import annotations
+
 from typing import (
     Any,
     Callable,
     Iterable,
     Iterator,
-    List,
-    Optional,
+    overload,
     Sequence,
-    Tuple,
+    Type,
     TypeVar,
-    Union,
 )
-from typing_extensions import overload, Type

 # Type and type variable definitions
 _T = TypeVar('_T')
 _U = TypeVar('_U')

-def take(n: int, iterable: Iterable[_T]) -> List[_T]: ...
+def take(n: int, iterable: Iterable[_T]) -> list[_T]: ...
 def tabulate(
     function: Callable[[int], _T], start: int = ...
 ) -> Iterator[_T]: ...
 def tail(n: int, iterable: Iterable[_T]) -> Iterator[_T]: ...
-def consume(iterator: Iterable[object], n: Optional[int] = ...) -> None: ...
+def consume(iterator: Iterable[object], n: int | None = ...) -> None: ...
 @overload
-def nth(iterable: Iterable[_T], n: int) -> Optional[_T]: ...
+def nth(iterable: Iterable[_T], n: int) -> _T | None: ...
 @overload
-def nth(iterable: Iterable[_T], n: int, default: _U) -> Union[_T, _U]: ...
+def nth(iterable: Iterable[_T], n: int, default: _U) -> _T | _U: ...
 def all_equal(iterable: Iterable[object]) -> bool: ...
 def quantify(
     iterable: Iterable[_T], pred: Callable[[_T], bool] = ...
 ) -> int: ...
-def pad_none(iterable: Iterable[_T]) -> Iterator[Optional[_T]]: ...
-def padnone(iterable: Iterable[_T]) -> Iterator[Optional[_T]]: ...
+def pad_none(iterable: Iterable[_T]) -> Iterator[_T | None]: ...
+def padnone(iterable: Iterable[_T]) -> Iterator[_T | None]: ...
 def ncycles(iterable: Iterable[_T], n: int) -> Iterator[_T]: ...
 def dotproduct(vec1: Iterable[object], vec2: Iterable[object]) -> object: ...
 def flatten(listOfLists: Iterable[Iterable[_T]]) -> Iterator[_T]: ...
 def repeatfunc(
-    func: Callable[..., _U], times: Optional[int] = ..., *args: Any
+    func: Callable[..., _U], times: int | None = ..., *args: Any
 ) -> Iterator[_U]: ...
-def pairwise(iterable: Iterable[_T]) -> Iterator[Tuple[_T, _T]]: ...
+def pairwise(iterable: Iterable[_T]) -> Iterator[tuple[_T, _T]]: ...
 def grouper(
     iterable: Iterable[_T],
     n: int,
     incomplete: str = ...,
     fillvalue: _U = ...,
-) -> Iterator[Tuple[Union[_T, _U], ...]]: ...
+) -> Iterator[tuple[_T | _U, ...]]: ...
 def roundrobin(*iterables: Iterable[_T]) -> Iterator[_T]: ...
 def partition(
-    pred: Optional[Callable[[_T], object]], iterable: Iterable[_T]
-) -> Tuple[Iterator[_T], Iterator[_T]]: ...
-def powerset(iterable: Iterable[_T]) -> Iterator[Tuple[_T, ...]]: ...
+    pred: Callable[[_T], object] | None, iterable: Iterable[_T]
+) -> tuple[Iterator[_T], Iterator[_T]]: ...
+def powerset(iterable: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ...
 def unique_everseen(
-    iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = ...
+    iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
 ) -> Iterator[_T]: ...
 def unique_justseen(
-    iterable: Iterable[_T], key: Optional[Callable[[_T], object]] = ...
+    iterable: Iterable[_T], key: Callable[[_T], object] | None = ...
 ) -> Iterator[_T]: ...
 @overload
 def iter_except(
     func: Callable[[], _T],
-    exception: Union[Type[BaseException], Tuple[Type[BaseException], ...]],
+    exception: Type[BaseException] | tuple[Type[BaseException], ...],
     first: None = ...,
 ) -> Iterator[_T]: ...
 @overload
 def iter_except(
     func: Callable[[], _T],
-    exception: Union[Type[BaseException], Tuple[Type[BaseException], ...]],
+    exception: Type[BaseException] | tuple[Type[BaseException], ...],
     first: Callable[[], _U],
-) -> Iterator[Union[_T, _U]]: ...
+) -> Iterator[_T | _U]: ...
 @overload
 def first_true(
-    iterable: Iterable[_T], *, pred: Optional[Callable[[_T], object]] = ...
-) -> Optional[_T]: ...
+    iterable: Iterable[_T], *, pred: Callable[[_T], object] | None = ...
+) -> _T | None: ...
 @overload
 def first_true(
     iterable: Iterable[_T],
     default: _U,
-    pred: Optional[Callable[[_T], object]] = ...,
-) -> Union[_T, _U]: ...
+    pred: Callable[[_T], object] | None = ...,
+) -> _T | _U: ...
 def random_product(
     *args: Iterable[_T], repeat: int = ...
-) -> Tuple[_T, ...]: ...
+) -> tuple[_T, ...]: ...
 def random_permutation(
-    iterable: Iterable[_T], r: Optional[int] = ...
-) -> Tuple[_T, ...]: ...
-def random_combination(iterable: Iterable[_T], r: int) -> Tuple[_T, ...]: ...
+    iterable: Iterable[_T], r: int | None = ...
+) -> tuple[_T, ...]: ...
+def random_combination(iterable: Iterable[_T], r: int) -> tuple[_T, ...]: ...
 def random_combination_with_replacement(
     iterable: Iterable[_T], r: int
-) -> Tuple[_T, ...]: ...
+) -> tuple[_T, ...]: ...
 def nth_combination(
     iterable: Iterable[_T], r: int, index: int
-) -> Tuple[_T, ...]: ...
-def prepend(value: _T, iterator: Iterable[_U]) -> Iterator[Union[_T, _U]]: ...
+) -> tuple[_T, ...]: ...
+def prepend(value: _T, iterator: Iterable[_U]) -> Iterator[_T | _U]: ...
 def convolve(signal: Iterable[_T], kernel: Iterable[_T]) -> Iterator[_T]: ...
 def before_and_after(
     predicate: Callable[[_T], bool], it: Iterable[_T]
-) -> Tuple[Iterator[_T], Iterator[_T]]: ...
-def triplewise(iterable: Iterable[_T]) -> Iterator[Tuple[_T, _T, _T]]: ...
+) -> tuple[Iterator[_T], Iterator[_T]]: ...
+def triplewise(iterable: Iterable[_T]) -> Iterator[tuple[_T, _T, _T]]: ...
 def sliding_window(
     iterable: Iterable[_T], n: int
-) -> Iterator[Tuple[_T, ...]]: ...
-def subslices(iterable: Iterable[_T]) -> Iterator[List[_T]]: ...
-def polynomial_from_roots(roots: Sequence[int]) -> List[int]: ...
+) -> Iterator[tuple[_T, ...]]: ...
+def subslices(iterable: Iterable[_T]) -> Iterator[list[_T]]: ...
+def polynomial_from_roots(roots: Sequence[int]) -> list[int]: ...
+def iter_index(
+    iterable: Iterable[object],
+    value: Any,
+    start: int | None = ...,
+) -> Iterator[int]: ...
 def sieve(n: int) -> Iterator[int]: ...
 def batched(
     iterable: Iterable[_T],
     n: int,
-) -> Iterator[List[_T]]: ...
+) -> Iterator[list[_T]]: ...
+def transpose(
+    it: Iterable[Iterable[_T]],
+) -> tuple[Iterator[_T], ...]: ...
+def matmul(m1: Sequence[_T], m2: Sequence[_T]) -> Iterator[list[_T]]: ...
+def factor(n: int) -> Iterator[int]: ...

@ -6,7 +6,7 @@ __title__ = "packaging"
|
|||
__summary__ = "Core utilities for Python packages"
|
||||
__uri__ = "https://github.com/pypa/packaging"
|
||||
|
||||
__version__ = "23.0"
|
||||
__version__ = "23.1"
|
||||
|
||||
__author__ = "Donald Stufft and individual contributors"
|
||||
__email__ = "donald@stufft.io"
|
||||
|
|
|
@@ -14,6 +14,8 @@ EF_ARM_ABI_VER5 = 0x05000000
 EF_ARM_ABI_FLOAT_HARD = 0x00000400


+# `os.PathLike` not a generic type until Python 3.9, so sticking with `str`
+# as the type for `path` until then.
 @contextlib.contextmanager
 def _parse_elf(path: str) -> Generator[Optional[ELFFile], None, None]:
     try:

@ -163,7 +163,11 @@ def _parse_extras(tokenizer: Tokenizer) -> List[str]:
|
|||
if not tokenizer.check("LEFT_BRACKET", peek=True):
|
||||
return []
|
||||
|
||||
with tokenizer.enclosing_tokens("LEFT_BRACKET", "RIGHT_BRACKET"):
|
||||
with tokenizer.enclosing_tokens(
|
||||
"LEFT_BRACKET",
|
||||
"RIGHT_BRACKET",
|
||||
around="extras",
|
||||
):
|
||||
tokenizer.consume("WS")
|
||||
extras = _parse_extras_list(tokenizer)
|
||||
tokenizer.consume("WS")
|
||||
|
@ -203,7 +207,11 @@ def _parse_specifier(tokenizer: Tokenizer) -> str:
|
|||
specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS
|
||||
| WS? version_many WS?
|
||||
"""
|
||||
with tokenizer.enclosing_tokens("LEFT_PARENTHESIS", "RIGHT_PARENTHESIS"):
|
||||
with tokenizer.enclosing_tokens(
|
||||
"LEFT_PARENTHESIS",
|
||||
"RIGHT_PARENTHESIS",
|
||||
around="version specifier",
|
||||
):
|
||||
tokenizer.consume("WS")
|
||||
parsed_specifiers = _parse_version_many(tokenizer)
|
||||
tokenizer.consume("WS")
|
||||
|
@ -217,7 +225,20 @@ def _parse_version_many(tokenizer: Tokenizer) -> str:
|
|||
"""
|
||||
parsed_specifiers = ""
|
||||
while tokenizer.check("SPECIFIER"):
|
||||
span_start = tokenizer.position
|
||||
parsed_specifiers += tokenizer.read().text
|
||||
if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True):
|
||||
tokenizer.raise_syntax_error(
|
||||
".* suffix can only be used with `==` or `!=` operators",
|
||||
span_start=span_start,
|
||||
span_end=tokenizer.position + 1,
|
||||
)
|
||||
if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True):
|
||||
tokenizer.raise_syntax_error(
|
||||
"Local version label can only be used with `==` or `!=` operators",
|
||||
span_start=span_start,
|
||||
span_end=tokenizer.position,
|
||||
)
|
||||
tokenizer.consume("WS")
|
||||
if not tokenizer.check("COMMA"):
|
||||
break
|
||||
|
@ -254,7 +275,11 @@ def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom:
|
|||
|
||||
tokenizer.consume("WS")
|
||||
if tokenizer.check("LEFT_PARENTHESIS", peek=True):
|
||||
with tokenizer.enclosing_tokens("LEFT_PARENTHESIS", "RIGHT_PARENTHESIS"):
|
||||
with tokenizer.enclosing_tokens(
|
||||
"LEFT_PARENTHESIS",
|
||||
"RIGHT_PARENTHESIS",
|
||||
around="marker expression",
|
||||
):
|
||||
tokenizer.consume("WS")
|
||||
marker: MarkerAtom = _parse_marker(tokenizer)
|
||||
tokenizer.consume("WS")
|
||||
|
|
|
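
The new guards turn previously tolerated specifier trails into early, descriptive syntax errors. A sketch of what packaging 23.1 now rejects:

from packaging.requirements import InvalidRequirement, Requirement

try:
    Requirement("example >= 1.0.*")  # .* is only valid with == or !=
except InvalidRequirement as exc:
    print(exc)  # mentions: .* suffix can only be used with `==` or `!=` operators
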
@@ -78,6 +78,8 @@ DEFAULT_RULES: "Dict[str, Union[str, re.Pattern[str]]]" = {
     "AT": r"\@",
     "URL": r"[^ \t]+",
     "IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b",
+    "VERSION_PREFIX_TRAIL": r"\.\*",
+    "VERSION_LOCAL_LABEL_TRAIL": r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*",
     "WS": r"[ \t]+",
     "END": r"$",
 }
@@ -167,21 +169,23 @@ class Tokenizer:
         )

     @contextlib.contextmanager
-    def enclosing_tokens(self, open_token: str, close_token: str) -> Iterator[bool]:
+    def enclosing_tokens(
+        self, open_token: str, close_token: str, *, around: str
+    ) -> Iterator[None]:
         if self.check(open_token):
             open_position = self.position
             self.read()
         else:
             open_position = None

-        yield open_position is not None
+        yield

         if open_position is None:
             return

         if not self.check(close_token):
             self.raise_syntax_error(
-                f"Expected closing {close_token}",
+                f"Expected matching {close_token} for {open_token}, after {around}",
                 span_start=open_position,
             )
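
With around threaded through, an unclosed bracket now names both tokens and the construct being parsed. A sketch of the improved message:

from packaging.requirements import InvalidRequirement, Requirement

try:
    Requirement("name[extra")  # closing bracket missing
except InvalidRequirement as exc:
    print(exc)  # mentions: Expected matching RIGHT_BRACKET for LEFT_BRACKET, after extras
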
@@ -8,7 +8,14 @@ import platform
 import sys
 from typing import Any, Callable, Dict, List, Optional, Tuple, Union

-from ._parser import MarkerAtom, MarkerList, Op, Value, Variable, parse_marker
+from ._parser import (
+    MarkerAtom,
+    MarkerList,
+    Op,
+    Value,
+    Variable,
+    parse_marker as _parse_marker,
+)
 from ._tokenizer import ParserSyntaxError
 from .specifiers import InvalidSpecifier, Specifier
 from .utils import canonicalize_name
@@ -189,7 +196,7 @@ class Marker:
         # packaging.requirements.Requirement. If any additional logic is
         # added here, make sure to mirror/adapt Requirement.
         try:
-            self._markers = _normalize_extra_values(parse_marker(marker))
+            self._markers = _normalize_extra_values(_parse_marker(marker))
             # The attribute `_markers` can be described in terms of a recursive type:
             # MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]]
             #

lib/pkg_resources/_vendor/packaging/metadata.py (new file, 408 lines)
@@ -0,0 +1,408 @@
import email.feedparser
|
||||
import email.header
|
||||
import email.message
|
||||
import email.parser
|
||||
import email.policy
|
||||
import sys
|
||||
import typing
|
||||
from typing import Dict, List, Optional, Tuple, Union, cast
|
||||
|
||||
if sys.version_info >= (3, 8): # pragma: no cover
|
||||
from typing import TypedDict
|
||||
else: # pragma: no cover
|
||||
if typing.TYPE_CHECKING:
|
||||
from typing_extensions import TypedDict
|
||||
else:
|
||||
try:
|
||||
from typing_extensions import TypedDict
|
||||
except ImportError:
|
||||
|
||||
class TypedDict:
|
||||
def __init_subclass__(*_args, **_kwargs):
|
||||
pass
|
||||
|
||||
|
||||
# The RawMetadata class attempts to make as few assumptions about the underlying
|
||||
# serialization formats as possible. The idea is that as long as a serialization
|
||||
# formats offer some very basic primitives in *some* way then we can support
|
||||
# serializing to and from that format.
|
||||
class RawMetadata(TypedDict, total=False):
|
||||
"""A dictionary of raw core metadata.
|
||||
|
||||
Each field in core metadata maps to a key of this dictionary (when data is
|
||||
provided). The key is lower-case and underscores are used instead of dashes
|
||||
compared to the equivalent core metadata field. Any core metadata field that
|
||||
can be specified multiple times or can hold multiple values in a single
|
||||
field have a key with a plural name.
|
||||
|
||||
Core metadata fields that can be specified multiple times are stored as a
|
||||
list or dict depending on which is appropriate for the field. Any fields
|
||||
which hold multiple values in a single field are stored as a list.
|
||||
|
||||
"""
|
||||
|
||||
# Metadata 1.0 - PEP 241
|
||||
metadata_version: str
|
||||
name: str
|
||||
version: str
|
||||
platforms: List[str]
|
||||
summary: str
|
||||
description: str
|
||||
keywords: List[str]
|
||||
home_page: str
|
||||
author: str
|
||||
author_email: str
|
||||
license: str
|
||||
|
||||
# Metadata 1.1 - PEP 314
|
||||
supported_platforms: List[str]
|
||||
download_url: str
|
||||
classifiers: List[str]
|
||||
requires: List[str]
|
||||
provides: List[str]
|
||||
obsoletes: List[str]
|
||||
|
||||
# Metadata 1.2 - PEP 345
|
||||
maintainer: str
|
||||
maintainer_email: str
|
||||
requires_dist: List[str]
|
||||
provides_dist: List[str]
|
||||
obsoletes_dist: List[str]
|
||||
requires_python: str
|
||||
requires_external: List[str]
|
||||
project_urls: Dict[str, str]
|
||||
|
||||
# Metadata 2.0
|
||||
# PEP 426 attempted to completely revamp the metadata format
|
||||
# but got stuck without ever being able to build consensus on
|
||||
# it and ultimately ended up withdrawn.
|
||||
#
|
||||
# However, a number of tools had started emiting METADATA with
|
||||
# `2.0` Metadata-Version, so for historical reasons, this version
|
||||
# was skipped.
|
||||
|
||||
# Metadata 2.1 - PEP 566
|
||||
description_content_type: str
|
||||
provides_extra: List[str]
|
||||
|
||||
# Metadata 2.2 - PEP 643
|
||||
dynamic: List[str]
|
||||
|
||||
# Metadata 2.3 - PEP 685
|
||||
# No new fields were added in PEP 685, just some edge case were
|
||||
# tightened up to provide better interoptability.
|
||||
|
||||
|
||||
_STRING_FIELDS = {
|
||||
"author",
|
||||
"author_email",
|
||||
"description",
|
||||
"description_content_type",
|
||||
"download_url",
|
||||
"home_page",
|
||||
"license",
|
||||
"maintainer",
|
||||
"maintainer_email",
|
||||
"metadata_version",
|
||||
"name",
|
||||
"requires_python",
|
||||
"summary",
|
||||
"version",
|
||||
}
|
||||
|
||||
_LIST_STRING_FIELDS = {
|
||||
"classifiers",
|
||||
"dynamic",
|
||||
"obsoletes",
|
||||
"obsoletes_dist",
|
||||
"platforms",
|
||||
"provides",
|
||||
"provides_dist",
|
||||
"provides_extra",
|
||||
"requires",
|
||||
"requires_dist",
|
||||
"requires_external",
|
||||
"supported_platforms",
|
||||
}
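Since `RawMetadata` is declared with `total=False`, any subset of its keys is a valid instance, and at runtime it is a plain dict. A small illustrative literal (the field values are made up):

example_raw: RawMetadata = {
    "metadata_version": "2.1",
    "name": "example-project",  # hypothetical project name
    "version": "1.0.0",
    "keywords": ["cli", "tooling"],
}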
def _parse_keywords(data: str) -> List[str]:
    """Split a string of comma-separated keywords into a list of keywords."""
    return [k.strip() for k in data.split(",")]
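Each keyword is stripped of surrounding whitespace, so spacing around the commas does not matter:

_parse_keywords("cli, tooling , packaging")  # -> ["cli", "tooling", "packaging"]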
def _parse_project_urls(data: List[str]) -> Dict[str, str]:
    """Parse a list of label/URL string pairings separated by a comma."""
    urls = {}
    for pair in data:
        # Our logic is slightly tricky here as we want to try and do
        # *something* reasonable with malformed data.
        #
        # The main thing that we have to worry about is data that does
        # not have a ',' at all to split the label from the value. There
        # isn't a singular right answer here, and we will fail validation
        # later on (if the caller is validating) so it doesn't *really*
        # matter, but since the missing value has to be an empty str
        # and our return value is dict[str, str], if we let the key
        # be the missing value, then they'd have multiple '' values that
        # overwrite each other in an accumulating dict.
        #
        # The other potential issue is that it's possible to have the
        # same label multiple times in the metadata, with no solid "right"
        # answer for what to do in that case. As such, we'll do the only
        # thing we can, which is treat the field as unparseable and add it
        # to our list of unparsed fields.
        parts = [p.strip() for p in pair.split(",", 1)]
        parts.extend([""] * (max(0, 2 - len(parts))))  # Ensure 2 items

        # TODO: The spec doesn't say anything about if the keys should be
        #       considered case sensitive or not... logically they should
        #       be case-preserving and case-insensitive, but doing that
        #       would open up more cases where we might have duplicate
        #       entries.
        label, url = parts
        if label in urls:
            # The label already exists in our set of urls, so this field
            # is unparseable, and we can just add the whole thing to our
            # unparseable data and stop processing it.
            raise KeyError("duplicate labels in project urls")
        urls[label] = url

    return urls
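To make the malformed-data handling above concrete, a few illustrative calls (the URLs are placeholders):

_parse_project_urls(["Homepage, https://example.invalid"])
# -> {"Homepage": "https://example.invalid"}

_parse_project_urls(["Docs"])
# -> {"Docs": ""}  (no comma, so the URL defaults to an empty string)

_parse_project_urls(["Docs, https://a.invalid", "Docs, https://b.invalid"])
# raises KeyError: duplicate labels in project urls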
def _get_payload(msg: email.message.Message, source: Union[bytes, str]) -> str:
    """Get the body of the message."""
    # If our source is a str, then our caller has managed encodings for us,
    # and we don't need to deal with it.
    if isinstance(source, str):
        payload: str = msg.get_payload()
        return payload
    # If our source is bytes, then we're managing the encoding and we need
    # to deal with it.
    else:
        bpayload: bytes = msg.get_payload(decode=True)
        try:
            return bpayload.decode("utf8", "strict")
        except UnicodeDecodeError:
            raise ValueError("payload in an invalid encoding")
# The various parse_FORMAT functions here are intended to be as lenient as
# possible in their parsing, while still returning a correctly typed
# RawMetadata.
#
# To aid in this, we also generally want to do as little touching of the
# data as possible, except where there are possibly some historic holdovers
# that make valid data awkward to work with.
#
# While this is a lower level, intermediate format than our ``Metadata``
# class, some light touch ups can make a massive difference in usability.

# Map METADATA fields to RawMetadata.
_EMAIL_TO_RAW_MAPPING = {
    "author": "author",
    "author-email": "author_email",
    "classifier": "classifiers",
    "description": "description",
    "description-content-type": "description_content_type",
    "download-url": "download_url",
    "dynamic": "dynamic",
    "home-page": "home_page",
    "keywords": "keywords",
    "license": "license",
    "maintainer": "maintainer",
    "maintainer-email": "maintainer_email",
    "metadata-version": "metadata_version",
    "name": "name",
    "obsoletes": "obsoletes",
    "obsoletes-dist": "obsoletes_dist",
    "platform": "platforms",
    "project-url": "project_urls",
    "provides": "provides",
    "provides-dist": "provides_dist",
    "provides-extra": "provides_extra",
    "requires": "requires",
    "requires-dist": "requires_dist",
    "requires-external": "requires_external",
    "requires-python": "requires_python",
    "summary": "summary",
    "supported-platform": "supported_platforms",
    "version": "version",
}
def parse_email(data: Union[bytes, str]) -> Tuple[RawMetadata, Dict[str, List[str]]]:
    """Parse a distribution's metadata.

    This function returns a two-item tuple of dicts. The first dict is of
    recognized fields from the core metadata specification. Fields that can be
    parsed and translated into Python's built-in types are converted
    appropriately. All other fields are left as-is. Fields that are allowed to
    appear multiple times are stored as lists.

    The second dict contains all other fields from the metadata. This includes
    any unrecognized fields. It also includes any fields which are expected to
    be parsed into a built-in type but were not formatted appropriately. Finally,
    any fields that are expected to appear only once but are repeated are
    included in this dict.

    """
    raw: Dict[str, Union[str, List[str], Dict[str, str]]] = {}
    unparsed: Dict[str, List[str]] = {}

    if isinstance(data, str):
        parsed = email.parser.Parser(policy=email.policy.compat32).parsestr(data)
    else:
        parsed = email.parser.BytesParser(policy=email.policy.compat32).parsebytes(data)

    # We have to wrap parsed.keys() in a set, because in the case of multiple
    # values for a key (a list), the key will appear multiple times in the
    # list of keys, but we're avoiding that by using get_all().
    for name in frozenset(parsed.keys()):
        # Header names in RFC are case insensitive, so we'll normalize to all
        # lower case to make comparisons easier.
        name = name.lower()

        # We use get_all() here, even for fields that aren't multiple use,
        # because otherwise someone could have e.g. two Name fields, and we
        # would just silently ignore it rather than doing something about it.
        headers = parsed.get_all(name)

        # The way the email module works when parsing bytes is that it
        # unconditionally decodes the bytes as ascii using the surrogateescape
        # handler. When you pull that data back out (such as with get_all()),
        # it looks to see if the str has any surrogate escapes, and if it does
        # it wraps it in a Header object instead of returning the string.
        #
        # As such, we'll look for those Header objects, and fix up the encoding.
        value = []
        # Flag if we have run into any issues processing the headers, thus
        # signalling that the data belongs in 'unparsed'.
        valid_encoding = True
        for h in headers:
            # It's unclear if this can return more types than just a Header or
            # a str, so we'll just assert here to make sure.
            assert isinstance(h, (email.header.Header, str))

            # If it's a header object, we need to do our little dance to get
            # the real data out of it. In cases where there is invalid data
            # we're going to end up with mojibake, but there's no obvious, good
            # way around that without reimplementing parts of the Header object
            # ourselves.
            #
            # That should be fine since, if mojibake happens, this key is
            # going into the unparsed dict anyway.
            if isinstance(h, email.header.Header):
                # The Header object stores its data as chunks, and each chunk
                # can be independently encoded, so we'll need to check each
                # of them.
                chunks: List[Tuple[bytes, Optional[str]]] = []
                for bin, encoding in email.header.decode_header(h):
                    try:
                        bin.decode("utf8", "strict")
                    except UnicodeDecodeError:
                        # Enable mojibake.
                        encoding = "latin1"
                        valid_encoding = False
                    else:
                        encoding = "utf8"
                    chunks.append((bin, encoding))

                # Turn our chunks back into a Header object, then let that
                # Header object do the right thing to turn them into a
                # string for us.
                value.append(str(email.header.make_header(chunks)))
            # This is already a string, so just add it.
            else:
                value.append(h)

        # We've processed all of our values to get them into a list of str,
        # but we may have mojibake data, in which case this is an unparsed
        # field.
        if not valid_encoding:
            unparsed[name] = value
            continue

        raw_name = _EMAIL_TO_RAW_MAPPING.get(name)
        if raw_name is None:
            # This is a bit of a weird situation: we've encountered a key that
            # we don't know what it means, so we don't know whether it's meant
            # to be a list or not.
            #
            # Since we can't really tell one way or another, we'll just leave it
            # as a list, even though it may be a single item list, because that's
            # what makes the most sense for email headers.
            unparsed[name] = value
            continue

        # If this is one of our string fields, then we'll check to see if our
        # value is a list of a single item. If it is then we'll assume that
        # it was emitted as a single string, and unwrap the str from inside
        # the list.
        #
        # If it's any other kind of data, then we haven't the faintest clue
        # what we should parse it as, and we have to just add it to our list
        # of unparsed stuff.
        if raw_name in _STRING_FIELDS and len(value) == 1:
            raw[raw_name] = value[0]
        # If this is one of our list of string fields, then we can just assign
        # the value, since email *only* has strings, and our get_all() call
        # above ensures that this is a list.
        elif raw_name in _LIST_STRING_FIELDS:
            raw[raw_name] = value
        # Special Case: Keywords
        # The keywords field is implemented in the metadata spec as a str,
        # but it conceptually is a list of strings, and is serialized using
        # ", ".join(keywords), so we'll do some light data massaging to turn
        # this into what it logically is.
        elif raw_name == "keywords" and len(value) == 1:
            raw[raw_name] = _parse_keywords(value[0])
        # Special Case: Project-URL
        # The project urls field is implemented in the metadata spec as a list
        # of specially-formatted strings that represent a key and a value, which
        # is fundamentally a mapping, however the email format doesn't support
        # mappings in a sane way, so it was crammed into a list of strings
        # instead.
        #
        # We will do a little light data massaging to turn this into a map as
        # it logically should be.
        elif raw_name == "project_urls":
            try:
                raw[raw_name] = _parse_project_urls(value)
            except KeyError:
                unparsed[name] = value
        # Nothing that we've done has managed to parse this, so it'll just
        # throw it in our unparseable data and move on.
        else:
            unparsed[name] = value

    # We need to support getting the Description from the message payload in
    # addition to getting it from the headers. This does mean, though, there
    # is the possibility of it being set both ways, in which case we put both
    # in 'unparsed' since we don't know which is right.
    try:
        payload = _get_payload(parsed, data)
    except ValueError:
        unparsed.setdefault("description", []).append(
            parsed.get_payload(decode=isinstance(data, bytes))
        )
    else:
        if payload:
            # Check to see if we've already got a description; if so then both
            # it and this body move to unparseable.
            if "description" in raw:
                description_header = cast(str, raw.pop("description"))
                unparsed.setdefault("description", []).extend(
                    [description_header, payload]
                )
            elif "description" in unparsed:
                unparsed["description"].append(payload)
            else:
                raw["description"] = payload

    # We need to cast our `raw` to RawMetadata, because a TypedDict only supports
    # literal key names, while here we compute our key names on purpose; but the
    # way this function is implemented, our `raw` dict can only end up with valid
    # key names.
    return cast(RawMetadata, raw), unparsed
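A minimal round trip through parse_email, using a made-up METADATA document:

metadata_text = """\
Metadata-Version: 2.1
Name: example-project
Version: 1.0.0
Keywords: cli, tooling
Project-URL: Homepage, https://example.invalid
"""
raw, unparsed = parse_email(metadata_text)
# raw["keywords"] == ["cli", "tooling"]
# raw["project_urls"] == {"Homepage": "https://example.invalid"}
# unparsed == {} because every field was recognized and well-formed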
@ -5,7 +5,7 @@
import urllib.parse
from typing import Any, List, Optional, Set

from ._parser import parse_requirement
from ._parser import parse_requirement as _parse_requirement
from ._tokenizer import ParserSyntaxError
from .markers import Marker, _normalize_extra_values
from .specifiers import SpecifierSet
@ -32,7 +32,7 @@ class Requirement:

    def __init__(self, requirement_string: str) -> None:
        try:
            parsed = parse_requirement(requirement_string)
            parsed = _parse_requirement(requirement_string)
        except ParserSyntaxError as e:
            raise InvalidRequirement(str(e)) from e
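As with the markers module, the aliased parser stays internal; consumers go through `Requirement`, which wraps syntax errors in `InvalidRequirement`. A small sketch with an illustrative requirement string:

from packaging.requirements import InvalidRequirement, Requirement

req = Requirement('example-project[cli]>=1.0; python_version >= "3.8"')
# req.name == "example-project" and req.extras == {"cli"}

try:
    Requirement("=== not a requirement")
except InvalidRequirement as e:
    print("rejected:", e)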
@ -252,7 +252,8 @@ class Specifier(BaseSpecifier):
        # Store whether or not this Specifier should accept prereleases
        self._prereleases = prereleases

    @property
    # https://github.com/python/mypy/pull/13475#pullrequestreview-1079784515
    @property  # type: ignore[override]
    def prereleases(self) -> bool:
        # If there is an explicit prereleases set for this, then we'll just
        # blindly use that.
@ -398,7 +399,9 @@ class Specifier(BaseSpecifier):
        # We need special logic to handle prefix matching
        if spec.endswith(".*"):
            # In the case of prefix matching we want to ignore the local segment.
            normalized_prospective = canonicalize_version(prospective.public)
            normalized_prospective = canonicalize_version(
                prospective.public, strip_trailing_zero=False
            )
            # Get the normalized version string ignoring the trailing .*
            normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False)
            # Split the spec out by dots, and pretend that there is an implicit
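For context, with `strip_trailing_zero=False` both sides of the comparison keep their trailing zeros, and using `prospective.public` means the candidate's local segment is ignored during prefix matching. A sketch against the equivalent public API of the top-level packaging distribution (version strings are illustrative):

from packaging.specifiers import Specifier

spec = Specifier("==1.0.*")
print(spec.contains("1.0.1+local.build"))  # True: the local segment is ignored
print(spec.contains("1.1.0"))              # False: 1.1 does not match the 1.0 prefix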
@ -111,7 +111,7 @@ def parse_tag(tag: str) -> FrozenSet[Tag]:


def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:
    value = sysconfig.get_config_var(name)
    value: Union[int, str, None] = sysconfig.get_config_var(name)
    if value is None and warn:
        logger.debug(
            "Config variable '%s' is unset, Python ABI tag may be incorrect", name
@ -120,7 +120,7 @@ def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:


def _normalize_string(string: str) -> str:
    return string.replace(".", "_").replace("-", "_")
    return string.replace(".", "_").replace("-", "_").replace(" ", "_")


def _abi3_applies(python_version: PythonVersion) -> bool:
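The added `.replace(" ", "_")` means a platform string containing spaces now normalizes into a single valid tag component; an illustrative input:

_normalize_string("macosx 11.0 arm64")  # -> "macosx_11_0_arm64"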
@ -10,7 +10,7 @@
import collections
import itertools
import re
from typing import Callable, Optional, SupportsInt, Tuple, Union
from typing import Any, Callable, Optional, SupportsInt, Tuple, Union

from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
@ -63,7 +63,7 @@ class InvalidVersion(ValueError):


class _BaseVersion:
    _key: CmpKey
    _key: Tuple[Any, ...]

    def __hash__(self) -> int:
        return hash(self._key)
@ -179,6 +179,7 @@ class Version(_BaseVersion):
    """

    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
    _key: CmpKey

    def __init__(self, version: str) -> None:
        """Initialize a Version object.
@ -1,4 +1,4 @@
packaging==23.0
packaging==23.1
platformdirs==2.6.2
# required for platformdirs on Python < 3.8