chore: update vendored packages (#655)
radoering authored Oct 30, 2023
1 parent 5462ec4 commit 0d60109
Showing 36 changed files with 1,018 additions and 380 deletions.
3 changes: 2 additions & 1 deletion src/poetry/core/_vendor/fastjsonschema/ref_resolver.py
@@ -11,7 +11,6 @@
import re
from urllib import parse as urlparse
from urllib.parse import unquote
from urllib.request import urlopen

from .exceptions import JsonSchemaDefinitionException

@@ -59,6 +58,8 @@ def resolve_remote(uri, handlers):
if scheme in handlers:
result = handlers[scheme](uri)
else:
from urllib.request import urlopen

req = urlopen(uri)
encoding = req.info().get_content_charset() or 'utf-8'
try:
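Note on the change above: the `urlopen` import now happens only when no custom handler matches the URI scheme, so resolving remote `$ref`s through user-supplied handlers never loads `urllib.request`. A rough sketch of that path, assuming fastjsonschema's public `handlers=` argument (the scheme name and schema contents are invented for illustration):

```python
import fastjsonschema  # vendored here as poetry.core._vendor.fastjsonschema

# In-memory "registry" scheme; resolve_remote() finds it in `handlers`
# and never reaches the branch that imports urllib.request.urlopen.
SCHEMAS = {"registry://defs/name.json": {"type": "string", "minLength": 1}}

validate = fastjsonschema.compile(
    {"$ref": "registry://defs/name.json"},
    handlers={"registry": SCHEMAS.__getitem__},
)

validate("poetry-core")  # passes
validate("")             # raises JsonSchemaValueException
```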
2 changes: 1 addition & 1 deletion src/poetry/core/_vendor/fastjsonschema/version.py
@@ -1 +1 @@
VERSION = '2.18.0'
VERSION = '2.18.1'
2 changes: 1 addition & 1 deletion src/poetry/core/_vendor/lark/__init__.py
@@ -14,7 +14,7 @@
from .utils import logger
from .visitors import Discard, Transformer, Transformer_NonRecursive, Visitor, v_args

__version__: str = "1.1.7"
__version__: str = "1.1.8"

__all__ = (
"GrammarError",
2 changes: 1 addition & 1 deletion src/poetry/core/_vendor/lark/ast_utils.py
@@ -1,5 +1,5 @@
"""
Module of utilities for transforming a lark.Tree into a custom Abstract Syntax Tree
Module of utilities for transforming a lark.Tree into a custom Abstract Syntax Tree (AST defined in classes)
"""

import inspect, re
20 changes: 12 additions & 8 deletions src/poetry/core/_vendor/lark/common.py
@@ -1,11 +1,12 @@
from copy import deepcopy
import sys
from types import ModuleType
from typing import Callable, Collection, Dict, Optional, TYPE_CHECKING
from typing import Callable, Collection, Dict, Optional, TYPE_CHECKING, List

if TYPE_CHECKING:
from .lark import PostLex
from .lexer import Lexer
from .grammar import Rule
from typing import Union, Type
if sys.version_info >= (3, 8):
from typing import Literal
@@ -23,7 +24,8 @@

_ParserArgType: 'TypeAlias' = 'Literal["earley", "lalr", "cyk", "auto"]'
_LexerArgType: 'TypeAlias' = 'Union[Literal["auto", "basic", "contextual", "dynamic", "dynamic_complete"], Type[Lexer]]'
_Callback = Callable[[Token], Token]
_LexerCallback = Callable[[Token], Token]
ParserCallbacks = Dict[str, Callable]

class LexerConf(Serialize):
__serialize_fields__ = 'terminals', 'ignore', 'g_regex_flags', 'use_bytes', 'lexer_type'
@@ -33,15 +35,15 @@ class LexerConf(Serialize):
re_module: ModuleType
ignore: Collection[str]
postlex: 'Optional[PostLex]'
callbacks: Dict[str, _Callback]
callbacks: Dict[str, _LexerCallback]
g_regex_flags: int
skip_validation: bool
use_bytes: bool
lexer_type: Optional[_LexerArgType]
strict: bool

def __init__(self, terminals: Collection[TerminalDef], re_module: ModuleType, ignore: Collection[str]=(), postlex: 'Optional[PostLex]'=None,
callbacks: Optional[Dict[str, _Callback]]=None, g_regex_flags: int=0, skip_validation: bool=False, use_bytes: bool=False, strict: bool=False):
callbacks: Optional[Dict[str, _LexerCallback]]=None, g_regex_flags: int=0, skip_validation: bool=False, use_bytes: bool=False, strict: bool=False):
self.terminals = terminals
self.terminals_by_name = {t.name: t for t in self.terminals}
assert len(self.terminals) == len(self.terminals_by_name)
@@ -70,16 +72,18 @@ def __deepcopy__(self, memo=None):
deepcopy(self.use_bytes, memo),
)


class ParserConf(Serialize):
__serialize_fields__ = 'rules', 'start', 'parser_type'

def __init__(self, rules, callbacks, start):
rules: List['Rule']
callbacks: ParserCallbacks
start: List[str]
parser_type: _ParserArgType

def __init__(self, rules: List['Rule'], callbacks: ParserCallbacks, start: List[str]):
assert isinstance(start, list)
self.rules = rules
self.callbacks = callbacks
self.start = start

self.parser_type = None

###}
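For context, the `_LexerCallback` alias above is the shape of functions passed through Lark's public `lexer_callbacks` option: each one receives a `Token` and must return a `Token`. A minimal sketch (grammar and terminal name invented for the example):

```python
from lark import Lark, Token

def upper_name(tok: Token) -> Token:
    # Matches the _LexerCallback signature: Token in, Token out.
    return Token(tok.type, tok.value.upper())

parser = Lark(r"""
    start: NAME+
    NAME: /[a-z]+/
    %ignore " "
""", parser="lalr", lexer_callbacks={"NAME": upper_name})

print(parser.parse("foo bar").children)  # [Token('NAME', 'FOO'), Token('NAME', 'BAR')]
```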
2 changes: 1 addition & 1 deletion src/poetry/core/_vendor/lark/exceptions.py
@@ -50,6 +50,7 @@ class UnexpectedInput(LarkError):
pos_in_stream = None
state: Any
_terminals_by_name = None
interactive_parser: 'InteractiveParser'

def get_context(self, text: str, span: int=40) -> str:
"""Returns a pretty string pinpointing the error in the text,
@@ -225,7 +226,6 @@ class UnexpectedToken(ParseError, UnexpectedInput):

expected: Set[str]
considered_rules: Set[str]
interactive_parser: 'InteractiveParser'

def __init__(self, token, expected, considered_rules=None, state=None, interactive_parser=None, terminals_by_name=None, token_history=None):
super(UnexpectedToken, self).__init__()
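Moving the `interactive_parser` annotation up to `UnexpectedInput` reflects that the LALR parser attaches an interactive parser to the errors it raises. A rough sketch of inspecting it after a failed parse (grammar and input invented; `accepts()` assumed from the interactive-parser API):

```python
from lark import Lark
from lark.exceptions import UnexpectedInput

# "basic" lexer so the stray token reaches the parser and raises
# UnexpectedToken instead of a lexer-level error.
parser = Lark(r"""
    start: NAME "=" NUMBER
    NAME: /[a-z]+/
    NUMBER: /[0-9]+/
    %ignore " "
""", parser="lalr", lexer="basic")

try:
    parser.parse("x = = 1")
except UnexpectedInput as err:
    # interactive_parser is now annotated on UnexpectedInput itself.
    print(sorted(err.interactive_parser.accepts()))  # e.g. ['NUMBER']
```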
12 changes: 10 additions & 2 deletions src/poetry/core/_vendor/lark/grammar.py
@@ -1,4 +1,4 @@
from typing import Optional, Tuple, ClassVar
from typing import Optional, Tuple, ClassVar, Sequence

from .utils import Serialize

@@ -93,7 +93,15 @@ class Rule(Serialize):
__serialize_fields__ = 'origin', 'expansion', 'order', 'alias', 'options'
__serialize_namespace__ = Terminal, NonTerminal, RuleOptions

def __init__(self, origin, expansion, order=0, alias=None, options=None):
origin: NonTerminal
expansion: Sequence[Symbol]
order: int
alias: Optional[str]
options: RuleOptions
_hash: int

def __init__(self, origin: NonTerminal, expansion: Sequence[Symbol],
order: int=0, alias: Optional[str]=None, options: Optional[RuleOptions]=None):
self.origin = origin
self.expansion = expansion
self.alias = alias
2 changes: 1 addition & 1 deletion src/poetry/core/_vendor/lark/indenter.py
@@ -1,4 +1,4 @@
"Provides Indentation services for languages with indentation similar to Python"
"Provides a post-lexer for implementing Python-style indentation."

from abc import ABC, abstractmethod
from typing import List, Iterator
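The reworded docstring describes the `Indenter` post-lexer. A condensed version of lark's indented-tree example, showing how the post-lexer turns indentation into `_INDENT`/`_DEDENT` tokens (grammar trimmed for brevity):

```python
from lark import Lark
from lark.indenter import Indenter

class TreeIndenter(Indenter):
    NL_type = "_NL"
    OPEN_PAREN_types = []
    CLOSE_PAREN_types = []
    INDENT_type = "_INDENT"
    DEDENT_type = "_DEDENT"
    tab_len = 8

parser = Lark(r"""
    ?start: _NL* tree
    tree: NAME _NL [_INDENT tree+ _DEDENT]
    NAME: /\w+/
    _NL: /(\r?\n[\t ]*)+/
    %declare _INDENT _DEDENT
""", parser="lalr", postlex=TreeIndenter())

print(parser.parse("a\n    b\n    c\n        d\n    e\n").pretty())
```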
10 changes: 8 additions & 2 deletions src/poetry/core/_vendor/lark/lark.py
@@ -62,19 +62,20 @@ class LarkOptions(Serialize):
regex: bool
g_regex_flags: int
keep_all_tokens: bool
tree_class: Any
tree_class: Optional[Callable[[str, List], Any]]
parser: _ParserArgType
lexer: _LexerArgType
ambiguity: 'Literal["auto", "resolve", "explicit", "forest"]'
postlex: Optional[PostLex]
priority: 'Optional[Literal["auto", "normal", "invert"]]'
lexer_callbacks: Dict[str, Callable[[Token], Token]]
use_bytes: bool
ordered_sets: bool
edit_terminals: Optional[Callable[[TerminalDef], TerminalDef]]
import_paths: 'List[Union[str, Callable[[Union[None, str, PackageResource], str], Tuple[str, str]]]]'
source_path: Optional[str]

OPTIONS_DOC = """
OPTIONS_DOC = r"""
**=== General Options ===**
start
@@ -141,6 +142,8 @@ class LarkOptions(Serialize):
Dictionary of callbacks for the lexer. May alter tokens during lexing. Use with caution.
use_bytes
Accept an input of type ``bytes`` instead of ``str``.
ordered_sets
Should Earley use ordered-sets to achieve stable output (~10% slower than regular sets. Default: True)
edit_terminals
A callback for editing the terminals before parse.
import_paths
@@ -179,6 +182,7 @@ class LarkOptions(Serialize):
'edit_terminals': None,
'g_regex_flags': 0,
'use_bytes': False,
'ordered_sets': True,
'import_paths': [],
'source_path': None,
'_plugins': {},
@@ -494,6 +498,8 @@ def save(self, f, exclude_options: Collection[str] = ()) -> None:
Useful for caching and multiprocessing.
"""
if self.options.parser != 'lalr':
raise NotImplementedError("Lark.save() is only implemented for the LALR(1) parser.")
data, m = self.memo_serialize([TerminalDef, Rule])
if exclude_options:
data["options"] = {n: v for n, v in data["options"].items() if n not in exclude_options}
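Two user-facing bits in this file: a new `ordered_sets` option (Earley only; stable output at roughly 10% cost) and an explicit `NotImplementedError` when `Lark.save()` is used with a non-LALR parser. A rough sketch of the save/load round-trip the new guard protects (grammar and cache file name invented):

```python
from lark import Lark

parser = Lark(r"""
    start: WORD+
    %import common.WORD
    %ignore " "
""", parser="lalr")  # save() below raises NotImplementedError for earley/cyk

with open("grammar.cache", "wb") as f:
    parser.save(f)

with open("grammar.cache", "rb") as f:
    restored = Lark.load(f)

print(restored.parse("hello world").pretty())
```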
63 changes: 40 additions & 23 deletions src/poetry/core/_vendor/lark/lexer.py
@@ -15,6 +15,7 @@
pass
if TYPE_CHECKING:
from .common import LexerConf
from .parsers.lalr_parser_state import ParserState

from .utils import classify, get_regexp_width, Serialize, logger
from .exceptions import UnexpectedCharacters, LexError, UnexpectedToken
@@ -30,6 +31,7 @@
has_interegular = False

class Pattern(Serialize, ABC):
"An abstraction over regular expressions."

value: str
flags: Collection[str]
@@ -112,6 +114,7 @@ def max_width(self) -> int:


class TerminalDef(Serialize):
"A definition of a terminal"
__serialize_fields__ = 'name', 'pattern', 'priority'
__serialize_namespace__ = PatternStr, PatternRE

Expand Down Expand Up @@ -269,6 +272,8 @@ def __eq__(self, other):


class LineCounter:
"A utility class for keeping track of line & column information"

__slots__ = 'char_pos', 'line', 'column', 'line_start_pos', 'newline_char'

def __init__(self, newline_char):
@@ -432,7 +437,7 @@ def __init__(self, lexer: 'Lexer', lexer_state: LexerState):
self.state = lexer_state

@classmethod
def from_text(cls, lexer: 'Lexer', text: str):
def from_text(cls, lexer: 'Lexer', text: str) -> 'LexerThread':
return cls(lexer, LexerState(text))

def lex(self, parser_state):
@@ -493,7 +498,24 @@ def _check_regex_collisions(terminal_to_regexp: Dict[TerminalDef, str], comparat
return


class BasicLexer(Lexer):
class AbstractBasicLexer(Lexer):
terminals_by_name: Dict[str, TerminalDef]

@abstractmethod
def __init__(self, conf: 'LexerConf', comparator=None) -> None:
...

@abstractmethod
def next_token(self, lex_state: LexerState, parser_state: Any = None) -> Token:
...

def lex(self, state: LexerState, parser_state: Any) -> Iterator[Token]:
with suppress(EOFError):
while True:
yield self.next_token(state, parser_state)


class BasicLexer(AbstractBasicLexer):
terminals: Collection[TerminalDef]
ignore_types: FrozenSet[str]
newline_types: FrozenSet[str]
@@ -565,11 +587,6 @@ def scanner(self):
def match(self, text, pos):
return self.scanner.match(text, pos)

def lex(self, state: LexerState, parser_state: Any) -> Iterator[Token]:
with suppress(EOFError):
while True:
yield self.next_token(state, parser_state)

def next_token(self, lex_state: LexerState, parser_state: Any = None) -> Token:
line_ctr = lex_state.line_ctr
while line_ctr.char_pos < len(lex_state.text):
@@ -584,34 +601,34 @@ def next_token(self, lex_state: LexerState, parser_state: Any = None) -> Token:

value, type_ = res

if type_ not in self.ignore_types:
ignored = type_ in self.ignore_types
t = None
if not ignored or type_ in self.callback:
t = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)
line_ctr.feed(value, type_ in self.newline_types)
line_ctr.feed(value, type_ in self.newline_types)
if t is not None:
t.end_line = line_ctr.line
t.end_column = line_ctr.column
t.end_pos = line_ctr.char_pos
if t.type in self.callback:
t = self.callback[t.type](t)
if not ignored:
if not isinstance(t, Token):
raise LexError("Callbacks must return a token (returned %r)" % t)
lex_state.last_token = t
return t
else:
if type_ in self.callback:
t2 = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)
self.callback[type_](t2)
line_ctr.feed(value, type_ in self.newline_types)
lex_state.last_token = t
return t

# EOF
raise EOFError(self)


class ContextualLexer(Lexer):
lexers: Dict[int, AbstractBasicLexer]
root_lexer: AbstractBasicLexer

lexers: Dict[str, BasicLexer]
root_lexer: BasicLexer
BasicLexer: Type[AbstractBasicLexer] = BasicLexer

def __init__(self, conf: 'LexerConf', states: Dict[str, Collection[str]], always_accept: Collection[str]=()) -> None:
def __init__(self, conf: 'LexerConf', states: Dict[int, Collection[str]], always_accept: Collection[str]=()) -> None:
terminals = list(conf.terminals)
terminals_by_name = conf.terminals_by_name

@@ -622,7 +639,7 @@ def __init__(self, conf: 'LexerConf', states: Dict[str, Collection[str]], always
comparator = interegular.Comparator.from_regexes({t: t.pattern.to_regexp() for t in terminals})
else:
comparator = None
lexer_by_tokens: Dict[FrozenSet[str], BasicLexer] = {}
lexer_by_tokens: Dict[FrozenSet[str], AbstractBasicLexer] = {}
self.lexers = {}
for state, accepts in states.items():
key = frozenset(accepts)
@@ -632,16 +649,16 @@ def __init__(self, conf: 'LexerConf', states: Dict[str, Collection[str]], always
accepts = set(accepts) | set(conf.ignore) | set(always_accept)
lexer_conf = copy(trad_conf)
lexer_conf.terminals = [terminals_by_name[n] for n in accepts if n in terminals_by_name]
lexer = BasicLexer(lexer_conf, comparator)
lexer = self.BasicLexer(lexer_conf, comparator)
lexer_by_tokens[key] = lexer

self.lexers[state] = lexer

assert trad_conf.terminals is terminals
trad_conf.skip_validation = True # We don't need to verify all terminals again
self.root_lexer = BasicLexer(trad_conf, comparator)
self.root_lexer = self.BasicLexer(trad_conf, comparator)

def lex(self, lexer_state: LexerState, parser_state: Any) -> Iterator[Token]:
def lex(self, lexer_state: LexerState, parser_state: 'ParserState') -> Iterator[Token]:
try:
while True:
lexer = self.lexers[parser_state.position]
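The new `AbstractBasicLexer` base class and the `BasicLexer` class attribute on `ContextualLexer` make the per-state lexer pluggable. A hypothetical sketch of what that hook allows (the logging subclass is invented; wiring it into a `Lark` instance is not shown):

```python
from typing import Any
from lark.lexer import BasicLexer, ContextualLexer, LexerState, Token

class LoggingBasicLexer(BasicLexer):
    # Same AbstractBasicLexer interface; just traces each token it emits.
    def next_token(self, lex_state: LexerState, parser_state: Any = None) -> Token:
        tok = super().next_token(lex_state, parser_state)
        print(f"lexed {tok.type} {tok.value!r}")
        return tok

class LoggingContextualLexer(ContextualLexer):
    # ContextualLexer builds its per-state lexers from this class attribute,
    # so every state of this lexer uses LoggingBasicLexer.
    BasicLexer = LoggingBasicLexer
```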