From 55ec1aad749df0d9dd4197eb7de82d809de52edf Mon Sep 17 00:00:00 2001
From: Johann Hemmann
Date: Tue, 12 Mar 2024 19:41:17 +0100
Subject: [PATCH] Sync with ferrocene/ferrocene/pull/348

---
 .gitignore                              |  1 +
 exts/ferrocene_autoglossary/__init__.py |  9 ++++-----
 exts/ferrocene_autoglossary/debug.py    |  4 ++--
 exts/ferrocene_autoglossary/lexer.py    | 12 ++++++------
 4 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/.gitignore b/.gitignore
index fac8ef1..e8c3bdb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,3 +3,4 @@
 
 __pycache__
 *.py[co]
+.env

diff --git a/exts/ferrocene_autoglossary/__init__.py b/exts/ferrocene_autoglossary/__init__.py
index d4f7b61..096e6b0 100644
--- a/exts/ferrocene_autoglossary/__init__.py
+++ b/exts/ferrocene_autoglossary/__init__.py
@@ -58,15 +58,15 @@ def apply(self):
 
     def apply_to_node(self, state, node):
         lexed = list(lexer.lexer(node.astext(), state.terms))
-        if len(lexed) == 1 and type(lexed[0]) == str:
+        if len(lexed) == 1 and type(lexed[0]) is str:
             # Do nothing if the lexed version returned the same string.
             pass
         else:
             container = nodes.inline()
             for part in lexed:
-                if type(part) == str:
+                if type(part) is str:
                     container.append(nodes.Text(part))
-                elif type(part) == MatchedTerm:
+                elif type(part) is MatchedTerm:
                     container.append(self.make_link(part))
                 else:
                     raise RuntimeError("unexpected result of lexer")
@@ -88,7 +88,6 @@ class PruneGlossaryTransform(SphinxTransform):
     default_priority = 500
 
     def apply(self):
-        state = State.get(self.env)
         glossaries = list(self.document.findall(addnodes.glossary))
         if glossaries:
             used_terms = self.discover_used_terms()
@@ -116,7 +115,7 @@ def discover_used_terms(self):
             doctree = self.env.get_doctree(docname)
             for node in lexer.find_lexable_nodes(doctree):
                 for part in lexer.lexer(node.node.astext(), state.terms):
-                    if type(part) != MatchedTerm:
+                    if type(part) is not MatchedTerm:
                         continue
                     name = part.term.name
                     # Join the list of dependencies, setting None when either

diff --git a/exts/ferrocene_autoglossary/debug.py b/exts/ferrocene_autoglossary/debug.py
index 9c6182c..1739955 100644
--- a/exts/ferrocene_autoglossary/debug.py
+++ b/exts/ferrocene_autoglossary/debug.py
@@ -46,10 +46,10 @@ def render_lexed_node(terms, node):
 
     has_matches = False
     for token in lexer.lexer(node.astext(), terms):
-        if type(token) == MatchedTerm:
+        if type(token) is MatchedTerm:
             result += f"[{token.text}]"
             has_matches = True
-        elif type(token) == str:
+        elif type(token) is str:
             result += token
         else:
             raise RuntimeError("invalid token type")

diff --git a/exts/ferrocene_autoglossary/lexer.py b/exts/ferrocene_autoglossary/lexer.py
index 2a2b943..a237f64 100644
--- a/exts/ferrocene_autoglossary/lexer.py
+++ b/exts/ferrocene_autoglossary/lexer.py
@@ -15,11 +15,11 @@
 
 
 def find_lexable_nodes(node, *, inside_glossary=False, inside_definition_of=None):
-    if type(node) == nodes.Text:
+    if type(node) is nodes.Text:
         yield LexableNode(node=node, inside_definition_of=inside_definition_of)
-    elif type(node) == addnodes.glossary:
+    elif type(node) is addnodes.glossary:
         inside_glossary = True
-    elif inside_glossary and type(node) == nodes.definition_list_item:
+    elif inside_glossary and type(node) is nodes.definition_list_item:
         inside_definition_of = {term.astext() for term in node.findall(nodes.term)}
     elif type(node) in (
         nodes.reference,
@@ -30,7 +30,7 @@
         return
 
     for child in node.children:
-        if inside_glossary and type(child) == nodes.term:
+        if inside_glossary and type(child) is nodes.term:
             continue
         for result in find_lexable_nodes(
             child,
@@ -56,12 +56,12 @@ def _filter_matches(matches):
     for token in matches:
         # Convert a match into a token if the previous token doesn't allow the
         # following token to be a match.
-        if type(token) == MatchedTerm and not previous_token_allows_match:
+        if type(token) is MatchedTerm and not previous_token_allows_match:
             token = token.text
 
         # Only allow the next token to be a match if this is a text token
         # that doesn't end with forbidden chars.
-        previous_token_allows_match = type(token) == str and (
+        previous_token_allows_match = type(token) is str and (
             not token or token[-1] not in FORBID_MATCH_WHEN_PREVIOUS_ENDS_WITH
         )
 
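
A note on the substance of this sync (not part of the diff itself, and ignored by git apply): aside from the .gitignore addition and the deleted unused local `state` in PruneGlossaryTransform.apply, every hunk replaces `type(x) == T` with `type(x) is T`. Because classes are singletons, identity is the precise way to spell an exact type check: it cannot be fooled by overridden equality, and it is what pycodestyle's E721 warning recommends. Below is a minimal runnable sketch of the difference; the names `Sneaky` and `Fake` are invented purely for illustration.

# A metaclass that overrides `==`, making equality-based type checks lie.
class Sneaky(type):
    def __eq__(cls, other):
        return True

    def __hash__(cls):
        return id(cls)


class Fake(metaclass=Sneaky):
    pass


print(type("hello") == Fake)  # True, although "hello" is a str, not a Fake
print(type("hello") is Fake)  # False: identity cannot be spoofed
print(type("hello") is str)   # True: the exact check this patch switches to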