Skip to content

Commit

Permalink
test: two tests for #1828
Browse files Browse the repository at this point in the history
  • Loading branch information
nedbat committed Aug 3, 2024
1 parent 9aaa404 commit dc819ff
Show file tree
Hide file tree
Showing 3 changed files with 43 additions and 2 deletions.
4 changes: 2 additions & 2 deletions coverage/phystokens.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,14 +57,14 @@ def _phys_tokens(toks: TokenInfos) -> TokenInfos:
if last_ttext.endswith("\\"):
inject_backslash = False
elif ttype == token.STRING:
if last_line.endswith(last_ttext+"\\\n"):
if last_line.endswith(last_ttext + "\\\n"):
# Deal with special cases like such code::
#
# a = ["aaa",\
# "bbb \
# ccc"]
#
pass
inject_backslash = True
elif "\n" in ttext and ttext.split("\n", 1)[0][-1] == "\\":
# It's a multi-line string and the first line ends with
# a backslash, so we don't need to inject another.
Expand Down
22 changes: 22 additions & 0 deletions tests/test_html.py
Original file line number Diff line number Diff line change
Expand Up @@ -1131,6 +1131,28 @@ def test_tabbed(self) -> None:

doesnt_contain("out/tabbed_py.html", "\t")

def test_bug_1828(self) -> None:
    """Regression test: the HTML report renders backslash-continued lines.

    A string literal whose source line ends with a backslash (continuing
    onto the next physical line) must still produce one numbered span per
    physical line in the HTML report.

    https://github.com/nedbat/coveragepy/pull/1828
    """
    # Three physical lines, one logical statement: line 1 ends with an
    # explicit `\` continuation, and the string on line 2 continues onto
    # line 3 via a backslash inside the string literal.
    # NOTE(review): leading whitespace inside this file content appears to
    # have been lost in this view — the expected HTML below implies lines
    # 2-3 carry indentation; confirm against the repository.
    self.make_file("backslashes.py", """\
a = ["aaa",\\
"bbb \\
ccc"]
""")

    cov = coverage.Coverage()
    backslashes = self.start_import_stop(cov, "backslashes")
    cov.html_report(backslashes, directory="out")

    # Each physical source line must appear under its own `tN` anchor,
    # even though lines 2 and 3 belong to the same logical statement.
    contains(
        "out/backslashes_py.html",
        # line 2 is `"bbb \`
        r'<a id="t2" href="#t2">2</a></span>'
        + r'<span class="t"> <span class="str">"bbb \</span>',
        # line 3 is `ccc"]`
        r'<a id="t3" href="#t3">3</a></span>'
        + r'<span class="t"><span class="str"> ccc"</span><span class="op">]</span>',
    )

def test_unicode(self) -> None:
surrogate = "\U000e0100"

Expand Down
19 changes: 19 additions & 0 deletions tests/test_phystokens.py
Original file line number Diff line number Diff line change
Expand Up @@ -98,6 +98,24 @@ def test_tokenize_real_file(self) -> None:
real_file = os.path.join(TESTS_DIR, "test_coverage.py")
self.check_file_tokenization(real_file)

def test_1828(self) -> None:
    """Regression test: tokenizing lines that end with backslashes.

    Covers both an explicit line-continuation backslash outside a string
    and a backslash continuation inside a string literal.

    https://github.com/nedbat/coveragepy/pull/1828
    """
    # NOTE(review): inside this triple-quoted literal, the single `\` after
    # `x =` is a *string-level* escape that joins the two source lines, while
    # the doubled `\\` produce literal backslashes in the tokenized source.
    # Leading whitespace inside the literal (and inside the expected `ws`
    # tokens below) may have been lost in this view — confirm against the
    # repository before editing.
    tokens = list(source_token_lines(textwrap.dedent("""
x = \
1
a = ["aaa",\\
"bbb \\
ccc"]
""")))
    # The tokenizer must keep each physical line as its own token list; the
    # trailing backslash on the `a = [...` line comes through as an 'xx'
    # (unrecognized) token, and the continued string stays split across
    # lines 4-5.
    assert tokens == [
        [],
        [('nam', 'x'), ('ws', ' '), ('op', '='), ('ws', ' '), ('num', '1')],
        [('nam', 'a'), ('ws', ' '), ('op', '='), ('ws', ' '),
         ('op', '['), ('str', '"aaa"'), ('op', ','), ('xx', '\\')],
        [('ws', ' '), ('str', '"bbb \\')],
        [('str', ' ccc"'), ('op', ']')],
    ]

@pytest.mark.parametrize("fname", [
"stress_phystoken.tok",
"stress_phystoken_dos.tok",
Expand All @@ -113,6 +131,7 @@ def test_stress(self, fname: str) -> None:
with open(stress) as fstress:
assert re.search(r"(?m) $", fstress.read()), f"{stress} needs a trailing space."


@pytest.mark.skipif(not env.PYBEHAVIOR.soft_keywords, reason="Soft keywords are new in Python 3.10")
class SoftKeywordTest(CoverageTest):
"""Tests the tokenizer handling soft keywords."""
Expand Down

0 comments on commit dc819ff

Please sign in to comment.