
Commit

gh-99581: Fix a buffer overflow in the tokenizer when copying lines that fill the available buffer

Signed-off-by: Pablo Galindo <pablogsal@gmail.com>
pablogsal committed Nov 20, 2022
1 parent b0e1f9c commit a674cef
Showing 3 changed files with 27 additions and 3 deletions.
20 changes: 18 additions & 2 deletions Lib/test/test_tokenize.py
@@ -10,6 +10,8 @@
from unittest import TestCase, mock
from test.test_grammar import (VALID_UNDERSCORE_LITERALS,
                               INVALID_UNDERSCORE_LITERALS)
from test.support import os_helper
from test.support.script_helper import run_test_script, make_script
import os
import token

@@ -1026,7 +1028,7 @@ def test_utf8_coding_cookie_and_utf8_bom(self):
    def test_bad_coding_cookie(self):
        self.assertRaises(SyntaxError, self._testFile, 'bad_coding.py')
        self.assertRaises(SyntaxError, self._testFile, 'bad_coding2.py')


class Test_Tokenize(TestCase):

@@ -2631,5 +2633,19 @@ def fib(n):
        self.assertEqual(get_tokens(code), get_tokens(code_no_cont))


class CTokenizerBufferTests(unittest.TestCase):
    def test_newline_at_the_end_of_buffer(self):
        # See issue 99581: Make sure that if we need to add a newline at the
        # end of the buffer, we have enough space in the buffer, especially
        # when the current line is as long as the buffer space available.
        test_script = f"""\
        #coding: latin-1
        #{"a"*10000}
        #{"a"*10002}"""
        with os_helper.temp_dir() as temp_dir:
            file_name = make_script(temp_dir, 'foo', test_script)
            run_test_script(file_name)


if __name__ == "__main__":
    unittest.main()
@@ -0,0 +1,3 @@
Fixed a bug that caused a buffer overflow when the tokenizer copies, from a
file, a line that is missing the trailing newline character and is as long as
the available tokenizer buffer. Patch by Pablo Galindo.
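
To make the arithmetic behind this concrete, here is a minimal, self-contained sketch; it is illustrative only and not CPython's tokenizer code. Writing a last line of buflen bytes and then faking a trailing newline takes buflen + 2 bytes, one more than the buflen + 1 the tokenizer used to reserve. (The reservation is only exact when the line fills the available space, which appears to be why triggering the crash needs a final line as long as the buffer.)

/* Illustrative sketch only; not CPython's tokenizer code. */
#include <stdio.h>
#include <string.h>

int main(void) {
    /* A last line with no trailing newline. */
    const char *line = "#aaaaaaaa";
    size_t buflen = strlen(line);

    size_t reserved_old = buflen + 1;   /* old: line + '\0' only         */
    size_t reserved_new = buflen + 2;   /* new: line + fake '\n' + '\0'  */
    size_t written      = buflen + 2;   /* bytes the tokenizer may write */

    printf("old reservation %zu vs %zu written: %s\n", reserved_old, written,
           written > reserved_old ? "one byte short (overflow)" : "fits");
    printf("new reservation %zu vs %zu written: %s\n", reserved_new, written,
           written > reserved_new ? "overflow" : "fits");
    return 0;
}
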
7 changes: 6 additions & 1 deletion Parser/tokenizer.c
@@ -413,7 +413,11 @@ tok_readline_recode(struct tok_state *tok) {
        error_ret(tok);
        goto error;
    }
    if (!tok_reserve_buf(tok, buflen + 1)) {
    // Make room for the null terminator *and* potentially
    // an extra newline character that we may need to artificially
    // add.
    size_t buffer_size = buflen + 2;
    if (!tok_reserve_buf(tok, buffer_size)) {
        goto error;
    }
    memcpy(tok->inp, buf, buflen);
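
A sketch of the reservation pattern in the hunk above, with toybuf and toy_reserve standing in for CPython's tok_state and tok_reserve_buf; those names and the growth strategy are assumptions made for illustration. The point is that at least buflen + 2 free bytes are requested before the recoded line is copied, so a fake newline and the terminating NUL can be appended later without growing the buffer again.

/* Illustrative stand-ins for tok->buf / tok->inp / tok->end; not the
 * real CPython structures or the real tok_reserve_buf() implementation. */
#include <stdlib.h>
#include <string.h>

struct toybuf { char *buf, *inp, *end; };

/* Ensure at least `size` free bytes remain after `inp`. */
static int toy_reserve(struct toybuf *t, size_t size) {
    size_t used  = (t->buf == NULL) ? 0 : (size_t)(t->inp - t->buf);
    size_t avail = (t->buf == NULL) ? 0 : (size_t)(t->end - t->inp);
    if (avail >= size) {
        return 1;
    }
    char *p = realloc(t->buf, used + size);
    if (p == NULL) {
        return 0;
    }
    t->buf = p;
    t->inp = p + used;
    t->end = p + used + size;
    return 1;
}

int main(void) {
    struct toybuf t = {NULL, NULL, NULL};
    const char *recoded = "#coding: latin-1";   /* pretend recoded line */
    size_t buflen = strlen(recoded);

    /* buflen + 2: the copied bytes, a possible artificial '\n', and '\0'. */
    if (!toy_reserve(&t, buflen + 2)) {
        return 1;
    }
    memcpy(t.inp, recoded, buflen);
    t.inp += buflen;
    *t.inp = '\0';

    free(t.buf);
    return 0;
}
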
@@ -1000,6 +1004,7 @@ tok_underflow_file(struct tok_state *tok) {
        return 0;
    }
    if (tok->inp[-1] != '\n') {
        assert(tok->inp + 1 < tok->end);
        /* Last line does not end in \n, fake one */
        *tok->inp++ = '\n';
        *tok->inp = '\0';
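
The assert added in the hunk above documents the invariant that the larger reservation now guarantees: when the last line does not end in a newline, at least two free bytes remain, one for the faked '\n' and one for the NUL. Below is a small stand-alone sketch of that bound, using a plain array in place of the tokenizer buffer; the names are illustrative only.

/* Sketch of the invariant checked by the new assert; not CPython code. */
#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    /* Buffer sized for the line, a fake '\n', and a '\0'. */
    char buf[5 + 2] = "#aaaa";          /* last line, no trailing '\n' */
    char *inp = buf + strlen(buf);      /* one past the copied bytes   */
    char *end = buf + sizeof(buf);      /* one past the buffer         */

    /* inp + 1 < end means at least two free bytes remain, enough for
     * the fake newline and the terminating NUL written below.        */
    assert(inp + 1 < end);
    *inp++ = '\n';
    *inp = '\0';

    printf("%s", buf);                  /* prints "#aaaa" plus newline */
    return 0;
}
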

