Skip to content

Commit

Permalink
Merge branch '3.12' into backport-d5dbbf4-3.12
Browse files Browse the repository at this point in the history
  • Loading branch information
graingert authored Oct 16, 2024
2 parents 0ae2c31 + 4256847 commit c6894b1
Show file tree
Hide file tree
Showing 14 changed files with 107 additions and 57 deletions.
2 changes: 1 addition & 1 deletion Doc/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -593,7 +593,7 @@
# Sphinx 8.1 has in-built CVE and CWE roles.
extlinks |= {
"cve": (
"https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-%s",
"https://www.cve.org/CVERecord?id=CVE-%s",
"CVE-%s",
),
"cwe": ("https://cwe.mitre.org/data/definitions/%s.html", "CWE-%s"),
Expand Down
8 changes: 7 additions & 1 deletion Doc/library/argparse.rst
Original file line number Diff line number Diff line change
Expand Up @@ -1740,7 +1740,8 @@ FileType objects
Argument groups
^^^^^^^^^^^^^^^

.. method:: ArgumentParser.add_argument_group(title=None, description=None)
.. method:: ArgumentParser.add_argument_group(title=None, description=None, *, \
[argument_default], [conflict_handler])

By default, :class:`ArgumentParser` groups command-line arguments into
"positional arguments" and "options" when displaying help
Expand Down Expand Up @@ -1785,6 +1786,11 @@ Argument groups

--bar BAR bar help

The optional, keyword-only parameters argument_default_ and conflict_handler_
allow for finer-grained control of the behavior of the argument group. These
parameters have the same meaning as in the :class:`ArgumentParser` constructor,
but apply specifically to the argument group rather than the entire parser.

Note that any arguments not in your user-defined groups will end up back
in the usual "positional arguments" and "optional arguments" sections.

Expand Down
5 changes: 3 additions & 2 deletions Lib/bdb.py
Original file line number Diff line number Diff line change
Expand Up @@ -295,9 +295,10 @@ def _set_caller_tracefunc(self, current_frame):
# Issue #13183: pdb skips frames after hitting a breakpoint and running
# step commands.
# Restore the trace function in the caller (that may not have been set
# for performance reasons) when returning from the current frame.
# for performance reasons) when returning from the current frame, unless
# the caller is the botframe.
caller_frame = current_frame.f_back
if caller_frame and not caller_frame.f_trace:
if caller_frame and not caller_frame.f_trace and caller_frame is not self.botframe:
caller_frame.f_trace = self.trace_dispatch

# Derived classes and clients can call the following methods
Expand Down
52 changes: 22 additions & 30 deletions Lib/concurrent/futures/process.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,27 +68,31 @@
class _ThreadWakeup:
def __init__(self):
self._closed = False
self._lock = threading.Lock()
self._reader, self._writer = mp.Pipe(duplex=False)

def close(self):
# Please note that we do not take the shutdown lock when
# Please note that we do not take the self._lock when
# calling clear() (to avoid deadlocking) so this method can
# only be called safely from the same thread as all calls to
# clear() even if you hold the shutdown lock. Otherwise we
# clear() even if you hold the lock. Otherwise we
# might try to read from the closed pipe.
if not self._closed:
self._closed = True
self._writer.close()
self._reader.close()
with self._lock:
if not self._closed:
self._closed = True
self._writer.close()
self._reader.close()

def wakeup(self):
if not self._closed:
self._writer.send_bytes(b"")
with self._lock:
if not self._closed:
self._writer.send_bytes(b"")

def clear(self):
if not self._closed:
while self._reader.poll():
self._reader.recv_bytes()
if self._closed:
raise RuntimeError('operation on closed _ThreadWakeup')
while self._reader.poll():
self._reader.recv_bytes()


def _python_exit():
Expand Down Expand Up @@ -167,10 +171,8 @@ def __init__(self, work_id, fn, args, kwargs):

class _SafeQueue(Queue):
"""Safe Queue set exception to the future object linked to a job"""
def __init__(self, max_size=0, *, ctx, pending_work_items, shutdown_lock,
thread_wakeup):
def __init__(self, max_size=0, *, ctx, pending_work_items, thread_wakeup):
self.pending_work_items = pending_work_items
self.shutdown_lock = shutdown_lock
self.thread_wakeup = thread_wakeup
super().__init__(max_size, ctx=ctx)

Expand All @@ -179,8 +181,7 @@ def _on_queue_feeder_error(self, e, obj):
tb = format_exception(type(e), e, e.__traceback__)
e.__cause__ = _RemoteTraceback('\n"""\n{}"""'.format(''.join(tb)))
work_item = self.pending_work_items.pop(obj.work_id, None)
with self.shutdown_lock:
self.thread_wakeup.wakeup()
self.thread_wakeup.wakeup()
# work_item can be None if another process terminated. In this
# case, the executor_manager_thread fails all work_items
# with BrokenProcessPool
Expand Down Expand Up @@ -305,12 +306,10 @@ def __init__(self, executor):
# will wake up the queue management thread so that it can terminate
# if there is no pending work item.
def weakref_cb(_,
thread_wakeup=self.thread_wakeup,
shutdown_lock=self.shutdown_lock):
thread_wakeup=self.thread_wakeup):
mp.util.debug('Executor collected: triggering callback for'
' QueueManager wakeup')
with shutdown_lock:
thread_wakeup.wakeup()
thread_wakeup.wakeup()

self.executor_reference = weakref.ref(executor, weakref_cb)

Expand Down Expand Up @@ -438,11 +437,6 @@ def wait_result_broken_or_wakeup(self):
elif wakeup_reader in ready:
is_broken = False

# No need to hold the _shutdown_lock here because:
# 1. we're the only thread to use the wakeup reader
# 2. we're also the only thread to call thread_wakeup.close()
# 3. we want to avoid a possible deadlock when both reader and writer
# would block (gh-105829)
self.thread_wakeup.clear()

return result_item, is_broken, cause
Expand Down Expand Up @@ -740,10 +734,9 @@ def __init__(self, max_workers=None, mp_context=None,
# as it could result in a deadlock if a worker process dies with the
# _result_queue write lock still acquired.
#
# _shutdown_lock must be locked to access _ThreadWakeup.close() and
# .wakeup(). Care must also be taken to not call clear or close from
# more than one thread since _ThreadWakeup.clear() is not protected by
# the _shutdown_lock
# Care must be taken to only call clear and close from the
# executor_manager_thread, since _ThreadWakeup.clear() is not protected
# by a lock.
self._executor_manager_thread_wakeup = _ThreadWakeup()

# Create communication channels for the executor
Expand All @@ -754,7 +747,6 @@ def __init__(self, max_workers=None, mp_context=None,
self._call_queue = _SafeQueue(
max_size=queue_size, ctx=self._mp_context,
pending_work_items=self._pending_work_items,
shutdown_lock=self._shutdown_lock,
thread_wakeup=self._executor_manager_thread_wakeup)
# Killed worker processes can produce spurious "broken pipe"
# tracebacks in the queue's own worker thread. But we detect killed
Expand Down
3 changes: 1 addition & 2 deletions Lib/pdb.py
Original file line number Diff line number Diff line change
Expand Up @@ -321,8 +321,7 @@ def user_call(self, frame, argument_list):
def user_line(self, frame):
"""This function is called when we stop or break at this line."""
if self._wait_for_mainpyfile:
if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
or frame.f_lineno <= 0):
if (self.mainpyfile != self.canonic(frame.f_code.co_filename)):
return
self._wait_for_mainpyfile = False
if self.bp_commands(frame):
Expand Down
6 changes: 3 additions & 3 deletions Lib/test/support/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2450,9 +2450,9 @@ def adjust_int_max_str_digits(max_digits):
else:
C_RECURSION_LIMIT = 10000

# Windows doesn't have os.uname() but it doesn't support s390x.
# is_s390x is exposed separately so tests can branch on the platform
# (e.g. hardware-accelerated zlib) instead of skipping outright.
is_s390x = hasattr(os, 'uname') and os.uname().machine == 's390x'
skip_on_s390x = unittest.skipIf(is_s390x, 'skipped on s390x')

_BASE_COPY_SRC_DIR_IGNORED_NAMES = frozenset({
# SRC_DIR/.git
Expand Down
13 changes: 13 additions & 0 deletions Lib/test/test_bdb.py
Original file line number Diff line number Diff line change
Expand Up @@ -1203,6 +1203,19 @@ def main():
with TracerRun(self) as tracer:
tracer.runcall(tfunc_import)

def test_next_to_botframe(self):
    # gh-125422
    # Check that the "next" command issued while returning from the
    # bottom-most frame won't step the debugger into botframe
    # (bdb's own bookkeeping frame).
    code = """
        lno = 2
    """
    # Expected trace: stop on line 2, step, observe the module-level
    # return, then "next" must simply finish instead of re-entering bdb.
    self.expect_set = [
        ('line', 2, '<module>'), ('step', ),
        ('return', 2, '<module>'), ('next', ),
    ]
    with TracerRun(self) as tracer:
        tracer.run(compile(textwrap.dedent(code), '<string>', 'exec'))


class TestRegressions(unittest.TestCase):
def test_format_stack_entry_no_lineno(self):
Expand Down
24 changes: 24 additions & 0 deletions Lib/test/test_pdb.py
Original file line number Diff line number Diff line change
Expand Up @@ -2283,6 +2283,20 @@ def test_issue26053(self):
self.assertRegex(res, "Restarting .* with arguments:\na b c")
self.assertRegex(res, "Restarting .* with arguments:\nd e f")

def test_step_into_botframe(self):
    # gh-125422
    # pdb should not be able to step into the botframe (bdb.py):
    # stepping past the end of the program must report completion
    # rather than landing the user inside the debugger's own frame.
    script = "x = 1"
    commands = """
        step
        step
        step
        quit
    """
    stdout, _ = self.run_pdb_script(script, commands)
    self.assertIn("The program finished", stdout)
    self.assertNotIn("bdb.py", stdout)

def test_pdbrc_basic(self):
script = textwrap.dedent("""
a = 1
Expand Down Expand Up @@ -2740,6 +2754,16 @@ def _create_fake_frozen_module():
# verify that pdb found the source of the "frozen" function
self.assertIn('x = "Sentinel string for gh-93696"', stdout, "Sentinel statement not found")

def test_empty_file(self):
    # Debugging an empty script/module must terminate (it used to hang
    # in an infinite loop).
    script = ''
    commands = 'q\n'
    # We check that pdb stopped at line 0, but anything reasonable
    # is acceptable here, as long as it does not halt
    stdout, _ = self.run_pdb_script(script, commands)
    self.assertIn('main.py(0)', stdout)
    stdout, _ = self.run_pdb_module(script, commands)
    self.assertIn('__main__.py(0)', stdout)

def test_non_utf8_encoding(self):
script_dir = os.path.join(os.path.dirname(__file__), 'encoded_modules')
for filename in os.listdir(script_dir):
Expand Down
19 changes: 10 additions & 9 deletions Lib/test/test_traceback.py
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,7 @@ def test_no_caret_with_no_debug_ranges_flag_python_traceback(self):
import traceback
try:
x = 1 / 0
except:
except ZeroDivisionError:
traceback.print_exc()
""")
try:
Expand Down Expand Up @@ -386,9 +386,10 @@ class PurePythonExceptionFormattingMixin:
def get_exception(self, callable, slice_start=0, slice_end=-1):
try:
callable()
self.fail("No exception thrown.")
except:
except BaseException:
return traceback.format_exc().splitlines()[slice_start:slice_end]
else:
self.fail("No exception thrown.")

callable_line = get_exception.__code__.co_firstlineno + 2

Expand Down Expand Up @@ -1490,7 +1491,7 @@ def test_context_suppression(self):
try:
try:
raise Exception
except:
except Exception:
raise ZeroDivisionError from None
except ZeroDivisionError as _:
e = _
Expand Down Expand Up @@ -1838,9 +1839,9 @@ def exc():
try:
try:
raise EG("eg1", [ValueError(1), TypeError(2)])
except:
except EG:
raise EG("eg2", [ValueError(3), TypeError(4)])
except:
except EG:
raise ImportError(5)

expected = (
Expand Down Expand Up @@ -1889,7 +1890,7 @@ def exc():
except Exception as e:
exc = e
raise EG("eg", [VE(1), exc, VE(4)])
except:
except EG:
raise EG("top", [VE(5)])

expected = (f' + Exception Group Traceback (most recent call last):\n'
Expand Down Expand Up @@ -2642,7 +2643,7 @@ def test_long_context_chain(self):
def f():
try:
1/0
except:
except ZeroDivisionError:
f()

try:
Expand Down Expand Up @@ -2731,7 +2732,7 @@ def test_comparison_params_variations(self):
def raise_exc():
try:
raise ValueError('bad value')
except:
except ValueError:
raise

def raise_with_locals():
Expand Down
25 changes: 16 additions & 9 deletions Lib/test/test_zlib.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
import pickle
import random
import sys
from test.support import bigmemtest, _1G, _4G, skip_on_s390x
from test.support import bigmemtest, _1G, _4G, is_s390x


zlib = import_helper.import_module('zlib')
Expand All @@ -34,8 +34,9 @@ def _zlib_runtime_version_tuple(zlib_version=zlib.ZLIB_RUNTIME_VERSION):
ZLIB_RUNTIME_VERSION_TUPLE = _zlib_runtime_version_tuple()


# bpo-46623: On s390x, when a hardware accelerator is used, using different
# ways to compress data with zlib can produce different compressed data.
# bpo-46623: When a hardware accelerator is used (currently only on s390x),
# using different ways to compress data with zlib can produce different
# compressed data.
# Simplified test_pair() code:
#
# def func1(data):
Expand All @@ -58,8 +59,10 @@ def _zlib_runtime_version_tuple(zlib_version=zlib.ZLIB_RUNTIME_VERSION):
#
# zlib.decompress(func1(data)) == zlib.decompress(func2(data)) == data
#
# Make the assumption that s390x always has an accelerator to simplify the skip
# condition.
# To simplify the skip condition, make the assumption that s390x always has an
# accelerator, and nothing else has it.
HW_ACCELERATED = is_s390x


class VersionTestCase(unittest.TestCase):

Expand Down Expand Up @@ -224,12 +227,14 @@ def test_keywords(self):
bufsize=zlib.DEF_BUF_SIZE),
HAMLET_SCENE)

@skip_on_s390x
def test_speech128(self):
# compress more data
data = HAMLET_SCENE * 128
x = zlib.compress(data)
self.assertEqual(zlib.compress(bytearray(data)), x)
# With hardware acceleration, the compressed bytes
# might not be identical.
if not HW_ACCELERATED:
self.assertEqual(zlib.compress(bytearray(data)), x)
for ob in x, bytearray(x):
self.assertEqual(zlib.decompress(ob), data)

Expand Down Expand Up @@ -276,7 +281,6 @@ def test_64bit_compress(self, size):

class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase):
# Test compression object
@skip_on_s390x
def test_pair(self):
# straightforward compress/decompress objects
datasrc = HAMLET_SCENE * 128
Expand All @@ -287,7 +291,10 @@ def test_pair(self):
x1 = co.compress(data)
x2 = co.flush()
self.assertRaises(zlib.error, co.flush) # second flush should not work
self.assertEqual(x1 + x2, datazip)
# With hardware acceleration, the compressed bytes might not
# be identical.
if not HW_ACCELERATED:
self.assertEqual(x1 + x2, datazip)
for v1, v2 in ((x1, x2), (bytearray(x1), bytearray(x2))):
dco = zlib.decompressobj()
y1 = dco.decompress(v1 + v2)
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Fixed the bug where :mod:`pdb` would get stuck in an infinite loop when debugging an empty file.
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Fixed the bug where :mod:`pdb` and :mod:`bdb` could step into the bottom caller frame.
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
Fix deadlock when :class:`concurrent.futures.ProcessPoolExecutor` shuts down
concurrently with an error when feeding a job to a worker process.
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
Re-enable skipped tests for :mod:`zlib` on the s390x architecture: only skip
checks of the compressed bytes, which can be different between zlib's
software implementation and the hardware-accelerated implementation.

0 comments on commit c6894b1

Please sign in to comment.