
Commit

Merge branch 'master' into exceptiongroup
agronholm committed Jan 24, 2022
2 parents f9f56f1 + 39d01b2 commit cb48061
Showing 15 changed files with 22 additions and 221 deletions.
8 changes: 0 additions & 8 deletions ci.sh
@@ -72,14 +72,6 @@ python -m pip --version
python setup.py sdist --formats=zip
python -m pip install dist/*.zip

if python -c 'import sys; sys.exit(sys.version_info >= (3, 7))'; then
# Python < 3.7, select last ipython with 3.6 support
# macOS requires the suffix for --in-place or you get an undefined label error
sed -i'.bak' 's/ipython==[^ ]*/ipython==7.16.1/' test-requirements.txt
sed -i'.bak' 's/traitlets==[^ ]*/traitlets==4.3.3/' test-requirements.txt
git diff test-requirements.txt
fi

if [ "$CHECK_FORMATTING" = "1" ]; then
python -m pip install -r test-requirements.txt
source check.sh
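
The version check deleted from ci.sh above leans on an inverted exit-code idiom that is easy to misread. A minimal Python sketch of the same test (not part of this commit):

    import sys

    # sys.exit(True) exits with status 1 and sys.exit(False) with status 0,
    # so the shell `if` branch only ran when sys.version_info < (3, 7).
    sys.exit(sys.version_info >= (3, 7))
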
5 changes: 2 additions & 3 deletions docs/source/reference-io.rst
@@ -237,8 +237,7 @@ other constants and functions in the :mod:`ssl` module.

.. warning:: Avoid instantiating :class:`ssl.SSLContext` directly.
A newly constructed :class:`~ssl.SSLContext` has less secure
defaults than one returned by :func:`ssl.create_default_context`,
dramatically so before Python 3.6.
defaults than one returned by :func:`ssl.create_default_context`.

Instead of using :meth:`ssl.SSLContext.wrap_socket`, you
create a :class:`SSLStream`:
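
The example that follows this sentence in the documentation is not included in the diff; a minimal sketch of the pattern it describes, assuming `trio.open_tcp_stream` and `trio.SSLStream` (host and port are placeholders):

    import ssl
    import trio

    async def open_tls_stream(host, port):
        # create_default_context() gives secure defaults; avoid SSLContext() directly.
        ssl_context = ssl.create_default_context()
        tcp_stream = await trio.open_tcp_stream(host, port)
        # Wrap the transport stream rather than calling SSLContext.wrap_socket().
        return trio.SSLStream(tcp_stream, ssl_context, server_hostname=host)
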
@@ -722,7 +721,7 @@ subprocess`` in order to access constants such as ``PIPE`` or

Currently, Trio always uses unbuffered byte streams for communicating
with a process, so it does not support the ``encoding``, ``errors``,
``universal_newlines`` (alias ``text`` in 3.7+), and ``bufsize``
``universal_newlines`` (alias ``text``), and ``bufsize``
options.
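
In practice this means callers decode the byte output themselves. A hedged sketch using `trio.run_process` (the command and encoding are illustrative):

    import trio

    async def main():
        # stdout is captured as raw bytes; there is no text/encoding option.
        result = await trio.run_process(["echo", "hello"], capture_stdout=True)
        print(result.stdout.decode("utf-8"))

    trio.run(main)
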


2 changes: 1 addition & 1 deletion setup.py
@@ -44,7 +44,7 @@
Vital statistics:
* Supported environments: Linux, macOS, or Windows running some kind of Python
3.6-or-better (either CPython or PyPy3 is fine). \\*BSD and illumos likely
3.7-or-better (either CPython or PyPy3 is fine). \\*BSD and illumos likely
work too, but are not tested.
* Install: ``python3 -m pip install -U trio`` (or on Windows, maybe
6 changes: 0 additions & 6 deletions test-requirements.in
@@ -23,15 +23,9 @@ typing-extensions; implementation_name == "cpython"

# Trio's own dependencies
cffi; os_name == "nt"
contextvars; python_version < "3.7"
attrs >= 19.2.0
sortedcontainers
async_generator >= 1.9
idna
outcome
sniffio

# Required by contextvars, but harmless to install everywhere.
# dependabot drops the contextvars dependency because it runs
# on 3.7.
immutables >= 0.6
4 changes: 2 additions & 2 deletions trio/_core/_entry_queue.py
@@ -15,8 +15,8 @@ class EntryQueue:
# not signal-safe. deque is implemented in C, so each operation is atomic
# WRT threads (and this is guaranteed in the docs), AND each operation is
# atomic WRT signal delivery (signal handlers can run on either side, but
# not *during* a deque operation). dict makes similar guarantees - and on
# CPython 3.6 and PyPy, it's even ordered!
# not *during* a deque operation). dict makes similar guarantees - and
# it's even ordered!
queue = attr.ib(factory=deque)
idempotent_queue = attr.ib(factory=dict)
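
A simplified illustration (not Trio's actual implementation) of why these two containers are enough: a signal handler can append at any time and the main loop can drain without locking, because each deque/dict operation is a single atomic C-level call:

    from collections import deque

    queue = deque()
    idempotent_queue = {}  # insertion-ordered, so calls still run in FIFO order

    def call_soon(fn, *args, idempotent=False):
        # Safe to call from a signal handler: one atomic container operation.
        if idempotent:
            idempotent_queue[(fn, args)] = None  # duplicates collapse onto one key
        else:
            queue.append((fn, args))

    def run_pending():
        while queue:
            fn, args = queue.popleft()
            fn(*args)
        # Simplified: a real implementation would swap out the container
        # so that entries queued during this loop are not lost.
        pending = list(idempotent_queue)
        idempotent_queue.clear()
        for fn, args in pending:
            fn(*args)
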

18 changes: 3 additions & 15 deletions trio/_core/_run.py
@@ -1,18 +1,12 @@
# coding: utf-8

import functools
import itertools
import logging
import os
import random
import select
import sys
import threading
from collections import deque
import collections.abc
from contextlib import contextmanager
import warnings
import weakref
import enum

from contextvars import copy_context
@@ -46,7 +40,6 @@
from ._thread_cache import start_thread_soon
from ._instrumentation import Instruments
from .. import _core
from .._deprecate import warn_deprecated
from .._util import Final, NoPublicConstructor, coroutine_or_error

if sys.version_info < (3, 11):
@@ -72,13 +65,8 @@ def _public(fn):
_r = random.Random()


# On 3.7+, Context.run() is implemented in C and doesn't show up in
# tracebacks. On 3.6, we use the contextvars backport, which is
# currently implemented in Python and adds 1 frame to tracebacks. So this
# function is a super-overkill version of "0 if sys.version_info >= (3, 7)
# else 1". But if Context.run ever changes, we'll be ready!
#
# This can all be removed once we drop support for 3.6.
# On CPython, Context.run() is implemented in C and doesn't show up in
# tracebacks. On PyPy, it is implemented in Python and adds 1 frame to tracebacks.
def _count_context_run_tb_frames():
def function_with_unique_name_xyzzy():
1 / 0
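
The body of this helper is truncated by the diff; a hedged sketch of how the measurement can work — run a crashing function through `Context.run()` and count the traceback frames that appear above it:

    from contextvars import copy_context

    def count_context_run_tb_frames():
        def function_with_unique_name_xyzzy():
            1 / 0

        try:
            copy_context().run(function_with_unique_name_xyzzy)
        except ZeroDivisionError as exc:
            tb = exc.__traceback__
            count = 0
            # Walk until we reach the crashing function; whatever sits in
            # between (beyond this function's own frame) came from
            # Context.run() - 0 frames on CPython, 1 on PyPy.
            while tb.tb_frame.f_code.co_name != "function_with_unique_name_xyzzy":
                count += 1
                tb = tb.tb_next
            return count - 1
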
@@ -2202,7 +2190,7 @@ def unrolled_run(runner, async_fn, args, host_uses_signal_set_wakeup_fd=False):
try:
# We used to unwrap the Outcome object here and send/throw
# its contents in directly, but it turns out that .throw()
# is buggy, at least on CPython 3.6:
# is buggy, at least before CPython 3.9:
# https://bugs.python.org/issue29587
# https://bugs.python.org/issue29590
# So now we send in the Outcome object and unwrap it on the
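
The comment is cut off here, but the idea is that the runner resumes the task with an `Outcome` object and the task-side code unwraps it, instead of calling `.throw()`. A toy synchronous sketch of that hand-off using the `outcome` library (not Trio's actual runner code):

    import outcome

    def task():
        # The "runner" sends a Value or Error instead of throwing directly;
        # unwrap() re-raises an Error inside the task with exc_info intact.
        result = yield
        return result.unwrap()

    gen = task()
    gen.send(None)  # prime the generator up to the yield
    try:
        gen.send(outcome.Error(ValueError("boom")))
    except ValueError as exc:
        print("task failed with:", exc)
    except StopIteration as stop:
        print("task returned:", stop.value)
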
30 changes: 3 additions & 27 deletions trio/_core/_wakeup_socketpair.py
@@ -1,27 +1,11 @@
import socket
import sys
import signal
import warnings

from .. import _core
from .._util import is_main_thread


def _has_warn_on_full_buffer():
if "__pypy__" not in sys.builtin_module_names:
# CPython has warn_on_full_buffer. Don't need to inspect.
# Also, CPython doesn't support inspecting built-in functions.
return True

import inspect

args_spec = inspect.getfullargspec(signal.set_wakeup_fd)
return "warn_on_full_buffer" in args_spec.kwonlyargs


HAVE_WARN_ON_FULL_BUFFER = _has_warn_on_full_buffer()


class WakeupSocketpair:
def __init__(self):
self.wakeup_sock, self.write_sock = socket.socketpair()
@@ -35,13 +19,8 @@ def __init__(self):
# Windows 10: 525347
# Windows you're weird. (And on Windows setting SNDBUF to 0 makes send
# blocking, even on non-blocking sockets, so don't do that.)
#
# But, if we're on an old Python and can't control the signal module's
# warn-on-full-buffer behavior, then we need to leave things alone, so
# the signal module won't spam the console with spurious warnings.
if HAVE_WARN_ON_FULL_BUFFER:
self.wakeup_sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1)
self.write_sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1)
self.wakeup_sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1)
self.write_sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1)
# On Windows this is a TCP socket so this might matter. On other
# platforms this fails b/c AF_UNIX sockets aren't actually TCP.
try:
@@ -72,10 +51,7 @@ def wakeup_on_signals(self):
if not is_main_thread():
return
fd = self.write_sock.fileno()
if HAVE_WARN_ON_FULL_BUFFER:
self.old_wakeup_fd = signal.set_wakeup_fd(fd, warn_on_full_buffer=False)
else:
self.old_wakeup_fd = signal.set_wakeup_fd(fd)
self.old_wakeup_fd = signal.set_wakeup_fd(fd, warn_on_full_buffer=False)
if self.old_wakeup_fd != -1:
warnings.warn(
RuntimeWarning(
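
To round out the section above, a minimal standalone sketch (not Trio's code) of the wakeup-socketpair pattern this class implements: the interpreter's signal machinery writes a byte to `write_sock`, which makes `wakeup_sock` readable and so wakes up a blocked `select()`:

    import select
    import signal
    import socket

    wakeup_sock, write_sock = socket.socketpair()
    wakeup_sock.setblocking(False)
    write_sock.setblocking(False)
    # One pending byte is enough to wake us, so keep the buffers tiny.
    wakeup_sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1)
    write_sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1)
    # Must run in the main thread; warn_on_full_buffer=False silences the
    # "wakeup fd full" warning that a 1-byte buffer would otherwise trigger.
    signal.set_wakeup_fd(write_sock.fileno(), warn_on_full_buffer=False)

    # An event loop would include wakeup_sock in its select()/epoll set;
    # readability means "a signal arrived, go check for KeyboardInterrupt".
    readable, _, _ = select.select([wakeup_sock], [], [], 0.1)
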
112 changes: 0 additions & 112 deletions trio/_core/tests/test_ki.py
@@ -490,115 +490,3 @@ async def inner():
_core.run(inner)
finally:
threading._active[thread.ident] = original


# For details on why this test is non-trivial, see:
# https://github.com/python-trio/trio/issues/42
# https://github.com/python-trio/trio/issues/109
@slow
def test_ki_wakes_us_up():
assert is_main_thread()

# This test is flaky due to a race condition on Windows; see:
# https://github.com/python-trio/trio/issues/119
# https://bugs.python.org/issue30038
# I think the only fix is to wait for fixed CPython to be released, so in
# the mean time, on affected versions we send two signals (equivalent to
# hitting control-C twice). This works because the problem is that the C
# level signal handler does
#
# write-to-fd -> set-flags
#
# and we need
#
# set-flags -> write-to-fd
#
# so running the C level signal handler twice does
#
# write-to-fd -> set-flags -> write-to-fd -> set-flags
#
# which contains the desired sequence.
#
# Affected versions of CPython include 3.6.1 and earlier.
# It's fixed in 3.6.2 and 3.7+
#
# PyPy was never affected.
#
# The problem technically can occur on Unix as well, if a signal is
# delivered to a non-main thread, though we haven't observed this in
# practice.
#
# There's also this theoretical problem, but hopefully it won't actually
# bite us in practice:
# https://bugs.python.org/issue31119
# https://bitbucket.org/pypy/pypy/issues/2623
import platform

# lock is only needed to avoid an annoying race condition where the
# *second* ki_self() call arrives *after* the first one woke us up and its
# KeyboardInterrupt was caught, and then generates a second
# KeyboardInterrupt that aborts the test run. The kill_soon thread holds
# the lock while doing the calls to ki_self, which means that it holds it
# while the C-level signal handler is running. Then in the main thread,
# when we're woken up we know that ki_self() has been run at least once;
# if we then take the lock it guarantees that ki_self() has been run
# twice, so if a second KeyboardInterrupt is going to arrive it should
# arrive by the time we've acquired the lock. This lets us force it to
# happen inside the pytest.raises block.
#
# It will be very nice when the buggy_wakeup_fd bug is fixed.
lock = threading.Lock()

def kill_soon():
# We want the signal to be raised after the main thread has entered
# the IO manager blocking primitive. There really is no way to
# deterministically interlock with that, so we have to use sleep and
# hope it's long enough.
time.sleep(1.1)
with lock:
print("thread doing ki_self()")
ki_self()

async def main():
thread = threading.Thread(target=kill_soon)
print("Starting thread")
thread.start()
try:
with pytest.raises(KeyboardInterrupt):
# To limit the damage on CI if this does get broken (as
# compared to sleep_forever())
print("Going to sleep")
try:
await sleep(20)
print("Woke without raising?!") # pragma: no cover
# The only purpose of this finally: block is to soak up the
# second KeyboardInterrupt that might arrive on
# buggy_wakeup_fd platforms. So it might get aborted at any
# moment randomly on some runs, so pragma: no cover avoids
# coverage flapping:
finally: # pragma: no cover
print("waiting for lock")
with lock:
print("got lock")
# And then we want to force a PyErr_CheckSignals. Which is
# not so easy on Windows. Weird kluge: builtin_repr calls
# PyObject_Repr, which does an unconditional
# PyErr_CheckSignals for some reason.
print(repr(None))
# And finally, it's possible that the signal was delivered
# but at a moment when we had KI protection enabled, so we
# need to execute a checkpoint to ensure it's delivered
# before we exit main().
await _core.checkpoint()
finally:
print("joining thread", sys.exc_info())
thread.join()

start = time.perf_counter()
try:
_core.run(main)
finally:
end = time.perf_counter()
print("duration", end - start)
print("sys.exc_info", sys.exc_info())
assert 1.0 <= (end - start) < 2
2 changes: 1 addition & 1 deletion trio/_core/tests/test_run.py
@@ -1060,7 +1060,7 @@ async def child2():
]


# At least as of CPython 3.6, using .throw() to raise an exception inside a
# Before CPython 3.9, using .throw() to raise an exception inside a
# coroutine/generator causes the original exc_info state to be lost, so things
# like re-raising and exception chaining are broken.
#
1 change: 0 additions & 1 deletion trio/_highlevel_ssl_helpers.py
@@ -19,7 +19,6 @@ async def open_ssl_over_tcp_stream(
*,
https_compatible=False,
ssl_context=None,
# No trailing comma b/c bpo-9232 (fixed in py36)
happy_eyeballs_delay=DEFAULT_DELAY,
):
"""Make a TLS-encrypted Connection to the given host and port over TCP.
2 changes: 1 addition & 1 deletion trio/_socket.py
@@ -50,7 +50,7 @@ async def __aexit__(self, etype, value, tb):
try:
from socket import IPPROTO_IPV6
except ImportError:
# As of at least 3.6, python on Windows is missing IPPROTO_IPV6
# Before Python 3.8, Windows is missing IPPROTO_IPV6
# https://bugs.python.org/issue29515
if sys.platform == "win32": # pragma: no branch
IPPROTO_IPV6 = 41
16 changes: 1 addition & 15 deletions trio/_util.py
@@ -266,21 +266,7 @@ def __getitem__(self, _):
return self


# If a new class inherits from any ABC, then the new class's metaclass has to
# inherit from ABCMeta. If a new class inherits from typing.Generic, and
# you're using Python 3.6, then the new class's metaclass has to
# inherit from typing.GenericMeta. Some of the classes that want to use Final
# or NoPublicConstructor inherit from ABCs and generics, so Final has to
# inherit from these metaclasses. Fortunately, GenericMeta inherits from
# ABCMeta, so inheriting from GenericMeta alone is sufficient (when it
# exists at all).
if not t.TYPE_CHECKING and hasattr(t, "GenericMeta"):
BaseMeta = t.GenericMeta
else:
BaseMeta = ABCMeta


class Final(BaseMeta):
class Final(ABCMeta):
"""Metaclass that enforces a class to be final (i.e., subclass not allowed).
If a class uses this metaclass like this::
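
The docstring's own example is cut off by the diff; a hedged sketch of the usage it describes (`SomeFinalClass` is a placeholder, and `trio._util` is a private module):

    from trio._util import Final

    class SomeFinalClass(metaclass=Final):
        pass

    try:
        class Subclass(SomeFinalClass):  # rejected at class-definition time
            pass
    except TypeError as exc:
        print("subclassing blocked:", exc)
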
12 changes: 0 additions & 12 deletions trio/socket.py
@@ -188,18 +188,6 @@
# get names used by Trio that we define on our own
from ._socket import IPPROTO_IPV6

# Not defined in all python versions and platforms but sometimes needed
if not _t.TYPE_CHECKING:
try:
TCP_NOTSENT_LOWAT
except NameError:
# Hopefully will show up in 3.7:
# https://github.com/python/cpython/pull/477
if sys.platform == "darwin":
TCP_NOTSENT_LOWAT = 0x201
elif sys.platform == "linux":
TCP_NOTSENT_LOWAT = 25

if _t.TYPE_CHECKING:
IP_BIND_ADDRESS_NO_PORT: int
else:
21 changes: 6 additions & 15 deletions trio/tests/test_ssl.py
@@ -59,23 +59,14 @@

TRIO_TEST_1_CERT.configure_cert(SERVER_CTX)


# TLS 1.3 has a lot of changes from previous versions. So we want to run tests
# with both TLS 1.3, and TLS 1.2.
if hasattr(ssl, "OP_NO_TLSv1_3"):
# "tls13" means that we're willing to negotiate TLS 1.3. Usually that's
# what will happen, but the renegotiation tests explicitly force a
# downgrade on the server side. "tls12" means we refuse to negotiate TLS
# 1.3, so we'll almost certainly use TLS 1.2.
client_ctx_params = ["tls13", "tls12"]
else:
# We can't control whether we use TLS 1.3, so we just have to accept
# whatever openssl wants to use. This might be TLS 1.2 (if openssl is
# old), or it might be TLS 1.3 (if openssl is new, but our python version
# is too old to expose the configuration knobs).
client_ctx_params = ["default"]


@pytest.fixture(scope="module", params=client_ctx_params)
# "tls13" means that we're willing to negotiate TLS 1.3. Usually that's
# what will happen, but the renegotiation tests explicitly force a
# downgrade on the server side. "tls12" means we refuse to negotiate TLS
# 1.3, so we'll almost certainly use TLS 1.2.
@pytest.fixture(scope="module", params=["tls13", "tls12"])
def client_ctx(request):
ctx = ssl.create_default_context()

