httpcore offers a standard synchronous API by default, but also gives you the option of an async client if you need it.
+Async is a concurrency model that is far more efficient than multi-threading, and can provide significant performance benefits and enable the use of long-lived network connections such as WebSockets.
+If you're working with an async web framework then you'll also want to use an async client for sending outgoing HTTP requests.
Launching concurrent async tasks is far more resource efficient than spawning multiple threads. The Python interpreter should be able to comfortably handle switching between over 1000 concurrent tasks, while a sensible thread pool might be limited to around 10 or 20 concurrent threads.
When using async support, you need to make sure to use an async connection pool class:
+# The async variation of `httpcore.ConnectionPool`
+async with httpcore.AsyncConnectionPool() as http:
+ ...
+
Or if connecting via a proxy:
+# The async variation of `httpcore.HTTPProxy`
+async with httpcore.AsyncHTTPProxy() as proxy:
+ ...
+
Sending requests with the async version of httpcore
requires the await
keyword:
import asyncio
+import httpcore
+
+async def main():
+ async with httpcore.AsyncConnectionPool() as http:
+ response = await http.request("GET", "https://www.example.com/")
+
+
+asyncio.run(main())
+
When including content in the request, the content must either be bytes or an async iterable yielding bytes.
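As a rough sketch (the URL and body below are purely illustrative), both forms might look like this:

```python
import asyncio
import httpcore


async def main():
    async with httpcore.AsyncConnectionPool() as http:
        # A request body supplied as plain bytes.
        await http.request(
            "POST", "https://www.example.com/upload", content=b"Hello, world!"
        )

        # A request body supplied as an async iterable yielding bytes.
        async def body():
            yield b"Hello, "
            yield b"world!"

        await http.request(
            "POST", "https://www.example.com/upload", content=body()
        )


asyncio.run(main())
```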
+Streaming responses also require a slightly different interface to the sync version:
`with <pool>.stream(...) as response` → `async with <pool>.stream(...) as response`
`for chunk in response.iter_stream()` → `async for chunk in response.aiter_stream()`
`response.read()` → `await response.aread()`
`response.close()` → `await response.aclose()`
For example:
+import asyncio
+import httpcore
+
+
+async def main():
+ async with httpcore.AsyncConnectionPool() as http:
+ async with http.stream("GET", "https://www.example.com/") as response:
+ async for chunk in response.aiter_stream():
+ print(f"Downloaded: {chunk}")
+
+
+asyncio.run(main())
+
When using httpcore
in an async environment it is strongly recommended that you instantiate and use connection pools using the context managed style:
async with httpcore.AsyncConnectionPool() as http:
+ ...
+
To benefit from connection pooling it is recommended that you instantiate a single connection pool in this style, and pass it around throughout your application.
+If you do want to use a connection pool without this style then you'll need to ensure that you explicitly close the pool once it is no longer required:
+try:
+ http = httpcore.AsyncConnectionPool()
+ ...
+finally:
+ await http.aclose()
+
This is a little different to the threaded context, where it's okay to simply instantiate a globally available connection pool, and then allow Python's garbage collection to deal with closing any connections in the pool, once the __del__
method is called.
The reason for this difference is that asynchronous code is not able to run within the context of the synchronous __del__
method, so there is no way for connections to be automatically closed at the point of garbage collection. This can lead to unterminated TCP connections still remaining after the Python interpreter quits.
httpcore supports either asyncio or trio as an async environment.
It will auto-detect which of those two to use as the backend for socket operations and concurrency primitives.
+AsyncIO is Python's built-in library for writing concurrent code with the async/await syntax.
+Let's take a look at sending several outgoing HTTP requests concurrently, using asyncio
:
import asyncio
+import httpcore
+import time
+
+
+async def download(http, year):
+ await http.request("GET", f"https://en.wikipedia.org/wiki/{year}")
+
+
+async def main():
+ async with httpcore.AsyncConnectionPool() as http:
+ started = time.time()
+ # Here we use `asyncio.gather()` in order to run several tasks concurrently...
+ tasks = [download(http, year) for year in range(2000, 2020)]
+ await asyncio.gather(*tasks)
+ complete = time.time()
+
+ for connection in http.connections:
+ print(connection)
+ print("Complete in %.3f seconds" % (complete - started))
+
+
+asyncio.run(main())
+
Trio is an alternative async library, designed around the principles of structured concurrency.
+import httpcore
+import trio
+import time
+
+
+async def download(http, year):
+ await http.request("GET", f"https://en.wikipedia.org/wiki/{year}")
+
+
+async def main():
+ async with httpcore.AsyncConnectionPool() as http:
+ started = time.time()
+ async with trio.open_nursery() as nursery:
+ for year in range(2000, 2020):
+ nursery.start_soon(download, http, year)
+ complete = time.time()
+
+ for connection in http.connections:
+ print(connection)
+ print("Complete in %.3f seconds" % (complete - started))
+
+
+trio.run(main)
+
AnyIO is an asynchronous networking and concurrency library that works on top of either asyncio or trio. It blends in with native libraries of your chosen backend (defaults to asyncio).
+The anyio
library is designed around the principles of structured concurrency, and brings many of the same correctness and usability benefits that Trio provides, while interoperating with existing asyncio
libraries.
import httpcore
+import anyio
+import time
+
+
+async def download(http, year):
+ await http.request("GET", f"https://en.wikipedia.org/wiki/{year}")
+
+
+async def main():
+ async with httpcore.AsyncConnectionPool() as http:
+ started = time.time()
+ async with anyio.create_task_group() as task_group:
+ for year in range(2000, 2020):
+ task_group.start_soon(download, http, year)
+ complete = time.time()
+
+ for connection in http.connections:
+ print(connection)
+ print("Complete in %.3f seconds" % (complete - started))
+
+
+anyio.run(main)
+
httpcore.AsyncConnectionPool
A connection pool for making HTTP requests.
+ + + + +connections: List[httpcore.AsyncConnectionInterface]
+
+
+ property
+ readonly
+
+
+Return a list of the connections currently in the pool.
+For example:
+>>> pool.connections
+[
+ <AsyncHTTPConnection ['https://example.com:443', HTTP/1.1, ACTIVE, Request Count: 6]>,
+ <AsyncHTTPConnection ['https://example.com:443', HTTP/1.1, IDLE, Request Count: 9]> ,
+ <AsyncHTTPConnection ['http://example.com:80', HTTP/1.1, IDLE, Request Count: 1]>,
+]
+
__init__(self, ssl_context=None, max_connections=10, max_keepalive_connections=None, keepalive_expiry=None, http1=True, http2=False, retries=0, local_address=None, uds=None, network_backend=None, socket_options=None)
+
+
+ special
+
+
+A connection pool for making HTTP requests.
+ +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
ssl_context |
+ Optional[ssl.SSLContext] |
+ An SSL context to use for verifying connections.
+If not specified, the default |
+ None |
+
max_connections |
+ Optional[int] |
+ The maximum number of concurrent HTTP connections that +the pool should allow. Any attempt to send a request on a pool that +would exceed this amount will block until a connection is available. |
+ 10 |
+
max_keepalive_connections |
+ Optional[int] |
+ The maximum number of idle HTTP connections +that will be maintained in the pool. |
+ None |
+
keepalive_expiry |
+ Optional[float] |
+ The duration in seconds that an idle HTTP connection +may be maintained for before being expired from the pool. |
+ None |
+
http1 |
+ bool |
+ A boolean indicating if HTTP/1.1 requests should be supported +by the connection pool. Defaults to True. |
+ True |
+
http2 |
+ bool |
+ A boolean indicating if HTTP/2 requests should be supported by +the connection pool. Defaults to False. |
+ False |
+
retries |
+ int |
+ The maximum number of retries when trying to establish a +connection. |
+ 0 |
+
local_address |
+ Optional[str] |
+ Local address to connect from. Can also be used to connect
+using a particular address family. Using |
+ None |
+
uds |
+ Optional[str] |
+ Path to a Unix Domain Socket to use instead of TCP sockets. |
+ None |
+
network_backend |
+ Optional[httpcore.AsyncNetworkBackend] |
+ A backend instance to use for handling network I/O. |
+ None |
+
socket_options |
+ Optional[Iterable[Union[Tuple[int, int, int], Tuple[int, int, Union[bytes, bytearray]], Tuple[int, int, NoneType, int]]]] |
+ Socket options that have to be included +in the TCP socket when the connection was established. |
+ None |
+
aclose(self)
+
+
+ async
+
+
+Close any connections in the pool.
+ +handle_async_request(self, request)
+
+
+ async
+
+
+Send an HTTP request, and return an HTTP response.
+This is the core implementation that is called into by .request()
or .stream()
.
response_closed(self, status)
+
+
+ async
+
+
+This method acts as a callback once the request/response cycle is complete.
+It is called into from the ConnectionPoolByteStream.aclose()
method.
httpcore.AsyncHTTPProxy
A connection pool that sends requests via an HTTP proxy.
+ + + + +__init__(self, proxy_url, proxy_auth=None, proxy_headers=None, ssl_context=None, proxy_ssl_context=None, max_connections=10, max_keepalive_connections=None, keepalive_expiry=None, http1=True, http2=False, retries=0, local_address=None, uds=None, network_backend=None, socket_options=None)
+
+
+ special
+
+
+A connection pool for making HTTP requests.
+ +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
proxy_url |
+ Union[httpcore.URL, bytes, str] |
+ The URL to use when connecting to the proxy server.
+For example |
+ required | +
proxy_auth |
+ Optional[Tuple[Union[bytes, str], Union[bytes, str]]] |
+ Any proxy authentication as a two-tuple of +(username, password). May be either bytes or ascii-only str. |
+ None |
+
proxy_headers |
+ Union[Mapping[Union[bytes, str], Union[bytes, str]], Sequence[Tuple[Union[bytes, str], Union[bytes, str]]]] |
+ Any HTTP headers to use for the proxy requests.
+For example |
+ None |
+
ssl_context |
+ Optional[ssl.SSLContext] |
+ An SSL context to use for verifying connections.
+If not specified, the default |
+ None |
+
proxy_ssl_context |
+ Optional[ssl.SSLContext] |
+ The same as |
+ None |
+
max_connections |
+ Optional[int] |
+ The maximum number of concurrent HTTP connections that +the pool should allow. Any attempt to send a request on a pool that +would exceed this amount will block until a connection is available. |
+ 10 |
+
max_keepalive_connections |
+ Optional[int] |
+ The maximum number of idle HTTP connections +that will be maintained in the pool. |
+ None |
+
keepalive_expiry |
+ Optional[float] |
+ The duration in seconds that an idle HTTP connection +may be maintained for before being expired from the pool. |
+ None |
+
http1 |
+ bool |
+ A boolean indicating if HTTP/1.1 requests should be supported +by the connection pool. Defaults to True. |
+ True |
+
http2 |
+ bool |
+ A boolean indicating if HTTP/2 requests should be supported by +the connection pool. Defaults to False. |
+ False |
+
retries |
+ int |
+ The maximum number of retries when trying to establish +a connection. |
+ 0 |
+
local_address |
+ Optional[str] |
+ Local address to connect from. Can also be used to
+connect using a particular address family. Using
+ |
+ None |
+
uds |
+ Optional[str] |
+ Path to a Unix Domain Socket to use instead of TCP sockets. |
+ None |
+
network_backend |
+ Optional[httpcore.AsyncNetworkBackend] |
+ A backend instance to use for handling network I/O. |
+ None |
+
While the top-level API provides convenience functions for working with httpcore
,
+in practice you'll almost always want to take advantage of the connection pooling
+functionality that it provides.
To do so, instantiate a pool instance, and use it to send requests:
+import httpcore
+
+http = httpcore.ConnectionPool()
+r = http.request("GET", "https://www.example.com/")
+
+print(r)
+# <Response [200]>
+
Connection pools support the same .request()
and .stream()
APIs as described in the Quickstart.
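For instance, a streaming request made through the pool might look like this:

```python
import httpcore

with httpcore.ConnectionPool() as http:
    # Stream the response body, rather than loading it all into memory at once.
    with http.stream("GET", "https://www.example.com/") as response:
        for chunk in response.iter_stream():
            print(f"Downloaded: {chunk}")
```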
We can observe the benefits of connection pooling with a simple script like so:
+import httpcore
+import time
+
+
+http = httpcore.ConnectionPool()
+for counter in range(5):
+ started = time.time()
+ response = http.request("GET", "https://www.example.com/")
+ complete = time.time()
+ print(response, "in %.3f seconds" % (complete - started))
+
The output should demonstrate the initial request as being substantially slower than the subsequent requests:
<Response [200]> in 0.529 seconds
<Response [200]> in 0.096 seconds
<Response [200]> in 0.097 seconds
<Response [200]> in 0.095 seconds
<Response [200]> in 0.098 seconds
+
This is to be expected. Once we've established a connection to "www.example.com"
we're able to reuse it for following requests.
The connection pool instance is also the main point of configuration. Let's take a look at the various options that it provides:
+ssl_context
: An SSL context to use for verifying connections.
+ If not specified, the default httpcore.default_ssl_context()
+ will be used.max_connections
: The maximum number of concurrent HTTP connections that the pool
+ should allow. Any attempt to send a request on a pool that would
+ exceed this amount will block until a connection is available.max_keepalive_connections
: The maximum number of idle HTTP connections that will
+ be maintained in the pool.keepalive_expiry
: The duration in seconds that an idle HTTP connection may be
+ maintained for before being expired from the pool.http1
: A boolean indicating if HTTP/1.1 requests should be supported by the connection
+ pool. Defaults to True
.http2
: A boolean indicating if HTTP/2 requests should be supported by the connection
+ pool. Defaults to False
.retries
: The maximum number of retries when trying to establish a connection.local_address
: Local address to connect from. Can also be used to connect using
+ a particular address family. Using local_address="0.0.0.0"
will
+ connect using an AF_INET
address (IPv4), while using local_address="::"
+ will connect using an AF_INET6
address (IPv6).uds
: Path to a Unix Domain Socket to use instead of TCP sockets.network_backend
: A backend instance to use for handling network I/O.socket_options
: Socket options that have to be included in the TCP socket when the connection was established.Because connection pools hold onto network resources, careful developers may want to ensure that instances are properly closed once they are no longer required.
+Working with a single global instance isn't a bad idea for many use case, since the connection pool will automatically be closed when the __del__
method is called on it:
# This is perfectly fine for most purposes.
+# The connection pool will automatically be closed when it is garbage collected,
+# or when the Python interpreter exits.
+http = httpcore.ConnectionPool()
+
However, to be more explicit around the resource usage, we can use the connection pool within a context manager:
+with httpcore.ConnectionPool() as http:
+ ...
+
Or else close the pool explicitly:
+http = httpcore.ConnectionPool()
+try:
+ ...
+finally:
+ http.close()
+
Connection pools are designed to be thread-safe. Similarly, when using httpcore
in an async context connection pools are task-safe.
This means that you can have a single connection pool instance shared by multiple threads.
+httpcore.ConnectionPool
A connection pool for making HTTP requests.
+ + + + +connections: List[httpcore.ConnectionInterface]
+
+
+ property
+ readonly
+
+
+Return a list of the connections currently in the pool.
+For example:
+>>> pool.connections
+[
+ <HTTPConnection ['https://example.com:443', HTTP/1.1, ACTIVE, Request Count: 6]>,
+ <HTTPConnection ['https://example.com:443', HTTP/1.1, IDLE, Request Count: 9]> ,
+ <HTTPConnection ['http://example.com:80', HTTP/1.1, IDLE, Request Count: 1]>,
+]
+
__init__(self, ssl_context=None, max_connections=10, max_keepalive_connections=None, keepalive_expiry=None, http1=True, http2=False, retries=0, local_address=None, uds=None, network_backend=None, socket_options=None)
+
+
+ special
+
+
+A connection pool for making HTTP requests.
+ +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
ssl_context |
+ Optional[ssl.SSLContext] |
+ An SSL context to use for verifying connections.
+If not specified, the default |
+ None |
+
max_connections |
+ Optional[int] |
+ The maximum number of concurrent HTTP connections that +the pool should allow. Any attempt to send a request on a pool that +would exceed this amount will block until a connection is available. |
+ 10 |
+
max_keepalive_connections |
+ Optional[int] |
+ The maximum number of idle HTTP connections +that will be maintained in the pool. |
+ None |
+
keepalive_expiry |
+ Optional[float] |
+ The duration in seconds that an idle HTTP connection +may be maintained for before being expired from the pool. |
+ None |
+
http1 |
+ bool |
+ A boolean indicating if HTTP/1.1 requests should be supported +by the connection pool. Defaults to True. |
+ True |
+
http2 |
+ bool |
+ A boolean indicating if HTTP/2 requests should be supported by +the connection pool. Defaults to False. |
+ False |
+
retries |
+ int |
+ The maximum number of retries when trying to establish a +connection. |
+ 0 |
+
local_address |
+ Optional[str] |
+ Local address to connect from. Can also be used to connect
+using a particular address family. Using |
+ None |
+
uds |
+ Optional[str] |
+ Path to a Unix Domain Socket to use instead of TCP sockets. |
+ None |
+
network_backend |
+ Optional[httpcore.NetworkBackend] |
+ A backend instance to use for handling network I/O. |
+ None |
+
socket_options |
+ Optional[Iterable[Union[Tuple[int, int, int], Tuple[int, int, Union[bytes, bytearray]], Tuple[int, int, NoneType, int]]]] |
+ Socket options that have to be included +in the TCP socket when the connection was established. |
+ None |
+
close(self)
+
+
+Close any connections in the pool.
+ +handle_request(self, request)
+
+
+Send an HTTP request, and return an HTTP response.
+This is the core implementation that is called into by .request()
or .stream()
.
response_closed(self, status)
+
+
+This method acts as a callback once the request/response cycle is complete.
+It is called into from the ConnectionPoolByteStream.close()
method.
TODO
+httpcore.HTTPConnection
has_expired(self)
+
+
+Return True
if the connection is in a state where it should be closed.
This either means that the connection is idle and it has passed the +expiry time on its keep-alive, or that server has sent an EOF.
+ +is_available(self)
+
+
+Return True
if the connection is currently able to accept an
+outgoing request.
An HTTP/1.1 connection will only be available if it is currently idle.
+An HTTP/2 connection will be available so long as the stream ID space is +not yet exhausted, and the connection is not in an error state.
+While the connection is being established we may not yet know if it is going
+to result in an HTTP/1.1 or HTTP/2 connection. The connection should be
+treated as being available, but might ultimately raise NewConnectionRequired
+required exceptions if multiple requests are attempted over a connection
+that ends up being established as HTTP/1.1.
is_closed(self)
+
+
+Return True
if the connection has been closed.
Used when a response is closed to determine if the connection may be +returned to the connection pool or not.
+ +is_idle(self)
+
+
+Return True
if the connection is currently idle.
httpcore.HTTP11Connection
has_expired(self)
+
+
+Return True
if the connection is in a state where it should be closed.
This either means that the connection is idle and it has passed the +expiry time on its keep-alive, or that server has sent an EOF.
+ +is_available(self)
+
+
+Return True
if the connection is currently able to accept an
+outgoing request.
An HTTP/1.1 connection will only be available if it is currently idle.
+An HTTP/2 connection will be available so long as the stream ID space is +not yet exhausted, and the connection is not in an error state.
+While the connection is being established we may not yet know if it is going
+to result in an HTTP/1.1 or HTTP/2 connection. The connection should be
+treated as being available, but might ultimately raise NewConnectionRequired
+required exceptions if multiple requests are attempted over a connection
+that ends up being established as HTTP/1.1.
is_closed(self)
+
+
+Return True
if the connection has been closed.
Used when a response is closed to determine if the connection may be +returned to the connection pool or not.
+ +is_idle(self)
+
+
+Return True
if the connection is currently idle.
httpcore.HTTP2Connection
has_expired(self)
+
+
+Return True
if the connection is in a state where it should be closed.
This either means that the connection is idle and it has passed the +expiry time on its keep-alive, or that server has sent an EOF.
+ +is_available(self)
+
+
+Return True
if the connection is currently able to accept an
+outgoing request.
An HTTP/1.1 connection will only be available if it is currently idle.
+An HTTP/2 connection will be available so long as the stream ID space is +not yet exhausted, and the connection is not in an error state.
+While the connection is being established we may not yet know if it is going
+to result in an HTTP/1.1 or HTTP/2 connection. The connection should be
+treated as being available, but might ultimately raise NewConnectionRequired
+required exceptions if multiple requests are attempted over a connection
+that ends up being established as HTTP/1.1.
is_closed(self)
+
+
+Return True
if the connection has been closed.
Used when a response is closed to determine if the connection may be +returned to the connection pool or not.
+ +is_idle(self)
+
+
+Return True
if the connection is currently idle.
The following exceptions may be raised when sending a request:
+httpcore.TimeoutException
httpcore.PoolTimeout
httpcore.ConnectTimeout
httpcore.ReadTimeout
httpcore.WriteTimeout
httpcore.NetworkError
httpcore.ConnectError
httpcore.ReadError
httpcore.WriteError
httpcore.ProtocolError
httpcore.RemoteProtocolError
httpcore.LocalProtocolError
httpcore.ProxyError
httpcore.UnsupportedProtocol
The request/response API used by httpcore
is kept deliberately simple and explicit.
The Request
and Response
models are pretty slim wrappers around this core API:
# Pseudo-code expressing the essentials of the request/response model.
+(
+ status_code: int,
+ headers: List[Tuple(bytes, bytes)],
+ stream: Iterable[bytes]
+) = handle_request(
+ method: bytes,
+ url: URL,
+ headers: List[Tuple(bytes, bytes)],
+ stream: Iterable[bytes]
+)
+
This is everything that's needed in order to represent an HTTP exchange.
+Well... almost.
+There is a maxim in Computer Science that "All non-trivial abstractions, to some degree, are leaky". When an expression is leaky, it's important that it ought to at least leak only in well-defined places.
+In order to handle cases that don't otherwise fit inside this core abstraction, httpcore
requests and responses have 'extensions'. These are a dictionary of optional additional information.
Let's expand on our request/response abstraction...
+# Pseudo-code expressing the essentials of the request/response model,
+# plus extensions allowing for additional API that does not fit into
+# this abstraction.
+(
+ status_code: int,
+ headers: List[Tuple(bytes, bytes)],
+ stream: Iterable[bytes],
+ extensions: dict
+) = handle_request(
+ method: bytes,
+ url: URL,
+ headers: List[Tuple(bytes, bytes)],
+ stream: Iterable[bytes],
+ extensions: dict
+)
+
Several extensions are supported both on the request:
+r = httpcore.request(
+ "GET",
+ "https://www.example.com",
+ extensions={"timeout": {"connect": 5.0}}
+)
+
And on the response:
+r = httpcore.request("GET", "https://www.example.com")
+
+print(r.extensions["http_version"])
+# When using HTTP/1.1 on the client side, the server HTTP response
+# could feasibly be one of b"HTTP/0.9", b"HTTP/1.0", or b"HTTP/1.1".
+
"timeout"
A dictionary of str: Optional[float]
timeout values.
May include values for 'connect'
, 'read'
, 'write'
, or 'pool'
.
For example:
+# Timeout if a connection takes more than 5 seconds to established, or if
+# we are blocked waiting on the connection pool for more than 10 seconds.
+r = httpcore.request(
+ "GET",
+ "https://www.example.com",
+ extensions={"timeout": {"connect": 5.0, "pool": 10.0}}
+)
+
"trace"
The trace extension allows a callback handler to be installed to monitor the internal
+flow of events within httpcore
. The simplest way to explain this is with an example:
import httpcore
+
+def log(event_name, info):
+ print(event_name, info)
+
+r = httpcore.request("GET", "https://www.example.com/", extensions={"trace": log})
+# connection.connect_tcp.started {'host': 'www.example.com', 'port': 443, 'local_address': None, 'timeout': None}
+# connection.connect_tcp.complete {'return_value': <httpcore.backends.sync.SyncStream object at 0x1093f94d0>}
+# connection.start_tls.started {'ssl_context': <ssl.SSLContext object at 0x1093ee750>, 'server_hostname': b'www.example.com', 'timeout': None}
+# connection.start_tls.complete {'return_value': <httpcore.backends.sync.SyncStream object at 0x1093f9450>}
+# http11.send_request_headers.started {'request': <Request [b'GET']>}
+# http11.send_request_headers.complete {'return_value': None}
+# http11.send_request_body.started {'request': <Request [b'GET']>}
+# http11.send_request_body.complete {'return_value': None}
+# http11.receive_response_headers.started {'request': <Request [b'GET']>}
+# http11.receive_response_headers.complete {'return_value': (b'HTTP/1.1', 200, b'OK', [(b'Age', b'553715'), (b'Cache-Control', b'max-age=604800'), (b'Content-Type', b'text/html; charset=UTF-8'), (b'Date', b'Thu, 21 Oct 2021 17:08:42 GMT'), (b'Etag', b'"3147526947+ident"'), (b'Expires', b'Thu, 28 Oct 2021 17:08:42 GMT'), (b'Last-Modified', b'Thu, 17 Oct 2019 07:18:26 GMT'), (b'Server', b'ECS (nyb/1DCD)'), (b'Vary', b'Accept-Encoding'), (b'X-Cache', b'HIT'), (b'Content-Length', b'1256')])}
+# http11.receive_response_body.started {'request': <Request [b'GET']>}
+# http11.receive_response_body.complete {'return_value': None}
+# http11.response_closed.started {}
+# http11.response_closed.complete {'return_value': None}
+
The event_name
and info
arguments here will be one of the following:
{event_type}.{event_name}.started
, <dictionary of keyword arguments>
{event_type}.{event_name}.complete
, {"return_value": <...>}
{event_type}.{event_name}.failed
, {"exception": <...>}
Note that when using the async variant of httpcore
the handler function passed to "trace"
must be an async def ...
function.
The following event types are currently exposed...
+Establishing the connection
+"connection.connect_tcp"
"connection.connect_unix_socket"
"connection.start_tls"
HTTP/1.1 events
+"http11.send_request_headers"
"http11.send_request_body"
"http11.receive_response"
"http11.receive_response_body"
"http11.response_closed"
HTTP/2 events
+"http2.send_connection_init"
"http2.send_request_headers"
"http2.send_request_body"
"http2.receive_response_headers"
"http2.receive_response_body"
"http2.response_closed"
"sni_hostname"
The server's hostname, which is used to confirm the hostname supplied by the SSL certificate.
+For example:
+headers = {"Host": "www.encode.io"}
+extensions = {"sni_hostname": "www.encode.io"}
+response = httpcore.request(
+ "GET",
+ "https://185.199.108.153",
+ headers=headers,
+ extensions=extensions
+)
+
"http_version"
The HTTP version, as bytes. Eg. b"HTTP/1.1"
.
When using HTTP/1.1 the response line includes an explicit version, and the value of this key could feasibly be one of b"HTTP/0.9"
, b"HTTP/1.0"
, or b"HTTP/1.1"
.
When using HTTP/2 there is no further response versioning included in the protocol, and the value of this key will always be b"HTTP/2"
.
"reason_phrase"
The reason-phrase of the HTTP response, as bytes. For example b"OK"
. Some servers may include a custom reason phrase, although this is not recommended.
HTTP/2 onwards does not include a reason phrase on the wire.
+When no key is included, a default based on the status code may be used.
+"stream_id"
When HTTP/2 is being used the "stream_id"
response extension can be accessed to determine the ID of the data stream that the response was sent on.
"network_stream"
The "network_stream"
extension allows developers to handle HTTP CONNECT
and Upgrade
requests, by providing an API that steps outside the standard request/response model, and can directly read or write to the network.
The interface provided by the network stream:
+read(max_bytes, timeout = None) -> bytes
write(buffer, timeout = None)
close()
start_tls(ssl_context, server_hostname = None, timeout = None) -> NetworkStream
get_extra_info(info) -> Any
This API can be used as the foundation for working with HTTP proxies, WebSocket upgrades, and other advanced use-cases.
+See the network backends documentation for more information on working directly with network streams.
+CONNECT
requestsA proxy CONNECT request using the network stream:
+# Formulate a CONNECT request...
+#
+# This will establish a connection to 127.0.0.1:8080, and then send the following...
+#
+# CONNECT http://www.example.com HTTP/1.1
+# Host: 127.0.0.1:8080
+url = httpcore.URL(b"http", b"127.0.0.1", 8080, b"http://www.example.com")
+with httpcore.stream("CONNECT", url) as response:
+ network_stream = response.extensions["network_stream"]
+
+ # Upgrade to an SSL stream...
+ network_stream = network_stream.start_tls(
+ ssl_context=httpcore.default_ssl_context(),
        server_hostname="www.example.com",
+ )
+
+ # Manually send an HTTP request over the network stream, and read the response...
+ #
+ # For a more complete example see the httpcore `TunnelHTTPConnection` implementation.
+ network_stream.write(b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
+ data = network_stream.read()
+ print(data)
+
Upgrade
requestsUsing the wsproto
package to handle a websockets session:
import httpcore
+import wsproto
+import os
+import base64
+
+
+url = "http://127.0.0.1:8000/"
+headers = {
+ b"Connection": b"Upgrade",
+ b"Upgrade": b"WebSocket",
+ b"Sec-WebSocket-Key": base64.b64encode(os.urandom(16)),
+ b"Sec-WebSocket-Version": b"13"
+}
+with httpcore.stream("GET", url, headers=headers) as response:
+ if response.status != 101:
+ raise Exception("Failed to upgrade to websockets", response)
+
+ # Get the raw network stream.
+ network_steam = response.extensions["network_stream"]
+
+ # Write a WebSocket text frame to the stream.
+ ws_connection = wsproto.Connection(wsproto.ConnectionType.CLIENT)
+ message = wsproto.events.TextMessage("hello, world!")
+ outgoing_data = ws_connection.send(message)
+ network_steam.write(outgoing_data)
+
+ # Wait for a response.
+ incoming_data = network_steam.read(max_bytes=4096)
+ ws_connection.receive_data(incoming_data)
+ for event in ws_connection.events():
+ if isinstance(event, wsproto.events.TextMessage):
+ print("Got data:", event.data)
+
+ # Write a WebSocket close to the stream.
+ message = wsproto.events.CloseConnection(code=1000)
+ outgoing_data = ws_connection.send(message)
+ network_steam.write(outgoing_data)
+
The network stream abstraction also allows access to various low-level information that may be exposed by the underlying socket:
+response = httpcore.request("GET", "https://www.example.com")
+network_stream = response.extensions["network_stream"]
+
+client_addr = network_stream.get_extra_info("client_addr")
+server_addr = network_stream.get_extra_info("server_addr")
+print("Client address", client_addr)
+print("Server address", server_addr)
+
The socket SSL information is also available through this interface, although you need to ensure that the underlying connection is still open, in order to access it...
+with httpcore.stream("GET", "https://www.example.com") as response:
+ network_stream = response.extensions["network_stream"]
+
+ ssl_object = network_stream.get_extra_info("ssl_object")
+ print("TLS version", ssl_object.version())
+
HTTP/2 is a major new iteration of the HTTP protocol, that provides a more efficient transport, with potential performance benefits. HTTP/2 does not change the core semantics of the request or response, but alters the way that data is sent to and from the server.
+Rather than the text format that HTTP/1.1 uses, HTTP/2 is a binary format. The binary format provides full request and response multiplexing, and efficient compression of HTTP headers. The stream multiplexing means that where HTTP/1.1 requires one TCP stream for each concurrent request, HTTP/2 allows a single TCP stream to handle multiple concurrent requests.
+HTTP/2 also provides support for functionality such as response prioritization, and server push.
+For a comprehensive guide to HTTP/2 you may want to check out "HTTP2 Explained".
+When using the httpcore
client, HTTP/2 support is not enabled by default, because HTTP/1.1 is a mature, battle-hardened transport layer, and our HTTP/1.1 implementation may be considered the more robust option at this point in time. It is possible that a future version of httpcore
may enable HTTP/2 support by default.
If you're issuing highly concurrent requests you might want to consider trying out our HTTP/2 support. You can do so by first making sure to install the optional HTTP/2 dependencies...
+$ pip install httpcore[http2]
+
And then instantiating a connection pool with HTTP/2 support enabled:
+import httpcore
+
+pool = httpcore.ConnectionPool(http2=True)
+
We can take a look at the difference in behaviour by issuing several outgoing requests in parallel.
+Start out by using a standard HTTP/1.1 connection pool:
+import httpcore
+import concurrent.futures
+import time
+
+
+def download(http, year):
+ http.request("GET", f"https://en.wikipedia.org/wiki/{year}")
+
+
+def main():
+ with httpcore.ConnectionPool() as http:
+ started = time.time()
+ with concurrent.futures.ThreadPoolExecutor(max_workers=10) as threads:
+ for year in range(2000, 2020):
+ threads.submit(download, http, year)
+ complete = time.time()
+
+ for connection in http.connections:
+ print(connection)
+ print("Complete in %.3f seconds" % (complete - started))
+
+
+main()
+
If you run this with an HTTP/1.1 connection pool, you ought to see output similar to the following:
+<HTTPConnection ['https://en.wikipedia.org:443', HTTP/1.1, IDLE, Request Count: 2]>,
+<HTTPConnection ['https://en.wikipedia.org:443', HTTP/1.1, IDLE, Request Count: 3]>,
+<HTTPConnection ['https://en.wikipedia.org:443', HTTP/1.1, IDLE, Request Count: 6]>,
+<HTTPConnection ['https://en.wikipedia.org:443', HTTP/1.1, IDLE, Request Count: 5]>,
+<HTTPConnection ['https://en.wikipedia.org:443', HTTP/1.1, IDLE, Request Count: 1]>,
+<HTTPConnection ['https://en.wikipedia.org:443', HTTP/1.1, IDLE, Request Count: 1]>,
+<HTTPConnection ['https://en.wikipedia.org:443', HTTP/1.1, IDLE, Request Count: 1]>,
+<HTTPConnection ['https://en.wikipedia.org:443', HTTP/1.1, IDLE, Request Count: 1]>
+Complete in 0.586 seconds
+
We can see that the connection pool required a number of connections in order to handle the parallel requests.
+If we now upgrade our connection pool to support HTTP/2:
+with httpcore.ConnectionPool(http2=True) as http:
+ ...
+
And run the same script again, we should end up with something like this:
+<HTTPConnection ['https://en.wikipedia.org:443', HTTP/2, IDLE, Request Count: 20]>
+Complete in 0.573 seconds
+
All of our requests have been handled over a single connection.
+Switching to HTTP/2 should not necessarily be considered an "upgrade". It is more complex, and requires more computational power, and so particularly in an interpreted language like Python it could be slower in some instances. Moreover, utilising multiple connections may end up connecting to multiple hosts, and could sometimes appear faster to the client, at the cost of requiring more server resources. Enabling HTTP/2 is most likely to be beneficial if you are sending requests in high concurrency, and may often be more well suited to an async context, rather than multi-threading.
+Enabling HTTP/2 support on the client does not necessarily mean that your requests and responses will be transported over HTTP/2, since both the client and the server need to support HTTP/2. If you connect to a server that only supports HTTP/1.1 the client will use a standard HTTP/1.1 connection instead.
+You can determine which version of the HTTP protocol was used by examining the "http_version"
response extension.
import httpcore
+
+pool = httpcore.ConnectionPool(http2=True)
+response = pool.request("GET", "https://www.example.com/")
+
+# Should be one of b"HTTP/2", b"HTTP/1.1", b"HTTP/1.0", or b"HTTP/0.9".
+print(response.extensions["http_version"])
+
See the extensions documentation for more details.
+Robust servers need to support both HTTP/2 and HTTP/1.1 capable clients, and so need some way to "negotiate" with the client which protocol version will be used.
+Generally the method used is for the server to advertise if it has HTTP/2 support during the part of the SSL connection handshake. This is known as ALPN - "Application Layer Protocol Negotiation".
+Most browsers only provide HTTP/2 support over HTTPS connections, and this is also the default behaviour that httpcore
provides. If you enable HTTP/2 support you should still expect to see HTTP/1.1 connections for any http://
URLs.
Servers can optionally also support HTTP/2 over HTTP by supporting the Upgrade: h2c
header.
This mechanism is not supported by httpcore
. It requires an additional round-trip between the client and server, and also requires any request body to be sent twice.
If you know in advance that the server you are communicating with will support HTTP/2, then you can enforce that the client uses HTTP/2, without requiring either ALPN support or an HTTP Upgrade: h2c
header.
This is managed by disabling HTTP/1.1 support on the connection pool:
+pool = httpcore.ConnectionPool(http1=False, http2=True)
+
Because HTTP/2 frames the requests and responses somewhat differently to HTTP/1.1, there is a difference in some of the headers that are used.
+In order for the httpcore
library to support both HTTP/1.1 and HTTP/2 transparently, the HTTP/1.1 style is always used throughout the API. Any differences in header styles are only mapped onto HTTP/2 at the internal network layer.
The following pseudo-headers are used by HTTP/2 in the request:
+:method
- The request method.:path
- Taken from the URL of the request.:authority
- Equivalent to the Host
header in HTTP/1.1. In httpcore
this is represented using the request Host
header, which is automatically populated from the request URL if no Host
header is explicitly included.:scheme
- Taken from the URL of the request.These pseudo-headers are included in httpcore
as part of the request.method
and request.url
attributes, and through the request.headers["Host"]
header. They are not exposed directly by their psuedo-header names.
The one other difference to be aware of is the Transfer-Encoding: chunked
header.
In HTTP/2 this header is never used, since streaming data is framed using a different mechanism.
+In httpcore
the Transfer-Encoding: chunked
header is always used to represent the presence of a streaming body on the request, and is automatically populated if required. However the header is only sent if the underlying connection ends up being HTTP/1.1, and is omitted if the underlying connection ends up being HTTP/2.
The following pseudo-header is used by HTTP/2 in the response:
+:status
- The response status code.In httpcore
this is represented by the response.status
attribute, rather than being exposed as a psuedo-header.
++Do one thing, and do it well.
+
The HTTP Core package provides a minimal low-level HTTP client, which does +one thing only. Sending HTTP requests.
+It does not provide any high level model abstractions over the API, +does not handle redirects, multipart uploads, building authentication headers, +transparent HTTP caching, URL parsing, session cookie handling, +content or charset decoding, handling JSON, environment based configuration +defaults, or any of that Jazz.
+Some things HTTP Core does do:
+asyncio
and trio
.Python 3.8+
+For HTTP/1.1 only support, install with:
+$ pip install httpcore
+
For HTTP/1.1 and HTTP/2 support, install with:
+$ pip install httpcore[http2]
+
For SOCKS proxy support, install with:
+$ pip install httpcore[socks]
+
Let's check we're able to send HTTP requests:
+import httpcore
+
+response = httpcore.request("GET", "https://www.example.com/")
+
+print(response)
+# <Response [200]>
+print(response.status)
+# 200
+print(response.headers)
+# [(b'Accept-Ranges', b'bytes'), (b'Age', b'557328'), (b'Cache-Control', b'max-age=604800'), ...]
+print(response.content)
+# b'<!doctype html>\n<html>\n<head>\n<title>Example Domain</title>\n\n<meta charset="utf-8"/>\n ...'
+
Ready to get going?
+Head over to the quickstart documentation.
+ + + + + + +If you need to inspect the internal behaviour of httpcore
, you can use Python's standard logging to output debug level information.
For example, the following configuration...
+import logging
+import httpcore
+
+logging.basicConfig(
+ format="%(levelname)s [%(asctime)s] %(name)s - %(message)s",
+ datefmt="%Y-%m-%d %H:%M:%S",
+ level=logging.DEBUG
+)
+
+httpcore.request('GET', 'https://www.example.com')
+
Will send debug level output to the console, or wherever stdout
is directed too...
DEBUG [2023-01-09 14:44:00] httpcore.connection - connect_tcp.started host='www.example.com' port=443 local_address=None timeout=None
+DEBUG [2023-01-09 14:44:00] httpcore.connection - connect_tcp.complete return_value=<httpcore.backends.sync.SyncStream object at 0x109ba6610>
+DEBUG [2023-01-09 14:44:00] httpcore.connection - start_tls.started ssl_context=<ssl.SSLContext object at 0x109e427b0> server_hostname='www.example.com' timeout=None
+DEBUG [2023-01-09 14:44:00] httpcore.connection - start_tls.complete return_value=<httpcore.backends.sync.SyncStream object at 0x109e8b050>
+DEBUG [2023-01-09 14:44:00] httpcore.http11 - send_request_headers.started request=<Request [b'GET']>
+DEBUG [2023-01-09 14:44:00] httpcore.http11 - send_request_headers.complete
+DEBUG [2023-01-09 14:44:00] httpcore.http11 - send_request_body.started request=<Request [b'GET']>
+DEBUG [2023-01-09 14:44:00] httpcore.http11 - send_request_body.complete
+DEBUG [2023-01-09 14:44:00] httpcore.http11 - receive_response_headers.started request=<Request [b'GET']>
+DEBUG [2023-01-09 14:44:00] httpcore.http11 - receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Age', b'572646'), (b'Cache-Control', b'max-age=604800'), (b'Content-Type', b'text/html; charset=UTF-8'), (b'Date', b'Mon, 09 Jan 2023 14:44:00 GMT'), (b'Etag', b'"3147526947+ident"'), (b'Expires', b'Mon, 16 Jan 2023 14:44:00 GMT'), (b'Last-Modified', b'Thu, 17 Oct 2019 07:18:26 GMT'), (b'Server', b'ECS (nyb/1D18)'), (b'Vary', b'Accept-Encoding'), (b'X-Cache', b'HIT'), (b'Content-Length', b'1256')])
+DEBUG [2023-01-09 14:44:00] httpcore.http11 - receive_response_body.started request=<Request [b'GET']>
+DEBUG [2023-01-09 14:44:00] httpcore.http11 - receive_response_body.complete
+DEBUG [2023-01-09 14:44:00] httpcore.http11 - response_closed.started
+DEBUG [2023-01-09 14:44:00] httpcore.http11 - response_closed.complete
+DEBUG [2023-01-09 14:44:00] httpcore.connection - close.started
+DEBUG [2023-01-09 14:44:00] httpcore.connection - close.complete
+
The exact formatting of the debug logging may be subject to change across different versions of httpcore
. If you need to rely on a particular format it is recommended that you pin installation of the package to a fixed version.
The API layer at which httpcore
interacts with the network is described as the network backend. Various backend implementations are provided, allowing httpcore
to handle networking in different runtime contexts.
Typically you won't need to specify a network backend, as a default will automatically be selected. However, understanding how the network backends fit in may be useful if you want to better understand the underlying architecture. Let's start by seeing how we can explicitly select the network backend.
+First we're making a standard HTTP request, using a connection pool:
+import httpcore
+
+with httpcore.ConnectionPool() as http:
+ response = http.request('GET', 'https://www.example.com')
+ print(response)
+
We can also have the same behavior, but be explicit with our selection of the network backend:
+import httpcore
+
+network_backend = httpcore.SyncBackend()
+with httpcore.ConnectionPool(network_backend=network_backend) as http:
+ response = http.request('GET', 'https://www.example.com')
+ print(response)
+
The httpcore.SyncBackend()
implementation handles the opening of TCP connections, and operations on the socket stream, such as reading, writing, and closing the connection.
We can get a better understanding of this by using a network backend to send a basic HTTP/1.1 request directly:
+import httpcore
+
+# Create an SSL context using 'certifi' for the certificates.
+ssl_context = httpcore.default_ssl_context()
+
+# A basic HTTP/1.1 request as a plain bytestring.
+request = b'\r\n'.join([
+ b'GET / HTTP/1.1',
+ b'Host: www.example.com',
+ b'Accept: */*',
+ b'Connection: close',
+ b''
+])
+
+# Open a TCP stream and upgrade it to SSL.
+network_backend = httpcore.SyncBackend()
+network_stream = network_backend.connect_tcp("www.example.com", 443)
+network_stream = network_stream.start_tls(ssl_context, server_hostname="www.example.com")
+
+# Send the HTTP request.
+network_stream.write(request)
+
+# Read the HTTP response.
+while True:
+ response = network_stream.read(max_bytes=4096)
+ if response == b'':
+ break
+ print(response)
+
+# The output should look something like this:
+#
+# b'HTTP/1.1 200 OK\r\nAge: 600005\r\n [...] Content-Length: 1256\r\nConnection: close\r\n\r\n'
+# b'<!doctype html>\n<html>\n<head>\n <title>Example Domain</title> [...] </html>\n'
+
If we're working with an async
codebase, then we need to select a different backend.
The httpcore.AnyIOBackend
is suitable for usage if you're running under asyncio
. This is a networking backend implemented using the anyio
package.
import httpcore
+import asyncio
+
+async def main():
+ network_backend = httpcore.AnyIOBackend()
+ async with httpcore.AsyncConnectionPool(network_backend=network_backend) as http:
+ response = await http.request('GET', 'https://www.example.com')
+ print(response)
+
+asyncio.run(main())
+
The AnyIOBackend
will work when running under either asyncio
or trio
. However, if you're working with async using the trio
framework, then we recommend using the httpcore.TrioBackend
.
This will give you the same kind of networking behavior you'd have using AnyIOBackend
, but there will be a little less indirection so it will be marginally more efficient and will present cleaner tracebacks in error cases.
import httpcore
+import trio
+
+async def main():
+ network_backend = httpcore.TrioBackend()
+ async with httpcore.AsyncConnectionPool(network_backend=network_backend) as http:
+ response = await http.request('GET', 'https://www.example.com')
+ print(response)
+
+trio.run(main)
+
There are also mock network backends available that can be useful for testing purposes. +These backends accept a list of bytes, and return network stream interfaces that return those byte streams.
+Here's an example of mocking a simple HTTP/1.1 response...
+import httpcore
+
+network_backend = httpcore.MockBackend([
+ b"HTTP/1.1 200 OK\r\n",
+ b"Content-Type: plain/text\r\n",
+ b"Content-Length: 13\r\n",
+ b"\r\n",
+ b"Hello, world!",
+])
+with httpcore.ConnectionPool(network_backend=network_backend) as http:
+ response = http.request("GET", "https://example.com/")
+ print(response.extensions['http_version'])
+ print(response.status)
+ print(response.content)
+
Mocking a HTTP/2 response is more complex, since it uses a binary format...
+import hpack
+import hyperframe.frame
+import httpcore
+
+content = [
+ hyperframe.frame.SettingsFrame().serialize(),
+ hyperframe.frame.HeadersFrame(
+ stream_id=1,
+ data=hpack.Encoder().encode(
+ [
+ (b":status", b"200"),
+ (b"content-type", b"plain/text"),
+ ]
+ ),
+ flags=["END_HEADERS"],
+ ).serialize(),
+ hyperframe.frame.DataFrame(
+ stream_id=1, data=b"Hello, world!", flags=["END_STREAM"]
+ ).serialize(),
+]
+# Note that we instantiate the mock backend with an `http2=True` argument.
+# This ensures that the mock network stream acts as if the `h2` ALPN flag has been set,
+# and causes the connection pool to interact with the connection using HTTP/2.
+network_backend = httpcore.MockBackend(content, http2=True)
+with httpcore.ConnectionPool(network_backend=network_backend) as http:
+ response = http.request("GET", "https://example.com/")
+ print(response.extensions['http_version'])
+ print(response.status)
+ print(response.content)
+
The base interface for network backends is provided as public API, allowing you to implement custom networking behavior.
+You can use this to provide advanced networking functionality such as:
+Here's an example that records the network response to a file on disk:
+import httpcore
+
+
+class RecordingNetworkStream(httpcore.NetworkStream):
+ def __init__(self, record_file, stream):
+ self.record_file = record_file
+ self.stream = stream
+
+ def read(self, max_bytes, timeout=None):
+ data = self.stream.read(max_bytes, timeout=timeout)
+ self.record_file.write(data)
+ return data
+
+ def write(self, buffer, timeout=None):
+ self.stream.write(buffer, timeout=timeout)
+
+ def close(self) -> None:
+ self.stream.close()
+
+ def start_tls(
+ self,
+ ssl_context,
+ server_hostname=None,
+ timeout=None,
+ ):
+ self.stream = self.stream.start_tls(
+ ssl_context, server_hostname=server_hostname, timeout=timeout
+ )
+ return self
+
+ def get_extra_info(self, info):
+ return self.stream.get_extra_info(info)
+
+
+class RecordingNetworkBackend(httpcore.NetworkBackend):
+ """
+ A custom network backend that records network responses.
+ """
+ def __init__(self, record_file):
+ self.record_file = record_file
+ self.backend = httpcore.SyncBackend()
+
+ def connect_tcp(
+ self,
+ host,
+ port,
+ timeout=None,
+ local_address=None,
+ socket_options=None,
+ ):
+ # Note that we're only using a single record file here,
+ # so even if multiple connections are opened the network
+ # traffic will all write to the same file.
+
+ # An alternative implementation might automatically use
+ # a new file for each opened connection.
+ stream = self.backend.connect_tcp(
+ host,
+ port,
+ timeout=timeout,
+ local_address=local_address,
+ socket_options=socket_options
+ )
+ return RecordingNetworkStream(self.record_file, stream)
+
+
+# Once you make the request, the raw HTTP/1.1 response will be available
+# in the 'network-recording' file.
+#
+# Try switching to `http2=True` to see the difference when recording HTTP/2 binary network traffic,
+# or add `headers={'Accept-Encoding': 'gzip'}` to see HTTP content compression.
+with open("network-recording", "wb") as record_file:
+ network_backend = RecordingNetworkBackend(record_file)
+ with httpcore.ConnectionPool(network_backend=network_backend) as http:
+ response = http.request("GET", "https://www.example.com/")
+ print(response)
+
httpcore.SyncBackend
httpcore.AnyIOBackend
httpcore.TrioBackend
httpcore.MockBackend
httpcore.MockStream
httpcore.AsyncMockBackend
httpcore.AsyncMockStream
httpcore.NetworkBackend
httpcore.NetworkStream
httpcore.AsyncNetworkBackend
httpcore.AsyncNetworkStream
The httpcore
package provides support for HTTP proxies, using either "HTTP Forwarding" or "HTTP Tunnelling". Forwarding is a proxy mechanism for sending requests to http
URLs via an intermediate proxy. Tunnelling is a proxy mechanism for sending requests to https
URLs via an intermediate proxy.
Sending requests via a proxy is very similar to sending requests using a standard connection pool:
+import httpcore
+
+proxy = httpcore.HTTPProxy(proxy_url="http://127.0.0.1:8080/")
+r = proxy.request("GET", "https://www.example.com/")
+
+print(r)
+# <Response [200]>
+
You can test the httpcore
proxy support, using the Python proxy.py
tool:
$ pip install proxy.py
+$ proxy --hostname 127.0.0.1 --port 8080
+
Requests will automatically use either forwarding or tunnelling, depending on whether the scheme is http
or https
.
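For example, a single proxy instance can serve both cases. This is a minimal sketch, assuming the local proxy.py instance from above is still running:
import httpcore

proxy = httpcore.HTTPProxy(proxy_url="http://127.0.0.1:8080/")

# An `http://` URL is sent using forwarding...
print(proxy.request("GET", "http://www.example.com/"))

# ...while an `https://` URL is tunnelled via an initial CONNECT request.
print(proxy.request("GET", "https://www.example.com/"))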
Proxy authentication can be included in the initial configuration:
+import httpcore
+
+# A `Proxy-Authorization` header will be included on the initial proxy connection.
+proxy = httpcore.HTTPProxy(
+ proxy_url="http://127.0.0.1:8080/",
+ proxy_auth=("<username>", "<password>")
+)
+
Custom headers can also be included:
+import httpcore
+import base64
+
+# Construct and include a `Proxy-Authorization` header.
+auth = base64.b64encode(b"<username>:<password>")
+proxy = httpcore.HTTPProxy(
+ proxy_url="http://127.0.0.1:8080/",
+ proxy_headers={"Proxy-Authorization": b"Basic " + auth}
+)
+
The httpcore
package also supports HTTPS proxies for http and https destinations.
HTTPS proxies can be used in the same way that HTTP proxies are.
+proxy = httpcore.HTTPProxy(proxy_url="https://127.0.0.1:8080/")
+
Also, when using HTTPS proxies, you may need to configure the SSL context, which you can do with the proxy_ssl_context
argument.
import ssl
+import httpcore
+
+proxy_ssl_context = ssl.create_default_context()
+proxy_ssl_context.check_hostname = False
+
+proxy = httpcore.HTTPProxy('https://127.0.0.1:8080/', proxy_ssl_context=proxy_ssl_context)
+
It is important to note that the ssl_context
argument is always used for the remote connection, and the proxy_ssl_context
argument is always used for the proxy connection.
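As a rough sketch of that distinction, both arguments can be passed side by side. The particular SSL settings and proxy URL here are illustrative only:
import ssl
import httpcore

# Used when verifying the TLS connection to the remote origin.
ssl_context = ssl.create_default_context()

# Used only when establishing the TLS connection to the proxy itself.
proxy_ssl_context = ssl.create_default_context()
proxy_ssl_context.check_hostname = False

proxy = httpcore.HTTPProxy(
    proxy_url="https://127.0.0.1:8080/",
    ssl_context=ssl_context,
    proxy_ssl_context=proxy_ssl_context,
)
r = proxy.request("GET", "https://www.example.com/")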
If you use proxies, keep in mind that the httpcore
package only supports proxies to HTTP/1.1 servers.
The httpcore
package also supports proxies using the SOCKS5 protocol.
Make sure to install the optional dependency using pip install httpcore[socks]
.
The SOCKSProxy
class should be used instead of a standard connection pool:
import httpcore
+
+# Note that the SOCKS port is 1080.
+proxy = httpcore.SOCKSProxy(proxy_url="socks5://127.0.0.1:1080/")
+r = proxy.request("GET", "https://www.example.com/")
+
Authentication via SOCKS is also supported:
+import httpcore
+
+proxy = httpcore.SOCKSProxy(
+ proxy_url="socks5://127.0.0.1:8080/",
+ proxy_auth=("<username>", "<password>")
+)
+r = proxy.request("GET", "https://www.example.com/")
+
httpcore.HTTPProxy
A connection pool that sends requests via an HTTP proxy.
__init__(self, proxy_url, proxy_auth=None, proxy_headers=None, ssl_context=None, proxy_ssl_context=None, max_connections=10, max_keepalive_connections=None, keepalive_expiry=None, http1=True, http2=False, retries=0, local_address=None, uds=None, network_backend=None, socket_options=None)
A connection pool for making HTTP requests.
Parameters:
Name | Type | Description | Default
---|---|---|---
proxy_url | Union[httpcore.URL, bytes, str] | The URL to use when connecting to the proxy server. For example "http://127.0.0.1:8080/". | required
proxy_auth | Optional[Tuple[Union[bytes, str], Union[bytes, str]]] | Any proxy authentication as a two-tuple of (username, password). May be either bytes or ascii-only str. | None
proxy_headers | Union[Mapping[Union[bytes, str], Union[bytes, str]], Sequence[Tuple[Union[bytes, str], Union[bytes, str]]]] | Any HTTP headers to use for the proxy requests. For example {"Proxy-Authorization": "Basic <username>:<password>"}. | None
ssl_context | Optional[ssl.SSLContext] | An SSL context to use for verifying connections. If not specified, the default httpcore.default_ssl_context() will be used. | None
proxy_ssl_context | Optional[ssl.SSLContext] | The same as ssl_context, but for a proxy server rather than a remote origin. | None
max_connections | Optional[int] | The maximum number of concurrent HTTP connections that the pool should allow. Any attempt to send a request on a pool that would exceed this amount will block until a connection is available. | 10
max_keepalive_connections | Optional[int] | The maximum number of idle HTTP connections that will be maintained in the pool. | None
keepalive_expiry | Optional[float] | The duration in seconds that an idle HTTP connection may be maintained for before being expired from the pool. | None
http1 | bool | A boolean indicating if HTTP/1.1 requests should be supported by the connection pool. Defaults to True. | True
http2 | bool | A boolean indicating if HTTP/2 requests should be supported by the connection pool. Defaults to False. | False
retries | int | The maximum number of retries when trying to establish a connection. | 0
local_address | Optional[str] | Local address to connect from. Can also be used to connect using a particular address family. Using local_address="0.0.0.0" will connect using an AF_INET address (IPv4), while using local_address="::" will connect using an AF_INET6 address (IPv6). | None
uds | Optional[str] | Path to a Unix Domain Socket to use instead of TCP sockets. | None
network_backend | Optional[httpcore.NetworkBackend] | A backend instance to use for handling network I/O. | None
For convenience, the httpcore
package provides a couple of top-level functions that you can use for sending HTTP requests. You probably don't want to integrate against these functions if you're writing a library that uses httpcore
, but you might find them useful for testing httpcore
from the command-line, or if you're writing a simple script that doesn't require any of the connection pooling or advanced configuration that httpcore
offers.
We'll start off by sending a request...
+import httpcore
+
+response = httpcore.request("GET", "https://www.example.com/")
+
+print(response)
+# <Response [200]>
+print(response.status)
+# 200
+print(response.headers)
+# [(b'Accept-Ranges', b'bytes'), (b'Age', b'557328'), (b'Cache-Control', b'max-age=604800'), ...]
+print(response.content)
+# b'<!doctype html>\n<html>\n<head>\n<title>Example Domain</title>\n\n<meta charset="utf-8"/>\n ...'
+
Request headers may be included either in a dictionary style, or as a list of two-tuples.
+import httpcore
+import json
+
+headers = {'User-Agent': 'httpcore'}
+r = httpcore.request('GET', 'https://httpbin.org/headers', headers=headers)
+
+print(json.loads(r.content))
+# {
+# 'headers': {
+# 'Host': 'httpbin.org',
+# 'User-Agent': 'httpcore',
+# 'X-Amzn-Trace-Id': 'Root=1-616ff5de-5ea1b7e12766f1cf3b8e3a33'
+# }
+# }
+
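The same request with the headers given as a list of two-tuples might look like this. It is just a sketch of the alternative style, reusing the httpbin endpoint above:
import httpcore

# Headers passed as a list of two-tuples rather than a dictionary.
headers = [(b'User-Agent', b'httpcore')]
r = httpcore.request('GET', 'https://httpbin.org/headers', headers=headers)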
The keys and values may either be provided as strings or as bytes. Where strings are provided they may only contain characters within the ASCII range chr(0)
- chr(127)
. To include characters outside this range you must deal with any character encoding explicitly, and pass bytes as the header keys/values.
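For instance, a header value containing a non-ASCII character must be passed as bytes, with the encoding handled by the caller. The header name and the choice of Latin-1 encoding below are illustrative assumptions:
import httpcore

# "Zoë" cannot be passed as a str header value, so it is encoded to bytes explicitly.
headers = {b"X-Customer-Name": "Zoë".encode("latin-1")}
r = httpcore.request("GET", "https://httpbin.org/headers", headers=headers)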
The Host
header will always be automatically included in any outgoing request, as it is strictly required to be present by the HTTP protocol.
Note that the X-Amzn-Trace-Id
header shown in the example above is not an outgoing request header, but has been added by a gateway server.
A request body can be included either as bytes...
+import httpcore
+import json
+
+r = httpcore.request('POST', 'https://httpbin.org/post', content=b'Hello, world')
+
+print(json.loads(r.content))
+# {
+# 'args': {},
+# 'data': 'Hello, world',
+# 'files': {},
+# 'form': {},
+# 'headers': {
+# 'Host': 'httpbin.org',
+# 'Content-Length': '12',
+# 'X-Amzn-Trace-Id': 'Root=1-61700258-00e338a124ca55854bf8435f'
+# },
+# 'json': None,
+# 'origin': '68.41.35.196',
+# 'url': 'https://httpbin.org/post'
+# }
+
Or as an iterable that returns bytes...
+import httpcore
+import json
+
+with open("hello-world.txt", "rb") as input_file:
+ r = httpcore.request('POST', 'https://httpbin.org/post', content=input_file)
+
+print(json.loads(r.content))
+# {
+# 'args': {},
+# 'data': 'Hello, world',
+# 'files': {},
+# 'form': {},
+# 'headers': {
+# 'Host': 'httpbin.org',
+# 'Transfer-Encoding': 'chunked',
+# 'X-Amzn-Trace-Id': 'Root=1-61700258-00e338a124ca55854bf8435f'
+# },
+# 'json': None,
+# 'origin': '68.41.35.196',
+# 'url': 'https://httpbin.org/post'
+# }
+
When a request body is included, either a Content-Length
header or a Transfer-Encoding: chunked
header will be automatically included.
The Content-Length
header is used when passing bytes, and indicates an HTTP request with a body of a pre-determined length.
The Transfer-Encoding: chunked
header is the mechanism that HTTP/1.1 uses for sending HTTP request bodies without a pre-determined length.
When using the httpcore.request()
function, the response body will automatically be read to completion, and made available in the response.content
attribute.
Sometimes you may be dealing with large responses and not want to read the entire response into memory. The httpcore.stream()
function provides a mechanism for sending a request and dealing with a streaming response:
import httpcore
+
+with httpcore.stream('GET', 'https://example.com') as response:
+ for chunk in response.iter_stream():
+ print(f"Downloaded: {chunk}")
+
Here's a more complete example that demonstrates downloading a response:
+import httpcore
+
+with httpcore.stream('GET', 'https://speed.hetzner.de/100MB.bin') as response:
+ with open("download.bin", "wb") as output_file:
+ for chunk in response.iter_stream():
+ output_file.write(chunk)
+
The httpcore.stream()
API also allows you to conditionally read the response...
import httpcore
+
+with httpcore.stream('GET', 'https://example.com') as response:
+ content_length = [int(v) for k, v in response.headers if k.lower() == b'content-length'][0]
+ if content_length > 100_000_000:
+ raise Exception("Response too large.")
+ response.read() # `response.content` is now available.
+
httpcore.request()
Sends an HTTP request, returning the response.
+response = httpcore.request("GET", "https://www.example.com/")
+
Parameters:
Name | Type | Description | Default
---|---|---|---
method | Union[bytes, str] | The HTTP method for the request. Typically one of "GET", "OPTIONS", "HEAD", "POST", "PUT", "PATCH", or "DELETE". | required
url | Union[httpcore.URL, bytes, str] | The URL of the HTTP request. Either as an instance of httpcore.URL, or as str/bytes. | required
headers | Union[Sequence[Tuple[Union[bytes, str], Union[bytes, str]]], Mapping[Union[bytes, str], Union[bytes, str]]] | The HTTP request headers. Either as a dictionary of str/bytes, or as a list of two-tuples of str/bytes. | None
content | Union[bytes, Iterator[bytes]] | The content of the request body. Either as bytes, or as a bytes iterator. | None
extensions | Optional[MutableMapping[str, Any]] | A dictionary of optional extra information included on the request. Possible keys include "timeout". | None

Returns:

Type | Description
---|---
Response | An instance of httpcore.Response.
httpcore.stream()
Sends an HTTP request, returning the response within a context manager.
+with httpcore.stream("GET", "https://www.example.com/") as response:
+ ...
+
When using the stream()
function, the body of the response will not be
+automatically read. If you want to access the response body you should
+either use content = response.read()
, or for chunk in response.iter_stream()
.
Parameters:
Name | Type | Description | Default
---|---|---|---
method | Union[bytes, str] | The HTTP method for the request. Typically one of "GET", "OPTIONS", "HEAD", "POST", "PUT", "PATCH", or "DELETE". | required
url | Union[httpcore.URL, bytes, str] | The URL of the HTTP request. Either as an instance of httpcore.URL, or as str/bytes. | required
headers | Union[Sequence[Tuple[Union[bytes, str], Union[bytes, str]]], Mapping[Union[bytes, str], Union[bytes, str]]] | The HTTP request headers. Either as a dictionary of str/bytes, or as a list of two-tuples of str/bytes. | None
content | Union[bytes, Iterator[bytes]] | The content of the request body. Either as bytes, or as a bytes iterator. | None
extensions | Optional[MutableMapping[str, Any]] | A dictionary of optional extra information included on the request. Possible keys include "timeout". | None

Returns:

Type | Description
---|---
Iterator[httpcore.Response] | An instance of httpcore.Response.
TODO
+Request instances in httpcore
are deliberately simple, and only include the essential information required to represent an HTTP request.
Properties on the request are plain byte-wise representations.
+>>> request = httpcore.Request("GET", "https://www.example.com/")
+>>> request.method
+b"GET"
+>>> request.url
+httpcore.URL(scheme=b"https", host=b"www.example.com", port=None, target=b"/")
+>>> request.headers
+[(b'Host', b'www.example.com')]
+>>> request.stream
+<httpcore.ByteStream [0 bytes]>
+
The interface is liberal in the types that it accepts, but specific in the properties that it uses to represent them. For example, headers may be specified as a dictionary of strings, but internally are represented as a list of (bytes, bytes)
tuples.
>>> headers = {"User-Agent": "custom"}
>>> request = httpcore.Request("GET", "https://www.example.com/", headers=headers)
>>> request.headers
[(b'Host', b'www.example.com'), (b"User-Agent", b"custom")]
+httpcore.Request
An HTTP request.
__init__(self, method, url, *, headers=None, content=None, extensions=None)
Parameters:
Name | Type | Description | Default
---|---|---|---
method | Union[bytes, str] | The HTTP request method, either as a string or bytes. For example: "GET". | required
url | Union[httpcore.URL, bytes, str] | The request URL, either as a URL instance, or as a string or bytes. For example: "https://www.example.com/". | required
headers | Union[Sequence[Tuple[Union[bytes, str], Union[bytes, str]]], Mapping[Union[bytes, str], Union[bytes, str]]] | The HTTP request headers. | None
content | Union[bytes, Iterable[bytes], AsyncIterable[bytes]] | The content of the request body. | None
extensions | Optional[MutableMapping[str, Any]] | A dictionary of optional extra information included on the request. Possible keys include "timeout", and "trace". | None
httpcore.Response
An HTTP response.
__init__(self, status, *, headers=None, content=None, extensions=None)
Parameters:
Name | Type | Description | Default
---|---|---|---
status | int | The HTTP status code of the response. For example 200. | required
headers | Union[Sequence[Tuple[Union[bytes, str], Union[bytes, str]]], Mapping[Union[bytes, str], Union[bytes, str]]] | The HTTP response headers. | None
content | Union[bytes, Iterable[bytes], AsyncIterable[bytes]] | The content of the response body. | None
extensions | Optional[MutableMapping[str, Any]] | A dictionary of optional extra information included on the response. Possible keys include "http_version", "reason_phrase", and "network_stream". | None
httpcore.URL
Represents the URL against which an HTTP request may be made.
The URL may either be specified as a plain string, for convenience:
+url = httpcore.URL("https://www.example.com/")
+
Or be constructed with explicitly pre-parsed components:
+url = httpcore.URL(scheme=b'https', host=b'www.example.com', port=None, target=b'/')
+
Using this second more explicit style allows integrations that are using
+httpcore
to pass through URLs that have already been parsed in order to use
+libraries such as rfc-3986
rather than relying on the stdlib. It also ensures
+that URL parsing is treated identically at both the networking level and at any
+higher layers of abstraction.
The four components are important here, as they allow the URL to be precisely +specified in a pre-parsed format. They also allow certain types of request to +be created that could not otherwise be expressed.
+For example, an HTTP request to http://www.example.com/
forwarded via a proxy
+at http://localhost:8080
...
# Constructs an HTTP request with a complete URL as the target:
+# GET https://www.example.com/ HTTP/1.1
+url = httpcore.URL(
+ scheme=b'http',
+ host=b'localhost',
+ port=8080,
+ target=b'https://www.example.com/'
+)
+request = httpcore.Request(
+ method="GET",
+ url=url
+)
+
Another example is constructing an OPTIONS *
request...
# Constructs an 'OPTIONS *' HTTP request:
+# OPTIONS * HTTP/1.1
+url = httpcore.URL(scheme=b'https', host=b'www.example.com', target=b'*')
+request = httpcore.Request(method="OPTIONS", url=url)
+
This kind of request is not possible to formulate with a URL string,
+because the /
delimiter is always used to demark the target from the
+host/port portion of the URL.
For convenience, string-like arguments may be specified either as strings or as bytes. However, once a request is being issued over the wire, the URL components are always ultimately required to be a bytewise representation.
+In order to avoid any ambiguity over character encodings, when strings are used
+as arguments, they must be strictly limited to the ASCII range chr(0)
-chr(127)
.
+If you require a bytewise representation that is outside this range you must
+handle the character encoding directly, and pass a bytes instance.
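For instance, a request target containing a non-ASCII character could be handled along these lines. The path and the UTF-8 encoding are illustrative assumptions; in practice you may prefer to percent-encode the path so that it stays within the ASCII range:
import httpcore

# Encode the non-ASCII path to bytes explicitly, rather than passing a str.
url = httpcore.URL(
    scheme=b"https",
    host=b"www.example.com",
    target="/café".encode("utf-8"),
)
request = httpcore.Request(method="GET", url=url)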
__init__(self, url='', *, scheme=b'', host=b'', port=None, target=b'')
Parameters:
Name | Type | Description | Default
---|---|---|---
url | Union[bytes, str] | The complete URL as a string or bytes. | ''
scheme | Union[bytes, str] | The URL scheme as a string or bytes. Typically either "http" or "https". | b''
host | Union[bytes, str] | The URL host as a string or bytes. Such as "www.example.com". | b''
port | Optional[int] | The port to connect to. Either an integer or None. | None
target | Union[bytes, str] | The target of the HTTP request. Such as "/items?search=red". | b''
Do one thing, and do it well.
The HTTP Core package provides a minimal low-level HTTP client, which does one thing only. Sending HTTP requests.
It does not provide any high level model abstractions over the API, does not handle redirects, multipart uploads, building authentication headers, transparent HTTP caching, URL parsing, session cookie handling, content or charset decoding, handling JSON, environment based configuration defaults, or any of that Jazz.
Some things HTTP Core does do:
Async support for asyncio and trio.
Python 3.8+
"},{"location":"#installation","title":"Installation","text":"For HTTP/1.1 only support, install with:
$ pip install httpcore\n
For HTTP/1.1 and HTTP/2 support, install with:
$ pip install httpcore[http2]\n
For SOCKS proxy support, install with:
$ pip install httpcore[socks]\n
"},{"location":"#example","title":"Example","text":"Let's check we're able to send HTTP requests:
import httpcore\n\nresponse = httpcore.request(\"GET\", \"https://www.example.com/\")\n\nprint(response)\n# <Response [200]>\nprint(response.status)\n# 200\nprint(response.headers)\n# [(b'Accept-Ranges', b'bytes'), (b'Age', b'557328'), (b'Cache-Control', b'max-age=604800'), ...]\nprint(response.content)\n# b'<!doctype html>\\n<html>\\n<head>\\n<title>Example Domain</title>\\n\\n<meta charset=\"utf-8\"/>\\n ...'\n
Ready to get going?
Head over to the quickstart documentation.
"},{"location":"async/","title":"Async Support","text":"HTTPX offers a standard synchronous API by default, but also gives you the option of an async client if you need it.
Async is a concurrency model that is far more efficient than multi-threading, and can provide significant performance benefits and enable the use of long-lived network connections such as WebSockets.
If you're working with an async web framework then you'll also want to use an async client for sending outgoing HTTP requests.
Launching concurrent async tasks is far more resource efficient than spawning multiple threads. The Python interpreter should be able to comfortably handle switching between over 1000 concurrent tasks, while a sensible thread pool might only allow around 10 or 20 concurrent threads.
"},{"location":"async/#api-differences","title":"API differences","text":"When using async support, you need make sure to use an async connection pool class:
# The async variation of `httpcore.ConnectionPool`\nasync with httpcore.AsyncConnectionPool() as http:\n ...\n
Or if connecting via a proxy:
# The async variation of `httpcore.HTTPProxy`\nasync with httpcore.AsyncHTTPProxy() as proxy:\n ...\n
"},{"location":"async/#sending-requests","title":"Sending requests","text":"Sending requests with the async version of httpcore
requires the await
keyword:
import asyncio\nimport httpcore\n\nasync def main():\n async with httpcore.AsyncConnectionPool() as http:\n response = await http.request(\"GET\", \"https://www.example.com/\")\n\n\nasyncio.run(main())\n
When including content in the request, the content must either be bytes or an async iterable yielding bytes.
"},{"location":"async/#streaming-responses","title":"Streaming responses","text":"Streaming responses also require a slightly different interface to the sync version:
with <pool>.stream(...) as response
\u2192 async with <pool>.stream() as response
.for chunk in response.iter_stream()
\u2192 async for chunk in response.aiter_stream()
.response.read()
\u2192 await response.aread()
.response.close()
\u2192 await response.aclose()
For example:
import asyncio\nimport httpcore\n\n\nasync def main():\n async with httpcore.AsyncConnectionPool() as http:\n async with http.stream(\"GET\", \"https://www.example.com/\") as response:\n async for chunk in response.aiter_stream():\n print(f\"Downloaded: {chunk}\")\n\n\nasyncio.run(main())\n
"},{"location":"async/#pool-lifespans","title":"Pool lifespans","text":"When using httpcore
in an async environment it is strongly recommended that you instantiate and use connection pools using the context managed style:
async with httpcore.AsyncConnectionPool() as http:\n ...\n
To benefit from connection pooling it is recommended that you instantiate a single connection pool in this style, and pass it around throughout your application.
If you do want to use a connection pool without this style then you'll need to ensure that you explicitly close the pool once it is no longer required:
try:\n http = httpcore.AsyncConnectionPool()\n ...\nfinally:\n await http.aclose()\n
This is a little different to the threaded context, where it's okay to simply instantiate a globally available connection pool, and then allow Python's garbage collection to deal with closing any connections in the pool, once the __del__
method is called.
The reason for this difference is that asynchronous code is not able to run within the context of the synchronous __del__
method, so there is no way for connections to be automatically closed at the point of garbage collection. This can lead to unterminated TCP connections still remaining after the Python interpreter quits.
httpcore supports either asyncio
or trio
as an async environment.
It will auto-detect which of those two to use as the backend for socket operations and concurrency primitives.
"},{"location":"async/#asyncio","title":"AsyncIO","text":"AsyncIO is Python's built-in library for writing concurrent code with the async/await syntax.
Let's take a look at sending several outgoing HTTP requests concurrently, using asyncio
:
import asyncio\nimport httpcore\nimport time\n\n\nasync def download(http, year):\n await http.request(\"GET\", f\"https://en.wikipedia.org/wiki/{year}\")\n\n\nasync def main():\n async with httpcore.AsyncConnectionPool() as http:\n started = time.time()\n # Here we use `asyncio.gather()` in order to run several tasks concurrently...\n tasks = [download(http, year) for year in range(2000, 2020)]\n await asyncio.gather(*tasks)\n complete = time.time()\n\n for connection in http.connections:\n print(connection)\n print(\"Complete in %.3f seconds\" % (complete - started))\n\n\nasyncio.run(main())\n
"},{"location":"async/#trio","title":"Trio","text":"Trio is an alternative async library, designed around the the principles of structured concurrency.
import httpcore\nimport trio\nimport time\n\n\nasync def download(http, year):\n await http.request(\"GET\", f\"https://en.wikipedia.org/wiki/{year}\")\n\n\nasync def main():\n async with httpcore.AsyncConnectionPool() as http:\n started = time.time()\n async with trio.open_nursery() as nursery:\n for year in range(2000, 2020):\n nursery.start_soon(download, http, year)\n complete = time.time()\n\n for connection in http.connections:\n print(connection)\n print(\"Complete in %.3f seconds\" % (complete - started))\n\n\ntrio.run(main)\n
"},{"location":"async/#anyio","title":"AnyIO","text":"AnyIO is an asynchronous networking and concurrency library that works on top of either asyncio or trio. It blends in with native libraries of your chosen backend (defaults to asyncio).
The anyio
library is designed around the the principles of structured concurrency, and brings many of the same correctness and usability benefits that Trio provides, while interoperating with existing asyncio
libraries.
import httpcore\nimport anyio\nimport time\n\n\nasync def download(http, year):\n await http.request(\"GET\", f\"https://en.wikipedia.org/wiki/{year}\")\n\n\nasync def main():\n async with httpcore.AsyncConnectionPool() as http:\n started = time.time()\n async with anyio.create_task_group() as task_group:\n for year in range(2000, 2020):\n task_group.start_soon(download, http, year)\n complete = time.time()\n\n for connection in http.connections:\n print(connection)\n print(\"Complete in %.3f seconds\" % (complete - started))\n\n\nanyio.run(main)\n
"},{"location":"async/#reference","title":"Reference","text":""},{"location":"async/#httpcoreasyncconnectionpool","title":"httpcore.AsyncConnectionPool
","text":"A connection pool for making HTTP requests.
"},{"location":"async/#httpcore.AsyncConnectionPool.connections","title":"connections: List[httpcore.AsyncConnectionInterface]
property
readonly
","text":"Return a list of the connections currently in the pool.
For example:
>>> pool.connections\n[\n <AsyncHTTPConnection ['https://example.com:443', HTTP/1.1, ACTIVE, Request Count: 6]>,\n <AsyncHTTPConnection ['https://example.com:443', HTTP/1.1, IDLE, Request Count: 9]> ,\n <AsyncHTTPConnection ['http://example.com:80', HTTP/1.1, IDLE, Request Count: 1]>,\n]\n
"},{"location":"async/#httpcore.AsyncConnectionPool.__init__","title":"__init__(self, ssl_context=None, max_connections=10, max_keepalive_connections=None, keepalive_expiry=None, http1=True, http2=False, retries=0, local_address=None, uds=None, network_backend=None, socket_options=None)
special
","text":"A connection pool for making HTTP requests.
Parameters:
Name Type Description Defaultssl_context
Optional[ssl.SSLContext]
An SSL context to use for verifying connections. If not specified, the default httpcore.default_ssl_context()
will be used.
None
max_connections
Optional[int]
The maximum number of concurrent HTTP connections that the pool should allow. Any attempt to send a request on a pool that would exceed this amount will block until a connection is available.
10
max_keepalive_connections
Optional[int]
The maximum number of idle HTTP connections that will be maintained in the pool.
None
keepalive_expiry
Optional[float]
The duration in seconds that an idle HTTP connection may be maintained for before being expired from the pool.
None
http1
bool
A boolean indicating if HTTP/1.1 requests should be supported by the connection pool. Defaults to True.
True
http2
bool
A boolean indicating if HTTP/2 requests should be supported by the connection pool. Defaults to False.
False
retries
int
The maximum number of retries when trying to establish a connection.
0
local_address
Optional[str]
Local address to connect from. Can also be used to connect using a particular address family. Using local_address=\"0.0.0.0\"
will connect using an AF_INET
address (IPv4), while using local_address=\"::\"
will connect using an AF_INET6
address (IPv6).
None
uds
Optional[str]
Path to a Unix Domain Socket to use instead of TCP sockets.
None
network_backend
Optional[httpcore.AsyncNetworkBackend]
A backend instance to use for handling network I/O.
None
socket_options
Optional[Iterable[Union[Tuple[int, int, int], Tuple[int, int, Union[bytes, bytearray]], Tuple[int, int, NoneType, int]]]]
Socket options that have to be included in the TCP socket when the connection was established.
None
"},{"location":"async/#httpcore.AsyncConnectionPool.aclose","title":"aclose(self)
async
","text":"Close any connections in the pool.
"},{"location":"async/#httpcore.AsyncConnectionPool.handle_async_request","title":"handle_async_request(self, request)
async
","text":"Send an HTTP request, and return an HTTP response.
This is the core implementation that is called into by .request()
or .stream()
.
response_closed(self, status)
async
","text":"This method acts as a callback once the request/response cycle is complete.
It is called into from the ConnectionPoolByteStream.aclose()
method.
httpcore.AsyncHTTPProxy
","text":"A connection pool that sends requests via an HTTP proxy.
"},{"location":"async/#httpcore.AsyncHTTPProxy.__init__","title":"__init__(self, proxy_url, proxy_auth=None, proxy_headers=None, ssl_context=None, proxy_ssl_context=None, max_connections=10, max_keepalive_connections=None, keepalive_expiry=None, http1=True, http2=False, retries=0, local_address=None, uds=None, network_backend=None, socket_options=None)
special
","text":"A connection pool for making HTTP requests.
Parameters:
Name Type Description Defaultproxy_url
Union[httpcore.URL, bytes, str]
The URL to use when connecting to the proxy server. For example \"http://127.0.0.1:8080/\"
.
proxy_auth
Optional[Tuple[Union[bytes, str], Union[bytes, str]]]
Any proxy authentication as a two-tuple of (username, password). May be either bytes or ascii-only str.
None
proxy_headers
Union[Mapping[Union[bytes, str], Union[bytes, str]], Sequence[Tuple[Union[bytes, str], Union[bytes, str]]]]
Any HTTP headers to use for the proxy requests. For example {\"Proxy-Authorization\": \"Basic <username>:<password>\"}
.
None
ssl_context
Optional[ssl.SSLContext]
An SSL context to use for verifying connections. If not specified, the default httpcore.default_ssl_context()
will be used.
None
proxy_ssl_context
Optional[ssl.SSLContext]
The same as ssl_context
, but for a proxy server rather than a remote origin.
None
max_connections
Optional[int]
The maximum number of concurrent HTTP connections that the pool should allow. Any attempt to send a request on a pool that would exceed this amount will block until a connection is available.
10
max_keepalive_connections
Optional[int]
The maximum number of idle HTTP connections that will be maintained in the pool.
None
keepalive_expiry
Optional[float]
The duration in seconds that an idle HTTP connection may be maintained for before being expired from the pool.
None
http1
bool
A boolean indicating if HTTP/1.1 requests should be supported by the connection pool. Defaults to True.
True
http2
bool
A boolean indicating if HTTP/2 requests should be supported by the connection pool. Defaults to False.
False
retries
int
The maximum number of retries when trying to establish a connection.
0
local_address
Optional[str]
Local address to connect from. Can also be used to connect using a particular address family. Using local_address=\"0.0.0.0\"
will connect using an AF_INET
address (IPv4), while using local_address=\"::\"
will connect using an AF_INET6
address (IPv6).
None
uds
Optional[str]
Path to a Unix Domain Socket to use instead of TCP sockets.
None
network_backend
Optional[httpcore.AsyncNetworkBackend]
A backend instance to use for handling network I/O.
None
"},{"location":"connection-pools/","title":"Connection Pools","text":"While the top-level API provides convenience functions for working with httpcore
, in practice you'll almost always want to take advantage of the connection pooling functionality that it provides.
To do so, instantiate a pool instance, and use it to send requests:
import httpcore\n\nhttp = httpcore.ConnectionPool()\nr = http.request(\"GET\", \"https://www.example.com/\")\n\nprint(r)\n# <Response [200]>\n
Connection pools support the same .request()
and .stream()
APIs as described in the Quickstart.
We can observe the benefits of connection pooling with a simple script like so:
import httpcore\nimport time\n\n\nhttp = httpcore.ConnectionPool()\nfor counter in range(5):\n started = time.time()\n response = http.request(\"GET\", \"https://www.example.com/\")\n complete = time.time()\n print(response, \"in %.3f seconds\" % (complete - started))\n
The output should demonstrate the initial request as being substantially slower than the subsequent requests:
<Response [200]> in {0.529} seconds\n<Response [200]> in {0.096} seconds\n<Response [200]> in {0.097} seconds\n<Response [200]> in {0.095} seconds\n<Response [200]> in {0.098} seconds\n
This is to be expected. Once we've established a connection to \"www.example.com\"
we're able to reuse it for following requests.
The connection pool instance is also the main point of configuration. Let's take a look at the various options that it provides:
"},{"location":"connection-pools/#ssl-configuration","title":"SSL configuration","text":"ssl_context
: An SSL context to use for verifying connections. If not specified, the default httpcore.default_ssl_context()
will be used.max_connections
: The maximum number of concurrent HTTP connections that the pool should allow. Any attempt to send a request on a pool that would exceed this amount will block until a connection is available.max_keepalive_connections
: The maximum number of idle HTTP connections that will be maintained in the pool.keepalive_expiry
: The duration in seconds that an idle HTTP connection may be maintained for before being expired from the pool.http1
: A boolean indicating if HTTP/1.1 requests should be supported by the connection pool. Defaults to True
.http2
: A boolean indicating if HTTP/2 requests should be supported by the connection pool. Defaults to False
.retries
: The maximum number of retries when trying to establish a connection.local_address
: Local address to connect from. Can also be used to connect using a particular address family. Using local_address=\"0.0.0.0\"
will connect using an AF_INET
address (IPv4), while using local_address=\"::\"
will connect using an AF_INET6
address (IPv6).uds
: Path to a Unix Domain Socket to use instead of TCP sockets.network_backend
: A backend instance to use for handling network I/O.socket_options
: Socket options that have to be included in the TCP socket when the connection was established.Because connection pools hold onto network resources, careful developers may want to ensure that instances are properly closed once they are no longer required.
Working with a single global instance isn't a bad idea for many use case, since the connection pool will automatically be closed when the __del__
method is called on it:
# This is perfectly fine for most purposes.\n# The connection pool will automatically be closed when it is garbage collected,\n# or when the Python interpreter exits.\nhttp = httpcore.ConnectionPool()\n
However, to be more explicit around the resource usage, we can use the connection pool within a context manager:
with httpcore.ConnectionPool() as http:\n ...\n
Or else close the pool explicitly:
http = httpcore.ConnectionPool()\ntry:\n ...\nfinally:\n http.close()\n
"},{"location":"connection-pools/#thread-and-task-safety","title":"Thread and task safety","text":"Connection pools are designed to be thread-safe. Similarly, when using httpcore
in an async context connection pools are task-safe.
This means that you can have a single connection pool instance shared by multiple threads.
"},{"location":"connection-pools/#reference","title":"Reference","text":""},{"location":"connection-pools/#httpcoreconnectionpool","title":"httpcore.ConnectionPool
","text":"A connection pool for making HTTP requests.
"},{"location":"connection-pools/#httpcore.ConnectionPool.connections","title":"connections: List[httpcore.ConnectionInterface]
property
readonly
","text":"Return a list of the connections currently in the pool.
For example:
>>> pool.connections\n[\n <HTTPConnection ['https://example.com:443', HTTP/1.1, ACTIVE, Request Count: 6]>,\n <HTTPConnection ['https://example.com:443', HTTP/1.1, IDLE, Request Count: 9]> ,\n <HTTPConnection ['http://example.com:80', HTTP/1.1, IDLE, Request Count: 1]>,\n]\n
"},{"location":"connection-pools/#httpcore.ConnectionPool.__init__","title":"__init__(self, ssl_context=None, max_connections=10, max_keepalive_connections=None, keepalive_expiry=None, http1=True, http2=False, retries=0, local_address=None, uds=None, network_backend=None, socket_options=None)
special
","text":"A connection pool for making HTTP requests.
Parameters:
Name Type Description Defaultssl_context
Optional[ssl.SSLContext]
An SSL context to use for verifying connections. If not specified, the default httpcore.default_ssl_context()
will be used.
None
max_connections
Optional[int]
The maximum number of concurrent HTTP connections that the pool should allow. Any attempt to send a request on a pool that would exceed this amount will block until a connection is available.
10
max_keepalive_connections
Optional[int]
The maximum number of idle HTTP connections that will be maintained in the pool.
None
keepalive_expiry
Optional[float]
The duration in seconds that an idle HTTP connection may be maintained for before being expired from the pool.
None
http1
bool
A boolean indicating if HTTP/1.1 requests should be supported by the connection pool. Defaults to True.
True
http2
bool
A boolean indicating if HTTP/2 requests should be supported by the connection pool. Defaults to False.
False
retries
int
The maximum number of retries when trying to establish a connection.
0
local_address
Optional[str]
Local address to connect from. Can also be used to connect using a particular address family. Using local_address=\"0.0.0.0\"
will connect using an AF_INET
address (IPv4), while using local_address=\"::\"
will connect using an AF_INET6
address (IPv6).
None
uds
Optional[str]
Path to a Unix Domain Socket to use instead of TCP sockets.
None
network_backend
Optional[httpcore.NetworkBackend]
A backend instance to use for handling network I/O.
None
socket_options
Optional[Iterable[Union[Tuple[int, int, int], Tuple[int, int, Union[bytes, bytearray]], Tuple[int, int, NoneType, int]]]]
Socket options that have to be included in the TCP socket when the connection was established.
None
"},{"location":"connection-pools/#httpcore.ConnectionPool.close","title":"close(self)
","text":"Close any connections in the pool.
"},{"location":"connection-pools/#httpcore.ConnectionPool.handle_request","title":"handle_request(self, request)
","text":"Send an HTTP request, and return an HTTP response.
This is the core implementation that is called into by .request()
or .stream()
.
response_closed(self, status)
","text":"This method acts as a callback once the request/response cycle is complete.
It is called into from the ConnectionPoolByteStream.close()
method.
TODO
"},{"location":"connections/#reference","title":"Reference","text":""},{"location":"connections/#httpcorehttpconnection","title":"httpcore.HTTPConnection
","text":""},{"location":"connections/#httpcore.HTTPConnection.has_expired","title":"has_expired(self)
","text":"Return True
if the connection is in a state where it should be closed.
This either means that the connection is idle and it has passed the expiry time on its keep-alive, or that server has sent an EOF.
"},{"location":"connections/#httpcore.HTTPConnection.is_available","title":"is_available(self)
","text":"Return True
if the connection is currently able to accept an outgoing request.
An HTTP/1.1 connection will only be available if it is currently idle.
An HTTP/2 connection will be available so long as the stream ID space is not yet exhausted, and the connection is not in an error state.
While the connection is being established we may not yet know if it is going to result in an HTTP/1.1 or HTTP/2 connection. The connection should be treated as being available, but might ultimately raise NewConnectionRequired
required exceptions if multiple requests are attempted over a connection that ends up being established as HTTP/1.1.
is_closed(self)
","text":"Return True
if the connection has been closed.
Used when a response is closed to determine if the connection may be returned to the connection pool or not.
"},{"location":"connections/#httpcore.HTTPConnection.is_idle","title":"is_idle(self)
","text":"Return True
if the connection is currently idle.
httpcore.HTTP11Connection
","text":""},{"location":"connections/#httpcore.HTTP11Connection.has_expired","title":"has_expired(self)
","text":"Return True
if the connection is in a state where it should be closed.
This either means that the connection is idle and it has passed the expiry time on its keep-alive, or that server has sent an EOF.
"},{"location":"connections/#httpcore.HTTP11Connection.is_available","title":"is_available(self)
","text":"Return True
if the connection is currently able to accept an outgoing request.
An HTTP/1.1 connection will only be available if it is currently idle.
An HTTP/2 connection will be available so long as the stream ID space is not yet exhausted, and the connection is not in an error state.
While the connection is being established we may not yet know if it is going to result in an HTTP/1.1 or HTTP/2 connection. The connection should be treated as being available, but might ultimately raise NewConnectionRequired
required exceptions if multiple requests are attempted over a connection that ends up being established as HTTP/1.1.
is_closed(self)
","text":"Return True
if the connection has been closed.
Used when a response is closed to determine if the connection may be returned to the connection pool or not.
"},{"location":"connections/#httpcore.HTTP11Connection.is_idle","title":"is_idle(self)
","text":"Return True
if the connection is currently idle.
httpcore.HTTP2Connection
","text":""},{"location":"connections/#httpcore.HTTP2Connection.has_expired","title":"has_expired(self)
","text":"Return True
if the connection is in a state where it should be closed.
This either means that the connection is idle and it has passed the expiry time on its keep-alive, or that server has sent an EOF.
"},{"location":"connections/#httpcore.HTTP2Connection.is_available","title":"is_available(self)
","text":"Return True
if the connection is currently able to accept an outgoing request.
An HTTP/1.1 connection will only be available if it is currently idle.
An HTTP/2 connection will be available so long as the stream ID space is not yet exhausted, and the connection is not in an error state.
While the connection is being established we may not yet know if it is going to result in an HTTP/1.1 or HTTP/2 connection. The connection should be treated as being available, but might ultimately raise NewConnectionRequired
required exceptions if multiple requests are attempted over a connection that ends up being established as HTTP/1.1.
is_closed(self)
","text":"Return True
if the connection has been closed.
Used when a response is closed to determine if the connection may be returned to the connection pool or not.
"},{"location":"connections/#httpcore.HTTP2Connection.is_idle","title":"is_idle(self)
","text":"Return True
if the connection is currently idle.
The following exceptions may be raised when sending a request:
httpcore.TimeoutException
httpcore.PoolTimeout
httpcore.ConnectTimeout
httpcore.ReadTimeout
httpcore.WriteTimeout
httpcore.NetworkError
httpcore.ConnectError
httpcore.ReadError
httpcore.WriteError
httpcore.ProtocolError
httpcore.RemoteProtocolError
httpcore.LocalProtocolError
httpcore.ProxyError
httpcore.UnsupportedProtocol
The request/response API used by httpcore
is kept deliberately simple and explicit.
The Request
and Response
models are pretty slim wrappers around this core API:
# Pseudo-code expressing the essentials of the request/response model.\n(\nstatus_code: int,\nheaders: List[Tuple(bytes, bytes)],\nstream: Iterable[bytes]\n) = handle_request(\nmethod: bytes,\nurl: URL,\nheaders: List[Tuple(bytes, bytes)],\nstream: Iterable[bytes]\n)\n
This is everything that's needed in order to represent an HTTP exchange.
Well... almost.
There is a maxim in Computer Science that \"All non-trivial abstractions, to some degree, are leaky\". When an expression is leaky, it's important that it ought to at least leak only in well-defined places.
In order to handle cases that don't otherwise fit inside this core abstraction, httpcore
requests and responses have 'extensions'. These are a dictionary of optional additional information.
Let's expand on our request/response abstraction...
# Pseudo-code expressing the essentials of the request/response model,\n# plus extensions allowing for additional API that does not fit into\n# this abstraction.\n(\nstatus_code: int,\nheaders: List[Tuple(bytes, bytes)],\nstream: Iterable[bytes],\nextensions: dict\n) = handle_request(\nmethod: bytes,\nurl: URL,\nheaders: List[Tuple(bytes, bytes)],\nstream: Iterable[bytes],\nextensions: dict\n)\n
Several extensions are supported both on the request:
r = httpcore.request(\n \"GET\",\n \"https://www.example.com\",\n extensions={\"timeout\": {\"connect\": 5.0}}\n)\n
And on the response:
r = httpcore.request(\"GET\", \"https://www.example.com\")\n\nprint(r.extensions[\"http_version\"])\n# When using HTTP/1.1 on the client side, the server HTTP response\n# could feasibly be one of b\"HTTP/0.9\", b\"HTTP/1.0\", or b\"HTTP/1.1\".\n
"},{"location":"extensions/#request-extensions","title":"Request Extensions","text":""},{"location":"extensions/#timeout","title":"\"timeout\"
","text":"A dictionary of str: Optional[float]
timeout values.
May include values for 'connect'
, 'read'
, 'write'
, or 'pool'
.
For example:
# Timeout if a connection takes more than 5 seconds to established, or if\n# we are blocked waiting on the connection pool for more than 10 seconds.\nr = httpcore.request(\n \"GET\",\n \"https://www.example.com\",\n extensions={\"timeout\": {\"connect\": 5.0, \"pool\": 10.0}}\n)\n
"},{"location":"extensions/#trace","title":"\"trace\"
","text":"The trace extension allows a callback handler to be installed to monitor the internal flow of events within httpcore
. The simplest way to explain this is with an example:
import httpcore\n\ndef log(event_name, info):\n print(event_name, info)\n\nr = httpcore.request(\"GET\", \"https://www.example.com/\", extensions={\"trace\": log})\n# connection.connect_tcp.started {'host': 'www.example.com', 'port': 443, 'local_address': None, 'timeout': None}\n# connection.connect_tcp.complete {'return_value': <httpcore.backends.sync.SyncStream object at 0x1093f94d0>}\n# connection.start_tls.started {'ssl_context': <ssl.SSLContext object at 0x1093ee750>, 'server_hostname': b'www.example.com', 'timeout': None}\n# connection.start_tls.complete {'return_value': <httpcore.backends.sync.SyncStream object at 0x1093f9450>}\n# http11.send_request_headers.started {'request': <Request [b'GET']>}\n# http11.send_request_headers.complete {'return_value': None}\n# http11.send_request_body.started {'request': <Request [b'GET']>}\n# http11.send_request_body.complete {'return_value': None}\n# http11.receive_response_headers.started {'request': <Request [b'GET']>}\n# http11.receive_response_headers.complete {'return_value': (b'HTTP/1.1', 200, b'OK', [(b'Age', b'553715'), (b'Cache-Control', b'max-age=604800'), (b'Content-Type', b'text/html; charset=UTF-8'), (b'Date', b'Thu, 21 Oct 2021 17:08:42 GMT'), (b'Etag', b'\"3147526947+ident\"'), (b'Expires', b'Thu, 28 Oct 2021 17:08:42 GMT'), (b'Last-Modified', b'Thu, 17 Oct 2019 07:18:26 GMT'), (b'Server', b'ECS (nyb/1DCD)'), (b'Vary', b'Accept-Encoding'), (b'X-Cache', b'HIT'), (b'Content-Length', b'1256')])}\n# http11.receive_response_body.started {'request': <Request [b'GET']>}\n# http11.receive_response_body.complete {'return_value': None}\n# http11.response_closed.started {}\n# http11.response_closed.complete {'return_value': None}\n
The event_name
and info
arguments here will be one of the following:
{event_type}.{event_name}.started
, <dictionary of keyword arguments>
{event_type}.{event_name}.complete
, {\"return_value\": <...>}
{event_type}.{event_name}.failed
, {\"exception\": <...>}
Note that when using the async variant of httpcore
the handler function passed to \"trace\"
must be an async def ...
function.
The following event types are currently exposed...
Establishing the connection
\"connection.connect_tcp\"
\"connection.connect_unix_socket\"
\"connection.start_tls\"
HTTP/1.1 events
\"http11.send_request_headers\"
\"http11.send_request_body\"
\"http11.receive_response\"
\"http11.receive_response_body\"
\"http11.response_closed\"
HTTP/2 events
\"http2.send_connection_init\"
\"http2.send_request_headers\"
\"http2.send_request_body\"
\"http2.receive_response_headers\"
\"http2.receive_response_body\"
\"http2.response_closed\"
\"sni_hostname\"
","text":"The server's hostname, which is used to confirm the hostname supplied by the SSL certificate.
For example:
headers = {\"Host\": \"www.encode.io\"}\nextensions = {\"sni_hostname\": \"www.encode.io\"}\nresponse = httpcore.request(\n \"GET\",\n \"https://185.199.108.153\",\n headers=headers,\n extensions=extensions\n)\n
"},{"location":"extensions/#response-extensions","title":"Response Extensions","text":""},{"location":"extensions/#http_version","title":"\"http_version\"
","text":"The HTTP version, as bytes. Eg. b\"HTTP/1.1\"
.
When using HTTP/1.1 the response line includes an explicit version, and the value of this key could feasibly be one of b\"HTTP/0.9\"
, b\"HTTP/1.0\"
, or b\"HTTP/1.1\"
.
When using HTTP/2 there is no further response versioning included in the protocol, and the value of this key will always be b\"HTTP/2\"
.
\"reason_phrase\"
","text":"The reason-phrase of the HTTP response, as bytes. For example b\"OK\"
. Some servers may include a custom reason phrase, although this is not recommended.
HTTP/2 onwards does not include a reason phrase on the wire.
When no key is included, a default based on the status code may be used.
"},{"location":"extensions/#stream_id","title":"\"stream_id\"
","text":"When HTTP/2 is being used the \"stream_id\"
response extension can be accessed to determine the ID of the data stream that the response was sent on.
\"network_stream\"
","text":"The \"network_stream\"
extension allows developers to handle HTTP CONNECT
and Upgrade
requests, by providing an API that steps outside the standard request/response model, and can directly read or write to the network.
The interface provided by the network stream:
read(max_bytes, timeout = None) -> bytes
write(buffer, timeout = None)
close()
start_tls(ssl_context, server_hostname = None, timeout = None) -> NetworkStream
get_extra_info(info) -> Any
This API can be used as the foundation for working with HTTP proxies, WebSocket upgrades, and other advanced use-cases.
See the network backends documentation for more information on working directly with network streams.
"},{"location":"extensions/#connect-requests","title":"CONNECT
requests","text":"A proxy CONNECT request using the network stream:
# Formulate a CONNECT request...\n#\n# This will establish a connection to 127.0.0.1:8080, and then send the following...\n#\n# CONNECT http://www.example.com HTTP/1.1\n# Host: 127.0.0.1:8080\nurl = httpcore.URL(b\"http\", b\"127.0.0.1\", 8080, b\"http://www.example.com\")\nwith httpcore.stream(\"CONNECT\", url) as response:\n network_stream = response.extensions[\"network_stream\"]\n\n # Upgrade to an SSL stream...\n network_stream = network_stream.start_tls(\n ssl_context=httpcore.default_ssl_context(),\n hostname=b\"www.example.com\",\n )\n\n # Manually send an HTTP request over the network stream, and read the response...\n #\n # For a more complete example see the httpcore `TunnelHTTPConnection` implementation.\n network_stream.write(b\"GET / HTTP/1.1\\r\\nHost: example.com\\r\\n\\r\\n\")\n data = network_stream.read()\n print(data)\n
"},{"location":"extensions/#upgrade-requests","title":"Upgrade
requests","text":"Using the wsproto
package to handle a websockets session:
import httpcore\nimport wsproto\nimport os\nimport base64\n\n\nurl = \"http://127.0.0.1:8000/\"\nheaders = {\n b\"Connection\": b\"Upgrade\",\n b\"Upgrade\": b\"WebSocket\",\n b\"Sec-WebSocket-Key\": base64.b64encode(os.urandom(16)),\n b\"Sec-WebSocket-Version\": b\"13\"\n}\nwith httpcore.stream(\"GET\", url, headers=headers) as response:\n if response.status != 101:\n raise Exception(\"Failed to upgrade to websockets\", response)\n\n # Get the raw network stream.\n network_steam = response.extensions[\"network_stream\"]\n\n # Write a WebSocket text frame to the stream.\n ws_connection = wsproto.Connection(wsproto.ConnectionType.CLIENT)\n message = wsproto.events.TextMessage(\"hello, world!\")\n outgoing_data = ws_connection.send(message)\n network_steam.write(outgoing_data)\n\n # Wait for a response.\n incoming_data = network_steam.read(max_bytes=4096)\n ws_connection.receive_data(incoming_data)\n for event in ws_connection.events():\n if isinstance(event, wsproto.events.TextMessage):\n print(\"Got data:\", event.data)\n\n # Write a WebSocket close to the stream.\n message = wsproto.events.CloseConnection(code=1000)\n outgoing_data = ws_connection.send(message)\n network_steam.write(outgoing_data)\n
"},{"location":"extensions/#extra-network-information","title":"Extra network information","text":"The network stream abstraction also allows access to various low-level information that may be exposed by the underlying socket:
response = httpcore.request(\"GET\", \"https://www.example.com\")\nnetwork_stream = response.extensions[\"network_stream\"]\n\nclient_addr = network_stream.get_extra_info(\"client_addr\")\nserver_addr = network_stream.get_extra_info(\"server_addr\")\nprint(\"Client address\", client_addr)\nprint(\"Server address\", server_addr)\n
The socket SSL information is also available through this interface, although you need to ensure that the underlying connection is still open, in order to access it...
with httpcore.stream(\"GET\", \"https://www.example.com\") as response:\n network_stream = response.extensions[\"network_stream\"]\n\n ssl_object = network_stream.get_extra_info(\"ssl_object\")\n print(\"TLS version\", ssl_object.version())\n
"},{"location":"http2/","title":"HTTP/2","text":"HTTP/2 is a major new iteration of the HTTP protocol, that provides a more efficient transport, with potential performance benefits. HTTP/2 does not change the core semantics of the request or response, but alters the way that data is sent to and from the server.
Rather than the text format that HTTP/1.1 uses, HTTP/2 is a binary format. The binary format provides full request and response multiplexing, and efficient compression of HTTP headers. The stream multiplexing means that where HTTP/1.1 requires one TCP stream for each concurrent request, HTTP/2 allows a single TCP stream to handle multiple concurrent requests.
HTTP/2 also provides support for functionality such as response prioritization, and server push.
For a comprehensive guide to HTTP/2 you may want to check out \"HTTP2 Explained\".
"},{"location":"http2/#enabling-http2","title":"Enabling HTTP/2","text":"When using the httpcore
client, HTTP/2 support is not enabled by default, because HTTP/1.1 is a mature, battle-hardened transport layer, and our HTTP/1.1 implementation may be considered the more robust option at this point in time. It is possible that a future version of httpcore
may enable HTTP/2 support by default.
If you're issuing highly concurrent requests you might want to consider trying out our HTTP/2 support. You can do so by first making sure to install the optional HTTP/2 dependencies...
$ pip install httpcore[http2]\n
And then instantiating a connection pool with HTTP/2 support enabled:
import httpcore\n\npool = httpcore.ConnectionPool(http2=True)\n
We can take a look at the difference in behaviour by issuing several outgoing requests in parallel.
Start out by using a standard HTTP/1.1 connection pool:
import httpcore\nimport concurrent.futures\nimport time\n\n\ndef download(http, year):\n http.request(\"GET\", f\"https://en.wikipedia.org/wiki/{year}\")\n\n\ndef main():\n with httpcore.ConnectionPool() as http:\n started = time.time()\n with concurrent.futures.ThreadPoolExecutor(max_workers=10) as threads:\n for year in range(2000, 2020):\n threads.submit(download, http, year)\n complete = time.time()\n\n for connection in http.connections:\n print(connection)\n print(\"Complete in %.3f seconds\" % (complete - started))\n\n\nmain()\n
If you run this with an HTTP/1.1 connection pool, you ought to see output similar to the following:
<HTTPConnection ['https://en.wikipedia.org:443', HTTP/1.1, IDLE, Request Count: 2]>,\n<HTTPConnection ['https://en.wikipedia.org:443', HTTP/1.1, IDLE, Request Count: 3]>,\n<HTTPConnection ['https://en.wikipedia.org:443', HTTP/1.1, IDLE, Request Count: 6]>,\n<HTTPConnection ['https://en.wikipedia.org:443', HTTP/1.1, IDLE, Request Count: 5]>,\n<HTTPConnection ['https://en.wikipedia.org:443', HTTP/1.1, IDLE, Request Count: 1]>,\n<HTTPConnection ['https://en.wikipedia.org:443', HTTP/1.1, IDLE, Request Count: 1]>,\n<HTTPConnection ['https://en.wikipedia.org:443', HTTP/1.1, IDLE, Request Count: 1]>,\n<HTTPConnection ['https://en.wikipedia.org:443', HTTP/1.1, IDLE, Request Count: 1]>\nComplete in 0.586 seconds\n
We can see that the connection pool required a number of connections in order to handle the parallel requests.
If we now upgrade our connection pool to support HTTP/2:
with httpcore.ConnectionPool(http2=True) as http:\n ...\n
And run the same script again, we should end up with something like this:
<HTTPConnection ['https://en.wikipedia.org:443', HTTP/2, IDLE, Request Count: 20]>\nComplete in 0.573 seconds\n
All of our requests have been handled over a single connection.
Switching to HTTP/2 should not necessarily be considered an \"upgrade\". It is more complex, and requires more computational power, so particularly in an interpreted language like Python it could be slower in some instances. Moreover, utilising multiple connections may end up connecting to multiple hosts, and could sometimes appear faster to the client, at the cost of requiring more server resources. Enabling HTTP/2 is most likely to be beneficial if you are sending requests at high concurrency, and is often better suited to an async context than to multi-threading.
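As a rough sketch of that kind of async usage, mirroring the threaded example above (and assuming the async pool exposes the same connections listing as the sync pool), you might write something like this...
import asyncio\nimport httpcore\n\n\nasync def download(http, year):\n # Issue the request on the shared HTTP/2-enabled pool.\n await http.request(\"GET\", f\"https://en.wikipedia.org/wiki/{year}\")\n\n\nasync def main():\n async with httpcore.AsyncConnectionPool(http2=True) as http:\n await asyncio.gather(*[download(http, year) for year in range(2000, 2020)])\n for connection in http.connections:\n print(connection)\n\n\nasyncio.run(main())\n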
"},{"location":"http2/#inspecting-the-http-version","title":"Inspecting the HTTP version","text":"Enabling HTTP/2 support on the client does not necessarily mean that your requests and responses will be transported over HTTP/2, since both the client and the server need to support HTTP/2. If you connect to a server that only supports HTTP/1.1 the client will use a standard HTTP/1.1 connection instead.
You can determine which version of the HTTP protocol was used by examining the \"http_version\"
response extension.
import httpcore\n\npool = httpcore.ConnectionPool(http2=True)\nresponse = pool.request(\"GET\", \"https://www.example.com/\")\n\n# Should be one of b\"HTTP/2\", b\"HTTP/1.1\", b\"HTTP/1.0\", or b\"HTTP/0.9\".\nprint(response.extensions[\"http_version\"])\n
See the extensions documentation for more details.
"},{"location":"http2/#http2-negotiation","title":"HTTP/2 negotiation","text":"Robust servers need to support both HTTP/2 and HTTP/1.1 capable clients, and so need some way to \"negotiate\" with the client which protocol version will be used.
"},{"location":"http2/#http2-over-https","title":"HTTP/2 over HTTPS","text":"Generally the method used is for the server to advertise if it has HTTP/2 support during the part of the SSL connection handshake. This is known as ALPN - \"Application Layer Protocol Negotiation\".
Most browsers only provide HTTP/2 support over HTTPS connections, and this is also the default behaviour that httpcore
provides. If you enable HTTP/2 support you should still expect to see HTTP/1.1 connections for any http://
URLs.
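As a rough sketch, you can check which protocol ALPN actually negotiated by inspecting the underlying SSL object, using the \"network_stream\" extension described in the extensions documentation. The selected_alpn_protocol() method is standard on Python's SSL objects...
import httpcore\n\nwith httpcore.ConnectionPool(http2=True) as pool:\n with pool.stream(\"GET\", \"https://www.example.com/\") as response:\n network_stream = response.extensions[\"network_stream\"]\n ssl_object = network_stream.get_extra_info(\"ssl_object\")\n # Typically 'h2' if HTTP/2 was negotiated, or 'http/1.1' otherwise.\n print(ssl_object.selected_alpn_protocol())\n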
Servers can optionally also support HTTP/2 over HTTP by supporting the Upgrade: h2c
header.
This mechanism is not supported by httpcore
. It requires an additional round-trip between the client and server, and also requires any request body to be sent twice.
If you know in advance that the server you are communicating with will support HTTP/2, then you can enforce that the client uses HTTP/2, without requiring either ALPN support or an HTTP Upgrade: h2c
header.
This is managed by disabling HTTP/1.1 support on the connection pool:
pool = httpcore.ConnectionPool(http1=False, http2=True)\n
"},{"location":"http2/#request-response-headers","title":"Request & response headers","text":"Because HTTP/2 frames the requests and responses somewhat differently to HTTP/1.1, there is a difference in some of the headers that are used.
In order for the httpcore
library to support both HTTP/1.1 and HTTP/2 transparently, the HTTP/1.1 style is always used throughout the API. Any differences in header styles are only mapped onto HTTP/2 at the internal network layer.
The following pseudo-headers are used by HTTP/2 in the request:
:method
- The request method.:path
- Taken from the URL of the request.:authority
- Equivalent to the Host
header in HTTP/1.1. In httpcore
this is represented using the request Host
header, which is automatically populated from the request URL if no Host
header is explicitly included.:scheme
- Taken from the URL of the request. These pseudo-headers are included in httpcore
as part of the request.method
and request.url
attributes, and through the request.headers[\"Host\"]
header. They are not exposed directly by their pseudo-header names.
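As a brief illustration of this mapping, using only the request attributes described above...
import httpcore\n\nrequest = httpcore.Request(\"GET\", \"https://www.example.com/\")\n\n# The HTTP/1.1 style representation, which is mapped onto the ':method',\n# ':scheme', ':authority' and ':path' pseudo-headers if HTTP/2 is used.\nprint(request.method) # b'GET'\nprint(request.url) # httpcore.URL(scheme=b'https', host=b'www.example.com', port=None, target=b'/')\nprint(request.headers) # [(b'Host', b'www.example.com')]\n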
The one other difference to be aware of is the Transfer-Encoding: chunked
header.
In HTTP/2 this header is never used, since streaming data is framed using a different mechanism.
In httpcore
the Transfer-Encoding: chunked
header is always used to represent the presence of a streaming body on the request, and is automatically populated if required. However the header is only sent if the underlying connection ends up being HTTP/1.1, and is omitted if the underlying connection ends up being HTTP/2.
The following pseudo-header is used by HTTP/2 in the response:
:status
- The response status code.In httpcore
this is represented by the response.status
attribute, rather than being exposed as a pseudo-header.
If you need to inspect the internal behaviour of httpcore
, you can use Python's standard logging to output debug level information.
For example, the following configuration...
import logging\nimport httpcore\n\nlogging.basicConfig(\n format=\"%(levelname)s [%(asctime)s] %(name)s - %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n level=logging.DEBUG\n)\n\nhttpcore.request('GET', 'https://www.example.com')\n
Will send debug level output to the console, or wherever stdout
is directed to...
DEBUG [2023-01-09 14:44:00] httpcore.connection - connect_tcp.started host='www.example.com' port=443 local_address=None timeout=None\nDEBUG [2023-01-09 14:44:00] httpcore.connection - connect_tcp.complete return_value=<httpcore.backends.sync.SyncStream object at 0x109ba6610>\nDEBUG [2023-01-09 14:44:00] httpcore.connection - start_tls.started ssl_context=<ssl.SSLContext object at 0x109e427b0> server_hostname='www.example.com' timeout=None\nDEBUG [2023-01-09 14:44:00] httpcore.connection - start_tls.complete return_value=<httpcore.backends.sync.SyncStream object at 0x109e8b050>\nDEBUG [2023-01-09 14:44:00] httpcore.http11 - send_request_headers.started request=<Request [b'GET']>\nDEBUG [2023-01-09 14:44:00] httpcore.http11 - send_request_headers.complete\nDEBUG [2023-01-09 14:44:00] httpcore.http11 - send_request_body.started request=<Request [b'GET']>\nDEBUG [2023-01-09 14:44:00] httpcore.http11 - send_request_body.complete\nDEBUG [2023-01-09 14:44:00] httpcore.http11 - receive_response_headers.started request=<Request [b'GET']>\nDEBUG [2023-01-09 14:44:00] httpcore.http11 - receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Age', b'572646'), (b'Cache-Control', b'max-age=604800'), (b'Content-Type', b'text/html; charset=UTF-8'), (b'Date', b'Mon, 09 Jan 2023 14:44:00 GMT'), (b'Etag', b'\"3147526947+ident\"'), (b'Expires', b'Mon, 16 Jan 2023 14:44:00 GMT'), (b'Last-Modified', b'Thu, 17 Oct 2019 07:18:26 GMT'), (b'Server', b'ECS (nyb/1D18)'), (b'Vary', b'Accept-Encoding'), (b'X-Cache', b'HIT'), (b'Content-Length', b'1256')])\nDEBUG [2023-01-09 14:44:00] httpcore.http11 - receive_response_body.started request=<Request [b'GET']>\nDEBUG [2023-01-09 14:44:00] httpcore.http11 - receive_response_body.complete\nDEBUG [2023-01-09 14:44:00] httpcore.http11 - response_closed.started\nDEBUG [2023-01-09 14:44:00] httpcore.http11 - response_closed.complete\nDEBUG [2023-01-09 14:44:00] httpcore.connection - close.started\nDEBUG [2023-01-09 14:44:00] httpcore.connection - close.complete\n
The exact formatting of the debug logging may be subject to change across different versions of httpcore
. If you need to rely on a particular format it is recommended that you pin installation of the package to a fixed version.
The API layer at which httpcore
interacts with the network is described as the network backend. Various backend implementations are provided, allowing httpcore
to handle networking in different runtime contexts.
Typically you won't need to specify a network backend, as a default will automatically be selected. However, understanding how the network backends fit in may be useful if you want to better understand the underlying architecture. Let's start by seeing how we can explicitly select the network backend.
First we're making a standard HTTP request, using a connection pool:
import httpcore\n\nwith httpcore.ConnectionPool() as http:\n response = http.request('GET', 'https://www.example.com')\n print(response)\n
We can get the same behavior, while being explicit about our selection of the network backend:
import httpcore\n\nnetwork_backend = httpcore.SyncBackend()\nwith httpcore.ConnectionPool(network_backend=network_backend) as http:\n response = http.request('GET', 'https://www.example.com')\n print(response)\n
The httpcore.SyncBackend()
implementation handles the opening of TCP connections, and operations on the socket stream, such as reading, writing, and closing the connection.
We can get a better understanding of this by using a network backend to send a basic HTTP/1.1 request directly:
import httpcore\n\n# Create an SSL context using 'certifi' for the certificates.\nssl_context = httpcore.default_ssl_context()\n\n# A basic HTTP/1.1 request as a plain bytestring,\n# ending with a blank line to terminate the headers.\nrequest = b'\\r\\n'.join([\n b'GET / HTTP/1.1',\n b'Host: www.example.com',\n b'Accept: */*',\n b'Connection: close',\n b'',\n b''\n])\n\n# Open a TCP stream and upgrade it to SSL.\nnetwork_backend = httpcore.SyncBackend()\nnetwork_stream = network_backend.connect_tcp(\"www.example.com\", 443)\nnetwork_stream = network_stream.start_tls(ssl_context, server_hostname=\"www.example.com\")\n\n# Send the HTTP request.\nnetwork_stream.write(request)\n\n# Read the HTTP response.\nwhile True:\n response = network_stream.read(max_bytes=4096)\n if response == b'':\n break\n print(response)\n\n# The output should look something like this:\n#\n# b'HTTP/1.1 200 OK\\r\\nAge: 600005\\r\\n [...] Content-Length: 1256\\r\\nConnection: close\\r\\n\\r\\n'\n# b'<!doctype html>\\n<html>\\n<head>\\n <title>Example Domain</title> [...] </html>\\n'\n
"},{"location":"network-backends/#async-network-backends","title":"Async network backends","text":"If we're working with an async
codebase, then we need to select a different backend.
The httpcore.AnyIOBackend
is suitable for usage if you're running under asyncio
. This is a networking backend implemented using the anyio
package.
import httpcore\nimport asyncio\n\nasync def main():\n network_backend = httpcore.AnyIOBackend()\n async with httpcore.AsyncConnectionPool(network_backend=network_backend) as http:\n response = await http.request('GET', 'https://www.example.com')\n print(response)\n\nasyncio.run(main())\n
The AnyIOBackend
will work when running under either asyncio
or trio
. However, if you're working with async using the trio
framework, then we recommend using the httpcore.TrioBackend
.
This will give you the same kind of networking behavior you'd have using AnyIOBackend
, but there will be a little less indirection so it will be marginally more efficient and will present cleaner tracebacks in error cases.
import httpcore\nimport trio\n\nasync def main():\n network_backend = httpcore.TrioBackend()\n async with httpcore.AsyncConnectionPool(network_backend=network_backend) as http:\n response = await http.request('GET', 'https://www.example.com')\n print(response)\n\ntrio.run(main)\n
"},{"location":"network-backends/#mock-network-backends","title":"Mock network backends","text":"There are also mock network backends available that can be useful for testing purposes. These backends accept a list of bytes, and return network stream interfaces that return those byte streams.
Here's an example of mocking a simple HTTP/1.1 response...
import httpcore\n\nnetwork_backend = httpcore.MockBackend([\n b\"HTTP/1.1 200 OK\\r\\n\",\n b\"Content-Type: plain/text\\r\\n\",\n b\"Content-Length: 13\\r\\n\",\n b\"\\r\\n\",\n b\"Hello, world!\",\n])\nwith httpcore.ConnectionPool(network_backend=network_backend) as http:\n response = http.request(\"GET\", \"https://example.com/\")\n print(response.extensions['http_version'])\n print(response.status)\n print(response.content)\n
Mocking an HTTP/2 response is more complex, since it uses a binary format...
import hpack\nimport hyperframe.frame\nimport httpcore\n\ncontent = [\n hyperframe.frame.SettingsFrame().serialize(),\n hyperframe.frame.HeadersFrame(\n stream_id=1,\n data=hpack.Encoder().encode(\n [\n (b\":status\", b\"200\"),\n (b\"content-type\", b\"plain/text\"),\n ]\n ),\n flags=[\"END_HEADERS\"],\n ).serialize(),\n hyperframe.frame.DataFrame(\n stream_id=1, data=b\"Hello, world!\", flags=[\"END_STREAM\"]\n ).serialize(),\n]\n# Note that we instantiate the mock backend with an `http2=True` argument.\n# This ensures that the mock network stream acts as if the `h2` ALPN flag has been set,\n# and causes the connection pool to interact with the connection using HTTP/2.\nnetwork_backend = httpcore.MockBackend(content, http2=True)\nwith httpcore.ConnectionPool(network_backend=network_backend) as http:\n response = http.request(\"GET\", \"https://example.com/\")\n print(response.extensions['http_version'])\n print(response.status)\n print(response.content)\n
"},{"location":"network-backends/#custom-network-backends","title":"Custom network backends","text":"The base interface for network backends is provided as public API, allowing you to implement custom networking behavior.
You can use this to provide advanced networking functionality, such as the traffic recording shown in the example below.
Here's an example that records the network response to a file on disk:
import httpcore\n\n\nclass RecordingNetworkStream(httpcore.NetworkStream):\n def __init__(self, record_file, stream):\n self.record_file = record_file\n self.stream = stream\n\n def read(self, max_bytes, timeout=None):\n data = self.stream.read(max_bytes, timeout=timeout)\n self.record_file.write(data)\n return data\n\n def write(self, buffer, timeout=None):\n self.stream.write(buffer, timeout=timeout)\n\n def close(self) -> None:\n self.stream.close()\n\n def start_tls(\n self,\n ssl_context,\n server_hostname=None,\n timeout=None,\n ):\n self.stream = self.stream.start_tls(\n ssl_context, server_hostname=server_hostname, timeout=timeout\n )\n return self\n\n def get_extra_info(self, info):\n return self.stream.get_extra_info(info)\n\n\nclass RecordingNetworkBackend(httpcore.NetworkBackend):\n \"\"\"\n A custom network backend that records network responses.\n \"\"\"\n def __init__(self, record_file):\n self.record_file = record_file\n self.backend = httpcore.SyncBackend()\n\n def connect_tcp(\n self,\n host,\n port,\n timeout=None,\n local_address=None,\n socket_options=None,\n ):\n # Note that we're only using a single record file here,\n # so even if multiple connections are opened the network\n # traffic will all write to the same file.\n\n # An alternative implementation might automatically use\n # a new file for each opened connection.\n stream = self.backend.connect_tcp(\n host,\n port,\n timeout=timeout,\n local_address=local_address,\n socket_options=socket_options\n )\n return RecordingNetworkStream(self.record_file, stream)\n\n\n# Once you make the request, the raw HTTP/1.1 response will be available\n# in the 'network-recording' file.\n#\n# Try switching to `http2=True` to see the difference when recording HTTP/2 binary network traffic,\n# or add `headers={'Accept-Encoding': 'gzip'}` to see HTTP content compression.\nwith open(\"network-recording\", \"wb\") as record_file:\n network_backend = RecordingNetworkBackend(record_file)\n with httpcore.ConnectionPool(network_backend=network_backend) as http:\n response = http.request(\"GET\", \"https://www.example.com/\")\n print(response)\n
"},{"location":"network-backends/#reference","title":"Reference","text":""},{"location":"network-backends/#networking-backends","title":"Networking Backends","text":"httpcore.SyncBackend
httpcore.AnyIOBackend
httpcore.TrioBackend
httpcore.MockBackend
httpcore.MockStream
httpcore.AsyncMockBackend
httpcore.AsyncMockStream
httpcore.NetworkBackend
httpcore.NetworkStream
httpcore.AsyncNetworkBackend
httpcore.AsyncNetworkStream
The httpcore
package provides support for HTTP proxies, using either \"HTTP Forwarding\" or \"HTTP Tunnelling\". Forwarding is a proxy mechanism for sending requests to http
URLs via an intermediate proxy. Tunnelling is a proxy mechanism for sending requests to https
URLs via an intermediate proxy.
Sending requests via a proxy is very similar to sending requests using a standard connection pool:
import httpcore\n\nproxy = httpcore.HTTPProxy(proxy_url=\"http://127.0.0.1:8080/\")\nr = proxy.request(\"GET\", \"https://www.example.com/\")\n\nprint(r)\n# <Response [200]>\n
You can test the httpcore
proxy support, using the Python proxy.py
tool:
$ pip install proxy.py\n$ proxy --hostname 127.0.0.1 --port 8080\n
Requests will automatically use either forwarding or tunnelling, depending on whether the scheme is http
or https
.
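For example, with a single proxy instance (a sketch, assuming a local proxy is running as above)...
import httpcore\n\nproxy = httpcore.HTTPProxy(proxy_url=\"http://127.0.0.1:8080/\")\n\n# Sent using HTTP forwarding, since the target URL uses the 'http' scheme.\nr = proxy.request(\"GET\", \"http://www.example.com/\")\n\n# Sent using HTTP tunnelling (CONNECT), since the target URL uses the 'https' scheme.\nr = proxy.request(\"GET\", \"https://www.example.com/\")\n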
Proxy authentication can be included in the initial configuration:
import httpcore\n\n# A `Proxy-Authorization` header will be included on the initial proxy connection.\nproxy = httpcore.HTTPProxy(\n proxy_url=\"http://127.0.0.1:8080/\",\n proxy_auth=(\"<username>\", \"<password>\")\n)\n
Custom headers can also be included:
import httpcore\nimport base64\n\n# Construct and include a `Proxy-Authorization` header.\nauth = base64.b64encode(b\"<username>:<password>\")\nproxy = httpcore.HTTPProxy(\n proxy_url=\"http://127.0.0.1:8080/\",\n proxy_headers={\"Proxy-Authorization\": b\"Basic \" + auth}\n)\n
"},{"location":"proxies/#proxy-ssl","title":"Proxy SSL","text":"The httpcore
package also supports HTTPS proxies for http and https destinations.
HTTPS proxies can be used in the same way that HTTP proxies are.
proxy = httpcore.HTTPProxy(proxy_url=\"https://127.0.0.1:8080/\")\n
Also, when using HTTPS proxies, you may need to configure the SSL context, which you can do with the proxy_ssl_context
argument.
import ssl\nimport httpcore\n\nproxy_ssl_context = ssl.create_default_context()\nproxy_ssl_context.check_hostname = False\n\nproxy = httpcore.HTTPProxy('https://127.0.0.1:8080/', proxy_ssl_context=proxy_ssl_context)\n
It is important to note that the ssl_context
argument is always used for the remote connection, and the proxy_ssl_context
argument is always used for the proxy connection.
If you use proxies, keep in mind that the httpcore
package only supports proxies to HTTP/1.1 servers.
The httpcore
package also supports proxies using the SOCKS5 protocol.
Make sure to install the optional dependency using pip install httpcore[socks]
.
The SOCKSProxy
class should be used instead of a standard connection pool:
import httpcore\n\n# Note that the SOCKS port is 1080.\nproxy = httpcore.SOCKSProxy(proxy_url=\"socks5://127.0.0.1:1080/\")\nr = proxy.request(\"GET\", \"https://www.example.com/\")\n
Authentication via SOCKS is also supported:
import httpcore\n\nproxy = httpcore.SOCKSProxy(\n proxy_url=\"socks5://127.0.0.1:8080/\",\n proxy_auth=(\"<username>\", \"<password>\")\n)\nr = proxy.request(\"GET\", \"https://www.example.com/\")\n
"},{"location":"proxies/#reference","title":"Reference","text":""},{"location":"proxies/#httpcorehttpproxy","title":"httpcore.HTTPProxy
","text":"A connection pool that sends requests via an HTTP proxy.
"},{"location":"proxies/#httpcore.HTTPProxy.__init__","title":"__init__(self, proxy_url, proxy_auth=None, proxy_headers=None, ssl_context=None, proxy_ssl_context=None, max_connections=10, max_keepalive_connections=None, keepalive_expiry=None, http1=True, http2=False, retries=0, local_address=None, uds=None, network_backend=None, socket_options=None)
special
","text":"A connection pool for making HTTP requests.
Parameters:
Name Type Description Defaultproxy_url
Union[httpcore.URL, bytes, str]
The URL to use when connecting to the proxy server. For example \"http://127.0.0.1:8080/\"
.
proxy_auth
Optional[Tuple[Union[bytes, str], Union[bytes, str]]]
Any proxy authentication as a two-tuple of (username, password). May be either bytes or ascii-only str.
None
proxy_headers
Union[Mapping[Union[bytes, str], Union[bytes, str]], Sequence[Tuple[Union[bytes, str], Union[bytes, str]]]]
Any HTTP headers to use for the proxy requests. For example {\"Proxy-Authorization\": \"Basic <username>:<password>\"}
.
None
ssl_context
Optional[ssl.SSLContext]
An SSL context to use for verifying connections. If not specified, the default httpcore.default_ssl_context()
will be used.
None
proxy_ssl_context
Optional[ssl.SSLContext]
The same as ssl_context
, but for a proxy server rather than a remote origin.
None
max_connections
Optional[int]
The maximum number of concurrent HTTP connections that the pool should allow. Any attempt to send a request on a pool that would exceed this amount will block until a connection is available.
10
max_keepalive_connections
Optional[int]
The maximum number of idle HTTP connections that will be maintained in the pool.
None
keepalive_expiry
Optional[float]
The duration in seconds that an idle HTTP connection may be maintained for before being expired from the pool.
None
http1
bool
A boolean indicating if HTTP/1.1 requests should be supported by the connection pool. Defaults to True.
True
http2
bool
A boolean indicating if HTTP/2 requests should be supported by the connection pool. Defaults to False.
False
retries
int
The maximum number of retries when trying to establish a connection.
0
local_address
Optional[str]
Local address to connect from. Can also be used to connect using a particular address family. Using local_address=\"0.0.0.0\"
will connect using an AF_INET
address (IPv4), while using local_address=\"::\"
will connect using an AF_INET6
address (IPv6).
None
uds
Optional[str]
Path to a Unix Domain Socket to use instead of TCP sockets.
None
network_backend
Optional[httpcore.NetworkBackend]
A backend instance to use for handling network I/O.
None
"},{"location":"quickstart/","title":"Quickstart","text":"For convenience, the httpcore
package provides a couple of top-level functions that you can use for sending HTTP requests. You probably don't want to integrate against these functions if you're writing a library that uses httpcore
, but you might find them useful for testing httpcore
from the command-line, or if you're writing a simple script that doesn't require any of the connection pooling or advanced configuration that httpcore
offers.
We'll start off by sending a request...
import httpcore\n\nresponse = httpcore.request(\"GET\", \"https://www.example.com/\")\n\nprint(response)\n# <Response [200]>\nprint(response.status)\n# 200\nprint(response.headers)\n# [(b'Accept-Ranges', b'bytes'), (b'Age', b'557328'), (b'Cache-Control', b'max-age=604800'), ...]\nprint(response.content)\n# b'<!doctype html>\\n<html>\\n<head>\\n<title>Example Domain</title>\\n\\n<meta charset=\"utf-8\"/>\\n ...'\n
"},{"location":"quickstart/#request-headers","title":"Request headers","text":"Request headers may be included either in a dictionary style, or as a list of two-tuples.
import httpcore\nimport json\n\nheaders = {'User-Agent': 'httpcore'}\nr = httpcore.request('GET', 'https://httpbin.org/headers', headers=headers)\n\nprint(json.loads(r.content))\n# {\n# 'headers': {\n# 'Host': 'httpbin.org',\n# 'User-Agent': 'httpcore',\n# 'X-Amzn-Trace-Id': 'Root=1-616ff5de-5ea1b7e12766f1cf3b8e3a33'\n# }\n# }\n
The keys and values may either be provided as strings or as bytes. Where strings are provided they may only contain characters within the ASCII range chr(0)
- chr(127)
. To include characters outside this range you must deal with any character encoding explicitly, and pass bytes as the header keys/values.
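For instance, a header value containing non-ASCII characters needs to be encoded explicitly and passed as bytes. This is a sketch only, and the \"X-Example\" header is purely illustrative...
import httpcore\n\n# Encode the value ourselves, rather than passing a non-ASCII string.\nheaders = [(b\"X-Example\", \"déjà vu\".encode(\"utf-8\"))]\nr = httpcore.request(\"GET\", \"https://httpbin.org/headers\", headers=headers)\n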
The Host
header will always be automatically included in any outgoing request, as it is strictly required to be present by the HTTP protocol.
Note that the X-Amzn-Trace-Id
header shown in the example above is not an outgoing request header, but has been added by a gateway server.
A request body can be included either as bytes...
import httpcore\nimport json\n\nr = httpcore.request('POST', 'https://httpbin.org/post', content=b'Hello, world')\n\nprint(json.loads(r.content))\n# {\n# 'args': {},\n# 'data': 'Hello, world',\n# 'files': {},\n# 'form': {},\n# 'headers': {\n# 'Host': 'httpbin.org',\n# 'Content-Length': '12',\n# 'X-Amzn-Trace-Id': 'Root=1-61700258-00e338a124ca55854bf8435f'\n# },\n# 'json': None,\n# 'origin': '68.41.35.196',\n# 'url': 'https://httpbin.org/post'\n# }\n
Or as an iterable that returns bytes...
import httpcore\nimport json\n\nwith open(\"hello-world.txt\", \"rb\") as input_file:\n r = httpcore.request('POST', 'https://httpbin.org/post', content=input_file)\n\nprint(json.loads(r.content))\n# {\n# 'args': {},\n# 'data': 'Hello, world',\n# 'files': {},\n# 'form': {},\n# 'headers': {\n# 'Host': 'httpbin.org',\n# 'Transfer-Encoding': 'chunked',\n# 'X-Amzn-Trace-Id': 'Root=1-61700258-00e338a124ca55854bf8435f'\n# },\n# 'json': None,\n# 'origin': '68.41.35.196',\n# 'url': 'https://httpbin.org/post'\n# }\n
When a request body is included, either a Content-Length
header or a Transfer-Encoding: chunked
header will be automatically included.
The Content-Length
header is used when passing bytes, and indicates an HTTP request with a body of a pre-determined length.
The Transfer-Encoding: chunked
header is the mechanism that HTTP/1.1 uses for sending HTTP request bodies without a pre-determined length.
When using the httpcore.request()
function, the response body will automatically be read to completion, and made available in the response.content
attribute.
Sometimes you may be dealing with large responses and not want to read the entire response into memory. The httpcore.stream()
function provides a mechanism for sending a request and dealing with a streaming response:
import httpcore\n\nwith httpcore.stream('GET', 'https://example.com') as response:\n for chunk in response.iter_stream():\n print(f\"Downloaded: {chunk}\")\n
Here's a more complete example that demonstrates downloading a response:
import httpcore\n\nwith httpcore.stream('GET', 'https://speed.hetzner.de/100MB.bin') as response:\n with open(\"download.bin\", \"wb\") as output_file:\n for chunk in response.iter_stream():\n output_file.write(chunk)\n
The httpcore.stream()
API also allows you to conditionally read the response...
import httpcore\n\nwith httpcore.stream('GET', 'https://example.com') as response:\n content_length = [int(v) for k, v in response.headers if k.lower() == b'content-length'][0]\n if content_length > 100_000_000:\n raise Exception(\"Response too large.\")\n response.read() # `response.content` is now available.\n
"},{"location":"quickstart/#reference","title":"Reference","text":""},{"location":"quickstart/#httpcorerequest","title":"httpcore.request()
","text":"Sends an HTTP request, returning the response.
response = httpcore.request(\"GET\", \"https://www.example.com/\")\n
Parameters:
Name Type Description Defaultmethod
Union[bytes, str]
The HTTP method for the request. Typically one of \"GET\"
, \"OPTIONS\"
, \"HEAD\"
, \"POST\"
, \"PUT\"
, \"PATCH\"
, or \"DELETE\"
.
url
Union[httpcore.URL, bytes, str]
The URL of the HTTP request. Either as an instance of httpcore.URL
, or as str/bytes.
headers
Union[Sequence[Tuple[Union[bytes, str], Union[bytes, str]]], Mapping[Union[bytes, str], Union[bytes, str]]]
The HTTP request headers. Either as a dictionary of str/bytes, or as a list of two-tuples of str/bytes.
None
content
Union[bytes, Iterator[bytes]]
The content of the request body. Either as bytes, or as a bytes iterator.
None
extensions
Optional[MutableMapping[str, Any]]
A dictionary of optional extra information included on the request. Possible keys include \"timeout\"
.
None
Returns:
Type DescriptionResponse
An instance of httpcore.Response
.
httpcore.stream()
","text":"Sends an HTTP request, returning the response within a content manager.
with httpcore.stream(\"GET\", \"https://www.example.com/\") as response:\n ...\n
When using the stream()
function, the body of the response will not be automatically read. If you want to access the response body you should either use content = response.read()
, or for chunk in response.iter_content()
.
Parameters:
Name Type Description Defaultmethod
Union[bytes, str]
The HTTP method for the request. Typically one of \"GET\"
, \"OPTIONS\"
, \"HEAD\"
, \"POST\"
, \"PUT\"
, \"PATCH\"
, or \"DELETE\"
.
url
Union[httpcore.URL, bytes, str]
The URL of the HTTP request. Either as an instance of httpcore.URL
, or as str/bytes.
headers
Union[Sequence[Tuple[Union[bytes, str], Union[bytes, str]]], Mapping[Union[bytes, str], Union[bytes, str]]]
The HTTP request headers. Either as a dictionary of str/bytes, or as a list of two-tuples of str/bytes.
None
content
Union[bytes, Iterator[bytes]]
The content of the request body. Either as bytes, or as a bytes iterator.
None
extensions
Optional[MutableMapping[str, Any]]
A dictionary of optional extra information included on the request. Possible keys include \"timeout\"
.
None
Returns:
Type DescriptionIterator[httpcore.Response]
An instance of httpcore.Response
.
TODO
"},{"location":"requests-responses-urls/#requests","title":"Requests","text":"Request instances in httpcore
are deliberately simple, and only include the essential information required to represent an HTTP request.
Properties on the request are plain byte-wise representations.
>>> request = httpcore.Request(\"GET\", \"https://www.example.com/\")\n>>> request.method\nb\"GET\"\n>>> request.url\nhttpcore.URL(scheme=b\"https\", host=b\"www.example.com\", port=None, target=b\"/\")\n>>> request.headers\n[(b'Host', b'www.example.com')]\n>>> request.stream\n<httpcore.ByteStream [0 bytes]>\n
The interface is liberal in the types that it accepts, but specific in the properties that it uses to represent them. For example, headers may be specified as a dictionary of strings, but internally are represented as a list of (byte, byte)
tuples.
```python
headers = {\"User-Agent\": \"custom\"} request = httpcore.Request(\"GET\", \"https://www.example.com/\", headers=headers) request.headers [(b'Host', b'www.example.com'), (b\"User-Agent\", b\"custom\")]
"},{"location":"requests-responses-urls/#responses","title":"Responses","text":"...
"},{"location":"requests-responses-urls/#urls","title":"URLs","text":"...
"},{"location":"requests-responses-urls/#reference","title":"Reference","text":""},{"location":"requests-responses-urls/#httpcorerequest","title":"httpcore.Request
","text":"An HTTP request.
"},{"location":"requests-responses-urls/#httpcore.Request.__init__","title":"__init__(self, method, url, *, headers=None, content=None, extensions=None)
special
","text":"Parameters:
Name Type Description Defaultmethod
Union[bytes, str]
The HTTP request method, either as a string or bytes. For example: GET
.
url
Union[httpcore.URL, bytes, str]
The request URL, either as a URL
instance, or as a string or bytes. For example: \"https://www.example.com\".
headers
Union[Sequence[Tuple[Union[bytes, str], Union[bytes, str]]], Mapping[Union[bytes, str], Union[bytes, str]]]
The HTTP request headers.
None
content
Union[bytes, Iterable[bytes], AsyncIterable[bytes]]
The content of the response body.
None
extensions
Optional[MutableMapping[str, Any]]
A dictionary of optional extra information included on the request. Possible keys include \"timeout\"
, and \"trace\"
.
None
"},{"location":"requests-responses-urls/#httpcoreresponse","title":"httpcore.Response
","text":"An HTTP response.
"},{"location":"requests-responses-urls/#httpcore.Response.__init__","title":"__init__(self, status, *, headers=None, content=None, extensions=None)
special
","text":"Parameters:
Name Type Description Defaultstatus
int
The HTTP status code of the response. For example 200
.
headers
Union[Sequence[Tuple[Union[bytes, str], Union[bytes, str]]], Mapping[Union[bytes, str], Union[bytes, str]]]
The HTTP response headers.
None
content
Union[bytes, Iterable[bytes], AsyncIterable[bytes]]
The content of the response body.
None
extensions
Optional[MutableMapping[str, Any]]
A dictionary of optional extra information included on the response. Possible keys include \"http_version\"
, \"reason_phrase\"
, and \"network_stream\"
.
None
"},{"location":"requests-responses-urls/#httpcoreurl","title":"httpcore.URL
","text":"Represents the URL against which an HTTP request may be made.
The URL may either be specified as a plain string, for convienence:
url = httpcore.URL(\"https://www.example.com/\")\n
Or be constructed with explicitily pre-parsed components:
url = httpcore.URL(scheme=b'https', host=b'www.example.com', port=None, target=b'/')\n
Using this second more explicit style allows integrations that are using httpcore
to pass through URLs that have already been parsed in order to use libraries such as rfc-3986
rather than relying on the stdlib. It also ensures that URL parsing is treated identically at both the networking level and at any higher layers of abstraction.
The four components are important here, as they allow the URL to be precisely specified in a pre-parsed format. They also allow certain types of request to be created that could not otherwise be expressed.
For example, an HTTP request to http://www.example.com/
forwarded via a proxy at http://localhost:8080
...
# Constructs an HTTP request with a complete URL as the target:\n# GET https://www.example.com/ HTTP/1.1\nurl = httpcore.URL(\n scheme=b'http',\n host=b'localhost',\n port=8080,\n target=b'https://www.example.com/'\n)\nrequest = httpcore.Request(\n method=\"GET\",\n url=url\n)\n
Another example is constructing an OPTIONS *
request...
# Constructs an 'OPTIONS *' HTTP request:\n# OPTIONS * HTTP/1.1\nurl = httpcore.URL(scheme=b'https', host=b'www.example.com', target=b'*')\nrequest = httpcore.Request(method=\"OPTIONS\", url=url)\n
This kind of request is not possible to formulate with a URL string, because the /
delimiter is always used to demark the target from the host/port portion of the URL.
For convenience, string-like arguments may be specified either as strings or as bytes. However, once a request is being issue over-the-wire, the URL components are always ultimately required to be a bytewise representation.
In order to avoid any ambiguity over character encodings, when strings are used as arguments, they must be strictly limited to the ASCII range chr(0)
-chr(127)
. If you require a bytewise representation that is outside this range you must handle the character encoding directly, and pass a bytes instance.
__init__(self, url='', *, scheme=b'', host=b'', port=None, target=b'')
special
","text":"Parameters:
Name Type Description Defaulturl
Union[bytes, str]
The complete URL as a string or bytes.
''
scheme
Union[bytes, str]
The URL scheme as a string or bytes. Typically either \"http\"
or \"https\"
.
b''
host
Union[bytes, str]
The URL host as a string or bytes. Such as \"www.example.com\"
.
b''
port
Optional[int]
The port to connect to. Either an integer or None
.
None
target
Union[bytes, str]
The target of the HTTP request. Such as \"/items?search=red\"
.
b''
"},{"location":"table-of-contents/","title":"API Reference","text":"httpcore.request()
httpcore.stream()
httpcore.Request
httpcore.Response
httpcore.URL
httpcore.ConnectionPool
httpcore.HTTPProxy
httpcore.HTTPConnection
httpcore.HTTP11Connection
httpcore.HTTP2Connection
httpcore.AsyncConnectionPool
httpcore.AsyncHTTPProxy
httpcore.AsyncHTTPConnection
httpcore.AsyncHTTP11Connection
httpcore.AsyncHTTP2Connection
httpcore.backends.sync.SyncBackend
httpcore.backends.mock.MockBackend
httpcore.backends.auto.AutoBackend
httpcore.backends.asyncio.AsyncioBackend
httpcore.backends.trio.TrioBackend
httpcore.backends.mock.AsyncMockBackend
httpcore.backends.base.NetworkBackend
httpcore.backends.base.AsyncNetworkBackend
httpcore.TimeoutException
httpcore.PoolTimeout
httpcore.ConnectTimeout
httpcore.ReadTimeout
httpcore.WriteTimeout
httpcore.NetworkError
httpcore.ConnectError
httpcore.ReadError
httpcore.WriteError
httpcore.ProtocolError
httpcore.RemoteProtocolError
httpcore.LocalProtocolError
httpcore.ProxyError
httpcore.UnsupportedProtocol
httpcore.request()
httpcore.stream()
httpcore.Request
httpcore.Response
httpcore.URL
httpcore.ConnectionPool
httpcore.HTTPProxy
httpcore.HTTPConnection
httpcore.HTTP11Connection
httpcore.HTTP2Connection
httpcore.AsyncConnectionPool
httpcore.AsyncHTTPProxy
httpcore.AsyncHTTPConnection
httpcore.AsyncHTTP11Connection
httpcore.AsyncHTTP2Connection
httpcore.backends.sync.SyncBackend
httpcore.backends.mock.MockBackend
httpcore.backends.auto.AutoBackend
httpcore.backends.asyncio.AsyncioBackend
httpcore.backends.trio.TrioBackend
httpcore.backends.mock.AsyncMockBackend
httpcore.backends.base.NetworkBackend
httpcore.backends.base.AsyncNetworkBackend
httpcore.TimeoutException
httpcore.PoolTimeout
httpcore.ConnectTimeout
httpcore.ReadTimeout
httpcore.WriteTimeout
httpcore.NetworkError
httpcore.ConnectError
httpcore.ReadError
httpcore.WriteError
httpcore.ProtocolError
httpcore.RemoteProtocolError
httpcore.LocalProtocolError
httpcore.ProxyError
httpcore.UnsupportedProtocol