From 358269c7619d4fdbe634ba4ee582c967ff5715f2 Mon Sep 17 00:00:00 2001
From: cclauss
Date: Sun, 19 Jun 2016 15:12:51 +0200
Subject: [PATCH 1/3] Retrieve four things from the web with concurrency

Add a simple example that shows how to create a client which gathers
multiple web resources _concurrently_.

I am not sure whether this example is serial or concurrent. :-(  If it
is serial, it would be quite helpful for readers to see how to make it
concurrent.
---
 README.rst | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/README.rst b/README.rst
index 1be665b5bf..049af087d3 100644
--- a/README.rst
+++ b/README.rst
@@ -51,6 +51,31 @@ To retrieve something from the web:

         print(html)

+To retrieve four things from the web with concurrency:
+
+.. code-block:: python
+
+    import aiohttp
+    import asyncio
+
+    async def fetch(session, url):
+        with aiohttp.Timeout(10):
+            async with session.get(url) as response:
+                return await response.text()
+
+    URLS = '''http://python.org
+              http://golang.org
+              http://perl.org
+              http://ruby-lang.org'''.split()
+
+    if __name__ == '__main__':
+        loop = asyncio.get_event_loop()
+        with aiohttp.ClientSession(loop=loop) as session:
+            coroutines = [fetch(session, url) for url in URLS]
+            pages = [loop.run_until_complete(coroutine) for coroutine in coroutines]
+            for url, page in zip(URLS, pages):
+                print('{:>15}\n{}\n{:.100}\n'.format(url, '=' * len(url), page))
+
 Server
 ^^^^^^

From 6ed388cdf6b03044805fdcfd759ff27bfbbb5cbd Mon Sep 17 00:00:00 2001
From: cclauss
Date: Sun, 19 Jun 2016 21:38:28 +0200
Subject: [PATCH 2/3] Update README.rst

---
 README.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.rst b/README.rst
index 049af087d3..e044ee88e9 100644
--- a/README.rst
+++ b/README.rst
@@ -74,7 +74,7 @@ To retrieve four things from the web with concurrency:
             coroutines = [fetch(session, url) for url in URLS]
             pages = [loop.run_until_complete(coroutine) for coroutine in coroutines]
             for url, page in zip(URLS, pages):
-                print('{:>15}\n{}\n{:.100}\n'.format(url, '=' * len(url), page))
+                print('{}:\n{}=\n{:.100}\n'.format(url, '=' * len(url), page))

 Server
 ^^^^^^

From 36789d564096593c3443167392f217a73fa4637d Mon Sep 17 00:00:00 2001
From: cclauss
Date: Tue, 21 Jun 2016 09:13:21 +0200
Subject: [PATCH 3/3] Use asyncio.gather()

Thanks @bhuman, asyncio.gather() was the correct way to go. I had been
doing [other experiments](https://github.com/cclauss/asyncio_hacks/blob/master/factorial_futures.py)
to get these asynchronous execution ideas clear in my head.
---
 README.rst | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/README.rst b/README.rst
index e044ee88e9..1b2b4a3d31 100644
--- a/README.rst
+++ b/README.rst
@@ -58,23 +58,26 @@ To retrieve four things from the web with concurrency:
     import aiohttp
     import asyncio

-    async def fetch(session, url):
-        with aiohttp.Timeout(10):
-            async with session.get(url) as response:
-                return await response.text()
-
     URLS = '''http://python.org
               http://golang.org
               http://perl.org
               http://ruby-lang.org'''.split()

-    if __name__ == '__main__':
+    async def fetch(session, url):
+        with aiohttp.Timeout(10):
+            async with session.get(url) as response:
+                return await response.text()
+
+    async def fetch_many(urls):
         loop = asyncio.get_event_loop()
         with aiohttp.ClientSession(loop=loop) as session:
-            coroutines = [fetch(session, url) for url in URLS]
-            pages = [loop.run_until_complete(coroutine) for coroutine in coroutines]
-            for url, page in zip(URLS, pages):
-                print('{}:\n{}=\n{:.100}\n'.format(url, '=' * len(url), page))
+            return await asyncio.gather(*[fetch(session, url) for url in urls])
+
+    if __name__ == '__main__':
+        loop = asyncio.get_event_loop()
+        pages = loop.run_until_complete(fetch_many(URLS))
+        for url, page in zip(URLS, pages):
+            print('{}:\n{}=\n{:.100}\n'.format(url, '=' * len(url), page))

 Server
 ^^^^^^
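
Below is a minimal, standalone sketch of the difference these patches are about:
calling loop.run_until_complete() once per coroutine, as in PATCH 1/3, runs the
fetches one after another, while asyncio.gather(), as in PATCH 3/3, schedules them
on the loop together. It is not part of the patches: fake_fetch and the one-second
delay are illustrative stand-ins for the real aiohttp requests, so it runs without
aiohttp or network access.

.. code-block:: python

    import asyncio
    import time

    async def fake_fetch(url):
        # Stand-in for an HTTP request: pretend each fetch takes one second.
        await asyncio.sleep(1)
        return 'contents of {}'.format(url)

    URLS = '''http://python.org
              http://golang.org
              http://perl.org
              http://ruby-lang.org'''.split()

    if __name__ == '__main__':
        loop = asyncio.get_event_loop()

        # PATCH 1/3 style: each run_until_complete() call blocks until its
        # coroutine finishes, so the four waits happen one after another
        # (roughly four seconds in total).
        start = time.time()
        pages = [loop.run_until_complete(fake_fetch(url)) for url in URLS]
        print('serial: {:.1f} seconds for {} pages'.format(time.time() - start, len(pages)))

        # PATCH 3/3 style: asyncio.gather() wraps all the coroutines into one
        # awaitable, so the loop interleaves their waits and the whole batch
        # finishes in roughly one second.
        start = time.time()
        pages = loop.run_until_complete(asyncio.gather(*[fake_fetch(url) for url in URLS]))
        print('gather: {:.1f} seconds for {} pages'.format(time.time() - start, len(pages)))

The same contrast holds with the real aiohttp calls in the patched README example:
gather() is what lets each response be awaited while the other requests are still
in flight.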