diff --git a/doc/benchmarks.rst b/doc/benchmarks.rst
index b8c330fd..b0cff367 100644
--- a/doc/benchmarks.rst
+++ b/doc/benchmarks.rst
@@ -66,12 +66,15 @@ Async workload benchmark, which calls ``asyncio.gather()`` on a tree (6 levels d
 
 * ``async_tree``: no actual async work at any leaf node.
 * ``async_tree_io``: all leaf nodes simulate async IO workload (async sleep 50ms).
-* ``async_tree_memoization``: all leaf nodes simulate async IO workload with 90% of 
+* ``async_tree_memoization``: all leaf nodes simulate async IO workload with 90% of
   the data memoized.
 * ``async_tree_cpu_io_mixed``: half of the leaf nodes simulate CPU-bound workload
-  (``math.factorial(500)``) and the other half simulate the same workload as the 
+  (``math.factorial(500)``) and the other half simulate the same workload as the
   ``async_tree_memoization`` variant.
 
+These benchmarks also have an "eager" flavor that uses the asyncio eager task factory,
+if available.
+
 chameleon
 ---------
 
diff --git a/pyperformance/data-files/benchmarks/MANIFEST b/pyperformance/data-files/benchmarks/MANIFEST
index 7d62c5a0..1898bc0c 100644
--- a/pyperformance/data-files/benchmarks/MANIFEST
+++ b/pyperformance/data-files/benchmarks/MANIFEST
@@ -7,6 +7,10 @@ async_tree
 async_tree_cpu_io_mixed
 async_tree_io
 async_tree_memoization
+async_tree_eager
+async_tree_eager_cpu_io_mixed
+async_tree_eager_io
+async_tree_eager_memoization
 asyncio_tcp
 asyncio_tcp_ssl
 concurrent_imap
diff --git a/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_cpu_io_mixed.toml b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_cpu_io_mixed.toml
index 7fae0d4a..8303bc74 100644
--- a/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_cpu_io_mixed.toml
+++ b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_cpu_io_mixed.toml
@@ -1,4 +1,3 @@
 [tool.pyperformance]
 name = "async_tree_cpu_io_mixed"
 extra_opts = ["cpu_io_mixed"]
-
diff --git a/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager.toml b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager.toml
new file mode 100644
index 00000000..09d16ee8
--- /dev/null
+++ b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager.toml
@@ -0,0 +1,3 @@
+[tool.pyperformance]
+name = "async_tree_eager"
+extra_opts = ["eager"]
diff --git a/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager_cpu_io_mixed.toml b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager_cpu_io_mixed.toml
new file mode 100644
index 00000000..4766cb23
--- /dev/null
+++ b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager_cpu_io_mixed.toml
@@ -0,0 +1,3 @@
+[tool.pyperformance]
+name = "async_tree_eager_cpu_io_mixed"
+extra_opts = ["eager_cpu_io_mixed"]
diff --git a/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager_io.toml b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager_io.toml
new file mode 100644
index 00000000..de1dfb2a
--- /dev/null
+++ b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager_io.toml
@@ -0,0 +1,3 @@
+[tool.pyperformance]
+name = "async_tree_eager_io"
+extra_opts = ["eager_io"]
diff --git a/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager_memoization.toml b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager_memoization.toml
new file mode 100644
index 00000000..ec199382
--- /dev/null
+++ b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager_memoization.toml
@@ -0,0 +1,3 @@
+[tool.pyperformance]
+name = "async_tree_eager_memoization"
+extra_opts = ["eager_memoization"]
diff --git a/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_io.toml b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_io.toml
index 86898965..c8fab8da 100644
--- a/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_io.toml
+++ b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_io.toml
@@ -1,4 +1,3 @@
 [tool.pyperformance]
 name = "async_tree_io"
 extra_opts = ["io"]
-
diff --git a/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_memoization.toml b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_memoization.toml
index e644c4ea..4d394e38 100644
--- a/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_memoization.toml
+++ b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_memoization.toml
@@ -1,4 +1,3 @@
 [tool.pyperformance]
 name = "async_tree_memoization"
 extra_opts = ["memoization"]
-
diff --git a/pyperformance/data-files/benchmarks/bm_async_tree/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_async_tree/run_benchmark.py
index ebffd5de..72fc917c 100644
--- a/pyperformance/data-files/benchmarks/bm_async_tree/run_benchmark.py
+++ b/pyperformance/data-files/benchmarks/bm_async_tree/run_benchmark.py
@@ -6,11 +6,14 @@
 
 1) "none": No actual async work in the async tree.
 2) "io": All leaf nodes simulate async IO workload (async sleep 50ms).
-3) "memoization": All leaf nodes simulate async IO workload with 90% of 
+3) "memoization": All leaf nodes simulate async IO workload with 90% of
    the data memoized
-4) "cpu_io_mixed": Half of the leaf nodes simulate CPU-bound workload and 
-   the other half simulate the same workload as the 
+4) "cpu_io_mixed": Half of the leaf nodes simulate CPU-bound workload and
+   the other half simulate the same workload as the
    "memoization" variant.
+
+All variants also have an "eager" flavor that uses
+the asyncio eager task factory (if available).
 """
 
 
@@ -57,16 +60,32 @@ async def run(self):
         await self.recurse(NUM_RECURSE_LEVELS)
 
 
+class EagerMixin:
+    async def run(self):
+        loop = asyncio.get_running_loop()
+        if hasattr(asyncio, 'eager_task_factory'):
+            loop.set_task_factory(asyncio.eager_task_factory)
+        return await super().run()
+
+
 class NoneAsyncTree(AsyncTree):
     async def workload_func(self):
         return
 
 
+class EagerAsyncTree(EagerMixin, NoneAsyncTree):
+    pass
+
+
 class IOAsyncTree(AsyncTree):
     async def workload_func(self):
         await self.mock_io_call()
 
 
+class EagerIOAsyncTree(EagerMixin, IOAsyncTree):
+    pass
+
+
 class MemoizationAsyncTree(AsyncTree):
     async def workload_func(self):
         # deterministic random, seed set in AsyncTree.__init__()
@@ -82,6 +101,10 @@ async def workload_func(self):
         return data
 
 
+class EagerMemoizationAsyncTree(EagerMixin, MemoizationAsyncTree):
+    pass
+
+
 class CpuIoMixedAsyncTree(MemoizationAsyncTree):
     async def workload_func(self):
         # deterministic random, seed set in AsyncTree.__init__()
@@ -92,6 +115,10 @@ async def workload_func(self):
         return await MemoizationAsyncTree.workload_func(self)
 
 
+class EagerCpuIoMixedAsyncTree(EagerMixin, CpuIoMixedAsyncTree):
+    pass
+
+
 def add_metadata(runner):
     runner.metadata["description"] = "Async tree workloads."
     runner.metadata["async_tree_recurse_levels"] = NUM_RECURSE_LEVELS
@@ -115,10 +142,10 @@ def add_parser_args(parser):
 Determines which benchmark to run. Options:
 1) "none": No actual async work in the async tree.
 2) "io": All leaf nodes simulate async IO workload (async sleep 50ms).
-3) "memoization": All leaf nodes simulate async IO workload with 90% of +3) "memoization": All leaf nodes simulate async IO workload with 90% of the data memoized -4) "cpu_io_mixed": Half of the leaf nodes simulate CPU-bound workload and - the other half simulate the same workload as the +4) "cpu_io_mixed": Half of the leaf nodes simulate CPU-bound workload and + the other half simulate the same workload as the "memoization" variant. """, ) @@ -126,9 +153,13 @@ def add_parser_args(parser): BENCHMARKS = { "none": NoneAsyncTree, + "eager": EagerAsyncTree, "io": IOAsyncTree, + "eager_io": EagerIOAsyncTree, "memoization": MemoizationAsyncTree, + "eager_memoization": EagerMemoizationAsyncTree, "cpu_io_mixed": CpuIoMixedAsyncTree, + "eager_cpu_io_mixed": EagerCpuIoMixedAsyncTree, } @@ -142,4 +173,3 @@ def add_parser_args(parser): async_tree_class = BENCHMARKS[benchmark] async_tree = async_tree_class() runner.bench_async_func(f"async_tree_{benchmark}", async_tree.run) -