Refactor into single run_benchmark
savannahostrowski committed Nov 3, 2024
1 parent a1c1826 commit ee12b27
Showing 4 changed files with 55 additions and 26 deletions.
pyperformance/data-files/benchmarks/MANIFEST (2 additions, 2 deletions)

@@ -2,8 +2,8 @@
 name                     metafile
 2to3                     <local>
-argparse_args            <local:argparse>
-argparse_subparser       <local:argparse>
+argparse                 <local>
+argparse_subparsers      <local:argparse>
 async_generators         <local>
 async_tree               <local>
 async_tree_cpu_io_mixed  <local:async_tree>
@@ -6,5 +6,5 @@ urls = {repository = "https://github.com/python/pyperformance"}
 dynamic = ["version"]
 
 [tool.pyperformance]
-name = "argparse_args"
-tag = "argparse"
+name = "argparse_subparsers"
+extra_opts = ["subparsers"]
@@ -6,5 +6,5 @@ urls = {repository = "https://github.com/python/pyperformance"}
 dynamic = ["version"]
 
 [tool.pyperformance]
-name = "argparse_subparser"
-tag = "argparse"
+name = "argparse_many_optionals"
+extra_opts = ["many_optionals"]
@@ -1,6 +1,7 @@
 """
-Benchmark an argparse program with multiple subparsers, each with their own
-subcommands, and then parse a series of command-line arguments.
+Benchmark argparse programs with:
+1) multiple subparsers, each with their own subcommands, and then parse a series of command-line arguments.
+2) a large number of optional arguments, and then parse a series of command-line arguments.
 
 Author: Savannah Ostrowski
 """
Expand All @@ -9,7 +10,14 @@
import pyperf


def create_parser() -> argparse.ArgumentParser:
def generate_arguments(i: int) -> list:
arguments = ["input.txt", "output.txt"]
for i in range(i):
arguments.extend([f"--option{i}", f"value{i}"])
return arguments


def bm_many_optionals() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description="A version control system CLI")

parser.add_argument("--version", action="version", version="1.0")
@@ -37,9 +45,7 @@ def create_parser() -> argparse.ArgumentParser:
     )
 
     network_group = push_parser.add_argument_group("Network options")
-    network_group.add_argument(
-        "--dryrun", action="store_true", help="Simulate changes"
-    )
+    network_group.add_argument("--dryrun", action="store_true", help="Simulate changes")
     network_group.add_argument(
         "--timeout", type=int, default=30, help="Timeout in seconds"
     )
Expand All @@ -56,10 +62,6 @@ def create_parser() -> argparse.ArgumentParser:
global_group.add_argument("--verbose", action="store_true", help="Verbose output")
global_group.add_argument("--quiet", action="store_true", help="Quiet output")

return parser


def bench_argparse(loops: int) -> None:
argument_lists = [
["--verbose", "add", "file1.txt", "file2.txt"],
["add", "file1.txt", "file2.txt"],
Expand All @@ -77,19 +79,46 @@ def bench_argparse(loops: int) -> None:
],
]

parser = create_parser()
range_it = range(loops)
t0 = pyperf.perf_counter()
for arguments in argument_lists:
parser.parse_args(arguments)


def bm_subparsers() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()

parser.add_argument("input_file", type=str, help="The input file")
parser.add_argument("output_file", type=str, help="The output file")

for i in range(1000):
parser.add_argument(f"--option{i}", type=str, help=f"Optional argument {i}")

argument_lists = [
generate_arguments(500),
generate_arguments(1000),
]

for args in argument_lists:
parser.parse_args(args)


BENCHMARKS = {
"many_optionals": bm_many_optionals,
"subparsers": bm_subparsers,
}


for _ in range_it:
for args in argument_lists:
parser.parse_args(args)
def add_cmdline_args(cmd, args):
cmd.append(args.benchmark)

return pyperf.perf_counter() - t0

def add_parser_args(parser):
parser.add_argument("benchmark", choices=BENCHMARKS, help="Which benchmark to run.")

if __name__ == "__main__":
runner = pyperf.Runner()
runner.metadata["description"] = "Benchmark an argparse program with subparsers"
runner = pyperf.Runner(add_cmdline_args=add_cmdline_args)
runner.metadata["description"] = "Argparse benchmark"
add_parser_args(runner.argparser)
args = runner.parse_args()
benchmark = args.benchmark

runner.bench_time_func("argparse", bench_argparse)
runner.bench_func(args.benchmark, BENCHMARKS[args.benchmark])
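[Editor's note on the __main__ block: pyperf's Runner re-executes the script in worker subprocesses, so the selected benchmark has to be forwarded explicitly. add_parser_args registers the positional benchmark argument on the runner's own parser, and add_cmdline_args appends the chosen name to each worker's command line. The benchmark = args.benchmark local is assigned but never used; bench_func reads args.benchmark directly. Run standalone, the script would be driven something like this (the flags are standard pyperf options):

python run_benchmark.py many_optionals
python run_benchmark.py subparsers --fast -o subparsers.json]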
