Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Add benchmark for testing lots of args
  • Loading branch information
savannahostrowski committed Nov 2, 2024
commit 03c72951913d0e247f078208ac674b1ecff1c474
4 changes: 3 additions & 1 deletion pyperformance/data-files/benchmarks/MANIFEST
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,8 @@

name metafile
2to3 <local>
argparse <local>
argparse_subparser <local>
argparse_args <local>
async_generators <local>
async_tree <local>
async_tree_cpu_io_mixed <local:async_tree>
Expand Down Expand Up @@ -100,6 +101,7 @@ xml_etree <local>


#[groups]
#argparse
#asyncio
#startup
#regex
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,4 +6,5 @@ urls = {repository = "https://github.com/python/pyperformance"}
dynamic = ["version"]

[tool.pyperformance]
name = "argparse"
name = "argparse_args"
tag = "argparse"
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
"""
Benchmark argparse program with many optional arguments.

Author: Savannah Ostrowski
"""

import pyperf
import argparse

def create_parser() -> argparse.ArgumentParser:
    """Build the benchmark parser: two positional file arguments plus
    1000 optional string flags (``--option0`` … ``--option999``)."""
    cli = argparse.ArgumentParser(
        description="A CLI tool with many optional and positional arguments"
    )

    # Register the two positionals in order.
    for positional, description in (
        ("input_file", "The input file"),
        ("output_file", "The output file"),
    ):
        cli.add_argument(positional, type=str, help=description)

    # A large batch of optional flags to stress argparse's option handling.
    for index in range(1000):
        cli.add_argument(f"--option{index}", type=str, help=f"Optional argument {index}")

    return cli

def generate_arguments(i: int) -> list:
    """Build a simulated argv list.

    Returns the two positional file arguments followed by ``i`` pairs of
    ``--option{n} value{n}`` flags.
    """
    arguments = ["input.txt", "output.txt"]
    # Use a distinct loop variable: the original reused the parameter name
    # `i` as the loop index, shadowing it after the first iteration.
    for n in range(i):
        arguments.extend([f"--option{n}", f"value{n}"])
    return arguments

def bench_argparse(loops: int) -> float:
    """Parse two long argument lists *loops* times.

    Returns the elapsed wall-clock time in seconds, as required by the
    pyperf ``bench_time_func`` protocol (the original annotation said
    ``None``, but the function has always returned the measured delta).
    """
    # Build the argv lists and the parser up front so that setup cost
    # is excluded from the timed region.
    argument_lists = [
        generate_arguments(500),
        generate_arguments(1000),
    ]

    parser = create_parser()
    range_it = range(loops)
    t0 = pyperf.perf_counter()

    for _ in range_it:
        for args in argument_lists:
            parser.parse_args(args)

    return pyperf.perf_counter() - t0

if __name__ == "__main__":
    runner = pyperf.Runner()
    runner.metadata['description'] = "Benchmark the argparse program with many optional arguments"

    # Register under the benchmark's own name ("argparse_args", matching
    # pyproject.toml) rather than the bare "argparse", which is the shared
    # tag and would collide with the subparser benchmark's results.
    runner.bench_time_func('argparse_args', bench_argparse)
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
[project]
name = "pyperformance_bm_argparse"
requires-python = ">=3.8"
dependencies = ["pyperf"]
urls = {repository = "https://github.com/python/pyperformance"}
dynamic = ["version"]

[tool.pyperformance]
name = "argparse_subparser"
tag = "argparse"
Original file line number Diff line number Diff line change
@@ -1,8 +1,7 @@
"""Benchmark the argparse module.

This is a benchmark that simulates a command-line application with
multiple subparsers, each with their own options, and then
parses a series of command-line arguments.
"""
Benchmark the argparse module with multiple subparsers,
each with their own options, and then parses a series of
command-line arguments.

Author: Savannah Ostrowski
"""
Expand Down