diff --git a/.coveragerc b/.coveragerc index d39d3d5f02b..e5a68198b84 100644 --- a/.coveragerc +++ b/.coveragerc @@ -10,6 +10,9 @@ include = *\Lib\site-packages\pytest.py parallel = 1 branch = 1 +# The sysmon core (default since Python 3.14) is much slower. +# Perhaps: https://github.com/nedbat/coveragepy/issues/2082 +core = ctrace [paths] source = src/ @@ -25,6 +28,7 @@ exclude_lines = ^\s*raise NotImplementedError\b ^\s*return NotImplemented\b ^\s*assert False(,|$) + ^\s*case unreachable: ^\s*assert_never\( ^\s*if TYPE_CHECKING: diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index 9e63bc68cfd..c44ef2d8210 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -25,41 +25,85 @@ jobs: attestations: write steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: fetch-depth: 0 persist-credentials: false - name: Build and Check Package - uses: hynek/build-and-inspect-python-package@b5076c307dc91924a82ad150cdd1533b444d3310 + uses: hynek/build-and-inspect-python-package@efb823f52190ad02594531168b7a2d5790e66516 with: attest-build-provenance-github: 'true' - deploy: - if: github.repository == 'pytest-dev/pytest' + generate-gh-release-notes: needs: [package] runs-on: ubuntu-latest - environment: deploy timeout-minutes: 30 permissions: - id-token: write - contents: write + contents: read steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: - persist-credentials: true + fetch-depth: 0 + persist-credentials: false + + - name: Set up Python + uses: actions/setup-python@v6 + with: + python-version: "3.13" + + - name: Install tox + run: | + python -m pip install --upgrade pip + pip install --upgrade tox + + - name: Generate release notes + env: + VERSION: ${{ github.event.inputs.version }} + run: | + tox -e generate-gh-release-notes -- "$VERSION" gh-release-notes.md + + - name: Upload release notes + uses: actions/upload-artifact@v4 + with: + name: release-notes + path: gh-release-notes.md 
+ retention-days: 1 + publish-to-pypi: + if: github.repository == 'pytest-dev/pytest' + # Need generate-gh-release-notes only for ordering. + # Don't want to release to PyPI if generating GitHub release notes fails. + needs: [package, generate-gh-release-notes] + runs-on: ubuntu-latest + environment: deploy + timeout-minutes: 30 + permissions: + id-token: write + steps: - name: Download Package - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v6 with: name: Packages path: dist - name: Publish package to PyPI - uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc + uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e with: attestations: true + push-tag: + needs: [publish-to-pypi] + runs-on: ubuntu-latest + timeout-minutes: 10 + permissions: + contents: write + steps: + - uses: actions/checkout@v5 + with: + fetch-depth: 0 + persist-credentials: true + - name: Push tag env: VERSION: ${{ github.event.inputs.version }} @@ -69,48 +113,29 @@ jobs: git tag --annotate --message=v"$VERSION" "$VERSION" ${{ github.sha }} git push origin "$VERSION" - release-notes: - - # todo: generate the content in the build job - # the goal being of using a github action script to push the release data - # after success instead of creating a complete python/tox env - needs: [deploy] + create-github-release: + needs: [push-tag, generate-gh-release-notes] runs-on: ubuntu-latest - timeout-minutes: 30 + timeout-minutes: 10 permissions: contents: write steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - persist-credentials: false - - name: Download Package - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v6 with: name: Packages path: dist - - name: Set up Python - uses: actions/setup-python@v5 + - name: Download release notes + uses: actions/download-artifact@v6 with: - python-version: "3.11" - - - name: Install tox - run: | - python -m pip install --upgrade pip - pip install --upgrade tox + name: 
release-notes + path: . - - name: Generate release notes + - name: Publish GitHub Release env: VERSION: ${{ github.event.inputs.version }} + GH_REPO: ${{ github.repository }} + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | - sudo apt-get install pandoc - tox -e generate-gh-release-notes -- "$VERSION" scripts/latest-release-notes.md - - - name: Publish GitHub Release - uses: softprops/action-gh-release@da05d552573ad5aba039eaac05058a918a7bf631 - with: - body_path: scripts/latest-release-notes.md - files: dist/* - tag_name: ${{ github.event.inputs.version }} + gh release create --notes-file gh-release-notes.md --verify-tag "$VERSION" dist/* diff --git a/.github/workflows/doc-check-links.yml b/.github/workflows/doc-check-links.yml new file mode 100644 index 00000000000..497ec73500a --- /dev/null +++ b/.github/workflows/doc-check-links.yml @@ -0,0 +1,37 @@ +name: Doc Check Links + +on: + schedule: + # At 00:00 on Sunday. + # https://crontab.guru + - cron: '0 0 * * 0' + workflow_dispatch: + +# Set permissions at the job level. +permissions: {} + +jobs: + doc-check-links: + if: github.repository_owner == 'pytest-dev' + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v5 + with: + fetch-depth: 0 + persist-credentials: false + + - name: Setup Python + uses: actions/setup-python@v6 + with: + python-version: "3.13" + cache: pip + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install tox + + - name: Run sphinx linkcheck via tox + run: tox -e docs-checklinks diff --git a/.github/workflows/prepare-release-pr.yml b/.github/workflows/prepare-release-pr.yml index b21ca70cb46..9dcfea7bae5 100644 --- a/.github/workflows/prepare-release-pr.yml +++ b/.github/workflows/prepare-release-pr.yml @@ -27,16 +27,16 @@ jobs: pull-requests: write steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: fetch-depth: 0 # persist-credentials is needed in order for us to push the release branch. 
persist-credentials: true - name: Set up Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: - python-version: "3.x" + python-version: "3.13" - name: Install dependencies run: | diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 82f9a1f2579..aeac36cea60 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -10,7 +10,7 @@ jobs: permissions: issues: write steps: - - uses: actions/stale@v9 + - uses: actions/stale@v10 with: debug-only: false days-before-issue-stale: 14 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 3ecc133878f..b7f0634d08d 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -20,6 +20,8 @@ on: - reopened # default - ready_for_review # used in PRs created from the release workflow + workflow_dispatch: # allows manual triggering of the workflow + env: PYTEST_ADDOPTS: "--color=yes" @@ -35,12 +37,12 @@ jobs: package: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: fetch-depth: 0 persist-credentials: false - name: Build and Check Package - uses: hynek/build-and-inspect-python-package@b5076c307dc91924a82ad150cdd1533b444d3310 + uses: hynek/build-and-inspect-python-package@efb823f52190ad02594531168b7a2d5790e66516 build: needs: [package] @@ -54,19 +56,22 @@ jobs: fail-fast: false matrix: name: [ - "windows-py39-unittestextras", - "windows-py39-pluggy", - "windows-py39-xdist", - "windows-py310", + "windows-py310-unittest-asynctest", + "windows-py310-unittest-twisted24", + "windows-py310-unittest-twisted25", + "windows-py310-pluggy", + "windows-py310-xdist", "windows-py311", "windows-py312", "windows-py313", "windows-py314", - "ubuntu-py39-lsof-numpy-pexpect", - "ubuntu-py39-pluggy", - "ubuntu-py39-freeze", - "ubuntu-py39-xdist", + "ubuntu-py310-unittest-asynctest", + "ubuntu-py310-unittest-twisted24", + "ubuntu-py310-unittest-twisted25", + "ubuntu-py310-lsof-numpy-pexpect", + 
"ubuntu-py310-pluggy", + "ubuntu-py310-freeze", "ubuntu-py310-xdist", "ubuntu-py311", "ubuntu-py312", @@ -74,7 +79,6 @@ jobs: "ubuntu-py314", "ubuntu-pypy3-xdist", - "macos-py39", "macos-py310", "macos-py312", "macos-py313", @@ -85,23 +89,32 @@ jobs: ] include: - - name: "windows-py39-unittestextras" - python: "3.9" + # Use separate jobs for different unittest flavors (twisted, asynctest) to ensure proper coverage. + - name: "windows-py310-unittest-asynctest" + python: "3.10" + os: windows-latest + tox_env: "py310-asynctest" + use_coverage: true + + - name: "windows-py310-unittest-twisted24" + python: "3.10" os: windows-latest - tox_env: "py39-unittestextras" + tox_env: "py310-twisted24" use_coverage: true - - name: "windows-py39-pluggy" - python: "3.9" + - name: "windows-py310-unittest-twisted25" + python: "3.10" os: windows-latest - tox_env: "py39-pluggymain-pylib-xdist" + tox_env: "py310-twisted25" + use_coverage: true - - name: "windows-py39-xdist" - python: "3.9" + - name: "windows-py310-pluggy" + python: "3.10" os: windows-latest - tox_env: "py39-xdist" + tox_env: "py310-pluggymain-pylib-xdist" + xfail: true - - name: "windows-py310" + - name: "windows-py310-xdist" python: "3.10" os: windows-latest tox_env: "py310-xdist" @@ -120,32 +133,50 @@ jobs: python: "3.13" os: windows-latest tox_env: "py313" + xfail: true - name: "windows-py314" python: "3.14" os: windows-latest tox_env: "py314" + use_coverage: true - - name: "ubuntu-py39-lsof-numpy-pexpect" - python: "3.9" + # Use separate jobs for different unittest flavors (twisted, asynctest) to ensure proper coverage. 
+ - name: "ubuntu-py310-unittest-asynctest" + python: "3.10" os: ubuntu-latest - tox_env: "py39-lsof-numpy-pexpect" + tox_env: "py310-asynctest" use_coverage: true - - name: "ubuntu-py39-pluggy" - python: "3.9" + - name: "ubuntu-py310-unittest-twisted24" + python: "3.10" os: ubuntu-latest - tox_env: "py39-pluggymain-pylib-xdist" + tox_env: "py310-twisted24" + use_coverage: true - - name: "ubuntu-py39-freeze" - python: "3.9" + - name: "ubuntu-py310-unittest-twisted25" + python: "3.10" os: ubuntu-latest - tox_env: "py39-freeze" + tox_env: "py310-twisted25" + use_coverage: true - - name: "ubuntu-py39-xdist" - python: "3.9" + - name: "ubuntu-py310-lsof-numpy-pexpect" + python: "3.10" os: ubuntu-latest - tox_env: "py39-xdist" + tox_env: "py310-lsof-numpy-pexpect" + use_coverage: true + + - name: "ubuntu-py310-pluggy" + python: "3.10" + os: ubuntu-latest + tox_env: "py310-pluggymain-pylib-xdist" + xfail: true + + - name: "ubuntu-py310-freeze" + python: "3.10" + os: ubuntu-latest + tox_env: "py310-freeze" + xfail: true - name: "ubuntu-py310-xdist" python: "3.10" @@ -169,6 +200,7 @@ jobs: os: ubuntu-latest tox_env: "py313-pexpect" use_coverage: true + xfail: true - name: "ubuntu-py314" python: "3.14" @@ -177,21 +209,16 @@ jobs: use_coverage: true - name: "ubuntu-pypy3-xdist" - python: "pypy-3.9" + python: "pypy-3.10" os: ubuntu-latest tox_env: "pypy3-xdist" - - name: "macos-py39" - python: "3.9" - os: macos-latest - tox_env: "py39-xdist" - use_coverage: true - - name: "macos-py310" python: "3.10" os: macos-latest tox_env: "py310-xdist" + xfail: true - name: "macos-py312" python: "3.12" @@ -202,6 +229,7 @@ jobs: python: "3.13" os: macos-latest tox_env: "py313-xdist" + xfail: true - name: "macos-py314" python: "3.14" @@ -215,45 +243,27 @@ jobs: - name: "doctesting" - python: "3.9" + python: "3.10" os: ubuntu-latest tox_env: "doctesting" use_coverage: true - continue-on-error: >- - ${{ - contains( - fromJSON( - '[ - "windows-py39-pluggy", - "windows-py313", - 
"ubuntu-py39-pluggy", - "ubuntu-py39-freeze", - "ubuntu-py313", - "macos-py39", - "macos-py313" - ]' - ), - matrix.name - ) - && true - || false - }} + continue-on-error: ${{ matrix.xfail && true || false }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: fetch-depth: 0 persist-credentials: false - name: Download Package - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v6 with: name: Packages path: dist - name: Set up Python ${{ matrix.python }} - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: ${{ matrix.python }} check-latest: true @@ -267,11 +277,15 @@ jobs: - name: Test without coverage if: "! matrix.use_coverage" shell: bash + env: + _PYTEST_TOX_POSARGS_JUNIT: --junitxml=junit.xml run: tox run -e ${{ matrix.tox_env }} --installpkg `find dist/*.tar.gz` - name: Test with coverage if: "matrix.use_coverage" shell: bash + env: + _PYTEST_TOX_POSARGS_JUNIT: --junitxml=junit.xml run: tox run -e ${{ matrix.tox_env }}-coverage --installpkg `find dist/*.tar.gz` - name: Generate coverage report @@ -280,12 +294,20 @@ jobs: - name: Upload coverage to Codecov if: "matrix.use_coverage" - uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 + uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 with: fail_ci_if_error: false files: ./coverage.xml verbose: true + - name: Upload JUnit report to Codecov + uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 + with: + fail_ci_if_error: false + files: junit.xml + report_type: test_results + verbose: true + check: # This job does nothing and is only used for the branch protection if: always() @@ -296,6 +318,6 @@ jobs: steps: - name: Decide whether the needed jobs succeeded or failed - uses: re-actors/alls-green@223e4bb7a751b91f43eda76992bcfbf23b8b0302 + uses: re-actors/alls-green@a638d6464689bbb24c325bb3fe9404d63a913030 with: jobs: ${{ toJSON(needs) }} diff --git 
a/.github/workflows/update-plugin-list.yml b/.github/workflows/update-plugin-list.yml index c10aefa3a55..b396d6e19d4 100644 --- a/.github/workflows/update-plugin-list.yml +++ b/.github/workflows/update-plugin-list.yml @@ -20,16 +20,15 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: fetch-depth: 0 persist-credentials: false - name: Setup Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: - python-version: "3.11" - cache: pip + python-version: "3.13" - name: requests-cache uses: actions/cache@v4 @@ -38,13 +37,13 @@ jobs: key: plugins-http-cache-${{ github.run_id }} # Can use time based key as well restore-keys: plugins-http-cache- - - name: Install dependencies + - name: Install tox run: | python -m pip install --upgrade pip - pip install packaging requests tabulate[widechars] tqdm requests-cache platformdirs + pip install --upgrade tox - name: Update Plugin List - run: python scripts/update-plugin-list.py + run: tox -e update-plugin-list - name: Create Pull Request id: pr diff --git a/.gitignore b/.gitignore index c4557b33a1c..d0e8dc54ba1 100644 --- a/.gitignore +++ b/.gitignore @@ -51,6 +51,7 @@ coverage.xml .vscode __pycache__/ .python-version +.claude/settings.local.json # generated by pip pip-wheel-metadata/ diff --git a/.mailmap b/.mailmap new file mode 100644 index 00000000000..682334c7430 --- /dev/null +++ b/.mailmap @@ -0,0 +1,2 @@ +Freya Bruhin +Freya Bruhin diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 149f7e6af59..2f9f56256b6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,22 +1,23 @@ +minimum_pre_commit_version: "4.4.0" repos: - repo: https://github.com/astral-sh/ruff-pre-commit - rev: "v0.11.11" + rev: "v0.14.3" hooks: - - id: ruff + - id: ruff-check args: ["--fix"] - id: ruff-format - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v5.0.0 + rev: v6.0.0 hooks: - id: trailing-whitespace - id: end-of-file-fixer - id: 
check-yaml - repo: https://github.com/woodruffw/zizmor-pre-commit - rev: v1.8.0 + rev: v1.16.2 hooks: - id: zizmor - repo: https://github.com/adamchainz/blacken-docs - rev: 1.19.1 + rev: 1.20.0 hooks: - id: blacken-docs additional_dependencies: [black==24.1.1] @@ -32,7 +33,7 @@ repos: hooks: - id: python-use-type-annotations - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.15.0 + rev: v1.18.2 hooks: - id: mypy files: ^(src/|testing/|scripts/) @@ -48,7 +49,7 @@ repos: # on <3.11 - exceptiongroup>=1.0.0rc8 - repo: https://github.com/RobertCraigie/pyright-python - rev: v1.1.401 + rev: v1.1.407 hooks: - id: pyright files: ^(src/|scripts/) @@ -66,17 +67,17 @@ repos: # Manual because passing pyright is a work in progress. stages: [manual] - repo: https://github.com/tox-dev/pyproject-fmt - rev: "v2.6.0" + rev: "v2.11.0" hooks: - id: pyproject-fmt # https://pyproject-fmt.readthedocs.io/en/latest/#calculating-max-supported-python-version additional_dependencies: ["tox>=4.9"] - repo: https://github.com/asottile/pyupgrade - rev: v3.20.0 + rev: v3.21.0 hooks: - id: pyupgrade args: - - "--py39-plus" + - "--py310-plus" # Manual because ruff does what pyupgrade does and the two are not out of sync # often enough to make launching pyupgrade everytime worth it stages: [manual] @@ -92,10 +93,10 @@ repos: stages: [manual] - id: rst name: rst - entry: rst-lint --encoding utf-8 + entry: rst-lint files: ^(RELEASING.rst|README.rst|TIDELIFT.rst)$ language: python - additional_dependencies: [pygments, restructuredtext_lint] + additional_dependencies: [pygments, restructuredtext_lint>=2.0.0] - id: changelogs-rst name: changelog filenames language: fail diff --git a/.readthedocs.yaml b/.readthedocs.yaml index f7370f1bb98..6380b34adec 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -17,7 +17,7 @@ build: os: ubuntu-24.04 tools: python: >- - 3.12 + 3.13 apt_packages: - inkscape jobs: diff --git a/AUTHORS b/AUTHORS index e5b863e71f1..e8140292aa4 100644 --- a/AUTHORS +++ 
b/AUTHORS @@ -14,6 +14,7 @@ Ahn Ki-Wook Akhilesh Ramakrishnan Akiomi Kamakura Alan Velasco +Alejandro Villate Alessio Izzo Alex Jones Alex Lambson @@ -39,6 +40,7 @@ Andrzej Klajnert Andrzej Ostrowski Andy Freeland Anita Hammer +Anna Tasiopoulou Anthon van der Neut Anthony Shaw Anthony Sottile @@ -82,6 +84,7 @@ Carlos Jenkins Ceridwen Charles Cloud Charles Machalow +Charles-Meldhine Madi Mnemoi (cmnemoi) Charnjit SiNGH (CCSJ) Cheuk Ting Ho Chris Mahoney @@ -128,12 +131,14 @@ David Szotten David Vierra Daw-Ran Liou Debi Mishra +Denis Cherednichenko Denis Kirisov Denivy Braiam Rück Deysha Rivera Dheeraj C K Dhiren Serai Diego Russo +Dima Gerasimov Dmitry Dygalo Dmitry Pribysh Dominic Mortlock @@ -167,11 +172,11 @@ faph Felix Hofstätter Felix Nieuwenhuizen Feng Ma -Florian Bruhin Florian Dahlitz Floris Bruynooghe Frank Hoffmann Fraser Stark +Freya Bruhin Gabriel Landau Gabriel Reis Garvit Shubham @@ -200,6 +205,7 @@ Ilya Konstantinov Ionuț Turturică Isaac Virshup Israel Fruchter +Israël Hallé Itxaso Aizpurua Iwan Briquemont Jaap Broekhuizen @@ -225,6 +231,7 @@ Jon Parise Jon Sonesen Jonas Obrist Jordan Guymon +Jordan Macdonald Jordan Moldow Jordan Speicher Joseph Hunkeler @@ -250,6 +257,7 @@ Kevin Hierro Carrasco Kevin J. Foley Kian Eliasi Kian-Meng Ang +Kim Soo Kodi B. 
Arfer Kojo Idrissa Kostis Anagnostopoulos @@ -261,6 +269,7 @@ Leonardus Chen Lev Maximov Levon Saldamli Lewis Cowles +Liam DeVoe Llandy Riveron Del Risco Loic Esteve lovetheguitar @@ -279,6 +288,7 @@ Marcin Augustynów Marcin Bachry Marc Bresson Marco Gorelli +Marcos Boger Mark Abramowitz Mark Dickinson Mark Vong @@ -303,6 +313,7 @@ Michael Goerz Michael Krebs Michael Seifert Michael Vogt +Michael Reznik Michal Wajszczuk Michał Górny Michał Zięba @@ -313,6 +324,7 @@ Mike Hoyle (hoylemd) Mike Lundy Milan Lesnek Miro Hrončok +Mulat Mekonen mrbean-bremen Nathan Goldbaum Nathan Rousseau @@ -335,8 +347,10 @@ Oleg Sushchenko Oleksandr Zavertniev Olga Matoula Oliver Bestwalter +Olivier Grisel Omar Kohl Omer Hadari +Omri Golan Ondřej Súkup Oscar Benjamin Parth Patel @@ -368,6 +382,7 @@ Ralf Schmitt Ralph Giles Ram Rachum Ran Benita +Randy Döring Raphael Castaneda Raphael Pierzina Rafal Semik @@ -375,6 +390,7 @@ Reza Mousavi Raquel Alegre Ravi Chandra Reagan Lee +Reilly Brogan Rob Arrow Robert Holt Roberto Aldera @@ -393,6 +409,7 @@ Sadra Barikbin Saiprasad Kale Samuel Colvin Samuel Dion-Girardeau +Samuel Gaist Samuel Jirovec Samuel Searles-Bryant Samuel Therrien (Avasam) @@ -446,6 +463,7 @@ TJ Bruno Tobias Diez Tobias Petersen Tom Dalton +Tom Most Tom Viner Tomáš Gavenčiak Tomer Keren @@ -488,6 +506,7 @@ Yusuke Kadowaki Yutian Li Yuval Shimon Zac Hatfield-Dodds +Zac Palmer Laporte Zach Snicker Zachary Kneupper Zachary OBrien diff --git a/CITATION b/CITATION index 98beee72209..ac7c5d6f312 100644 --- a/CITATION +++ b/CITATION @@ -10,19 +10,19 @@ BibLaTeX: @software{pytest, title = {pytest x.y}, - author = {Holger Krekel and Bruno Oliveira and Ronny Pfannschmidt and Floris Bruynooghe and Brianna Laugher and Florian Bruhin}, + author = {Holger Krekel and Bruno Oliveira and Ronny Pfannschmidt and Floris Bruynooghe and Brianna Laugher and Freya Bruhin}, year = {2004}, version = {x.y}, url = {https://github.com/pytest-dev/pytest}, - note = {Contributors: Holger Krekel and Bruno 
Oliveira and Ronny Pfannschmidt and Floris Bruynooghe and Brianna Laugher and Florian Bruhin and others} + note = {Contributors: Holger Krekel and Bruno Oliveira and Ronny Pfannschmidt and Floris Bruynooghe and Brianna Laugher and Freya Bruhin and others} } BibTeX: @misc{pytest, - author = {Holger Krekel and Bruno Oliveira and Ronny Pfannschmidt and Floris Bruynooghe and Brianna Laugher and Florian Bruhin}, + author = {Holger Krekel and Bruno Oliveira and Ronny Pfannschmidt and Floris Bruynooghe and Brianna Laugher and Freya Bruhin}, title = {pytest x.y}, year = {2004}, howpublished = {\url{https://github.com/pytest-dev/pytest}}, - note = {Version x.y. Contributors include Holger Krekel, Bruno Oliveira, Ronny Pfannschmidt, Floris Bruynooghe, Brianna Laugher, Florian Bruhin, and others.} + note = {Version x.y. Contributors include Holger Krekel, Bruno Oliveira, Ronny Pfannschmidt, Floris Bruynooghe, Brianna Laugher, Freya Bruhin, and others.} } diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index f0ca304be4e..14d56263449 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -70,7 +70,7 @@ contacted individually: - Brianna Laugher ([@pfctdayelise](https://github.com/pfctdayelise)): brianna@laugher.id.au - Bruno Oliveira ([@nicoddemus](https://github.com/nicoddemus)): nicoddemus@gmail.com -- Florian Bruhin ([@the-compiler](https://github.com/the-compiler)): pytest@the-compiler.org +- Freya Bruhin ([@the-compiler](https://github.com/the-compiler)): pytest@the-compiler.org ## Attribution diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index b79955e1c01..fb9f7f4d53d 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -146,7 +146,7 @@ the following: - PyPI presence with packaging metadata that contains a ``pytest-`` prefixed name, version number, authors, short and long description. -- a `tox configuration `_ +- a `tox configuration `_ for running tests using `tox `_. 
- a ``README`` describing how to use the plugin and on which @@ -197,13 +197,13 @@ Short version #. Follow `PEP-8 `_ for naming. #. Tests are run using ``tox``:: - tox -e linting,py39 + tox -e linting,py313 The test environments above are usually enough to cover most cases locally. #. Write a ``changelog`` entry: ``changelog/2574.bugfix.rst``, use issue id number and one of ``feature``, ``improvement``, ``bugfix``, ``doc``, ``deprecation``, - ``breaking``, ``vendor`` or ``trivial`` for the issue type. + ``breaking``, ``vendor``, ``packaging``, ``contrib``, or ``misc`` for the issue type. #. Unless your change is a trivial or a documentation fix (e.g., a typo or reword of a small section) please @@ -269,24 +269,24 @@ Here is a simple overview, with pytest-specific bits: #. Run all the tests - You need to have Python 3.9 or later available in your system. Now + You need to have a supported Python version available in your system. Now running tests is as simple as issuing this command:: - $ tox -e linting,py39 + $ tox -e linting,py - This command will run tests via the "tox" tool against Python 3.9 - and also perform "lint" coding-style checks. + This command will run tests via the "tox" tool against your default Python + version and also perform "lint" coding-style checks. #. You can now edit your local working copy and run the tests again as necessary. Please follow `PEP-8 `_ for naming. - You can pass different options to ``tox``. For example, to run tests on Python 3.9 and pass options to pytest - (e.g. enter pdb on failure) to pytest you can do:: + You can pass different options to ``tox``. For example, to run tests on Python 3.13 and pass options to pytest + (e.g. 
enter pdb on failure) you can do:: - $ tox -e py39 -- --pdb + $ tox -e py313 -- --pdb - Or to only run tests in a particular test module on Python 3.9:: + Or to only run tests in a particular test module on Python 3.12:: - $ tox -e py39 -- testing/test_config.py + $ tox -e py312 -- testing/test_config.py When committing, ``pre-commit`` will re-format the files if necessary. @@ -305,8 +305,9 @@ Here is a simple overview, with pytest-specific bits: #. Create a new changelog entry in ``changelog``. The file should be named ``..rst``, where *issueid* is the number of the issue related to the change and *type* is one of - ``feature``, ``improvement``, ``bugfix``, ``doc``, ``deprecation``, ``breaking``, ``vendor`` - or ``trivial``. You may skip creating the changelog entry if the change doesn't affect the + ``feature``, ``improvement``, ``bugfix``, ``doc``, ``deprecation``, ``breaking``, ``vendor``, + ``packaging``, ``contrib``, or ``misc``. + You may skip creating the changelog entry if the change doesn't affect the documented behaviour of pytest. #. Add yourself to ``AUTHORS`` file if not there yet, in alphabetical order. @@ -345,7 +346,7 @@ For example, to ensure a simple test passes you can write: result.assert_outcomes(failed=0, passed=1) -Alternatively, it is possible to make checks based on the actual output of the termal using +Alternatively, it is possible to make checks based on the actual output of the terminal using *glob-like* expressions: .. code-block:: python @@ -478,10 +479,10 @@ above? to do the backport. 2. However, often the merge is done by another maintainer, in which case it is nice of them to do the backport procedure if they have the time. -3. For bugs submitted by non-maintainers, it is expected that a core developer will to do +3. For bugs submitted by non-maintainers, it is expected that a core developer will do the backport, normally the one that merged the PR on ``main``. -4. 
If a non-maintainers notices a bug which is fixed on ``main`` but has not been backported - (due to maintainers forgetting to apply the *needs backport* label, or just plain missing it), +4. If a non-maintainer notices a bug which is fixed on ``main`` but has not been backported + (due to maintainers forgetting to apply the *needs backport* or *backport x.x.x* labels, or just plain missing it), they are also welcome to open a PR with the backport. The procedure is simple and really helps with the maintenance of the project. @@ -511,7 +512,7 @@ can always reopen the issue/pull request in their own time later if it makes sen When to close ~~~~~~~~~~~~~ -Here are a few general rules the maintainers use deciding when to close issues/PRs because +Here are a few general rules the maintainers use to decide when to close issues/PRs because of lack of inactivity: * Issues labeled ``question`` or ``needs information``: closed after 14 days inactive. @@ -523,7 +524,7 @@ The above are **not hard rules**, but merely **guidelines**, and can be (and oft Closing pull requests ~~~~~~~~~~~~~~~~~~~~~ -When closing a Pull Request, it needs to be acknowledging the time, effort, and interest demonstrated by the person which submitted it. As mentioned previously, it is not the intent of the team to dismiss a stalled pull request entirely but to merely to clear up our queue, so a message like the one below is warranted when closing a pull request that went stale: +When closing a Pull Request, we should acknowledge the time, effort, and interest demonstrated by the person who submitted it. As mentioned previously, it is not the intent of the team to dismiss a stalled pull request entirely but to merely to clear up our queue, so a message like the one below is warranted when closing a pull request that went stale: Hi , @@ -531,7 +532,7 @@ When closing a Pull Request, it needs to be acknowledging the time, effort, and We noticed it has been awhile since you have updated this PR, however. 
pytest is a high activity project, with many issues/PRs being opened daily, so it is hard for us maintainers to track which PRs are ready for merging, for review, or need more attention. - So for those reasons we, think it is best to close the PR for now, but with the only intention to clean up our queue, it is by no means a rejection of your changes. We still encourage you to re-open this PR (it is just a click of a button away) when you are ready to get back to it. + So for those reasons, we think it is best to close the PR for now, but with the only intention to clean up our queue, it is by no means a rejection of your changes. We still encourage you to re-open this PR (it is just a click of a button away) when you are ready to get back to it. Again we appreciate your time for working on this, and hope you might get back to this at a later time! diff --git a/README.rst b/README.rst index 091afc363da..3bc5f06fc81 100644 --- a/README.rst +++ b/README.rst @@ -79,7 +79,7 @@ To execute it:: ========================== 1 failed in 0.04 seconds =========================== -Due to ``pytest``'s detailed assertion introspection, only plain ``assert`` statements are used. See `getting-started `_ for more examples. +Thanks to ``pytest``'s detailed assertion introspection, you can simply use plain ``assert`` statements. See `getting-started `_ for more examples. Features @@ -97,7 +97,7 @@ Features - Can run `unittest `_ (or trial) test suites out of the box -- Python 3.9+ or PyPy3 +- Python 3.10+ or PyPy3 - Rich plugin architecture, with over 1300+ `external plugins `_ and thriving community diff --git a/RELEASING.rst b/RELEASING.rst index 0ca63ee4fbf..2b00e658e7a 100644 --- a/RELEASING.rst +++ b/RELEASING.rst @@ -117,7 +117,7 @@ To release a version ``MAJOR.MINOR.PATCH``, follow these steps: #. Create a branch ``release-MAJOR.MINOR.PATCH`` from the ``MAJOR.MINOR.x`` branch. - Ensure your are updated and in a clean working tree. 
+ Ensure your local checkout is up to date and in a clean working tree. #. Using ``tox``, generate docs, changelog, announcements:: @@ -133,7 +133,7 @@ Releasing Both automatic and manual processes described above follow the same steps from this point onward. -#. After all tests pass and the PR has been approved, trigger the ``deploy`` job +#. After all tests pass and the PR has been approved, trigger the ``deploy`` workflow in https://github.com/pytest-dev/pytest/actions/workflows/deploy.yml, using the ``release-MAJOR.MINOR.PATCH`` branch as source. @@ -168,9 +168,7 @@ Both automatic and manual processes described above follow the same steps from t To the following mailing lists: - * pytest-dev@python.org (all releases) - * python-announce-list@python.org (all releases) - * testing-in-python@lists.idyll.org (only major/minor releases) + * python-announce-list@python.org And announce it with the ``#pytest`` hashtag on: diff --git a/changelog/10224.improvement.rst b/changelog/10224.improvement.rst deleted file mode 100644 index 93afe9e2c1e..00000000000 --- a/changelog/10224.improvement.rst +++ /dev/null @@ -1,18 +0,0 @@ -pytest's ``short`` and ``long`` traceback styles (:ref:`how-to-modifying-python-tb-printing`) -now have partial :pep:`657` support and will show specific code segments in the -traceback. - -.. 
code-block:: pytest - - ================================= FAILURES ================================= - _______________________ test_gets_correct_tracebacks _______________________ - - test_tracebacks.py:12: in test_gets_correct_tracebacks - assert manhattan_distance(p1, p2) == 1 - ^^^^^^^^^^^^^^^^^^^^^^^^^^ - test_tracebacks.py:6: in manhattan_distance - return abs(point_1.x - point_2.x) + abs(point_1.y - point_2.y) - ^^^^^^^^^ - E AttributeError: 'NoneType' object has no attribute 'x' - --- by :user:`ammaraskar` diff --git a/changelog/10404.bugfix.rst b/changelog/10404.bugfix.rst deleted file mode 100644 index 4c98ea03d64..00000000000 --- a/changelog/10404.bugfix.rst +++ /dev/null @@ -1,7 +0,0 @@ -Apply filterwarnings from config/cli as soon as possible, and revert them as late as possible -so that warnings as errors are collected throughout the pytest run and before the -unraisable and threadexcept hooks are removed. - -This allows very late warnings and unraisable/threadexcept exceptions to fail the test suite. - -This also changes the warning that the lsof plugin issues from PytestWarning to the new warning PytestFDWarning so it can be more easily filtered. diff --git a/changelog/10839.deprecation.rst b/changelog/10839.deprecation.rst deleted file mode 100644 index a3e2cbf51d0..00000000000 --- a/changelog/10839.deprecation.rst +++ /dev/null @@ -1 +0,0 @@ -Requesting an asynchronous fixture without a `pytest_fixture_setup` hook that resolves it will now give a DeprecationWarning. This most commonly happens if a sync test requests an async fixture. This should have no effect on a majority of users with async tests or fixtures using async pytest plugins, but may affect non-standard hook setups or ``autouse=True``. For guidance on how to work around this warning see :ref:`sync-test-async-fixture`. 
diff --git a/changelog/11067.bugfix.rst b/changelog/11067.bugfix.rst deleted file mode 100644 index 4e3cb8e7dd7..00000000000 --- a/changelog/11067.bugfix.rst +++ /dev/null @@ -1,3 +0,0 @@ -The test report is now consistent regardless if the test xfailed via :ref:`pytest.mark.xfail ` or :func:`pytest.fail`. - -Previously, *xfailed* tests via the marker would have the string ``"reason: "`` prefixed to the message, while those *xfailed* via the function did not. The prefix has been removed. diff --git a/changelog/11118.improvement.rst b/changelog/11118.improvement.rst deleted file mode 100644 index 4760dbe9d64..00000000000 --- a/changelog/11118.improvement.rst +++ /dev/null @@ -1,3 +0,0 @@ -Now :confval:`pythonpath` configures `$PYTHONPATH` earlier than before during the initialization process, which now also affects plugins loaded via the `-p` command-line option. - --- by :user:`millerdev` diff --git a/changelog/11372.breaking.rst b/changelog/11372.breaking.rst deleted file mode 100644 index f4b5c3c6f6b..00000000000 --- a/changelog/11372.breaking.rst +++ /dev/null @@ -1 +0,0 @@ -Async tests will now fail, instead of warning+skipping, if you don't have any suitable plugin installed. diff --git a/changelog/11381.improvement.rst b/changelog/11381.improvement.rst deleted file mode 100644 index 74c080cc188..00000000000 --- a/changelog/11381.improvement.rst +++ /dev/null @@ -1,17 +0,0 @@ -The ``type`` parameter of the ``parser.addini`` method now accepts `"int"` and ``"float"`` parameters, facilitating the parsing of configuration values in the configuration file. - -Example: - -.. code-block:: python - - def pytest_addoption(parser): - parser.addini("int_value", type="int", default=2, help="my int value") - parser.addini("float_value", type="float", default=4.2, help="my float value") - -The `pytest.ini` file: - -.. 
code-block:: ini - - [pytest] - int_value = 3 - float_value = 5.4 diff --git a/changelog/11525.improvement.rst b/changelog/11525.improvement.rst deleted file mode 100644 index 1935ce59343..00000000000 --- a/changelog/11525.improvement.rst +++ /dev/null @@ -1,3 +0,0 @@ -Fixtures are now clearly represented in the output as a "fixture object", not as a normal function as before, making it easy for beginners to catch mistakes such as referencing a fixture declared in the same module but not requested in the test function. - --- by :user:`the-compiler` and :user:`glyphack` diff --git a/changelog/11538.feature.rst b/changelog/11538.feature.rst deleted file mode 100644 index d6473b8fe73..00000000000 --- a/changelog/11538.feature.rst +++ /dev/null @@ -1 +0,0 @@ -Added :class:`pytest.RaisesGroup` as an equivalent to :func:`pytest.raises` for expecting :exc:`ExceptionGroup`. Also adds :class:`pytest.RaisesExc` which is now the logic behind :func:`pytest.raises` and used as parameter to :class:`pytest.RaisesGroup`. ``RaisesGroup`` includes the ability to specify multiple different expected exceptions, the structure of nested exception groups, and flags for emulating :ref:`except* `. See :ref:`assert-matching-exception-groups` and docstrings for more information. diff --git a/changelog/12008.bugfix.rst b/changelog/12008.bugfix.rst deleted file mode 100644 index b9680b89236..00000000000 --- a/changelog/12008.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -In :pr:`11220`, an unintended change in reordering was introduced by changing the way indices were assigned to direct params. More specifically, before that change, the indices of direct params to metafunc's callspecs were assigned after all parametrizations took place. Now, that change is reverted. 
diff --git a/changelog/12017.contrib.rst b/changelog/12017.contrib.rst deleted file mode 100644 index ec1861893b3..00000000000 --- a/changelog/12017.contrib.rst +++ /dev/null @@ -1,7 +0,0 @@ -Mixed internal improvements: - -* Migrate formatting to f-strings in some tests. -* Use type-safe constructs in JUnitXML tests. -* Moved ``MockTiming`` into ``_pytest.timing``. - --- by :user:`RonnyPfannschmidt` diff --git a/changelog/12081.feature.rst b/changelog/12081.feature.rst deleted file mode 100644 index 6538fbf30f8..00000000000 --- a/changelog/12081.feature.rst +++ /dev/null @@ -1 +0,0 @@ -Added :fixture:`capteesys` to capture AND pass output to next handler set by ``--capture=``. diff --git a/changelog/12346.breaking.rst b/changelog/12346.breaking.rst deleted file mode 100644 index 7013cf734c8..00000000000 --- a/changelog/12346.breaking.rst +++ /dev/null @@ -1 +0,0 @@ -Tests will now fail, instead of raising a warning, if they return any value other than None. diff --git a/changelog/12426.improvement.rst b/changelog/12426.improvement.rst deleted file mode 100644 index 0da1f838aea..00000000000 --- a/changelog/12426.improvement.rst +++ /dev/null @@ -1 +0,0 @@ -A warning is now issued when :ref:`pytest.mark.usefixtures ref` is used without specifying any fixtures. Previously, empty usefixtures markers were silently ignored. diff --git a/changelog/12504.feature.rst b/changelog/12504.feature.rst deleted file mode 100644 index d72b97958c2..00000000000 --- a/changelog/12504.feature.rst +++ /dev/null @@ -1 +0,0 @@ -:func:`pytest.mark.xfail` now accepts :class:`pytest.RaisesGroup` for the ``raises`` parameter when you expect an exception group. You can also pass a :class:`pytest.RaisesExc` if you e.g. want to make use of the ``check`` parameter.
diff --git a/changelog/12535.doc.rst b/changelog/12535.doc.rst deleted file mode 100644 index d43c1c822ea..00000000000 --- a/changelog/12535.doc.rst +++ /dev/null @@ -1,4 +0,0 @@ -`This -example` -showed ``print`` statements that do not exactly reflect what the -different branches actually do. The fix makes the example more precise. diff --git a/changelog/12647.contrib.rst b/changelog/12647.contrib.rst deleted file mode 100644 index 1d7a3181778..00000000000 --- a/changelog/12647.contrib.rst +++ /dev/null @@ -1 +0,0 @@ -Fixed running the test suite with the ``hypothesis`` pytest plugin. diff --git a/changelog/12707.improvement.rst b/changelog/12707.improvement.rst deleted file mode 100644 index 4684b6561c8..00000000000 --- a/changelog/12707.improvement.rst +++ /dev/null @@ -1 +0,0 @@ -Exception chains can be navigated when dropped into Pdb in Python 3.13+. diff --git a/changelog/12713.feature.rst b/changelog/12713.feature.rst deleted file mode 100644 index 90867b87eae..00000000000 --- a/changelog/12713.feature.rst +++ /dev/null @@ -1,3 +0,0 @@ -New `--force-short-summary` option to force condensed summary output regardless of verbosity level. - -This lets users still see condensed summary output of failures for quick reference in log files from job outputs, being especially useful if non-condensed output is very verbose. diff --git a/changelog/12736.improvement.rst b/changelog/12736.improvement.rst deleted file mode 100644 index 5fdb14e2ef5..00000000000 --- a/changelog/12736.improvement.rst +++ /dev/null @@ -1,3 +0,0 @@ -Added a new attribute `name` with the fixed value `"pytest tests"` to the root tag `testsuites` of the junit-xml generated by pytest. - -This attribute is part of many junit-xml specifications and is even part of the `junit-10.xsd` specification that pytest's implementation is based on. 
diff --git a/changelog/12749.feature.rst b/changelog/12749.feature.rst deleted file mode 100644 index c3b7ca5d321..00000000000 --- a/changelog/12749.feature.rst +++ /dev/null @@ -1,21 +0,0 @@ -pytest traditionally collects classes/functions in the test module namespace even if they are imported from another file. - -For example: - -.. code-block:: python - - # contents of src/domain.py - class Testament: ... - - - # contents of tests/test_testament.py - from domain import Testament - - - def test_testament(): ... - -In this scenario with the default options, pytest will collect the class `Testament` from `tests/test_testament.py` because it starts with `Test`, even though in this case it is a production class being imported in the test module namespace. - -This behavior can now be prevented by setting the new :confval:`collect_imported_tests` configuration option to ``false``, which will make pytest collect classes/functions from test files **only** if they are defined in that file. - --- by :user:`FreerGit` diff --git a/changelog/12765.feature.rst b/changelog/12765.feature.rst deleted file mode 100644 index 193c75621f7..00000000000 --- a/changelog/12765.feature.rst +++ /dev/null @@ -1,3 +0,0 @@ -Thresholds to trigger snippet truncation can now be set with :confval:`truncation_limit_lines` and :confval:`truncation_limit_chars`. - -See :ref:`truncation-params` for more information. diff --git a/changelog/12863.bugfix.rst b/changelog/12863.bugfix.rst deleted file mode 100644 index 0b1c397a08e..00000000000 --- a/changelog/12863.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Fix applying markers, including :ref:`pytest.mark.parametrize ` when placed above `@staticmethod` or `@classmethod`. diff --git a/changelog/12874.breaking.rst b/changelog/12874.breaking.rst deleted file mode 100644 index a442586eeb5..00000000000 --- a/changelog/12874.breaking.rst +++ /dev/null @@ -1 +0,0 @@ -We dropped support for Python 3.8 following its end of life (2024-10-07). 
diff --git a/changelog/12929.bugfix.rst b/changelog/12929.bugfix.rst deleted file mode 100644 index fcf490d83e2..00000000000 --- a/changelog/12929.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Handle StopIteration from test cases, setup and teardown correctly. diff --git a/changelog/12938.bugfix.rst b/changelog/12938.bugfix.rst deleted file mode 100644 index d54d73bdbf5..00000000000 --- a/changelog/12938.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Fixed ``--durations-min`` argument not respected if ``-vv`` is used. diff --git a/changelog/12943.improvement.rst b/changelog/12943.improvement.rst deleted file mode 100644 index eb8ac63650a..00000000000 --- a/changelog/12943.improvement.rst +++ /dev/null @@ -1 +0,0 @@ -If a test fails with an exceptiongroup with a single exception, the contained exception will now be displayed in the short test summary info. diff --git a/changelog/12946.bugfix.rst b/changelog/12946.bugfix.rst deleted file mode 100644 index b11da09e7ae..00000000000 --- a/changelog/12946.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Fixed missing help for :mod:`pdb` commands wrapped by pytest -- by :user:`adamchainz`. diff --git a/changelog/12958.improvement.rst b/changelog/12958.improvement.rst deleted file mode 100644 index ee8dc8c0710..00000000000 --- a/changelog/12958.improvement.rst +++ /dev/null @@ -1,9 +0,0 @@ -A number of :ref:`unraisable ` enhancements: - -* Set the unraisable hook as early as possible and unset it as late as possible, to collect the most possible number of unraisable exceptions. -* Call the garbage collector just before unsetting the unraisable hook, to collect any straggling exceptions. -* Collect multiple unraisable exceptions per test phase. -* Report the :mod:`tracemalloc` allocation traceback (if available). -* Avoid using a generator based hook to allow handling :class:`StopIteration` in test failures. -* Report the unraisable exception as the cause of the :class:`pytest.PytestUnraisableExceptionWarning` exception if raised. 
-* Compute the ``repr`` of the unraisable object in the unraisable hook so you get the latest information if available, and should help with resurrection of the object. diff --git a/changelog/12960.breaking.rst b/changelog/12960.breaking.rst deleted file mode 100644 index 3ab87e6fe23..00000000000 --- a/changelog/12960.breaking.rst +++ /dev/null @@ -1,3 +0,0 @@ -Test functions containing a yield now cause an explicit error. They have not been run since pytest 4.0, and were previously marked as an expected failure and deprecation warning. - -See :ref:`the docs ` for more information. diff --git a/changelog/12981.bugfix.rst b/changelog/12981.bugfix.rst deleted file mode 100644 index 5fc8e29656f..00000000000 --- a/changelog/12981.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Prevent exceptions in :func:`pytest.Config.add_cleanup` callbacks preventing further cleanups. diff --git a/changelog/13010.improvement.rst b/changelog/13010.improvement.rst deleted file mode 100644 index d6b814f090e..00000000000 --- a/changelog/13010.improvement.rst +++ /dev/null @@ -1 +0,0 @@ -:func:`pytest.approx` now can compare collections that contain numbers and non-numbers mixed. diff --git a/changelog/13016.improvement.rst b/changelog/13016.improvement.rst deleted file mode 100644 index 634672ab69b..00000000000 --- a/changelog/13016.improvement.rst +++ /dev/null @@ -1,8 +0,0 @@ -A number of :ref:`threadexception ` enhancements: - -* Set the excepthook as early as possible and unset it as late as possible, to collect the most possible number of unhandled exceptions from threads. -* Collect multiple thread exceptions per test phase. -* Report the :mod:`tracemalloc` allocation traceback (if available). -* Avoid using a generator based hook to allow handling :class:`StopIteration` in test failures. -* Report the thread exception as the cause of the :class:`pytest.PytestUnhandledThreadExceptionWarning` exception if raised. 
-* Extract the ``name`` of the thread object in the excepthook which should help with resurrection of the thread. diff --git a/changelog/13031.improvement.rst b/changelog/13031.improvement.rst deleted file mode 100644 index c6c64c4673a..00000000000 --- a/changelog/13031.improvement.rst +++ /dev/null @@ -1 +0,0 @@ -An empty parameter set as in ``pytest.mark.parametrize([], ids=idfunc)`` will no longer trigger a call to ``idfunc`` with internal objects. diff --git a/changelog/13047.bugfix.rst b/changelog/13047.bugfix.rst deleted file mode 100644 index 399e860505c..00000000000 --- a/changelog/13047.bugfix.rst +++ /dev/null @@ -1,17 +0,0 @@ -Restore :func:`pytest.approx` handling of equality checks between `bool` and `numpy.bool_` types. - -Comparing `bool` and `numpy.bool_` using :func:`pytest.approx` accidentally changed in version `8.3.4` and `8.3.5` to no longer match: - -.. code-block:: pycon - - >>> import numpy as np - >>> from pytest import approx - >>> [np.True_, np.True_] == pytest.approx([True, True]) - False - -This has now been fixed: - -.. code-block:: pycon - - >>> [np.True_, np.True_] == pytest.approx([True, True]) - True diff --git a/changelog/13115.improvement.rst b/changelog/13115.improvement.rst deleted file mode 100644 index 9ac45820917..00000000000 --- a/changelog/13115.improvement.rst +++ /dev/null @@ -1,8 +0,0 @@ -Allows supplying ``ExceptionGroup[Exception]`` and ``BaseExceptionGroup[BaseException]`` to ``pytest.raises`` to keep full typing on :class:`ExceptionInfo `: - -.. code-block:: python - - with pytest.raises(ExceptionGroup[Exception]) as exc_info: - some_function() - -Parametrizing with other exception types remains an error - we do not check the types of child exceptions and thus do not permit code that might look like we do. 
diff --git a/changelog/13119.bugfix.rst b/changelog/13119.bugfix.rst deleted file mode 100644 index b7e56af9bb8..00000000000 --- a/changelog/13119.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Improved handling of invalid regex patterns for filter warnings by providing a clear error message. diff --git a/changelog/13122.improvement.rst b/changelog/13122.improvement.rst deleted file mode 100644 index c302713b320..00000000000 --- a/changelog/13122.improvement.rst +++ /dev/null @@ -1,15 +0,0 @@ -The ``--stepwise`` mode received a number of improvements: - -* It no longer forgets the last failed test in case pytest is executed later without the flag. - - This enables the following workflow: - - 1. Execute pytest with ``--stepwise``, pytest then stops at the first failing test; - 2. Iteratively update the code and run the test in isolation, without the ``--stepwise`` flag (for example in an IDE), until it is fixed. - 3. Execute pytest with ``--stepwise`` again and pytest will continue from the previously failed test, and if it passes, continue on to the next tests. - - Previously, at step 3, pytest would start from the beginning, forgetting the previously failed test. - - This change however might cause issues if the ``--stepwise`` mode is used far apart in time, as the state might get stale, so the internal state will be reset automatically in case the test suite changes (for now only the number of tests are considered for this, we might change/improve this on the future). - -* New ``--stepwise-reset``/``--sw-reset`` flag, allowing the user to explicitly reset the stepwise state and restart the workflow from the beginning. diff --git a/changelog/13125.feature.rst b/changelog/13125.feature.rst deleted file mode 100644 index 0c7d66c1169..00000000000 --- a/changelog/13125.feature.rst +++ /dev/null @@ -1 +0,0 @@ -:confval:`console_output_style` now supports ``times`` to show execution time of each test. 
diff --git a/changelog/13175.bugfix.rst b/changelog/13175.bugfix.rst deleted file mode 100644 index bdbb72b41e1..00000000000 --- a/changelog/13175.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -The diff is now also highlighted correctly when comparing two strings. diff --git a/changelog/13192.feature.1.rst b/changelog/13192.feature.1.rst deleted file mode 100644 index 71fb06f7d70..00000000000 --- a/changelog/13192.feature.1.rst +++ /dev/null @@ -1 +0,0 @@ -:func:`pytest.raises` will now print a helpful string diff if matching fails and the match parameter has ``^`` and ``$`` and is otherwise escaped. diff --git a/changelog/13192.feature.2.rst b/changelog/13192.feature.2.rst deleted file mode 100644 index 0ffa0e1496a..00000000000 --- a/changelog/13192.feature.2.rst +++ /dev/null @@ -1 +0,0 @@ -You can now pass :func:`with pytest.raises(check=fn): `, where ``fn`` is a function which takes a raised exception and returns a boolean. The ``raises`` fails if no exception was raised (as usual), passes if an exception is raised and ``fn`` returns ``True`` (as well as ``match`` and the type matching, if specified, which are checked before), and propagates the exception if ``fn`` returns ``False`` (which likely also fails the test). diff --git a/changelog/13192.feature.rst b/changelog/13192.feature.rst deleted file mode 100644 index 97f31ce233c..00000000000 --- a/changelog/13192.feature.rst +++ /dev/null @@ -1 +0,0 @@ -:func:`pytest.raises` will now raise a warning when passing an empty string to ``match``, as this will match against any value. Use ``match="^$"`` if you want to check that an exception has no message. diff --git a/changelog/13218.doc.rst b/changelog/13218.doc.rst deleted file mode 100644 index 907a817e895..00000000000 --- a/changelog/13218.doc.rst +++ /dev/null @@ -1 +0,0 @@ -Pointed out in the :func:`pytest.approx` documentation that it considers booleans unequal to numeric zero or one. 
diff --git a/changelog/13221.doc.rst b/changelog/13221.doc.rst deleted file mode 100644 index cfd35f821b4..00000000000 --- a/changelog/13221.doc.rst +++ /dev/null @@ -1 +0,0 @@ -Improved grouping of CLI options in the ``--help`` output. diff --git a/changelog/13228.feature.rst b/changelog/13228.feature.rst deleted file mode 100644 index c5d84182313..00000000000 --- a/changelog/13228.feature.rst +++ /dev/null @@ -1,3 +0,0 @@ -:ref:`hidden-param` can now be used in ``id`` of :func:`pytest.param` or in -``ids`` of :py:func:`Metafunc.parametrize `. -It hides the parameter set from the test name. diff --git a/changelog/13248.bugfix.rst b/changelog/13248.bugfix.rst deleted file mode 100644 index 2ebb102fd07..00000000000 --- a/changelog/13248.bugfix.rst +++ /dev/null @@ -1,2 +0,0 @@ -Fixed an issue where passing a ``scope`` in :py:func:`Metafunc.parametrize ` with ``indirect=True`` -could result in other fixtures being unable to depend on the parametrized fixture. diff --git a/changelog/13253.feature.rst b/changelog/13253.feature.rst deleted file mode 100644 index e497c207223..00000000000 --- a/changelog/13253.feature.rst +++ /dev/null @@ -1 +0,0 @@ -New flag: :ref:`--disable-plugin-autoload ` which works as an alternative to :envvar:`PYTEST_DISABLE_PLUGIN_AUTOLOAD` when setting environment variables is inconvenient; and allows setting it in config files with :confval:`addopts`. diff --git a/changelog/13291.bugfix.rst b/changelog/13291.bugfix.rst deleted file mode 100644 index 03ce06b697a..00000000000 --- a/changelog/13291.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Fixed ``repr`` of ``attrs`` objects in assertion failure messages when using ``attrs>=25.2``. diff --git a/changelog/13308.improvement.rst b/changelog/13308.improvement.rst deleted file mode 100644 index 70018c66d59..00000000000 --- a/changelog/13308.improvement.rst +++ /dev/null @@ -1 +0,0 @@ -Added official support for Python 3.14. 
diff --git a/changelog/13312.bugfix.rst b/changelog/13312.bugfix.rst deleted file mode 100644 index 62ad36879f5..00000000000 --- a/changelog/13312.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Fixed a possible ``KeyError`` crash on PyPy during collection of tests involving higher-scoped parameters. diff --git a/changelog/13317.packaging.rst b/changelog/13317.packaging.rst deleted file mode 100644 index 94171cb1ef3..00000000000 --- a/changelog/13317.packaging.rst +++ /dev/null @@ -1,4 +0,0 @@ -Specified minimum allowed versions of ``colorama``, ``iniconfig``, -and ``packaging``; and bumped the minimum allowed version -of ``exceptiongroup`` for ``python_version<'3.11'`` from a release -candidate to a full release. diff --git a/changelog/13345.bugfix.rst b/changelog/13345.bugfix.rst deleted file mode 100644 index 5010888aa08..00000000000 --- a/changelog/13345.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Fix type hints for :attr:`pytest.TestReport.when` and :attr:`pytest.TestReport.location`. diff --git a/changelog/13377.bugfix.rst b/changelog/13377.bugfix.rst deleted file mode 100644 index 15755481f7f..00000000000 --- a/changelog/13377.bugfix.rst +++ /dev/null @@ -1,12 +0,0 @@ -Fixed handling of test methods with positional-only parameter syntax. - -Now, methods are supported that formally define ``self`` as positional-only -and/or fixture parameters as keyword-only, e.g.: - -.. code-block:: python - - class TestClass: - - def test_method(self, /, *, fixture): ... - -Before, this caused an internal error in pytest. diff --git a/changelog/13380.improvement.rst b/changelog/13380.improvement.rst deleted file mode 100644 index 51f374fbf01..00000000000 --- a/changelog/13380.improvement.rst +++ /dev/null @@ -1 +0,0 @@ -Fix :class:`ExceptionGroup` traceback filtering to exclude pytest internals. 
diff --git a/changelog/13384.bugfix.rst b/changelog/13384.bugfix.rst deleted file mode 100644 index e93d01dcab0..00000000000 --- a/changelog/13384.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Fixed an issue where pytest could report negative durations. diff --git a/changelog/13415.improvement.rst b/changelog/13415.improvement.rst deleted file mode 100644 index 61667f15c7b..00000000000 --- a/changelog/13415.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -The author metadata of the BibTex example is now correctly formatted with last names following first names. -An example of BibLaTex has been added. -BibTex and BibLaTex examples now clearly indicate that what is cited is software. - --- by :user:`willynilly` diff --git a/changelog/13420.bugfix.rst b/changelog/13420.bugfix.rst deleted file mode 100644 index 02f7372a759..00000000000 --- a/changelog/13420.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Added ``lru_cache`` to ``nodes._check_initialpaths_for_relpath``. diff --git a/changelog/13420.improvement.rst b/changelog/13420.improvement.rst deleted file mode 100644 index 54fe50a72b0..00000000000 --- a/changelog/13420.improvement.rst +++ /dev/null @@ -1 +0,0 @@ -Improved test collection performance by optimizing path resolution used in ``FSCollector``. diff --git a/changelog/13457.improvement.rst b/changelog/13457.improvement.rst deleted file mode 100644 index 3937384b322..00000000000 --- a/changelog/13457.improvement.rst +++ /dev/null @@ -1 +0,0 @@ -The error message about duplicate parametrization no longer displays an internal stack trace. diff --git a/changelog/4112.improvement.rst b/changelog/4112.improvement.rst deleted file mode 100644 index 426b87ffa19..00000000000 --- a/changelog/4112.improvement.rst +++ /dev/null @@ -1 +0,0 @@ -Using :ref:`pytest.mark.usefixtures ` on :func:`pytest.param` now produces an error instead of silently doing nothing. 
diff --git a/changelog/5473.improvement.rst b/changelog/5473.improvement.rst deleted file mode 100644 index 1b9ab006d49..00000000000 --- a/changelog/5473.improvement.rst +++ /dev/null @@ -1 +0,0 @@ -Replace `:` with `;` in the assertion rewrite warning message so it can be filtered using standard Python warning filters before calling :func:`pytest.main`. diff --git a/changelog/6649.doc.rst b/changelog/6649.doc.rst deleted file mode 100644 index cf5bb781b87..00000000000 --- a/changelog/6649.doc.rst +++ /dev/null @@ -1 +0,0 @@ -Added :class:`~pytest.TerminalReporter` to the :ref:`api-reference` documentation page. diff --git a/changelog/6649.misc.rst b/changelog/6649.misc.rst deleted file mode 100644 index cec8c3f4506..00000000000 --- a/changelog/6649.misc.rst +++ /dev/null @@ -1 +0,0 @@ -Added :class:`~pytest.TerminalReporter` to the public pytest API, as it is part of the signature of the :hook:`pytest_terminal_summary` hook. diff --git a/changelog/6985.improvement.rst b/changelog/6985.improvement.rst deleted file mode 100644 index 34ee8edc77d..00000000000 --- a/changelog/6985.improvement.rst +++ /dev/null @@ -1,21 +0,0 @@ -Improved :func:`pytest.approx` to enhance the readability of value ranges and tolerances between 0.001 and 1000. - * The `repr` method now provides clearer output for values within those ranges, making it easier to interpret the results. - * Previously, the output for those ranges of values and tolerances was displayed in scientific notation (e.g., `42 ± 1.0e+00`). The updated method now presents the tolerance as a decimal for better readability (e.g., `42 ± 1`). - - Example: - - **Previous Output:** - - .. code-block:: console - - >>> pytest.approx(42, abs=1) - 42 ± 1.0e+00 - - **Current Output:** - - .. 
code-block:: console - - >>> pytest.approx(42, abs=1) - 42 ± 1 - - -- by :user:`fazeelghafoor` diff --git a/changelog/7683.improvement.rst b/changelog/7683.improvement.rst deleted file mode 100644 index 311abe4df93..00000000000 --- a/changelog/7683.improvement.rst +++ /dev/null @@ -1 +0,0 @@ -The formerly optional ``pygments`` dependency is now required, causing output always to be source-highlighted (unless disabled via the ``--code-highlight=no`` CLI option). diff --git a/changelog/8612.doc.rst b/changelog/8612.doc.rst deleted file mode 100644 index 6ab4102ace4..00000000000 --- a/changelog/8612.doc.rst +++ /dev/null @@ -1,5 +0,0 @@ -Add a recipe for handling abstract test classes in the documentation. - -A new example has been added to the documentation to demonstrate how to use a mixin class to handle abstract -test classes without manually setting the ``__test__`` attribute for subclasses. -This ensures that subclasses of abstract test classes are automatically collected by pytest. diff --git a/changelog/9037.bugfix.rst b/changelog/9037.bugfix.rst deleted file mode 100644 index 5367452337e..00000000000 --- a/changelog/9037.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Honor :confval:`disable_test_id_escaping_and_forfeit_all_rights_to_community_support` when escaping ids in parametrized tests. diff --git a/changelog/README.rst b/changelog/README.rst index fdaa573d427..f1ba2cbd0bd 100644 --- a/changelog/README.rst +++ b/changelog/README.rst @@ -16,12 +16,12 @@ Each file should be named like ``..rst``, where * ``feature``: new user facing features, like new command-line options and new behavior. * ``improvement``: improvement of existing functionality, usually without requiring user intervention (for example, new fields being written in ``--junit-xml``, improved colors in terminal, etc). * ``bugfix``: fixes a bug. -* ``doc``: documentation improvement, like rewording an entire session or adding missing docs. 
+* ``doc``: documentation improvement, like rewording an entire section or adding missing docs. * ``deprecation``: feature deprecation. * ``breaking``: a change which may break existing suites, such as feature removal or behavior change. * ``vendor``: changes in packages vendored in pytest. * ``packaging``: notes for downstreams about unobvious side effects - and tooling. changes in the test invocation considerations and + and tooling. Changes in the test invocation considerations and runtime assumptions. * ``contrib``: stuff that affects the contributor experience. e.g. Running tests, building the docs, setting up the development diff --git a/doc/en/announce/index.rst b/doc/en/announce/index.rst index 51edc964a0c..4a5e8b86544 100644 --- a/doc/en/announce/index.rst +++ b/doc/en/announce/index.rst @@ -6,6 +6,13 @@ Release announcements :maxdepth: 2 + release-9.0.3 + release-9.0.2 + release-9.0.1 + release-9.0.0 + release-8.4.2 + release-8.4.1 + release-8.4.0 release-8.3.5 release-8.3.4 release-8.3.3 diff --git a/doc/en/announce/release-2.8.2.rst b/doc/en/announce/release-2.8.2.rst index e4726338852..f64ea9bb29a 100644 --- a/doc/en/announce/release-2.8.2.rst +++ b/doc/en/announce/release-2.8.2.rst @@ -17,7 +17,7 @@ Thanks to all who contributed to this release, among them: Bruno Oliveira Demian Brecht - Florian Bruhin + Freya Bruhin Ionel Cristian Mărieș Raphael Pierzina Ronny Pfannschmidt diff --git a/doc/en/announce/release-2.8.3.rst b/doc/en/announce/release-2.8.3.rst index 3f357252bb6..1ea7aac6d74 100644 --- a/doc/en/announce/release-2.8.3.rst +++ b/doc/en/announce/release-2.8.3.rst @@ -16,7 +16,7 @@ As usual, you can upgrade from pypi via:: Thanks to all who contributed to this release, among them: Bruno Oliveira - Florian Bruhin + Freya Bruhin Gabe Hollombe Gabriel Reis Hartmut Goebel diff --git a/doc/en/announce/release-2.8.4.rst b/doc/en/announce/release-2.8.4.rst index adbdecc87ea..0605c986928 100644 --- a/doc/en/announce/release-2.8.4.rst +++ 
b/doc/en/announce/release-2.8.4.rst @@ -16,7 +16,7 @@ As usual, you can upgrade from pypi via:: Thanks to all who contributed to this release, among them: Bruno Oliveira - Florian Bruhin + Freya Bruhin Jeff Widman Mehdy Khoshnoody Nicholas Chammas @@ -43,10 +43,10 @@ The py.test Development Team non-ascii characters. Thanks Bruno Oliveira for the PR. - fix #1204: another error when collecting with a nasty __getattr__(). - Thanks Florian Bruhin for the PR. + Thanks Freya Bruhin for the PR. - fix the summary printed when no tests did run. - Thanks Florian Bruhin for the PR. + Thanks Freya Bruhin for the PR. - a number of documentation modernizations wrt good practices. Thanks Bruno Oliveira for the PR. diff --git a/doc/en/announce/release-2.8.6.rst b/doc/en/announce/release-2.8.6.rst index 5d6565b16a3..a63c7f1e38d 100644 --- a/doc/en/announce/release-2.8.6.rst +++ b/doc/en/announce/release-2.8.6.rst @@ -18,7 +18,7 @@ Thanks to all who contributed to this release, among them: AMiT Kumar Bruno Oliveira Erik M. 
Bray - Florian Bruhin + Freya Bruhin Georgy Dyuldin Jeff Widman Kartik Singhal diff --git a/doc/en/announce/release-2.9.0.rst b/doc/en/announce/release-2.9.0.rst index 753bb7bf6f0..9477f0a9ba3 100644 --- a/doc/en/announce/release-2.9.0.rst +++ b/doc/en/announce/release-2.9.0.rst @@ -18,7 +18,7 @@ Thanks to all who contributed to this release, among them: Bruno Oliveira Buck Golemon David Vierra - Florian Bruhin + Freya Bruhin Galaczi Endre Georgy Dyuldin Lukas Bednar diff --git a/doc/en/announce/release-2.9.1.rst b/doc/en/announce/release-2.9.1.rst index 7a46d2ae690..3880218d233 100644 --- a/doc/en/announce/release-2.9.1.rst +++ b/doc/en/announce/release-2.9.1.rst @@ -17,7 +17,7 @@ Thanks to all who contributed to this release, among them: Bruno Oliveira Daniel Hahler Dmitry Malinovsky - Florian Bruhin + Freya Bruhin Floris Bruynooghe Matt Bachmann Ronny Pfannschmidt diff --git a/doc/en/announce/release-2.9.2.rst b/doc/en/announce/release-2.9.2.rst index 3e75af7fe69..3dc00b46729 100644 --- a/doc/en/announce/release-2.9.2.rst +++ b/doc/en/announce/release-2.9.2.rst @@ -17,7 +17,7 @@ Thanks to all who contributed to this release, among them: Adam Chainz Benjamin Dopplinger Bruno Oliveira - Florian Bruhin + Freya Bruhin John Towler Martin Prusse Meng Jue diff --git a/doc/en/announce/release-3.0.0.rst b/doc/en/announce/release-3.0.0.rst index 5de38911482..b201b901eb7 100644 --- a/doc/en/announce/release-3.0.0.rst +++ b/doc/en/announce/release-3.0.0.rst @@ -39,7 +39,7 @@ Thanks to all who contributed to this release, among them: Dmitry Dygalo Edoardo Batini Eli Boyarski - Florian Bruhin + Freya Bruhin Floris Bruynooghe Greg Price Guyzmo diff --git a/doc/en/announce/release-3.0.1.rst b/doc/en/announce/release-3.0.1.rst index 8f5cfe411aa..b36587f983a 100644 --- a/doc/en/announce/release-3.0.1.rst +++ b/doc/en/announce/release-3.0.1.rst @@ -17,7 +17,7 @@ Thanks to all who contributed to this release, among them: Bruno Oliveira Daniel Hahler Dmitry Dygalo - Florian Bruhin + 
Freya Bruhin Marcin Bachry Ronny Pfannschmidt matthiasha diff --git a/doc/en/announce/release-3.0.2.rst b/doc/en/announce/release-3.0.2.rst index 86ba82ca6e6..9b1f2acd60d 100644 --- a/doc/en/announce/release-3.0.2.rst +++ b/doc/en/announce/release-3.0.2.rst @@ -14,7 +14,7 @@ Thanks to all who contributed to this release, among them: * Ahn Ki-Wook * Bruno Oliveira -* Florian Bruhin +* Freya Bruhin * Jordan Guymon * Raphael Pierzina * Ronny Pfannschmidt diff --git a/doc/en/announce/release-3.0.3.rst b/doc/en/announce/release-3.0.3.rst index 89a2e0c744e..05bdf4dcd16 100644 --- a/doc/en/announce/release-3.0.3.rst +++ b/doc/en/announce/release-3.0.3.rst @@ -13,7 +13,7 @@ The changelog is available at http://doc.pytest.org/en/stable/changelog.html. Thanks to all who contributed to this release, among them: * Bruno Oliveira -* Florian Bruhin +* Freya Bruhin * Floris Bruynooghe * Huayi Zhang * Lev Maximov diff --git a/doc/en/announce/release-3.0.4.rst b/doc/en/announce/release-3.0.4.rst index 72c2d29464d..ba37bba2111 100644 --- a/doc/en/announce/release-3.0.4.rst +++ b/doc/en/announce/release-3.0.4.rst @@ -14,7 +14,7 @@ Thanks to all who contributed to this release, among them: * Bruno Oliveira * Dan Wandschneider -* Florian Bruhin +* Freya Bruhin * Georgy Dyuldin * Grigorii Eremeev * Jason R. 
Coombs diff --git a/doc/en/announce/release-3.0.7.rst b/doc/en/announce/release-3.0.7.rst index 4b7e075e76a..782910ae6a4 100644 --- a/doc/en/announce/release-3.0.7.rst +++ b/doc/en/announce/release-3.0.7.rst @@ -14,7 +14,7 @@ Thanks to all who contributed to this release, among them: * Anthony Sottile * Barney Gale * Bruno Oliveira -* Florian Bruhin +* Freya Bruhin * Floris Bruynooghe * Ionel Cristian Mărieș * Katerina Koukiou diff --git a/doc/en/announce/release-3.1.0.rst b/doc/en/announce/release-3.1.0.rst index 55277067948..454c04c6430 100644 --- a/doc/en/announce/release-3.1.0.rst +++ b/doc/en/announce/release-3.1.0.rst @@ -27,7 +27,7 @@ Thanks to all who contributed to this release, among them: * David Giese * David Szotten * Dmitri Pribysh -* Florian Bruhin +* Freya Bruhin * Florian Schulze * Floris Bruynooghe * John Towler diff --git a/doc/en/announce/release-3.1.1.rst b/doc/en/announce/release-3.1.1.rst index 135b2fe8443..99fb0d0f801 100644 --- a/doc/en/announce/release-3.1.1.rst +++ b/doc/en/announce/release-3.1.1.rst @@ -12,7 +12,7 @@ The full changelog is available at http://doc.pytest.org/en/stable/changelog.htm Thanks to all who contributed to this release, among them: * Bruno Oliveira -* Florian Bruhin +* Freya Bruhin * Floris Bruynooghe * Jason R. 
Coombs * Ronny Pfannschmidt diff --git a/doc/en/announce/release-3.1.2.rst b/doc/en/announce/release-3.1.2.rst index a9b85c4715c..3e988b17e84 100644 --- a/doc/en/announce/release-3.1.2.rst +++ b/doc/en/announce/release-3.1.2.rst @@ -14,7 +14,7 @@ Thanks to all who contributed to this release, among them: * Andreas Pelme * ApaDoctor * Bruno Oliveira -* Florian Bruhin +* Freya Bruhin * Ronny Pfannschmidt * Segev Finer diff --git a/doc/en/announce/release-3.2.0.rst b/doc/en/announce/release-3.2.0.rst index edc66a28e78..68694493907 100644 --- a/doc/en/announce/release-3.2.0.rst +++ b/doc/en/announce/release-3.2.0.rst @@ -25,7 +25,7 @@ Thanks to all who contributed to this release, among them: * Andras Tim * Bruno Oliveira * Daniel Hahler -* Florian Bruhin +* Freya Bruhin * Floris Bruynooghe * John Still * Jordan Moldow diff --git a/doc/en/announce/release-3.2.1.rst b/doc/en/announce/release-3.2.1.rst index c40217d311d..a492390fa58 100644 --- a/doc/en/announce/release-3.2.1.rst +++ b/doc/en/announce/release-3.2.1.rst @@ -13,7 +13,7 @@ Thanks to all who contributed to this release, among them: * Alex Gaynor * Bruno Oliveira -* Florian Bruhin +* Freya Bruhin * Ronny Pfannschmidt * Srinivas Reddy Thatiparthy diff --git a/doc/en/announce/release-3.2.4.rst b/doc/en/announce/release-3.2.4.rst index ff0b35781b1..9bde3afab3b 100644 --- a/doc/en/announce/release-3.2.4.rst +++ b/doc/en/announce/release-3.2.4.rst @@ -15,7 +15,7 @@ Thanks to all who contributed to this release, among them: * Christian Boelsen * Christoph Buchner * Daw-Ran Liou -* Florian Bruhin +* Freya Bruhin * Franck Michea * Leonard Lausen * Matty G diff --git a/doc/en/announce/release-3.3.0.rst b/doc/en/announce/release-3.3.0.rst index 1cbf2c448c8..d54910bea4c 100644 --- a/doc/en/announce/release-3.3.0.rst +++ b/doc/en/announce/release-3.3.0.rst @@ -27,7 +27,7 @@ Thanks to all who contributed to this release, among them: * Daniel Hahler * Dirk Thomas * Dmitry Malinovsky -* Florian Bruhin +* Freya Bruhin * 
George Y. Kussumoto * Hugo * Jesús Espino diff --git a/doc/en/announce/release-3.3.1.rst b/doc/en/announce/release-3.3.1.rst index 98b6fa6c1ba..a1a0a6d6f45 100644 --- a/doc/en/announce/release-3.3.1.rst +++ b/doc/en/announce/release-3.3.1.rst @@ -14,7 +14,7 @@ Thanks to all who contributed to this release, among them: * Bruno Oliveira * Daniel Hahler * Eugene Prikazchikov -* Florian Bruhin +* Freya Bruhin * Roland Puntaier * Ronny Pfannschmidt * Sebastian Rahlf diff --git a/doc/en/announce/release-3.3.2.rst b/doc/en/announce/release-3.3.2.rst index 7a2577d1ff8..8c4110cc350 100644 --- a/doc/en/announce/release-3.3.2.rst +++ b/doc/en/announce/release-3.3.2.rst @@ -15,7 +15,7 @@ Thanks to all who contributed to this release, among them: * Antony Lee * Austin * Bruno Oliveira -* Florian Bruhin +* Freya Bruhin * Floris Bruynooghe * Henk-Jaap Wagenaar * Jurko Gospodnetić diff --git a/doc/en/announce/release-3.4.0.rst b/doc/en/announce/release-3.4.0.rst index 6ab5b124a25..8a8582f7a00 100644 --- a/doc/en/announce/release-3.4.0.rst +++ b/doc/en/announce/release-3.4.0.rst @@ -30,7 +30,7 @@ Thanks to all who contributed to this release, among them: * Brian Maissy * Bruno Oliveira * Cyrus Maden -* Florian Bruhin +* Freya Bruhin * Henk-Jaap Wagenaar * Ian Lesperance * Jon Dufresne diff --git a/doc/en/announce/release-3.4.1.rst b/doc/en/announce/release-3.4.1.rst index d83949453a2..bef05752698 100644 --- a/doc/en/announce/release-3.4.1.rst +++ b/doc/en/announce/release-3.4.1.rst @@ -16,7 +16,7 @@ Thanks to all who contributed to this release, among them: * Andy Freeland * Brian Maissy * Bruno Oliveira -* Florian Bruhin +* Freya Bruhin * Jason R. 
Coombs * Marcin Bachry * Pedro Algarvio diff --git a/doc/en/announce/release-3.4.2.rst b/doc/en/announce/release-3.4.2.rst index 07cd9d3a8ba..5ab73986617 100644 --- a/doc/en/announce/release-3.4.2.rst +++ b/doc/en/announce/release-3.4.2.rst @@ -13,7 +13,7 @@ Thanks to all who contributed to this release, among them: * Allan Feldman * Bruno Oliveira -* Florian Bruhin +* Freya Bruhin * Jason R. Coombs * Kyle Altendorf * Maik Figura diff --git a/doc/en/announce/release-3.5.0.rst b/doc/en/announce/release-3.5.0.rst index 6bc2f3cd0cb..7ce2fe3dfe0 100644 --- a/doc/en/announce/release-3.5.0.rst +++ b/doc/en/announce/release-3.5.0.rst @@ -26,7 +26,7 @@ Thanks to all who contributed to this release, among them: * Bruno Oliveira * Carlos Jenkins * Daniel Hahler -* Florian Bruhin +* Freya Bruhin * Jason R. Coombs * Jeffrey Rackauckas * Jordan Speicher diff --git a/doc/en/announce/release-5.0.0.rst b/doc/en/announce/release-5.0.0.rst index f5e593e9d88..166d4e565c3 100644 --- a/doc/en/announce/release-5.0.0.rst +++ b/doc/en/announce/release-5.0.0.rst @@ -26,7 +26,7 @@ Thanks to all who contributed to this release, among them: * Daniel Hahler * Dirk Thomas * Evan Kepner -* Florian Bruhin +* Freya Bruhin * Hugo * Kevin J. 
Foley * Pulkit Goyal diff --git a/doc/en/announce/release-5.0.1.rst b/doc/en/announce/release-5.0.1.rst index e16a8f716f1..f0ffb791545 100644 --- a/doc/en/announce/release-5.0.1.rst +++ b/doc/en/announce/release-5.0.1.rst @@ -15,7 +15,7 @@ Thanks to all who contributed to this release, among them: * Andreu Vallbona Plazas * Anthony Sottile * Bruno Oliveira -* Florian Bruhin +* Freya Bruhin * Michael Moore * Niklas Meinzer * Thomas Grainger diff --git a/doc/en/announce/release-5.1.0.rst b/doc/en/announce/release-5.1.0.rst index 9ab54ff9730..6170023604a 100644 --- a/doc/en/announce/release-5.1.0.rst +++ b/doc/en/announce/release-5.1.0.rst @@ -27,7 +27,7 @@ Thanks to all who contributed to this release, among them: * Bruno Oliveira * Daniel Hahler * David Röthlisberger -* Florian Bruhin +* Freya Bruhin * Ilya Stepin * Jon Dufresne * Kaiqi diff --git a/doc/en/announce/release-5.1.1.rst b/doc/en/announce/release-5.1.1.rst index bb8de48014a..1262e94fd00 100644 --- a/doc/en/announce/release-5.1.1.rst +++ b/doc/en/announce/release-5.1.1.rst @@ -14,7 +14,7 @@ Thanks to all who contributed to this release, among them: * Anthony Sottile * Bruno Oliveira * Daniel Hahler -* Florian Bruhin +* Freya Bruhin * Hugo van Kemenade * Ran Benita * Ronny Pfannschmidt diff --git a/doc/en/announce/release-5.2.1.rst b/doc/en/announce/release-5.2.1.rst index fe42b9bf15f..904e1b59893 100644 --- a/doc/en/announce/release-5.2.1.rst +++ b/doc/en/announce/release-5.2.1.rst @@ -13,7 +13,7 @@ Thanks to all who contributed to this release, among them: * Anthony Sottile * Bruno Oliveira -* Florian Bruhin +* Freya Bruhin * Hynek Schlawack * Kevin J. 
Foley * tadashigaki diff --git a/doc/en/announce/release-5.2.2.rst b/doc/en/announce/release-5.2.2.rst index 89fd6a534d4..015baba52e7 100644 --- a/doc/en/announce/release-5.2.2.rst +++ b/doc/en/announce/release-5.2.2.rst @@ -16,7 +16,7 @@ Thanks to all who contributed to this release, among them: * Anthony Sottile * Bruno Oliveira * Daniel Hahler -* Florian Bruhin +* Freya Bruhin * Nattaphoom Chaipreecha * Oliver Bestwalter * Philipp Loose diff --git a/doc/en/announce/release-5.2.3.rst b/doc/en/announce/release-5.2.3.rst index bab174495d9..8c89e04540a 100644 --- a/doc/en/announce/release-5.2.3.rst +++ b/doc/en/announce/release-5.2.3.rst @@ -17,7 +17,7 @@ Thanks to all who contributed to this release, among them: * Daniel Hahler * Daniil Galiev * David Szotten -* Florian Bruhin +* Freya Bruhin * Patrick Harmon * Ran Benita * Zac Hatfield-Dodds diff --git a/doc/en/announce/release-5.3.1.rst b/doc/en/announce/release-5.3.1.rst index d575bb70e3f..5dc82ab7d88 100644 --- a/doc/en/announce/release-5.3.1.rst +++ b/doc/en/announce/release-5.3.1.rst @@ -15,7 +15,7 @@ Thanks to all who contributed to this release, among them: * Bruno Oliveira * Daniel Hahler * Felix Yan -* Florian Bruhin +* Freya Bruhin * Mark Dickinson * Nikolay Kondratyev * Steffen Schroeder diff --git a/doc/en/announce/release-6.0.0rc1.rst b/doc/en/announce/release-6.0.0rc1.rst index 5690b514baf..6f0a745cd00 100644 --- a/doc/en/announce/release-6.0.0rc1.rst +++ b/doc/en/announce/release-6.0.0rc1.rst @@ -25,7 +25,7 @@ Thanks to all who contributed to this release, among them: * David Diaz Barquero * Fabio Zadrozny * Felix Nieuwenhuizen -* Florian Bruhin +* Freya Bruhin * Florian Dahlitz * Gleb Nikonorov * Hugo van Kemenade diff --git a/doc/en/announce/release-6.1.0.rst b/doc/en/announce/release-6.1.0.rst index f4b571ae846..0c787d0bd15 100644 --- a/doc/en/announce/release-6.1.0.rst +++ b/doc/en/announce/release-6.1.0.rst @@ -23,7 +23,7 @@ Thanks to all of the contributors to this release: * C. 
Titus Brown * Drew Devereux * Faris A Chugthai -* Florian Bruhin +* Freya Bruhin * Hugo van Kemenade * Hynek Schlawack * Joseph Lucas diff --git a/doc/en/announce/release-6.2.0.rst b/doc/en/announce/release-6.2.0.rst index af16b830ddd..8e99d8fcda5 100644 --- a/doc/en/announce/release-6.2.0.rst +++ b/doc/en/announce/release-6.2.0.rst @@ -30,7 +30,7 @@ Thanks to all of the contributors to this release: * Cserna Zsolt * Dominic Mortlock * Emiel van de Laar -* Florian Bruhin +* Freya Bruhin * Garvit Shubham * Gustavo Camargo * Hugo Martins diff --git a/doc/en/announce/release-6.2.4.rst b/doc/en/announce/release-6.2.4.rst index fa2e3e78132..129368e73cd 100644 --- a/doc/en/announce/release-6.2.4.rst +++ b/doc/en/announce/release-6.2.4.rst @@ -14,7 +14,7 @@ Thanks to all of the contributors to this release: * Anthony Sottile * Bruno Oliveira * Christian Maurer -* Florian Bruhin +* Freya Bruhin * Ran Benita diff --git a/doc/en/announce/release-6.2.5.rst b/doc/en/announce/release-6.2.5.rst index bc6b4cf4222..daf9731c800 100644 --- a/doc/en/announce/release-6.2.5.rst +++ b/doc/en/announce/release-6.2.5.rst @@ -15,7 +15,7 @@ Thanks to all of the contributors to this release: * Bruno Oliveira * Brylie Christopher Oxley * Daniel Asztalos -* Florian Bruhin +* Freya Bruhin * Jason Haugen * MapleCCC * Michał Górny diff --git a/doc/en/announce/release-7.0.0.rst b/doc/en/announce/release-7.0.0.rst index 3ce4335564f..934064df745 100644 --- a/doc/en/announce/release-7.0.0.rst +++ b/doc/en/announce/release-7.0.0.rst @@ -34,7 +34,7 @@ Thanks to all of the contributors to this release: * Emmanuel Arias * Emmanuel Meric de Bellefon * Eric Liu -* Florian Bruhin +* Freya Bruhin * GergelyKalmar * Graeme Smecher * Harshna diff --git a/doc/en/announce/release-7.0.0rc1.rst b/doc/en/announce/release-7.0.0rc1.rst index a5bf0ed3c44..dd6ecdd131b 100644 --- a/doc/en/announce/release-7.0.0rc1.rst +++ b/doc/en/announce/release-7.0.0rc1.rst @@ -38,7 +38,7 @@ Thanks to all the contributors to this 
release: * Emmanuel Arias * Emmanuel Meric de Bellefon * Eric Liu -* Florian Bruhin +* Freya Bruhin * GergelyKalmar * Graeme Smecher * Harshna diff --git a/doc/en/announce/release-7.1.0.rst b/doc/en/announce/release-7.1.0.rst index 3361e1c8a32..f138524c564 100644 --- a/doc/en/announce/release-7.1.0.rst +++ b/doc/en/announce/release-7.1.0.rst @@ -28,7 +28,7 @@ Thanks to all of the contributors to this release: * Elijah DeLee * Emmanuel Arias * Fabian Egli -* Florian Bruhin +* Freya Bruhin * Gabor Szabo * Hasan Ramezani * Hugo van Kemenade diff --git a/doc/en/announce/release-7.2.0.rst b/doc/en/announce/release-7.2.0.rst index eca84aeb669..44cd553ec0f 100644 --- a/doc/en/announce/release-7.2.0.rst +++ b/doc/en/announce/release-7.2.0.rst @@ -33,7 +33,7 @@ Thanks to all of the contributors to this release: * EmptyRabbit * Ezio Melotti * Florian Best -* Florian Bruhin +* Freya Bruhin * Fredrik Berndtsson * Gabriel Landau * Gergely Kalmár diff --git a/doc/en/announce/release-7.3.0.rst b/doc/en/announce/release-7.3.0.rst index 33258dabade..b6d8379d5b5 100644 --- a/doc/en/announce/release-7.3.0.rst +++ b/doc/en/announce/release-7.3.0.rst @@ -42,7 +42,7 @@ Thanks to all of the contributors to this release: * Ezio Melotti * Felix Hofstätter * Florian Best -* Florian Bruhin +* Freya Bruhin * Fredrik Berndtsson * Gabriel Landau * Garvit Shubham diff --git a/doc/en/announce/release-7.4.0.rst b/doc/en/announce/release-7.4.0.rst index 5a0d18267d3..fef2ad6cb3d 100644 --- a/doc/en/announce/release-7.4.0.rst +++ b/doc/en/announce/release-7.4.0.rst @@ -27,7 +27,7 @@ Thanks to all of the contributors to this release: * Bryan Ricker * Chris Mahoney * Facundo Batista -* Florian Bruhin +* Freya Bruhin * Jarrett Keifer * Kenny Y * Miro Hrončok diff --git a/doc/en/announce/release-7.4.1.rst b/doc/en/announce/release-7.4.1.rst index efadcf919e8..4e22d3ead66 100644 --- a/doc/en/announce/release-7.4.1.rst +++ b/doc/en/announce/release-7.4.1.rst @@ -12,7 +12,7 @@ The full changelog is 
available at https://docs.pytest.org/en/stable/changelog.h Thanks to all of the contributors to this release: * Bruno Oliveira -* Florian Bruhin +* Freya Bruhin * Ran Benita diff --git a/doc/en/announce/release-8.0.0rc1.rst b/doc/en/announce/release-8.0.0rc1.rst index 547c8cbc53b..0cbfc3dad59 100644 --- a/doc/en/announce/release-8.0.0rc1.rst +++ b/doc/en/announce/release-8.0.0rc1.rst @@ -31,7 +31,7 @@ Thanks to all of the contributors to this release: * Christoph Anton Mitterer * DetachHead * Erik Hasse -* Florian Bruhin +* Freya Bruhin * Fraser Stark * Ha Pam * Hugo van Kemenade diff --git a/doc/en/announce/release-8.1.0.rst b/doc/en/announce/release-8.1.0.rst index 62cafdd78bb..6762bd412fe 100644 --- a/doc/en/announce/release-8.1.0.rst +++ b/doc/en/announce/release-8.1.0.rst @@ -28,7 +28,7 @@ Thanks to all of the contributors to this release: * Eric Larson * Fabian Sturm * Faisal Fawad -* Florian Bruhin +* Freya Bruhin * Franck Charras * Joachim B Haga * John Litborn diff --git a/doc/en/announce/release-8.2.0.rst b/doc/en/announce/release-8.2.0.rst index 2a63c8d8722..7aba492d7da 100644 --- a/doc/en/announce/release-8.2.0.rst +++ b/doc/en/announce/release-8.2.0.rst @@ -20,7 +20,7 @@ Thanks to all of the contributors to this release: * Bruno Oliveira * Daniel Miller -* Florian Bruhin +* Freya Bruhin * HolyMagician03-UMich * John Litborn * Levon Saldamli diff --git a/doc/en/announce/release-8.3.0.rst b/doc/en/announce/release-8.3.0.rst index ec5cd3d0db9..0589aedfa89 100644 --- a/doc/en/announce/release-8.3.0.rst +++ b/doc/en/announce/release-8.3.0.rst @@ -24,7 +24,7 @@ Thanks to all of the contributors to this release: * Bruno Oliveira * Cornelius Riemenschneider * Farbod Ahmadian -* Florian Bruhin +* Freya Bruhin * Hynek Schlawack * James Frost * Jason R. 
Coombs diff --git a/doc/en/announce/release-8.3.3.rst b/doc/en/announce/release-8.3.3.rst index 5e3eb36b921..6e73714d4f9 100644 --- a/doc/en/announce/release-8.3.3.rst +++ b/doc/en/announce/release-8.3.3.rst @@ -16,7 +16,7 @@ Thanks to all of the contributors to this release: * Bruno Oliveira * Christian Clauss * Eugene Mwangi -* Florian Bruhin +* Freya Bruhin * GTowers1 * Nauman Ahmed * Pierre Sassoulas diff --git a/doc/en/announce/release-8.3.4.rst b/doc/en/announce/release-8.3.4.rst index f76d60396dc..3ec21d73f5e 100644 --- a/doc/en/announce/release-8.3.4.rst +++ b/doc/en/announce/release-8.3.4.rst @@ -12,7 +12,7 @@ The full changelog is available at https://docs.pytest.org/en/stable/changelog.h Thanks to all of the contributors to this release: * Bruno Oliveira -* Florian Bruhin +* Freya Bruhin * Frank Hoffmann * Jakob van Santen * Leonardus Chen diff --git a/doc/en/announce/release-8.3.5.rst b/doc/en/announce/release-8.3.5.rst index 3de02c1d7a4..21bae869180 100644 --- a/doc/en/announce/release-8.3.5.rst +++ b/doc/en/announce/release-8.3.5.rst @@ -10,7 +10,7 @@ The full changelog is available at https://docs.pytest.org/en/stable/changelog.h Thanks to all of the contributors to this release: * Bruno Oliveira -* Florian Bruhin +* Freya Bruhin * John Litborn * Kenny Y * Ran Benita diff --git a/doc/en/announce/release-8.4.0.rst b/doc/en/announce/release-8.4.0.rst new file mode 100644 index 00000000000..f492d45070a --- /dev/null +++ b/doc/en/announce/release-8.4.0.rst @@ -0,0 +1,106 @@ +pytest-8.4.0 +======================================= + +The pytest team is proud to announce the 8.4.0 release! 
+ +This release contains new features, improvements, and bug fixes, +the full list of changes is available in the changelog: + + https://docs.pytest.org/en/stable/changelog.html + +For complete documentation, please visit: + + https://docs.pytest.org/en/stable/ + +As usual, you can upgrade from PyPI via: + + pip install -U pytest + +Thanks to all of the contributors to this release: + +* Adam Johnson +* Ammar Askar +* Andrew Pikul +* Andy Freeland +* Anthony Sottile +* Anton Zhilin +* Arpit Gupta +* Ashley Whetter +* Avasam +* Bahram Farahmand +* Brigitta Sipőcz +* Bruno Oliveira +* Callum Scott +* Christian Clauss +* Christopher Head +* Daara +* Daniel Miller +* Deysha Rivera +* Emil Hjelm +* Eugene Mwangi +* Freya Bruhin +* Frank Hoffmann +* GTowers1 +* Guillaume Gauvrit +* Gupta Arpit +* Harmin Parra Rueda +* Jakob van Santen +* Jason N. White +* Jiajun Xu +* John Litborn +* Julian Valentin +* JulianJvn +* Kenny Y +* Leonardus Chen +* Marcelo Duarte Trevisani +* Marcin Augustynów +* Natalia Mokeeva +* Nathan Rousseau +* Nauman Ahmed +* Nick Murphy +* Oleksandr Zavertniev +* Pavel Zhukov +* Peter Gessler +* Pierre Sassoulas +* Pradeep Kumar +* Ran Benita +* Reagan Lee +* Rob Arrow +* Ronny Pfannschmidt +* Sadra Barikbin +* Sam Bull +* Samuel Bronson +* Sashko +* Serge Smertin +* Shaygan Hooshyari +* Stefaan Lippens +* Stefan Zimmermann +* Stephen McDowell +* Sviatoslav Sydorenko +* Sviatoslav Sydorenko (Святослав Сидоренко) +* Thomas Grainger +* TobiMcNamobi +* Tobias Alex-Petersen +* Tony Narlock +* Vincent (Wen Yu) Ge +* Virendra Patil +* Will Riley +* Yann Dirson +* Zac Hatfield-Dodds +* delta87 +* dongfangtianyu +* eitanwass +* fazeelghafoor +* ikappaki +* jakkdl +* maugu +* moajo +* mwychung +* polkapolka +* suspe +* sven +* 🇺🇦 Sviatoslav Sydorenko (Святослав Сидоренко) + + +Happy testing, +The pytest Development Team diff --git a/doc/en/announce/release-8.4.1.rst b/doc/en/announce/release-8.4.1.rst new file mode 100644 index 00000000000..07ee26187a7 --- 
/dev/null +++ b/doc/en/announce/release-8.4.1.rst @@ -0,0 +1,21 @@ +pytest-8.4.1 +======================================= + +pytest 8.4.1 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. + +The full changelog is available at https://docs.pytest.org/en/stable/changelog.html. + +Thanks to all of the contributors to this release: + +* Bruno Oliveira +* Iwithyou2025 +* John Litborn +* Martin Fischer +* Ran Benita +* SarahPythonista + + +Happy testing, +The pytest Development Team diff --git a/doc/en/announce/release-8.4.2.rst b/doc/en/announce/release-8.4.2.rst new file mode 100644 index 00000000000..3111e85bd0f --- /dev/null +++ b/doc/en/announce/release-8.4.2.rst @@ -0,0 +1,27 @@ +pytest-8.4.2 +======================================= + +pytest 8.4.2 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. + +The full changelog is available at https://docs.pytest.org/en/stable/changelog.html. + +Thanks to all of the contributors to this release: + +* AD +* Aditi De +* Bruno Oliveira +* Freya Bruhin +* John Litborn +* Liam DeVoe +* Marc Mueller +* NayeemJohn +* Olivier Grisel +* Ran Benita +* bengartner +* 🇺🇦 Sviatoslav Sydorenko (Святослав Сидоренко) + + +Happy testing, +The pytest Development Team diff --git a/doc/en/announce/release-9.0.0.rst b/doc/en/announce/release-9.0.0.rst new file mode 100644 index 00000000000..67d4f95a56d --- /dev/null +++ b/doc/en/announce/release-9.0.0.rst @@ -0,0 +1,69 @@ +pytest-9.0.0 +======================================= + +The pytest team is proud to announce the 9.0.0 release! 
+ +This release contains new features, improvements, bug fixes, and breaking changes, so users +are encouraged to take a look at the CHANGELOG carefully: + + https://docs.pytest.org/en/stable/changelog.html + +For complete documentation, please visit: + + https://docs.pytest.org/en/stable/ + +As usual, you can upgrade from PyPI via: + + pip install -U pytest + +Thanks to all of the contributors to this release: + +* AD +* Aditi De +* Ali Nazzal +* Bruno Oliveira +* Charles-Meldhine Madi Mnemoi +* Clément Robert +* CoretexShadow +* Cornelius Roemer +* Eero Vaher +* Freya Bruhin +* Harsha Sai +* Hossein +* Israël Hallé +* Iwithyou2025 +* James Addison +* John Litborn +* Jordan Macdonald +* Kieran Ryan +* Liam DeVoe +* Marc Mueller +* Marcos Boger +* Michał Górny +* Mulat Mekonen +* NayeemJohn +* Olivier Grisel +* Omri Golan +* Pierre Sassoulas +* Praise Tompane +* Ran Benita +* Reilly Brogan +* Samuel Gaist +* SarahPythonista +* Sorin Sbarnea +* Stu-ops +* Tanuj Rai +* bengartner +* dariomesic +* jakkdl +* karlicoss +* popododo0720 +* sazsu +* slackline +* vyuroshchin +* zapl +* 🇺🇦 Sviatoslav Sydorenko (Святослав Сидоренко) + + +Happy testing, +The pytest Development Team diff --git a/doc/en/announce/release-9.0.1.rst b/doc/en/announce/release-9.0.1.rst new file mode 100644 index 00000000000..46af130e03c --- /dev/null +++ b/doc/en/announce/release-9.0.1.rst @@ -0,0 +1,18 @@ +pytest-9.0.1 +======================================= + +pytest 9.0.1 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. + +The full changelog is available at https://docs.pytest.org/en/stable/changelog.html. 
+ +Thanks to all of the contributors to this release: + +* Bruno Oliveira +* Ran Benita +* 🇺🇦 Sviatoslav Sydorenko (Святослав Сидоренко) + + +Happy testing, +The pytest Development Team diff --git a/doc/en/announce/release-9.0.2.rst b/doc/en/announce/release-9.0.2.rst new file mode 100644 index 00000000000..f184e1aa4b2 --- /dev/null +++ b/doc/en/announce/release-9.0.2.rst @@ -0,0 +1,22 @@ +pytest-9.0.2 +======================================= + +pytest 9.0.2 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. + +The full changelog is available at https://docs.pytest.org/en/stable/changelog.html. + +Thanks to all of the contributors to this release: + +* Alex Waygood +* Bruno Oliveira +* Fazeel Usmani +* Freya Bruhin +* Ran Benita +* Tom Most +* 🇺🇦 Sviatoslav Sydorenko (Святослав Сидоренко) + + +Happy testing, +The pytest Development Team diff --git a/doc/en/announce/release-9.0.3.rst b/doc/en/announce/release-9.0.3.rst new file mode 100644 index 00000000000..c9540218764 --- /dev/null +++ b/doc/en/announce/release-9.0.3.rst @@ -0,0 +1,38 @@ +pytest-9.0.3 +======================================= + +pytest 9.0.3 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. + +The full changelog is available at https://docs.pytest.org/en/stable/changelog.html. 
+ +Thanks to all of the contributors to this release: + +* Aditya Giri +* Alejandro Villate +* Bruno Oliveira +* Bubble-Interface +* Charles-Meldhine Madi Mnemoi +* DavidAG +* Denis Cherednichenko +* Dr Alex Mitre +* Freya +* Freya Bruhin +* Hugo van Kemenade +* John Litborn +* Liam DeVoe +* Lily Wu +* Maxime Grenu +* Ran Benita +* Randy Döring +* Ronald Eddy Jr +* Samuel Newbold +* Tejas Verma +* Vladimir +* jxramos +* 🇺🇦 Sviatoslav Sydorenko (Святослав Сидоренко) + + +Happy testing, +The pytest Development Team diff --git a/doc/en/backwards-compatibility.rst b/doc/en/backwards-compatibility.rst index 82f678b4dea..a7ee2253d67 100644 --- a/doc/en/backwards-compatibility.rst +++ b/doc/en/backwards-compatibility.rst @@ -53,14 +53,14 @@ History ========= -Focus primary on smooth transition - stance (pre 6.0) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Focus primarily on smooth transition - stance (pre 6.0) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Keeping backwards compatibility has a very high priority in the pytest project. Although we have deprecated functionality over the years, most of it is still supported. All deprecations in pytest were done because simpler or more efficient ways of accomplishing the same tasks have emerged, making the old way of doing things unnecessary. With the pytest 3.0 release, we introduced a clear communication scheme for when we will actually remove the old busted joint and politely ask you to use the new hotness instead, while giving you enough time to adjust your tests or raise concerns if there are valid reasons to keep deprecated functionality around. -To communicate changes, we issue deprecation warnings using a custom warning hierarchy (see :ref:`internal-warnings`). These warnings may be suppressed using the standard means: ``-W`` command-line flag or ``filterwarnings`` ini options (see :ref:`warnings`), but we suggest to use these sparingly and temporarily, and heed the warnings when possible. 
+To communicate changes, we issue deprecation warnings using a custom warning hierarchy (see :ref:`internal-warnings`). These warnings may be suppressed using the standard means: :option:`-W` command-line flag or :confval:`filterwarnings` configuration option (see :ref:`warnings`), but we suggest to use these sparingly and temporarily, and heed the warnings when possible. We will only start the removal of deprecated functionality in major releases (e.g. if we deprecate something in 3.0, we will start to remove it in 4.0), and keep it around for at least two minor releases (e.g. if we deprecate something in 3.9 and 4.0 is the next release, we start to remove it in 5.0, not in 4.0). @@ -83,9 +83,10 @@ Released pytest versions support all Python versions that are actively maintaine ============== =================== pytest version min. Python version ============== =================== -8.4+ 3.9+ -8.0+ 3.8+ -7.1+ 3.7+ +9.0+ 3.10+ +8.4 3.9+ +8.0 - 8.3 3.8+ +7.1 - 7.4 3.7+ 6.2 - 7.0 3.6+ 5.0 - 6.1 3.5+ 3.3 - 4.6 2.7, 3.4+ diff --git a/doc/en/builtin.rst b/doc/en/builtin.rst index 8aa6fef681c..9d38b329454 100644 --- a/doc/en/builtin.rst +++ b/doc/en/builtin.rst @@ -12,17 +12,17 @@ For information on plugin hooks and objects, see :ref:`plugins`. For information on the ``pytest.mark`` mechanism, see :ref:`mark`. -For information about fixtures, see :ref:`fixtures`. To see a complete list of available fixtures (add ``-v`` to also see fixtures with leading ``_``), type : +For information about fixtures, see :ref:`fixtures`. To see a complete list of available fixtures (add :option:`-v` to also see fixtures with leading ``_``), type : .. 
code-block:: pytest $ pytest --fixtures -v =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python cachedir: .pytest_cache rootdir: /home/sweet/project collected 0 items - cache -- .../_pytest/cacheprovider.py:556 + cache -- .../_pytest/cacheprovider.py:566 Return a cache object that can persist state between testing sessions. cache.get(key, default) @@ -33,7 +33,48 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a Values can be any object handled by the json stdlib module. - capsysbinary -- .../_pytest/capture.py:1024 + capsys -- .../_pytest/capture.py:1000 + Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``. + + The captured output is made available via ``capsys.readouterr()`` method + calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``text`` objects. + + Returns an instance of :class:`CaptureFixture[str] `. + + Example: + + .. code-block:: python + + def test_output(capsys): + print("hello") + captured = capsys.readouterr() + assert captured.out == "hello\n" + + capteesys -- .../_pytest/capture.py:1028 + Enable simultaneous text capturing and pass-through of writes + to ``sys.stdout`` and ``sys.stderr`` as defined by ``--capture=``. + + + The captured output is made available via ``capteesys.readouterr()`` method + calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``text`` objects. + + The output is also passed-through, allowing it to be "live-printed", + reported, or both as defined by ``--capture=``. + + Returns an instance of :class:`CaptureFixture[str] `. + + Example: + + .. 
code-block:: python + + def test_output(capteesys): + print("hello") + captured = capteesys.readouterr() + assert captured.out == "hello\n" + + capsysbinary -- .../_pytest/capture.py:1063 Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``. The captured output is made available via ``capsysbinary.readouterr()`` @@ -51,7 +92,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a captured = capsysbinary.readouterr() assert captured.out == b"hello\n" - capfd -- .../_pytest/capture.py:1052 + capfd -- .../_pytest/capture.py:1091 Enable text capturing of writes to file descriptors ``1`` and ``2``. The captured output is made available via ``capfd.readouterr()`` method @@ -69,7 +110,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a captured = capfd.readouterr() assert captured.out == "hello\n" - capfdbinary -- .../_pytest/capture.py:1080 + capfdbinary -- .../_pytest/capture.py:1119 Enable bytes capturing of writes to file descriptors ``1`` and ``2``. The captured output is made available via ``capfd.readouterr()`` method @@ -87,25 +128,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a captured = capfdbinary.readouterr() assert captured.out == b"hello\n" - capsys -- .../_pytest/capture.py:996 - Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``. - - The captured output is made available via ``capsys.readouterr()`` method - calls, which return a ``(out, err)`` namedtuple. - ``out`` and ``err`` will be ``text`` objects. - - Returns an instance of :class:`CaptureFixture[str] `. - - Example: - - .. 
code-block:: python - - def test_output(capsys): - print("hello") - captured = capsys.readouterr() - assert captured.out == "hello\n" - - doctest_namespace [session scope] -- .../_pytest/doctest.py:741 + doctest_namespace [session scope] -- .../_pytest/doctest.py:722 Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests. @@ -119,7 +142,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a For more details: :ref:`doctest_namespace`. - pytestconfig [session scope] -- .../_pytest/fixtures.py:1345 + pytestconfig [session scope] -- .../_pytest/fixtures.py:1431 Session-scoped fixture that returns the session's :class:`pytest.Config` object. @@ -129,7 +152,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a if pytestconfig.get_verbosity() > 0: ... - record_property -- .../_pytest/junitxml.py:280 + record_property -- .../_pytest/junitxml.py:277 Add extra properties to the calling test. User properties become part of the test report and are available to the @@ -143,13 +166,13 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a def test_function(record_property): record_property("example_key", 1) - record_xml_attribute -- .../_pytest/junitxml.py:303 + record_xml_attribute -- .../_pytest/junitxml.py:300 Add extra xml attributes to the tag for the calling test. The fixture is callable with ``name, value``. The value is automatically XML-encoded. - record_testsuite_property [session scope] -- .../_pytest/junitxml.py:341 + record_testsuite_property [session scope] -- .../_pytest/junitxml.py:338 Record a new ```` tag as child of the root ````. This is suitable to writing global information regarding the entire test @@ -191,7 +214,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a .. 
_legacy_path: https://py.readthedocs.io/en/latest/path.html - caplog -- .../_pytest/logging.py:598 + caplog -- .../_pytest/logging.py:596 Access and control log capturing. Captured logs are available through the following properties/methods:: @@ -202,7 +225,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a * caplog.record_tuples -> list of (logger_name, level, message) tuples * caplog.clear() -> clear captured records and formatted log output string - monkeypatch -- .../_pytest/monkeypatch.py:31 + monkeypatch -- .../_pytest/monkeypatch.py:33 A convenient fixture for monkey-patching. The fixture provides these methods to modify objects, dictionaries, or @@ -226,15 +249,18 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a To undo modifications done by the fixture in a contained scope, use :meth:`context() `. - recwarn -- .../_pytest/recwarn.py:35 + recwarn -- .../_pytest/recwarn.py:34 Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions. See :ref:`warnings` for information on warning categories. - tmp_path_factory [session scope] -- .../_pytest/tmpdir.py:241 + subtests -- .../_pytest/subtests.py:129 + Provides subtests functionality. + + tmp_path_factory [session scope] -- .../_pytest/tmpdir.py:265 Return a :class:`pytest.TempPathFactory` instance for the test session. - tmp_path -- .../_pytest/tmpdir.py:256 + tmp_path -- .../_pytest/tmpdir.py:280 Return a temporary directory (as :class:`pathlib.Path` object) which is unique to each test function invocation. The temporary directory is created as a subdirectory diff --git a/doc/en/changelog.rst b/doc/en/changelog.rst index c92cd7d4263..5bcd44c4226 100644 --- a/doc/en/changelog.rst +++ b/doc/en/changelog.rst @@ -31,6 +31,932 @@ with advance notice in the **Deprecations** section of releases. .. 
The test statistics are visible `on the web interface
The terminal progress plugin added in pytest 9.0 is now automatically disabled when iTerm2 is detected, as it generated desktop notifications instead of providing the desired functionality.
+ + + +Packaging updates and notes for downstreams +------------------------------------------- + +- `#13933 `_: The tox configuration has been adjusted to make sure the desired + version string can be passed into its :ref:`package_env` through + the ``SETUPTOOLS_SCM_PRETEND_VERSION_FOR_PYTEST`` environment + variable as a part of the release process -- by :user:`webknjaz`. + + + +Contributor-facing changes +-------------------------- + +- `#13891 `_, `#13942 `_: The CI/CD part of the release automation is now capable of + creating GitHub Releases without having a Git checkout on + disk -- by :user:`bluetech` and :user:`webknjaz`. + + +- `#13933 `_: The tox configuration has been adjusted to make sure the desired + version string can be passed into its :ref:`package_env` through + the ``SETUPTOOLS_SCM_PRETEND_VERSION_FOR_PYTEST`` environment + variable as a part of the release process -- by :user:`webknjaz`. + + +pytest 9.0.0 (2025-11-05) +========================= + +New features +------------ + + +- `#1367 `_: **Support for subtests** has been added. + + :ref:`subtests ` are an alternative to parametrization, useful in situations where the parametrization values are not all known at collection time. + + Example: + + .. code-block:: python + + def contains_docstring(p: Path) -> bool: + """Return True if the given Python file contains a top-level docstring.""" + ... + + + def test_py_files_contain_docstring(subtests: pytest.Subtests) -> None: + for path in Path.cwd().glob("*.py"): + with subtests.test(path=str(path)): + assert contains_docstring(path) + + + Each assert failure or error is caught by the context manager and reported individually, giving a clear picture of all files that are missing a docstring. + + In addition, :meth:`unittest.TestCase.subTest` is now also supported. + + This feature was originally implemented as a separate plugin in `pytest-subtests `__, but since then has been merged into the core. + + .. 
note:: + + This feature is experimental and will likely evolve in future releases. By that we mean that we might change how subtests are reported on failure, but the functionality and how to use it are stable. + + +- `#13743 `_: Added support for **native TOML configuration files**. + + While pytest, since version 6, supports configuration in ``pyproject.toml`` files under ``[tool.pytest.ini_options]``, + it does so in an "INI compatibility mode", where all configuration values are treated as strings or list of strings. + Now, pytest supports the native TOML data model. + + In ``pyproject.toml``, the native TOML configuration is under the ``[tool.pytest]`` table. + + .. code-block:: toml + + # pyproject.toml + [tool.pytest] + minversion = "9.0" + addopts = ["-ra", "-q"] + testpaths = [ + "tests", + "integration", + ] + + The ``[tool.pytest.ini_options]`` table remains supported, but both tables cannot be used at the same time. + + If you prefer to use a separate configuration file, or don't use ``pyproject.toml``, you can use ``pytest.toml`` or ``.pytest.toml``: + + .. code-block:: toml + + # pytest.toml or .pytest.toml + [pytest] + minversion = "9.0" + addopts = ["-ra", "-q"] + testpaths = [ + "tests", + "integration", + ] + + The documentation now (sometimes) shows configuration snippets in both TOML and INI formats, in a tabbed interface. + + See :ref:`config file formats` for full details. + + +- `#13823 `_: Added a **"strict mode"** enabled by the :confval:`strict` configuration option. + + When set to ``true``, the :confval:`strict` option currently enables + + * :confval:`strict_config` + * :confval:`strict_markers` + * :confval:`strict_parametrization_ids` + * :confval:`strict_xfail` + + The individual strictness options can be explicitly set to override the global :confval:`strict` setting. + + The previously-deprecated ``--strict`` command-line flag now enables strict mode. 
Previously, this option only impacted package imports; now it also impacts test discovery.
+ + +- `#13678 `_: Added a new :confval:`faulthandler_exit_on_timeout` configuration option set to "false" by default to let `faulthandler` interrupt the `pytest` process after a timeout in case of deadlock. + + Previously, a `faulthandler` timeout would only dump the traceback of all threads to stderr, but would not interrupt the `pytest` process. + + -- by :user:`ogrisel`. + + +- `#13829 `_: Added support for configuration option aliases via the ``aliases`` parameter in :meth:`Parser.addini() `. + + Plugins can now register alternative names for configuration options, + allowing for more flexibility in configuration naming and supporting backward compatibility when renaming options. + The canonical name always takes precedence if both the canonical name and an alias are specified in the configuration file. + + + +Improvements in existing functionality +-------------------------------------- + +- `#13330 `_: Having pytest configuration spread over more than one file (for example having both a ``pytest.ini`` file and ``pyproject.toml`` with a ``[tool.pytest.ini_options]`` table) will now print a warning to make it clearer to the user that only one of them is actually used. + + -- by :user:`sgaist` + + +- `#13574 `_: The single argument ``--version`` no longer loads the entire plugin infrastructure, making it faster and more reliable when displaying only the pytest version. + + Passing ``--version`` twice (e.g., ``pytest --version --version``) retains the original behavior, showing both the pytest version and plugin information. + + .. note:: + + Since ``--version`` is now processed early, it only takes effect when passed directly via the command line. It will not work if set through other mechanisms, such as :envvar:`PYTEST_ADDOPTS` or :confval:`addopts`. 
+ + +- `#13823 `_: Added :confval:`strict_xfail` as an alias to the ``xfail_strict`` option, + :confval:`strict_config` as an alias to the ``--strict-config`` flag, + and :confval:`strict_markers` as an alias to the ``--strict-markers`` flag. + This makes all strictness options consistently have configuration options with the prefix ``strict_``. + +- `#13700 `_: `--junitxml` no longer prints the `generated xml file` summary at the end of the pytest session when `--quiet` is given. + + +- `#13732 `_: Previously, when filtering warnings, pytest would fail if the filter referenced a class that could not be imported. Now, this only outputs a message indicating the problem. + + +- `#13859 `_: Clarify the error message for `pytest.raises()` when a regex `match` fails. + + +- `#13861 `_: Better sentence structure in a test's expected error message. Previously, the error message would be "expected exception must be , but got ". Now, it is "Expected , but got ". + + +Removals and backward incompatible breaking changes +--------------------------------------------------- + +- `#12083 `_: Fixed a bug where an invocation such as `pytest a/ a/b` would cause only tests from `a/b` to run, and not other tests under `a/`. + + The fix entails a few breaking changes to how such overlapping arguments and duplicates are handled: + + 1. `pytest a/b a/` or `pytest a/ a/b` are equivalent to `pytest a`; if an argument overlaps another arguments, only the prefix remains. + + 2. `pytest x.py x.py` is equivalent to `pytest x.py`; previously such an invocation was taken as an explicit request to run the tests from the file twice. + + If you rely on these behaviors, consider using :ref:`--keep-duplicates `, which retains its existing behavior (including the bug). + + +- `#13719 `_: Support for Python 3.9 is dropped following its end of life. 
Made the type annotations of :func:`pytest.skip` and friends more spec-compliant to have them work across more type checkers.
+ + +- `#13537 `_: Fixed a bug in which :class:`ExceptionGroup` with only ``Skipped`` exceptions in teardown was not handled correctly and showed as error. + + +- `#13598 `_: Fixed possible collection confusion on Windows when short paths and symlinks are involved. + + +- `#13716 `_: Fixed a bug where a nonsensical invocation like ``pytest x.py[a]`` (a file cannot be parametrized) was silently treated as ``pytest x.py``. This is now a usage error. + + +- `#13722 `_: Fixed a misleading assertion failure message when using :func:`pytest.approx` on mappings with differing lengths. + + +- `#13773 `_: Fixed the static fixture closure calculation to properly consider transitive dependencies requested by overridden fixtures. + + +- `#13816 `_: Fixed :func:`pytest.approx` which now returns a clearer error message when comparing mappings with different keys. + + +- `#13849 `_: Hidden ``.pytest.ini`` files are now picked up as the config file even if empty. + This was an inconsistency with non-hidden ``pytest.ini``. + + +- `#13865 `_: Fixed `--show-capture` with `--tb=line`. + + +- `#13522 `_: Fixed :fixture:`pytester` in subprocess mode ignored all :attr:`pytester.plugins ` except the first. + + Fixed :fixture:`pytester` in subprocess mode silently ignored non-str :attr:`pytester.plugins `. + Now it errors instead. + If you are affected by this, specify the plugin by name, or switch the affected tests to use :func:`pytester.runpytest_inprocess ` explicitly instead. + + + +Packaging updates and notes for downstreams +------------------------------------------- + +- `#13791 `_: Minimum requirements on ``iniconfig`` and ``packaging`` were bumped to ``1.0.1`` and ``22.0.0``, respectively. + + + +Contributor-facing changes +-------------------------- + +- `#12244 `_: Fixed self-test failures when `TERM=dumb`. + + +- `#12474 `_: Added scheduled GitHub Action Workflow to run Sphinx linkchecks in repo documentation. 
+ + +- `#13621 `_: pytest's own testsuite now handles the ``lsof`` command hanging (e.g. due to unreachable network filesystems), with the affected selftests being skipped after 10 seconds. + + +- `#13638 `_: Fixed deprecated :command:`gh pr new` command in :file:`scripts/prepare-release-pr.py`. + The script now uses :command:`gh pr create` which is compatible with GitHub CLI v2.0+. + + +- `#13695 `_: Flush `stdout` and `stderr` in `Pytester.run` to avoid truncated outputs in `test_faulthandler.py::test_timeout` on CI -- by :user:`ogrisel`. + + +- `#13771 `_: Skip `test_do_not_collect_symlink_siblings` on Windows environments without symlink support to avoid false negatives. + + +- `#13841 `_: ``tox>=4`` is now required when contributing to pytest. + +- `#13625 `_: Added missing docstrings to ``pytest_addoption()``, ``pytest_configure()``, and ``cacheshow()`` functions in ``cacheprovider.py``. + + + +Miscellaneous internal changes +------------------------------ + +- `#13830 `_: Configuration overrides (``-o``/``--override-ini``) are now processed during startup rather than during :func:`config.getini() `. + + +pytest 8.4.2 (2025-09-03) +========================= + +Bug fixes +--------- + +- `#13478 `_: Fixed a crash when using :confval:`console_output_style` with ``times`` and a module is skipped. + + +- `#13530 `_: Fixed a crash when using :func:`pytest.approx` and :class:`decimal.Decimal` instances with the :class:`decimal.FloatOperation` trap set. + + +- `#13549 `_: No longer evaluate type annotations in Python ``3.14`` when inspecting function signatures. + + This prevents crashes during module collection when modules do not explicitly use ``from __future__ import annotations`` and import types for annotations within a ``if TYPE_CHECKING:`` block. + + +- `#13559 `_: Added missing `int` and `float` variants to the `Literal` type annotation of the `type` parameter in :meth:`pytest.Parser.addini`. 
Corrected ``_pytest.terminal.TerminalReporter.isatty`` to support
    being called as a method. Before it was just a boolean, which could
    break correct code when using ``-o log_cli=true``.
+ + +- `#12874 `_: We dropped support for Python 3.8 following its end of life (2024-10-07). + + +- `#12960 `_: Test functions containing a yield now cause an explicit error. They have not been run since pytest 4.0, and were previously marked as an expected failure and deprecation warning. + + See :ref:`the docs ` for more information. + + + +Deprecations (removal in next major release) +-------------------------------------------- + +- `#10839 `_: Requesting an asynchronous fixture without a `pytest_fixture_setup` hook that resolves it will now give a DeprecationWarning. This most commonly happens if a sync test requests an async fixture. This should have no effect on a majority of users with async tests or fixtures using async pytest plugins, but may affect non-standard hook setups or ``autouse=True``. For guidance on how to work around this warning see :ref:`sync-test-async-fixture`. + + + +New features +------------ + +- `#11538 `_: Added :class:`pytest.RaisesGroup` as an equivalent to :func:`pytest.raises` for expecting :exc:`ExceptionGroup`. Also adds :class:`pytest.RaisesExc` which is now the logic behind :func:`pytest.raises` and used as parameter to :class:`pytest.RaisesGroup`. ``RaisesGroup`` includes the ability to specify multiple different expected exceptions, the structure of nested exception groups, and flags for emulating :ref:`except* `. See :ref:`assert-matching-exception-groups` and docstrings for more information. + + +- `#12081 `_: Added :fixture:`capteesys` to capture AND pass output to next handler set by ``--capture=``. + + +- `#12504 `_: :func:`pytest.mark.xfail` now accepts :class:`pytest.RaisesGroup` for the ``raises`` parameter when you expect an exception group. You can also pass a :class:`pytest.RaisesExc` if you e.g. want to make use of the ``check`` parameter. + + +- `#12713 `_: New `--force-short-summary` option to force condensed summary output regardless of verbosity level. 
+ + This lets users still see condensed summary output of failures for quick reference in log files from job outputs, being especially useful if non-condensed output is very verbose. + + +- `#12749 `_: pytest traditionally collects classes/functions in the test module namespace even if they are imported from another file. + + For example: + + .. code-block:: python + + # contents of src/domain.py + class Testament: ... + + + # contents of tests/test_testament.py + from domain import Testament + + + def test_testament(): ... + + In this scenario with the default options, pytest will collect the class `Testament` from `tests/test_testament.py` because it starts with `Test`, even though in this case it is a production class being imported in the test module namespace. + + This behavior can now be prevented by setting the new :confval:`collect_imported_tests` configuration option to ``false``, which will make pytest collect classes/functions from test files **only** if they are defined in that file. + + -- by :user:`FreerGit` + + +- `#12765 `_: Thresholds to trigger snippet truncation can now be set with :confval:`truncation_limit_lines` and :confval:`truncation_limit_chars`. + + See :ref:`truncation-params` for more information. + + +- `#13125 `_: :confval:`console_output_style` now supports ``times`` to show execution time of each test. + + +- `#13192 `_: :func:`pytest.raises` will now raise a warning when passing an empty string to ``match``, as this will match against any value. Use ``match="^$"`` if you want to check that an exception has no message. + + +- `#13192 `_: :func:`pytest.raises` will now print a helpful string diff if matching fails and the match parameter has ``^`` and ``$`` and is otherwise escaped. + + +- `#13192 `_: You can now pass :func:`with pytest.raises(check=fn): `, where ``fn`` is a function which takes a raised exception and returns a boolean. 
The ``raises`` fails if no exception was raised (as usual), passes if an exception is raised and ``fn`` returns ``True`` (as well as ``match`` and the type matching, if specified, which are checked before), and propagates the exception if ``fn`` returns ``False`` (which likely also fails the test). + + +- `#13228 `_: :ref:`hidden-param` can now be used in ``id`` of :func:`pytest.param` or in + ``ids`` of :py:func:`Metafunc.parametrize `. + It hides the parameter set from the test name. + + +- `#13253 `_: New flag: :ref:`--disable-plugin-autoload ` which works as an alternative to :envvar:`PYTEST_DISABLE_PLUGIN_AUTOLOAD` when setting environment variables is inconvenient; and allows setting it in config files with :confval:`addopts`. + + + +Improvements in existing functionality +-------------------------------------- + +- `#10224 `_: pytest's ``short`` and ``long`` traceback styles (:ref:`how-to-modifying-python-tb-printing`) + now have partial :pep:`657` support and will show specific code segments in the + traceback. + + .. code-block:: pytest + + ================================= FAILURES ================================= + _______________________ test_gets_correct_tracebacks _______________________ + + test_tracebacks.py:12: in test_gets_correct_tracebacks + assert manhattan_distance(p1, p2) == 1 + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + test_tracebacks.py:6: in manhattan_distance + return abs(point_1.x - point_2.x) + abs(point_1.y - point_2.y) + ^^^^^^^^^ + E AttributeError: 'NoneType' object has no attribute 'x' + + -- by :user:`ammaraskar` + + +- `#11118 `_: Now :confval:`pythonpath` configures `$PYTHONPATH` earlier than before during the initialization process, which now also affects plugins loaded via the `-p` command-line option. + + -- by :user:`millerdev` + + +- `#11381 `_: The ``type`` parameter of the ``parser.addini`` method now accepts `"int"` and ``"float"`` parameters, facilitating the parsing of configuration values in the configuration file. 
+ + Example: + + .. code-block:: python + + def pytest_addoption(parser): + parser.addini("int_value", type="int", default=2, help="my int value") + parser.addini("float_value", type="float", default=4.2, help="my float value") + + The `pytest.ini` file: + + .. code-block:: ini + + [pytest] + int_value = 3 + float_value = 5.4 + + +- `#11525 `_: Fixtures are now clearly represented in the output as a "fixture object", not as a normal function as before, making it easy for beginners to catch mistakes such as referencing a fixture declared in the same module but not requested in the test function. + + -- by :user:`the-compiler` and :user:`glyphack` + + +- `#12426 `_: A warning is now issued when :ref:`pytest.mark.usefixtures ref` is used without specifying any fixtures. Previously, empty usefixtures markers were silently ignored. + + +- `#12707 `_: Exception chains can be navigated when dropped into Pdb in Python 3.13+. + + +- `#12736 `_: Added a new attribute `name` with the fixed value `"pytest tests"` to the root tag `testsuites` of the junit-xml generated by pytest. + + This attribute is part of many junit-xml specifications and is even part of the `junit-10.xsd` specification that pytest's implementation is based on. + + +- `#12943 `_: If a test fails with an exceptiongroup with a single exception, the contained exception will now be displayed in the short test summary info. + + +- `#12958 `_: A number of :ref:`unraisable ` enhancements: + + * Set the unraisable hook as early as possible and unset it as late as possible, to collect the most possible number of unraisable exceptions. + * Call the garbage collector just before unsetting the unraisable hook, to collect any straggling exceptions. + * Collect multiple unraisable exceptions per test phase. + * Report the :mod:`tracemalloc` allocation traceback (if available). + * Avoid using a generator based hook to allow handling :class:`StopIteration` in test failures. 
+ * Report the unraisable exception as the cause of the :class:`pytest.PytestUnraisableExceptionWarning` exception if raised. + * Compute the ``repr`` of the unraisable object in the unraisable hook so you get the latest information if available, and should help with resurrection of the object. + + +- `#13010 `_: :func:`pytest.approx` now can compare collections that contain numbers and non-numbers mixed. + + +- `#13016 `_: A number of :ref:`threadexception ` enhancements: + + * Set the excepthook as early as possible and unset it as late as possible, to collect the most possible number of unhandled exceptions from threads. + * Collect multiple thread exceptions per test phase. + * Report the :mod:`tracemalloc` allocation traceback (if available). + * Avoid using a generator based hook to allow handling :class:`StopIteration` in test failures. + * Report the thread exception as the cause of the :class:`pytest.PytestUnhandledThreadExceptionWarning` exception if raised. + * Extract the ``name`` of the thread object in the excepthook which should help with resurrection of the thread. + + +- `#13031 `_: An empty parameter set as in ``pytest.mark.parametrize([], ids=idfunc)`` will no longer trigger a call to ``idfunc`` with internal objects. + + +- `#13115 `_: Allows supplying ``ExceptionGroup[Exception]`` and ``BaseExceptionGroup[BaseException]`` to ``pytest.raises`` to keep full typing on :class:`ExceptionInfo `: + + .. code-block:: python + + with pytest.raises(ExceptionGroup[Exception]) as exc_info: + some_function() + + Parametrizing with other exception types remains an error - we do not check the types of child exceptions and thus do not permit code that might look like we do. + + +- `#13122 `_: The ``--stepwise`` mode received a number of improvements: + + * It no longer forgets the last failed test in case pytest is executed later without the flag. + + This enables the following workflow: + + 1. 
Execute pytest with ``--stepwise``, pytest then stops at the first failing test; + 2. Iteratively update the code and run the test in isolation, without the ``--stepwise`` flag (for example in an IDE), until it is fixed. + 3. Execute pytest with ``--stepwise`` again and pytest will continue from the previously failed test, and if it passes, continue on to the next tests. + + Previously, at step 3, pytest would start from the beginning, forgetting the previously failed test. + + This change however might cause issues if the ``--stepwise`` mode is used far apart in time, as the state might get stale, so the internal state will be reset automatically in case the test suite changes (for now only the number of tests are considered for this, we might change/improve this in the future). + + * New ``--stepwise-reset``/``--sw-reset`` flag, allowing the user to explicitly reset the stepwise state and restart the workflow from the beginning. + + +- `#13308 `_: Added official support for Python 3.14. + + +- `#13380 `_: Fix :class:`ExceptionGroup` traceback filtering to exclude pytest internals. + + +- `#13415 `_: The author metadata of the BibTex example is now correctly formatted with last names following first names. + An example of BibLaTex has been added. + BibTex and BibLaTex examples now clearly indicate that what is cited is software. + + -- by :user:`willynilly` + + +- `#13420 `_: Improved test collection performance by optimizing path resolution used in ``FSCollector``. + + +- `#13457 `_: The error message about duplicate parametrization no longer displays an internal stack trace. + + +- `#4112 `_: Using :ref:`pytest.mark.usefixtures ` on :func:`pytest.param` now produces an error instead of silently doing nothing. + + +- `#5473 `_: Replace `:` with `;` in the assertion rewrite warning message so it can be filtered using standard Python warning filters before calling :func:`pytest.main`. 
+ + +- `#6985 `_: Improved :func:`pytest.approx` to enhance the readability of value ranges and tolerances between 0.001 and 1000. + * The `repr` method now provides clearer output for values within those ranges, making it easier to interpret the results. + * Previously, the output for those ranges of values and tolerances was displayed in scientific notation (e.g., `42 ± 1.0e+00`). The updated method now presents the tolerance as a decimal for better readability (e.g., `42 ± 1`). + + Example: + + **Previous Output:** + + .. code-block:: console + + >>> pytest.approx(42, abs=1) + 42 ± 1.0e+00 + + **Current Output:** + + .. code-block:: console + + >>> pytest.approx(42, abs=1) + 42 ± 1 + + -- by :user:`fazeelghafoor` + + +- `#7683 `_: The formerly optional ``pygments`` dependency is now required, causing output always to be source-highlighted (unless disabled via the ``--code-highlight=no`` CLI option). + + + +Bug fixes +--------- + +- `#10404 `_: Apply filterwarnings from config/cli as soon as possible, and revert them as late as possible + so that warnings as errors are collected throughout the pytest run and before the + unraisable and threadexcept hooks are removed. + + This allows very late warnings and unraisable/threadexcept exceptions to fail the test suite. + + This also changes the warning that the lsof plugin issues from PytestWarning to the new warning PytestFDWarning so it can be more easily filtered. + + +- `#11067 `_: The test report is now consistent regardless if the test xfailed via :ref:`pytest.mark.xfail ` or :func:`pytest.fail`. + + Previously, *xfailed* tests via the marker would have the string ``"reason: "`` prefixed to the message, while those *xfailed* via the function did not. The prefix has been removed. + + +- `#12008 `_: In :pr:`11220`, an unintended change in reordering was introduced by changing the way indices were assigned to direct params. 
More specifically, before that change, the indices of direct params to metafunc's callspecs were assigned after all parametrizations took place. Now, that change is reverted. + + +- `#12863 `_: Fix applying markers, including :ref:`pytest.mark.parametrize ` when placed above `@staticmethod` or `@classmethod`. + + +- `#12929 `_: Handle StopIteration from test cases, setup and teardown correctly. + + +- `#12938 `_: Fixed ``--durations-min`` argument not respected if ``-vv`` is used. + + +- `#12946 `_: Fixed missing help for :mod:`pdb` commands wrapped by pytest -- by :user:`adamchainz`. + + +- `#12981 `_: Prevent exceptions in :func:`pytest.Config.add_cleanup` callbacks preventing further cleanups. + + +- `#13047 `_: Restore :func:`pytest.approx` handling of equality checks between `bool` and `numpy.bool_` types. + + Comparing `bool` and `numpy.bool_` using :func:`pytest.approx` accidentally changed in version `8.3.4` and `8.3.5` to no longer match: + + .. code-block:: pycon + + >>> import numpy as np + >>> from pytest import approx + >>> [np.True_, np.True_] == pytest.approx([True, True]) + False + + This has now been fixed: + + .. code-block:: pycon + + >>> [np.True_, np.True_] == pytest.approx([True, True]) + True + + +- `#13119 `_: Improved handling of invalid regex patterns for filter warnings by providing a clear error message. + + +- `#13175 `_: The diff is now also highlighted correctly when comparing two strings. + + +- `#13248 `_: Fixed an issue where passing a ``scope`` in :py:func:`Metafunc.parametrize ` with ``indirect=True`` + could result in other fixtures being unable to depend on the parametrized fixture. + + +- `#13291 `_: Fixed ``repr`` of ``attrs`` objects in assertion failure messages when using ``attrs>=25.2``. + + +- `#13312 `_: Fixed a possible ``KeyError`` crash on PyPy during collection of tests involving higher-scoped parameters. + + +- `#13345 `_: Fix type hints for :attr:`pytest.TestReport.when` and :attr:`pytest.TestReport.location`. 
+ + +- `#13377 `_: Fixed handling of test methods with positional-only parameter syntax. + + Now, methods are supported that formally define ``self`` as positional-only + and/or fixture parameters as keyword-only, e.g.: + + .. code-block:: python + + class TestClass: + + def test_method(self, /, *, fixture): ... + + Before, this caused an internal error in pytest. + + +- `#13384 `_: Fixed an issue where pytest could report negative durations. + + +- `#13420 `_: Added ``lru_cache`` to ``nodes._check_initialpaths_for_relpath``. + + +- `#9037 `_: Honor :confval:`disable_test_id_escaping_and_forfeit_all_rights_to_community_support` when escaping ids in parametrized tests. + + + +Improved documentation +---------------------- + +- `#12535 `_: `This + example` + showed ``print`` statements that do not exactly reflect what the + different branches actually do. The fix makes the example more precise. + + +- `#13218 `_: Pointed out in the :func:`pytest.approx` documentation that it considers booleans unequal to numeric zero or one. + + +- `#13221 `_: Improved grouping of CLI options in the ``--help`` output. + + +- `#6649 `_: Added :class:`~pytest.TerminalReporter` to the :ref:`api-reference` documentation page. + + +- `#8612 `_: Add a recipe for handling abstract test classes in the documentation. + + A new example has been added to the documentation to demonstrate how to use a mixin class to handle abstract + test classes without manually setting the ``__test__`` attribute for subclasses. + This ensures that subclasses of abstract test classes are automatically collected by pytest. + + + +Packaging updates and notes for downstreams +------------------------------------------- + +- `#13317 `_: Specified minimum allowed versions of ``colorama``, ``iniconfig``, + and ``packaging``; and bumped the minimum allowed version + of ``exceptiongroup`` for ``python_version<'3.11'`` from a release + candidate to a full release. 
+ + +Contributor-facing changes +-------------------------- + +- `#12017 `_: Mixed internal improvements: + + * Migrate formatting to f-strings in some tests. + * Use type-safe constructs in JUnitXML tests. + * Moved ``MockTiming`` into ``_pytest.timing``. + + -- by :user:`RonnyPfannschmidt` + + +- `#12647 `_: Fixed running the test suite with the ``hypothesis`` pytest plugin. + + + +Miscellaneous internal changes +------------------------------ + +- `#6649 `_: Added :class:`~pytest.TerminalReporter` to the public pytest API, as it is part of the signature of the :hook:`pytest_terminal_summary` hook. + + pytest 8.3.5 (2025-03-02) ========================= @@ -1931,6 +2857,7 @@ Breaking Changes - `#8246 `_: ``--version`` now writes version information to ``stdout`` rather than ``stderr``. +- `#8592 `_: The ``pytest_cmdline_preparse`` hook has been removed following its deprecation. See :ref:`the deprecation note ` for more details. - `#8733 `_: Drop a workaround for `pyreadline `__ that made it work with ``--pdb``. @@ -8499,10 +9426,10 @@ time or change existing behaviors in order to make them less surprising/more use non-ascii characters. Thanks Bruno Oliveira for the PR. - fix #1204: another error when collecting with a nasty __getattr__(). - Thanks Florian Bruhin for the PR. + Thanks Freya Bruhin for the PR. - fix the summary printed when no tests did run. - Thanks Florian Bruhin for the PR. + Thanks Freya Bruhin for the PR. - fix #1185 - ensure MANIFEST.in exactly matches what should go to a sdist - a number of documentation modernizations wrt good practices. @@ -8624,7 +9551,7 @@ time or change existing behaviors in order to make them less surprising/more use - fix issue934: when string comparison fails and a diff is too large to display without passing -vv, still show a few lines of the diff. - Thanks Freya Bruhin for the report and Bruno Oliveira for the PR. 
- fix issue736: Fix a bug where fixture params would be discarded when combined with parametrization markers. @@ -8637,7 +9564,7 @@ time or change existing behaviors in order to make them less surprising/more use - parametrize now also generates meaningful test IDs for enum, regex and class objects (as opposed to class instances). - Thanks to Florian Bruhin for the PR. + Thanks to Freya Bruhin for the PR. - Add 'warns' to assert that warnings are thrown (like 'raises'). Thanks to Eric Hunsberger for the PR. @@ -8764,7 +9691,7 @@ time or change existing behaviors in order to make them less surprising/more use one will also have a "reprec" attribute with the recorded events/reports. - fix monkeypatch.setattr("x.y", raising=False) to actually not raise - if "y" is not a preexisting attribute. Thanks Florian Bruhin. + if "y" is not a preexisting attribute. Thanks Freya Bruhin. - fix issue741: make running output from testdir.run copy/pasteable Thanks Bruno Oliveira. @@ -8824,7 +9751,7 @@ time or change existing behaviors in order to make them less surprising/more use - fix issue833: --fixtures now shows all fixtures of collected test files, instead of just the fixtures declared on the first one. - Thanks Florian Bruhin for reporting and Bruno Oliveira for the PR. + Thanks Freya Bruhin for reporting and Bruno Oliveira for the PR. - fix issue863: skipped tests now report the correct reason when a skip/xfail condition is met when using multiple markers. 
diff --git a/doc/en/conf.py b/doc/en/conf.py index c89e14d07fa..81156493131 100644 --- a/doc/en/conf.py +++ b/doc/en/conf.py @@ -34,6 +34,7 @@ "sphinx.ext.todo", "sphinx.ext.viewcode", "sphinx_removed_in", + "sphinx_inline_tabs", "sphinxcontrib_trio", "sphinxcontrib.towncrier.ext", # provides `towncrier-draft-entries` directive "sphinx_issues", # implements `:issue:`, `:pr:` and other GH-related roles diff --git a/doc/en/contact.rst b/doc/en/contact.rst index b2a1368eaba..311224eeef0 100644 --- a/doc/en/contact.rst +++ b/doc/en/contact.rst @@ -40,7 +40,7 @@ Mail in the pytest core team, who can also be contacted individually: * Bruno Oliveira (:user:`nicoddemus`, `bruno@pytest.org `_) - * Florian Bruhin (:user:`The-Compiler`, `florian@pytest.org `_) + * Freya Bruhin (:user:`The-Compiler`, `freya@pytest.org `_) * Pierre Sassoulas (:user:`Pierre-Sassoulas`, `pierre@pytest.org `_) * Ran Benita (:user:`bluetech`, `ran@pytest.org `_) * Ronny Pfannschmidt (:user:`RonnyPfannschmidt`, `ronny@pytest.org `_) @@ -51,7 +51,7 @@ Other - The :doc:`contribution guide ` for help on submitting pull requests to GitHub. -- Florian Bruhin (:user:`The-Compiler`) offers pytest professional teaching and +- Freya Bruhin (:user:`The-Compiler`) offers pytest professional teaching and consulting via `Bruhin Software `_. .. _`pytest issue tracker`: https://github.com/pytest-dev/pytest/issues diff --git a/doc/en/deprecations.rst b/doc/en/deprecations.rst index 18df64c9204..57c583fd852 100644 --- a/doc/en/deprecations.rst +++ b/doc/en/deprecations.rst @@ -15,6 +15,41 @@ Below is a complete list of all pytest features which are considered deprecated. :class:`~pytest.PytestWarning` or subclasses, which can be filtered using :ref:`standard warning filters `. +.. _monkeypatch-fixup-namespace-packages: + +``monkeypatch.syspath_prepend`` with legacy namespace packages +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
deprecated:: 9.0 + +When using :meth:`monkeypatch.syspath_prepend() `, +pytest automatically calls ``pkg_resources.fixup_namespace_packages()`` if ``pkg_resources`` is imported. +This is only needed for legacy namespace packages that use ``pkg_resources.declare_namespace()``. + +Legacy namespace packages are deprecated in favor of native namespace packages (:pep:`420`). +If you are using ``pkg_resources.declare_namespace()`` in your ``__init__.py`` files, +you should migrate to native namespace packages by removing the ``__init__.py`` files from your namespace packages. + +This deprecation warning will only be issued when: + +1. ``pkg_resources`` is imported, and +2. The specific path being prepended contains a declared namespace package (via ``pkg_resources.declare_namespace()``) + +To fix this warning, convert your legacy namespace packages to native namespace packages: + +**Legacy namespace package** (deprecated): + +.. code-block:: python + + # mypkg/__init__.py + __import__("pkg_resources").declare_namespace(__name__) + +**Native namespace package** (recommended): + +Simply remove the ``__init__.py`` file entirely. +Python 3.3+ natively supports namespace packages without ``__init__.py``. + + .. _sync-test-async-fixture: sync test depending on async fixture @@ -104,7 +139,7 @@ In ``8.2`` the ``exc_type`` parameter has been added, giving users the ability o to skip tests only if the module cannot really be found, and not because of some other error. Catching only :class:`ModuleNotFoundError` by default (and letting other errors propagate) would be the best solution, -however for backward compatibility, pytest will keep the existing behavior but raise an warning if: +however for backward compatibility, pytest will keep the existing behavior but raise a warning if: 1. The captured exception is of type :class:`ImportError`, and: 2. The user does not pass ``exc_type`` explicitly. 
@@ -316,46 +351,6 @@ Users expected in this case that the ``usefixtures`` mark would have its intende Now pytest will issue a warning when it encounters this problem, and will raise an error in the future versions. -Returning non-None value in test functions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. deprecated:: 7.2 - -A ``pytest.PytestReturnNotNoneWarning`` is now emitted if a test function returns something other than `None`. - -This prevents a common mistake among beginners that expect that returning a `bool` would cause a test to pass or fail, for example: - -.. code-block:: python - - @pytest.mark.parametrize( - ["a", "b", "result"], - [ - [1, 2, 5], - [2, 3, 8], - [5, 3, 18], - ], - ) - def test_foo(a, b, result): - return foo(a, b) == result - -Given that pytest ignores the return value, this might be surprising that it will never fail. - -The proper fix is to change the `return` to an `assert`: - -.. code-block:: python - - @pytest.mark.parametrize( - ["a", "b", "result"], - [ - [1, 2, 5], - [2, 3, 8], - [5, 3, 18], - ], - ) - def test_foo(a, b, result): - assert foo(a, b) == result - - The ``yield_fixture`` function/decorator ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -363,7 +358,7 @@ The ``yield_fixture`` function/decorator ``pytest.yield_fixture`` is a deprecated alias for :func:`pytest.fixture`. -It has been so for a very long time, so can be search/replaced safely. +It has been so for a very long time, so it can be searched/replaced safely. Removed Features and Breaking Changes @@ -594,18 +589,20 @@ removed in pytest 8 (deprecated since pytest 2.4.0): - ``parser.addoption(..., type="int/string/float/complex")`` - use ``type=int`` etc. instead. -The ``--strict`` command-line option -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The ``--strict`` command-line option (reintroduced) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. deprecated:: 6.2 -.. versionremoved:: 8.0 +.. 
versionchanged:: 9.0 -The ``--strict`` command-line option has been deprecated in favor of ``--strict-markers``, which +The ``--strict`` command-line option had been deprecated in favor of ``--strict-markers``, which better conveys what the option does. -We have plans to maybe in the future to reintroduce ``--strict`` and make it an encompassing -flag for all strictness related options (``--strict-markers`` and ``--strict-config`` -at the moment, more might be introduced in the future). +In version 8.1, we accidentally un-deprecated ``--strict``. + +In version 9.0, we changed ``--strict`` to make it set the new :confval:`strict` +configuration option. It now enables all strictness related options (including +:confval:`strict_markers`). .. _cmdline-preparse-deprecated: @@ -777,7 +774,7 @@ The ``pytest._fillfuncargs`` function This function was kept for backward compatibility with an older plugin. -It's functionality is not meant to be used directly, but if you must replace +Its functionality is not meant to be used directly, but if you must replace it, use `function._request._fillfixtures()` instead, though note this is not a public API and may break in the future. @@ -808,7 +805,7 @@ The ``--result-log`` option produces a stream of test reports which can be analysed at runtime, but it uses a custom format which requires users to implement their own parser. -The `pytest-reportlog `__ plugin provides a ``--report-log`` option, a more standard and extensible alternative, producing +The :pypi:`pytest-reportlog` plugin provides a ``--report-log`` option, a more standard and extensible alternative, producing one JSON object per-line, and should cover the same use cases. Please try it out and provide feedback. The ``pytest-reportlog`` plugin might even be merged into the core @@ -848,20 +845,38 @@ that manipulate this type of file (for example, Jenkins, Azure Pipelines, etc.). 
Users are recommended to try the new ``xunit2`` format and see if their tooling that consumes the JUnit XML file supports it. -To use the new format, update your ``pytest.ini``: +To use the new format, update your configuration file: + +.. tab:: toml + + .. code-block:: toml -.. code-block:: ini + [pytest] + junit_family = "xunit2" - [pytest] - junit_family=xunit2 +.. tab:: ini + + .. code-block:: ini + + [pytest] + junit_family = xunit2 If you discover that your tooling does not support the new format, and want to keep using the legacy version, set the option to ``legacy`` instead: -.. code-block:: ini +.. tab:: toml + + .. code-block:: toml + + [pytest] + junit_family = "legacy" + +.. tab:: ini + + .. code-block:: ini - [pytest] - junit_family=legacy + [pytest] + junit_family = legacy By using ``legacy`` you will keep using the legacy/xunit1 format when upgrading to pytest 6.0, where the default format will be ``xunit2``. diff --git a/doc/en/example/.ruff.toml b/doc/en/example/.ruff.toml new file mode 100644 index 00000000000..feddc5c0654 --- /dev/null +++ b/doc/en/example/.ruff.toml @@ -0,0 +1 @@ +lint.ignore = ["RUF059"] diff --git a/doc/en/example/attic.rst b/doc/en/example/attic.rst index 2b1f2766dce..3a2e228337e 100644 --- a/doc/en/example/attic.rst +++ b/doc/en/example/attic.rst @@ -75,7 +75,7 @@ decorate its result. This mechanism allows us to stay ignorant of how/where the function argument is provided - in our example from a `conftest plugin`_. -sidenote: the temporary directory used here are instances of +Side note: the temporary directories used here are instances of the `py.path.local`_ class which provides many of the os.path methods in a convenient way. diff --git a/doc/en/example/customdirectory.rst b/doc/en/example/customdirectory.rst index 1e4d7e370de..705a3373654 100644 --- a/doc/en/example/customdirectory.rst +++ b/doc/en/example/customdirectory.rst @@ -36,13 +36,13 @@ You can create a ``manifest.json`` file and some test files: .. 
include:: customdirectory/tests/test_third.py :literal: -An you can now execute the test specification: +And you can now execute the test specification: .. code-block:: pytest customdirectory $ pytest =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project/customdirectory configfile: pytest.ini collected 2 items @@ -62,7 +62,7 @@ You can verify that your custom collector appears in the collection tree: customdirectory $ pytest --collect-only =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project/customdirectory configfile: pytest.ini collected 2 items diff --git a/doc/en/example/markers.rst b/doc/en/example/markers.rst index babcd9e2f3a..c8e4172a696 100644 --- a/doc/en/example/markers.rst +++ b/doc/en/example/markers.rst @@ -47,7 +47,7 @@ You can then restrict a test run to only run tests marked with ``webtest``: $ pytest -v -m webtest =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python cachedir: .pytest_cache rootdir: /home/sweet/project collecting ... collected 4 items / 3 deselected / 1 selected @@ -62,7 +62,7 @@ Or the inverse, running all tests except the webtest ones: $ pytest -v -m "not webtest" =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python cachedir: .pytest_cache rootdir: /home/sweet/project collecting ... 
collected 4 items / 1 deselected / 3 selected @@ -82,7 +82,7 @@ keyword arguments, e.g. to run only tests marked with ``device`` and the specifi $ pytest -v -m "device(serial='123')" =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python cachedir: .pytest_cache rootdir: /home/sweet/project collecting ... collected 4 items / 3 deselected / 1 selected @@ -106,7 +106,7 @@ tests based on their module, class, method, or function name: $ pytest -v test_server.py::TestClass::test_method =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python cachedir: .pytest_cache rootdir: /home/sweet/project collecting ... collected 1 item @@ -121,7 +121,7 @@ You can also select on the class: $ pytest -v test_server.py::TestClass =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python cachedir: .pytest_cache rootdir: /home/sweet/project collecting ... collected 1 item @@ -136,7 +136,7 @@ Or select multiple nodes: $ pytest -v test_server.py::TestClass test_server.py::test_send_http =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python cachedir: .pytest_cache rootdir: /home/sweet/project collecting ... collected 2 items @@ -167,9 +167,9 @@ Using ``-k expr`` to select tests based on their name .. 
versionadded:: 2.0/2.3.4 -You can use the ``-k`` command line option to specify an expression +You can use the :option:`-k` command line option to specify an expression which implements a substring match on the test names instead of the -exact match on markers that ``-m`` provides. This makes it easy to +exact match on markers that :option:`-m` provides. This makes it easy to select tests based on their names: .. versionchanged:: 5.4 @@ -180,7 +180,7 @@ The expression matching is now case-insensitive. $ pytest -v -k http # running with the above defined example module =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python cachedir: .pytest_cache rootdir: /home/sweet/project collecting ... collected 4 items / 3 deselected / 1 selected @@ -195,7 +195,7 @@ And you can also run all tests except the ones that match the keyword: $ pytest -k "not send_http" -v =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python cachedir: .pytest_cache rootdir: /home/sweet/project collecting ... collected 4 items / 1 deselected / 3 selected @@ -212,7 +212,7 @@ Or to select "http" and "quick" tests: $ pytest -k "http or quick" -v =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python cachedir: .pytest_cache rootdir: /home/sweet/project collecting ... 
collected 4 items / 2 deselected / 2 selected @@ -225,7 +225,7 @@ Or to select "http" and "quick" tests: You can use ``and``, ``or``, ``not`` and parentheses. -In addition to the test's name, ``-k`` also matches the names of the test's parents (usually, the name of the file and class it's in), +In addition to the test's name, :option:`-k` also matches the names of the test's parents (usually, the name of the file and class it's in), attributes set on the test function, markers applied to it or its parents and any :attr:`extra keywords <_pytest.nodes.Node.extra_keyword_matches>` explicitly added to it or its parents. @@ -239,13 +239,11 @@ Registering markers Registering markers for your test suite is simple: -.. code-block:: ini +.. code-block:: toml - # content of pytest.ini + # content of pytest.toml [pytest] - markers = - webtest: mark a test as a webtest. - slow: mark test as slow. + markers = ["webtest: mark a test as a webtest.", "slow: mark test as slow."] Multiple custom markers can be registered, by defining each one in its own line, as shown in above example. @@ -264,7 +262,7 @@ You can ask which markers exist for your test suite - the list includes our just @pytest.mark.skipif(condition, ..., *, reason=...): skip the given test function if any of the conditions evaluate to True. Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-skipif - @pytest.mark.xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): mark the test function as an expected failure if any of the conditions evaluate to True. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. 
See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-xfail + @pytest.mark.xfail(condition, ..., *, reason=..., run=True, raises=None, strict=strict_xfail): mark the test function as an expected failure if any of the conditions evaluate to True. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-xfail @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see https://docs.pytest.org/en/stable/how-to/parametrize.html for more info and examples. @@ -286,8 +284,7 @@ For an example on how to add and work with markers from a plugin, see * Asking for existing markers via ``pytest --markers`` gives good output - * Typos in function markers are treated as an error if you use - the ``--strict-markers`` option. + * Typos in function markers are treated as an error if you use the :confval:`strict_markers` configuration option. .. _`scoped-marking`: @@ -414,14 +411,14 @@ A test file using this local plugin: def test_basic_db_operation(): pass -and an example invocations specifying a different environment than what +and an example invocation specifying a different environment than what the test needs: .. 
code-block:: pytest $ pytest -E stage2 =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 1 item @@ -435,7 +432,7 @@ and here is one that specifies exactly the environment needed: $ pytest -E stage1 =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 1 item @@ -443,7 +440,7 @@ and here is one that specifies exactly the environment needed: ============================ 1 passed in 0.12s ============================= -The ``--markers`` option always gives you a list of available markers: +The :option:`--markers` option always gives you a list of available markers: .. code-block:: pytest @@ -456,7 +453,7 @@ The ``--markers`` option always gives you a list of available markers: @pytest.mark.skipif(condition, ..., *, reason=...): skip the given test function if any of the conditions evaluate to True. Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-skipif - @pytest.mark.xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): mark the test function as an expected failure if any of the conditions evaluate to True. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. 
See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-xfail + @pytest.mark.xfail(condition, ..., *, reason=..., run=True, raises=None, strict=strict_xfail): mark the test function as an expected failure if any of the conditions evaluate to True. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-xfail @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see https://docs.pytest.org/en/stable/how-to/parametrize.html for more info and examples. 
@@ -628,7 +625,7 @@ then you will see two tests skipped and two executed tests as expected: $ pytest -rs # this option reports skip reasons =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 4 items @@ -644,7 +641,7 @@ Note that if you specify a platform via the marker-command line option like this $ pytest -m linux =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 4 items / 3 deselected / 1 selected @@ -661,7 +658,7 @@ Automatically adding markers based on test names If you have a test suite where test function names indicate a certain type of test, you can implement a hook that automatically defines -markers so that you can use the ``-m`` option with it. Let's look +markers so that you can use the :option:`-m` option with it. Let's look at this test module: .. 
code-block:: python @@ -707,7 +704,7 @@ We can now use the ``-m option`` to select one set: $ pytest -m interface --tb=short =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 4 items / 2 deselected / 2 selected @@ -733,7 +730,7 @@ or to select both "event" and "interface" tests: $ pytest -m "interface or event" --tb=short =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 4 items / 1 deselected / 3 selected diff --git a/doc/en/example/multipython.py b/doc/en/example/multipython.py index f54524213bc..c04a2868812 100644 --- a/doc/en/example/multipython.py +++ b/doc/en/example/multipython.py @@ -10,7 +10,7 @@ import pytest -pythonlist = ["python3.9", "python3.10", "python3.11"] +pythonlist = ["python3.11", "python3.12", "python3.13"] @pytest.fixture(params=pythonlist) diff --git a/doc/en/example/nonpython.rst b/doc/en/example/nonpython.rst index aa463e2416b..54391d72fd4 100644 --- a/doc/en/example/nonpython.rst +++ b/doc/en/example/nonpython.rst @@ -28,7 +28,7 @@ now execute the test specification: nonpython $ pytest test_simple.yaml =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project/nonpython collected 2 items @@ -40,7 +40,7 @@ now execute the test specification: spec failed: 'some': 'other' no further details known at this point. 
========================= short test summary info ========================== - FAILED test_simple.yaml::hello + FAILED test_simple.yaml::hello - usecase execution failed ======================= 1 failed, 1 passed in 0.12s ======================== .. regendoc:wipe @@ -58,13 +58,18 @@ your own domain specific testing language this way. will be reported as a (red) string. ``reportinfo()`` is used for representing the test location and is also -consulted when reporting in ``verbose`` mode: +consulted when reporting in ``verbose`` mode. It should return a tuple +``(path, lineno, description)``, where: + +* ``path`` is the path shown in reports (usually ``self.path`` or ``self.fspath``). +* ``lineno`` is a zero-based line number, or ``0`` when no specific line applies. +* ``description`` is a short label shown for the collected item: .. code-block:: pytest nonpython $ pytest -v =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python cachedir: .pytest_cache rootdir: /home/sweet/project/nonpython collecting ... collected 2 items @@ -78,7 +83,7 @@ consulted when reporting in ``verbose`` mode: spec failed: 'some': 'other' no further details known at this point. ========================= short test summary info ========================== - FAILED test_simple.yaml::hello + FAILED test_simple.yaml::hello - usecase execution failed ======================= 1 failed, 1 passed in 0.12s ======================== .. 
regendoc:wipe @@ -90,7 +95,7 @@ interesting to just look at the collection tree: nonpython $ pytest --collect-only =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project/nonpython collected 2 items diff --git a/doc/en/example/parametrize.rst b/doc/en/example/parametrize.rst index 69e715c9db1..ae64a7c62d5 100644 --- a/doc/en/example/parametrize.rst +++ b/doc/en/example/parametrize.rst @@ -4,7 +4,7 @@ Parametrizing tests ================================================= -``pytest`` allows to easily parametrize test functions. +``pytest`` allows you to easily parametrize test functions. For basic docs, see :ref:`parametrize-basics`. In the following we provide some examples using @@ -83,9 +83,9 @@ Different options for test IDs ------------------------------------ pytest will build a string that is the test ID for each set of values in a -parametrized test. These IDs can be used with ``-k`` to select specific cases +parametrized test. These IDs can be used with :option:`-k` to select specific cases to run, and they will also identify the specific case when one is failing. -Running pytest with ``--collect-only`` will show the generated IDs. +Running pytest with :option:`--collect-only` will show the generated IDs. Numbers, strings, booleans and None will have their usual string representation used in the test ID. 
For other objects, pytest will make a string based on @@ -158,11 +158,11 @@ objects, they are still using the default pytest representation: $ pytest test_time.py --collect-only =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 8 items - + @@ -221,7 +221,7 @@ this is a fully self-contained example which you can run with: $ pytest test_scenarios.py =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 4 items @@ -235,11 +235,11 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia $ pytest --collect-only test_scenarios.py =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 4 items - + @@ -314,11 +314,11 @@ Let's first see how it looks like at collection time: $ pytest test_backends.py --collect-only =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 2 items - + @@ -352,7 +352,7 @@ The first invocation with ``db == "DB1"`` passed while the second with ``db == " Indirect parametrization --------------------------------------------------- -Using the ``indirect=True`` parameter when parametrizing a test allows to +Using the ``indirect=True`` parameter when parametrizing a test allows one to parametrize a test with a fixture receiving the values before passing them to a test: @@ -413,7 +413,7 @@ The result of 
this test will be successful: $ pytest -v test_indirect_list.py =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python cachedir: .pytest_cache rootdir: /home/sweet/project collecting ... collected 1 item @@ -503,12 +503,10 @@ Running it results in some skips if we don't have all the python interpreters in .. code-block:: pytest . $ pytest -rs -q multipython.py - sssssssssssssssssssssssssss [100%] + ssssssssssss......sss...... [100%] ========================= short test summary info ========================== - SKIPPED [9] multipython.py:67: 'python3.9' not found - SKIPPED [9] multipython.py:67: 'python3.10' not found - SKIPPED [9] multipython.py:67: 'python3.11' not found - 27 skipped in 0.12s + SKIPPED [15] multipython.py:67: 'python3.11' not found + 12 passed, 15 skipped in 0.12s Parametrization of optional implementations/imports --------------------------------------------------- @@ -568,7 +566,7 @@ If you run this with reporting for skips enabled: $ pytest -rs test_module.py =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 2 items @@ -629,7 +627,7 @@ Then run ``pytest`` with verbose mode and with only the ``basic`` marker: $ pytest -v -m basic =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python cachedir: .pytest_cache rootdir: /home/sweet/project collecting ... 
collected 24 items / 21 deselected / 3 selected @@ -644,7 +642,7 @@ As the result: - Four tests were collected - One test was deselected because it doesn't have the ``basic`` mark. -- Three tests with the ``basic`` mark was selected. +- Three tests with the ``basic`` mark were selected. - The test ``test_eval[1+7-8]`` passed, but the name is autogenerated and confusing. - The test ``test_eval[basic_2+4]`` passed. - The test ``test_eval[basic_6*9]`` was expected to fail and did fail. diff --git a/doc/en/example/pythoncollection.rst b/doc/en/example/pythoncollection.rst index 6a3b143d580..48ee2c8533f 100644 --- a/doc/en/example/pythoncollection.rst +++ b/doc/en/example/pythoncollection.rst @@ -5,7 +5,7 @@ Ignore paths during test collection ----------------------------------- You can easily ignore certain test directories and modules during collection -by passing the ``--ignore=path`` option on the cli. ``pytest`` allows multiple +by passing the :option:`--ignore=path` option on the cli. ``pytest`` allows multiple ``--ignore`` options. Example: .. code-block:: text @@ -43,18 +43,20 @@ you will see that ``pytest`` only collects test-modules, which do not match the ========================= 5 passed in 0.02 seconds ========================= -The ``--ignore-glob`` option allows to ignore test file paths based on Unix shell-style wildcards. -If you want to exclude test-modules that end with ``_01.py``, execute ``pytest`` with ``--ignore-glob='*_01.py'``. +The :option:`--ignore-glob` option allows to ignore test file paths based on Unix shell-style wildcards. +If you want to exclude test-modules that end with ``_01.py``, execute ``pytest`` with :option:`--ignore-glob='*_01.py'`. Deselect tests during test collection ------------------------------------- -Tests can individually be deselected during collection by passing the ``--deselect=item`` option. +Tests can individually be deselected during collection by passing the :option:`--deselect=item` option. 
For example, say ``tests/foobar/test_foobar_01.py`` contains ``test_a`` and ``test_b``. You can run all of the tests within ``tests/`` *except* for ``tests/foobar/test_foobar_01.py::test_a`` -by invoking ``pytest`` with ``--deselect tests/foobar/test_foobar_01.py::test_a``. +by invoking ``pytest`` with ``--deselect=tests/foobar/test_foobar_01.py::test_a``. ``pytest`` allows multiple ``--deselect`` options. +.. _duplicate-paths: + Keeping duplicate paths specified from command line ---------------------------------------------------- @@ -71,7 +73,7 @@ Example: Just collect tests once. -To collect duplicate tests, use the ``--keep-duplicates`` option on the cli. +To collect duplicate tests, use the :option:`--keep-duplicates` option on the cli. Example: .. code-block:: pytest @@ -82,29 +84,17 @@ Example: collected 2 items ... -As the collector just works on directories, if you specify twice a single test file, ``pytest`` will -still collect it twice, no matter if the ``--keep-duplicates`` is not specified. -Example: - -.. code-block:: pytest - - pytest test_a.py test_a.py - - ... - collected 2 items - ... - Changing directory recursion ----------------------------------------------------- -You can set the :confval:`norecursedirs` option in an ini-file, for example your ``pytest.ini`` in the project root directory: +You can set the :confval:`norecursedirs` option in a configuration file: -.. code-block:: ini +.. code-block:: toml - # content of pytest.ini + # content of pytest.toml [pytest] - norecursedirs = .svn _build tmp* + norecursedirs = [".svn", "_build", "tmp*"] This would tell ``pytest`` to not recurse into typical subversion or sphinx-build directories or into any ``tmp`` prefixed directory. @@ -118,14 +108,14 @@ the :confval:`python_files`, :confval:`python_classes` and :confval:`python_functions` in your :ref:`configuration file `. Here is an example: -.. code-block:: ini +.. 
code-block:: toml - # content of pytest.ini + # content of pytest.toml # Example 1: have pytest look for "check" instead of "test" [pytest] - python_files = check_*.py - python_classes = Check - python_functions = *_check + python_files = ["check_*.py"] + python_classes = ["Check"] + python_functions = ["*_check"] This would make ``pytest`` look for tests in files that match the ``check_* .py`` glob-pattern, ``Check`` prefixes in classes, and functions and methods @@ -147,12 +137,12 @@ The test collection would look like this: $ pytest --collect-only =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project - configfile: pytest.ini + configfile: pytest.toml collected 2 items - + @@ -162,23 +152,23 @@ The test collection would look like this: You can check for multiple glob patterns by adding a space between the patterns: -.. code-block:: ini +.. code-block:: toml + # content of pytest.toml # Example 2: have pytest look for files with "test" and "example" - # content of pytest.ini [pytest] - python_files = test_*.py example_*.py + python_files = ["test_*.py", "example_*.py"] .. note:: - the ``python_functions`` and ``python_classes`` options has no effect + the ``python_functions`` and ``python_classes`` options have no effect for ``unittest.TestCase`` test discovery because pytest delegates discovery of test case methods to unittest code. Interpreting cmdline arguments as Python packages ----------------------------------------------------- -You can use the ``--pyargs`` option to make ``pytest`` try +You can use the :option:`--pyargs` option to make ``pytest`` try interpreting arguments as python package names, deriving their file system path and then running the test. 
For example if you have unittest2 installed you can type: @@ -188,14 +178,14 @@ example if you have unittest2 installed you can type: pytest --pyargs unittest2.test.test_skipping -q which would run the respective test module. Like with -other options, through an ini-file and the :confval:`addopts` option you +other options, through a configuration file and the :confval:`addopts` option you can make this change more permanently: -.. code-block:: ini +.. code-block:: toml - # content of pytest.ini + # content of pytest.toml [pytest] - addopts = --pyargs + addopts = ["--pyargs"] Now a simple invocation of ``pytest NAME`` will check if NAME exists as an importable package/module and otherwise @@ -210,12 +200,12 @@ You can always peek at the collection tree without running tests like this: . $ pytest --collect-only pythoncollection.py =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project - configfile: pytest.ini + configfile: pytest.toml collected 3 items - + @@ -234,14 +224,14 @@ Customizing test collection You can easily instruct ``pytest`` to discover tests from every Python file: -.. code-block:: ini +.. code-block:: toml - # content of pytest.ini + # content of pytest.toml [pytest] - python_files = *.py + python_files = ["*.py"] However, many projects will have a ``setup.py`` which they don't want to be -imported. Moreover, there may files only importable by a specific python +imported. Moreover, there may be files only importable by a specific python version. 
For such cases you can dynamically define files to be ignored by listing them in a ``conftest.py`` file: @@ -294,9 +284,9 @@ file will be left out: $ pytest --collect-only =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project - configfile: pytest.ini + configfile: pytest.toml collected 0 items ======================= no tests collected in 0.12s ======================== diff --git a/doc/en/example/reportingdemo.rst b/doc/en/example/reportingdemo.rst index 5e48815bbc9..29ba190b7e7 100644 --- a/doc/en/example/reportingdemo.rst +++ b/doc/en/example/reportingdemo.rst @@ -9,7 +9,7 @@ Here is a nice run of several failures and how ``pytest`` presents things: assertion $ pytest failure_demo.py =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project/assertion collected 44 items @@ -384,6 +384,7 @@ Here is a nice run of several failures and how ``pytest`` presents things: i = Foo() > assert i.b == 2 + ^^^ failure_demo.py:148: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @@ -446,6 +447,7 @@ Here is a nice run of several failures and how ``pytest`` presents things: def test_tupleerror(self): > a, b = [1] # noqa: F841 + ^^^^ E ValueError: not enough values to unpack (expected 2, got 1) failure_demo.py:177: ValueError @@ -457,6 +459,7 @@ Here is a nice run of several failures and how ``pytest`` presents things: items = [1, 2, 3] print(f"items is {items!r}") > a, b = items.pop() + ^^^^ E TypeError: cannot unpack non-iterable int object failure_demo.py:182: TypeError @@ -468,6 +471,7 @@ Here is a nice run of several failures and how ``pytest`` presents things: def test_some_error(self): > if namenotexi: # noqa: 
F821 + ^^^^^^^^^^ E NameError: name 'namenotexi' is not defined failure_demo.py:185: NameError @@ -526,6 +530,7 @@ Here is a nice run of several failures and how ``pytest`` presents things: def test_z1_unpack_error(self): items = [] > a, b = items + ^^^^ E ValueError: not enough values to unpack (expected 2, got 0) failure_demo.py:219: ValueError @@ -536,6 +541,7 @@ Here is a nice run of several failures and how ``pytest`` presents things: def test_z2_type_error(self): items = 3 > a, b = items + ^^^^ E TypeError: cannot unpack non-iterable int object failure_demo.py:223: TypeError @@ -568,12 +574,12 @@ Here is a nice run of several failures and how ``pytest`` presents things: E + where False = ('456') E + where = '123'.startswith E + where '123' = .f at 0xdeadbeef0029>() - E + and '456' = .g at 0xdeadbeef0003>() + E + and '456' = .g at 0xdeadbeef002a>() failure_demo.py:237: AssertionError _____________________ TestMoreErrors.test_global_func ______________________ - self = + self = def test_global_func(self): > assert isinstance(globf(42), float) @@ -584,18 +590,18 @@ Here is a nice run of several failures and how ``pytest`` presents things: failure_demo.py:240: AssertionError _______________________ TestMoreErrors.test_instance _______________________ - self = + self = def test_instance(self): self.x = 6 * 7 > assert self.x != 42 E assert 42 != 42 - E + where 42 = .x + E + where 42 = .x failure_demo.py:244: AssertionError _______________________ TestMoreErrors.test_compare ________________________ - self = + self = def test_compare(self): > assert globf(10) < 5 @@ -605,7 +611,7 @@ Here is a nice run of several failures and how ``pytest`` presents things: failure_demo.py:247: AssertionError _____________________ TestMoreErrors.test_try_finally ______________________ - self = + self = def test_try_finally(self): x = 1 @@ -616,7 +622,7 @@ Here is a nice run of several failures and how ``pytest`` presents things: failure_demo.py:252: AssertionError ___________________ 
TestCustomAssertMsg.test_single_line ___________________ - self = + self = def test_single_line(self): class A: @@ -631,16 +637,16 @@ Here is a nice run of several failures and how ``pytest`` presents things: failure_demo.py:263: AssertionError ____________________ TestCustomAssertMsg.test_multiline ____________________ - self = + self = def test_multiline(self): class A: a = 1 b = 2 - > assert ( - A.a == b - ), "A.a appears not to be b\nor does not appear to be b\none of those" + > assert A.a == b, ( + "A.a appears not to be b\nor does not appear to be b\none of those" + ) E AssertionError: A.a appears not to be b E or does not appear to be b E one of those @@ -650,7 +656,7 @@ Here is a nice run of several failures and how ``pytest`` presents things: failure_demo.py:270: AssertionError ___________________ TestCustomAssertMsg.test_custom_repr ___________________ - self = + self = def test_custom_repr(self): class JSON: diff --git a/doc/en/example/simple.rst b/doc/en/example/simple.rst index 69d973e51d0..a07927280ae 100644 --- a/doc/en/example/simple.rst +++ b/doc/en/example/simple.rst @@ -11,12 +11,11 @@ every time you use ``pytest``. For example, if you always want to see detailed info on skipped and xfailed tests, as well as have terser "dot" progress output, you can write it into a configuration file: -.. code-block:: ini +.. code-block:: toml - # content of pytest.ini + # content of pytest.toml [pytest] - addopts = -ra -q - + addopts = ["-ra", "-q"] Alternatively, you can set a ``PYTEST_ADDOPTS`` environment variable to add command line options while the environment is in use: @@ -29,7 +28,7 @@ Here's how the command-line is built in the presence of ``addopts`` or the envir .. 
code-block:: text - $PYTEST_ADDOPTS + $PYTEST_ADDOPTS So if the user executes in the command-line: @@ -44,7 +43,7 @@ The actual command line executed is: pytest -ra -q -v -m slow Note that as usual for other command-line applications, in case of conflicting options the last one wins, so the example -above will show verbose output because ``-v`` overwrites ``-q``. +above will show verbose output because :option:`-v` overwrites :option:`-q`. .. _request example: @@ -104,6 +103,7 @@ Let's run this without supplying our new option: elif cmdopt == "type2": print("second") > assert 0 # to see what was printed + ^^^^^^^^ E assert 0 test_sample.py:6: AssertionError @@ -130,6 +130,7 @@ And now with supplying a command line option: elif cmdopt == "type2": print("second") > assert 0 # to see what was printed + ^^^^^^^^ E assert 0 test_sample.py:6: AssertionError @@ -165,6 +166,8 @@ Now we'll get feedback on a bad argument: $ pytest -q --cmdopt=type3 ERROR: usage: pytest [options] [file_or_dir] [file_or_dir] [...] 
pytest: error: argument --cmdopt: invalid choice: 'type3' (choose from type1, type2) + inifile: None + rootdir: /home/sweet/project If you need to provide more detailed error messages, you can use the @@ -232,7 +235,7 @@ directory with the above conftest.py: $ pytest =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 0 items @@ -296,7 +299,7 @@ and when running it will see a skipped "slow" test: $ pytest -rs # "-rs" means report details on the little 's' =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 2 items @@ -312,7 +315,7 @@ Or run it including the ``slow`` marked test: $ pytest --runslow =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 2 items @@ -350,7 +353,7 @@ Example: The ``__tracebackhide__`` setting influences ``pytest`` showing of tracebacks: the ``checkconfig`` function will not be shown -unless the ``--full-trace`` command line option is specified. +unless the :option:`--full-trace` command line option is specified. Let's run our little function: .. code-block:: pytest @@ -413,10 +416,10 @@ running from a test you can do this: if os.environ.get("PYTEST_VERSION") is not None: - # Things you want to to do if your code is called by pytest. + # Things you want to do if your code is called by pytest. ... else: - # Things you want to to do if your code is not called by pytest. + # Things you want to do if your code is not called by pytest. ... 
@@ -441,7 +444,7 @@ which will add the string to the test header accordingly: $ pytest =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y project deps: mylib-1.1 rootdir: /home/sweet/project collected 0 items @@ -469,7 +472,7 @@ which will add info only when run with "--v": $ pytest -v =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python cachedir: .pytest_cache info1: did you know that ... did you? @@ -484,7 +487,7 @@ and nothing when run plainly: $ pytest =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 0 items @@ -523,7 +526,7 @@ Now we can profile which test functions execute the slowest: $ pytest --durations=3 =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 3 items @@ -550,12 +553,10 @@ an ``incremental`` marker which is to be used on classes: # content of conftest.py - from typing import Dict, Tuple - import pytest # store history of failures per test class name and per index in parametrize (if parametrize used) - _test_failed_incremental: Dict[str, Dict[Tuple[int, ...], str]] = {} + _test_failed_incremental: dict[str, dict[tuple[int, ...], str]] = {} def pytest_runtest_makereport(item, call): @@ -629,7 +630,7 @@ If we run this: $ pytest -rx =========================== test session starts ============================ - platform linux -- Python 
3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 4 items @@ -646,7 +647,7 @@ If we run this: test_step.py:11: AssertionError ========================= short test summary info ========================== - XFAIL test_step.py::TestUserHandling::test_deletion - reason: previous test failed (test_modification) + XFAIL test_step.py::TestUserHandling::test_deletion - previous test failed (test_modification) ================== 1 failed, 2 passed, 1 xfailed in 0.12s ================== We'll see that ``test_deletion`` was not executed because ``test_modification`` @@ -711,7 +712,7 @@ We can run this: $ pytest =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 7 items @@ -725,7 +726,7 @@ We can run this: file /home/sweet/project/b/test_error.py, line 1 def test_root(db): # no db here, will error out E fixture 'db' not found - > available fixtures: cache, capfd, capfdbinary, caplog, capsys, capsysbinary, doctest_namespace, monkeypatch, pytestconfig, record_property, record_testsuite_property, record_xml_attribute, recwarn, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory + > available fixtures: cache, capfd, capfdbinary, caplog, capsys, capsysbinary, capteesys, doctest_namespace, monkeypatch, pytestconfig, record_property, record_testsuite_property, record_xml_attribute, recwarn, subtests, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory > use 'pytest --fixtures [testpath]' for help on them. 
/home/sweet/project/b/test_error.py:1 @@ -736,6 +737,7 @@ We can run this: def test_a1(db): > assert 0, db # to show value + ^^^^^^^^^^^^ E AssertionError: E assert 0 @@ -746,6 +748,7 @@ We can run this: def test_a2(db): > assert 0, db # to show value + ^^^^^^^^^^^^ E AssertionError: E assert 0 @@ -770,7 +773,7 @@ The two test modules in the ``a`` directory see the same ``db`` fixture instance while the one test in the sister-directory ``b`` doesn't see it. We could of course also define a ``db`` fixture in that sister directory's ``conftest.py`` file. Note that each fixture is only instantiated if there is a test actually needing -it (unless you use "autouse" fixture which are always executed ahead of the first test +it (unless you use "autouse" fixtures which are always executed ahead of the first test executing). @@ -831,7 +834,7 @@ and run them: $ pytest test_module.py =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 2 items @@ -878,11 +881,10 @@ here is a little example implemented via a local plugin: .. 
code-block:: python # content of conftest.py - from typing import Dict import pytest from pytest import StashKey, CollectReport - phase_report_key = StashKey[Dict[str, CollectReport]]() + phase_report_key = StashKey[dict[str, CollectReport]]() @pytest.hookimpl(wrapper=True, tryfirst=True) @@ -942,11 +944,11 @@ and run it: $ pytest -s test_module.py =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 3 items - test_module.py Esetting up a test failed or skipped test_module.py::test_setup_fails + test_module.py Esetting up a test failed test_module.py::test_setup_fails Fexecuting test failed or skipped test_module.py::test_call_fails F @@ -993,7 +995,7 @@ information. Sometimes a test session might get stuck and there might be no easy way to figure out -which test got stuck, for example if pytest was run in quiet mode (``-q``) or you don't have access to the console +which test got stuck, for example if pytest was run in quiet mode (:option:`-q`) or you don't have access to the console output. This is particularly a problem if the problem happens only sporadically, the famous "flaky" kind of tests. ``pytest`` sets the :envvar:`PYTEST_CURRENT_TEST` environment variable when running tests, which can be inspected diff --git a/doc/en/explanation/ci.rst b/doc/en/explanation/ci.rst index 45fe658d14f..1c03f840b43 100644 --- a/doc/en/explanation/ci.rst +++ b/doc/en/explanation/ci.rst @@ -8,7 +8,7 @@ Rationale The goal of testing in a CI pipeline is different from testing locally. Indeed, you can quickly edit some code and run your tests again on your computer, but -it is not possible with CI pipeline. They run on a separate server and are +it is not possible with CI pipelines. They run on a separate server and are triggered by specific actions. 
From that observation, pytest can detect when it is in a CI environment and @@ -17,11 +17,10 @@ adapt some of its behaviours. How CI is detected ------------------ -Pytest knows it is in a CI environment when either one of these environment variables are set, -regardless of their value: +Pytest knows it is in a CI environment when either one of these environment variables is set to a non-empty value: -* `CI`: used by many CI systems. -* `BUILD_NUMBER`: used by Jenkins. +* :envvar:`CI`: used by many CI systems. +* :envvar:`BUILD_NUMBER`: used by Jenkins. Effects on CI ------------- @@ -51,7 +50,7 @@ Running this locally, without any extra options, will output: $ pytest test_ci.py ... ========================= short test summary info ========================== - FAILED test_backends.py::test_db_initialized[d2] - Failed: deliberately f... + FAILED test_ci.py::test_db_initialized - Failed: deliberately f... *(Note the truncated text)* @@ -64,7 +63,7 @@ While running this on CI will output: $ pytest test_ci.py ... ========================= short test summary info ========================== - FAILED test_backends.py::test_db_initialized[d2] - Failed: deliberately failing + FAILED test_ci.py::test_db_initialized - Failed: deliberately failing for demo purpose, Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras facilisis, massa in suscipit dignissim, mauris lacus molestie nisi, quis varius metus nulla ut ipsum. diff --git a/doc/en/explanation/flaky.rst b/doc/en/explanation/flaky.rst index 8369e1d9311..918d6f10b36 100644 --- a/doc/en/explanation/flaky.rst +++ b/doc/en/explanation/flaky.rst @@ -42,7 +42,7 @@ It is of course possible (and common) for tests and fixtures to spawn threads th * Make sure to eventually wait on any spawned threads -- for example at the end of a test, or during the teardown of a fixture. * Avoid using primitives provided by pytest (:func:`pytest.warns`, :func:`pytest.raises`, etc) from multiple threads, as they are not thread-safe. 
-If your test suite uses threads and your are seeing flaky test results, do not discount the possibility that the test is implicitly using global state in pytest itself. +If your test suite uses threads and you are seeing flaky test results, do not discount the possibility that the test is implicitly using global state in pytest itself. Related features ^^^^^^^^^^^^^^^^ diff --git a/doc/en/explanation/goodpractices.rst b/doc/en/explanation/goodpractices.rst index 51c0b960aed..52474d148c6 100644 --- a/doc/en/explanation/goodpractices.rst +++ b/doc/en/explanation/goodpractices.rst @@ -94,14 +94,13 @@ This has the following benefits: For new projects, we recommend to use ``importlib`` :ref:`import mode ` (see which-import-mode_ for a detailed explanation). -To this end, add the following to your ``pyproject.toml``: +To this end, add the following to your configuration file: .. code-block:: toml - [tool.pytest.ini_options] - addopts = [ - "--import-mode=importlib", - ] + # content of pytest.toml + [pytest] + addopts = ["--import-mode=importlib"] .. _src-layout: @@ -126,22 +125,36 @@ which are better explained in this excellent `blog post`_ by Ionel Cristian Măr PYTHONPATH=src pytest or in a permanent manner by using the :confval:`pythonpath` configuration variable and adding the - following to your ``pyproject.toml``: + following to your configuration file: - .. code-block:: toml + .. tab:: toml + + .. code-block:: toml + + [pytest] + pythonpath = ["src"] + + .. tab:: ini + + .. code-block:: ini - [tool.pytest.ini_options] - pythonpath = "src" + [pytest] + pythonpath = src .. 
note:: - If you do not use an editable install and not use the ``src`` layout (``mypkg`` directly in the root + If you do not use an editable install and do not use the ``src`` layout (``mypkg`` directly in the root directory) you can rely on the fact that Python by default puts the current directory in ``sys.path`` to import your package and run ``python -m pytest`` to execute the tests against the local copy directly. See :ref:`pytest vs python -m pytest` for more information about the difference between calling ``pytest`` and ``python -m pytest``. +.. seealso:: + + :doc:`packaging:discussions/src-layout-vs-flat-layout` + The Python Packaging User Guide discusses the trade-offs between the ``src`` layout and ``flat`` layout. + Tests as part of application code ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -162,7 +175,7 @@ want to distribute them along with your application: test_view.py ... -In this scheme, it is easy to run your tests using the ``--pyargs`` option: +In this scheme, it is easy to run your tests using the :option:`--pyargs` option: .. code-block:: bash @@ -209,7 +222,7 @@ Note that this layout also works in conjunction with the ``src`` layout mentione from each other and thus deriving a canonical import name helps to avoid surprises such as a test module getting imported twice. - With ``--import-mode=importlib`` things are less convoluted because + With :option:`--import-mode=importlib` things are less convoluted because pytest doesn't need to change ``sys.path``, making things much less surprising. @@ -313,3 +326,75 @@ A list of the lints detected by flake8-pytest-style can be found on its `PyPI pa .. note:: flake8-pytest-style is not an official pytest project. Some of the rules enforce certain style choices, such as using `@pytest.fixture()` over `@pytest.fixture`, but you can configure the plugin to fit your preferred style. + +.. _`strict mode`: + +Using pytest's strict mode +-------------------------- + +.. 
versionadded:: 9.0 + +Pytest contains a set of configuration options that make it more strict. +The options are off by default for compatibility or other reasons, +but you should enable them if you can. + +You can enable all of the strictness options at once by setting the :confval:`strict` configuration option: + +.. tab:: toml + + .. code-block:: toml + + [pytest] + strict = true + +.. tab:: ini + + .. code-block:: ini + + [pytest] + strict = true + +See the :confval:`strict` documentation for the options it enables and their effect. + +If pytest adds new strictness options in the future, they will also be enabled in strict mode. +Therefore, you should only enable strict mode if you use a pinned/locked version of pytest, +or if you want to proactively adopt new strictness options as they are added. +If you don't want to automatically pick up new options, you can enable options individually: + +.. tab:: toml + + .. code-block:: toml + + [pytest] + strict_config = true + strict_markers = true + strict_parametrization_ids = true + strict_xfail = true + +.. tab:: ini + + .. code-block:: ini + + [pytest] + strict_config = true + strict_markers = true + strict_parametrization_ids = true + strict_xfail = true + +If you want to use strict mode but are having trouble with a specific option, you can turn it off individually: + +.. tab:: toml + + .. code-block:: toml + + [pytest] + strict = true + strict_parametrization_ids = false + +.. tab:: ini + + .. code-block:: ini + + [pytest] + strict = true + strict_parametrization_ids = false diff --git a/doc/en/explanation/pythonpath.rst b/doc/en/explanation/pythonpath.rst index e68f455cedf..cb3ae67216a 100644 --- a/doc/en/explanation/pythonpath.rst +++ b/doc/en/explanation/pythonpath.rst @@ -11,7 +11,7 @@ Import modes pytest as a testing framework needs to import test modules and ``conftest.py`` files for execution. 
Importing files in Python is a non-trivial process, so aspects of the -import process can be controlled through the ``--import-mode`` command-line flag, which can assume +import process can be controlled through the :option:`--import-mode` command-line flag, which can assume these values: .. _`import-mode-prepend`: @@ -44,12 +44,12 @@ these values: pkg_under_test/ the tests will run against the installed version - of ``pkg_under_test`` when ``--import-mode=append`` is used whereas + of ``pkg_under_test`` when :option:`--import-mode=append` is used whereas with ``prepend``, they would pick up the local version. This kind of confusion is why we advocate for using :ref:`src-layouts `. Same as ``prepend``, requires test module names to be unique when the test directory tree is - not arranged in packages, because the modules will put in :py:data:`sys.modules` after importing. + not arranged in packages, because the modules will be put in :py:data:`sys.modules` after importing. .. _`import-mode-importlib`: @@ -64,7 +64,7 @@ these values: * Test modules can't import each other. * Testing utility modules in the tests directories (for example a ``tests.helpers`` module containing test-related functions/classes) - are not importable. The recommendation in this case it to place testing utility modules together with the application/library + are not importable. The recommendation in this case is to place testing utility modules together with the application/library code, for example ``app.testing.helpers``. Important: by "test utility modules", we mean functions/classes which are imported by @@ -152,7 +152,7 @@ this case ``foo/``). To load the module, it will insert ``root/`` to the front The same logic applies to the ``conftest.py`` file: it will be imported as ``foo.conftest`` module. Preserving the full package name is important when tests live in a package to avoid problems -and allow test modules to have duplicated names. 
This is also discussed in details in +and allow test modules to have duplicated names. This is also discussed in detail in :ref:`test discovery`. Standalone test modules / ``conftest.py`` files @@ -182,7 +182,7 @@ with the ``conftest.py`` file by adding ``root/foo`` to :py:data:`sys.path` to i For this reason this layout cannot have test modules with the same name, as they all will be imported in the global import namespace. -This is also discussed in details in :ref:`test discovery`. +This is also discussed in detail in :ref:`test discovery`. .. _`pytest vs python -m pytest`: diff --git a/doc/en/funcarg_compare.rst b/doc/en/funcarg_compare.rst index bc5e7d3c515..7cd4c0f1676 100644 --- a/doc/en/funcarg_compare.rst +++ b/doc/en/funcarg_compare.rst @@ -18,7 +18,7 @@ The pre pytest-2.3 funcarg mechanism calls a factory each time a funcarg for a test function is required. If a factory wants to reuse a resource across different scopes, it often used the ``request.cached_setup()`` helper to manage caching of -resources. Here is a basic example how we could implement +resources. Here is a basic example of how we could implement a per-session Database object: .. code-block:: python @@ -39,10 +39,10 @@ a per-session Database object: There are several limitations and difficulties with this approach: -1. Scoping funcarg resource creation is not straight forward, instead one must +1. Scoping funcarg resource creation is not straightforward, instead one must understand the intricate cached_setup() method mechanics. -2. parametrizing the "db" resource is not straight forward: +2. parametrizing the "db" resource is not straightforward: you need to apply a "parametrize" decorator or implement a :hook:`pytest_generate_tests` hook calling :py:func:`~pytest.Metafunc.parametrize` which @@ -55,7 +55,7 @@ There are several limitations and difficulties with this approach: at the same time, making it hard for them to affect global state of the application under test. -4. 
there is no way how you can make use of funcarg factories +4. there is no way you can make use of funcarg factories in xUnit setup methods. 5. A non-parametrized fixture function cannot use a parametrized diff --git a/doc/en/getting-started.rst b/doc/en/getting-started.rst index 41469de3864..76a4428c163 100644 --- a/doc/en/getting-started.rst +++ b/doc/en/getting-started.rst @@ -9,8 +9,6 @@ Get Started Install ``pytest`` ---------------------------------------- -``pytest`` requires: Python 3.8+ or PyPy3. - 1. Run the following command in your command line: .. code-block:: bash @@ -22,7 +20,7 @@ Install ``pytest`` .. code-block:: bash $ pytest --version - pytest 8.3.5 + pytest 9.0.3 .. _`simpletest`: @@ -47,7 +45,7 @@ The test $ pytest =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 1 item @@ -75,7 +73,7 @@ The ``[100%]`` refers to the overall progress of running all test cases. After i Run multiple tests ---------------------------------------------------------- -``pytest`` will run all files of the form test_*.py or \*_test.py in the current directory and its subdirectories. More generally, it follows :ref:`standard test discovery rules `. +``pytest`` will run all files of the form ``test_*.py`` or ``*_test.py`` in the current directory and its subdirectories. More generally, it follows :ref:`standard test discovery rules `. Assert that a certain exception is raised @@ -201,6 +199,26 @@ This is outlined below: Note that attributes added at class level are *class attributes*, so they will be shared between tests. +Compare floating-point values with pytest.approx +-------------------------------------------------------------- + +``pytest`` also provides a number of utilities to make writing tests easier. 
+For example, you can use :func:`pytest.approx` to compare floating-point +values that may have small rounding errors: + +.. code-block:: python + + # content of test_approx.py + import pytest + + + def test_sum(): + assert (0.1 + 0.2) == pytest.approx(0.3) + +This avoids the need for manual tolerance checks or using +``math.isclose`` and works with scalars, lists, and NumPy arrays. + + Request a unique temporary directory for functional tests -------------------------------------------------------------- @@ -244,7 +262,7 @@ Find out what kind of builtin :ref:`pytest fixtures ` exist with the c pytest --fixtures # shows builtin and custom fixtures -Note that this command omits fixtures with leading ``_`` unless the ``-v`` option is added. +Note that this command omits fixtures with leading ``_`` unless the :option:`-v` option is added. Continue reading ------------------------------------- diff --git a/doc/en/historical-notes.rst b/doc/en/historical-notes.rst index be67036d6ca..d93c7b94793 100644 --- a/doc/en/historical-notes.rst +++ b/doc/en/historical-notes.rst @@ -263,20 +263,24 @@ configuration value which you might have added: @pytest.mark.skipif("not config.getvalue('db')") def test_function(): ... -The equivalent with "boolean conditions" is: +The equivalent with "boolean conditions" using ``request.config`` is: .. code-block:: python - @pytest.mark.skipif(not pytest.config.getvalue("db"), reason="--db was not specified") + @pytest.fixture(autouse=True) + def skip_if_no_db(request): + if not request.config.getoption("--db", default=False): + pytest.skip("--db was not specified") + + def test_function(): pass .. note:: - You cannot use ``pytest.config.getvalue()`` in code - imported before pytest's argument parsing takes place. For example, - ``conftest.py`` files are imported before command line parsing and thus - ``config.getvalue()`` will not execute correctly. + ``pytest.config`` was removed in pytest 5.0. 
Use ``request.config`` + (via the ``request`` fixture) or the ``pytestconfig`` fixture instead. + See :ref:`pytest.config global deprecated` for details. ``pytest.set_trace()`` ---------------------- @@ -304,7 +308,7 @@ For more details see :ref:`breakpoints`. -Access of ``Module``, ``Function``, ``Class``, ``Instance``, ``File`` and ``Item`` through ``Node`` instances have long +Access of ``Module``, ``Function``, ``Class``, ``Instance``, ``File`` and ``Item`` through ``Node`` instances has long been documented as deprecated, but started to emit warnings from pytest ``3.9`` and onward. Users should just ``import pytest`` and access those objects using the ``pytest`` module. diff --git a/doc/en/how-to/assert.rst b/doc/en/how-to/assert.rst index 6bc8f6fed33..006cf475b02 100644 --- a/doc/en/how-to/assert.rst +++ b/doc/en/how-to/assert.rst @@ -29,7 +29,7 @@ you will see the return value of the function call: $ pytest test_assert1.py =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 1 item @@ -66,6 +66,33 @@ See :ref:`assert-details` for more information on assertion introspection. .. _`assertraises`: +Assertions about approximate equality +------------------------------------- + +When comparing floating point values (or arrays of floats), small rounding +errors are common. Instead of using ``assert abs(a - b) < tol`` or +``numpy.isclose``, you can use :func:`pytest.approx`: + +.. code-block:: python + + import pytest + import numpy as np + + + def test_floats(): + assert (0.1 + 0.2) == pytest.approx(0.3) + + + def test_arrays(): + a = np.array([1.0, 2.0, 3.0]) + b = np.array([0.9999, 2.0001, 3.0]) + assert a == pytest.approx(b) + +``pytest.approx`` works with scalars, lists, dictionaries, and NumPy arrays. +It also supports comparisons involving NaNs. 
+ +See :func:`pytest.approx` for details. + Assertions about expected exceptions ------------------------------------------ @@ -191,7 +218,7 @@ To specify more details about the contained exception you can use :class:`pytest with pytest.RaisesGroup(pytest.RaisesExc(ValueError, match="foo")): raise ExceptionGroup("", (ValueError("foo"))) -They both supply a method :meth:`pytest.RaisesGroup.matches` :meth:`pytest.RaisesExc.matches` if you want to do matching outside of using it as a contextmanager. This can be helpful when checking ``.__context__`` or ``.__cause__``. +They both supply a method :meth:`pytest.RaisesGroup.matches` and :meth:`pytest.RaisesExc.matches` if you want to do matching outside of using it as a :external+python:std:ref:`context manager `. This can be helpful when checking ``.__context__`` or ``.__cause__``. .. code-block:: python @@ -377,7 +404,7 @@ if you run this module: $ pytest test_assert2.py =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 1 item @@ -409,6 +436,10 @@ Special comparisons are done for a number of cases: * comparing long sequences: first failing indices * comparing dicts: different entries +In string context diffs, lines prefixed with ``-`` come from the left-hand side +of ``assert left == right``, while lines prefixed with ``+`` come from the +right-hand side. + See the :ref:`reporting demo ` for many more examples. Defining your own explanation for failed assertions @@ -476,6 +507,50 @@ the conftest file: FAILED test_foocompare.py::test_compare - assert Comparing Foo instances: 1 failed in 0.12s +.. _`return-not-none`: + +Returning non-None value in test functions +------------------------------------------ + +A :class:`pytest.PytestReturnNotNoneWarning` is emitted when a test function returns a value other than ``None``. 
+ +This helps prevent a common mistake made by beginners who assume that returning a ``bool`` (e.g., ``True`` or ``False``) will determine whether a test passes or fails. + +Example: + +.. code-block:: python + + @pytest.mark.parametrize( + ["a", "b", "result"], + [ + [1, 2, 5], + [2, 3, 8], + [5, 3, 18], + ], + ) + def test_foo(a, b, result): + return foo(a, b) == result # Incorrect usage, do not do this. + +Since pytest ignores return values, it might be surprising that the test will never fail based on the returned value. + +The correct fix is to replace the ``return`` statement with an ``assert``: + +.. code-block:: python + + @pytest.mark.parametrize( + ["a", "b", "result"], + [ + [1, 2, 5], + [2, 3, 8], + [5, 3, 18], + ], + ) + def test_foo(a, b, result): + assert foo(a, b) == result + + + + .. _assert-details: .. _`assert introspection`: @@ -512,7 +587,7 @@ Note that you still get the benefits of assertion introspection, the only change the ``.pyc`` files won't be cached on disk. Additionally, rewriting will silently skip caching if it cannot write new ``.pyc`` files, -i.e. in a read-only filesystem or a zipfile. +e.g. in a read-only filesystem or a zipfile. Disabling assert rewriting @@ -528,4 +603,4 @@ If this is the case you have two options: * Disable rewriting for a specific module by adding the string ``PYTEST_DONT_REWRITE`` to its docstring. -* Disable rewriting for all modules by using ``--assert=plain``. +* Disable rewriting for all modules by using :option:`--assert=plain`. diff --git a/doc/en/how-to/cache.rst b/doc/en/how-to/cache.rst index a3b2a862534..ca345916fc5 100644 --- a/doc/en/how-to/cache.rst +++ b/doc/en/how-to/cache.rst @@ -13,11 +13,11 @@ Usage The plugin provides two command line options to rerun failures from the last ``pytest`` invocation: -* ``--lf``, ``--last-failed`` - to only re-run the failures. 
-* ``--ff``, ``--failed-first`` - to run the failures first and then the rest of +* :option:`--lf, --last-failed <--lf>` - to only re-run the failures. +* :option:`--ff, --failed-first <--ff>` - to run the failures first and then the rest of the tests. -For cleanup (usually not needed), a ``--cache-clear`` option allows to remove +For cleanup (usually not needed), a :option:`--cache-clear` option allows removing all cross-session cache contents ahead of a test run. Other plugins may access the `config.cache`_ object to set/get @@ -33,7 +33,7 @@ Other plugins may access the `config.cache`_ object to set/get Rerunning only failures or failures first ----------------------------------------------- -First, let's create 50 test invocation of which only 2 fail: +First, let's create 50 test invocations of which only 2 fail: .. code-block:: python @@ -80,13 +80,13 @@ If you run this for the first time you will see two failures: FAILED test_50.py::test_num[25] - Failed: bad luck 2 failed, 48 passed in 0.12s -If you then run it with ``--lf``: +If you then run it with :option:`--lf`: .. code-block:: pytest $ pytest --lf =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 2 items run-last-failure: rerun previous 2 failures @@ -124,7 +124,7 @@ If you then run it with ``--lf``: You have run only the two failing tests from the last run, while the 48 passing tests have not been run ("deselected"). 
-Now, if you run with the ``--ff`` option, all tests will be run but the first +Now, if you run with the :option:`--ff` option, all tests will be run but the first previous failures will be executed first (as can be seen from the series of ``FF`` and dots): @@ -132,7 +132,7 @@ of ``FF`` and dots): $ pytest --ff =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 50 items run-last-failure: rerun previous 2 failures first @@ -169,14 +169,14 @@ of ``FF`` and dots): .. _`config.cache`: -New ``--nf``, ``--new-first`` options: run new tests first followed by the rest +New :option:`--nf, --new-first <--nf>` option: run new tests first followed by the rest of the tests, in both cases tests are also sorted by the file modified time, with more recent files coming first. Behavior when no tests failed in the last run --------------------------------------------- -The ``--lfnf/--last-failed-no-failures`` option governs the behavior of ``--last-failed``. +The :option:`--lfnf, --last-failed-no-failures <--lfnf>` option governs the behavior of :option:`--last-failed`. Determines whether to execute tests when there are no previously (known) failures or when no cached ``lastfailed`` data was found. @@ -275,13 +275,13 @@ Inspecting Cache content ------------------------ You can always peek at the content of the cache using the -``--cache-show`` command line option: +:option:`--cache-show` command line option: .. 
code-block:: pytest $ pytest --cache-show =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project cachedir: /home/sweet/project/.pytest_cache --------------------------- cache values for '*' --------------------------- @@ -289,21 +289,19 @@ You can always peek at the content of the cache using the {'test_caching.py::test_function': True} cache/nodeids contains: ['test_caching.py::test_function'] - cache/stepwise contains: - [] example/value contains: 42 ========================== no tests ran in 0.12s =========================== -``--cache-show`` takes an optional argument to specify a glob pattern for +:option:`--cache-show` takes an optional argument to specify a glob pattern for filtering: .. code-block:: pytest $ pytest --cache-show example/* =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project cachedir: /home/sweet/project/.pytest_cache ----------------------- cache values for 'example/*' ----------------------- @@ -316,7 +314,7 @@ Clearing Cache content ---------------------- You can instruct pytest to clear all cache files and values -by adding the ``--cache-clear`` option like this: +by adding the :option:`--cache-clear` option like this: .. code-block:: bash @@ -332,4 +330,4 @@ than speed. Stepwise -------- -As an alternative to ``--lf -x``, especially for cases where you expect a large part of the test suite will fail, ``--sw``, ``--stepwise`` allows you to fix them one at a time. The test suite will run until the first failure and then stop. At the next invocation, tests will continue from the last failing test and then run until the next failing test. 
You may use the ``--stepwise-skip`` option to ignore one failing test and stop the test execution on the second failing test instead. This is useful if you get stuck on a failing test and just want to ignore it until later. Providing ``--stepwise-skip`` will also enable ``--stepwise`` implicitly. +As an alternative to :option:`--lf` :option:`-x`, especially for cases where you expect a large part of the test suite will fail, :option:`--sw, --stepwise <--sw>` allows you to fix them one at a time. The test suite will run until the first failure and then stop. At the next invocation, tests will continue from the last failing test and then run until the next failing test. You may use the :option:`--stepwise-skip` option to ignore one failing test and stop the test execution on the second failing test instead. This is useful if you get stuck on a failing test and just want to ignore it until later. Providing ``--stepwise-skip`` will also enable ``--stepwise`` implicitly. diff --git a/doc/en/how-to/capture-stdout-stderr.rst b/doc/en/how-to/capture-stdout-stderr.rst index e6affd80ea1..5de89bc0e3f 100644 --- a/doc/en/how-to/capture-stdout-stderr.rst +++ b/doc/en/how-to/capture-stdout-stderr.rst @@ -4,26 +4,26 @@ How to capture stdout/stderr output ========================================================= -Pytest intercepts stdout and stderr as configured by the ``--capture=`` +Pytest intercepts stdout and stderr as configured by the :option:`--capture=` command-line argument or by using fixtures. The ``--capture=`` flag configures -reporting, whereas the fixtures offer more granular control and allows +reporting, whereas the fixtures offer more granular control and allow inspection of output during testing. The reports can be customized with the -`-r flag <../reference/reference.html#command-line-flags>`_. +:option:`-r` flag. 
Default stdout/stderr/stdin capturing behaviour --------------------------------------------------------- During test execution any output sent to ``stdout`` and ``stderr`` is captured. If a test or a setup method fails its according captured -output will usually be shown along with the failure traceback. (this -behavior can be configured by the ``--show-capture`` command-line option). +output will usually be shown along with the failure traceback. (This +behavior can be configured by the :option:`--show-capture` command-line option). In addition, ``stdin`` is set to a "null" object which will fail on attempts to read from it because it is rarely desired to wait for interactive input when running automated tests. By default capturing is done by intercepting writes to low level -file descriptors. This allows to capture output from simple +file descriptors. This allows capturing output from simple print statements as well as output from a subprocess started by a test. @@ -89,7 +89,7 @@ of the failing function and hide the other one: $ pytest =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 2 items @@ -109,6 +109,8 @@ of the failing function and hide the other one: FAILED test_module.py::test_func2 - assert False ======================= 1 failed, 1 passed in 0.12s ======================== +.. _accessing-captured-output: + Accessing captured output from a test function --------------------------------------------------- @@ -162,3 +164,13 @@ as a context manager, disabling capture inside the ``with`` block: with capsys.disabled(): print("output not captured, going directly to sys.stdout") print("this output is also captured") + +.. 
note:: + + When a capture fixture such as :fixture:`capsys` or :fixture:`capfd` is used, + it takes precedence over the global capturing configuration set via + command-line options such as ``-s`` or ``--capture=no``. + + This means that output produced within a test using a capture fixture will + still be captured and available via ``readouterr()``, even if global capturing + is disabled. diff --git a/doc/en/how-to/capture-warnings.rst b/doc/en/how-to/capture-warnings.rst index 4b1de6f3704..b0ff6a74892 100644 --- a/doc/en/how-to/capture-warnings.rst +++ b/doc/en/how-to/capture-warnings.rst @@ -28,7 +28,7 @@ Running pytest now produces this output: $ pytest test_show_warnings.py =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 1 item @@ -66,6 +66,7 @@ as an error: def test_one(): > assert api_v1() == 1 + ^^^^^^^^ test_show_warnings.py:10: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @@ -79,30 +80,32 @@ as an error: FAILED test_show_warnings.py::test_one - UserWarning: api v1, should use ... 1 failed in 0.12s -The same option can be set in the ``pytest.ini`` or ``pyproject.toml`` file using the -``filterwarnings`` ini option. For example, the configuration below will ignore all +The same option can be set in the configuration file using the +:confval:`filterwarnings` configuration option. For example, the configuration below will ignore all user warnings and specific deprecation warnings matching a regex, but will transform all other warnings into errors. -.. code-block:: ini +.. tab:: toml - # pytest.ini - [pytest] - filterwarnings = - error - ignore::UserWarning - ignore:function ham\(\) is deprecated:DeprecationWarning + .. code-block:: toml -.. 
code-block:: toml + [pytest] + filterwarnings = [ + "error", + "ignore::UserWarning", + # note the use of single quote below to denote "raw" strings in TOML + 'ignore:function ham\(\) is deprecated:DeprecationWarning', + ] + +.. tab:: ini + + .. code-block:: ini - # pyproject.toml - [tool.pytest.ini_options] - filterwarnings = [ - "error", - "ignore::UserWarning", - # note the use of single quote below to denote "raw" strings in TOML - 'ignore:function ham\(\) is deprecated:DeprecationWarning', - ] + [pytest] + filterwarnings = + error + ignore::UserWarning + ignore:function ham\(\) is deprecated:DeprecationWarning When a warning matches more than one option in the list, the action for the last matching option @@ -111,7 +114,7 @@ is performed. .. note:: - The ``-W`` flag and the ``filterwarnings`` ini option use warning filters that are + The ``-W`` flag and the :confval:`filterwarnings` configuration option use warning filters that are similar in structure, but each configuration option interprets its filter differently. For example, *message* in ``filterwarnings`` is a string containing a regular expression that the start of the warning message must match, @@ -157,6 +160,15 @@ You can specify multiple filters with separate decorators: def test_one(): assert api_v1() == 1 +You can also pass multiple filters to a single mark by providing multiple arguments: + +.. code-block:: python + + # Later arguments take precedence, matching warnings.filterwarnings behavior. + @pytest.mark.filterwarnings("error", "ignore:api v1") + def test_one(): + assert api_v1() == 1 + .. important:: Regarding decorator order and filter precedence: @@ -168,7 +180,7 @@ You can specify multiple filters with separate decorators: Filters applied using a mark take precedence over filters passed on the command line or configured -by the :confval:`filterwarnings` ini option. +by the :confval:`filterwarnings` configuration option. 
You may apply a filter to all tests of a class by using the :ref:`filterwarnings ` mark as a class decorator or to all tests in a module by setting the :globalvar:`pytestmark` variable: @@ -195,20 +207,29 @@ decorator or to all tests in a module by setting the :globalvar:`pytestmark` var Disabling warnings summary -------------------------- -Although not recommended, you can use the ``--disable-warnings`` command-line option to suppress the +Although not recommended, you can use the :option:`--disable-warnings` command-line option to suppress the warning summary entirely from the test run output. Disabling warning capture entirely ---------------------------------- -This plugin is enabled by default but can be disabled entirely in your ``pytest.ini`` file with: +This plugin is enabled by default but can be disabled entirely in your configuration file with: + +.. tab:: toml + + .. code-block:: toml + + [pytest] + addopts = ["-p", "no:warnings"] + +.. tab:: ini .. code-block:: ini [pytest] addopts = -p no:warnings -Or passing ``-p no:warnings`` in the command-line. This might be useful if your test suites handles warnings +Or passing ``-p no:warnings`` in the command-line. This might be useful if your test suite handles warnings using an external system. @@ -226,16 +247,27 @@ However, in the specific case where users capture any type of warnings in their no warning will be displayed at all. Sometimes it is useful to hide some specific deprecation warnings that happen in code that you have no control over -(such as third-party libraries), in which case you might use the warning filters options (ini or marks) to ignore +(such as third-party libraries), in which case you might use the warning filters options (configuration or marks) to ignore those warnings. For example: -.. code-block:: ini +.. tab:: toml + + .. 
code-block:: toml - [pytest] - filterwarnings = - ignore:.*U.*mode is deprecated:DeprecationWarning + [pytest] + filterwarnings = [ + 'ignore:.*U.*mode is deprecated:DeprecationWarning', + ] + +.. tab:: ini + + .. code-block:: ini + + [pytest] + filterwarnings = + ignore:.*U.*mode is deprecated:DeprecationWarning This will ignore all warnings of type ``DeprecationWarning`` where the start of the message matches @@ -263,8 +295,8 @@ Ensuring code triggers a deprecation warning -------------------------------------------- You can also use :func:`pytest.deprecated_call` for checking -that a certain function call triggers a ``DeprecationWarning`` or -``PendingDeprecationWarning``: +that a certain function call triggers a ``DeprecationWarning``, ``PendingDeprecationWarning`` or +``FutureWarning``: .. code-block:: python diff --git a/doc/en/how-to/doctest.rst b/doc/en/how-to/doctest.rst index c2a6cc8e958..59d1033ed4f 100644 --- a/doc/en/how-to/doctest.rst +++ b/doc/en/how-to/doctest.rst @@ -11,7 +11,7 @@ can change the pattern by issuing: pytest --doctest-glob="*.rst" -on the command line. ``--doctest-glob`` can be given multiple times in the command-line. +on the command line. :option:`--doctest-glob` can be given multiple times in the command-line. If you then have a text file like this: @@ -30,7 +30,7 @@ then you can just invoke ``pytest`` directly: $ pytest =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 1 item @@ -39,7 +39,7 @@ then you can just invoke ``pytest`` directly: ============================ 1 passed in 0.12s ============================= By default, pytest will collect ``test*.txt`` files looking for doctest directives, but you -can pass additional globs using the ``--doctest-glob`` option (multi-allowed). 
+can pass additional globs using the :option:`--doctest-glob` option (multi-allowed). In addition to text files, you can also execute doctests directly from docstrings of your classes and functions, including from test modules: @@ -58,7 +58,7 @@ and functions, including from test modules: $ pytest --doctest-modules =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 2 items @@ -68,27 +68,34 @@ and functions, including from test modules: ============================ 2 passed in 0.12s ============================= You can make these changes permanent in your project by -putting them into a pytest.ini file like this: +putting them into a configuration file like this: -.. code-block:: ini +.. code-block:: toml - # content of pytest.ini + # content of pytest.toml [pytest] - addopts = --doctest-modules - + addopts = ["--doctest-modules"] Encoding -------- The default encoding is **UTF-8**, but you can specify the encoding that will be used for those doctest files using the -``doctest_encoding`` ini option: +:confval:`doctest_encoding` configuration option: -.. code-block:: ini +.. tab:: toml - # content of pytest.ini - [pytest] - doctest_encoding = latin1 + .. code-block:: toml + + [pytest] + doctest_encoding = "latin1" + +.. tab:: ini + + .. code-block:: ini + + [pytest] + doctest_encoding = latin1 .. _using doctest options: @@ -102,10 +109,19 @@ configuration file. For example, to make pytest ignore trailing whitespaces and ignore lengthy exception stack traces you can just write: -.. code-block:: ini +.. tab:: toml - [pytest] - doctest_optionflags = NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL + .. code-block:: toml + + [pytest] + doctest_optionflags = ["NORMALIZE_WHITESPACE", "IGNORE_EXCEPTION_DETAIL"] + +.. tab:: ini + + .. 
code-block:: ini + + [pytest] + doctest_optionflags = NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL Alternatively, options can be enabled by an inline comment in the doc test itself: @@ -169,7 +185,7 @@ Output format ------------- You can change the diff output format on failure for your doctests -by using one of standard doctest modules format in options +by using one of the standard doctest module's format options (see :data:`python:doctest.REPORT_UDIFF`, :data:`python:doctest.REPORT_CDIFF`, :data:`python:doctest.REPORT_NDIFF`, :data:`python:doctest.REPORT_ONLY_FIRST_FAILURE`): @@ -307,7 +323,7 @@ While the built-in pytest support provides a good set of functionalities for usi doctests, if you use them extensively you might be interested in those external packages which add many more features, and include pytest integration: -* `pytest-doctestplus `__: provides +* `pytest-doctestplus `__: provides advanced doctest support and enables the testing of reStructuredText (".rst") files. * `Sybil `__: provides a way to test examples in diff --git a/doc/en/how-to/failures.rst b/doc/en/how-to/failures.rst index b3d0c155b48..878c869d525 100644 --- a/doc/en/how-to/failures.rst +++ b/doc/en/how-to/failures.rst @@ -93,8 +93,8 @@ Pytest supports the use of ``breakpoint()`` with the following behaviours: - When ``breakpoint()`` is called and ``PYTHONBREAKPOINT`` is set to the default value, pytest will use the custom internal PDB trace UI instead of the system default ``Pdb``. - When tests are complete, the system will default back to the system ``Pdb`` trace UI. - - With ``--pdb`` passed to pytest, the custom internal Pdb trace UI is used with both ``breakpoint()`` and failed tests/unhandled exceptions. - - ``--pdbcls`` can be used to specify a custom debugger class. + - With :option:`--pdb` passed to pytest, the custom internal Pdb trace UI is used with both ``breakpoint()`` and failed tests/unhandled exceptions. 
+ - :option:`--pdbcls` can be used to specify a custom debugger class. .. _faulthandler: @@ -112,7 +112,7 @@ on the command-line. Also the :confval:`faulthandler_timeout=X` configuration option can be used to dump the traceback of all threads if a test takes longer than ``X`` -seconds to finish (not available on Windows). +seconds to finish. .. note:: diff --git a/doc/en/how-to/fixtures.rst b/doc/en/how-to/fixtures.rst index 8f84e4867a6..2f554fb8c60 100644 --- a/doc/en/how-to/fixtures.rst +++ b/doc/en/how-to/fixtures.rst @@ -433,7 +433,7 @@ marked ``smtp_connection`` fixture function. Running the test looks like this: $ pytest test_module.py =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 2 items @@ -449,6 +449,7 @@ marked ``smtp_connection`` fixture function. Running the test looks like this: assert response == 250 assert b"smtp.gmail.com" in msg > assert 0 # for demo purposes + ^^^^^^^^ E assert 0 test_module.py:7: AssertionError @@ -460,6 +461,7 @@ marked ``smtp_connection`` fixture function. Running the test looks like this: response, msg = smtp_connection.noop() assert response == 250 > assert 0 # for demo purposes + ^^^^^^^^ E assert 0 test_module.py:13: AssertionError @@ -469,7 +471,7 @@ marked ``smtp_connection`` fixture function. Running the test looks like this: ============================ 2 failed in 0.12s ============================= You see the two ``assert 0`` failing and more importantly you can also see -that the **exactly same** ``smtp_connection`` object was passed into the +that the **exact same** ``smtp_connection`` object was passed into the two test functions because pytest shows the incoming argument values in the traceback. As a result, the two test functions using ``smtp_connection`` run as quick as a single one because they reuse the same instance. 
@@ -771,7 +773,7 @@ For yield fixtures, the first teardown code to run is from the right-most fixtur $ pytest -s test_finalizers.py =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 1 item @@ -805,7 +807,7 @@ For finalizers, the first fixture to run is last call to `request.addfinalizer`. $ pytest -s test_finalizers.py =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 1 item @@ -1308,6 +1310,7 @@ So let's just do another run: assert response == 250 assert b"smtp.gmail.com" in msg > assert 0 # for demo purposes + ^^^^^^^^ E assert 0 test_module.py:7: AssertionError @@ -1319,6 +1322,7 @@ So let's just do another run: response, msg = smtp_connection.noop() assert response == 250 > assert 0 # for demo purposes + ^^^^^^^^ E assert 0 test_module.py:13: AssertionError @@ -1343,6 +1347,7 @@ So let's just do another run: response, msg = smtp_connection.noop() assert response == 250 > assert 0 # for demo purposes + ^^^^^^^^ E assert 0 test_module.py:13: AssertionError @@ -1363,9 +1368,9 @@ different server string is expected than what arrived. pytest will build a string that is the test ID for each fixture value in a parametrized fixture, e.g. ``test_ehlo[smtp.gmail.com]`` and ``test_ehlo[mail.python.org]`` in the above examples. These IDs can -be used with ``-k`` to select specific cases to run, and they will +be used with :option:`-k` to select specific cases to run, and they will also identify the specific case when one is failing. Running pytest -with ``--collect-only`` will show the generated IDs. +with :option:`--collect-only` will show the generated IDs. 
Numbers, strings, booleans and ``None`` will have their usual string representation used in the test ID. For other objects, pytest will @@ -1414,11 +1419,11 @@ Running the above tests results in the following test IDs being used: $ pytest --collect-only =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 12 items - + @@ -1469,7 +1474,7 @@ Running this test will *skip* the invocation of ``data_set`` with value ``2``: $ pytest test_fixture_marks.py -v =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python cachedir: .pytest_cache rootdir: /home/sweet/project collecting ... collected 3 items @@ -1519,7 +1524,7 @@ Here we declare an ``app`` fixture which receives the previously defined $ pytest -v test_appsetup.py =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python cachedir: .pytest_cache rootdir: /home/sweet/project collecting ... collected 2 items @@ -1599,7 +1604,7 @@ Let's run the tests in verbose mode and with looking at the print-output: $ pytest -v -s test_module.py =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python cachedir: .pytest_cache rootdir: /home/sweet/project collecting ... 
collected 8 items @@ -1640,7 +1645,7 @@ Let's run the tests in verbose mode and with looking at the print-output: ============================ 8 passed in 0.12s ============================= You can see that the parametrized module-scoped ``modarg`` resource caused an -ordering of test execution that lead to the fewest possible "active" resources. +ordering of test execution that led to the fewest possible "active" resources. The finalizer for the ``mod1`` parametrized resource was executed before the ``mod2`` resource was setup. @@ -1649,7 +1654,7 @@ Then test_1 is executed with ``mod1``, then test_2 with ``mod1``, then test_1 with ``mod2`` and finally test_2 with ``mod2``. The ``otherarg`` parametrized resource (having function scope) was set up before -and teared down after every test that used it. +and torn down after every test that used it. .. _`usefixtures`: @@ -1731,14 +1736,13 @@ and you may specify fixture usage at the test module level using :globalvar:`pyt It is also possible to put fixtures required by all tests in your project -into an ini-file: +into a configuration file: -.. code-block:: ini +.. code-block:: toml - # content of pytest.ini + # content of pytest.toml [pytest] - usefixtures = cleandir - + usefixtures = ["cleandir"] .. warning:: @@ -1758,8 +1762,8 @@ into an ini-file: Overriding fixtures on various levels ------------------------------------- -In relatively large test suite, you most likely need to ``override`` a ``global`` or ``root`` fixture with a ``locally`` -defined one, keeping the test code readable and maintainable. +In a relatively large test suite, you may want to *override* a fixture, to augment +or change its behavior inside of certain test modules or directories. 
Override a fixture on a folder (conftest) level ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -1796,7 +1800,7 @@ Given the tests file structure is: def test_username(username): assert username == 'overridden-username' -As you can see, a fixture with the same name can be overridden for certain test folder level. +As you can see, a fixture with the same name can be overridden for a certain test directory level. Note that the ``base`` or ``super`` fixture can be accessed from the ``overriding`` fixture easily - used in the example above. @@ -1838,7 +1842,7 @@ Given the tests file structure is: def test_username(username): assert username == 'overridden-else-username' -In the example above, a fixture with the same name can be overridden for certain test module. +In the example above, a fixture with the same name can be overridden for a certain test module. Override a fixture with direct test parametrization diff --git a/doc/en/how-to/index.rst b/doc/en/how-to/index.rst index 225f289651e..9796f1f8090 100644 --- a/doc/en/how-to/index.rst +++ b/doc/en/how-to/index.rst @@ -16,6 +16,7 @@ Core pytest functionality fixtures mark parametrize + subtests tmp_path monkeypatch doctest diff --git a/doc/en/how-to/logging.rst b/doc/en/how-to/logging.rst index 300e9f6e6c2..25b4e9017e2 100644 --- a/doc/en/how-to/logging.rst +++ b/doc/en/how-to/logging.rst @@ -47,15 +47,25 @@ Shows failed tests like so: text going to stderr ==================== 2 failed in 0.02 seconds ===================== -These options can also be customized through ``pytest.ini`` file: +These options can also be customized through a configuration file: -.. code-block:: ini +.. tab:: toml - [pytest] - log_format = %(asctime)s %(levelname)s %(message)s - log_date_format = %Y-%m-%d %H:%M:%S + .. code-block:: toml -Specific loggers can be disabled via ``--log-disable={logger_name}``. + [pytest] + log_format = "%(asctime)s %(levelname)s %(message)s" + log_date_format = "%Y-%m-%d %H:%M:%S" + +.. tab:: ini + + .. 
code-block:: ini + + [pytest] + log_format = %(asctime)s %(levelname)s %(message)s + log_date_format = %Y-%m-%d %H:%M:%S + +Specific loggers can be disabled via :option:`--log-disable={logger_name}`. This argument can be passed multiple times: .. code-block:: bash @@ -189,48 +199,48 @@ By setting the :confval:`log_cli` configuration option to ``true``, pytest will logging records as they are emitted directly into the console. You can specify the logging level for which log records with equal or higher -level are printed to the console by passing ``--log-cli-level``. This setting +level are printed to the console by passing :option:`--log-cli-level`. This setting accepts the logging level names or numeric values as seen in :ref:`logging's documentation `. -Additionally, you can also specify ``--log-cli-format`` and -``--log-cli-date-format`` which mirror and default to ``--log-format`` and -``--log-date-format`` if not provided, but are applied only to the console +Additionally, you can also specify :option:`--log-cli-format` and +:option:`--log-cli-date-format` which mirror and default to :option:`--log-format` and +:option:`--log-date-format` if not provided, but are applied only to the console logging handler. -All of the CLI log options can also be set in the configuration INI file. The +All of the CLI log options can also be set in the configuration file. The option names are: -* ``log_cli_level`` -* ``log_cli_format`` -* ``log_cli_date_format`` +* :confval:`log_cli_level` +* :confval:`log_cli_format` +* :confval:`log_cli_date_format` If you need to record the whole test suite logging calls to a file, you can pass -``--log-file=/path/to/log/file``. This log file is opened in write mode by default which -means that it will be overwritten at each run tests session. -If you'd like the file opened in append mode instead, then you can pass ``--log-file-mode=a``. +:option:`--log-file=/path/to/log/file`. 
This log file is opened in write mode by default, which +means that it will be overwritten at each test session. +If you'd like the file opened in append mode instead, then you can pass :option:`--log-file-mode=a`. Note that relative paths for the log-file location, whether passed on the CLI or declared in a config file, are always resolved relative to the current working directory. You can also specify the logging level for the log file by passing -``--log-file-level``. This setting accepts the logging level names or numeric +:option:`--log-file-level`. This setting accepts the logging level names or numeric values as seen in :ref:`logging's documentation `. -Additionally, you can also specify ``--log-file-format`` and -``--log-file-date-format`` which are equal to ``--log-format`` and -``--log-date-format`` but are applied to the log file logging handler. +Additionally, you can also specify :option:`--log-file-format` and +:option:`--log-file-date-format` which are equal to ``--log-format`` and +:option:`--log-date-format` but are applied to the log file logging handler. -All of the log file options can also be set in the configuration INI file. The +All of the log file options can also be set in the configuration file. The option names are: -* ``log_file`` -* ``log_file_mode`` -* ``log_file_level`` -* ``log_file_format`` -* ``log_file_date_format`` +* :confval:`log_file` +* :confval:`log_file_mode` +* :confval:`log_file_level` +* :confval:`log_file_format` +* :confval:`log_file_date_format` You can call ``set_log_path()`` to customize the log_file path dynamically. This functionality -is considered **experimental**. Note that ``set_log_path()`` respects the ``log_file_mode`` option. +is considered **experimental**. Note that ``set_log_path()`` respects the :confval:`log_file_mode` option. .. _log_colors: @@ -266,12 +276,21 @@ This feature was introduced as a drop-in replacement for the with each other. 
The backward compatibility API with ``pytest-capturelog`` has been dropped when this feature was introduced, so if for that reason you still need ``pytest-catchlog`` you can disable the internal feature by -adding to your ``pytest.ini``: +adding to your configuration file: + +.. tab:: toml + + .. code-block:: toml -.. code-block:: ini + [pytest] + addopts = ["-p", "no:logging"] - [pytest] - addopts=-p no:logging +.. tab:: ini + + .. code-block:: ini + + [pytest] + addopts = -p no:logging .. _log_changes_3_4: @@ -283,23 +302,33 @@ This feature was introduced in ``3.3`` and some **incompatible changes** have be made in ``3.4`` after community feedback: * Log levels are no longer changed unless explicitly requested by the :confval:`log_level` configuration - or ``--log-level`` command-line options. This allows users to configure logger objects themselves. + or :option:`--log-level` command-line options. This allows users to configure logger objects themselves. Setting :confval:`log_level` will set the level that is captured globally so if a specific test requires a lower level than this, use the ``caplog.set_level()`` functionality otherwise that test will be prone to failure. * :ref:`Live Logs ` is now disabled by default and can be enabled setting the :confval:`log_cli` configuration option to ``true``. When enabled, the verbosity is increased so logging for each test is visible. -* :ref:`Live Logs ` are now sent to ``sys.stdout`` and no longer require the ``-s`` command-line option +* :ref:`Live Logs ` are now sent to ``sys.stdout`` and no longer require the :option:`-s` command-line option to work. -If you want to partially restore the logging behavior of version ``3.3``, you can add this options to your ``ini`` +If you want to partially restore the logging behavior of version ``3.3``, you can add these options to your configuration file: -.. code-block:: ini +.. tab:: toml + + .. code-block:: toml + + [pytest] + log_cli = true + log_level = "NOTSET" + +.. 
tab:: ini + + .. code-block:: ini - [pytest] - log_cli=true - log_level=NOTSET + [pytest] + log_cli = true + log_level = NOTSET -More details about the discussion that lead to this changes can be read in :issue:`3013`. +More details about the discussion that led to these changes can be read in :issue:`3013`. diff --git a/doc/en/how-to/mark.rst b/doc/en/how-to/mark.rst index 33f9d18bfe3..e22219414a0 100644 --- a/doc/en/how-to/mark.rst +++ b/doc/en/how-to/mark.rst @@ -21,7 +21,7 @@ Here are some of the builtin markers: It's easy to create custom markers or to apply markers to whole test classes or modules. Those markers can be used by plugins, and also -are commonly used to :ref:`select tests ` on the command-line with the ``-m`` option. +are commonly used to :ref:`select tests ` on the command-line with the :option:`-m` option. See :ref:`mark examples` for examples which also serve as documentation. @@ -34,24 +34,26 @@ See :ref:`mark examples` for examples which also serve as documentation. Registering marks ----------------- -You can register custom marks in your ``pytest.ini`` file like this: +You can register custom marks in your configuration file like this: -.. code-block:: ini +.. tab:: toml - [pytest] - markers = - slow: marks tests as slow (deselect with '-m "not slow"') - serial + .. code-block:: toml -or in your ``pyproject.toml`` file like this: + [pytest] + markers = [ + "slow: marks tests as slow (deselect with '-m \"not slow\"')", + "serial", + ] -.. code-block:: toml +.. tab:: ini - [tool.pytest.ini_options] - markers = [ - "slow: marks tests as slow (deselect with '-m \"not slow\"')", - "serial", - ] + .. code-block:: ini + + [pytest] + markers = + slow: marks tests as slow (deselect with '-m "not slow"') + serial Note that everything past the ``:`` after the mark name is an optional description. 
@@ -77,17 +79,30 @@ Raising errors on unknown marks Unregistered marks applied with the ``@pytest.mark.name_of_the_mark`` decorator will always emit a warning in order to avoid silently doing something surprising due to mistyped names. As described in the previous section, you can disable -the warning for custom marks by registering them in your ``pytest.ini`` file or +the warning for custom marks by registering them in your configuration file or using a custom ``pytest_configure`` hook. -When the ``--strict-markers`` command-line flag is passed, any unknown marks applied +When the :confval:`strict_markers` configuration option is set, any unknown marks applied with the ``@pytest.mark.name_of_the_mark`` decorator will trigger an error. You can -enforce this validation in your project by adding ``--strict-markers`` to ``addopts``: +enforce this validation in your project by setting :confval:`strict_markers` in your configuration: + +.. tab:: toml + + .. code-block:: toml + + [pytest] + strict_markers = true + markers = [ + "slow: marks tests as slow (deselect with '-m \"not slow\"')", + "serial", + ] + +.. tab:: ini -.. code-block:: ini + .. code-block:: ini - [pytest] - addopts = --strict-markers - markers = - slow: marks tests as slow (deselect with '-m "not slow"') - serial + [pytest] + strict_markers = true + markers = + slow: marks tests as slow (deselect with '-m "not slow"') + serial diff --git a/doc/en/how-to/monkeypatch.rst b/doc/en/how-to/monkeypatch.rst index a9504dcb32a..7442a85c10e 100644 --- a/doc/en/how-to/monkeypatch.rst +++ b/doc/en/how-to/monkeypatch.rst @@ -235,7 +235,7 @@ so that any attempts within tests to create http requests will fail. Be advised that it is not recommended to patch builtin functions such as ``open``, ``compile``, etc., because it might break pytest's internals.
If that's - unavoidable, passing ``--tb=native``, ``--assert=plain`` and ``--capture=no`` might + unavoidable, passing :option:`--tb=native`, :option:`--assert=plain` and :option:`--capture=no` might help although there's no guarantee. .. note:: @@ -382,7 +382,7 @@ You can use the :py:meth:`monkeypatch.delitem ` to remove v def test_missing_user(monkeypatch): - # patch the DEFAULT_CONFIG t be missing the 'user' key + # patch the DEFAULT_CONFIG to be missing the 'user' key monkeypatch.delitem(app.DEFAULT_CONFIG, "user", raising=False) # Key error expected because a config is not passed, and the diff --git a/doc/en/how-to/output.rst b/doc/en/how-to/output.rst index cb9276c7ea0..a594fcb3aab 100644 --- a/doc/en/how-to/output.rst +++ b/doc/en/how-to/output.rst @@ -30,8 +30,8 @@ Examples for modifying traceback printing: pytest --tb=native # Python standard library formatting pytest --tb=no # no traceback at all -The ``--full-trace`` causes very long traces to be printed on error (longer -than ``--tb=long``). It also ensures that a stack trace is printed on +The :option:`--full-trace` causes very long traces to be printed on error (longer +than :option:`--tb=long`). It also ensures that a stack trace is printed on **KeyboardInterrupt** (Ctrl+C). This is very useful if the tests are taking too long and you interrupt them with Ctrl+C to find out where the tests are *hanging*. By default no output @@ -52,8 +52,8 @@ Examples for modifying printing verbosity: pytest -vv # more verbose, display more details from the test output pytest -vvv # not a standard , but may be used for even more detail in certain setups -The ``-v`` flag controls the verbosity of pytest output in various aspects: test session progress, assertion -details when tests fail, fixtures details with ``--fixtures``, etc. +The :option:`-v` flag controls the verbosity of pytest output in various aspects: test session progress, assertion +details when tests fail, fixtures details with :option:`--fixtures`, etc. 
.. regendoc:wipe @@ -372,7 +372,7 @@ test inside the file gets its own line in the output. Producing a detailed summary report -------------------------------------------------- -The ``-r`` flag can be used to display a "short test summary info" at the end of the test session, +The :option:`-r` flag can be used to display a "short test summary info" at the end of the test session, making it easy in large test suites to get a clear picture of all failures, skips, xfails, etc. It defaults to ``fE`` to list failures and errors. @@ -421,7 +421,7 @@ Example: $ pytest -ra =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 6 items @@ -447,13 +447,13 @@ Example: ================================= XPASSES ================================== ========================= short test summary info ========================== SKIPPED [1] test_example.py:22: skipping this test - XFAIL test_example.py::test_xfail - reason: xfailing this test + XFAIL test_example.py::test_xfail - xfailing this test XPASS test_example.py::test_xpass - always xfail ERROR test_example.py::test_error - assert 0 FAILED test_example.py::test_fail - assert 0 == 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12s === -The ``-r`` options accepts a number of characters after it, with ``a`` used +The :option:`-r` option accepts a number of characters after it, with ``a`` used above meaning "all except passes".
Here is the full list of available characters that can be used: @@ -478,7 +478,7 @@ More than one character can be used, so for example to only see failed and skipp $ pytest -rfs =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 6 items @@ -513,7 +513,7 @@ captured output: $ pytest -rpP =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 6 items @@ -547,7 +547,7 @@ captured output: .. note:: By default, parametrized variants of skipped tests are grouped together if - they share the same skip reason. You can use ``--no-fold-skipped`` to print each skipped test separately. + they share the same skip reason. You can use :option:`--no-fold-skipped` to print each skipped test separately. .. _truncation-params: @@ -558,13 +558,23 @@ Modifying truncation limits .. versionadded: 8.4 Default truncation limits are 8 lines or 640 characters, whichever comes first. -To set custom truncation limits you can use following ``pytest.ini`` file options: +To set custom truncation limits you can use the following configuration file options: -.. code-block:: ini +.. tab:: toml - [pytest] - truncation_limit_lines = 10 - truncation_limit_chars = 90 + .. code-block:: toml + + [pytest] + truncation_limit_lines = 10 + truncation_limit_chars = 90 + +.. tab:: ini + + .. code-block:: ini + + [pytest] + truncation_limit_lines = 10 + truncation_limit_chars = 90 That will cause pytest to truncate the assertions to 10 lines or 90 characters, whichever comes first. @@ -588,10 +598,19 @@ to create an XML file at ``path``. 
To set the name of the root test suite xml item, you can configure the ``junit_suite_name`` option in your config file: -.. code-block:: ini +.. tab:: toml + + .. code-block:: toml + + [pytest] + junit_suite_name = "my_suite" - [pytest] - junit_suite_name = my_suite +.. tab:: ini + + .. code-block:: ini + + [pytest] + junit_suite_name = my_suite .. versionadded:: 4.0 @@ -602,10 +621,19 @@ should report total test execution times, including setup and teardown It is the default pytest behavior. To report just call durations instead, configure the ``junit_duration_report`` option like this: -.. code-block:: ini +.. tab:: toml + + .. code-block:: toml + + [pytest] + junit_duration_report = "call" + +.. tab:: ini + + .. code-block:: ini - [pytest] - junit_duration_report = call + [pytest] + junit_duration_report = call .. _record_property example: @@ -743,7 +771,7 @@ record_testsuite_property .. versionadded:: 4.5 -If you want to add a properties node at the test-suite level, which may contains properties +If you want to add a properties node at the test-suite level, which may contain properties that are relevant to all tests, you can use the ``record_testsuite_property`` session-scoped fixture: The ``record_testsuite_property`` session-scoped fixture can be used to add properties relevant @@ -794,7 +822,7 @@ Sending test report to an online pastebin service This will submit test run information to a remote Paste service and provide a URL for each failure. You may select tests as usual or add -for example ``-x`` if you only want to send one particular failure. +for example :option:`-x` if you only want to send one particular failure. 
**Creating a URL for a whole test session log**: diff --git a/doc/en/how-to/parametrize.rst b/doc/en/how-to/parametrize.rst index 5a16684eb96..5de28472705 100644 --- a/doc/en/how-to/parametrize.rst +++ b/doc/en/how-to/parametrize.rst @@ -20,6 +20,11 @@ pytest enables test parametrization at several levels: * `pytest_generate_tests`_ allows one to define custom parametrization schemes or extensions. + +.. note:: + + See :ref:`subtests` for an alternative to parametrization. + .. _parametrizemark: .. _`@pytest.mark.parametrize`: @@ -52,7 +57,7 @@ them in turn: $ pytest =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 3 items @@ -88,12 +93,21 @@ them in turn: for the parametrization because it has several downsides. If however you would like to use unicode strings in parametrization and see them in the terminal as is (non-escaped), use this option - in your ``pytest.ini``: + in your configuration file: + + .. tab:: toml + + .. code-block:: toml - .. code-block:: ini + [pytest] + disable_test_id_escaping_and_forfeit_all_rights_to_community_support = true - [pytest] - disable_test_id_escaping_and_forfeit_all_rights_to_community_support = True + .. tab:: ini + + .. 
code-block:: ini + + [pytest] + disable_test_id_escaping_and_forfeit_all_rights_to_community_support = true Keep in mind however that this might cause unwanted side effects and even bugs depending on the OS used and plugins currently installed, @@ -163,7 +177,7 @@ Let's run this: $ pytest =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 3 items @@ -194,6 +208,7 @@ To get all combinations of multiple parametrized arguments you can stack This will run the test with the arguments set to ``x=0/y=2``, ``x=1/y=2``, ``x=0/y=3``, and ``x=1/y=3`` exhausting parameters in the order of the decorators. + .. _`pytest_generate_tests`: Basic ``pytest_generate_tests`` example @@ -240,6 +255,13 @@ command line option and the parametrization of our test function: if "stringinput" in metafunc.fixturenames: metafunc.parametrize("stringinput", metafunc.config.getoption("stringinput")) +.. note:: + + The :hook:`pytest_generate_tests` hook can also be implemented directly in a test + module or inside a test class; unlike other hooks, pytest will discover it there + as well. Other hooks must live in a :ref:`conftest.py ` or a plugin. + See :ref:`writinghooks`. + If we now pass two stringinput values, our test will run twice: .. 
code-block:: pytest @@ -281,7 +303,7 @@ list: $ pytest -q -rs test_strings.py s [100%] ========================= short test summary info ========================== - SKIPPED [1] test_strings.py: got empty parameter set ['stringinput'], function test_valid_string at /home/sweet/project/test_strings.py:2 + SKIPPED [1] test_strings.py: got empty parameter set for (stringinput) 1 skipped in 0.12s Note that when calling ``metafunc.parametrize`` multiple times with different parameter sets, all parameter names across diff --git a/doc/en/how-to/plugins.rst b/doc/en/how-to/plugins.rst index fca8ab54e63..c6641eb8484 100644 --- a/doc/en/how-to/plugins.rst +++ b/doc/en/how-to/plugins.rst @@ -120,12 +120,21 @@ This means that any subsequent try to activate/load the named plugin will not work. If you want to unconditionally disable a plugin for a project, you can add -this option to your ``pytest.ini`` file: +this option to your configuration file: -.. code-block:: ini +.. tab:: toml - [pytest] - addopts = -p no:NAME + .. code-block:: toml + + [pytest] + addopts = ["-p", "no:NAME"] + +.. tab:: ini + + .. code-block:: ini + + [pytest] + addopts = -p no:NAME Alternatively to disable it only in certain environments (for example in a CI server), you can set ``PYTEST_ADDOPTS`` environment variable to @@ -139,7 +148,7 @@ Disabling plugins from autoloading ---------------------------------- If you want to disable plugins from loading automatically, instead of requiring you to -manually specify each plugin with ``-p`` or :envvar:`PYTEST_PLUGINS`, you can use ``--disable-plugin-autoload`` or :envvar:`PYTEST_DISABLE_PLUGIN_AUTOLOAD`. +manually specify each plugin with :option:`-p` or :envvar:`PYTEST_PLUGINS`, you can use :option:`--disable-plugin-autoload` or :envvar:`PYTEST_DISABLE_PLUGIN_AUTOLOAD`. .. code-block:: bash @@ -149,13 +158,53 @@ manually specify each plugin with ``-p`` or :envvar:`PYTEST_PLUGINS`, you can us .. 
code-block:: bash - pytest --disable-plugin-autoload -p NAME,NAME2 + pytest --disable-plugin-autoload -p NAME -p NAME2 + +.. tab:: toml -.. code-block:: ini + .. code-block:: toml - [pytest] - addopts = --disable-plugin-autoload -p NAME,NAME2 + [pytest] + addopts = ["--disable-plugin-autoload", "-p", "NAME", "-p", "NAME2"] + +.. tab:: ini + + .. code-block:: ini + + [pytest] + addopts = + --disable-plugin-autoload + -p NAME + -p NAME2 .. versionadded:: 8.4 - The ``--disable-plugin-autoload`` command-line flag. + The :option:`--disable-plugin-autoload` command-line flag. + +.. note:: + + :option:`-p` and :envvar:`PYTEST_PLUGINS` are both ways to explicitly control which + plugins are loaded, but they serve slightly different use-cases. + + * :option:`-p` loads (or disables with ``-p no:``) a plugin by name or entry point + for a specific pytest invocation, and is processed early during startup. + * :envvar:`PYTEST_PLUGINS` is a comma-separated list of Python modules that are imported + and registered as plugins during startup. This mechanism is commonly used by test + suites, for example when testing a plugin. + + When explicitly controlling plugin loading (especially with + :envvar:`PYTEST_DISABLE_PLUGIN_AUTOLOAD` or :option:`--disable-plugin-autoload`), + avoid specifying the same plugin via multiple mechanisms. Registering the same plugin + more than once can lead to errors during plugin registration. + +Examples: + +.. code-block:: bash + + # Disable auto-loading and load only specific plugins for this invocation + PYTEST_DISABLE_PLUGIN_AUTOLOAD=1 pytest -p xdist + +.. 
code-block:: bash + + # Disable auto-loading and load plugin modules during startup + PYTEST_DISABLE_PLUGIN_AUTOLOAD=1 PYTEST_PLUGINS=mymodule.plugin,xdist pytest diff --git a/doc/en/how-to/skipping.rst b/doc/en/how-to/skipping.rst index 09a19766f99..488f71b09f9 100644 --- a/doc/en/how-to/skipping.rst +++ b/doc/en/how-to/skipping.rst @@ -21,14 +21,14 @@ it's an **xpass** and will be reported in the test summary. ``pytest`` counts and lists *skip* and *xfail* tests separately. Detailed information about skipped/xfailed tests is not shown by default to avoid -cluttering the output. You can use the ``-r`` option to see details +cluttering the output. You can use the :option:`-r` option to see details corresponding to the "short" letters shown in the test progress: .. code-block:: bash pytest -rxXs # show extra info on xfailed, xpassed, and skipped tests -More details on the ``-r`` option can be found by running ``pytest -h``. +More details on the :option:`-r` option can be found by running ``pytest -h``. (See :ref:`how to change command line options defaults`) @@ -84,14 +84,14 @@ It is also possible to skip the whole module using If you wish to skip something conditionally then you can use ``skipif`` instead. Here is an example of marking a test function to be skipped -when run on an interpreter earlier than Python3.10: +when run on an interpreter earlier than Python3.13: .. code-block:: python import sys - @pytest.mark.skipif(sys.version_info < (3, 10), reason="requires python3.10 or higher") + @pytest.mark.skipif(sys.version_info < (3, 13), reason="requires python3.13 or higher") def test_function(): ... If the condition evaluates to ``True`` during collection, the test function will be skipped, @@ -311,7 +311,7 @@ even executed, use the ``run`` parameter as ``False``: @pytest.mark.xfail(run=False) def test_function(): ... 
-This is specially useful for xfailing tests that are crashing the interpreter and should be
+This is particularly useful for xfailing tests that are crashing the interpreter and should be
 investigated later.
 
 .. _`xfail strict tutorial`:
@@ -331,12 +331,21 @@ You can change this by setting the ``strict`` keyword-only parameter to ``True``
 This will make ``XPASS`` ("unexpectedly passing") results from this test to fail the test suite.
 
 You can change the default value of the ``strict`` parameter using the
-``xfail_strict`` ini option:
+``strict_xfail`` ini option:
 
-.. code-block:: ini
+.. tab:: toml
 
-    [pytest]
-    xfail_strict=true
+    .. code-block:: toml
+
+        [pytest]
+        strict_xfail = true
+
+.. tab:: ini
+
+    .. code-block:: ini
+
+        [pytest]
+        strict_xfail = true
 
 
 Ignoring xfail
diff --git a/doc/en/how-to/subtests.rst b/doc/en/how-to/subtests.rst
new file mode 100644
index 00000000000..93b9d052afd
--- /dev/null
+++ b/doc/en/how-to/subtests.rst
@@ -0,0 +1,139 @@
+.. _subtests:
+
+How to use subtests
+===================
+
+.. versionadded:: 9.0
+
+.. note::
+
+    This feature is experimental. Its behavior, particularly how failures are reported, may evolve in future releases. However, the core functionality and usage are considered stable.
+
+pytest allows for grouping assertions within a normal test, known as *subtests*.
+
+Subtests are an alternative to parametrization, particularly useful when the exact parametrization values are not known at collection time.
+
+
+.. code-block:: python
+
+    # content of test_subtest.py
+
+
+    def test(subtests):
+        for i in range(5):
+            with subtests.test(msg="custom message", i=i):
+                assert i % 2 == 0
+
+Each assertion failure or error is caught by the context manager and reported individually:
+
+.. 
code-block:: pytest + + $ pytest -q test_subtest.py + uuuuuF [100%] + ================================= FAILURES ================================= + _______________________ test [custom message] (i=1) ________________________ + + subtests = <_pytest.subtests.Subtests object at 0xdeadbeef0001> + + def test(subtests): + for i in range(5): + with subtests.test(msg="custom message", i=i): + > assert i % 2 == 0 + E assert (1 % 2) == 0 + + test_subtest.py:6: AssertionError + _______________________ test [custom message] (i=3) ________________________ + + subtests = <_pytest.subtests.Subtests object at 0xdeadbeef0001> + + def test(subtests): + for i in range(5): + with subtests.test(msg="custom message", i=i): + > assert i % 2 == 0 + E assert (3 % 2) == 0 + + test_subtest.py:6: AssertionError + ___________________________________ test ___________________________________ + contains 2 failed subtests + ========================= short test summary info ========================== + SUBFAILED[custom message] (i=1) test_subtest.py::test - assert (1 % 2) == 0 + SUBFAILED[custom message] (i=3) test_subtest.py::test - assert (3 % 2) == 0 + FAILED test_subtest.py::test - contains 2 failed subtests + 3 failed, 3 subtests passed in 0.12s + +In the output above: + +* Subtest failures are reported as ``SUBFAILED``. +* Subtests are reported first and the "top-level" test is reported at the end on its own. + +Note that it is possible to use ``subtests`` multiple times in the same test, or even mix and match with normal assertions +outside the ``subtests.test`` block: + +.. code-block:: python + + def test(subtests): + for i in range(5): + with subtests.test("stage 1", i=i): + assert i % 2 == 0 + + assert func() == 10 + + for i in range(10, 20): + with subtests.test("stage 2", i=i): + assert i % 2 == 0 + +.. note:: + + See :ref:`parametrize` for an alternative to subtests. + + +Verbosity +--------- + +By default, only **subtest failures** are shown. 
Higher verbosity levels (:option:`-v`) will also show progress output for **passed** subtests. + +It is possible to control the verbosity of subtests by setting :confval:`verbosity_subtests`. + + +Typing +------ + +:class:`pytest.Subtests` is exported so it can be used in type annotations: + +.. code-block:: python + + def test(subtests: pytest.Subtests) -> None: ... + +.. _parametrize_vs_subtests: + +Parametrization vs Subtests +--------------------------- + +While :ref:`traditional pytest parametrization ` and ``subtests`` are similar, they have important differences and use cases. + + +Parametrization +~~~~~~~~~~~~~~~ + +* Happens at collection time. +* Generates individual tests. +* Parametrized tests can be referenced from the command line. +* Plays well with plugins that handle test execution, such as :option:`--last-failed`. +* Ideal for decision table testing. + +Subtests +~~~~~~~~ + +* Happen during test execution. +* Are not known at collection time. +* Can be generated dynamically. +* Cannot be referenced individually from the command line. +* Plugins that handle test execution cannot target individual subtests. +* An assertion failure inside a subtest does not interrupt the test, letting users see all failures in the same report. + + +.. note:: + + This feature was originally implemented as a separate plugin in `pytest-subtests `__, but since ``9.0`` has been merged into the core. + + The core implementation should be compatible with the plugin implementation, except it does not contain custom command-line options to control subtest output. 
diff --git a/doc/en/how-to/tmp_path.rst b/doc/en/how-to/tmp_path.rst index d19950431e5..e73c55878a6 100644 --- a/doc/en/how-to/tmp_path.rst +++ b/doc/en/how-to/tmp_path.rst @@ -35,7 +35,7 @@ Running this would result in a passed test except for the last $ pytest test_tmp_path.py =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 1 item @@ -136,9 +136,9 @@ Temporary directory location and retention The temporary directories, as returned by the :fixture:`tmp_path` and (now deprecated) :fixture:`tmpdir` fixtures, are automatically created under a base temporary directory, -in a structure that depends on the ``--basetemp`` option: +in a structure that depends on the :option:`--basetemp` option: -- By default (when the ``--basetemp`` option is not set), +- By default (when the :option:`--basetemp` option is not set), the temporary directories will follow this template: .. code-block:: text @@ -160,7 +160,7 @@ in a structure that depends on the ``--basetemp`` option: but this behavior can be configured with :confval:`tmp_path_retention_count` and :confval:`tmp_path_retention_policy`. -- When the ``--basetemp`` option is used (e.g. ``pytest --basetemp=mydir``), +- When the :option:`--basetemp` option is used (e.g. ``pytest --basetemp=mydir``), it will be used directly as base temporary directory: .. code-block:: text @@ -172,7 +172,7 @@ in a structure that depends on the ``--basetemp`` option: .. warning:: - The directory given to ``--basetemp`` will be cleared blindly before each test run, + The directory given to :option:`--basetemp` will be cleared blindly before each test run, so make sure to use a directory for that purpose only. 
When distributing tests on the local machine using ``pytest-xdist``, care is taken to diff --git a/doc/en/how-to/unittest.rst b/doc/en/how-to/unittest.rst index 62e32b6d28f..0762e7d4cf8 100644 --- a/doc/en/how-to/unittest.rst +++ b/doc/en/how-to/unittest.rst @@ -22,17 +22,14 @@ their ``test`` methods in ``test_*.py`` or ``*_test.py`` files. Almost all ``unittest`` features are supported: -* ``@unittest.skip`` style decorators; -* ``setUp/tearDown``; -* ``setUpClass/tearDownClass``; -* ``setUpModule/tearDownModule``; +* :func:`unittest.skip`/:func:`unittest.skipIf` style decorators +* :meth:`unittest.TestCase.setUp`/:meth:`unittest.TestCase.tearDown` +* :meth:`unittest.TestCase.setUpClass`/:meth:`unittest.TestCase.tearDownClass` +* :func:`unittest.setUpModule`/:func:`unittest.tearDownModule` +* :meth:`unittest.TestCase.subTest` (since version ``9.0``) -.. _`pytest-subtests`: https://github.com/pytest-dev/pytest-subtests .. _`load_tests protocol`: https://docs.python.org/3/library/unittest.html#load-tests-protocol -Additionally, :ref:`subtests ` are supported by the -`pytest-subtests`_ plugin. 
- Up to this point pytest does not have support for the following features: * `load_tests protocol`_; @@ -45,7 +42,7 @@ in most cases without having to modify existing code: * Obtain :ref:`more informative tracebacks `; * :ref:`stdout and stderr ` capturing; -* :ref:`Test selection options ` using ``-k`` and ``-m`` flags; +* :ref:`Test selection options ` using :option:`-k` and :option:`-m` flags; * :ref:`maxfail`; * :ref:`--pdb ` command-line option for debugging on test failures (see :ref:`note ` below); @@ -140,7 +137,7 @@ the ``self.db`` values in the traceback: $ pytest test_unittest_db.py =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 2 items @@ -154,6 +151,7 @@ the ``self.db`` values in the traceback: def test_method1(self): assert hasattr(self, "db") > assert 0, self.db # fail for demo purposes + ^^^^^^^^^^^^^^^^^ E AssertionError: .DummyDB object at 0xdeadbeef0001> E assert 0 @@ -164,6 +162,7 @@ the ``self.db`` values in the traceback: def test_method2(self): > assert 0, self.db # fail for demo purposes + ^^^^^^^^^^^^^^^^^ E AssertionError: .DummyDB object at 0xdeadbeef0001> E assert 0 diff --git a/doc/en/how-to/usage.rst b/doc/en/how-to/usage.rst index 0e0a0310fd8..35b07bfe8c1 100644 --- a/doc/en/how-to/usage.rst +++ b/doc/en/how-to/usage.rst @@ -4,10 +4,10 @@ How to invoke pytest ========================================== -.. seealso:: :ref:`Complete pytest command-line flag reference ` +.. seealso:: :ref:`Complete pytest command-line flags reference ` In general, pytest is invoked with the command ``pytest`` (see below for :ref:`other ways to invoke pytest -`). This will execute all tests in all files whose names follow the form ``test_*.py`` or ``\*_test.py`` +`). 
This will execute all tests in all files whose names follow the form ``test_*.py`` or ``*_test.py`` in the current directory and its subdirectories. More generally, pytest follows :ref:`standard test discovery rules `. @@ -155,7 +155,7 @@ Managing loading of plugins Early loading plugins ~~~~~~~~~~~~~~~~~~~~~~~ -You can early-load plugins (internal and external) explicitly in the command-line with the ``-p`` option:: +You can early-load plugins (internal and external) explicitly in the command-line with the :option:`-p` option:: pytest -p mypluginmodule @@ -171,7 +171,7 @@ The option receives a ``name`` parameter, which can be: Disabling plugins ~~~~~~~~~~~~~~~~~~ -To disable loading specific plugins at invocation time, use the ``-p`` option +To disable loading specific plugins at invocation time, use the :option:`-p` option together with the prefix ``no:``. Example: to disable loading the plugin ``doctest``, which is responsible for diff --git a/doc/en/how-to/writing_hook_functions.rst b/doc/en/how-to/writing_hook_functions.rst index f4c00d04fda..d5d6d2ae4f7 100644 --- a/doc/en/how-to/writing_hook_functions.rst +++ b/doc/en/how-to/writing_hook_functions.rst @@ -94,7 +94,7 @@ around the actual hook implementations, in which case it can return the result value of the ``yield``. The simplest (though useless) hook wrapper is ``return (yield)``. -In other cases, the wrapper wants the adjust or adapt the result, in which case +In other cases, the wrapper wants to adjust or adapt the result, in which case it can return a new value. If the result of the underlying hook is a mutable object, the wrapper may modify that result, but it's probably better to avoid it. @@ -235,6 +235,12 @@ Example: """ print(config.hook) +.. note:: + + Unlike other hooks, the :hook:`pytest_generate_tests` hook is also discovered when + defined inside a test module or test class. Other hooks must live in + :ref:`conftest.py plugins ` or external plugins. 
+ See :ref:`parametrize-basics` and the :ref:`hook-reference`. .. _`addoptionhooks`: diff --git a/doc/en/how-to/writing_plugins.rst b/doc/en/how-to/writing_plugins.rst index 1bba9644649..56043a14f97 100644 --- a/doc/en/how-to/writing_plugins.rst +++ b/doc/en/how-to/writing_plugins.rst @@ -48,7 +48,7 @@ Plugin discovery order at tool startup 5. by loading all plugins specified through the :envvar:`PYTEST_PLUGINS` environment variable. -6. by loading all "initial ":file:`conftest.py` files: +6. by loading all "initial" :file:`conftest.py` files: - determine the test paths: specified on the command line, otherwise in :confval:`testpaths` if defined and running from the rootdir, otherwise the @@ -295,7 +295,7 @@ the plugin manager like this: plugin = config.pluginmanager.get_plugin("name_of_plugin") If you want to look at the names of existing plugins, use -the ``--trace-config`` option. +the :option:`--trace-config` option. .. _registering-markers: @@ -420,13 +420,13 @@ before running pytest on it. This way we can abstract the tested logic to separa which is especially useful for longer tests and/or longer ``conftest.py`` files. Note that for ``pytester.copy_example`` to work we need to set `pytester_example_dir` -in our ``pytest.ini`` to tell pytest where to look for example files. +in our configuration file to tell pytest where to look for example files. -.. code-block:: ini +.. code-block:: toml - # content of pytest.ini - [pytest] - pytester_example_dir = . + # content of pytest.toml + [pytest] + pytester_example_dir = "." .. code-block:: python @@ -446,9 +446,9 @@ in our ``pytest.ini`` to tell pytest where to look for example files. $ pytest =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project - configfile: pytest.ini + configfile: pytest.toml collected 2 items test_example.py .. 
[100%] diff --git a/doc/en/index.rst b/doc/en/index.rst index fb5d0482c0d..1140640c80a 100644 --- a/doc/en/index.rst +++ b/doc/en/index.rst @@ -2,8 +2,7 @@ .. sidebar:: **Next Open Trainings and Events** - - `pytest - simple, rapid and fun testing with Python `_, at `EuroPython 2025 `_, **July 14th** (3h), Prague, Czech Republic - - `Professional Testing with Python `_, via `Python Academy `_ (3 day in-depth training), **March 3th -- 5th 2026**, Leipzig (DE) / Remote + - `Professional Testing with Python `_, via `Python Academy `_ (3 day in-depth training), **March 9th -- 11th 2027**, Leipzig (DE) / Remote Also see :doc:`previous talks and blogposts ` @@ -46,8 +45,6 @@ The ``pytest`` framework makes it easy to write small, readable tests, and can scale to support complex functional testing for applications and libraries. -``pytest`` requires: Python 3.8+ or PyPy3. - **PyPI package name**: :pypi:`pytest` A quick example @@ -70,7 +67,7 @@ To execute it: $ pytest =========================== test session starts ============================ - platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y + platform linux -- Python 3.x.y, pytest-9.x.y, pluggy-1.x.y rootdir: /home/sweet/project collected 1 item @@ -104,7 +101,7 @@ Features - Can run :ref:`unittest ` (including trial) test suites out of the box -- Python 3.8+ or PyPy 3 +- Python 3.10+ or PyPy 3 - Rich plugin architecture, with over 1300+ :ref:`external plugins ` and thriving community diff --git a/doc/en/reference/customize.rst b/doc/en/reference/customize.rst index 373223ec913..8903ceadf68 100644 --- a/doc/en/reference/customize.rst +++ b/doc/en/reference/customize.rst @@ -4,8 +4,7 @@ Configuration Command line options and configuration file settings ----------------------------------------------------------------- -You can get help on command line options and values in INI-style -configurations files by using the general help option: +You can get help on command line and configuration options by using the 
general help option: .. code-block:: bash @@ -24,51 +23,89 @@ by convention resides in the root directory of your repository. A quick example of the configuration files supported by pytest: +pytest.toml +~~~~~~~~~~~ + +.. versionadded:: 9.0 + +``pytest.toml`` files take precedence over other files, even when empty. + +Alternatively, the hidden version ``.pytest.toml`` can be used. + +.. tab:: toml + + .. code-block:: toml + + # pytest.toml or .pytest.toml + [pytest] + minversion = "9.0" + addopts = ["-ra", "-q"] + testpaths = [ + "tests", + "integration", + ] + pytest.ini ~~~~~~~~~~ -``pytest.ini`` files take precedence over other files, even when empty. +``pytest.ini`` files take precedence over other files (except ``pytest.toml`` and ``.pytest.toml``), even when empty. Alternatively, the hidden version ``.pytest.ini`` can be used. -.. code-block:: ini +.. tab:: ini - # pytest.ini or .pytest.ini - [pytest] - minversion = 6.0 - addopts = -ra -q - testpaths = - tests - integration + .. code-block:: ini + + # pytest.ini or .pytest.ini + [pytest] + minversion = 6.0 + addopts = -ra -q + testpaths = + tests + integration pyproject.toml ~~~~~~~~~~~~~~ .. versionadded:: 6.0 +.. versionchanged:: 9.0 + +``pyproject.toml`` files are supported for configuration. + +.. tab:: toml + + Use ``[tool.pytest]`` to leverage native TOML types (supported since pytest 9.0): -``pyproject.toml`` are considered for configuration when they contain a ``tool.pytest.ini_options`` table. + .. code-block:: toml -.. code-block:: toml + # pyproject.toml + [tool.pytest] + minversion = "9.0" + addopts = ["-ra", "-q"] + testpaths = [ + "tests", + "integration", + ] - # pyproject.toml - [tool.pytest.ini_options] - minversion = "6.0" - addopts = "-ra -q" - testpaths = [ - "tests", - "integration", - ] +.. tab:: ini -.. 
note:: + Use ``[tool.pytest.ini_options]`` for INI-style configuration (supported since pytest 6.0): - One might wonder why ``[tool.pytest.ini_options]`` instead of ``[tool.pytest]`` as is the - case with other tools. + .. code-block:: toml - The reason is that the pytest team intends to fully utilize the rich TOML data format - for configuration in the future, reserving the ``[tool.pytest]`` table for that. - The ``ini_options`` table is being used, for now, as a bridge between the existing - ``.ini`` configuration system and the future configuration format. + # pyproject.toml + [tool.pytest.ini_options] + minversion = "6.0" + addopts = "-ra -q" + testpaths = [ + "tests", + "integration", + ] + + For projects that still run pytest versions older than 6.0, keep + ``minversion`` in ``pytest.ini`` or ``setup.cfg`` too. Those versions + do not read ``pyproject.toml``. tox.ini ~~~~~~~ @@ -76,15 +113,17 @@ tox.ini ``tox.ini`` files are the configuration files of the `tox `__ project, and can also be used to hold pytest configuration if they have a ``[pytest]`` section. -.. code-block:: ini +.. tab:: ini + + .. code-block:: ini - # tox.ini - [pytest] - minversion = 6.0 - addopts = -ra -q - testpaths = - tests - integration + # tox.ini + [pytest] + minversion = 6.0 + addopts = -ra -q + testpaths = + tests + integration setup.cfg @@ -93,15 +132,17 @@ setup.cfg ``setup.cfg`` files are general purpose configuration files, used originally by ``distutils`` (now deprecated) and :std:doc:`setuptools `, and can also be used to hold pytest configuration if they have a ``[tool:pytest]`` section. -.. code-block:: ini +.. tab:: ini + + .. code-block:: ini - # setup.cfg - [tool:pytest] - minversion = 6.0 - addopts = -ra -q - testpaths = - tests - integration + # setup.cfg + [tool:pytest] + minversion = 6.0 + addopts = -ra -q + testpaths = + tests + integration .. 
warning:: @@ -123,7 +164,7 @@ the command line arguments (specified test files, paths) and on the existence of configuration files. The determined ``rootdir`` and ``configfile`` are printed as part of the pytest header during startup. -Here's a summary what ``pytest`` uses ``rootdir`` for: +Here's a summary of what ``pytest`` uses ``rootdir`` for: * Construct *nodeids* during collection; each test is assigned a unique *nodeid* which is rooted at the ``rootdir`` and takes into account @@ -136,9 +177,9 @@ Here's a summary what ``pytest`` uses ``rootdir`` for: ``rootdir`` is **NOT** used to modify ``sys.path``/``PYTHONPATH`` or influence how modules are imported. See :ref:`pythonpath` for more details. -The ``--rootdir=path`` command-line option can be used to force a specific directory. +The :option:`--rootdir=path` command-line option can be used to force a specific directory. Note that contrary to other command-line options, ``--rootdir`` cannot be used with -:confval:`addopts` inside ``pytest.ini`` because the ``rootdir`` is used to *find* ``pytest.ini`` +:confval:`addopts` inside a configuration file because the ``rootdir`` is used to *find* the configuration file already. Finding the ``rootdir`` @@ -146,20 +187,20 @@ Finding the ``rootdir`` Here is the algorithm which finds the rootdir from ``args``: -- If ``-c`` is passed in the command-line, use that as configuration file, and its directory as ``rootdir``. +- If :option:`-c` is passed in the command-line, use that as configuration file, and its directory as ``rootdir``. - Determine the common ancestor directory for the specified ``args`` that are recognised as paths that exist in the file system. If no such paths are found, the common ancestor directory is set to the current working directory. 
-- Look for ``pytest.ini``, ``pyproject.toml``, ``tox.ini``, and ``setup.cfg`` files in the ancestor +- Look for ``pytest.toml``, ``.pytest.toml``, ``pytest.ini``, ``.pytest.ini``, ``pyproject.toml``, ``tox.ini``, and ``setup.cfg`` files in the ancestor directory and upwards. If one is matched, it becomes the ``configfile`` and its directory becomes the ``rootdir``. - If no configuration file was found, look for ``setup.py`` upwards from the common ancestor directory to determine the ``rootdir``. -- If no ``setup.py`` was found, look for ``pytest.ini``, ``pyproject.toml``, ``tox.ini``, and +- If no ``setup.py`` was found, look for ``pytest.toml``, ``.pytest.toml``, ``pytest.ini``, ``.pytest.ini``, ``pyproject.toml``, ``tox.ini``, and ``setup.cfg`` in each of the specified ``args`` and upwards. If one is matched, it becomes the ``configfile`` and its directory becomes the ``rootdir``. @@ -167,18 +208,20 @@ Here is the algorithm which finds the rootdir from ``args``: directory. This allows the use of pytest in structures that are not part of a package and don't have any particular configuration file. -If no ``args`` are given, pytest collects test below the current working +If no ``args`` are given, pytest collects tests below the current working directory and also starts determining the ``rootdir`` from there. Files will only be matched for configuration if: -* ``pytest.ini``: will always match and take precedence, even if empty. -* ``pyproject.toml``: contains a ``[tool.pytest.ini_options]`` table. +* ``pytest.toml``: will always match and take highest precedence, even if empty. +* ``pytest.ini``: will always match and take precedence (after ``pytest.toml`` and ``.pytest.toml``), even if empty. +* ``pyproject.toml``: contains a ``[tool.pytest]`` or ``[tool.pytest.ini_options]`` table. * ``tox.ini``: contains a ``[pytest]`` section. * ``setup.cfg``: contains a ``[tool:pytest]`` section. 
Finally, a ``pyproject.toml`` file will be considered the ``configfile`` if no other match was found, in this case -even if it does not contain a ``[tool.pytest.ini_options]`` table (this was added in ``8.1``). +even if it does not contain a ``[tool.pytest]`` table (since version ``9.0``) or a ``[tool.pytest.ini_options]`` +table (since version ``8.1``). The files are considered in the order above. Options from multiple ``configfiles`` candidates are never merged - the first match wins. @@ -213,11 +256,13 @@ check for configuration files as follows: .. code-block:: text - # first look for pytest.ini files + # first look for pytest.toml files + path/pytest.toml path/pytest.ini - path/pyproject.toml # must contain a [tool.pytest.ini_options] table to match + path/pyproject.toml # must contain a [tool.pytest] or [tool.pytest.ini_options] table to match path/tox.ini # must contain [pytest] section to match path/setup.cfg # must contain [tool:pytest] section to match + pytest.toml pytest.ini ... # all the way up to the root @@ -233,7 +278,7 @@ check for configuration files as follows: ``pytest --log-output ../../test.log args``. Then ``args`` is mandatory, otherwise pytest uses the folder of test.log for rootdir determination (see also :issue:`1435`). - A dot ``.`` for referencing to the current working directory is also + A dot ``.`` for referencing the current working directory is also possible. diff --git a/doc/en/reference/exit-codes.rst b/doc/en/reference/exit-codes.rst index b695ca3702e..49aaca19121 100644 --- a/doc/en/reference/exit-codes.rst +++ b/doc/en/reference/exit-codes.rst @@ -20,7 +20,7 @@ They are represented by the :class:`pytest.ExitCode` enum. The exit codes being .. note:: - If you would like to customize the exit code in some scenarios, specially when + If you would like to customize the exit code in some scenarios, specifically when no tests are collected, consider using the `pytest-custom_exit_code `__ plugin. 
diff --git a/doc/en/reference/fixtures.rst b/doc/en/reference/fixtures.rst index 566304d3330..c4a8d01ff0e 100644 --- a/doc/en/reference/fixtures.rst +++ b/doc/en/reference/fixtures.rst @@ -34,7 +34,7 @@ Built-in fixtures :fixture:`capteesys` Capture in the same manner as :fixture:`capsys`, but also pass text - through according to ``--capture=``. + through according to :option:`--capture`. :fixture:`capsysbinary` Capture, as bytes, output to ``sys.stdout`` and ``sys.stderr``. @@ -52,6 +52,9 @@ Built-in fixtures :fixture:`pytestconfig` Access to configuration values, pluginmanager and plugin hooks. + :fixture:`subtests` + Enable declaring subtests inside test functions. + :fixture:`record_property` Add extra properties to the test. @@ -274,13 +277,13 @@ the test's search for fixtures would look like: pytest will only search for ``a_fix`` and ``b_fix`` in the plugins after searching for them first in the scopes inside ``tests/``. -.. note: +.. note:: pytest can tell you what fixtures are available for a given test if you call - ``pytests`` along with the test's name (or the scope it's in), and provide - the ``--fixtures`` flag, e.g. ``pytest --fixtures test_something.py`` + ``pytest`` along with the test's name (or the scope it's in), and provide + the :option:`--fixtures` flag, e.g. ``pytest --fixtures test_something.py`` (fixtures with names that start with ``_`` will only be shown if you also - provide the ``-v`` flag). + provide the :option:`-v` flag). .. _`fixture order`: @@ -351,7 +354,7 @@ an order of operations for a given test. If there's any ambiguity, and the order of operations can be interpreted more than one way, you should assume pytest could go with any one of those interpretations at any point. -For example, if ``d`` didn't request ``c``, i.e.the graph would look like this: +For example, if ``d`` didn't request ``c``, i.e. the graph would look like this: .. 
image:: /example/fixtures/test_fixtures_order_dependencies_unclear.* :align: center @@ -445,10 +448,10 @@ for the tests inside ``TestClassWithoutAutouse``, since they can reference can't see ``c3``. -.. note: +.. note:: pytest can tell you what order the fixtures will execute in for a given test - if you call ``pytests`` along with the test's name (or the scope it's in), - and provide the ``--setup-plan`` flag, e.g. + if you call ``pytest`` along with the test's name (or the scope it's in), + and provide the :option:`--setup-plan` flag, e.g. ``pytest --setup-plan test_something.py`` (fixtures with names that start - with ``_`` will only be shown if you also provide the ``-v`` flag). + with ``_`` will only be shown if you also provide the :option:`-v` flag). diff --git a/doc/en/reference/plugin_list.rst b/doc/en/reference/plugin_list.rst index d0bf2083fe3..c13600133f6 100644 --- a/doc/en/reference/plugin_list.rst +++ b/doc/en/reference/plugin_list.rst @@ -27,20 +27,20 @@ please refer to `the update script =8.3 - :pypi:`logassert` Simple but powerful assertion and verification of logged lines May 15, 2025 5 - Production/Stable pytest; extra == "dev" - :pypi:`logot` Test whether your code is logging correctly 🪵 May 05, 2025 5 - Production/Stable pytest; extra == "pytest" + :pypi:`databricks-labs-pytester` Python Testing for Databricks Oct 17, 2025 4 - Beta pytest>=8.3 + :pypi:`logassert` Simple but powerful assertion and verification of logged lines Aug 14, 2025 5 - Production/Stable pytest; extra == "dev" + :pypi:`logot` Test whether your code is logging correctly 🪵 Jul 28, 2025 5 - Production/Stable pytest; extra == "pytest" :pypi:`nuts` Network Unit Testing System May 10, 2025 N/A pytest<8,>=7 :pypi:`pytest-abq` Pytest integration for the ABQ universal test runner. 
Apr 07, 2023 N/A N/A :pypi:`pytest-abstracts` A contextmanager pytest fixture for handling multiple mock abstracts May 25, 2022 N/A N/A - :pypi:`pytest-accept` A pytest-plugin for updating doctest outputs Dec 08, 2024 N/A pytest>=7 + :pypi:`pytest-accept` Aug 19, 2025 N/A pytest>=7 :pypi:`pytest-adaptavist` pytest plugin for generating test execution results within Jira Test Management (tm4j) Oct 13, 2022 N/A pytest (>=5.4.0) :pypi:`pytest-adaptavist-fixed` pytest plugin for generating test execution results within Jira Test Management (tm4j) Jan 17, 2025 N/A pytest>=5.4.0 :pypi:`pytest-addons-test` 用于测试pytest的插件 Aug 02, 2021 N/A pytest (>=6.2.4,<7.0.0) @@ -58,6 +58,7 @@ This list contains 1641 plugins. :pypi:`pytest-aiogram` May 06, 2023 N/A N/A :pypi:`pytest-aiohttp` Pytest plugin for aiohttp support Jan 23, 2025 4 - Beta pytest>=6.1.0 :pypi:`pytest-aiohttp-client` Pytest \`client\` fixture for the Aiohttp Jan 10, 2023 N/A pytest (>=7.2.0,<8.0.0) + :pypi:`pytest-aiohttp-mock` Send responses to aiohttp. Sep 13, 2025 3 - Alpha pytest>=8 :pypi:`pytest-aiomoto` pytest-aiomoto Jun 24, 2023 N/A pytest (>=7.0,<8.0) :pypi:`pytest-aioresponses` py.test integration for aioresponses Jan 02, 2025 4 - Beta pytest>=3.5.0 :pypi:`pytest-aioworkers` A plugin to test aioworkers project with pytest Dec 26, 2024 5 - Production/Stable pytest>=8.3.4 @@ -70,9 +71,11 @@ This list contains 1641 plugins. 
:pypi:`pytest-allure-adaptor2` Plugin for py.test to generate allure xml reports Oct 14, 2020 N/A pytest (>=2.7.3) :pypi:`pytest-allure-collection` pytest plugin to collect allure markers without running any tests Apr 13, 2023 N/A pytest :pypi:`pytest-allure-dsl` pytest plugin to test case doc string dls instructions Oct 25, 2020 4 - Beta pytest + :pypi:`pytest-allure-host` Publish Allure static reports to private S3 behind CloudFront with history preservation Oct 21, 2025 3 - Alpha N/A :pypi:`pytest-allure-id2history` Overwrite allure history id with testcase full name and testcase id if testcase has id, exclude parameters. May 14, 2024 4 - Beta pytest>=6.2.0 :pypi:`pytest-allure-intersection` Oct 27, 2022 N/A pytest (<5) :pypi:`pytest-allure-spec-coverage` The pytest plugin aimed to display test coverage of the specs(requirements) in Allure Oct 26, 2021 N/A pytest + :pypi:`pytest-allure-step` Enhanced logging integration with Allure reports for pytest Jul 13, 2025 3 - Alpha pytest>=6.0.0 :pypi:`pytest-alphamoon` Static code checks used at Alphamoon Dec 30, 2021 5 - Production/Stable pytest (>=3.5.0) :pypi:`pytest-amaranth-sim` Fixture to automate running Amaranth simulations Sep 21, 2024 4 - Beta pytest>=6.2.0 :pypi:`pytest-analyzer` this plugin allows to analyze tests in pytest project, collect test metadata and sync it with testomat.io TCM system Feb 21, 2024 N/A pytest <8.0.0,>=7.3.1 @@ -80,7 +83,7 @@ This list contains 1641 plugins. :pypi:`pytest-anki` A pytest plugin for testing Anki add-ons Jul 31, 2022 4 - Beta pytest (>=3.5.0) :pypi:`pytest-annotate` pytest-annotate: Generate PyAnnotate annotations from your pytest tests. 
Jun 07, 2022 3 - Alpha pytest (<8.0.0,>=3.2.0) :pypi:`pytest-annotated` Pytest plugin to allow use of Annotated in tests to resolve fixtures Sep 30, 2024 N/A pytest>=8.3.3 - :pypi:`pytest-ansible` Plugin for pytest to simplify calling ansible modules from tests or fixtures May 26, 2025 5 - Production/Stable pytest>=6 + :pypi:`pytest-ansible` Plugin for pytest to simplify calling ansible modules from tests or fixtures Aug 21, 2025 5 - Production/Stable pytest>=6 :pypi:`pytest-ansible-playbook` Pytest fixture which runs given ansible playbook file. Mar 08, 2019 4 - Beta N/A :pypi:`pytest-ansible-playbook-runner` Pytest fixture which runs given ansible playbook file. Dec 02, 2020 4 - Beta pytest (>=3.1.0) :pypi:`pytest-ansible-units` A pytest plugin for running unit tests within an ansible collection Apr 14, 2022 N/A N/A @@ -90,25 +93,32 @@ This list contains 1641 plugins. :pypi:`pytest-aoc` Downloads puzzle inputs for Advent of Code and synthesizes PyTest fixtures Dec 02, 2023 5 - Production/Stable pytest ; extra == 'test' :pypi:`pytest-aoreporter` pytest report Jun 27, 2022 N/A N/A :pypi:`pytest-api` An ASGI middleware to populate OpenAPI Specification examples from pytest functions May 12, 2022 N/A pytest (>=7.1.1,<8.0.0) + :pypi:`pytest-api-cov` Pytest Plugin to provide API Coverage statistics for Python Web Frameworks Oct 28, 2025 N/A pytest>=6.0.0 + :pypi:`pytest-api-framework` pytest framework Jun 22, 2025 N/A pytest==7.2.2 + :pypi:`pytest-api-framework-alpha` Oct 29, 2025 N/A pytest==7.2.2 :pypi:`pytest-api-soup` Validate multiple endpoints with unit testing using a single source of truth. Aug 27, 2022 N/A N/A :pypi:`pytest-apistellar` apistellar plugin for pytest. 
Jun 18, 2019 N/A N/A :pypi:`pytest-apiver` Jun 21, 2024 N/A pytest :pypi:`pytest-appengine` AppEngine integration that works well with pytest-django Feb 27, 2017 N/A N/A :pypi:`pytest-appium` Pytest plugin for appium Dec 05, 2019 N/A N/A + :pypi:`pytest-approval` A simple approval test library utilizing external diff programs such as PyCharm and Visual Studio Code to compare approved and received output. Oct 27, 2025 N/A pytest>=8.3.5 :pypi:`pytest-approvaltests` A plugin to use approvaltests with pytest May 08, 2022 4 - Beta pytest (>=7.0.1) - :pypi:`pytest-approvaltests-geo` Extension for ApprovalTests.Python specific to geo data verification Feb 05, 2024 5 - Production/Stable pytest - :pypi:`pytest-archon` Rule your architecture like a real developer Dec 18, 2023 5 - Production/Stable pytest >=7.2 + :pypi:`pytest-approvaltests-geo` Extension for ApprovalTests.Python specific to geo data verification Jul 14, 2025 5 - Production/Stable pytest + :pypi:`pytest-archon` Rule your architecture like a real developer Sep 19, 2025 5 - Production/Stable pytest>=7.2 :pypi:`pytest-argus` pyest results colection plugin Jun 24, 2021 5 - Production/Stable pytest (>=6.2.4) + :pypi:`pytest-argus-reporter` A simple plugin to report results of test into argus Sep 17, 2025 4 - Beta pytest>=3.0; extra == "dev" :pypi:`pytest-argus-server` A plugin that provides a running Argus API server for tests Mar 24, 2025 4 - Beta pytest>=6.2.0 :pypi:`pytest-arraydiff` pytest plugin to help with comparing array output from tests Nov 27, 2023 4 - Beta pytest >=4.6 + :pypi:`pytest-asdf-plugin` Pytest plugin for testing ASDF schemas Aug 18, 2025 5 - Production/Stable pytest>=7 :pypi:`pytest-asgi-server` Convenient ASGI client/server fixtures for Pytest Dec 12, 2020 N/A pytest (>=5.4.1) :pypi:`pytest-aspec` A rspec format reporter for pytest Dec 20, 2023 4 - Beta N/A :pypi:`pytest-asptest` test Answer Set Programming programs Apr 28, 2018 4 - Beta N/A :pypi:`pytest-assertcount` Plugin to count actual 
number of asserts in pytest Oct 23, 2022 N/A pytest (>=5.0.0) :pypi:`pytest-assertions` Pytest Assertions Apr 27, 2022 N/A N/A + :pypi:`pytest-assert-type` Use typing.assert_type() to test runtime behavior Oct 26, 2025 3 - Alpha pytest>=6.2.0 :pypi:`pytest-assertutil` pytest-assertutil May 10, 2019 N/A N/A :pypi:`pytest-assert-utils` Useful assertion utilities for use with pytest Apr 14, 2022 3 - Alpha N/A - :pypi:`pytest-assist` load testing library Mar 17, 2025 N/A pytest + :pypi:`pytest-assist` pytest plugin library Oct 29, 2025 4 - Beta pytest :pypi:`pytest-assume` A pytest plugin that allows multiple failures per test Jun 24, 2021 N/A pytest (>=2.7) :pypi:`pytest-assurka` A pytest plugin for Assurka Studio Aug 04, 2022 N/A N/A :pypi:`pytest-ast-back-to-python` A plugin for pytest devs to view how assertion rewriting recodes the AST Sep 29, 2019 4 - Beta N/A @@ -119,9 +129,9 @@ This list contains 1641 plugins. :pypi:`pytest_async` pytest-async - Run your coroutine in event loop without decorator Feb 26, 2020 N/A N/A :pypi:`pytest-async-benchmark` pytest-async-benchmark: Modern pytest benchmarking for async code. 🚀 May 28, 2025 N/A pytest>=8.3.5 :pypi:`pytest-async-generators` Pytest fixtures for async generators Jul 05, 2023 N/A N/A - :pypi:`pytest-asyncio` Pytest support for asyncio May 26, 2025 4 - Beta pytest<9,>=8.2 + :pypi:`pytest-asyncio` Pytest support for asyncio Sep 12, 2025 5 - Production/Stable pytest<9,>=8.2 :pypi:`pytest-asyncio-concurrent` Pytest plugin to execute python async tests concurrently. May 17, 2025 4 - Beta pytest>=6.2.0 - :pypi:`pytest-asyncio-cooperative` Run all your asynchronous tests cooperatively. Apr 26, 2025 N/A N/A + :pypi:`pytest-asyncio-cooperative` Run all your asynchronous tests cooperatively. 
Jun 24, 2025 N/A N/A :pypi:`pytest-asyncio-network-simulator` pytest-asyncio-network-simulator: Plugin for pytest for simulator the network in tests Jul 31, 2018 3 - Alpha pytest (<3.7.0,>=3.3.2) :pypi:`pytest-async-mongodb` pytest plugin for async MongoDB Oct 18, 2017 5 - Production/Stable pytest (>=2.5.2) :pypi:`pytest-async-sqlalchemy` Database testing fixtures using the SQLAlchemy asyncio API Oct 07, 2021 4 - Beta pytest (>=6.0.0) @@ -137,9 +147,11 @@ This list contains 1641 plugins. :pypi:`pytest-automation` pytest plugin for building a test suite, using YAML files to extend pytest parameterize functionality. Apr 24, 2024 N/A pytest>=7.0.0 :pypi:`pytest-automock` Pytest plugin for automatical mocks creation May 16, 2023 N/A pytest ; extra == 'dev' :pypi:`pytest-auto-parametrize` pytest plugin: avoid repeating arguments in parametrize Oct 02, 2016 3 - Alpha N/A + :pypi:`pytest-autoprofile` \`line_profiler.autoprofile\`-ing your \`pytest\` test suite Aug 06, 2025 4 - Beta pytest>=7.0 :pypi:`pytest-autotest` This fixture provides a configured "driver" for Android Automated Testing, using uiautomator2. Aug 25, 2021 N/A pytest :pypi:`pytest-aviator` Aviator's Flakybot pytest plugin that automatically reruns flaky tests. Nov 04, 2022 4 - Beta pytest :pypi:`pytest-avoidance` Makes pytest skip tests that don not need rerunning May 23, 2019 4 - Beta pytest (>=3.5.0) + :pypi:`pytest-awaiting-fix` A simple plugin to use with pytest for traceability across Jira and disabled automated tests Aug 09, 2025 4 - Beta pytest>=6.2.0 :pypi:`pytest-aws` pytest plugin for testing AWS resource configurations Oct 04, 2017 4 - Beta N/A :pypi:`pytest-aws-apigateway` pytest plugin for AWS ApiGateway May 24, 2024 4 - Beta pytest :pypi:`pytest-aws-config` Protect your AWS credentials in unit tests May 28, 2021 N/A N/A @@ -147,27 +159,29 @@ This list contains 1641 plugins. 
:pypi:`pytest-axe` pytest plugin for axe-selenium-python Nov 12, 2018 N/A pytest (>=3.0.0) :pypi:`pytest-axe-playwright-snapshot` A pytest plugin that runs Axe-core on Playwright pages and takes snapshots of the results. Jul 25, 2023 N/A pytest :pypi:`pytest-azure` Pytest utilities and mocks for Azure Jan 18, 2023 3 - Alpha pytest - :pypi:`pytest-azure-devops` Simplifies using azure devops parallel strategy (https://docs.microsoft.com/en-us/azure/devops/pipelines/test/parallel-testing-any-test-runner) with pytest. Jun 20, 2022 4 - Beta pytest (>=3.5.0) + :pypi:`pytest-azure-devops` Simplifies using azure devops parallel strategy (https://docs.microsoft.com/en-us/azure/devops/pipelines/test/parallel-testing-any-test-runner) with pytest. Jul 16, 2025 4 - Beta pytest>=3.5.0 :pypi:`pytest-azurepipelines` Formatting PyTest output for Azure Pipelines UI Oct 06, 2023 5 - Production/Stable pytest (>=5.0.0) :pypi:`pytest-bandit` A bandit plugin for pytest Feb 23, 2021 4 - Beta pytest (>=3.5.0) :pypi:`pytest-bandit-xayon` A bandit plugin for pytest Oct 17, 2022 4 - Beta pytest (>=3.5.0) :pypi:`pytest-base-url` pytest plugin for URL based testing Jan 31, 2024 5 - Production/Stable pytest>=7.0.0 + :pypi:`pytest-bashdoctest` A pytest plugin for testing bash command examples in markdown documentation Oct 03, 2025 4 - Beta pytest>=7.0.0 :pypi:`pytest-batch-regression` A pytest plugin to repeat the entire test suite in batches. 
May 08, 2024 N/A pytest>=6.0.0 - :pypi:`pytest-bazel` A pytest runner with bazel support May 11, 2025 4 - Beta pytest + :pypi:`pytest-bazel` A pytest runner with bazel support Oct 31, 2025 4 - Beta pytest :pypi:`pytest-bdd` BDD for pytest Dec 05, 2024 6 - Mature pytest>=7.0.0 :pypi:`pytest-bdd-html` pytest plugin to display BDD info in HTML test report Nov 22, 2022 3 - Alpha pytest (!=6.0.0,>=5.0) :pypi:`pytest-bdd-ng` BDD for pytest Nov 26, 2024 4 - Beta pytest>=5.2 - :pypi:`pytest-bdd-report` A pytest-bdd plugin for generating useful and informative BDD test reports Nov 27, 2024 N/A pytest>=7.1.3 + :pypi:`pytest-bdd-report` A pytest-bdd plugin for generating useful and informative BDD test reports Aug 19, 2025 N/A pytest>=7.1.3 + :pypi:`pytest-bdd-reporter` Enterprise-grade BDD test reporting with interactive dashboards, suite management, and comprehensive email integration Oct 14, 2025 5 - Production/Stable pytest>=6.0.0 :pypi:`pytest-bdd-splinter` Common steps for pytest bdd and splinter integration Aug 12, 2019 5 - Production/Stable pytest (>=4.0.0) :pypi:`pytest-bdd-web` A simple plugin to use with pytest Jan 02, 2020 4 - Beta pytest (>=3.5.0) :pypi:`pytest-bdd-wrappers` Feb 11, 2020 2 - Pre-Alpha N/A :pypi:`pytest-beakerlib` A pytest plugin that reports test results to the BeakerLib framework Mar 17, 2017 5 - Production/Stable pytest :pypi:`pytest-beartype` Pytest plugin to run your tests with beartype checking enabled. Oct 31, 2024 N/A pytest - :pypi:`pytest-bec-e2e` BEC pytest plugin for end-to-end tests May 30, 2025 3 - Alpha pytest + :pypi:`pytest-bec-e2e` BEC pytest plugin for end-to-end tests Oct 31, 2025 3 - Alpha pytest :pypi:`pytest-beds` Fixtures for testing Google Appengine (GAE) apps Jun 07, 2016 4 - Beta N/A :pypi:`pytest-beeprint` use icdiff for better error messages in pytest assertions Jul 04, 2023 4 - Beta N/A :pypi:`pytest-bench` Benchmark utility that plugs into pytest. 
Jul 21, 2014 3 - Alpha N/A - :pypi:`pytest-benchmark` A \`\`pytest\`\` fixture for benchmarking code. It will group the tests into rounds that are calibrated to the chosen timer. Oct 30, 2024 5 - Production/Stable pytest>=8.1 + :pypi:`pytest-benchmark` A \`\`pytest\`\` fixture for benchmarking code. It will group the tests into rounds that are calibrated to the chosen timer. Oct 30, 2025 5 - Production/Stable pytest>=8.1 :pypi:`pytest-better-datadir` A small example package Mar 13, 2023 N/A N/A :pypi:`pytest-better-parametrize` Better description of parametrized test cases Mar 05, 2024 4 - Beta pytest >=6.2.0 :pypi:`pytest-bg-process` Pytest plugin to initialize background process Jan 24, 2022 4 - Beta pytest (>=3.5.0) @@ -178,12 +192,14 @@ This list contains 1641 plugins. :pypi:`pytest-black-multipy` Allow '--black' on older Pythons Jan 14, 2021 5 - Production/Stable pytest (!=3.7.3,>=3.5) ; extra == 'testing' :pypi:`pytest-black-ng` A pytest plugin to enable format checking with black Oct 20, 2022 4 - Beta pytest (>=7.0.0) :pypi:`pytest-blame` A pytest plugin helps developers to debug by providing useful commits history. May 04, 2019 N/A pytest (>=4.4.0) - :pypi:`pytest-blender` Blender Pytest plugin. Aug 02, 2024 N/A pytest + :pypi:`pytest-blender` Blender Pytest plugin. Jun 25, 2025 N/A pytest :pypi:`pytest-blink1` Pytest plugin to emit notifications via the Blink(1) RGB LED Jan 07, 2018 4 - Beta N/A :pypi:`pytest-blockage` Disable network requests during a test run. Dec 21, 2021 N/A pytest :pypi:`pytest-blocker` pytest plugin to mark a test as blocker and skip all other tests Sep 07, 2015 4 - Beta N/A + :pypi:`pytest-b-logger` BLogger is a Pytest plugin for enhanced test logging and generating convenient and lightweight reports. Oct 28, 2025 N/A pytest :pypi:`pytest-blue` A pytest plugin that adds a \`blue\` fixture for printing stuff in blue. Sep 05, 2022 N/A N/A :pypi:`pytest-board` Local continuous test runner with pytest and watchdog. 
Jan 20, 2019 N/A N/A + :pypi:`pytest-boardfarm3` Integrate boardfarm as a pytest plugin. Sep 15, 2025 N/A pytest :pypi:`pytest-boilerplate` The pytest plugin for your Django Boilerplate. Sep 12, 2024 5 - Production/Stable pytest>=4.0.0 :pypi:`pytest-bonsai` Apr 08, 2025 N/A pytest>=6 :pypi:`pytest-boost-xml` Plugin for pytest to generate boost xml reports Nov 30, 2022 4 - Beta N/A @@ -195,13 +211,14 @@ This list contains 1641 plugins. :pypi:`pytest-breakword` Use breakword with pytest Aug 04, 2021 N/A pytest (>=6.2.4,<7.0.0) :pypi:`pytest-breed-adapter` A simple plugin to connect with breed-server Nov 07, 2018 4 - Beta pytest (>=3.5.0) :pypi:`pytest-briefcase` A pytest plugin for running tests on a Briefcase project. Jun 14, 2020 4 - Beta pytest (>=3.5.0) + :pypi:`pytest-brightest` Bright ideas for improving your pytest experience Jul 15, 2025 3 - Alpha pytest>=8.4.1 :pypi:`pytest-broadcaster` Pytest plugin to broadcast pytest output to various destinations Mar 02, 2025 3 - Alpha pytest :pypi:`pytest-browser` A pytest plugin for console based browser test selection just after the collection phase Dec 10, 2016 3 - Alpha N/A :pypi:`pytest-browsermob-proxy` BrowserMob proxy plugin for py.test. Jun 11, 2013 4 - Beta N/A :pypi:`pytest_browserstack` Py.test plugin for BrowserStack Jan 27, 2016 4 - Beta N/A :pypi:`pytest-browserstack-local` \`\`py.test\`\` plugin to run \`\`BrowserStackLocal\`\` in background. Feb 09, 2018 N/A N/A :pypi:`pytest-budosystems` Budo Systems is a martial arts school management system. This module is the Budo Systems Pytest Plugin. 
May 07, 2023 3 - Alpha pytest - :pypi:`pytest-bug` Pytest plugin for marking tests as a bug Jun 05, 2024 5 - Production/Stable pytest>=8.0.0 + :pypi:`pytest-bug` Pytest plugin for marking tests as a bug Jun 17, 2025 5 - Production/Stable pytest>=8.4.0 :pypi:`pytest-bugtong-tag` pytest-bugtong-tag is a plugin for pytest Jan 16, 2022 N/A N/A :pypi:`pytest-bugzilla` py.test bugzilla integration plugin May 05, 2010 4 - Beta N/A :pypi:`pytest-bugzilla-notifier` A plugin that allows you to execute create, update, and read information from BugZilla bugs Jun 15, 2018 4 - Beta pytest (>=2.9.2) @@ -215,18 +232,21 @@ This list contains 1641 plugins. :pypi:`pytest-call-checker` Small pytest utility to easily create test doubles Oct 16, 2022 4 - Beta pytest (>=7.1.3,<8.0.0) :pypi:`pytest-camel-collect` Enable CamelCase-aware pytest class collection Aug 02, 2020 N/A pytest (>=2.9) :pypi:`pytest-canonical-data` A plugin which allows to compare results with canonical results, based on previous runs May 08, 2020 2 - Pre-Alpha pytest (>=3.5.0) + :pypi:`pytest-canvas` A minimal pytest plugin that streamlines testing for projects using the Canvas SDK. Jul 22, 2025 N/A pytest<9,>=8.4 :pypi:`pytest-caprng` A plugin that replays pRNG state on failure. May 02, 2018 4 - Beta N/A :pypi:`pytest-capsqlalchemy` Pytest plugin to allow capturing SQLAlchemy queries. Mar 19, 2025 4 - Beta N/A :pypi:`pytest-capture-deprecatedwarnings` pytest plugin to capture all deprecatedwarnings and put them in one file Apr 30, 2019 N/A N/A - :pypi:`pytest-capture-sysout` An academic experiment package May 21, 2025 2 - Pre-Alpha N/A :pypi:`pytest-capture-warnings` pytest plugin to capture all warnings and put them in one file of your choice May 03, 2022 N/A pytest :pypi:`pytest-case` A clean, modern, wrapper for pytest.mark.parametrize Nov 25, 2024 N/A pytest<9.0.0,>=8.3.3 - :pypi:`pytest-cases` Separate test code from test cases in pytest. 
Sep 26, 2024 5 - Production/Stable N/A + :pypi:`pytest-case-provider` Advanced pytest parametrization plugin that generates test case instances from sync or async factories. Oct 26, 2025 3 - Alpha pytest<9,>=8 + :pypi:`pytest-cases` Separate test code from test cases in pytest. Jun 09, 2025 5 - Production/Stable pytest + :pypi:`pytest-case-start-from` A pytest plugin to start test execution from a specific test case Oct 28, 2025 4 - Beta pytest>=6.0.0 + :pypi:`pytest-casewise-package-install` A pytest plugin for test case-level dynamic dependency management Oct 31, 2025 3 - Alpha pytest>=6.0.0 :pypi:`pytest-cassandra` Cassandra CCM Test Fixtures for pytest Nov 04, 2017 1 - Planning N/A :pypi:`pytest-catchlog` py.test plugin to catch log messages. This is a fork of pytest-capturelog. Jan 24, 2016 4 - Beta pytest (>=2.6) :pypi:`pytest-catch-server` Pytest plugin with server for catching HTTP requests. Dec 12, 2019 5 - Production/Stable N/A :pypi:`pytest-cdist` A pytest plugin to split your test suite into multiple parts Jan 30, 2025 N/A pytest>=7 - :pypi:`pytest-celery` Pytest plugin for Celery Feb 21, 2025 5 - Production/Stable N/A + :pypi:`pytest-celery` Pytest plugin for Celery Jul 30, 2025 5 - Production/Stable N/A :pypi:`pytest-celery-py37` Pytest plugin for Celery (compatible with python 3.7) May 23, 2025 5 - Production/Stable N/A :pypi:`pytest-cfg-fetcher` Pass config options to your unit tests. Feb 26, 2024 N/A N/A :pypi:`pytest-chainmaker` pytest plugin for chainmaker Oct 15, 2021 N/A N/A @@ -236,7 +256,7 @@ This list contains 1641 plugins. :pypi:`pytest-change-report` turn . into √,turn F into x Sep 14, 2020 N/A pytest :pypi:`pytest-change-xds` turn . into √,turn F into x Apr 16, 2022 N/A pytest :pypi:`pytest-chdir` A pytest fixture for changing current working directory Jan 28, 2020 N/A pytest (>=5.0.0,<6.0.0) - :pypi:`pytest-check` A pytest plugin that allows multiple failures per test. 
Apr 04, 2025 5 - Production/Stable pytest>=7.0.0 + :pypi:`pytest-check` A pytest plugin that allows multiple failures per test. Oct 07, 2025 5 - Production/Stable pytest>=7.0.0 :pypi:`pytest-checkdocs` check the README when running tests Apr 30, 2024 5 - Production/Stable pytest!=8.1.*,>=6; extra == "testing" :pypi:`pytest-checkipdb` plugin to check if there are ipdb debugs left Dec 04, 2023 5 - Production/Stable pytest >=2.9.2 :pypi:`pytest-check-library` check your missing library Jul 17, 2022 N/A N/A @@ -244,11 +264,12 @@ This list contains 1641 plugins. :pypi:`pytest-check-links` Check links in files Jul 29, 2020 N/A pytest<9,>=7.0 :pypi:`pytest-checklist` Pytest plugin to track and report unit/function coverage. May 23, 2025 N/A N/A :pypi:`pytest-check-mk` pytest plugin to test Check_MK checks Nov 19, 2015 4 - Beta pytest - :pypi:`pytest-checkpoint` Restore a checkpoint in pytest Mar 30, 2025 N/A pytest>=8.0.0 + :pypi:`pytest-checkpoint` Restore a checkpoint in pytest Oct 04, 2025 N/A pytest>=8.0.0 :pypi:`pytest-ch-framework` My pytest framework Apr 17, 2024 N/A pytest==8.0.1 :pypi:`pytest-chic-report` Simple pytest plugin for generating and sending report to messengers. Nov 01, 2024 N/A pytest>=6.0 :pypi:`pytest-chinesereport` Apr 16, 2025 4 - Beta pytest>=3.5.0 :pypi:`pytest-choose` Provide the pytest with the ability to collect use cases based on rules in text files Feb 04, 2024 N/A pytest >=7.0.0 + :pypi:`pytest-chronicle` Reusable pytest results ingestion tooling with database export and CLI helpers. Oct 30, 2025 N/A pytest>=8.0; extra == "dev" :pypi:`pytest-chunks` Run only a chunk of your test suite Jul 05, 2022 N/A pytest (>=6.0.0) :pypi:`pytest_cid` Compare data structures containing matching CIDs of different versions and encoding Sep 01, 2023 4 - Beta pytest >= 5.0, < 7.0 :pypi:`pytest-circleci` py.test plugin for CircleCI May 03, 2019 N/A N/A @@ -261,8 +282,8 @@ This list contains 1641 plugins. 
:pypi:`pytest-clean-database` A pytest plugin that cleans your database up after every test. Mar 14, 2025 3 - Alpha pytest<9,>=7.0 :pypi:`pytest-cleanslate` Collects and executes pytest tests separately Apr 10, 2025 N/A pytest :pypi:`pytest_cleanup` Automated, comprehensive and well-organised pytest test cases. Jan 28, 2020 N/A N/A - :pypi:`pytest-cleanuptotal` A cleanup plugin for pytest Nov 08, 2024 5 - Production/Stable N/A - :pypi:`pytest-clerk` A set of pytest fixtures to help with integration testing with Clerk. Jan 30, 2025 N/A pytest<9.0.0,>=8.0.0 + :pypi:`pytest-cleanuptotal` A cleanup plugin for pytest Jul 22, 2025 5 - Production/Stable N/A + :pypi:`pytest-clerk` A set of pytest fixtures to help with integration testing with Clerk. Aug 30, 2025 N/A pytest<9.0.0,>=8.0.0 :pypi:`pytest-cli2-ansible` Mar 05, 2025 N/A N/A :pypi:`pytest-click` Pytest plugin for Click Feb 11, 2022 5 - Production/Stable pytest (>=5.0) :pypi:`pytest-cli-fixtures` Automatically register fixtures for custom CLI arguments Jul 28, 2022 N/A pytest (~=7.0) @@ -270,7 +291,7 @@ This list contains 1641 plugins. :pypi:`pytest-cloud` Distributed tests planner plugin for pytest testing framework. Oct 05, 2020 6 - Mature N/A :pypi:`pytest-cloudflare-worker` pytest plugin for testing cloudflare workers Mar 30, 2021 4 - Beta pytest (>=6.0.0) :pypi:`pytest-cloudist` Distribute tests to cloud machines without fuss Sep 02, 2022 4 - Beta pytest (>=7.1.2,<8.0.0) - :pypi:`pytest-cmake` Provide CMake module for Pytest Feb 17, 2025 N/A pytest<9,>=4 + :pypi:`pytest-cmake` Provide CMake module for Pytest Aug 14, 2025 N/A pytest<9,>=4 :pypi:`pytest-cmake-presets` Execute CMake Presets via pytest Dec 26, 2022 N/A pytest (>=7.2.0,<8.0.0) :pypi:`pytest-cmdline-add-args` Pytest plugin for custom argument handling and Allure reporting. This plugin allows you to add arguments before running a test. Sep 01, 2024 N/A N/A :pypi:`pytest-cobra` PyTest plugin for testing Smart Contracts for Ethereum blockchain. 
Jun 29, 2019 3 - Alpha pytest (<4.0.0,>=3.7.1) @@ -283,7 +304,7 @@ This list contains 1641 plugins. :pypi:`pytest-codegen` Automatically create pytest test signatures Aug 23, 2020 2 - Pre-Alpha N/A :pypi:`pytest-codeowners` Pytest plugin for selecting tests by GitHub CODEOWNERS. Mar 30, 2022 4 - Beta pytest (>=6.0.0) :pypi:`pytest-codestyle` pytest plugin to run pycodestyle Mar 23, 2020 3 - Alpha N/A - :pypi:`pytest-codspeed` Pytest plugin to create CodSpeed benchmarks May 27, 2025 5 - Production/Stable pytest>=3.8 + :pypi:`pytest-codspeed` Pytest plugin to create CodSpeed benchmarks Oct 24, 2025 5 - Production/Stable pytest>=3.8 :pypi:`pytest-collect-appoint-info` set your encoding Aug 03, 2023 N/A pytest :pypi:`pytest-collect-formatter` Formatter for pytest collect output Mar 29, 2021 5 - Production/Stable N/A :pypi:`pytest-collect-formatter2` Formatter for pytest collect output May 31, 2021 5 - Production/Stable N/A @@ -292,23 +313,24 @@ This list contains 1641 plugins. :pypi:`pytest-collect-pytest-interinfo` A simple plugin to use with pytest Sep 26, 2023 4 - Beta N/A :pypi:`pytest-colordots` Colorizes the progress indicators Oct 06, 2017 5 - Production/Stable N/A :pypi:`pytest-commander` An interactive GUI test runner for PyTest Aug 17, 2021 N/A pytest (<7.0.0,>=6.2.4) - :pypi:`pytest-common-subject` pytest framework for testing different aspects of a common method Jun 12, 2024 N/A pytest<9,>=3.6 + :pypi:`pytest-common-subject` pytest framework for testing different aspects of a common method Oct 22, 2025 N/A pytest<9,>=3.6 :pypi:`pytest-compare` pytest plugin for comparing call arguments. Jun 22, 2023 5 - Production/Stable N/A :pypi:`pytest-concurrent` Concurrently execute test cases with multithread, multiprocess and gevent Jan 12, 2019 4 - Beta pytest (>=3.1.1) + :pypi:`pytest-conductor` Pytest plugin for coordinating the order in which marked tests run. 
Jul 30, 2025 N/A pytest<8.4; python_version == "3.8" :pypi:`pytest-config` Base configurations and utilities for developing your Python project test suite with pytest. Nov 07, 2014 5 - Production/Stable N/A :pypi:`pytest-confluence-report` Package stands for pytest plugin to upload results into Confluence page. Apr 17, 2022 N/A N/A :pypi:`pytest-console-scripts` Pytest plugin for testing console scripts May 31, 2023 4 - Beta pytest (>=4.0.0) :pypi:`pytest-consul` pytest plugin with fixtures for testing consul aware apps Nov 24, 2018 3 - Alpha pytest - :pypi:`pytest-container` Pytest fixtures for writing container based tests Dec 04, 2024 4 - Beta pytest>=3.10 + :pypi:`pytest-container` Pytest fixtures for writing container based tests Jun 30, 2025 4 - Beta pytest>=3.10 :pypi:`pytest-contextfixture` Define pytest fixtures as context managers. Mar 12, 2013 4 - Beta N/A :pypi:`pytest-contexts` A plugin to run tests written with the Contexts framework using pytest May 19, 2021 4 - Beta N/A :pypi:`pytest-continuous` A pytest plugin to run tests continuously until failure or interruption. Apr 23, 2024 N/A N/A :pypi:`pytest-cookies` The pytest plugin for your Cookiecutter templates. 🍪 Mar 22, 2023 5 - Production/Stable pytest (>=3.9.0) - :pypi:`pytest-copie` The pytest plugin for your copier templates 📒 Apr 09, 2025 3 - Alpha pytest + :pypi:`pytest-copie` The pytest plugin for your copier templates 📒 Sep 29, 2025 3 - Alpha pytest :pypi:`pytest-copier` A pytest plugin to help testing Copier templates Dec 11, 2023 4 - Beta pytest>=7.3.2 :pypi:`pytest-couchdbkit` py.test extension for per-test couchdb databases using couchdbkit Apr 17, 2012 N/A N/A :pypi:`pytest-count` count erros and send email Jan 12, 2018 4 - Beta N/A - :pypi:`pytest-cov` Pytest plugin for measuring coverage. Apr 05, 2025 5 - Production/Stable pytest>=4.6 + :pypi:`pytest-cov` Pytest plugin for measuring coverage. 
Sep 09, 2025 5 - Production/Stable pytest>=7 :pypi:`pytest-cover` Pytest plugin for measuring coverage. Forked from \`pytest-cov\`. Aug 01, 2015 5 - Production/Stable N/A :pypi:`pytest-coverage` Jun 17, 2015 N/A N/A :pypi:`pytest-coverage-context` Coverage dynamic context support for PyTest, including sub-processes Jun 28, 2023 4 - Beta N/A @@ -321,7 +343,8 @@ This list contains 1641 plugins. :pypi:`pytest-crate` Manages CrateDB instances during your integration tests May 28, 2019 3 - Alpha pytest (>=4.0) :pypi:`pytest-cratedb` Manage CrateDB instances for integration tests Oct 08, 2024 4 - Beta pytest<9 :pypi:`pytest-cratedb-reporter` A pytest plugin for reporting test results to CrateDB Mar 11, 2025 N/A pytest>=6.0.0 - :pypi:`pytest-crayons` A pytest plugin for colorful print statements Oct 08, 2023 N/A pytest + :pypi:`pytest-crayons` A pytest plugin for colorful print statements Oct 14, 2025 5 - Production/Stable pytest + :pypi:`pytest-cream` The cream of test execution - smooth pytest workflows with intelligent orchestration Oct 26, 2025 N/A pytest :pypi:`pytest-create` pytest-create Feb 15, 2023 1 - Planning N/A :pypi:`pytest-cricri` A Cricri plugin for pytest. Jan 27, 2018 N/A pytest :pypi:`pytest-crontab` add crontab task in crontab Dec 09, 2019 N/A N/A @@ -341,11 +364,11 @@ This list contains 1641 plugins. :pypi:`pytest-cython-collect` Jun 17, 2022 N/A pytest :pypi:`pytest-darker` A pytest plugin for checking of modified code using Darker Feb 25, 2024 N/A pytest <7,>=6.0.1 :pypi:`pytest-dash` pytest fixtures to run dash applications. Mar 18, 2019 N/A N/A - :pypi:`pytest-dashboard` May 20, 2025 N/A pytest<8.0.0,>=7.4.3 + :pypi:`pytest-dashboard` Jun 02, 2025 N/A pytest<8.0.0,>=7.4.3 :pypi:`pytest-data` Useful functions for managing data for pytest fixtures Nov 01, 2016 5 - Production/Stable N/A - :pypi:`pytest-databases` Reusable database fixtures for any and all databases. 
May 25, 2025 4 - Beta pytest + :pypi:`pytest-databases` Reusable database fixtures for any and all databases. Oct 06, 2025 4 - Beta pytest :pypi:`pytest-databricks` Pytest plugin for remote Databricks notebooks testing Jul 29, 2020 N/A pytest - :pypi:`pytest-datadir` pytest plugin for test data directories and files May 30, 2025 5 - Production/Stable pytest>=7.0 + :pypi:`pytest-datadir` pytest plugin for test data directories and files Jul 30, 2025 5 - Production/Stable pytest>=7.0 :pypi:`pytest-datadir-mgr` Manager for test data: downloads, artifact caching, and a tmpdir context. Apr 06, 2023 5 - Production/Stable pytest (>=7.1) :pypi:`pytest-datadir-ng` Fixtures for pytest allowing test functions/methods to easily retrieve test resources from the local filesystem. Dec 25, 2019 5 - Production/Stable pytest :pypi:`pytest-datadir-nng` Fixtures for pytest allowing test functions/methods to easily retrieve test resources from the local filesystem. Nov 09, 2022 5 - Production/Stable pytest (>=7.0.0,<8.0.0) @@ -354,6 +377,8 @@ This list contains 1641 plugins. :pypi:`pytest-datafiles` py.test plugin to create a 'tmp_path' containing predefined files/directories. Feb 24, 2023 5 - Production/Stable pytest (>=3.6) :pypi:`pytest-datafixtures` Data fixtures for pytest made simple. May 15, 2025 5 - Production/Stable N/A :pypi:`pytest-data-from-files` pytest plugin to provide data from files loaded automatically Oct 13, 2021 4 - Beta pytest + :pypi:`pytest-dataguard` Data validation and integrity testing for your datasets using pytest. Oct 08, 2025 N/A pytest>=8.4.2 + :pypi:`pytest-data-loader` Pytest plugin for loading test data for data-driven testing (DDT) Oct 29, 2025 4 - Beta pytest<9,>=7.0.0 :pypi:`pytest-dataplugin` A pytest plugin for managing an archive of test data. Sep 16, 2017 1 - Planning N/A :pypi:`pytest-datarecorder` A py.test plugin recording and comparing test output. 
Jul 31, 2024 5 - Production/Stable pytest :pypi:`pytest-dataset` Plugin for loading different datasets for pytest by prefix from json or yaml files Sep 01, 2023 5 - Production/Stable N/A @@ -366,25 +391,28 @@ This list contains 1641 plugins. :pypi:`pytest-dbt-adapter` A pytest plugin for testing dbt adapter plugins Nov 24, 2021 N/A pytest (<7,>=6) :pypi:`pytest-dbt-conventions` A pytest plugin for linting a dbt project's conventions Mar 02, 2022 N/A pytest (>=6.2.5,<7.0.0) :pypi:`pytest-dbt-core` Pytest extension for dbt. Jun 04, 2024 N/A pytest>=6.2.5; extra == "test" - :pypi:`pytest-dbt-duckdb` Fearless testing for dbt models, powered by DuckDB. Feb 09, 2025 4 - Beta pytest>=8.3.4 + :pypi:`pytest-dbt-duckdb` Fearless testing for dbt models, powered by DuckDB. Oct 28, 2025 4 - Beta pytest>=8.3.4 :pypi:`pytest-dbt-postgres` Pytest tooling to unittest DBT & Postgres models Sep 03, 2024 N/A pytest<9.0.0,>=8.3.2 :pypi:`pytest-dbus-notification` D-BUS notifications for pytest results. Mar 05, 2014 5 - Production/Stable N/A :pypi:`pytest-dbx` Pytest plugin to run unit tests for dbx (Databricks CLI extensions) related code Nov 29, 2022 N/A pytest (>=7.1.3,<8.0.0) :pypi:`pytest-dc` Manages Docker containers during your integration tests Aug 16, 2023 5 - Production/Stable pytest >=3.3 :pypi:`pytest-deadfixtures` A simple plugin to list unused fixtures in pytest Jul 23, 2020 5 - Production/Stable N/A :pypi:`pytest-deduplicate` Identifies duplicate unit tests Aug 12, 2023 4 - Beta pytest + :pypi:`pytest-deepassert` A pytest plugin for enhanced assertion reporting with detailed diffs Sep 02, 2025 3 - Alpha pytest>=7.0.0 :pypi:`pytest-deepcov` deepcov Mar 30, 2021 N/A N/A :pypi:`pytest_defer` A 'defer' fixture for pytest Nov 13, 2024 N/A pytest>=8.3 + :pypi:`pytest-delta` Run only tests impacted by your code changes (delta-based selection) for pytest. 
Oct 27, 2025 4 - Beta pytest>=7.0 :pypi:`pytest-demo-plugin` pytest示例插件 May 15, 2021 N/A N/A :pypi:`pytest-dependency` Manage dependencies of tests Dec 31, 2023 4 - Beta N/A :pypi:`pytest-depends` Tests that depend on other tests Apr 05, 2020 5 - Production/Stable pytest (>=3) + :pypi:`pytest-depper` Smart test selection based on AST-level code dependency analysis Oct 23, 2025 4 - Beta pytest>=7.0.0 :pypi:`pytest-deprecate` Mark tests as testing a deprecated feature with a warning note. Jul 01, 2019 N/A N/A :pypi:`pytest-deprecator` A simple plugin to use with pytest Dec 02, 2024 4 - Beta pytest>=6.2.0 - :pypi:`pytest-describe` Describe-style plugin for pytest Feb 10, 2024 5 - Production/Stable pytest <9,>=4.6 + :pypi:`pytest-describe` Describe-style plugin for pytest Oct 23, 2025 5 - Production/Stable pytest<9,>=6 :pypi:`pytest-describe-it` plugin for rich text descriptions Jul 19, 2019 4 - Beta pytest :pypi:`pytest-deselect-if` A plugin to deselect pytests tests rather than using skipif Dec 26, 2024 4 - Beta pytest>=6.2.0 :pypi:`pytest-devpi-server` DevPI server fixture for py.test Oct 17, 2024 5 - Production/Stable pytest - :pypi:`pytest-dfm` pytest-dfm provides a pytest integration for DV Flow Manager, a build system for silicon design May 10, 2025 N/A pytest + :pypi:`pytest-dfm` pytest-dfm provides a pytest integration for DV Flow Manager, a build system for silicon design Sep 13, 2025 N/A pytest :pypi:`pytest-dhos` Common fixtures for pytest in DHOS services and libraries Sep 07, 2022 N/A N/A :pypi:`pytest-diamond` pytest plugin for diamond Aug 31, 2015 4 - Beta N/A :pypi:`pytest-dicom` pytest plugin to provide DICOM fixtures Dec 19, 2018 3 - Alpha pytest @@ -392,8 +420,9 @@ This list contains 1641 plugins. 
:pypi:`pytest-diff` A simple plugin to use with pytest Mar 30, 2019 4 - Beta pytest (>=3.5.0) :pypi:`pytest-diff-selector` Get tests affected by code changes (using git) Feb 24, 2022 4 - Beta pytest (>=6.2.2) ; extra == 'all' :pypi:`pytest-difido` PyTest plugin for generating Difido reports Oct 23, 2022 4 - Beta pytest (>=4.0.0) + :pypi:`pytest-directives` Control your tests flow Aug 11, 2025 3 - Alpha pytest :pypi:`pytest-dir-equal` pytest-dir-equals is a pytest plugin providing helpers to assert directories equality allowing golden testing Dec 11, 2023 4 - Beta pytest>=7.3.2 - :pypi:`pytest-dirty` Static import analysis for thrifty testing. Jul 11, 2024 3 - Alpha pytest>=8.2; extra == "dev" + :pypi:`pytest-dirty` Static import analysis for thrifty testing. Jun 08, 2025 3 - Alpha pytest>=8.2; extra == "dev" :pypi:`pytest-disable` pytest plugin to disable a test and skip it from testrun Sep 10, 2015 4 - Beta N/A :pypi:`pytest-disable-plugin` Disable plugins per test Feb 28, 2019 4 - Beta pytest (>=3.5.0) :pypi:`pytest-discord` A pytest plugin to notify test results to a Discord channel. May 11, 2024 4 - Beta pytest!=6.0.0,<9,>=3.3.2 @@ -403,7 +432,7 @@ This list contains 1641 plugins. :pypi:`pytest-ditto-pyarrow` pytest-ditto plugin for pyarrow tables. Jun 09, 2024 4 - Beta pytest>=3.5.0 :pypi:`pytest-django` A Django plugin for pytest. Apr 03, 2025 5 - Production/Stable pytest>=7.0.0 :pypi:`pytest-django-ahead` A Django plugin for pytest. Oct 27, 2016 5 - Production/Stable pytest (>=2.9) - :pypi:`pytest-djangoapp` Nice pytest plugin to help you with Django pluggable application testing. May 19, 2023 4 - Beta pytest + :pypi:`pytest-djangoapp` Nice pytest plugin to help you with Django pluggable application testing. Sep 28, 2025 5 - Production/Stable pytest :pypi:`pytest-django-cache-xdist` A djangocachexdist plugin for pytest May 12, 2020 4 - Beta N/A :pypi:`pytest-django-casperjs` Integrate CasperJS with your django tests as a pytest fixture. 
Mar 15, 2015 2 - Pre-Alpha N/A :pypi:`pytest-django-class` A pytest plugin for running django in class-scoped fixtures Aug 08, 2023 4 - Beta N/A @@ -426,14 +455,14 @@ This list contains 1641 plugins. :pypi:`pytest-doc` A documentation plugin for py.test. Jun 28, 2015 5 - Production/Stable N/A :pypi:`pytest-docfiles` pytest plugin to test codeblocks in your documentation. Dec 22, 2021 4 - Beta pytest (>=3.7.0) :pypi:`pytest-docgen` An RST Documentation Generator for pytest-based test suites Apr 17, 2020 N/A N/A - :pypi:`pytest-docker` Simple pytest fixtures for Docker and Docker Compose based tests May 26, 2025 N/A pytest<9.0,>=4.0 + :pypi:`pytest-docker` Simple pytest fixtures for Docker and Docker Compose based tests Jul 04, 2025 N/A pytest<9.0,>=4.0 :pypi:`pytest-docker-apache-fixtures` Pytest fixtures for testing with apache2 (httpd). Aug 12, 2024 4 - Beta pytest :pypi:`pytest-docker-butla` Jun 16, 2019 3 - Alpha N/A :pypi:`pytest-dockerc` Run, manage and stop Docker Compose project from Docker API Oct 09, 2020 5 - Production/Stable pytest (>=3.0) :pypi:`pytest-docker-compose` Manages Docker containers during your integration tests Jan 26, 2021 5 - Production/Stable pytest (>=3.3) :pypi:`pytest-docker-compose-v2` Manages Docker containers during your integration tests Dec 11, 2024 4 - Beta pytest>=7.2.2 :pypi:`pytest-docker-db` A plugin to use docker databases for pytests Mar 20, 2021 5 - Production/Stable pytest (>=3.1.1) - :pypi:`pytest-docker-fixtures` pytest docker fixtures May 14, 2025 3 - Alpha pytest + :pypi:`pytest-docker-fixtures` pytest docker fixtures Jun 25, 2025 3 - Alpha pytest :pypi:`pytest-docker-git-fixtures` Pytest fixtures for testing with git scm. Aug 12, 2024 4 - Beta pytest :pypi:`pytest-docker-haproxy-fixtures` Pytest fixtures for testing with haproxy. 
Aug 12, 2024 4 - Beta pytest :pypi:`pytest-docker-pexpect` pytest plugin for writing functional tests with pexpect and docker Jan 14, 2019 N/A pytest @@ -449,7 +478,8 @@ This list contains 1641 plugins. :pypi:`pytest-doctest-ellipsis-markers` Setup additional values for ELLIPSIS_MARKER for doctests Jan 12, 2018 4 - Beta N/A :pypi:`pytest-doctest-import` A simple pytest plugin to import names and add them to the doctest namespace. Nov 13, 2018 4 - Beta pytest (>=3.3.0) :pypi:`pytest-doctest-mkdocstrings` Run pytest --doctest-modules with markdown docstrings in code blocks (\`\`\`) Mar 02, 2024 N/A pytest - :pypi:`pytest-doctestplus` Pytest plugin with advanced doctest features. Jan 25, 2025 5 - Production/Stable pytest>=4.6 + :pypi:`pytest-doctest-only` A plugin to run only doctest Jul 30, 2025 4 - Beta pytest>=8.3.0 + :pypi:`pytest-doctestplus` Pytest plugin with advanced doctest features. Oct 18, 2025 5 - Production/Stable pytest>=4.6 :pypi:`pytest-documentary` A simple pytest plugin to generate test documentation Jul 11, 2024 N/A pytest :pypi:`pytest-dogu-report` pytest plugin for dogu report Jul 07, 2023 N/A N/A :pypi:`pytest-dogu-sdk` pytest plugin for the Dogu Dec 14, 2023 N/A N/A @@ -457,20 +487,25 @@ This list contains 1641 plugins. :pypi:`pytest-donde` record pytest session characteristics per test item (coverage and duration) into a persistent file and use them in your own plugin or script. Oct 01, 2023 4 - Beta pytest >=7.3.1 :pypi:`pytest-doorstop` A pytest plugin for adding test results into doorstop items. 
Jun 09, 2020 4 - Beta pytest (>=3.5.0) :pypi:`pytest-dotenv` A py.test plugin that parses environment files before running tests Jun 16, 2020 4 - Beta pytest (>=5.0.0) + :pypi:`pytest-dotenv-modern` A modern pytest plugin that loads environment variables from dotenv files Sep 27, 2025 4 - Beta pytest>=6.0.0 :pypi:`pytest-dot-only-pkcopley` A Pytest marker for only running a single test Oct 27, 2023 N/A N/A :pypi:`pytest-dparam` A more readable alternative to @pytest.mark.parametrize. Aug 27, 2024 6 - Mature pytest :pypi:`pytest-dpg` pytest-dpg is a pytest plugin for testing Dear PyGui (DPG) applications Aug 13, 2024 N/A N/A :pypi:`pytest-draw` Pytest plugin for randomly selecting a specific number of tests Mar 21, 2023 3 - Alpha pytest :pypi:`pytest-drf` A Django REST framework plugin for pytest. Jul 12, 2022 5 - Production/Stable pytest (>=3.7) + :pypi:`pytest-drill-sergeant` A pytest plugin that enforces test quality standards through automatic marker detection and AAA structure validation Sep 12, 2025 4 - Beta pytest>=7.0.0 :pypi:`pytest-drivings` Tool to allow webdriver automation to be ran locally or remotely Jan 13, 2021 N/A N/A :pypi:`pytest-drop-dup-tests` A Pytest plugin to drop duplicated tests during collection Mar 04, 2024 5 - Production/Stable pytest >=7 :pypi:`pytest-dryci` Test caching plugin for pytest Sep 27, 2024 4 - Beta N/A :pypi:`pytest-dryrun` A Pytest plugin to ignore tests during collection without reporting them in the test summary. 
Jan 19, 2025 5 - Production/Stable pytest<9,>=7.40 - :pypi:`pytest-dsl` A DSL testing framework based on pytest May 29, 2025 N/A pytest>=7.0.0 + :pypi:`pytest-dsl` A DSL testing framework based on pytest Oct 31, 2025 N/A pytest>=7.0.0 + :pypi:`pytest-dsl-ssh` SSH/SFTP关键字插件,为pytest-dsl提供SSH和SFTP操作能力 Jul 25, 2025 4 - Beta pytest>=7.0.0 + :pypi:`pytest-dsl-ui` Playwright-based UI automation keywords for pytest-dsl framework Aug 21, 2025 N/A pytest>=7.0.0; extra == "dev" :pypi:`pytest-dummynet` A py.test plugin providing access to a dummynet. Dec 15, 2021 5 - Production/Stable pytest :pypi:`pytest-dump2json` A pytest plugin for dumping test results to json. Jun 29, 2015 N/A N/A :pypi:`pytest-duration-insights` Jul 15, 2024 N/A N/A - :pypi:`pytest-durations` Pytest plugin reporting fixtures and test functions execution time. Apr 29, 2025 5 - Production/Stable pytest>=4.6 + :pypi:`pytest-durations` Pytest plugin reporting fixtures and test functions execution time. Aug 29, 2025 5 - Production/Stable pytest>=4.6 + :pypi:`pytest-dynamic-parameterize` A Python package for managing pytest plugins. Oct 14, 2025 N/A pytest :pypi:`pytest-dynamicrerun` A pytest plugin to rerun tests dynamically based off of test outcome and output. Aug 15, 2020 4 - Beta N/A :pypi:`pytest-dynamodb` DynamoDB fixtures for pytest Apr 04, 2025 5 - Production/Stable pytest :pypi:`pytest-easy-addoption` pytest-easy-addoption: Easy way to work with pytest addoption Jan 22, 2020 N/A N/A @@ -484,20 +519,19 @@ This list contains 1641 plugins. :pypi:`pytest-ekstazi` Pytest plugin to select test using Ekstazi algorithm Sep 10, 2022 N/A pytest :pypi:`pytest-elasticsearch` Elasticsearch fixtures and fixture factories for Pytest. Dec 03, 2024 5 - Production/Stable pytest>=7.0 :pypi:`pytest-elasticsearch-test` Elasticsearch fixtures and fixture factories for Pytest. 
Apr 20, 2025 5 - Production/Stable pytest>=7.0 - :pypi:`pytest-elbase` Elbase pytest plugin Apr 15, 2025 N/A N/A :pypi:`pytest-elements` Tool to help automate user interfaces Jan 13, 2021 N/A pytest (>=5.4,<6.0) :pypi:`pytest-eliot` An eliot plugin for pytest. Aug 31, 2022 1 - Planning pytest (>=5.4.0) :pypi:`pytest-elk-reporter` A simple plugin to use with pytest Jul 25, 2024 4 - Beta pytest>=3.5.0 :pypi:`pytest-email` Send execution result email Jul 08, 2020 N/A pytest - :pypi:`pytest-embedded` A pytest plugin that designed for embedded testing. Apr 22, 2025 5 - Production/Stable pytest>=7.0 - :pypi:`pytest-embedded-arduino` Make pytest-embedded plugin work with Arduino. Apr 22, 2025 5 - Production/Stable N/A - :pypi:`pytest-embedded-idf` Make pytest-embedded plugin work with ESP-IDF. Apr 22, 2025 5 - Production/Stable N/A - :pypi:`pytest-embedded-jtag` Make pytest-embedded plugin work with JTAG. Apr 22, 2025 5 - Production/Stable N/A - :pypi:`pytest-embedded-nuttx` Make pytest-embedded plugin work with NuttX. Apr 22, 2025 5 - Production/Stable N/A - :pypi:`pytest-embedded-qemu` Make pytest-embedded plugin work with QEMU. Apr 22, 2025 5 - Production/Stable N/A - :pypi:`pytest-embedded-serial` Make pytest-embedded plugin work with Serial. Apr 22, 2025 5 - Production/Stable N/A - :pypi:`pytest-embedded-serial-esp` Make pytest-embedded plugin work with Espressif target boards. Apr 22, 2025 5 - Production/Stable N/A - :pypi:`pytest-embedded-wokwi` Make pytest-embedded plugin work with the Wokwi CLI. Apr 22, 2025 5 - Production/Stable N/A + :pypi:`pytest-embedded` A pytest plugin that designed for embedded testing. Oct 27, 2025 5 - Production/Stable pytest>=7.0 + :pypi:`pytest-embedded-arduino` Make pytest-embedded plugin work with Arduino. Oct 27, 2025 5 - Production/Stable N/A + :pypi:`pytest-embedded-idf` Make pytest-embedded plugin work with ESP-IDF. Oct 27, 2025 5 - Production/Stable N/A + :pypi:`pytest-embedded-jtag` Make pytest-embedded plugin work with JTAG. 
Oct 27, 2025 5 - Production/Stable N/A + :pypi:`pytest-embedded-nuttx` Make pytest-embedded plugin work with NuttX. Oct 27, 2025 5 - Production/Stable N/A + :pypi:`pytest-embedded-qemu` Make pytest-embedded plugin work with QEMU. Oct 27, 2025 5 - Production/Stable N/A + :pypi:`pytest-embedded-serial` Make pytest-embedded plugin work with Serial. Oct 27, 2025 5 - Production/Stable N/A + :pypi:`pytest-embedded-serial-esp` Make pytest-embedded plugin work with Espressif target boards. Oct 27, 2025 5 - Production/Stable N/A + :pypi:`pytest-embedded-wokwi` Make pytest-embedded plugin work with the Wokwi CLI. Oct 27, 2025 5 - Production/Stable N/A :pypi:`pytest-embrace` 💝 Dataclasses-as-tests. Describe the runtime once and multiply coverage with no boilerplate. Mar 25, 2023 N/A pytest (>=7.0,<8.0) :pypi:`pytest-emoji` A pytest plugin that adds emojis to your test result report Feb 19, 2019 4 - Beta pytest (>=4.2.1) :pypi:`pytest-emoji-output` Pytest plugin to represent test output with emoji support Apr 09, 2023 4 - Beta pytest (==7.0.1) @@ -508,12 +542,13 @@ This list contains 1641 plugins. :pypi:`pytest_energy_reporter` An energy estimation reporter for pytest Mar 28, 2024 3 - Alpha pytest<9.0.0,>=8.1.1 :pypi:`pytest-enhanced-reports` Enhanced test reports for pytest Dec 15, 2022 N/A N/A :pypi:`pytest-enhancements` Improvements for pytest (rejected upstream) Oct 30, 2019 4 - Beta N/A - :pypi:`pytest-env` pytest plugin that allows you to add environment variables. Sep 17, 2024 5 - Production/Stable pytest>=8.3.3 + :pypi:`pytest-env` pytest plugin that allows you to add environment variables. 
Oct 09, 2025 5 - Production/Stable pytest>=8.4.2 :pypi:`pytest-envfiles` A py.test plugin that parses environment files before running tests Oct 08, 2015 3 - Alpha N/A :pypi:`pytest-env-info` Push information about the running pytest into envvars Nov 25, 2017 4 - Beta pytest (>=3.1.1) :pypi:`pytest-environment` Pytest Environment Mar 17, 2024 1 - Planning N/A :pypi:`pytest-envraw` py.test plugin that allows you to add environment variables. Aug 27, 2020 4 - Beta pytest (>=2.6.0) :pypi:`pytest-envvars` Pytest plugin to validate use of envvars on your tests Jun 13, 2020 5 - Production/Stable pytest (>=3.0.0) + :pypi:`pytest-envx` Pytest plugin for managing environment variables with interpolation and .env file support. Jun 28, 2025 4 - Beta pytest>=8.4.1 :pypi:`pytest-env-yaml` Apr 02, 2019 N/A N/A :pypi:`pytest-eradicate` pytest plugin to check for commented out code Sep 08, 2020 N/A pytest (>=2.4.2) :pypi:`pytest_erp` py.test plugin to send test info to report portal dynamically Jan 13, 2015 N/A N/A @@ -529,12 +564,12 @@ This list contains 1641 plugins. :pypi:`pytest_evm` The testing package containing tools to test Web3-based projects Sep 23, 2024 4 - Beta pytest<9.0.0,>=8.1.1 :pypi:`pytest_exact_fixtures` Parse queries in Lucene and Elasticsearch syntaxes Feb 04, 2019 N/A N/A :pypi:`pytest-examples` Pytest plugin for testing examples in docstrings and markdown files. 
May 06, 2025 N/A pytest>=7 - :pypi:`pytest-exasol-backend` Feb 11, 2025 N/A pytest<9,>=7 - :pypi:`pytest-exasol-extension` Feb 11, 2025 N/A pytest<9,>=7 + :pypi:`pytest-exasol-backend` Oct 29, 2025 N/A pytest<9,>=7 + :pypi:`pytest-exasol-extension` Oct 29, 2025 N/A pytest<9,>=7 :pypi:`pytest-exasol-itde` Nov 22, 2024 N/A pytest<9,>=7 :pypi:`pytest-exasol-saas` Nov 22, 2024 N/A pytest<9,>=7 - :pypi:`pytest-exasol-slc` Feb 11, 2025 N/A pytest<9,>=7 - :pypi:`pytest-excel` pytest plugin for generating excel reports Jun 18, 2024 5 - Production/Stable pytest>3.6 + :pypi:`pytest-exasol-slc` Oct 30, 2025 N/A pytest<9,>=7 + :pypi:`pytest-excel` pytest plugin for generating excel reports Jul 22, 2025 5 - Production/Stable pytest :pypi:`pytest-exceptional` Better exceptions Mar 16, 2017 4 - Beta N/A :pypi:`pytest-exception-script` Walk your code through exception script to check it's resiliency to failures. Aug 04, 2020 3 - Alpha pytest :pypi:`pytest-executable` pytest plugin for testing executables Oct 07, 2023 N/A pytest <8,>=5 @@ -558,10 +593,10 @@ This list contains 1641 plugins. :pypi:`pytest_extra` Some helpers for writing tests with pytest. Aug 14, 2014 N/A N/A :pypi:`pytest-extra-durations` A pytest plugin to get durations on a per-function basis and per module basis. 
Apr 21, 2020 4 - Beta pytest (>=3.5.0) :pypi:`pytest-extra-markers` Additional pytest markers to dynamically enable/disable tests viia CLI flags Mar 05, 2023 4 - Beta pytest - :pypi:`pytest-f3ts` Pytest Plugin for communicating test results and information to a FixturFab Test Runner GUI May 08, 2025 N/A pytest<8.0.0,>=7.2.1 + :pypi:`pytest-f3ts` Pytest Plugin for communicating test results and information to a FixturFab Test Runner GUI Jul 15, 2025 N/A pytest<8.0.0,>=7.2.1 :pypi:`pytest-fabric` Provides test utilities to run fabric task tests by using docker containers Sep 12, 2018 5 - Production/Stable N/A :pypi:`pytest-factory` Use factories for test setup with py.test Sep 06, 2020 3 - Alpha pytest (>4.3) - :pypi:`pytest-factoryboy` Factory Boy support for pytest. Mar 05, 2024 6 - Mature pytest (>=6.2) + :pypi:`pytest-factoryboy` Factory Boy support for pytest. Jul 01, 2025 6 - Mature pytest>=7.0 :pypi:`pytest-factoryboy-fixtures` Generates pytest fixtures that allow the use of type hinting Jun 25, 2020 N/A N/A :pypi:`pytest-factoryboy-state` Simple factoryboy random state management Mar 22, 2022 5 - Production/Stable pytest (>=5.0) :pypi:`pytest-failed-screen-record` Create a video of the screen when pytest fails Jan 05, 2023 4 - Beta pytest (>=7.1.2d,<8.0.0) @@ -587,25 +622,27 @@ This list contains 1641 plugins. :pypi:`pytest-file-watcher` Pytest-File-Watcher is a CLI tool that watches for changes in your code and runs pytest on the changed files. 
Mar 23, 2023 N/A pytest :pypi:`pytest-filter-case` run test cases filter by mark Nov 05, 2020 N/A N/A :pypi:`pytest-filter-subpackage` Pytest plugin for filtering based on sub-packages Mar 04, 2024 5 - Production/Stable pytest >=4.6 - :pypi:`pytest-find-dependencies` A pytest plugin to find dependencies between tests Mar 16, 2024 4 - Beta pytest >=4.3.0 + :pypi:`pytest-find-dependencies` A pytest plugin to find dependencies between tests Jul 16, 2025 5 - Production/Stable pytest>=6.2.4 :pypi:`pytest-finer-verdicts` A pytest plugin to treat non-assertion failures as test errors. Jun 18, 2020 N/A pytest (>=5.4.3) :pypi:`pytest-firefox` Feb 28, 2025 N/A N/A - :pypi:`pytest-fixturecheck` A pytest plugin to check fixture validity before test execution May 17, 2025 3 - Alpha pytest>=6.0.0 - :pypi:`pytest-fixture-classes` Fixtures as classes that work well with dependency injection, autocompletetion, type checkers, and language servers Sep 02, 2023 5 - Production/Stable pytest + :pypi:`pytest-fixturecheck` A pytest plugin to check fixture validity before test execution Jun 02, 2025 3 - Alpha pytest>=6.0.0 + :pypi:`pytest-fixture-classes` Fixtures as classes that work well with dependency injection, autocompletetion, type checkers, and language servers Oct 12, 2025 5 - Production/Stable N/A + :pypi:`pytest-fixture-collect` A utility to collect pytest fixture file paths. Jul 25, 2025 N/A pytest; extra == "test" :pypi:`pytest-fixturecollection` A pytest plugin to collect tests based on fixtures being used by tests Feb 22, 2024 4 - Beta pytest >=3.5.0 :pypi:`pytest-fixture-config` Fixture configuration utils for py.test Oct 17, 2024 5 - Production/Stable pytest :pypi:`pytest-fixture-forms` A pytest plugin for creating fixtures that holds different forms between tests. 
Dec 06, 2024 N/A pytest<9.0.0,>=7.0.0 :pypi:`pytest-fixture-maker` Pytest plugin to load fixtures from YAML files Sep 21, 2021 N/A N/A :pypi:`pytest-fixture-marker` A pytest plugin to add markers based on fixtures used. Oct 11, 2020 5 - Production/Stable N/A - :pypi:`pytest-fixture-order` pytest plugin to control fixture evaluation order May 16, 2022 5 - Production/Stable pytest (>=3.0) + :pypi:`pytest-fixture-order` pytest plugin to control fixture evaluation order Oct 22, 2025 5 - Production/Stable pytest>=3.0 :pypi:`pytest-fixture-ref` Lets users reference fixtures without name matching magic. Nov 17, 2022 4 - Beta N/A :pypi:`pytest-fixture-remover` A LibCST codemod to remove pytest fixtures applied via the usefixtures decorator, as well as its parametrizations. Feb 14, 2024 5 - Production/Stable N/A :pypi:`pytest-fixture-rtttg` Warn or fail on fixture name clash Feb 23, 2022 N/A pytest (>=7.0.1,<8.0.0) :pypi:`pytest-fixtures` Common fixtures for pytest May 01, 2019 5 - Production/Stable N/A + :pypi:`pytest-fixtures-fixtures` Handy fixtues to access your fixtures from your _pytest tests. Sep 14, 2025 4 - Beta pytest>=8.4.1 :pypi:`pytest-fixture-tools` Plugin for pytest which provides tools for fixtures Apr 30, 2025 6 - Mature pytest :pypi:`pytest-fixture-typecheck` A pytest plugin to assert type annotations at runtime. Aug 24, 2021 N/A pytest :pypi:`pytest-flake8` pytest plugin to check FLAKE8 requirements Nov 09, 2024 5 - Production/Stable pytest>=7.0 - :pypi:`pytest-flake8-path` A pytest fixture for testing flake8 plugins. Oct 25, 2024 5 - Production/Stable pytest + :pypi:`pytest-flake8-path` A pytest fixture for testing flake8 plugins. 
Sep 09, 2025 5 - Production/Stable pytest :pypi:`pytest-flake8-v2` pytest plugin to check FLAKE8 requirements Mar 01, 2022 5 - Production/Stable pytest (>=7.0) :pypi:`pytest-flake-detection` Continuously runs your tests to detect flaky tests Nov 29, 2024 4 - Beta pytest>=6.2.0 :pypi:`pytest-flakefinder` Runs tests multiple times to expose flakiness. Oct 26, 2022 4 - Beta pytest (>=2.7.1) @@ -618,8 +655,9 @@ This list contains 1641 plugins. :pypi:`pytest-flexreport` Apr 15, 2023 4 - Beta pytest :pypi:`pytest-fluent` A pytest plugin in order to provide logs via fluentd Aug 14, 2024 4 - Beta pytest>=7.0.0 :pypi:`pytest-fluentbit` A pytest plugin in order to provide logs via fluentbit Jun 16, 2023 4 - Beta pytest (>=7.0.0) - :pypi:`pytest-fly` pytest runner and observer May 19, 2025 3 - Alpha pytest + :pypi:`pytest-fly` pytest runner and observer Jun 07, 2025 3 - Alpha pytest :pypi:`pytest-flyte` Pytest fixtures for simplifying Flyte integration testing May 03, 2021 N/A pytest + :pypi:`pytest-fmu-filter` A pytest plugin to filter fmus Jun 23, 2025 4 - Beta pytest>=7.0.0 :pypi:`pytest-focus` A pytest plugin that alerts user of failed test cases with screen notifications May 04, 2019 4 - Beta pytest :pypi:`pytest-forbid` Mar 07, 2023 N/A pytest (>=7.2.2,<8.0.0) :pypi:`pytest-forcefail` py.test plugin to make the test failing regardless of pytest.mark.xfail May 15, 2018 4 - Beta N/A @@ -627,7 +665,7 @@ This list contains 1641 plugins. 
:pypi:`pytest-forward-compatibility` A pytest plugin to shim pytest commandline options for fowards compatibility Sep 29, 2020 N/A N/A :pypi:`pytest-frappe` Pytest Frappe Plugin - A set of pytest fixtures to test Frappe applications Jul 30, 2024 4 - Beta pytest>=7.0.0 :pypi:`pytest-freethreaded` pytest plugin for running parallel tests Oct 03, 2024 5 - Production/Stable pytest - :pypi:`pytest-freezeblaster` Wrap tests with fixtures in freeze_time Feb 11, 2025 N/A pytest>=6.2.5 + :pypi:`pytest-freezeblaster` Wrap tests with fixtures in freeze_time Oct 13, 2025 N/A pytest>=6.2.5 :pypi:`pytest-freezegun` Wrap tests with fixtures in freeze_time Jul 19, 2020 4 - Beta pytest (>=3.0.0) :pypi:`pytest-freezer` Pytest plugin providing a fixture interface for spulec/freezegun Dec 12, 2024 N/A pytest>=3.6 :pypi:`pytest-freeze-reqs` Check if requirement files are frozen Apr 29, 2021 N/A N/A @@ -635,7 +673,7 @@ This list contains 1641 plugins. :pypi:`pytest-func-cov` Pytest plugin for measuring function coverage Apr 15, 2021 3 - Alpha pytest (>=5) :pypi:`pytest-funcnodes` Testing plugin for funcnodes Mar 19, 2025 4 - Beta pytest>=6.2.0 :pypi:`pytest-funparam` An alternative way to parametrize test cases. Dec 02, 2021 4 - Beta pytest >=4.6.0 - :pypi:`pytest-fv` pytest extensions to support running functional-verification jobs Feb 27, 2025 N/A pytest + :pypi:`pytest-fv` pytest extensions to support running functional-verification jobs Jun 06, 2025 N/A pytest :pypi:`pytest-fxa` pytest plugin for Firefox Accounts Aug 28, 2018 5 - Production/Stable N/A :pypi:`pytest-fxa-mte` pytest plugin for Firefox Accounts Oct 02, 2024 5 - Production/Stable N/A :pypi:`pytest-fxtest` Oct 27, 2020 N/A N/A @@ -646,14 +684,14 @@ This list contains 1641 plugins. :pypi:`pytest-gc` The garbage collector plugin for py.test Feb 01, 2018 N/A N/A :pypi:`pytest-gcov` Uses gcov to measure test coverage of a C library Feb 01, 2018 3 - Alpha N/A :pypi:`pytest-gcs` GCS fixtures and fixture factories for Pytest. 
Jan 24, 2025 5 - Production/Stable pytest>=6.2 - :pypi:`pytest-gee` The Python plugin for your GEE based packages. May 11, 2025 3 - Alpha pytest + :pypi:`pytest-gee` The Python plugin for your GEE based packages. Oct 16, 2025 3 - Alpha pytest :pypi:`pytest-gevent` Ensure that gevent is properly patched when invoking pytest Feb 25, 2020 N/A pytest :pypi:`pytest-gherkin` A flexible framework for executing BDD gherkin tests Jul 27, 2019 3 - Alpha pytest (>=5.0.0) :pypi:`pytest-gh-log-group` pytest plugin for gh actions Jan 11, 2022 3 - Alpha pytest :pypi:`pytest-ghostinspector` For finding/executing Ghost Inspector tests May 17, 2016 3 - Alpha N/A - :pypi:`pytest-girder` A set of pytest fixtures for testing Girder applications. May 29, 2025 N/A pytest>=3.6 + :pypi:`pytest-girder` A set of pytest fixtures for testing Girder applications. Sep 30, 2025 N/A pytest>=3.6 :pypi:`pytest-git` Git repository fixture for py.test Oct 17, 2024 5 - Production/Stable pytest - :pypi:`pytest-gitconfig` Provide a Git config sandbox for testing Aug 11, 2024 4 - Beta pytest>=7.1.2 + :pypi:`pytest-gitconfig` Provide a Git config sandbox for testing Oct 12, 2025 4 - Beta pytest>=7.1.2 :pypi:`pytest-gitcov` Pytest plugin for reporting on coverage of the last git commit. Jan 11, 2020 2 - Pre-Alpha N/A :pypi:`pytest-git-diff` Pytest plugin that allows the user to select the tests affected by a range of git commits Apr 02, 2024 N/A N/A :pypi:`pytest-git-fixtures` Pytest fixtures for testing with git. Mar 11, 2021 4 - Beta pytest @@ -665,19 +703,24 @@ This list contains 1641 plugins. :pypi:`pytest-gitlabci-parallelized` Parallelize pytest across GitLab CI workers. Mar 08, 2023 N/A N/A :pypi:`pytest-gitlab-code-quality` Collects warnings while testing and generates a GitLab Code Quality Report. 
Sep 09, 2024 N/A pytest>=8.1.1 :pypi:`pytest-gitlab-fold` Folds output sections in GitLab CI build log Dec 31, 2023 4 - Beta pytest >=2.6.0 + :pypi:`pytest-gitscope` A pragmatic pytest plugin that runs only the tests that matter, and ship faster Sep 24, 2025 5 - Production/Stable pytest>=7.0.0 :pypi:`pytest-git-selector` Utility to select tests that have had its dependencies modified (as identified by git diff) Nov 17, 2022 N/A N/A - :pypi:`pytest-glamor-allure` Extends allure-pytest functionality Apr 30, 2024 4 - Beta pytest<=8.2.0 + :pypi:`pytest-glamor-allure` Extends allure-pytest functionality Jul 20, 2025 4 - Beta pytest<=8.4.1 :pypi:`pytest-gnupg-fixtures` Pytest fixtures for testing with gnupg. Mar 04, 2021 4 - Beta pytest :pypi:`pytest-golden` Plugin for pytest that offloads expected outputs to data files Jul 18, 2022 N/A pytest (>=6.1.2) :pypi:`pytest-goldie` A plugin to support golden tests with pytest. May 23, 2023 4 - Beta pytest (>=3.5.0) :pypi:`pytest-google-chat` Notify google chat channel for test results Mar 27, 2022 4 - Beta pytest - :pypi:`pytest-google-cloud-storage` Pytest custom features, e.g. fixtures and various tests. Aimed to emulate Google Cloud Storage service May 22, 2025 N/A pytest==8.3.5 + :pypi:`pytest-google-cloud-storage` Pytest custom features, e.g. fixtures and various tests. Aimed to emulate Google Cloud Storage service Sep 11, 2025 N/A pytest>=8.0.0 + :pypi:`pytest-grader` Pytest extension for scoring programming assignments. 
Aug 25, 2025 N/A pytest>=8 :pypi:`pytest-gradescope` A pytest plugin for Gradescope integration Apr 29, 2025 N/A N/A :pypi:`pytest-graphql-schema` Get graphql schema as fixture for pytest Oct 18, 2019 N/A N/A :pypi:`pytest-greendots` Green progress dots Feb 08, 2014 3 - Alpha N/A + :pypi:`pytest-greener` Pytest plugin for Greener Oct 18, 2025 N/A pytest<9.0.0,>=8.3.3 + :pypi:`pytest-greet` Oct 21, 2025 N/A N/A :pypi:`pytest-group-by-class` A Pytest plugin for running a subset of your tests by splitting them in to groups of classes. Jun 27, 2023 5 - Production/Stable pytest (>=2.5) :pypi:`pytest-growl` Growl notifications for pytest results. Jan 13, 2014 5 - Production/Stable N/A :pypi:`pytest-grpc` pytest plugin for grpc May 01, 2020 N/A pytest (>=3.6.0) + :pypi:`pytest-grpc-aio` pytest plugin for grpc.aio Oct 28, 2025 N/A pytest>=3.6.0 :pypi:`pytest-grunnur` Py.Test plugin for Grunnur-based packages. Jul 26, 2024 N/A pytest>=6 :pypi:`pytest_gui_status` Show pytest status in gui Jan 23, 2016 N/A pytest :pypi:`pytest-hammertime` Display "🔨 " instead of "." for passed pytest tests. Jul 28, 2018 N/A pytest @@ -697,7 +740,7 @@ This list contains 1641 plugins. :pypi:`pytest-history` Pytest plugin to keep a history of your pytest runs Jan 14, 2024 N/A pytest (>=7.4.3,<8.0.0) :pypi:`pytest-home` Home directory fixtures Jul 28, 2024 5 - Production/Stable pytest :pypi:`pytest-homeassistant` A pytest plugin for use with homeassistant custom components. 
Aug 12, 2020 4 - Beta N/A - :pypi:`pytest-homeassistant-custom-component` Experimental package to automatically extract test plugins for Home Assistant custom components May 30, 2025 3 - Alpha pytest==8.3.5 + :pypi:`pytest-homeassistant-custom-component` Experimental package to automatically extract test plugins for Home Assistant custom components Oct 31, 2025 3 - Alpha pytest==8.4.2 :pypi:`pytest-honey` A simple plugin to use with pytest Jan 07, 2022 4 - Beta pytest (>=3.5.0) :pypi:`pytest-honors` Report on tests that honor constraints, and guard against regressions Mar 06, 2020 4 - Beta N/A :pypi:`pytest-hot-reloading` Sep 23, 2024 N/A N/A @@ -707,17 +750,28 @@ This list contains 1641 plugins. :pypi:`pytest-hoverfly-wrapper` Integrates the Hoverfly HTTP proxy into Pytest Feb 27, 2023 5 - Production/Stable pytest (>=3.7.0) :pypi:`pytest-hpfeeds` Helpers for testing hpfeeds in your python project Feb 28, 2023 4 - Beta pytest (>=6.2.4,<7.0.0) :pypi:`pytest-html` pytest plugin for generating HTML reports Nov 07, 2023 5 - Production/Stable pytest>=7.0.0 + :pypi:`pytest-html5` the best report for pytest Oct 11, 2025 N/A N/A :pypi:`pytest-html-cn` pytest plugin for generating HTML reports Aug 19, 2024 5 - Production/Stable pytest!=6.0.0,>=5.0 :pypi:`pytest-html-lee` optimized pytest plugin for generating HTML reports Jun 30, 2020 5 - Production/Stable pytest (>=5.0) :pypi:`pytest-html-merger` Pytest HTML reports merging utility Jul 12, 2024 N/A N/A + :pypi:`pytest-html-nova-act` A Pytest Plugin for Amazon Nova Act Python SDK. Sep 05, 2025 N/A N/A :pypi:`pytest-html-object-storage` Pytest report plugin for send HTML report on object-storage Jan 17, 2024 5 - Production/Stable N/A + :pypi:`pytest-html-plus` Get started with rich pytest reports in under 3 seconds. Just install the plugin — no setup required. The simplest, fastest reporter for pytest. 
Oct 30, 2025 N/A N/A :pypi:`pytest-html-profiling` Pytest plugin for generating HTML reports with per-test profiling and optionally call graph visualizations. Based on pytest-html by Dave Hunt. Feb 11, 2020 5 - Production/Stable pytest (>=3.0) + :pypi:`pytest-html-report` Enhanced HTML reporting for pytest with categories, specifications, and detailed logging Jun 24, 2025 4 - Beta pytest>=6.0 :pypi:`pytest-html-reporter` Generates a static html report based on pytest framework Feb 13, 2022 N/A N/A :pypi:`pytest-html-report-merger` May 22, 2024 N/A N/A :pypi:`pytest-html-thread` pytest plugin for generating HTML reports Dec 29, 2020 5 - Production/Stable N/A + :pypi:`pytest-htmlx` Custom HTML report plugin for Pytest with charts and tables Sep 09, 2025 4 - Beta pytest :pypi:`pytest-http` Fixture "http" for http requests Aug 22, 2024 N/A pytest :pypi:`pytest-httpbin` Easily test your HTTP library against a local copy of httpbin Sep 18, 2024 5 - Production/Stable pytest; extra == "test" - :pypi:`pytest-httpdbg` A pytest plugin to record HTTP(S) requests with stack trace. May 08, 2025 4 - Beta pytest>=7.0.0 + :pypi:`pytest-httpchain` pytest plugin for HTTP testing using JSON files Aug 16, 2025 5 - Production/Stable N/A + :pypi:`pytest-httpchain-jsonref` JSON reference ($ref) support for pytest-httpchain Aug 16, 2025 N/A N/A + :pypi:`pytest-httpchain-mcp` MCP server for pytest-httpchain Aug 16, 2025 N/A N/A + :pypi:`pytest-httpchain-models` Pydantic models for pytest-httpchain Aug 16, 2025 N/A N/A + :pypi:`pytest-httpchain-templates` Templating support for pytest-httpchain Aug 16, 2025 N/A N/A + :pypi:`pytest-httpchain-userfunc` User functions support for pytest-httpchain Aug 16, 2025 N/A N/A + :pypi:`pytest-httpdbg` A pytest plugin to record HTTP(S) requests with stack trace. 
Oct 26, 2025 4 - Beta pytest>=7.0.0 :pypi:`pytest-http-mocker` Pytest plugin for http mocking (via https://github.com/vilus/mocker) Oct 20, 2019 N/A N/A :pypi:`pytest-httpretty` A thin wrapper of HTTPretty for pytest Feb 16, 2014 3 - Alpha N/A :pypi:`pytest_httpserver` pytest-httpserver is a httpserver for pytest Apr 10, 2025 3 - Alpha N/A @@ -728,8 +782,8 @@ This list contains 1641 plugins. :pypi:`pytest-hue` Visualise PyTest status via your Phillips Hue lights May 09, 2019 N/A N/A :pypi:`pytest-hylang` Pytest plugin to allow running tests written in hylang Mar 28, 2021 N/A pytest :pypi:`pytest-hypo-25` help hypo module for pytest Jan 12, 2020 3 - Alpha N/A - :pypi:`pytest-iam` A fully functional OAUTH2 / OpenID Connect (OIDC) / SCIM server to be used in your testsuite Apr 24, 2025 4 - Beta pytest>=7.0.0 - :pypi:`pytest-ibutsu` A plugin to sent pytest results to an Ibutsu server Feb 06, 2025 4 - Beta pytest>=7.1 + :pypi:`pytest-iam` A fully functional OAUTH2 / OpenID Connect (OIDC) / SCIM server to be used in your testsuite Jul 25, 2025 4 - Beta pytest>=7.0.0 + :pypi:`pytest-ibutsu` A plugin to sent pytest results to an Ibutsu server Oct 21, 2025 4 - Beta pytest :pypi:`pytest-icdiff` use icdiff for better error messages in pytest assertions Dec 05, 2023 4 - Beta pytest :pypi:`pytest-idapro` A pytest plugin for idapython. Allows a pytest setup to run tests outside and inside IDA in an automated manner by runnig pytest inside IDA and by mocking idapython api Nov 03, 2018 N/A N/A :pypi:`pytest-idem` A pytest plugin to help with testing idem projects Dec 13, 2023 5 - Production/Stable N/A @@ -737,8 +791,8 @@ This list contains 1641 plugins. :pypi:`pytest-ignore-flaky` ignore failures from flaky tests (pytest plugin) Apr 20, 2024 5 - Production/Stable pytest>=6.0 :pypi:`pytest-ignore-test-results` A pytest plugin to ignore test results. 
Feb 03, 2025 5 - Production/Stable pytest>=7.0 :pypi:`pytest-image-diff` Dec 31, 2024 3 - Alpha pytest - :pypi:`pytest-image-snapshot` A pytest plugin for image snapshot management and comparison. Jul 01, 2024 4 - Beta pytest>=3.5.0 - :pypi:`pytest-impacted` A pytest plugin that selectively runs tests impacted by codechanges via git introspection, ASL parsing, and dependency graph analysis. May 31, 2025 4 - Beta pytest>=8.0.0 + :pypi:`pytest-image-snapshot` A pytest plugin for image snapshot management and comparison. Jul 16, 2025 4 - Beta pytest>=3.5.0 + :pypi:`pytest-impacted` A pytest plugin that selectively runs tests impacted by codechanges via git introspection, ASL parsing, and dependency graph analysis. Sep 11, 2025 4 - Beta pytest>=8.0.0 :pypi:`pytest-import-check` pytest plugin to check whether Python modules can be imported Jul 19, 2024 3 - Alpha pytest>=8.1 :pypi:`pytest-incremental` an incremental test runner (pytest plugin) Apr 24, 2021 5 - Production/Stable N/A :pypi:`pytest-infinity` Jun 09, 2024 N/A pytest<9.0.0,>=8.0.0 @@ -747,36 +801,36 @@ This list contains 1641 plugins. :pypi:`pytest-info-collector` pytest plugin to collect information from tests May 26, 2019 3 - Alpha N/A :pypi:`pytest-info-plugin` Get executed interface information in pytest interface automation framework Sep 14, 2023 N/A N/A :pypi:`pytest-informative-node` display more node ininformation. 
Apr 25, 2019 4 - Beta N/A - :pypi:`pytest-infrahouse` A set of fixtures to use with pytest Mar 18, 2025 4 - Beta pytest~=8.3 + :pypi:`pytest-infrahouse` A set of fixtures to use with pytest Oct 29, 2025 4 - Beta pytest~=8.3 :pypi:`pytest-infrastructure` pytest stack validation prior to testing executing Apr 12, 2020 4 - Beta N/A :pypi:`pytest-ini` Reuse pytest.ini to store env variables Apr 26, 2022 N/A N/A :pypi:`pytest-initry` Plugin for sending automation test data from Pytest to the initry Apr 30, 2024 N/A pytest<9.0.0,>=8.1.1 :pypi:`pytest-inline` A pytest plugin for writing inline tests Oct 24, 2024 4 - Beta pytest<9.0,>=7.0 :pypi:`pytest-inmanta` A py.test plugin providing fixtures to simplify inmanta modules testing. Apr 09, 2025 5 - Production/Stable pytest - :pypi:`pytest-inmanta-extensions` Inmanta tests package May 27, 2025 5 - Production/Stable N/A - :pypi:`pytest-inmanta-lsm` Common fixtures for inmanta LSM related modules Apr 09, 2025 5 - Production/Stable N/A + :pypi:`pytest-inmanta-extensions` Inmanta tests package Jul 04, 2025 5 - Production/Stable N/A + :pypi:`pytest-inmanta-lsm` Common fixtures for inmanta LSM related modules Aug 26, 2025 5 - Production/Stable N/A :pypi:`pytest-inmanta-srlinux` Pytest library to facilitate end to end testing of inmanta projects Apr 22, 2025 3 - Alpha N/A - :pypi:`pytest-inmanta-yang` Common fixtures used in inmanta yang related modules Feb 22, 2024 4 - Beta pytest + :pypi:`pytest-inmanta-yang` Common fixtures used in inmanta yang related modules Oct 28, 2025 4 - Beta pytest :pypi:`pytest-Inomaly` A simple image diff plugin for pytest Feb 13, 2018 4 - Beta N/A :pypi:`pytest-in-robotframework` The extension enables easy execution of pytest tests within the Robot Framework environment. 
Nov 23, 2024 N/A pytest :pypi:`pytest-insper` Pytest plugin for courses at Insper Mar 21, 2024 N/A pytest :pypi:`pytest-insta` A practical snapshot testing plugin for pytest Feb 19, 2024 N/A pytest (>=7.2.0,<9.0.0) :pypi:`pytest-instafail` pytest plugin to show failures instantly Mar 31, 2023 4 - Beta pytest (>=5) :pypi:`pytest-instrument` pytest plugin to instrument tests Apr 05, 2020 5 - Production/Stable pytest (>=5.1.0) + :pypi:`pytest-insubprocess` A pytest plugin to execute test cases in a subprocess Jul 01, 2025 4 - Beta pytest>=7.4 :pypi:`pytest-integration` Organizing pytests by integration or not Nov 17, 2022 N/A N/A :pypi:`pytest-integration-mark` Automatic integration test marking and excluding plugin for pytest May 22, 2023 N/A pytest (>=5.2) :pypi:`pytest-interactive` A pytest plugin for console based interactive test selection just after the collection phase Nov 30, 2017 3 - Alpha N/A :pypi:`pytest-intercept-remote` Pytest plugin for intercepting outgoing connection requests during pytest run. May 24, 2021 4 - Beta pytest (>=4.6) - :pypi:`pytest-interface-tester` Pytest plugin for checking charm relation interface protocol compliance. Feb 13, 2025 4 - Beta pytest - :pypi:`pytest-invenio` Pytest fixtures for Invenio. May 08, 2025 5 - Production/Stable pytest<9.0.0,>=6 + :pypi:`pytest-interface-tester` Pytest plugin for checking charm relation interface protocol compliance. Oct 09, 2025 4 - Beta pytest + :pypi:`pytest-invenio` Pytest fixtures for Invenio. Jul 09, 2025 5 - Production/Stable pytest<9.0.0,>=6 :pypi:`pytest-involve` Run tests covering a specific file or changeset Feb 02, 2020 4 - Beta pytest (>=3.5.0) :pypi:`pytest-iovis` A Pytest plugin to enable Jupyter Notebook testing with Papermill Nov 06, 2024 4 - Beta pytest>=7.1.0 :pypi:`pytest-ipdb` A py.test plug-in to enable drop to ipdb debugger on test failure. 
Mar 20, 2013 2 - Pre-Alpha N/A :pypi:`pytest-ipynb` THIS PROJECT IS ABANDONED Jan 29, 2019 3 - Alpha N/A :pypi:`pytest-ipynb2` Pytest plugin to run tests in Jupyter Notebooks Mar 09, 2025 N/A pytest - :pypi:`pytest-ipywidgets` May 30, 2025 N/A pytest - :pypi:`pytest-iso` Plugin for pytest to produce test documentation for code audits. May 15, 2025 4 - Beta pytest<9.0.0,>=7.4.0 - :pypi:`pytest-isolate` Run pytest tests in isolated subprocesses May 22, 2025 4 - Beta pytest + :pypi:`pytest-ipywidgets` Oct 24, 2025 N/A pytest + :pypi:`pytest-isolate` Run pytest tests in isolated subprocesses Sep 08, 2025 4 - Beta pytest :pypi:`pytest-isolate-mpi` pytest-isolate-mpi allows for MPI-parallel tests being executed in a segfault and MPI_Abort safe manner Feb 24, 2025 4 - Beta pytest>=5 :pypi:`pytest-isort` py.test plugin to check import ordering using isort Mar 05, 2024 5 - Production/Stable pytest (>=5.0) :pypi:`pytest-it` Pytest plugin to display test reports as a plaintext spec, inspired by Rspec: https://github.com/mattduck/pytest-it. Jan 29, 2024 4 - Beta N/A @@ -791,7 +845,7 @@ This list contains 1641 plugins. :pypi:`pytest-jinja` A plugin to generate customizable jinja-based HTML reports in pytest Oct 04, 2022 3 - Alpha pytest (>=6.2.5,<7.0.0) :pypi:`pytest-jira` py.test JIRA integration plugin, using markers Apr 15, 2025 3 - Alpha N/A :pypi:`pytest-jira-xfail` Plugin skips (xfail) tests if unresolved Jira issue(s) linked Jul 09, 2024 N/A pytest>=7.2.0 - :pypi:`pytest-jira-xray` pytest plugin to integrate tests with JIRA XRAY May 24, 2025 4 - Beta pytest>=6.2.4 + :pypi:`pytest-jira-xray` pytest plugin to integrate tests with JIRA XRAY Oct 11, 2025 4 - Beta pytest>=6.2.4 :pypi:`pytest-job-selection` A pytest plugin for load balancing test suites Jan 30, 2023 4 - Beta pytest (>=3.5.0) :pypi:`pytest-jobserver` Limit parallel tests with posix jobserver. May 15, 2019 5 - Production/Stable pytest :pypi:`pytest-joke` Test failures are better served with humor. 
Oct 08, 2019 4 - Beta pytest (>=4.2.1) @@ -800,13 +854,16 @@ This list contains 1641 plugins. :pypi:`pytest-json-fixtures` JSON output for the --fixtures flag Mar 14, 2023 4 - Beta N/A :pypi:`pytest-jsonlint` UNKNOWN Aug 04, 2016 N/A N/A :pypi:`pytest-json-report` A pytest plugin to report test results as JSON files Mar 15, 2022 4 - Beta pytest (>=3.8.0) - :pypi:`pytest-json-report-wip` A pytest plugin to report test results as JSON files Oct 28, 2023 4 - Beta pytest >=3.8.0 + :pypi:`pytest-json-report-wip` A pytest plugin to report test results as JSON files Jul 23, 2025 4 - Beta pytest >=3.8.0 :pypi:`pytest-jsonschema` A pytest plugin to perform JSONSchema validations Apr 20, 2025 4 - Beta pytest>=6.2.0 + :pypi:`pytest-jsonschema-snapshot` Pytest plugin for automatic JSON Schema generation and validation from examples Sep 13, 2025 N/A pytest :pypi:`pytest-jtr` pytest plugin supporting json test report output Jul 21, 2024 N/A pytest<8.0.0,>=7.1.2 - :pypi:`pytest-jubilant` Add your description here May 14, 2025 N/A pytest>=8.3.5 + :pypi:`pytest-jubilant` Add your description here Jul 28, 2025 N/A pytest>=8.3.5 :pypi:`pytest-junit-xray-xml` Export test results in an augmented JUnit format for usage with Xray () Jan 01, 2025 4 - Beta pytest - :pypi:`pytest-jupyter` A pytest plugin for testing Jupyter libraries and extensions. Apr 04, 2024 4 - Beta pytest>=7.0 + :pypi:`pytest-jupyter` A pytest plugin for testing Jupyter libraries and extensions. 
Oct 16, 2025 4 - Beta pytest>=7.0 :pypi:`pytest-jupyterhub` A reusable JupyterHub pytest plugin Apr 25, 2023 5 - Production/Stable pytest + :pypi:`pytest-jux` A pytest plugin for signing and publishing JUnit XML test reports to the Jux REST API Oct 24, 2025 3 - Alpha pytest>=7.4 + :pypi:`pytest-k8s` Kubernetes-based testing for pytest Jul 07, 2025 N/A pytest>=8.4.1 :pypi:`pytest-kafka` Zookeeper, Kafka server, and Kafka consumer fixtures for Pytest Aug 14, 2024 N/A pytest :pypi:`pytest-kafkavents` A plugin to send pytest events to Kafka Sep 08, 2021 4 - Beta pytest :pypi:`pytest-kairos` Pytest plugin with random number generation, reproducibility, and test repetition Aug 08, 2024 5 - Production/Stable pytest>=5.0.0 @@ -821,27 +878,28 @@ This list contains 1641 plugins. :pypi:`pytest-kookit` Your simple but kooky integration testing with pytest Sep 10, 2024 N/A N/A :pypi:`pytest-koopmans` A plugin for testing the koopmans package Nov 21, 2022 4 - Beta pytest (>=3.5.0) :pypi:`pytest-krtech-common` pytest krtech common library Nov 28, 2016 4 - Beta N/A - :pypi:`pytest-kubernetes` Feb 04, 2025 N/A pytest<9.0.0,>=8.3.0 + :pypi:`pytest-kubernetes` Oct 23, 2025 N/A pytest<9.0.0,>=8.3.0 + :pypi:`pytest_kustomize` Parse and validate kustomize output Oct 02, 2025 N/A N/A :pypi:`pytest-kuunda` pytest plugin to help with test data setup for PySpark tests Feb 25, 2024 4 - Beta pytest >=6.2.0 :pypi:`pytest-kwparametrize` Alternate syntax for @pytest.mark.parametrize with test cases as dictionaries and default value fallbacks Jan 22, 2021 N/A pytest (>=6) :pypi:`pytest-lambda` Define pytest fixtures with lambda functions. May 27, 2024 5 - Production/Stable pytest<9,>=3.6 :pypi:`pytest-lamp` Jan 06, 2017 3 - Alpha N/A :pypi:`pytest-langchain` Pytest-style test runner for langchain agents Feb 26, 2023 N/A pytest :pypi:`pytest-lark` Create fancy and clear HTML test reports. Nov 05, 2023 N/A N/A - :pypi:`pytest-latin-hypercube` Implementation of Latin Hypercube Sampling for pytest. 
Feb 27, 2025 N/A pytest + :pypi:`pytest-latin-hypercube` Implementation of Latin Hypercube Sampling for pytest. Jun 26, 2025 N/A pytest :pypi:`pytest-launchable` Launchable Pytest Plugin Apr 05, 2023 N/A pytest (>=4.2.0) :pypi:`pytest-layab` Pytest fixtures for layab. Oct 05, 2020 5 - Production/Stable N/A :pypi:`pytest-lazy-fixture` It helps to use fixtures in pytest.mark.parametrize Feb 01, 2020 4 - Beta pytest (>=3.2.5) - :pypi:`pytest-lazy-fixtures` Allows you to use fixtures in @pytest.mark.parametrize. May 27, 2025 N/A pytest>=7 + :pypi:`pytest-lazy-fixtures` Allows you to use fixtures in @pytest.mark.parametrize. Sep 16, 2025 N/A pytest>=7 :pypi:`pytest-ldap` python-ldap fixtures for pytest Aug 18, 2020 N/A pytest :pypi:`pytest-leak-finder` Find the test that's leaking before the one that fails Feb 15, 2023 4 - Beta pytest (>=3.5.0) :pypi:`pytest-leaks` A pytest plugin to trace resource leaks. Nov 27, 2019 1 - Planning N/A :pypi:`pytest-leaping` A simple plugin to use with pytest Mar 27, 2024 4 - Beta pytest>=6.2.0 :pypi:`pytest-leo-interface` Pytest extension tool for leo projects. Mar 19, 2025 N/A N/A :pypi:`pytest-level` Select tests of a given level or lower Oct 21, 2019 N/A pytest - :pypi:`pytest-lf-skip` A pytest plugin which makes \`--last-failed\` skip instead of deselect tests. May 26, 2025 4 - Beta pytest>=8.3.5 + :pypi:`pytest-lf-skip` A pytest plugin which makes \`--last-failed\` skip instead of deselect tests. 
Oct 14, 2025 4 - Beta pytest>=8.3.5 :pypi:`pytest-libfaketime` A python-libfaketime plugin for pytest Apr 12, 2024 4 - Beta pytest>=3.0.0 - :pypi:`pytest-libiio` A pytest plugin to manage interfacing with libiio contexts Oct 01, 2024 4 - Beta N/A + :pypi:`pytest-libiio` A pytest plugin for testing libiio based devices Aug 15, 2025 N/A pytest>=3.5.0 :pypi:`pytest-libnotify` Pytest plugin that shows notifications about the test run Apr 02, 2021 3 - Alpha pytest :pypi:`pytest-ligo` Jan 16, 2020 4 - Beta N/A :pypi:`pytest-lineno` A pytest plugin to show the line numbers of test functions Dec 04, 2020 N/A pytest @@ -852,28 +910,31 @@ This list contains 1641 plugins. :pypi:`pytest-litf` A pytest plugin that stream output in LITF format Jan 18, 2021 4 - Beta pytest (>=3.1.1) :pypi:`pytest-litter` Pytest plugin which verifies that tests do not modify file trees. Nov 23, 2023 4 - Beta pytest >=6.1 :pypi:`pytest-live` Live results for pytest Mar 08, 2020 N/A pytest + :pypi:`pytest-llm` pytest-llm: A pytest plugin for testing LLM outputs with success rate thresholds. Oct 03, 2025 3 - Alpha pytest>=7.0.0 :pypi:`pytest-llmeval` A pytest plugin to evaluate/benchmark LLM prompts Mar 19, 2025 4 - Beta pytest>=6.2.0 + :pypi:`pytest-lobster` Pytest to generate lobster tracing files Jul 26, 2025 N/A pytest>=7.0 :pypi:`pytest-local-badge` Generate local badges (shields) reporting your test suite status. Jan 15, 2023 N/A pytest (>=6.1.0) :pypi:`pytest-localftpserver` A PyTest plugin which provides an FTP fixture for your tests May 19, 2024 5 - Production/Stable pytest :pypi:`pytest-localserver` pytest plugin to test server connections locally. Oct 06, 2024 4 - Beta N/A :pypi:`pytest-localstack` Pytest plugin for AWS integration tests Jun 07, 2023 4 - Beta pytest (>=6.0.0,<7.0.0) :pypi:`pytest-lock` pytest-lock is a pytest plugin that allows you to "lock" the results of unit tests, storing them in a local cache. 
This is particularly useful for tests that are resource-intensive or don't need to be run every time. When the tests are run subsequently, pytest-lock will compare the current results with the locked results and issue a warning if there are any discrepancies. Feb 03, 2024 N/A pytest (>=7.4.3,<8.0.0) - :pypi:`pytest-lockable` lockable resource plugin for pytest Jan 24, 2024 5 - Production/Stable pytest + :pypi:`pytest-lockable` lockable resource plugin for pytest Sep 08, 2025 5 - Production/Stable pytest :pypi:`pytest-locker` Used to lock object during testing. Essentially changing assertions from being hard coded to asserting that nothing changed Dec 20, 2024 N/A pytest>=5.4 :pypi:`pytest-log` print log Aug 15, 2021 N/A pytest (>=3.8) :pypi:`pytest-logbook` py.test plugin to capture logbook log messages Nov 23, 2015 5 - Production/Stable pytest (>=2.8) :pypi:`pytest-logdog` Pytest plugin to test logging Jun 15, 2021 1 - Planning pytest (>=6.2.0) :pypi:`pytest-logfest` Pytest plugin providing three logger fixtures with basic or full writing to log files Jul 21, 2019 4 - Beta pytest (>=3.5.0) :pypi:`pytest-logger` Plugin configuring handlers for loggers from Python logging module. 
Mar 10, 2024 5 - Production/Stable pytest (>=3.2) + :pypi:`pytest-logger-db` Add your description here Sep 14, 2025 N/A N/A :pypi:`pytest-logging` Configures logging and allows tweaking the log level with a py.test flag Nov 04, 2015 4 - Beta N/A :pypi:`pytest-logging-end-to-end-test-tool` Sep 23, 2022 N/A pytest (>=7.1.2,<8.0.0) :pypi:`pytest-logging-strict` pytest fixture logging configured from packaged YAML May 20, 2025 3 - Alpha pytest - :pypi:`pytest-logikal` Common testing environment Apr 30, 2025 5 - Production/Stable pytest==8.3.5 + :pypi:`pytest-logikal` Common testing environment Sep 11, 2025 5 - Production/Stable pytest==8.4.2 :pypi:`pytest-log-report` Package for creating a pytest test run reprot Dec 26, 2019 N/A N/A :pypi:`pytest-logscanner` Pytest plugin for logscanner (A logger for python logging outputting to easily viewable (and filterable) html files. Good for people not grep savey, and color higlighting and quickly changing filters might even bye useful for commandline wizards.) Sep 30, 2024 4 - Beta pytest>=8.2.2 :pypi:`pytest-loguru` Pytest Loguru Mar 20, 2024 5 - Production/Stable pytest; extra == "test" :pypi:`pytest-loop` pytest plugin for looping tests Oct 17, 2024 5 - Production/Stable pytest - :pypi:`pytest-lsp` A pytest plugin for end-to-end testing of language servers Nov 23, 2024 3 - Alpha pytest + :pypi:`pytest-lsp` A pytest plugin for end-to-end testing of language servers Oct 25, 2025 5 - Production/Stable pytest>=8.0 :pypi:`pytest-lw-realtime-result` Pytest plugin to generate realtime test results to a file Mar 13, 2025 N/A pytest>=3.5.0 :pypi:`pytest-manifest` PyTest plugin for recording and asserting against a manifest file Apr 07, 2025 N/A pytest :pypi:`pytest-manual-marker` pytest marker for marking manual tests Aug 04, 2022 3 - Alpha pytest>=7 @@ -889,7 +950,7 @@ This list contains 1641 plugins. 
:pypi:`pytest-mark-no-py3` pytest plugin and bowler codemod to help migrate tests to Python 3 May 17, 2019 N/A pytest :pypi:`pytest-marks` UNKNOWN Nov 23, 2012 3 - Alpha N/A :pypi:`pytest-mask-secrets` Pytest plugin to hide sensitive data in test reports Jan 28, 2025 N/A N/A - :pypi:`pytest-matcher` Easy way to match captured \`pytest\` output against expectations stored in files Aug 01, 2024 5 - Production/Stable pytest + :pypi:`pytest-matcher` Easy way to match captured \`pytest\` output against expectations stored in files Aug 07, 2025 5 - Production/Stable pytest :pypi:`pytest-matchers` Matchers for pytest Feb 11, 2025 N/A pytest<9.0,>=7.0 :pypi:`pytest-match-skip` Skip matching marks. Matches partial marks using wildcards. May 15, 2019 4 - Beta pytest (>=4.4.1) :pypi:`pytest-mat-report` this is report Jan 20, 2021 N/A N/A @@ -899,51 +960,57 @@ This list contains 1641 plugins. :pypi:`pytest-maybe-context` Simplify tests with warning and exception cases. Apr 16, 2023 N/A pytest (>=7,<8) :pypi:`pytest-maybe-raises` Pytest fixture for optional exception testing. May 27, 2022 N/A pytest ; extra == 'dev' :pypi:`pytest-mccabe` pytest plugin to run the mccabe code complexity checker. Jul 22, 2020 3 - Alpha pytest (>=5.4.0) + :pypi:`pytest-mcp` Pytest-style framework for evaluating Model Context Protocol (MCP) servers. Jul 07, 2025 N/A pytest>=8.4.0 :pypi:`pytest-md` Plugin for generating Markdown reports for pytest results Jul 11, 2019 3 - Alpha pytest (>=4.2.1) :pypi:`pytest-md-report` A pytest plugin to generate test outcomes reports with markdown table format. 
May 02, 2025 4 - Beta pytest!=6.0.0,<9,>=3.3.2 :pypi:`pytest-meilisearch` Pytest helpers for testing projects using Meilisearch Oct 08, 2024 N/A pytest>=7.4.3 :pypi:`pytest-memlog` Log memory usage during tests May 03, 2023 N/A pytest (>=7.3.0,<8.0.0) :pypi:`pytest-memprof` Estimates memory consumption of test functions Mar 29, 2019 4 - Beta N/A - :pypi:`pytest-memray` A simple plugin to use with pytest Jul 25, 2024 N/A pytest>=7.2 + :pypi:`pytest-memray` A simple plugin to use with pytest Aug 18, 2025 N/A pytest>=7.2 :pypi:`pytest-menu` A pytest plugin for console based interactive test selection just after the collection phase Oct 04, 2017 3 - Alpha pytest (>=2.4.2) :pypi:`pytest-mercurial` pytest plugin to write integration tests for projects using Mercurial Python internals Nov 21, 2020 1 - Planning N/A - :pypi:`pytest-mergify` Pytest plugin for Mergify May 13, 2025 N/A pytest>=6.0.0 + :pypi:`pytest-mergify` Pytest plugin for Mergify Oct 23, 2025 N/A pytest>=6.0.0 :pypi:`pytest-mesh` pytest_mesh插件 Aug 05, 2022 N/A pytest (==7.1.2) :pypi:`pytest-message` Pytest plugin for sending report message of marked tests execution Aug 04, 2022 N/A pytest (>=6.2.5) :pypi:`pytest-messenger` Pytest to Slack reporting plugin Nov 24, 2022 5 - Production/Stable N/A :pypi:`pytest-metadata` pytest plugin for test session metadata Feb 12, 2024 5 - Production/Stable pytest>=7.0.0 + :pypi:`pytest-metaexport` Pytest plugin for exporting custom test metadata to JSON. Jun 24, 2025 N/A pytest>=7.1.0 :pypi:`pytest-metrics` Custom metrics report for pytest Apr 04, 2020 N/A pytest - :pypi:`pytest-mh` Pytest multihost plugin May 15, 2025 N/A pytest + :pypi:`pytest-mfd-config` Pytest Plugin that handles test and topology configs and all their belongings like helper fixtures. Jul 11, 2025 N/A pytest<9,>=7.2.1 + :pypi:`pytest-mfd-logging` Module for handling PyTest logging. 
Jul 09, 2025 N/A pytest<9,>=7.2.1 + :pypi:`pytest-mh` Pytest multihost plugin Oct 16, 2025 N/A pytest :pypi:`pytest-mimesis` Mimesis integration with the pytest test runner Mar 21, 2020 5 - Production/Stable pytest (>=4.2) :pypi:`pytest-mimic` Easily record function calls while testing Apr 24, 2025 4 - Beta pytest>=6.2.0 :pypi:`pytest-minecraft` A pytest plugin for running tests against Minecraft releases Apr 06, 2022 N/A pytest (>=6.0.1) :pypi:`pytest-mini` A plugin to test mp Feb 06, 2023 N/A pytest (>=7.2.0,<8.0.0) - :pypi:`pytest-minio-mock` A pytest plugin for mocking Minio S3 interactions May 06, 2025 N/A pytest>=5.0.0 + :pypi:`pytest-minio-mock` A pytest plugin for mocking Minio S3 interactions Aug 06, 2025 N/A pytest>=5.0.0 + :pypi:`pytest-mirror` A pluggy-based pytest plugin and CLI tool for ensuring your test suite mirrors your source code structure Jul 30, 2025 4 - Beta N/A :pypi:`pytest-missing-fixtures` Pytest plugin that creates missing fixtures Oct 14, 2020 4 - Beta pytest (>=3.5.0) :pypi:`pytest-missing-modules` Pytest plugin to easily fake missing modules Sep 03, 2024 N/A pytest>=8.3.2 :pypi:`pytest-mitmproxy` pytest plugin for mitmproxy tests Nov 13, 2024 N/A pytest>=7.0 :pypi:`pytest-mitmproxy-plugin` Use MITM Proxy in autotests with full control from code Apr 10, 2025 4 - Beta pytest>=7.2.0 :pypi:`pytest-ml` Test your machine learning! May 04, 2019 4 - Beta N/A :pypi:`pytest-mocha` pytest plugin to display test execution output like a mochajs Apr 02, 2020 4 - Beta pytest (>=5.4.0) - :pypi:`pytest-mock` Thin-wrapper around the mock package for easier use with pytest May 26, 2025 5 - Production/Stable pytest>=6.2.5 + :pypi:`pytest-mock` Thin-wrapper around the mock package for easier use with pytest Sep 16, 2025 5 - Production/Stable pytest>=6.2.5 :pypi:`pytest-mock-api` A mock API server with configurable routes and responses available as a fixture. 
Feb 13, 2019 1 - Planning pytest (>=4.0.0) :pypi:`pytest-mock-generator` A pytest fixture wrapper for https://pypi.org/project/mock-generator May 16, 2022 5 - Production/Stable N/A :pypi:`pytest-mock-helper` Help you mock HTTP call and generate mock code Jan 24, 2018 N/A pytest :pypi:`pytest-mockito` Base fixtures for mockito Jul 11, 2018 4 - Beta N/A :pypi:`pytest-mockredis` An in-memory mock of a Redis server that runs in a separate thread. This is to be used for unit-tests that require a Redis database. Jan 02, 2018 2 - Pre-Alpha N/A - :pypi:`pytest-mock-resources` A pytest plugin for easily instantiating reproducible mock resources. Mar 10, 2025 N/A pytest>=1.0 + :pypi:`pytest-mock-resources` A pytest plugin for easily instantiating reproducible mock resources. Sep 17, 2025 N/A pytest>=1.0 :pypi:`pytest-mock-server` Mock server plugin for pytest Jan 09, 2022 4 - Beta pytest (>=3.5.0) :pypi:`pytest-mockservers` A set of fixtures to test your requests to HTTP/UDP servers Mar 31, 2020 N/A pytest (>=4.3.0) :pypi:`pytest-mocktcp` A pytest plugin for testing TCP clients Oct 11, 2022 N/A pytest :pypi:`pytest-modalt` Massively distributed pytest runs using modal.com Feb 27, 2024 4 - Beta pytest >=6.2.0 + :pypi:`pytest-modern` A more modern pytest Aug 19, 2025 4 - Beta pytest>=8 :pypi:`pytest-modified-env` Pytest plugin to fail a test if it leaves modified \`os.environ\` afterwards. Jan 29, 2022 4 - Beta N/A :pypi:`pytest-modifyjunit` Utility for adding additional properties to junit xml for IDM QE Jan 10, 2019 N/A N/A :pypi:`pytest-molecule` PyTest Molecule Plugin :: discover and run molecule tests Mar 29, 2022 5 - Production/Stable pytest (>=7.0.0) :pypi:`pytest-molecule-JC` PyTest Molecule Plugin :: discover and run molecule tests Jul 18, 2023 5 - Production/Stable pytest (>=7.0.0) - :pypi:`pytest-mongo` MongoDB process and client fixtures plugin for Pytest. 
Feb 28, 2025 5 - Production/Stable pytest>=6.2 + :pypi:`pytest-mongo` MongoDB process and client fixtures plugin for Pytest. Aug 01, 2025 5 - Production/Stable pytest>=6.2 :pypi:`pytest-mongodb` pytest plugin for MongoDB fixtures May 16, 2023 5 - Production/Stable N/A :pypi:`pytest-mongodb-nono` pytest plugin for MongoDB Jan 07, 2025 N/A N/A - :pypi:`pytest-mongodb-ry` pytest plugin for MongoDB Jan 21, 2025 N/A N/A + :pypi:`pytest-mongodb-ry` pytest plugin for MongoDB Sep 25, 2025 N/A N/A :pypi:`pytest-monitor` Pytest plugin for analyzing resource usage. Jun 25, 2023 5 - Production/Stable pytest :pypi:`pytest-monkeyplus` pytest's monkeypatch subclass with extra functionalities Sep 18, 2012 5 - Production/Stable N/A :pypi:`pytest-monkeytype` pytest-monkeytype: Generate Monkeytype annotations from your pytest tests. Jul 29, 2020 4 - Beta N/A @@ -955,9 +1022,9 @@ This list contains 1641 plugins. :pypi:`pytest-mpiexec` pytest plugin for running individual tests with mpiexec Jul 29, 2024 3 - Alpha pytest :pypi:`pytest-mpl` pytest plugin to help with testing figures output from Matplotlib Feb 14, 2024 4 - Beta pytest :pypi:`pytest-mproc` low-startup-overhead, scalable, distributed-testing pytest plugin Nov 15, 2022 4 - Beta pytest (>=6) - :pypi:`pytest-mqtt` pytest-mqtt supports testing systems based on MQTT Jan 07, 2025 5 - Production/Stable pytest<9; extra == "test" + :pypi:`pytest-mqtt` pytest-mqtt supports testing systems based on MQTT Sep 10, 2025 5 - Production/Stable pytest<9; extra == "test" :pypi:`pytest-multihost` Utility for writing multi-host tests for pytest Apr 07, 2020 4 - Beta N/A - :pypi:`pytest-multilog` Multi-process logs handling and other helpers for pytest Jan 17, 2023 N/A pytest + :pypi:`pytest-multilog` Multi-process logs handling and other helpers for pytest Sep 21, 2025 N/A pytest :pypi:`pytest-multithreading` a pytest plugin for th and concurrent testing Aug 05, 2024 N/A N/A :pypi:`pytest-multithreading-allure` pytest_multithreading_allure Nov 
25, 2022 N/A N/A :pypi:`pytest-mutagen` Add the mutation testing feature to pytest Jul 24, 2020 N/A pytest (>=5.4) @@ -970,12 +1037,13 @@ This list contains 1641 plugins. :pypi:`pytest-mypy-runner` Run the mypy static type checker as a pytest test case Apr 23, 2024 N/A pytest>=8.0 :pypi:`pytest-mypy-testing` Pytest plugin to check mypy output. Mar 04, 2024 N/A pytest>=7,<9 :pypi:`pytest-mysql` MySQL process and client fixtures for pytest Dec 10, 2024 5 - Production/Stable pytest>=6.2 + :pypi:`pytest-nb` Seedable Jupyter Notebook testing tool Jul 26, 2025 N/A pytest==8.4.1 :pypi:`pytest-ndb` pytest notebook debugger Apr 28, 2024 N/A pytest :pypi:`pytest-needle` pytest plugin for visual testing websites using selenium Dec 10, 2018 4 - Beta pytest (<5.0.0,>=3.0.0) :pypi:`pytest-neo` pytest-neo is a plugin for pytest that shows tests like screen of Matrix. Jan 08, 2022 3 - Alpha pytest (>=6.2.0) :pypi:`pytest-neos` Pytest plugin for neos Sep 10, 2024 5 - Production/Stable pytest<8.0,>=7.2; extra == "dev" :pypi:`pytest-netconf` A pytest plugin that provides a mock NETCONF (RFC6241/RFC6242) server for local testing. Jan 06, 2025 N/A N/A - :pypi:`pytest-netdut` "Automated software testing for switches using pytest" Apr 11, 2025 N/A pytest>=3.5.0 + :pypi:`pytest-netdut` "Automated software testing for switches using pytest" Oct 09, 2025 N/A pytest>=3.5.0 :pypi:`pytest-network` A simple plugin to disable network on socket level. May 07, 2020 N/A N/A :pypi:`pytest-network-endpoints` Network endpoints plugin for pytest Mar 06, 2022 N/A pytest :pypi:`pytest-never-sleep` pytest plugin helps to avoid adding tests without mock \`time.sleep\` May 05, 2021 3 - Alpha pytest (>=3.5.1) @@ -983,7 +1051,7 @@ This list contains 1641 plugins. 
:pypi:`pytest-nginx-iplweb` nginx fixture for pytest - iplweb temporary fork Mar 01, 2019 5 - Production/Stable N/A :pypi:`pytest-ngrok` Jan 20, 2022 3 - Alpha pytest :pypi:`pytest-ngsfixtures` pytest ngs fixtures Sep 06, 2019 2 - Pre-Alpha pytest (>=5.0.0) - :pypi:`pytest-nhsd-apim` Pytest plugin accessing NHSDigital's APIM proxies Apr 01, 2025 N/A pytest<9.0.0,>=8.2.0 + :pypi:`pytest-nhsd-apim` Pytest plugin accessing NHSDigital's APIM proxies Oct 29, 2025 N/A pytest<9.0.0,>=8.2.0 :pypi:`pytest-nice` A pytest plugin that alerts user of failed test cases with screen notifications May 04, 2019 4 - Beta pytest :pypi:`pytest-nice-parametrize` A small snippet for nicer PyTest's Parametrize Apr 17, 2021 5 - Production/Stable N/A :pypi:`pytest_nlcov` Pytest plugin to get the coverage of the new lines (based on git diff) only Aug 05, 2024 N/A N/A @@ -991,7 +1059,7 @@ This list contains 1641 plugins. :pypi:`pytest-node-dependency` pytest plugin for controlling execution flow Apr 10, 2024 5 - Production/Stable N/A :pypi:`pytest-nodev` Test-driven source code search for Python. Jul 21, 2016 4 - Beta pytest (>=2.8.1) :pypi:`pytest-nogarbage` Ensure a test produces no garbage Feb 24, 2025 5 - Production/Stable pytest>=4.6.0 - :pypi:`pytest-no-problem` Pytest plugin to tell you when there's no problem Apr 05, 2025 N/A pytest>=7.0 + :pypi:`pytest-no-problem` Pytest plugin to tell you when there's no problem Oct 18, 2025 N/A pytest>=7.0 :pypi:`pytest-nose-attrib` pytest plugin to use nose @attrib marks decorators and pick tests based on attributes and partially uses nose-attrib plugin approach Aug 13, 2023 N/A N/A :pypi:`pytest_notebook` A pytest plugin for testing Jupyter Notebooks. Nov 28, 2023 4 - Beta pytest>=3.5.0 :pypi:`pytest-notice` Send pytest execution result email Nov 05, 2020 N/A N/A @@ -1002,16 +1070,18 @@ This list contains 1641 plugins. 
:pypi:`pytest-notion` A PyTest Reporter to send test runs to Notion.so Aug 07, 2019 N/A N/A :pypi:`pytest-nunit` A pytest plugin for generating NUnit3 test result XML output Feb 26, 2024 5 - Production/Stable N/A :pypi:`pytest-oar` PyTest plugin for the OAR testing framework May 12, 2025 N/A pytest>=6.0.1 - :pypi:`pytest-oarepo` Feb 14, 2025 N/A pytest>=7.1.2; extra == "base" + :pypi:`pytest-oarepo` Oct 23, 2025 N/A pytest>=7.1.2; extra == "dev" :pypi:`pytest-object-getter` Import any object from a 3rd party module while mocking its namespace on demand. Jul 31, 2022 5 - Production/Stable pytest :pypi:`pytest-ochrus` pytest results data-base and HTML reporter Feb 21, 2018 4 - Beta N/A :pypi:`pytest-odc` A pytest plugin for simplifying ODC database tests Aug 04, 2023 4 - Beta pytest (>=3.5.0) :pypi:`pytest-odoo` py.test plugin to run Odoo tests May 20, 2025 5 - Production/Stable pytest>=8 :pypi:`pytest-odoo-fixtures` Project description Jun 25, 2019 N/A N/A + :pypi:`pytest-oduit` py.test plugin to run Odoo tests Oct 06, 2025 5 - Production/Stable pytest>=8 :pypi:`pytest-oerp` pytest plugin to test OpenERP modules Feb 28, 2012 3 - Alpha N/A :pypi:`pytest-offline` Mar 09, 2023 1 - Planning pytest (>=7.0.0,<8.0.0) :pypi:`pytest-ogsm-plugin` 针对特定项目定制化插件,优化了pytest报告展示方式,并添加了项目所需特定参数 May 16, 2023 N/A N/A :pypi:`pytest-ok` The ultimate pytest output plugin Apr 01, 2019 4 - Beta N/A + :pypi:`pytest-once` xdist-safe 'run once' fixture decorator for pytest (setup/teardown across workers) Oct 10, 2025 3 - Alpha pytest>=8.4.0 :pypi:`pytest-only` Use @pytest.mark.only to run a single test May 27, 2024 5 - Production/Stable pytest<9,>=3.6.0 :pypi:`pytest-oof` A Pytest plugin providing structured, programmatic access to a test run's results Dec 11, 2023 4 - Beta N/A :pypi:`pytest-oot` Run object-oriented tests in a simple format Sep 18, 2016 4 - Beta N/A @@ -1019,9 +1089,9 @@ This list contains 1641 plugins. 
:pypi:`pytest-open-html` Auto-open HTML reports after pytest runs Mar 31, 2025 N/A pytest>=6.0 :pypi:`pytest-opentelemetry` A pytest plugin for instrumenting test runs via OpenTelemetry Apr 25, 2025 N/A pytest :pypi:`pytest-opentmi` pytest plugin for publish results to opentmi Mar 22, 2025 5 - Production/Stable pytest>=5.0 - :pypi:`pytest-operator` Fixtures for Operators Sep 28, 2022 N/A pytest + :pypi:`pytest-operator` Fixtures for Charmed Operators Sep 28, 2022 N/A pytest :pypi:`pytest-optional` include/exclude values of fixtures in pytest Oct 07, 2015 N/A N/A - :pypi:`pytest-optional-tests` Easy declaration of optional tests (i.e., that are not run by default) Apr 15, 2025 4 - Beta pytest; extra == "dev" + :pypi:`pytest-optional-tests` Easy declaration of optional tests (i.e., that are not run by default) Jul 21, 2025 4 - Beta pytest; extra == "dev" :pypi:`pytest-orchestration` A pytest plugin for orchestrating tests Jul 18, 2019 N/A N/A :pypi:`pytest-order` pytest plugin to run your tests in a specific order Aug 22, 2024 5 - Production/Stable pytest>=5.0; python_version < "3.10" :pypi:`pytest-ordered` Declare the order in which tests should run in your pytest.ini Oct 07, 2024 N/A pytest>=6.2.0 @@ -1030,6 +1100,7 @@ This list contains 1641 plugins. :pypi:`pytest-osxnotify` OS X notifications for py.test results. May 15, 2015 N/A N/A :pypi:`pytest-ot` A pytest plugin for instrumenting test runs via OpenTelemetry Mar 21, 2024 N/A pytest; extra == "dev" :pypi:`pytest-otel` OpenTelemetry plugin for Pytest Apr 24, 2025 N/A pytest==8.3.5 + :pypi:`pytest-otelmark` Pytest plugin for otelmark. Sep 14, 2025 3 - Alpha pytest>=8.3.5 :pypi:`pytest-override-env-var` Pytest mark to override a value of an environment variable. Feb 25, 2023 N/A N/A :pypi:`pytest-owner` Add owner mark for tests Aug 19, 2024 N/A pytest :pypi:`pytest-pact` A simple plugin to use with pytest Jan 07, 2019 4 - Beta N/A @@ -1041,7 +1112,7 @@ This list contains 1641 plugins. 
:pypi:`pytest-param` pytest plugin to test all, first, last or random params Sep 11, 2016 4 - Beta pytest (>=2.6.0) :pypi:`pytest-parametrization` Simpler PyTest parametrization May 22, 2022 5 - Production/Stable N/A :pypi:`pytest-parametrization-annotation` A pytest library for parametrizing tests using type hints. Dec 10, 2024 5 - Production/Stable pytest>=7 - :pypi:`pytest-parametrize` pytest decorator for parametrizing test cases in a dict-way Nov 10, 2024 5 - Production/Stable pytest<9.0.0,>=8.3.0 + :pypi:`pytest-parametrize` pytest decorator for parametrizing test cases in a dict-way Sep 25, 2025 5 - Production/Stable pytest<9.0.0,>=8.3.0 :pypi:`pytest-parametrize-cases` A more user-friendly way to write parametrized tests. Mar 13, 2022 N/A pytest (>=6.1.2) :pypi:`pytest-parametrized` Pytest decorator for parametrizing tests with default iterables. Dec 21, 2024 5 - Production/Stable pytest :pypi:`pytest-parametrize-suite` A simple pytest extension for creating a named test suite. Jan 19, 2023 5 - Production/Stable pytest @@ -1063,9 +1134,9 @@ This list contains 1641 plugins. :pypi:`pytest-percents` Mar 16, 2024 N/A N/A :pypi:`pytest-perf` Run performance tests against the mainline code. May 20, 2024 5 - Production/Stable pytest!=8.1.*,>=6; extra == "testing" :pypi:`pytest-performance` A simple plugin to ensure the execution of critical sections of code has not been impacted Sep 11, 2020 5 - Production/Stable pytest (>=3.7.0) - :pypi:`pytest-performancetotal` A performance plugin for pytest Feb 01, 2025 5 - Production/Stable N/A + :pypi:`pytest-performancetotal` A performance plugin for pytest Aug 05, 2025 5 - Production/Stable N/A :pypi:`pytest-persistence` Pytest tool for persistent objects Aug 21, 2024 N/A N/A - :pypi:`pytest-pexpect` Pytest pexpect plugin. Aug 13, 2024 4 - Beta pytest>=6.2.0 + :pypi:`pytest-pexpect` Pytest pexpect plugin. 
Sep 10, 2025 4 - Beta pytest>=6.2.0 :pypi:`pytest-pg` A tiny plugin for pytest which runs PostgreSQL in Docker May 18, 2025 5 - Production/Stable pytest>=7.4 :pypi:`pytest-pgsql` Pytest plugins and helpers for tests using a Postgres database. May 13, 2020 5 - Production/Stable pytest (>=3.0.0) :pypi:`pytest-phmdoctest` pytest plugin to test Python examples in Markdown using phmdoctest. Apr 15, 2022 4 - Beta pytest (>=5.4.3) @@ -1085,18 +1156,20 @@ This list contains 1641 plugins. :pypi:`pytest-platform-markers` Markers for pytest to skip tests on specific platforms Sep 09, 2019 4 - Beta pytest (>=3.6.0) :pypi:`pytest-play` pytest plugin that let you automate actions and assertions with test metrics reporting executing plain YAML files Jun 12, 2019 5 - Production/Stable N/A :pypi:`pytest-playbook` Pytest plugin for reading playbooks. Jan 21, 2021 3 - Alpha pytest (>=6.1.2,<7.0.0) - :pypi:`pytest-playwright` A pytest wrapper with fixtures for Playwright to automate web browsers Jan 31, 2025 N/A pytest<9.0.0,>=6.2.4 + :pypi:`pytest-playwright` A pytest wrapper with fixtures for Playwright to automate web browsers Sep 08, 2025 N/A pytest<9.0.0,>=6.2.4 :pypi:`pytest_playwright_async` ASYNC Pytest plugin for Playwright Sep 28, 2024 N/A N/A - :pypi:`pytest-playwright-asyncio` A pytest wrapper with async fixtures for Playwright to automate web browsers Jan 31, 2025 N/A pytest<9.0.0,>=6.2.4 - :pypi:`pytest-playwright-axe` An axe-core integration for accessibility testing using Playwright Python. Mar 27, 2025 4 - Beta N/A + :pypi:`pytest-playwright-asyncio` A pytest wrapper with async fixtures for Playwright to automate web browsers Sep 08, 2025 N/A pytest<9.0.0,>=6.2.4 + :pypi:`pytest-playwright-axe` An axe-core integration for accessibility testing using Playwright Python. 
Nov 01, 2025 5 - Production/Stable N/A :pypi:`pytest-playwright-enhanced` A pytest plugin for playwright python Mar 24, 2024 N/A pytest<9.0.0,>=8.0.0 :pypi:`pytest-playwrights` A pytest wrapper with fixtures for Playwright to automate web browsers Dec 02, 2021 N/A N/A :pypi:`pytest-playwright-snapshot` A pytest wrapper for snapshot testing with playwright Aug 19, 2021 N/A N/A :pypi:`pytest-playwright-visual` A pytest fixture for visual testing with Playwright Apr 28, 2022 N/A N/A - :pypi:`pytest-playwright-visual-snapshot` Easy pytest visual regression testing using playwright Apr 15, 2025 N/A N/A - :pypi:`pytest-plone` Pytest plugin to test Plone addons Mar 27, 2025 3 - Alpha pytest<8.0.0 + :pypi:`pytest-playwright-visual-snapshot` Easy pytest visual regression testing using playwright Jul 02, 2025 N/A N/A + :pypi:`pytest-pl-grader` A pytest plugin for autograding Python code. Designed for use with the PrairieLearn platform. Nov 01, 2025 3 - Alpha pytest + :pypi:`pytest-plone` Pytest plugin to test Plone addons Jun 11, 2025 3 - Alpha pytest<8.0.0 :pypi:`pytest-plt` Fixtures for quickly making Matplotlib plots in tests Jan 17, 2024 5 - Production/Stable pytest :pypi:`pytest-plugin-helpers` A plugin to help developing and testing other plugins Nov 23, 2019 4 - Beta pytest (>=3.5.0) + :pypi:`pytest-plugins` A Python package for managing pytest plugins. Oct 23, 2025 N/A pytest :pypi:`pytest-plus` PyTest Plus Plugin :: extends pytest functionality Feb 02, 2025 5 - Production/Stable pytest>=7.4.2 :pypi:`pytest-pmisc` Mar 21, 2019 5 - Production/Stable N/A :pypi:`pytest-pogo` Pytest plugin for pogo-migrate May 05, 2025 4 - Beta pytest<9,>=7 @@ -1105,6 +1178,7 @@ This list contains 1641 plugins. 
:pypi:`pytest-polarion-cfme` pytest plugin for collecting test cases and recording test results Nov 13, 2017 3 - Alpha N/A :pypi:`pytest-polarion-collect` pytest plugin for collecting polarion test cases data Jun 18, 2020 3 - Alpha pytest :pypi:`pytest-polecat` Provides Polecat pytest fixtures Aug 12, 2019 4 - Beta N/A + :pypi:`pytest-polymeric-report` A polymeric test report plugin for pytest Oct 20, 2025 N/A N/A :pypi:`pytest-ponyorm` PonyORM in Pytest Oct 31, 2018 N/A pytest (>=3.1.1) :pypi:`pytest-poo` Visualize your crappy tests Mar 25, 2021 5 - Production/Stable pytest (>=2.3.4) :pypi:`pytest-poo-fail` Visualize your failed tests with poo Feb 12, 2015 5 - Production/Stable N/A @@ -1117,10 +1191,10 @@ This list contains 1641 plugins. :pypi:`pytest-power` pytest plugin with powerful fixtures Dec 31, 2020 N/A pytest (>=5.4) :pypi:`pytest-powerpack` A plugin containing extra batteries for pytest Jan 04, 2025 N/A pytest<9.0.0,>=8.1.1 :pypi:`pytest-prefer-nested-dup-tests` A Pytest plugin to drop duplicated tests during collection, but will prefer keeping nested packages. 
Apr 27, 2022 4 - Beta pytest (>=7.1.1,<8.0.0) - :pypi:`pytest-pretty` pytest plugin for printing summary data as I want it Apr 05, 2023 5 - Production/Stable pytest>=7 + :pypi:`pytest-pretty` pytest plugin for printing summary data as I want it Jun 04, 2025 5 - Production/Stable pytest>=7 :pypi:`pytest-pretty-terminal` pytest plugin for generating prettier terminal output Jan 31, 2022 N/A pytest (>=3.4.1) :pypi:`pytest-pride` Minitest-style test colors Apr 02, 2016 3 - Alpha N/A - :pypi:`pytest-print` pytest-print adds the printer fixture you can use to print messages to the user (directly to the pytest runner, not stdout) Feb 25, 2025 5 - Production/Stable pytest>=8.3.2 + :pypi:`pytest-print` pytest-print adds the printer fixture you can use to print messages to the user (directly to the pytest runner, not stdout) Oct 09, 2025 5 - Production/Stable pytest>=8.4.2 :pypi:`pytest-priority` pytest plugin for add priority for tests Aug 19, 2024 N/A pytest :pypi:`pytest-proceed` Oct 01, 2024 N/A pytest :pypi:`pytest-profiles` pytest plugin for configuration profiles Dec 09, 2021 4 - Beta pytest (>=3.7.0) @@ -1129,6 +1203,7 @@ This list contains 1641 plugins. :pypi:`pytest-prometheus` Report test pass / failures to a Prometheus PushGateway Oct 03, 2017 N/A N/A :pypi:`pytest-prometheus-pushgateway` Pytest report plugin for Zulip Sep 27, 2022 5 - Production/Stable pytest :pypi:`pytest-prometheus-pushgw` Pytest plugin to export test metrics to Prometheus Pushgateway May 19, 2025 N/A pytest>=6.0.0 + :pypi:`pytest-proofy` Pytest plugin for Proofy test reporting Oct 17, 2025 4 - Beta pytest>=7.0.0 :pypi:`pytest-prosper` Test helpers for Prosper projects Sep 24, 2018 N/A N/A :pypi:`pytest-prysk` Pytest plugin for prysk Dec 10, 2024 4 - Beta pytest>=7.3.2 :pypi:`pytest-pspec` A rspec format reporter for Python ptest Jun 02, 2020 4 - Beta pytest (>=3.0.0) @@ -1142,39 +1217,43 @@ This list contains 1641 plugins. 
:pypi:`pytest-pusher` pytest plugin for push report to minio Jan 06, 2023 5 - Production/Stable pytest (>=3.6) :pypi:`pytest-py125` Dec 03, 2022 N/A N/A :pypi:`pytest-pycharm` Plugin for py.test to enter PyCharm debugger on uncaught exceptions Aug 13, 2020 5 - Production/Stable pytest (>=2.3) - :pypi:`pytest-pycodestyle` pytest plugin to run pycodestyle Oct 10, 2024 3 - Alpha pytest>=7.0 + :pypi:`pytest-pycodestyle` pytest plugin to run pycodestyle Jul 20, 2025 3 - Alpha pytest>=7.0 :pypi:`pytest-pydantic-schema-sync` Pytest plugin to synchronise Pydantic model schemas with JSONSchema files Aug 29, 2024 N/A pytest>=6 :pypi:`pytest-pydev` py.test plugin to connect to a remote debug server with PyDev or PyCharm. Nov 15, 2017 3 - Alpha N/A :pypi:`pytest-pydocstyle` pytest plugin to run pydocstyle Oct 09, 2024 3 - Alpha pytest>=7.0 + :pypi:`pytest-pylembic` This package provides pytest plugin for validating Alembic migrations using the pylembic package. Jul 22, 2025 3 - Alpha N/A :pypi:`pytest-pylint` pytest plugin to check source code with pylint Oct 06, 2023 5 - Production/Stable pytest >=7.0 :pypi:`pytest-pylyzer` A pytest plugin for pylyzer Feb 15, 2025 4 - Beta N/A :pypi:`pytest-pymysql-autorecord` Record PyMySQL queries and mock with the stored data. 
Sep 02, 2022 N/A N/A - :pypi:`pytest-pyodide` Pytest plugin for testing applications that use Pyodide Nov 23, 2024 N/A pytest + :pypi:`pytest-pyodide` Pytest plugin for testing applications that use Pyodide Oct 24, 2025 N/A pytest :pypi:`pytest-pypi` Easily test your HTTP library against a local copy of pypi Mar 04, 2018 3 - Alpha N/A :pypi:`pytest-pypom-navigation` Core engine for cookiecutter-qa and pytest-play packages Feb 18, 2019 4 - Beta pytest (>=3.0.7) :pypi:`pytest-pyppeteer` A plugin to run pyppeteer in pytest Apr 28, 2022 N/A pytest (>=6.2.5,<7.0.0) :pypi:`pytest-pyq` Pytest fixture "q" for pyq Mar 10, 2020 5 - Production/Stable N/A - :pypi:`pytest-pyramid` pytest_pyramid - provides fixtures for testing pyramid applications with pytest test suite Oct 24, 2024 5 - Production/Stable pytest + :pypi:`pytest-pyramid` pytest_pyramid - provides fixtures for testing pyramid applications with pytest test suite Sep 30, 2025 5 - Production/Stable pytest :pypi:`pytest-pyramid-server` Pyramid server fixture for py.test Oct 17, 2024 5 - Production/Stable pytest :pypi:`pytest-pyreport` PyReport is a lightweight reporting plugin for Pytest that provides concise HTML report May 05, 2024 N/A pytest :pypi:`pytest-pyright` Pytest plugin for type checking code with Pyright Jan 26, 2024 4 - Beta pytest >=7.0.0 + :pypi:`pytest-pyspark-plugin` Pytest pyspark plugin (p3) Jul 28, 2025 4 - Beta pytest>=8.0.0 :pypi:`pytest-pyspec` A plugin that transforms the pytest output into a result similar to the RSpec. It enables the use of docstrings to display results and also enables the use of the prefixes "describe", "with" and "it". Aug 17, 2024 N/A pytest<9.0.0,>=8.3.2 :pypi:`pytest-pystack` Plugin to run pystack after a timeout for a test suite. 
Nov 16, 2024 N/A pytest>=3.5.0 + :pypi:`pytest-pytestdb` Add your description here Sep 14, 2025 N/A N/A :pypi:`pytest-pytestrail` Pytest plugin for interaction with TestRail Aug 27, 2020 4 - Beta pytest (>=3.8.0) - :pypi:`pytest-pythonhashseed` Pytest plugin to set PYTHONHASHSEED env var. Feb 25, 2024 4 - Beta pytest>=3.0.0 + :pypi:`pytest-pytestrail-internal` Pytest plugin for interaction with TestRail, Pytest plugin for TestRail (internal fork from: https://github.com/tolstislon/pytest-pytestrail with PR #25 fix) Jun 12, 2025 4 - Beta pytest>=3.8.0 + :pypi:`pytest-pythonhashseed` Pytest plugin to set PYTHONHASHSEED env var. Sep 28, 2025 4 - Beta pytest>=3.0.0 :pypi:`pytest-pythonpath` pytest plugin for adding to the PYTHONPATH from command line or configs. Feb 10, 2022 5 - Production/Stable pytest (<7,>=2.5.2) :pypi:`pytest-python-test-engineer-sort` Sort plugin for Pytest May 13, 2024 N/A pytest>=6.2.0 :pypi:`pytest-pytorch` pytest plugin for a better developer experience when working with the PyTorch test suite May 25, 2021 4 - Beta pytest :pypi:`pytest-pyvenv` A package for create venv in tests Feb 27, 2024 N/A pytest ; extra == 'test' - :pypi:`pytest-pyvista` Pytest-pyvista package Sep 29, 2023 4 - Beta pytest>=3.5.0 + :pypi:`pytest-pyvista` Pytest-pyvista package. Oct 06, 2025 4 - Beta pytest>=6.2.0 :pypi:`pytest-qanova` A pytest plugin to collect test information Sep 05, 2024 3 - Alpha pytest - :pypi:`pytest-qaseio` Pytest plugin for Qase.io integration Mar 18, 2025 5 - Production/Stable pytest<9.0.0,>=7.2.2 + :pypi:`pytest-qaseio` Pytest plugin for Qase.io integration Oct 01, 2025 5 - Production/Stable pytest<9.0.0,>=7.2.2 :pypi:`pytest-qasync` Pytest support for qasync. Jul 12, 2021 4 - Beta pytest (>=5.4.0) :pypi:`pytest-qatouch` Pytest plugin for uploading test results to your QA Touch Testrun. 
Feb 14, 2023 4 - Beta pytest (>=6.2.0) :pypi:`pytest-qgis` A pytest plugin for testing QGIS python plugins Jun 14, 2024 5 - Production/Stable pytest>=6.0 :pypi:`pytest-qml` Run QML Tests with pytest Dec 02, 2020 4 - Beta pytest (>=6.0.0) :pypi:`pytest-qr` pytest plugin to generate test result QR codes Nov 25, 2021 4 - Beta N/A - :pypi:`pytest-qt` pytest support for PyQt and PySide applications Feb 07, 2024 5 - Production/Stable pytest + :pypi:`pytest-qt` pytest support for PyQt and PySide applications Jul 01, 2025 5 - Production/Stable pytest :pypi:`pytest-qt-app` QT app fixture for py.test Oct 17, 2024 5 - Production/Stable pytest :pypi:`pytest-quarantine` A plugin for pytest to manage expected test failures Nov 24, 2019 5 - Production/Stable pytest (>=4.6) :pypi:`pytest-quickcheck` pytest plugin to generate random data inspired by QuickCheck Nov 05, 2022 4 - Beta pytest (>=4.0) @@ -1188,14 +1267,16 @@ This list contains 1641 plugins. :pypi:`pytest-raisesregexp` Simple pytest plugin to look for regex in Exceptions Dec 18, 2015 N/A N/A :pypi:`pytest-raisin` Plugin enabling the use of exception instances with pytest.raises Feb 06, 2022 N/A pytest :pypi:`pytest-random` py.test plugin to randomize tests Apr 28, 2013 3 - Alpha N/A - :pypi:`pytest-randomly` Pytest plugin to randomly order tests and control random.seed. Oct 25, 2024 5 - Production/Stable pytest + :pypi:`pytest-randomly` Pytest plugin to randomly order tests and control random.seed. 
Sep 12, 2025 5 - Production/Stable pytest :pypi:`pytest-randomness` Pytest plugin about random seed management May 30, 2019 3 - Alpha N/A :pypi:`pytest-random-num` Randomise the order in which pytest tests are run with some control over the randomness Oct 19, 2020 5 - Production/Stable N/A - :pypi:`pytest-random-order` Randomise the order in which pytest tests are run with some control over the randomness Jan 20, 2024 5 - Production/Stable pytest >=3.0.0 + :pypi:`pytest-random-order` Randomise the order in which pytest tests are run with some control over the randomness Jun 22, 2025 5 - Production/Stable pytest :pypi:`pytest-ranking` A Pytest plugin for faster fault detection via regression test prioritization Apr 08, 2025 4 - Beta pytest>=7.4.3 - :pypi:`pytest-readme` Test your README.md file Sep 02, 2022 5 - Production/Stable N/A - :pypi:`pytest-reana` Pytest fixtures for REANA. Sep 04, 2024 3 - Alpha N/A - :pypi:`pytest-recorder` Pytest plugin, meant to facilitate unit tests writing for tools consumming Web APIs. Mar 31, 2025 N/A N/A + :pypi:`pytest-rca-report` Interactive RCA report generator for pytest runs, with AI-based analysis and visual dashboard Aug 04, 2025 N/A N/A + :pypi:`pytest-readme` Test your README.md file Aug 01, 2025 5 - Production/Stable pytest + :pypi:`pytest-reana` Pytest fixtures for REANA. Oct 10, 2025 3 - Alpha N/A + :pypi:`pytest-recap` Capture your test sessions. Recap the results. Jun 16, 2025 N/A pytest>=6.2.0 + :pypi:`pytest-recorder` Pytest plugin, meant to facilitate unit tests writing for tools consumming Web APIs. 
Oct 28, 2025 N/A pytest>=8.4.1 :pypi:`pytest-recording` A pytest plugin powered by VCR.py to record and replay HTTP traffic May 08, 2025 4 - Beta pytest>=3.5.0 :pypi:`pytest-recordings` Provides pytest plugins for reporting request/response traffic, screenshots, and more to ReportPortal Aug 13, 2020 N/A N/A :pypi:`pytest-record-video` 用例执行过程中录制视频 Oct 31, 2024 N/A N/A @@ -1206,8 +1287,8 @@ This list contains 1641 plugins. :pypi:`pytest-reference-formatter` Conveniently run pytest with a dot-formatted test reference. Oct 01, 2019 4 - Beta N/A :pypi:`pytest-regex` Select pytest tests with regular expressions May 29, 2023 4 - Beta pytest (>=3.5.0) :pypi:`pytest-regex-dependency` Management of Pytest dependencies via regex patterns Jun 12, 2022 N/A pytest - :pypi:`pytest-regressions` Easy to use fixtures to write regression tests. May 30, 2025 5 - Production/Stable pytest>=6.2.0 - :pypi:`pytest-regtest` pytest plugin for snapshot regression testing Nov 12, 2024 N/A pytest>7.2 + :pypi:`pytest-regressions` Easy to use fixtures to write regression tests. Sep 05, 2025 5 - Production/Stable pytest>=6.2.0 + :pypi:`pytest-regtest` pytest plugin for snapshot regression testing Oct 11, 2025 N/A pytest>7.2 :pypi:`pytest-relative-order` a pytest plugin that sorts tests using "before" and "after" markers May 17, 2021 4 - Beta N/A :pypi:`pytest-relative-path` Handle relative path in pytest options or ini configs Aug 30, 2024 N/A pytest :pypi:`pytest-relaxed` Relaxed test discovery/organization for pytest Mar 29, 2024 5 - Production/Stable pytest>=7 @@ -1222,43 +1303,46 @@ This list contains 1641 plugins. 
:pypi:`pytest-repo-health` A pytest plugin to report on repository standards conformance May 05, 2025 3 - Alpha pytest :pypi:`pytest-report` Creates json report that is compatible with atom.io's linter message format May 11, 2016 4 - Beta N/A :pypi:`pytest-reporter` Generate Pytest reports with templates Feb 28, 2024 4 - Beta pytest - :pypi:`pytest-reporter-html1` A basic HTML report template for Pytest May 06, 2025 4 - Beta N/A + :pypi:`pytest-reporter-html1` A basic HTML report template for Pytest Oct 10, 2025 4 - Beta N/A :pypi:`pytest-reporter-html-dots` A basic HTML report for pytest using Jinja2 template engine. Apr 26, 2025 N/A N/A - :pypi:`pytest-reporter-plus` Lightweight enhanced HTML reporter for Pytest May 31, 2025 N/A N/A - :pypi:`pytest-report-extras` Pytest plugin to enhance pytest-html and allure reports by adding comments, screenshots, webpage sources and attachments. Apr 04, 2025 N/A pytest>=8.0.0 + :pypi:`pytest-reporter-plus` Lightweight enhanced HTML reporter for Pytest Jul 16, 2025 N/A N/A + :pypi:`pytest-report-extras` Pytest plugin to enhance pytest-html and allure reports by adding comments, screenshots, webpage sources and attachments. Aug 08, 2025 N/A pytest>=8.4.0 :pypi:`pytest-reportinfra` Pytest plugin for reportinfra Aug 11, 2019 3 - Alpha N/A :pypi:`pytest-reporting` A plugin to report summarized results in a table format Oct 25, 2019 4 - Beta pytest (>=3.5.0) :pypi:`pytest-reportlog` Replacement for the --resultlog option, focused in simplicity and extensibility May 22, 2023 3 - Alpha pytest :pypi:`pytest-report-me` A pytest plugin to generate report. 
Dec 31, 2020 N/A pytest :pypi:`pytest-report-parameters` pytest plugin for adding tests' parameters to junit report Jun 18, 2020 3 - Alpha pytest (>=2.4.2) - :pypi:`pytest-reportportal` Agent for Reporting results of tests to the Report Portal Feb 28, 2025 N/A pytest>=4.6.10 + :pypi:`pytest-reportportal` Agent for Reporting results of tests to the Report Portal Jul 08, 2025 N/A pytest>=4.6.10 :pypi:`pytest-report-stream` A pytest plugin which allows to stream test reports at runtime Oct 22, 2023 4 - Beta N/A :pypi:`pytest-repo-structure` Pytest Repo Structure Mar 18, 2024 1 - Planning N/A - :pypi:`pytest-req` pytest requests plugin Aug 31, 2024 5 - Production/Stable pytest<9.0.0,>=8.3.2 + :pypi:`pytest-req` pytest requests plugin Sep 08, 2025 5 - Production/Stable pytest>=8.4.2 + :pypi:`pytest-reqcov` A pytest plugin for requirement coverage tracking Jul 04, 2025 3 - Alpha pytest>=6.0 :pypi:`pytest-reqs` pytest plugin to check pinned requirements May 12, 2019 N/A pytest (>=2.4.2) :pypi:`pytest-requests` A simple plugin to use with pytest Jun 24, 2019 4 - Beta pytest (>=3.5.0) :pypi:`pytest-requestselapsed` collect and show http requests elapsed time Aug 14, 2022 N/A N/A :pypi:`pytest-requests-futures` Pytest Plugin to Mock Requests Futures Jul 06, 2022 5 - Production/Stable pytest :pypi:`pytest-requirements` pytest plugin for using custom markers to relate tests to requirements and usecases Feb 28, 2025 N/A pytest :pypi:`pytest-requires` A pytest plugin to elegantly skip tests with optional requirements Dec 21, 2021 4 - Beta pytest (>=3.5.0) + :pypi:`pytest-reqyaml` This is a plugin where generate requests test cases from yaml. 
Aug 16, 2025 N/A pytest>=8.4.1 :pypi:`pytest-reraise` Make multi-threaded pytest test cases fail when they should Sep 20, 2022 5 - Production/Stable pytest (>=4.6) :pypi:`pytest-rerun` Re-run only changed files in specified branch Jul 08, 2019 N/A pytest (>=3.6) - :pypi:`pytest-rerun-all` Rerun testsuite for a certain time or iterations Nov 16, 2023 3 - Alpha pytest (>=7.0.0) + :pypi:`pytest-rerun-all` Rerun testsuite for a certain time or iterations Jul 30, 2025 3 - Alpha pytest>=7.0.0 :pypi:`pytest-rerunclassfailures` pytest rerun class failures plugin Apr 24, 2024 5 - Production/Stable pytest>=7.2 - :pypi:`pytest-rerunfailures` pytest plugin to re-run tests to eliminate flaky failures May 08, 2025 5 - Production/Stable pytest!=8.2.2,>=7.4 + :pypi:`pytest-rerunfailures` pytest plugin to re-run tests to eliminate flaky failures Oct 10, 2025 5 - Production/Stable pytest!=8.2.2,>=7.4 :pypi:`pytest-rerunfailures-all-logs` pytest plugin to re-run tests to eliminate flaky failures Mar 07, 2022 5 - Production/Stable N/A :pypi:`pytest-reserial` Pytest fixture for recording and replaying serial port traffic. 
Dec 22, 2024 4 - Beta pytest - :pypi:`pytest-resilient-circuits` Resilient Circuits fixtures for PyTest Feb 28, 2025 N/A pytest~=7.0 + :pypi:`pytest-resilient-circuits` Resilient Circuits fixtures for PyTest Jul 29, 2025 N/A pytest~=7.0 :pypi:`pytest-resource` Load resource fixture plugin to use with pytest Nov 14, 2018 4 - Beta N/A - :pypi:`pytest-resource-path` Provides path for uniform access to test resources in isolated directory May 15, 2025 5 - Production/Stable pytest>=3.5.0 + :pypi:`pytest-resource-path` Provides path for uniform access to test resources in isolated directory Sep 18, 2025 5 - Production/Stable pytest>=3.5.0 :pypi:`pytest-resource-usage` Pytest plugin for reporting running time and peak memory usage Nov 06, 2022 5 - Production/Stable pytest>=7.0.0 + :pypi:`pytest-respect` Pytest plugin to load resource files relative to test code and to expect values to match them. Oct 21, 2025 5 - Production/Stable pytest>=8.0.0 :pypi:`pytest-responsemock` Simplified requests calls mocking for pytest Mar 10, 2022 5 - Production/Stable N/A :pypi:`pytest-responses` py.test integration for responses Oct 11, 2022 N/A pytest (>=2.5) :pypi:`pytest-rest-api` Aug 08, 2022 N/A pytest (>=7.1.2,<8.0.0) - :pypi:`pytest-restrict` Pytest plugin to restrict the test types allowed Oct 24, 2024 5 - Production/Stable pytest + :pypi:`pytest-restrict` Pytest plugin to restrict the test types allowed Sep 09, 2025 5 - Production/Stable pytest :pypi:`pytest-result-log` A pytest plugin that records the start, end, and result information of each use case in a log file Jan 10, 2024 N/A pytest>=7.2.0 :pypi:`pytest-result-notify` Default template for PDM package Apr 27, 2025 N/A pytest>=8.3.5 - :pypi:`pytest-results` Easily spot regressions in your tests. May 06, 2025 4 - Beta pytest + :pypi:`pytest-results` Easily spot regressions in your tests. 
Oct 08, 2025 4 - Beta pytest :pypi:`pytest-result-sender` Apr 20, 2023 N/A pytest>=7.3.1 :pypi:`pytest-result-sender-jms` Default template for PDM package May 22, 2025 N/A pytest>=8.3.5 :pypi:`pytest-result-sender-lj` Default template for PDM package Dec 17, 2024 N/A pytest>=8.3.4 @@ -1269,35 +1353,36 @@ This list contains 1641 plugins. :pypi:`pytest-retry` Adds the ability to retry flaky tests in CI environments Jan 19, 2025 N/A pytest>=7.0.0 :pypi:`pytest-retry-class` A pytest plugin to rerun entire class on failure Nov 24, 2024 N/A pytest>=5.3 :pypi:`pytest-reusable-testcases` Apr 28, 2023 N/A N/A - :pypi:`pytest-revealtype-injector` Pytest plugin for replacing reveal_type() calls inside test functions with static and runtime type checking result comparison, for confirming type annotation validity. Mar 18, 2025 4 - Beta pytest<9,>=7.0 - :pypi:`pytest-reverse` Pytest plugin to reverse test order. Oct 25, 2024 5 - Production/Stable pytest + :pypi:`pytest-revealtype-injector` Pytest plugin for replacing reveal_type() calls inside test functions with static and runtime type checking result comparison, for confirming type annotation validity. Oct 23, 2025 4 - Beta pytest<9,>=7.0 + :pypi:`pytest-reverse` Pytest plugin to reverse test order. Sep 09, 2025 5 - Production/Stable pytest :pypi:`pytest-rich` Leverage rich for richer test session output Dec 12, 2024 4 - Beta pytest>=7.0 :pypi:`pytest-richer` Pytest plugin providing a Rich based reporter. Oct 27, 2023 3 - Alpha pytest :pypi:`pytest-rich-reporter` A pytest plugin using Rich for beautiful test result formatting. Feb 17, 2022 1 - Planning pytest (>=5.0.0) :pypi:`pytest-richtrace` A pytest plugin that displays the names and information of the pytest hook functions as they are executed. 
Jun 20, 2023 N/A N/A :pypi:`pytest-ringo` pytest plugin to test webapplications using the Ringo webframework Sep 27, 2017 3 - Alpha N/A :pypi:`pytest-rmsis` Sycronise pytest results to Jira RMsis Aug 10, 2022 N/A pytest (>=5.3.5) + :pypi:`pytest-rmysql` This is a plugin which is able to connet MySQL easyly. Aug 17, 2025 N/A pytest>=8.4.1 :pypi:`pytest-rng` Fixtures for seeding tests and making randomness reproducible Aug 08, 2019 5 - Production/Stable pytest :pypi:`pytest-roast` pytest plugin for ROAST configuration override and fixtures Nov 09, 2022 5 - Production/Stable pytest - :pypi:`pytest_robotframework` a pytest plugin that can run both python and robotframework tests while generating robot reports for them Apr 13, 2025 N/A pytest<9,>=7 + :pypi:`pytest-robotframework` a pytest plugin that can run both python and robotframework tests while generating robot reports for them Oct 06, 2025 N/A pytest<9,>=7 :pypi:`pytest-rocketchat` Pytest to Rocket.Chat reporting plugin Apr 18, 2021 5 - Production/Stable N/A :pypi:`pytest-rotest` Pytest integration with rotest Sep 08, 2019 N/A pytest (>=3.5.0) :pypi:`pytest-rpc` Extend py.test for RPC OpenStack testing. Feb 22, 2019 4 - Beta pytest (~=3.6) :pypi:`pytest-rst` Test code from RST documents with pytest Jan 26, 2023 N/A N/A :pypi:`pytest-rt` pytest data collector plugin for Testgr May 05, 2022 N/A N/A :pypi:`pytest-rts` Coverage-based regression test selection (RTS) plugin for pytest May 17, 2021 N/A pytest - :pypi:`pytest-ruff` pytest plugin to check ruff requirements. Jul 21, 2024 4 - Beta pytest>=5 + :pypi:`pytest-ruff` pytest plugin to check ruff requirements. 
Jun 19, 2025 4 - Beta pytest>=5 :pypi:`pytest-run-changed` Pytest plugin that runs changed tests only Apr 02, 2021 3 - Alpha pytest :pypi:`pytest-runfailed` implement a --failed option for pytest Mar 24, 2016 N/A N/A - :pypi:`pytest-run-parallel` A simple pytest plugin to run tests concurrently May 27, 2025 4 - Beta pytest>=6.2.0 + :pypi:`pytest-run-parallel` A simple pytest plugin to run tests concurrently Oct 23, 2025 4 - Beta pytest>=6.2.0 :pypi:`pytest-run-subprocess` Pytest Plugin for running and testing subprocesses. Nov 12, 2022 5 - Production/Stable pytest :pypi:`pytest-runtime-types` Checks type annotations on runtime while running tests. Feb 09, 2023 N/A pytest - :pypi:`pytest-runtime-xfail` Call runtime_xfail() to mark running test as xfail. Aug 26, 2021 N/A pytest>=5.0.0 + :pypi:`pytest-runtime-xfail` Call runtime_xfail() to mark running test as xfail. Oct 10, 2025 5 - Production/Stable pytest>=5.0.0 :pypi:`pytest-runtime-yoyo` run case mark timeout Jun 12, 2023 N/A pytest (>=7.2.0) :pypi:`pytest-saccharin` pytest-saccharin is a updated fork of pytest-sugar, a plugin for pytest that changes the default look and feel of pytest (e.g. progressbar, show tests that fail instantly). Oct 31, 2022 3 - Alpha N/A :pypi:`pytest-salt` Pytest Salt Plugin Jan 27, 2020 4 - Beta N/A :pypi:`pytest-salt-containers` A Pytest plugin that builds and creates docker containers Nov 09, 2016 4 - Beta N/A - :pypi:`pytest-salt-factories` Pytest Salt Plugin Oct 22, 2024 5 - Production/Stable pytest>=7.4.0 + :pypi:`pytest-salt-factories` Pytest Salt Plugin Jul 08, 2025 5 - Production/Stable pytest>=7.4.0 :pypi:`pytest-salt-from-filenames` Simple PyTest Plugin For Salt's Test Suite Specifically Jan 29, 2019 4 - Beta pytest (>=4.1) :pypi:`pytest-salt-runtests-bridge` Simple PyTest Plugin For Salt's Test Suite Specifically Dec 05, 2019 4 - Beta pytest (>=4.1) :pypi:`pytest-sample-argvalues` A utility function to help choose a random sample from your argvalues in pytest. 
May 07, 2024 N/A pytest @@ -1306,9 +1391,10 @@ This list contains 1641 plugins. :pypi:`pytest-sanity` Dec 07, 2020 N/A N/A :pypi:`pytest-sa-pg` May 14, 2019 N/A N/A :pypi:`pytest_sauce` pytest_sauce provides sane and helpful methods worked out in clearcode to run py.test tests with selenium/saucelabs Jul 14, 2014 3 - Alpha N/A - :pypi:`pytest-sbase` A complete web automation framework for end-to-end testing. May 27, 2025 5 - Production/Stable N/A + :pypi:`pytest-sbase` A complete web automation framework for end-to-end testing. Nov 01, 2025 5 - Production/Stable N/A :pypi:`pytest-scenario` pytest plugin for test scenarios Feb 06, 2017 3 - Alpha N/A - :pypi:`pytest-scenario-files` A pytest plugin that generates unit test scenarios from data files. May 21, 2025 5 - Production/Stable pytest>=7.0 + :pypi:`pytest-scenario-files` A pytest plugin that generates unit test scenarios from data files. Sep 03, 2025 5 - Production/Stable pytest<9,>=7.4 + :pypi:`pytest-scenarios` Add your description here Oct 29, 2025 N/A N/A :pypi:`pytest-schedule` Automate and customize test scheduling effortlessly on local machines. Oct 31, 2024 N/A N/A :pypi:`pytest-schema` 👍 Validate return values against a schema-like object in testing Feb 16, 2024 5 - Production/Stable pytest >=3.5.0 :pypi:`pytest-scim2-server` SCIM2 server fixture for Pytest May 14, 2025 4 - Beta pytest>=8.3.4 @@ -1318,27 +1404,27 @@ This list contains 1641 plugins. :pypi:`pytest-select` A pytest plugin which allows to (de-)select tests from a file. Jan 18, 2019 3 - Alpha pytest (>=3.0) :pypi:`pytest-selenium` pytest plugin for Selenium Feb 01, 2024 5 - Production/Stable pytest>=6.0.0 :pypi:`pytest-selenium-auto` pytest plugin to automatically capture screenshots upon selenium webdriver events Nov 07, 2023 N/A pytest >= 7.0.0 - :pypi:`pytest-seleniumbase` A complete web automation framework for end-to-end testing. 
May 27, 2025 5 - Production/Stable N/A + :pypi:`pytest-seleniumbase` A complete web automation framework for end-to-end testing. Nov 01, 2025 5 - Production/Stable N/A :pypi:`pytest-selenium-enhancer` pytest plugin for Selenium Apr 29, 2022 5 - Production/Stable N/A :pypi:`pytest-selenium-pdiff` A pytest package implementing perceptualdiff for Selenium tests. Apr 06, 2017 2 - Pre-Alpha N/A :pypi:`pytest-selfie` A pytest plugin for selfie snapshot testing. Dec 16, 2024 N/A pytest>=8.0.0 :pypi:`pytest-send-email` Send pytest execution result email Sep 02, 2024 N/A pytest - :pypi:`pytest-sentry` A pytest plugin to send testrun information to Sentry.io May 23, 2025 N/A pytest + :pypi:`pytest-sentry` A pytest plugin to send testrun information to Sentry.io Jul 01, 2025 N/A pytest :pypi:`pytest-sequence-markers` Pytest plugin for sequencing markers for execution of tests May 23, 2023 5 - Production/Stable N/A :pypi:`pytest-server` test server exec cmd Sep 09, 2024 N/A N/A :pypi:`pytest-server-fixtures` Extensible server fixtures for py.test Nov 29, 2024 5 - Production/Stable pytest :pypi:`pytest-serverless` Automatically mocks resources from serverless.yml in pytest using moto. May 09, 2022 4 - Beta N/A - :pypi:`pytest-servers` pytest servers Mar 12, 2025 3 - Alpha pytest>=6.2 + :pypi:`pytest-servers` pytest servers Aug 04, 2025 3 - Alpha pytest>=6.2 :pypi:`pytest-service` Aug 06, 2024 5 - Production/Stable pytest>=6.0.0 - :pypi:`pytest-services` Services plugin for pytest testing framework Oct 30, 2020 6 - Mature N/A + :pypi:`pytest-services` Services plugin for pytest testing framework Jul 16, 2025 6 - Mature pytest :pypi:`pytest-session2file` pytest-session2file (aka: pytest-session_to_file for v0.1.0 - v0.1.2) is a py.test plugin for capturing and saving to file the stdout of py.test. 
Jan 26, 2021 3 - Alpha pytest :pypi:`pytest-session-fixture-globalize` py.test plugin to make session fixtures behave as if written in conftest, even if it is written in some modules May 15, 2018 4 - Beta N/A :pypi:`pytest-session_to_file` pytest-session_to_file is a py.test plugin for capturing and saving to file the stdout of py.test. Oct 01, 2015 3 - Alpha N/A :pypi:`pytest-setupinfo` Displaying setup info during pytest command run Jan 23, 2023 N/A N/A :pypi:`pytest-sftpserver` py.test plugin to locally test sftp server connections. Sep 16, 2019 4 - Beta N/A :pypi:`pytest-shard` Dec 11, 2020 4 - Beta pytest - :pypi:`pytest-shard-fork` Shard tests to support parallelism across multiple machines May 17, 2025 4 - Beta pytest - :pypi:`pytest-shared-session-scope` Pytest session-scoped fixture that works with xdist Sep 22, 2024 N/A pytest>=7.0.0 + :pypi:`pytest-shard-fork` Shard tests to support parallelism across multiple machines Jun 13, 2025 4 - Beta pytest + :pypi:`pytest-shared-session-scope` Pytest session-scoped fixture that works with xdist Oct 31, 2025 N/A pytest>=7.0.0 :pypi:`pytest-share-hdf` Plugin to save test data in HDF files and retrieve them for comparison Sep 21, 2022 4 - Beta pytest (>=3.5.0) :pypi:`pytest-sharkreport` this is pytest report plugin. Jul 11, 2022 N/A pytest (>=3.5) :pypi:`pytest-shell` A pytest plugin to help with testing shell scripts / black box commands Mar 27, 2022 N/A N/A @@ -1347,12 +1433,13 @@ This list contains 1641 plugins. 
:pypi:`pytest-sherlock` pytest plugin help to find coupled tests Aug 14, 2023 5 - Production/Stable pytest >=3.5.1 :pypi:`pytest-shortcuts` Expand command-line shortcuts listed in pytest configuration Oct 29, 2020 4 - Beta pytest (>=3.5.0) :pypi:`pytest-shutil` A goodie-bag of unix shell and environment tools for py.test Nov 29, 2024 5 - Production/Stable pytest + :pypi:`pytest-sigil` Proper fixture resource cleanup by handling signals Oct 21, 2025 N/A pytest<9.0.0,>=7.0.0 :pypi:`pytest-simbind` Pytest plugin to operate with objects generated by Simbind tool. Mar 28, 2024 N/A pytest>=7.0.0 :pypi:`pytest-simplehttpserver` Simple pytest fixture to spin up an HTTP server Jun 24, 2021 4 - Beta N/A :pypi:`pytest-simple-plugin` Simple pytest plugin Nov 27, 2019 N/A N/A :pypi:`pytest-simple-settings` simple-settings plugin for pytest Nov 17, 2020 4 - Beta pytest :pypi:`pytest-single-file-logging` Allow for multiple processes to log to a single file May 05, 2016 4 - Beta pytest (>=2.8.1) - :pypi:`pytest-skip` A pytest plugin which allows to (de-)select or skip tests from a file. Apr 04, 2025 3 - Alpha pytest + :pypi:`pytest-skip` A pytest plugin which allows to (de-)select or skip tests from a file. Sep 12, 2025 3 - Alpha pytest :pypi:`pytest-skip-markers` Pytest Salt Plugin Aug 09, 2024 5 - Production/Stable pytest>=7.1.0 :pypi:`pytest-skipper` A plugin that selects only tests with changes in execution path Mar 26, 2017 3 - Alpha pytest (>=3.0.6) :pypi:`pytest-skippy` Automatically skip tests that don't need to run! Jan 27, 2018 3 - Alpha pytest (>=2.3.4) @@ -1365,13 +1452,17 @@ This list contains 1641 plugins. :pypi:`pytest-slow-last` Run tests in order of execution time (faster tests first) Mar 16, 2025 4 - Beta pytest>=3.5.0 :pypi:`pytest-smartcollect` A plugin for collecting tests that touch changed code Oct 04, 2018 N/A pytest (>=3.5.0) :pypi:`pytest-smartcov` Smart coverage plugin for pytest. 
Sep 30, 2017 3 - Alpha N/A + :pypi:`pytest-smart-debugger-backend` Backend server for Pytest Smart Debugger Sep 17, 2025 N/A N/A + :pypi:`pytest-smart-rerun` A Pytest plugin for intelligent retrying of flaky tests. Oct 12, 2025 3 - Alpha N/A :pypi:`pytest-smell` Automated bad smell detection tool for Pytest Jun 26, 2022 N/A N/A - :pypi:`pytest-smoke` Pytest plugin for smoke testing May 23, 2025 4 - Beta pytest<9,>=7.0.0 + :pypi:`pytest-smoke` Pytest plugin for smoke testing Oct 08, 2025 4 - Beta pytest<9,>=7.0.0 :pypi:`pytest-smtp` Send email with pytest execution result Feb 20, 2021 N/A pytest :pypi:`pytest-smtp4dev` Plugin for smtp4dev API Jun 27, 2023 5 - Production/Stable N/A :pypi:`pytest-smtpd` An SMTP server for testing built on aiosmtpd May 15, 2023 N/A pytest :pypi:`pytest-smtp-test-server` pytest plugin for using \`smtp-test-server\` as a fixture Dec 03, 2023 2 - Pre-Alpha pytest (>=7.4.3,<8.0.0) :pypi:`pytest-snail` Plugin for adding a marker to slow running tests. 🐌 Nov 04, 2019 3 - Alpha pytest (>=5.0.1) + :pypi:`pytest-snap` A text-based snapshot testing library implemented as a pytest plugin Aug 25, 2025 N/A pytest>=8.0.0 + :pypi:`pytest-snapcheck` Minimal deterministic test-run snapshot capture for pytest. Sep 07, 2025 N/A pytest>=8.0 :pypi:`pytest-snapci` py.test plugin for Snap-CI Nov 12, 2015 N/A N/A :pypi:`pytest-snapmock` Snapshots for your mocks. Nov 15, 2024 N/A N/A :pypi:`pytest-snapshot` A plugin for snapshot testing with pytest. Apr 23, 2022 4 - Beta pytest (>=3.0.0) @@ -1390,7 +1481,7 @@ This list contains 1641 plugins. :pypi:`pytest-sourceorder` Test-ordering plugin for pytest Sep 01, 2021 4 - Beta pytest :pypi:`pytest-spark` pytest plugin to run the tests with support of pyspark. May 21, 2025 4 - Beta pytest :pypi:`pytest-spawner` py.test plugin to spawn process and communicate with them. Jul 31, 2015 4 - Beta N/A - :pypi:`pytest-spec` Library pytest-spec is a pytest plugin to display test execution output like a SPECIFICATION. 
Aug 04, 2024 N/A pytest; extra == "test" + :pypi:`pytest-spec` Library pytest-spec is a pytest plugin to display test execution output like a SPECIFICATION. Oct 08, 2025 N/A pytest; extra == "test" :pypi:`pytest-spec2md` Library pytest-spec2md is a pytest plugin to create a markdown specification while running pytest. Apr 10, 2024 N/A pytest>7.0 :pypi:`pytest-speed` Modern benchmarking library for python with pytest integration. Jan 22, 2023 3 - Alpha pytest>=7 :pypi:`pytest-sphinx` Doctest plugin for pytest with support for Sphinx-specific doctest-directives Apr 13, 2024 4 - Beta pytest>=8.1.1 @@ -1402,8 +1493,8 @@ This list contains 1641 plugins. :pypi:`pytest-splitio` Split.io SDK integration for e2e tests Sep 22, 2020 N/A pytest (<7,>=5.0) :pypi:`pytest-split-tests` A Pytest plugin for running a subset of your tests by splitting them in to equally sized groups. Forked from Mark Adams' original project pytest-test-groups. Jul 30, 2021 5 - Production/Stable pytest (>=2.5) :pypi:`pytest-split-tests-tresorit` Feb 22, 2021 1 - Planning N/A - :pypi:`pytest-splunk-addon` A Dynamic test tool for Splunk Apps and Add-ons May 14, 2025 N/A pytest<8,>5.4.0 - :pypi:`pytest-splunk-addon-ui-smartx` Library to support testing Splunk Add-on UX Mar 19, 2025 N/A N/A + :pypi:`pytest-splunk-addon` A Dynamic test tool for Splunk Apps and Add-ons Aug 19, 2025 N/A pytest<8,>5.4.0 + :pypi:`pytest-splunk-addon-ui-smartx` Library to support testing Splunk Add-on UX Aug 28, 2025 N/A N/A :pypi:`pytest-splunk-env` pytest fixtures for interaction with Splunk Enterprise and Splunk Cloud Oct 22, 2020 N/A pytest (>=6.1.1,<7.0.0) :pypi:`pytest-sqitch` sqitch for pytest Apr 06, 2020 4 - Beta N/A :pypi:`pytest-sqlalchemy` pytest plugin with sqlalchemy related fixtures Apr 19, 2025 3 - Alpha pytest>=8.0 @@ -1411,7 +1502,7 @@ This list contains 1641 plugins. :pypi:`pytest-sqlalchemy-session` A pytest plugin for preserving test isolation that use SQLAlchemy. 
May 19, 2023 4 - Beta pytest (>=7.0) :pypi:`pytest-sql-bigquery` Yet another SQL-testing framework for BigQuery provided by pytest plugin Dec 19, 2019 N/A pytest :pypi:`pytest-sqlfluff` A pytest plugin to use sqlfluff to enable format checking of sql files. Dec 21, 2022 4 - Beta pytest (>=3.5.0) - :pypi:`pytest-sqlguard` Pytest fixture to record and check SQL Queries made by SQLAlchemy Mar 11, 2025 4 - Beta pytest>=7 + :pypi:`pytest-sqlguard` Pytest fixture to record and check SQL Queries made by SQLAlchemy Jun 06, 2025 4 - Beta pytest>=7 :pypi:`pytest-squadcast` Pytest report plugin for Squadcast Feb 22, 2022 5 - Production/Stable pytest :pypi:`pytest-srcpaths` Add paths to sys.path Oct 15, 2021 N/A pytest>=6.2.0 :pypi:`pytest-ssh` pytest plugin for ssh command run May 27, 2019 N/A pytest @@ -1420,26 +1511,31 @@ This list contains 1641 plugins. :pypi:`pytest-stats` Collects tests metadata for future analysis, easy to extend for any data store Jul 18, 2024 N/A pytest>=8.0.0 :pypi:`pytest-statsd` pytest plugin for reporting to graphite Nov 30, 2018 5 - Production/Stable pytest (>=3.0.0) :pypi:`pytest-status` Add status mark for tests Aug 22, 2024 N/A pytest + :pypi:`pytest-stderr-db` Add your description here Sep 14, 2025 N/A N/A + :pypi:`pytest-stdout-db` Add your description here Sep 14, 2025 N/A N/A :pypi:`pytest-stepfunctions` A small description May 08, 2021 4 - Beta pytest :pypi:`pytest-steps` Create step-wise / incremental tests in pytest. Sep 23, 2021 5 - Production/Stable N/A + :pypi:`pytest-stepthrough` Pause and wait for Enter after each test with --step Aug 14, 2025 N/A N/A :pypi:`pytest-stepwise` Run a test suite one failing test at a time. Dec 01, 2015 4 - Beta N/A - :pypi:`pytest-stf` pytest plugin for openSTF Sep 24, 2024 N/A pytest>=5.0 + :pypi:`pytest-stf` pytest plugin for openSTF Sep 23, 2025 N/A pytest>=5.0 :pypi:`pytest-stochastics` pytest plugin that allows selectively running tests several times and accepting \*some\* failures. 
Dec 01, 2024 N/A pytest<9.0.0,>=8.0.0 :pypi:`pytest-stoq` A plugin to pytest stoq Feb 09, 2021 4 - Beta N/A - :pypi:`pytest-store` Pytest plugin to store values from test runs Sep 04, 2024 3 - Alpha pytest>=7.0.0 + :pypi:`pytest-storage` Pytest plugin to store test artifacts Sep 12, 2025 3 - Alpha pytest>=8.4.2 + :pypi:`pytest-store` Pytest plugin to store values from test runs Jul 30, 2025 3 - Alpha pytest>=7.0.0 :pypi:`pytest-streaming` Plugin for testing pubsub, pulsar, and kafka systems with pytest locally and in ci/cd May 28, 2025 5 - Production/Stable pytest>=8.3.5 :pypi:`pytest-stress` A Pytest plugin that allows you to loop tests for a user defined amount of time. Dec 07, 2019 4 - Beta pytest (>=3.6.0) - :pypi:`pytest-structlog` Structured logging assertions Jul 25, 2024 N/A pytest + :pypi:`pytest-structlog` Structured logging assertions Sep 10, 2025 N/A pytest :pypi:`pytest-structmpd` provide structured temporary directory Oct 17, 2018 N/A N/A :pypi:`pytest-stub` Stub packages, modules and attributes. Apr 28, 2020 5 - Production/Stable N/A :pypi:`pytest-stubprocess` Provide stub implementations for subprocesses in Python tests Sep 17, 2018 3 - Alpha pytest (>=3.5.0) :pypi:`pytest-study` A pytest plugin to organize long run tests (named studies) without interfering the regular tests Sep 26, 2017 3 - Alpha pytest (>=2.0) :pypi:`pytest-subinterpreter` Run pytest in a subinterpreter Nov 25, 2023 N/A pytest>=7.0.0 + :pypi:`pytest-subket` Pytest Plugin to disable socket calls during tests Jul 31, 2025 4 - Beta N/A :pypi:`pytest-subprocess` A plugin to fake subprocess for pytest Jan 04, 2025 5 - Production/Stable pytest>=4.0.0 :pypi:`pytest-subtesthack` A hack to explicitly set up and tear down fixtures. 
Jul 16, 2022 N/A N/A - :pypi:`pytest-subtests` unittest subTest() support and subtests fixture Dec 10, 2024 4 - Beta pytest>=7.4 + :pypi:`pytest-subtests` unittest subTest() support and subtests fixture Oct 20, 2025 4 - Beta pytest>=7.4 :pypi:`pytest-subunit` pytest-subunit is a plugin for py.test which outputs testsresult in subunit format. Sep 17, 2023 N/A pytest (>=2.3) - :pypi:`pytest-sugar` pytest-sugar is a plugin for pytest that changes the default look and feel of pytest (e.g. progressbar, show tests that fail instantly). Feb 01, 2024 4 - Beta pytest >=6.2.0 + :pypi:`pytest-sugar` pytest-sugar is a plugin for pytest that changes the default look and feel of pytest (e.g. progressbar, show tests that fail instantly). Aug 23, 2025 4 - Beta pytest>=6.2.0 :pypi:`pytest-suitemanager` A simple plugin to use with pytest Apr 28, 2023 4 - Beta N/A :pypi:`pytest-suite-timeout` A pytest plugin for ensuring max suite time Jan 26, 2024 N/A pytest>=7.0.0 :pypi:`pytest-supercov` Pytest plugin for measuring explicit test-file to source-file coverage Jul 02, 2023 N/A N/A @@ -1466,6 +1562,7 @@ This list contains 1641 plugins. :pypi:`pytest-terra-fixt` Terraform and Terragrunt fixtures for pytest Sep 15, 2022 N/A pytest (==6.2.5) :pypi:`pytest-terraform` A pytest plugin for using terraform fixtures May 21, 2024 N/A pytest>=6.0 :pypi:`pytest-terraform-fixture` generate terraform resources to use with pytest Nov 14, 2018 4 - Beta N/A + :pypi:`pytest-test-analyzer` A powerful tool for analyzing pytest test files and generating detailed reports Jun 14, 2025 4 - Beta N/A :pypi:`pytest-testbook` A plugin to run tests written in Jupyter notebook Dec 11, 2016 3 - Alpha N/A :pypi:`pytest-testconfig` Test configuration plugin for pytest. Jan 11, 2020 4 - Beta pytest (>=3.5.0) :pypi:`pytest-testdata` Get and load testdata in pytest projects Aug 30, 2024 N/A pytest @@ -1520,6 +1617,7 @@ This list contains 1641 plugins. 
:pypi:`pytest-timer` A timer plugin for pytest Dec 26, 2023 N/A pytest :pypi:`pytest-timestamper` Pytest plugin to add a timestamp prefix to the pytest output Mar 27, 2024 N/A N/A :pypi:`pytest-timestamps` A simple plugin to view timestamps for each test Sep 11, 2023 N/A pytest (>=7.3,<8.0) + :pypi:`pytest-timing-plugin` pytest插件开发demo Jul 21, 2025 N/A N/A :pypi:`pytest-tiny-api-client` The companion pytest plugin for tiny-api-client Jan 04, 2024 5 - Production/Stable pytest :pypi:`pytest-tinybird` A pytest plugin to report test results to tinybird May 07, 2025 4 - Beta pytest>=3.8.0 :pypi:`pytest-tipsi-django` Better fixtures for django Feb 05, 2024 5 - Production/Stable pytest>=6.0.0 @@ -1530,7 +1628,7 @@ This list contains 1641 plugins. :pypi:`pytest-tmp-files` Utilities to create temporary file hierarchies in pytest. Dec 08, 2023 N/A pytest :pypi:`pytest-tmpfs` A pytest plugin that helps you on using a temporary filesystem for testing. Aug 29, 2022 N/A pytest :pypi:`pytest-tmreport` this is a vue-element ui report for pytest Aug 12, 2022 N/A N/A - :pypi:`pytest-tmux` A pytest plugin that enables tmux driven tests Apr 22, 2023 4 - Beta N/A + :pypi:`pytest-tmux` A pytest plugin that enables tmux driven tests Sep 01, 2025 4 - Beta N/A :pypi:`pytest-todo` A small plugin for the pytest testing framework, marking TODO comments as failure May 23, 2019 4 - Beta pytest :pypi:`pytest-tomato` Mar 01, 2019 5 - Production/Stable N/A :pypi:`pytest-toolbelt` This is just a collection of utilities for pytest, but don't really belong in pytest proper. Aug 12, 2019 3 - Alpha N/A @@ -1547,7 +1645,7 @@ This list contains 1641 plugins. :pypi:`pytest-translations` Test your translation files. 
Sep 11, 2023 5 - Production/Stable pytest (>=7) :pypi:`pytest-travis-fold` Folds captured output sections in Travis CI build log Nov 29, 2017 4 - Beta pytest (>=2.6.0) :pypi:`pytest-trello` Plugin for py.test that integrates trello using markers Nov 20, 2015 5 - Production/Stable N/A - :pypi:`pytest-trepan` Pytest plugin for trepan debugger. Jul 28, 2018 5 - Production/Stable N/A + :pypi:`pytest-trepan` Pytest plugin for trepan debugger. Sep 11, 2025 5 - Production/Stable pytest>=4.0.0 :pypi:`pytest-trialtemp` py.test plugin for using the same _trial_temp working directory as trial Jun 08, 2015 N/A N/A :pypi:`pytest-trio` Pytest plugin for trio Nov 01, 2022 N/A pytest (>=7.2.0) :pypi:`pytest-trytond` Pytest plugin for the Tryton server framework Nov 04, 2022 4 - Beta pytest (>=5) @@ -1555,17 +1653,20 @@ This list contains 1641 plugins. :pypi:`pytest-tst` Customize pytest options, output and exit code to make it compatible with tst Apr 27, 2022 N/A pytest (>=5.0.0) :pypi:`pytest-tstcls` Test Class Base Mar 23, 2020 5 - Production/Stable N/A :pypi:`pytest-tui` Text User Interface (TUI) and HTML report for Pytest test runs Dec 08, 2023 4 - Beta N/A + :pypi:`pytest-tui-runner` Textual-based terminal UI for running pytest tests Oct 23, 2025 N/A pytest>=8.3.5 :pypi:`pytest-tuitest` pytest plugin for testing TUI and regular command-line applications. Apr 11, 2025 N/A pytest>=7.4.0 :pypi:`pytest-tutorials` Mar 11, 2023 N/A N/A :pypi:`pytest-twilio-conversations-client-mock` Aug 02, 2022 N/A N/A :pypi:`pytest-twisted` A twisted plugin for pytest. 
Sep 10, 2024 5 - Production/Stable pytest>=2.3 + :pypi:`pytest-ty` A pytest plugin to run the ty type checker Oct 10, 2025 3 - Alpha pytest>=7.0.0 :pypi:`pytest-typechecker` Run type checkers on specified test files Feb 04, 2022 N/A pytest (>=6.2.5,<7.0.0) - :pypi:`pytest-typed-schema-shot` Pytest plugin for automatic JSON Schema generation and validation from examples May 24, 2025 N/A pytest + :pypi:`pytest-typed-schema-shot` Pytest plugin for automatic JSON Schema generation and validation from examples Jun 14, 2025 N/A pytest :pypi:`pytest-typhoon-config` A Typhoon HIL plugin that facilitates test parameter configuration at runtime Apr 07, 2022 5 - Production/Stable N/A :pypi:`pytest-typhoon-polarion` Typhoontest plugin for Siemens Polarion Feb 01, 2024 4 - Beta N/A :pypi:`pytest-typhoon-xray` Typhoon HIL plugin for pytest Aug 15, 2023 4 - Beta N/A :pypi:`pytest-typing-runner` Pytest plugin to make it easier to run and check python code against static type checkers May 31, 2025 N/A N/A :pypi:`pytest-tytest` Typhoon HIL plugin for pytest May 25, 2020 4 - Beta pytest (>=5.4.2) + :pypi:`pytest-tzshift` A Pytest plugin that transparently re-runs tests under a matrix of timezones and locales. Jun 25, 2025 4 - Beta pytest>=7.0 :pypi:`pytest-ubersmith` Easily mock calls to ubersmith at the \`requests\` level. Apr 13, 2015 N/A N/A :pypi:`pytest-ui` Text User Interface for running python tests Jul 05, 2021 4 - Beta pytest :pypi:`pytest-ui-failed-screenshot` UI自动测试失败时自动截图,并将截图加入到测试报告中 Dec 06, 2022 N/A N/A @@ -1573,14 +1674,15 @@ This list contains 1641 plugins. 
:pypi:`pytest-uncollect-if` A plugin to uncollect pytests tests rather than using skipif Dec 26, 2024 4 - Beta pytest>=6.2.0 :pypi:`pytest-unflakable` Unflakable plugin for PyTest Apr 30, 2024 4 - Beta pytest>=6.2.0 :pypi:`pytest-unhandled-exception-exit-code` Plugin for py.test set a different exit code on uncaught exceptions Jun 22, 2020 5 - Production/Stable pytest (>=2.3) - :pypi:`pytest-unique` Pytest fixture to generate unique values. Mar 23, 2025 N/A pytest<8.0.0,>=7.4.2 + :pypi:`pytest-unique` Pytest fixture to generate unique values. Jun 10, 2025 N/A pytest<9.0.0,>=8.0.0 :pypi:`pytest-unittest-filter` A pytest plugin for filtering unittest-based test classes Jan 12, 2019 4 - Beta pytest (>=3.1.0) :pypi:`pytest-unittest-id-runner` A pytest plugin to run tests using unittest-style test IDs Feb 09, 2025 N/A pytest>=6.0.0 - :pypi:`pytest-unmagic` Pytest fixtures with conventional import semantics Oct 22, 2024 5 - Production/Stable pytest + :pypi:`pytest-unmagic` Pytest fixtures with conventional import semantics Jul 14, 2025 5 - Production/Stable pytest :pypi:`pytest-unmarked` Run only unmarked tests Aug 27, 2019 5 - Production/Stable N/A - :pypi:`pytest-unordered` Test equality of unordered collections in pytest Jul 05, 2024 4 - Beta pytest>=7.0.0 + :pypi:`pytest-unordered` Test equality of unordered collections in pytest Jun 03, 2025 4 - Beta pytest>=7.0.0 :pypi:`pytest-unstable` Set a test as unstable to return 0 even if it failed Sep 27, 2022 4 - Beta N/A :pypi:`pytest-unused-fixtures` A pytest plugin to list unused fixtures after a test run. Mar 15, 2025 4 - Beta pytest>7.3.2 + :pypi:`pytest-unused-port` pytest fixture finding an unused local port Oct 22, 2025 N/A pytest :pypi:`pytest-upload-report` pytest-upload-report is a plugin for pytest that upload your test report for test results. Jun 18, 2021 5 - Production/Stable N/A :pypi:`pytest-utils` Some helpers for pytest. 
Feb 02, 2023 4 - Beta pytest (>=7.0.0,<8.0.0) :pypi:`pytest-vagrant` A py.test plugin providing access to vagrant. Sep 07, 2021 5 - Production/Stable pytest @@ -1593,6 +1695,7 @@ This list contains 1641 plugins. :pypi:`pytest-vcs` Sep 22, 2022 4 - Beta N/A :pypi:`pytest-venv` py.test fixture for creating a virtual environment Nov 23, 2023 4 - Beta pytest :pypi:`pytest-verbose-parametrize` More descriptive output for parametrized py.test tests Nov 29, 2024 5 - Production/Stable pytest + :pypi:`pytest-verify` A pytest plugin for snapshot verification with optional visual diff viewer. Oct 25, 2025 5 - Production/Stable N/A :pypi:`pytest-vimqf` A simple pytest plugin that will shrink pytest output when specified, to fit vim quickfix window. Feb 08, 2021 4 - Beta pytest (>=6.2.2,<7.0.0) :pypi:`pytest-virtualenv` Virtualenv fixture for py.test Nov 29, 2024 5 - Production/Stable pytest :pypi:`pytest-visual` Nov 28, 2024 4 - Beta pytest>=7.0.0 @@ -1615,9 +1718,8 @@ This list contains 1641 plugins. :pypi:`pytest-web3-data` A pytest plugin to fetch test data from IPFS HTTP gateways during pytest execution. Oct 04, 2023 4 - Beta pytest :pypi:`pytest-webdriver` Selenium webdriver fixture for py.test Oct 17, 2024 5 - Production/Stable pytest :pypi:`pytest-webstage` Test web apps with pytest Sep 20, 2024 N/A pytest<9.0,>=7.0 - :pypi:`pytest-webtest-extras` Pytest plugin to enhance pytest-html and allure reports of webtest projects by adding screenshots, comments and webpage sources. Dec 28, 2024 N/A pytest>=7.0.0 :pypi:`pytest-wetest` Welian API Automation test framework pytest plugin Nov 10, 2018 4 - Beta N/A - :pypi:`pytest-when` Utility which makes mocking more readable and controllable Nov 29, 2024 N/A pytest>=7.3.1 + :pypi:`pytest-when` Utility which makes mocking more readable and controllable Sep 25, 2025 N/A pytest>=7.3.1 :pypi:`pytest-whirlwind` Testing Tornado. 
Jun 12, 2020 N/A N/A :pypi:`pytest-wholenodeid` pytest addon for displaying the whole node id for failures Aug 26, 2015 4 - Beta pytest (>=2.0) :pypi:`pytest-win32consoletitle` Pytest progress in console title (Win32 only) Aug 08, 2021 N/A N/A @@ -1627,16 +1729,17 @@ This list contains 1641 plugins. :pypi:`pytest-with-docker` pytest with docker helpers. Nov 09, 2021 N/A pytest :pypi:`pytest-workaround-12888` forces an import of readline early in the process to work around pytest bug #12888 Jan 15, 2025 N/A N/A :pypi:`pytest-workflow` A pytest plugin for configuring workflow/pipeline tests using YAML files Mar 18, 2024 5 - Production/Stable pytest >=7.0.0 - :pypi:`pytest-xdist` pytest xdist plugin for distributed testing, most importantly across multiple CPUs May 26, 2025 5 - Production/Stable pytest>=7.0.0 + :pypi:`pytest-xdist` pytest xdist plugin for distributed testing, most importantly across multiple CPUs Jul 01, 2025 5 - Production/Stable pytest>=7.0.0 :pypi:`pytest-xdist-debug-for-graingert` pytest xdist plugin for distributed testing and loop-on-failing modes Jul 24, 2019 5 - Production/Stable pytest (>=4.4.0) :pypi:`pytest-xdist-forked` forked from pytest-xdist Feb 10, 2020 5 - Production/Stable pytest (>=4.4.0) - :pypi:`pytest-xdist-lock` Extension for pytest-xdist adding test and resource group locks for local and distributed runs Apr 26, 2025 N/A pytest>=6.0 + :pypi:`pytest-xdist-gnumake` A small example package Jun 22, 2025 N/A pytest :pypi:`pytest-xdist-tracker` pytest plugin helps to reproduce failures for particular xdist node Nov 18, 2021 3 - Alpha pytest (>=3.5.1) :pypi:`pytest-xdist-worker-stats` A pytest plugin to list worker statistics after a xdist run. Mar 15, 2025 4 - Beta pytest>=7.0.0 - :pypi:`pytest-xdocker` Pytest fixture to run docker across test runs. Mar 23, 2025 N/A pytest<8.0.0,>=7.4.2 + :pypi:`pytest-xdocker` Pytest fixture to run docker across test runs. 
Jun 10, 2025 N/A pytest<9.0.0,>=8.0.0 :pypi:`pytest-xfaillist` Maintain a xfaillist in an additional file to avoid merge-conflicts. Sep 17, 2021 N/A pytest (>=6.2.2,<7.0.0) :pypi:`pytest-xfiles` Pytest fixtures providing data read from function, module or package related (x)files. Feb 27, 2018 N/A N/A :pypi:`pytest-xflaky` A simple plugin to use with pytest Oct 14, 2024 4 - Beta pytest>=8.2.1 + :pypi:`pytest-xhtml` pytest plugin for generating HTML reports Oct 18, 2025 5 - Production/Stable pytest>=7 :pypi:`pytest-xiuyu` This is a pytest plugin Jul 25, 2023 5 - Production/Stable N/A :pypi:`pytest-xlog` Extended logging for test and decorators May 31, 2020 4 - Beta N/A :pypi:`pytest-xlsx` pytest plugin for generating test cases by xlsx(excel) Aug 07, 2024 N/A pytest~=8.2.2 @@ -1648,11 +1751,12 @@ This list contains 1641 plugins. :pypi:`pytest-xray-reporter` Pytest plugin for generating Xray JSON reports May 21, 2025 4 - Beta pytest>=7.0.0 :pypi:`pytest-xray-server` May 03, 2022 3 - Alpha pytest (>=5.3.1) :pypi:`pytest-xstress` Jun 01, 2024 N/A pytest<9.0.0,>=8.0.0 + :pypi:`pytest-xtime` pytest plugin for recording execution time Jun 05, 2025 4 - Beta pytest :pypi:`pytest-xvfb` A pytest plugin to run Xvfb (or Xephyr/Xvnc) for tests. Mar 12, 2025 4 - Beta pytest>=2.8.1 :pypi:`pytest-xvirt` A pytest plugin to virtualize test. For example to transparently running them on a remote box. Dec 15, 2024 4 - Beta pytest>=7.2.2 :pypi:`pytest-yaml` This plugin is used to load yaml output to your test using pytest framework. Oct 05, 2018 N/A pytest - :pypi:`pytest-yaml-fei` a pytest yaml allure package Feb 09, 2025 N/A pytest - :pypi:`pytest-yaml-sanmu` Pytest plugin for generating test cases with YAML. In test cases, you can use markers, fixtures, variables, and even call Python functions. Jan 03, 2025 N/A pytest>=8.2.2 + :pypi:`pytest-yaml-fei` a pytest yaml allure package Aug 03, 2025 N/A pytest + :pypi:`pytest-yaml-sanmu` Pytest plugin for generating test cases with YAML. 
In test cases, you can use markers, fixtures, variables, and even call Python functions. Sep 16, 2025 N/A pytest>=8.2.2 :pypi:`pytest-yamltree` Create or check file/directory trees described by YAML Mar 02, 2020 4 - Beta pytest (>=3.1.1) :pypi:`pytest-yamlwsgi` Run tests against wsgi apps defined in yaml May 11, 2010 N/A N/A :pypi:`pytest-yaml-yoyo` http/https API run by yaml Jun 19, 2023 N/A pytest (>=7.2.0) @@ -1667,7 +1771,7 @@ This list contains 1641 plugins. :pypi:`pytest-zcc` eee Jun 02, 2024 N/A N/A :pypi:`pytest-zebrunner` Pytest connector for Zebrunner reporting Jul 04, 2024 5 - Production/Stable pytest>=4.5.0 :pypi:`pytest-zeebe` Pytest fixtures for testing Camunda 8 processes using a Zeebe test engine. Feb 01, 2024 N/A pytest (>=7.4.2,<8.0.0) - :pypi:`pytest-zephyr-scale-integration` A library for integrating Jira Zephyr Scale (Adaptavist\TM4J) with pytest May 15, 2025 N/A pytest + :pypi:`pytest-zephyr-scale-integration` A library for integrating Jira Zephyr Scale (Adaptavist\TM4J) with pytest Jun 26, 2025 N/A pytest :pypi:`pytest-zephyr-telegram` Плагин для отправки данных автотестов в Телеграм и Зефир Sep 30, 2024 N/A pytest==8.3.2 :pypi:`pytest-zest` Zesty additions to pytest. Nov 17, 2022 N/A N/A :pypi:`pytest-zhongwen-wendang` PyTest 中文文档 Mar 04, 2024 4 - Beta N/A @@ -1681,21 +1785,21 @@ This list contains 1641 plugins. :pypi:`databricks-labs-pytester` - *last release*: May 13, 2025, + *last release*: Oct 17, 2025, *status*: 4 - Beta, *requires*: pytest>=8.3 Python Testing for Databricks :pypi:`logassert` - *last release*: May 15, 2025, + *last release*: Aug 14, 2025, *status*: 5 - Production/Stable, *requires*: pytest; extra == "dev" Simple but powerful assertion and verification of logged lines :pypi:`logot` - *last release*: May 05, 2025, + *last release*: Jul 28, 2025, *status*: 5 - Production/Stable, *requires*: pytest; extra == "pytest" @@ -1723,11 +1827,11 @@ This list contains 1641 plugins. 
A contextmanager pytest fixture for handling multiple mock abstracts :pypi:`pytest-accept` - *last release*: Dec 08, 2024, + *last release*: Aug 19, 2025, *status*: N/A, *requires*: pytest>=7 - A pytest-plugin for updating doctest outputs + :pypi:`pytest-adaptavist` *last release*: Oct 13, 2022, @@ -1848,6 +1952,13 @@ This list contains 1641 plugins. Pytest \`client\` fixture for the Aiohttp + :pypi:`pytest-aiohttp-mock` + *last release*: Sep 13, 2025, + *status*: 3 - Alpha, + *requires*: pytest>=8 + + Send responses to aiohttp. + :pypi:`pytest-aiomoto` *last release*: Jun 24, 2023, *status*: N/A, @@ -1932,6 +2043,13 @@ This list contains 1641 plugins. pytest plugin to test case doc string dls instructions + :pypi:`pytest-allure-host` + *last release*: Oct 21, 2025, + *status*: 3 - Alpha, + *requires*: N/A + + Publish Allure static reports to private S3 behind CloudFront with history preservation + :pypi:`pytest-allure-id2history` *last release*: May 14, 2024, *status*: 4 - Beta, @@ -1953,6 +2071,13 @@ This list contains 1641 plugins. The pytest plugin aimed to display test coverage of the specs(requirements) in Allure + :pypi:`pytest-allure-step` + *last release*: Jul 13, 2025, + *status*: 3 - Alpha, + *requires*: pytest>=6.0.0 + + Enhanced logging integration with Allure reports for pytest + :pypi:`pytest-alphamoon` *last release*: Dec 30, 2021, *status*: 5 - Production/Stable, @@ -2003,7 +2128,7 @@ This list contains 1641 plugins. Pytest plugin to allow use of Annotated in tests to resolve fixtures :pypi:`pytest-ansible` - *last release*: May 26, 2025, + *last release*: Aug 21, 2025, *status*: 5 - Production/Stable, *requires*: pytest>=6 @@ -2072,6 +2197,27 @@ This list contains 1641 plugins. 
An ASGI middleware to populate OpenAPI Specification examples from pytest functions + :pypi:`pytest-api-cov` + *last release*: Oct 28, 2025, + *status*: N/A, + *requires*: pytest>=6.0.0 + + Pytest Plugin to provide API Coverage statistics for Python Web Frameworks + + :pypi:`pytest-api-framework` + *last release*: Jun 22, 2025, + *status*: N/A, + *requires*: pytest==7.2.2 + + pytest framework + + :pypi:`pytest-api-framework-alpha` + *last release*: Oct 29, 2025, + *status*: N/A, + *requires*: pytest==7.2.2 + + + :pypi:`pytest-api-soup` *last release*: Aug 27, 2022, *status*: N/A, @@ -2107,6 +2253,13 @@ This list contains 1641 plugins. Pytest plugin for appium + :pypi:`pytest-approval` + *last release*: Oct 27, 2025, + *status*: N/A, + *requires*: pytest>=8.3.5 + + A simple approval test library utilizing external diff programs such as PyCharm and Visual Studio Code to compare approved and received output. + :pypi:`pytest-approvaltests` *last release*: May 08, 2022, *status*: 4 - Beta, @@ -2115,16 +2268,16 @@ This list contains 1641 plugins. A plugin to use approvaltests with pytest :pypi:`pytest-approvaltests-geo` - *last release*: Feb 05, 2024, + *last release*: Jul 14, 2025, *status*: 5 - Production/Stable, *requires*: pytest Extension for ApprovalTests.Python specific to geo data verification :pypi:`pytest-archon` - *last release*: Dec 18, 2023, + *last release*: Sep 19, 2025, *status*: 5 - Production/Stable, - *requires*: pytest >=7.2 + *requires*: pytest>=7.2 Rule your architecture like a real developer @@ -2135,6 +2288,13 @@ This list contains 1641 plugins. pyest results colection plugin + :pypi:`pytest-argus-reporter` + *last release*: Sep 17, 2025, + *status*: 4 - Beta, + *requires*: pytest>=3.0; extra == "dev" + + A simple plugin to report results of test into argus + :pypi:`pytest-argus-server` *last release*: Mar 24, 2025, *status*: 4 - Beta, @@ -2149,6 +2309,13 @@ This list contains 1641 plugins. 
pytest plugin to help with comparing array output from tests + :pypi:`pytest-asdf-plugin` + *last release*: Aug 18, 2025, + *status*: 5 - Production/Stable, + *requires*: pytest>=7 + + Pytest plugin for testing ASDF schemas + :pypi:`pytest-asgi-server` *last release*: Dec 12, 2020, *status*: N/A, @@ -2184,6 +2351,13 @@ This list contains 1641 plugins. Pytest Assertions + :pypi:`pytest-assert-type` + *last release*: Oct 26, 2025, + *status*: 3 - Alpha, + *requires*: pytest>=6.2.0 + + Use typing.assert_type() to test runtime behavior + :pypi:`pytest-assertutil` *last release*: May 10, 2019, *status*: N/A, @@ -2199,11 +2373,11 @@ This list contains 1641 plugins. Useful assertion utilities for use with pytest :pypi:`pytest-assist` - *last release*: Mar 17, 2025, - *status*: N/A, + *last release*: Oct 29, 2025, + *status*: 4 - Beta, *requires*: pytest - load testing library + pytest plugin library :pypi:`pytest-assume` *last release*: Jun 24, 2021, @@ -2276,8 +2450,8 @@ This list contains 1641 plugins. Pytest fixtures for async generators :pypi:`pytest-asyncio` - *last release*: May 26, 2025, - *status*: 4 - Beta, + *last release*: Sep 12, 2025, + *status*: 5 - Production/Stable, *requires*: pytest<9,>=8.2 Pytest support for asyncio @@ -2290,7 +2464,7 @@ This list contains 1641 plugins. Pytest plugin to execute python async tests concurrently. :pypi:`pytest-asyncio-cooperative` - *last release*: Apr 26, 2025, + *last release*: Jun 24, 2025, *status*: N/A, *requires*: N/A @@ -2401,6 +2575,13 @@ This list contains 1641 plugins. pytest plugin: avoid repeating arguments in parametrize + :pypi:`pytest-autoprofile` + *last release*: Aug 06, 2025, + *status*: 4 - Beta, + *requires*: pytest>=7.0 + + \`line_profiler.autoprofile\`-ing your \`pytest\` test suite + :pypi:`pytest-autotest` *last release*: Aug 25, 2021, *status*: N/A, @@ -2422,6 +2603,13 @@ This list contains 1641 plugins. 
Makes pytest skip tests that don not need rerunning + :pypi:`pytest-awaiting-fix` + *last release*: Aug 09, 2025, + *status*: 4 - Beta, + *requires*: pytest>=6.2.0 + + A simple plugin to use with pytest for traceability across Jira and disabled automated tests + :pypi:`pytest-aws` *last release*: Oct 04, 2017, *status*: 4 - Beta, @@ -2472,9 +2660,9 @@ This list contains 1641 plugins. Pytest utilities and mocks for Azure :pypi:`pytest-azure-devops` - *last release*: Jun 20, 2022, + *last release*: Jul 16, 2025, *status*: 4 - Beta, - *requires*: pytest (>=3.5.0) + *requires*: pytest>=3.5.0 Simplifies using azure devops parallel strategy (https://docs.microsoft.com/en-us/azure/devops/pipelines/test/parallel-testing-any-test-runner) with pytest. @@ -2506,6 +2694,13 @@ This list contains 1641 plugins. pytest plugin for URL based testing + :pypi:`pytest-bashdoctest` + *last release*: Oct 03, 2025, + *status*: 4 - Beta, + *requires*: pytest>=7.0.0 + + A pytest plugin for testing bash command examples in markdown documentation + :pypi:`pytest-batch-regression` *last release*: May 08, 2024, *status*: N/A, @@ -2514,7 +2709,7 @@ This list contains 1641 plugins. A pytest plugin to repeat the entire test suite in batches. :pypi:`pytest-bazel` - *last release*: May 11, 2025, + *last release*: Oct 31, 2025, *status*: 4 - Beta, *requires*: pytest @@ -2542,12 +2737,19 @@ This list contains 1641 plugins. 
BDD for pytest :pypi:`pytest-bdd-report` - *last release*: Nov 27, 2024, + *last release*: Aug 19, 2025, *status*: N/A, *requires*: pytest>=7.1.3 A pytest-bdd plugin for generating useful and informative BDD test reports + :pypi:`pytest-bdd-reporter` + *last release*: Oct 14, 2025, + *status*: 5 - Production/Stable, + *requires*: pytest>=6.0.0 + + Enterprise-grade BDD test reporting with interactive dashboards, suite management, and comprehensive email integration + :pypi:`pytest-bdd-splinter` *last release*: Aug 12, 2019, *status*: 5 - Production/Stable, @@ -2584,7 +2786,7 @@ This list contains 1641 plugins. Pytest plugin to run your tests with beartype checking enabled. :pypi:`pytest-bec-e2e` - *last release*: May 30, 2025, + *last release*: Oct 31, 2025, *status*: 3 - Alpha, *requires*: pytest @@ -2612,7 +2814,7 @@ This list contains 1641 plugins. Benchmark utility that plugs into pytest. :pypi:`pytest-benchmark` - *last release*: Oct 30, 2024, + *last release*: Oct 30, 2025, *status*: 5 - Production/Stable, *requires*: pytest>=8.1 @@ -2689,7 +2891,7 @@ This list contains 1641 plugins. A pytest plugin helps developers to debug by providing useful commits history. :pypi:`pytest-blender` - *last release*: Aug 02, 2024, + *last release*: Jun 25, 2025, *status*: N/A, *requires*: pytest @@ -2716,6 +2918,13 @@ This list contains 1641 plugins. pytest plugin to mark a test as blocker and skip all other tests + :pypi:`pytest-b-logger` + *last release*: Oct 28, 2025, + *status*: N/A, + *requires*: pytest + + BLogger is a Pytest plugin for enhanced test logging and generating convenient and lightweight reports. + :pypi:`pytest-blue` *last release*: Sep 05, 2022, *status*: N/A, @@ -2730,6 +2939,13 @@ This list contains 1641 plugins. Local continuous test runner with pytest and watchdog. + :pypi:`pytest-boardfarm3` + *last release*: Sep 15, 2025, + *status*: N/A, + *requires*: pytest + + Integrate boardfarm as a pytest plugin. 
+ :pypi:`pytest-boilerplate` *last release*: Sep 12, 2024, *status*: 5 - Production/Stable, @@ -2807,6 +3023,13 @@ This list contains 1641 plugins. A pytest plugin for running tests on a Briefcase project. + :pypi:`pytest-brightest` + *last release*: Jul 15, 2025, + *status*: 3 - Alpha, + *requires*: pytest>=8.4.1 + + Bright ideas for improving your pytest experience + :pypi:`pytest-broadcaster` *last release*: Mar 02, 2025, *status*: 3 - Alpha, @@ -2850,9 +3073,9 @@ This list contains 1641 plugins. Budo Systems is a martial arts school management system. This module is the Budo Systems Pytest Plugin. :pypi:`pytest-bug` - *last release*: Jun 05, 2024, + *last release*: Jun 17, 2025, *status*: 5 - Production/Stable, - *requires*: pytest>=8.0.0 + *requires*: pytest>=8.4.0 Pytest plugin for marking tests as a bug @@ -2947,6 +3170,13 @@ This list contains 1641 plugins. A plugin which allows to compare results with canonical results, based on previous runs + :pypi:`pytest-canvas` + *last release*: Jul 22, 2025, + *status*: N/A, + *requires*: pytest<9,>=8.4 + + A minimal pytest plugin that streamlines testing for projects using the Canvas SDK. + :pypi:`pytest-caprng` *last release*: May 02, 2018, *status*: 4 - Beta, @@ -2968,13 +3198,6 @@ This list contains 1641 plugins. pytest plugin to capture all deprecatedwarnings and put them in one file - :pypi:`pytest-capture-sysout` - *last release*: May 21, 2025, - *status*: 2 - Pre-Alpha, - *requires*: N/A - - An academic experiment package - :pypi:`pytest-capture-warnings` *last release*: May 03, 2022, *status*: N/A, @@ -2989,13 +3212,34 @@ This list contains 1641 plugins. A clean, modern, wrapper for pytest.mark.parametrize + :pypi:`pytest-case-provider` + *last release*: Oct 26, 2025, + *status*: 3 - Alpha, + *requires*: pytest<9,>=8 + + Advanced pytest parametrization plugin that generates test case instances from sync or async factories. 
+ :pypi:`pytest-cases` - *last release*: Sep 26, 2024, + *last release*: Jun 09, 2025, *status*: 5 - Production/Stable, - *requires*: N/A + *requires*: pytest Separate test code from test cases in pytest. + :pypi:`pytest-case-start-from` + *last release*: Oct 28, 2025, + *status*: 4 - Beta, + *requires*: pytest>=6.0.0 + + A pytest plugin to start test execution from a specific test case + + :pypi:`pytest-casewise-package-install` + *last release*: Oct 31, 2025, + *status*: 3 - Alpha, + *requires*: pytest>=6.0.0 + + A pytest plugin for test case-level dynamic dependency management + :pypi:`pytest-cassandra` *last release*: Nov 04, 2017, *status*: 1 - Planning, @@ -3025,7 +3269,7 @@ This list contains 1641 plugins. A pytest plugin to split your test suite into multiple parts :pypi:`pytest-celery` - *last release*: Feb 21, 2025, + *last release*: Jul 30, 2025, *status*: 5 - Production/Stable, *requires*: N/A @@ -3095,7 +3339,7 @@ This list contains 1641 plugins. A pytest fixture for changing current working directory :pypi:`pytest-check` - *last release*: Apr 04, 2025, + *last release*: Oct 07, 2025, *status*: 5 - Production/Stable, *requires*: pytest>=7.0.0 @@ -3151,7 +3395,7 @@ This list contains 1641 plugins. pytest plugin to test Check_MK checks :pypi:`pytest-checkpoint` - *last release*: Mar 30, 2025, + *last release*: Oct 04, 2025, *status*: N/A, *requires*: pytest>=8.0.0 @@ -3185,6 +3429,13 @@ This list contains 1641 plugins. Provide the pytest with the ability to collect use cases based on rules in text files + :pypi:`pytest-chronicle` + *last release*: Oct 30, 2025, + *status*: N/A, + *requires*: pytest>=8.0; extra == "dev" + + Reusable pytest results ingestion tooling with database export and CLI helpers. + :pypi:`pytest-chunks` *last release*: Jul 05, 2022, *status*: N/A, @@ -3270,14 +3521,14 @@ This list contains 1641 plugins. Automated, comprehensive and well-organised pytest test cases. 
:pypi:`pytest-cleanuptotal` - *last release*: Nov 08, 2024, + *last release*: Jul 22, 2025, *status*: 5 - Production/Stable, *requires*: N/A A cleanup plugin for pytest :pypi:`pytest-clerk` - *last release*: Jan 30, 2025, + *last release*: Aug 30, 2025, *status*: N/A, *requires*: pytest<9.0.0,>=8.0.0 @@ -3333,7 +3584,7 @@ This list contains 1641 plugins. Distribute tests to cloud machines without fuss :pypi:`pytest-cmake` - *last release*: Feb 17, 2025, + *last release*: Aug 14, 2025, *status*: N/A, *requires*: pytest<9,>=4 @@ -3424,7 +3675,7 @@ This list contains 1641 plugins. pytest plugin to run pycodestyle :pypi:`pytest-codspeed` - *last release*: May 27, 2025, + *last release*: Oct 24, 2025, *status*: 5 - Production/Stable, *requires*: pytest>=3.8 @@ -3487,7 +3738,7 @@ This list contains 1641 plugins. An interactive GUI test runner for PyTest :pypi:`pytest-common-subject` - *last release*: Jun 12, 2024, + *last release*: Oct 22, 2025, *status*: N/A, *requires*: pytest<9,>=3.6 @@ -3507,6 +3758,13 @@ This list contains 1641 plugins. Concurrently execute test cases with multithread, multiprocess and gevent + :pypi:`pytest-conductor` + *last release*: Jul 30, 2025, + *status*: N/A, + *requires*: pytest<8.4; python_version == "3.8" + + Pytest plugin for coordinating the order in which marked tests run. + :pypi:`pytest-config` *last release*: Nov 07, 2014, *status*: 5 - Production/Stable, @@ -3536,7 +3794,7 @@ This list contains 1641 plugins. pytest plugin with fixtures for testing consul aware apps :pypi:`pytest-container` - *last release*: Dec 04, 2024, + *last release*: Jun 30, 2025, *status*: 4 - Beta, *requires*: pytest>=3.10 @@ -3571,7 +3829,7 @@ This list contains 1641 plugins. The pytest plugin for your Cookiecutter templates. 🍪 :pypi:`pytest-copie` - *last release*: Apr 09, 2025, + *last release*: Sep 29, 2025, *status*: 3 - Alpha, *requires*: pytest @@ -3599,9 +3857,9 @@ This list contains 1641 plugins. 
count erros and send email :pypi:`pytest-cov` - *last release*: Apr 05, 2025, + *last release*: Sep 09, 2025, *status*: 5 - Production/Stable, - *requires*: pytest>=4.6 + *requires*: pytest>=7 Pytest plugin for measuring coverage. @@ -3690,12 +3948,19 @@ This list contains 1641 plugins. A pytest plugin for reporting test results to CrateDB :pypi:`pytest-crayons` - *last release*: Oct 08, 2023, - *status*: N/A, + *last release*: Oct 14, 2025, + *status*: 5 - Production/Stable, *requires*: pytest A pytest plugin for colorful print statements + :pypi:`pytest-cream` + *last release*: Oct 26, 2025, + *status*: N/A, + *requires*: pytest + + The cream of test execution - smooth pytest workflows with intelligent orchestration + :pypi:`pytest-create` *last release*: Feb 15, 2023, *status*: 1 - Planning, @@ -3830,7 +4095,7 @@ This list contains 1641 plugins. pytest fixtures to run dash applications. :pypi:`pytest-dashboard` - *last release*: May 20, 2025, + *last release*: Jun 02, 2025, *status*: N/A, *requires*: pytest<8.0.0,>=7.4.3 @@ -3844,7 +4109,7 @@ This list contains 1641 plugins. Useful functions for managing data for pytest fixtures :pypi:`pytest-databases` - *last release*: May 25, 2025, + *last release*: Oct 06, 2025, *status*: 4 - Beta, *requires*: pytest @@ -3858,7 +4123,7 @@ This list contains 1641 plugins. Pytest plugin for remote Databricks notebooks testing :pypi:`pytest-datadir` - *last release*: May 30, 2025, + *last release*: Jul 30, 2025, *status*: 5 - Production/Stable, *requires*: pytest>=7.0 @@ -3920,6 +4185,20 @@ This list contains 1641 plugins. pytest plugin to provide data from files loaded automatically + :pypi:`pytest-dataguard` + *last release*: Oct 08, 2025, + *status*: N/A, + *requires*: pytest>=8.4.2 + + Data validation and integrity testing for your datasets using pytest. 
+ + :pypi:`pytest-data-loader` + *last release*: Oct 29, 2025, + *status*: 4 - Beta, + *requires*: pytest<9,>=7.0.0 + + Pytest plugin for loading test data for data-driven testing (DDT) + :pypi:`pytest-dataplugin` *last release*: Sep 16, 2017, *status*: 1 - Planning, @@ -4005,7 +4284,7 @@ This list contains 1641 plugins. Pytest extension for dbt. :pypi:`pytest-dbt-duckdb` - *last release*: Feb 09, 2025, + *last release*: Oct 28, 2025, *status*: 4 - Beta, *requires*: pytest>=8.3.4 @@ -4053,6 +4332,13 @@ This list contains 1641 plugins. Identifies duplicate unit tests + :pypi:`pytest-deepassert` + *last release*: Sep 02, 2025, + *status*: 3 - Alpha, + *requires*: pytest>=7.0.0 + + A pytest plugin for enhanced assertion reporting with detailed diffs + :pypi:`pytest-deepcov` *last release*: Mar 30, 2021, *status*: N/A, @@ -4067,6 +4353,13 @@ This list contains 1641 plugins. A 'defer' fixture for pytest + :pypi:`pytest-delta` + *last release*: Oct 27, 2025, + *status*: 4 - Beta, + *requires*: pytest>=7.0 + + Run only tests impacted by your code changes (delta-based selection) for pytest. + :pypi:`pytest-demo-plugin` *last release*: May 15, 2021, *status*: N/A, @@ -4088,6 +4381,13 @@ This list contains 1641 plugins. Tests that depend on other tests + :pypi:`pytest-depper` + *last release*: Oct 23, 2025, + *status*: 4 - Beta, + *requires*: pytest>=7.0.0 + + Smart test selection based on AST-level code dependency analysis + :pypi:`pytest-deprecate` *last release*: Jul 01, 2019, *status*: N/A, @@ -4103,9 +4403,9 @@ This list contains 1641 plugins. A simple plugin to use with pytest :pypi:`pytest-describe` - *last release*: Feb 10, 2024, + *last release*: Oct 23, 2025, *status*: 5 - Production/Stable, - *requires*: pytest <9,>=4.6 + *requires*: pytest<9,>=6 Describe-style plugin for pytest @@ -4131,7 +4431,7 @@ This list contains 1641 plugins. 
DevPI server fixture for py.test :pypi:`pytest-dfm` - *last release*: May 10, 2025, + *last release*: Sep 13, 2025, *status*: N/A, *requires*: pytest @@ -4186,6 +4486,13 @@ This list contains 1641 plugins. PyTest plugin for generating Difido reports + :pypi:`pytest-directives` + *last release*: Aug 11, 2025, + *status*: 3 - Alpha, + *requires*: pytest + + Control your tests flow + :pypi:`pytest-dir-equal` *last release*: Dec 11, 2023, *status*: 4 - Beta, @@ -4194,7 +4501,7 @@ This list contains 1641 plugins. pytest-dir-equals is a pytest plugin providing helpers to assert directories equality allowing golden testing :pypi:`pytest-dirty` - *last release*: Jul 11, 2024, + *last release*: Jun 08, 2025, *status*: 3 - Alpha, *requires*: pytest>=8.2; extra == "dev" @@ -4264,8 +4571,8 @@ This list contains 1641 plugins. A Django plugin for pytest. :pypi:`pytest-djangoapp` - *last release*: May 19, 2023, - *status*: 4 - Beta, + *last release*: Sep 28, 2025, + *status*: 5 - Production/Stable, *requires*: pytest Nice pytest plugin to help you with Django pluggable application testing. @@ -4425,7 +4732,7 @@ This list contains 1641 plugins. An RST Documentation Generator for pytest-based test suites :pypi:`pytest-docker` - *last release*: May 26, 2025, + *last release*: Jul 04, 2025, *status*: N/A, *requires*: pytest<9.0,>=4.0 @@ -4474,7 +4781,7 @@ This list contains 1641 plugins. A plugin to use docker databases for pytests :pypi:`pytest-docker-fixtures` - *last release*: May 14, 2025, + *last release*: Jun 25, 2025, *status*: 3 - Alpha, *requires*: pytest @@ -4585,8 +4892,15 @@ This list contains 1641 plugins. 
Run pytest --doctest-modules with markdown docstrings in code blocks (\`\`\`) + :pypi:`pytest-doctest-only` + *last release*: Jul 30, 2025, + *status*: 4 - Beta, + *requires*: pytest>=8.3.0 + + A plugin to run only doctest + :pypi:`pytest-doctestplus` - *last release*: Jan 25, 2025, + *last release*: Oct 18, 2025, *status*: 5 - Production/Stable, *requires*: pytest>=4.6 @@ -4641,6 +4955,13 @@ This list contains 1641 plugins. A py.test plugin that parses environment files before running tests + :pypi:`pytest-dotenv-modern` + *last release*: Sep 27, 2025, + *status*: 4 - Beta, + *requires*: pytest>=6.0.0 + + A modern pytest plugin that loads environment variables from dotenv files + :pypi:`pytest-dot-only-pkcopley` *last release*: Oct 27, 2023, *status*: N/A, @@ -4676,6 +4997,13 @@ This list contains 1641 plugins. A Django REST framework plugin for pytest. + :pypi:`pytest-drill-sergeant` + *last release*: Sep 12, 2025, + *status*: 4 - Beta, + *requires*: pytest>=7.0.0 + + A pytest plugin that enforces test quality standards through automatic marker detection and AAA structure validation + :pypi:`pytest-drivings` *last release*: Jan 13, 2021, *status*: N/A, @@ -4705,12 +5033,26 @@ This list contains 1641 plugins. A Pytest plugin to ignore tests during collection without reporting them in the test summary. :pypi:`pytest-dsl` - *last release*: May 29, 2025, + *last release*: Oct 31, 2025, *status*: N/A, *requires*: pytest>=7.0.0 A DSL testing framework based on pytest + :pypi:`pytest-dsl-ssh` + *last release*: Jul 25, 2025, + *status*: 4 - Beta, + *requires*: pytest>=7.0.0 + + SSH/SFTP关键字插件,为pytest-dsl提供SSH和SFTP操作能力 + + :pypi:`pytest-dsl-ui` + *last release*: Aug 21, 2025, + *status*: N/A, + *requires*: pytest>=7.0.0; extra == "dev" + + Playwright-based UI automation keywords for pytest-dsl framework + :pypi:`pytest-dummynet` *last release*: Dec 15, 2021, *status*: 5 - Production/Stable, @@ -4733,12 +5075,19 @@ This list contains 1641 plugins. 
:pypi:`pytest-durations` - *last release*: Apr 29, 2025, + *last release*: Aug 29, 2025, *status*: 5 - Production/Stable, *requires*: pytest>=4.6 Pytest plugin reporting fixtures and test functions execution time. + :pypi:`pytest-dynamic-parameterize` + *last release*: Oct 14, 2025, + *status*: N/A, + *requires*: pytest + + A Python package for managing pytest plugins. + :pypi:`pytest-dynamicrerun` *last release*: Aug 15, 2020, *status*: 4 - Beta, @@ -4830,13 +5179,6 @@ This list contains 1641 plugins. Elasticsearch fixtures and fixture factories for Pytest. - :pypi:`pytest-elbase` - *last release*: Apr 15, 2025, - *status*: N/A, - *requires*: N/A - - Elbase pytest plugin - :pypi:`pytest-elements` *last release*: Jan 13, 2021, *status*: N/A, @@ -4866,63 +5208,63 @@ This list contains 1641 plugins. Send execution result email :pypi:`pytest-embedded` - *last release*: Apr 22, 2025, + *last release*: Oct 27, 2025, *status*: 5 - Production/Stable, *requires*: pytest>=7.0 A pytest plugin that designed for embedded testing. :pypi:`pytest-embedded-arduino` - *last release*: Apr 22, 2025, + *last release*: Oct 27, 2025, *status*: 5 - Production/Stable, *requires*: N/A Make pytest-embedded plugin work with Arduino. :pypi:`pytest-embedded-idf` - *last release*: Apr 22, 2025, + *last release*: Oct 27, 2025, *status*: 5 - Production/Stable, *requires*: N/A Make pytest-embedded plugin work with ESP-IDF. :pypi:`pytest-embedded-jtag` - *last release*: Apr 22, 2025, + *last release*: Oct 27, 2025, *status*: 5 - Production/Stable, *requires*: N/A Make pytest-embedded plugin work with JTAG. :pypi:`pytest-embedded-nuttx` - *last release*: Apr 22, 2025, + *last release*: Oct 27, 2025, *status*: 5 - Production/Stable, *requires*: N/A Make pytest-embedded plugin work with NuttX. :pypi:`pytest-embedded-qemu` - *last release*: Apr 22, 2025, + *last release*: Oct 27, 2025, *status*: 5 - Production/Stable, *requires*: N/A Make pytest-embedded plugin work with QEMU. 
:pypi:`pytest-embedded-serial` - *last release*: Apr 22, 2025, + *last release*: Oct 27, 2025, *status*: 5 - Production/Stable, *requires*: N/A Make pytest-embedded plugin work with Serial. :pypi:`pytest-embedded-serial-esp` - *last release*: Apr 22, 2025, + *last release*: Oct 27, 2025, *status*: 5 - Production/Stable, *requires*: N/A Make pytest-embedded plugin work with Espressif target boards. :pypi:`pytest-embedded-wokwi` - *last release*: Apr 22, 2025, + *last release*: Oct 27, 2025, *status*: 5 - Production/Stable, *requires*: N/A @@ -4999,9 +5341,9 @@ This list contains 1641 plugins. Improvements for pytest (rejected upstream) :pypi:`pytest-env` - *last release*: Sep 17, 2024, + *last release*: Oct 09, 2025, *status*: 5 - Production/Stable, - *requires*: pytest>=8.3.3 + *requires*: pytest>=8.4.2 pytest plugin that allows you to add environment variables. @@ -5040,6 +5382,13 @@ This list contains 1641 plugins. Pytest plugin to validate use of envvars on your tests + :pypi:`pytest-envx` + *last release*: Jun 28, 2025, + *status*: 4 - Beta, + *requires*: pytest>=8.4.1 + + Pytest plugin for managing environment variables with interpolation and .env file support. + :pypi:`pytest-env-yaml` *last release*: Apr 02, 2019, *status*: N/A, @@ -5146,14 +5495,14 @@ This list contains 1641 plugins. Pytest plugin for testing examples in docstrings and markdown files. :pypi:`pytest-exasol-backend` - *last release*: Feb 11, 2025, + *last release*: Oct 29, 2025, *status*: N/A, *requires*: pytest<9,>=7 :pypi:`pytest-exasol-extension` - *last release*: Feb 11, 2025, + *last release*: Oct 29, 2025, *status*: N/A, *requires*: pytest<9,>=7 @@ -5174,16 +5523,16 @@ This list contains 1641 plugins. 
:pypi:`pytest-exasol-slc` - *last release*: Feb 11, 2025, + *last release*: Oct 30, 2025, *status*: N/A, *requires*: pytest<9,>=7 :pypi:`pytest-excel` - *last release*: Jun 18, 2024, + *last release*: Jul 22, 2025, *status*: 5 - Production/Stable, - *requires*: pytest>3.6 + *requires*: pytest pytest plugin for generating excel reports @@ -5349,7 +5698,7 @@ This list contains 1641 plugins. Additional pytest markers to dynamically enable/disable tests viia CLI flags :pypi:`pytest-f3ts` - *last release*: May 08, 2025, + *last release*: Jul 15, 2025, *status*: N/A, *requires*: pytest<8.0.0,>=7.2.1 @@ -5370,9 +5719,9 @@ This list contains 1641 plugins. Use factories for test setup with py.test :pypi:`pytest-factoryboy` - *last release*: Mar 05, 2024, + *last release*: Jul 01, 2025, *status*: 6 - Mature, - *requires*: pytest (>=6.2) + *requires*: pytest>=7.0 Factory Boy support for pytest. @@ -5552,9 +5901,9 @@ This list contains 1641 plugins. Pytest plugin for filtering based on sub-packages :pypi:`pytest-find-dependencies` - *last release*: Mar 16, 2024, - *status*: 4 - Beta, - *requires*: pytest >=4.3.0 + *last release*: Jul 16, 2025, + *status*: 5 - Production/Stable, + *requires*: pytest>=6.2.4 A pytest plugin to find dependencies between tests @@ -5573,19 +5922,26 @@ This list contains 1641 plugins. :pypi:`pytest-fixturecheck` - *last release*: May 17, 2025, + *last release*: Jun 02, 2025, *status*: 3 - Alpha, *requires*: pytest>=6.0.0 A pytest plugin to check fixture validity before test execution :pypi:`pytest-fixture-classes` - *last release*: Sep 02, 2023, + *last release*: Oct 12, 2025, *status*: 5 - Production/Stable, - *requires*: pytest + *requires*: N/A Fixtures as classes that work well with dependency injection, autocompletetion, type checkers, and language servers + :pypi:`pytest-fixture-collect` + *last release*: Jul 25, 2025, + *status*: N/A, + *requires*: pytest; extra == "test" + + A utility to collect pytest fixture file paths. 
+ :pypi:`pytest-fixturecollection` *last release*: Feb 22, 2024, *status*: 4 - Beta, @@ -5622,9 +5978,9 @@ This list contains 1641 plugins. A pytest plugin to add markers based on fixtures used. :pypi:`pytest-fixture-order` - *last release*: May 16, 2022, + *last release*: Oct 22, 2025, *status*: 5 - Production/Stable, - *requires*: pytest (>=3.0) + *requires*: pytest>=3.0 pytest plugin to control fixture evaluation order @@ -5656,6 +6012,13 @@ This list contains 1641 plugins. Common fixtures for pytest + :pypi:`pytest-fixtures-fixtures` + *last release*: Sep 14, 2025, + *status*: 4 - Beta, + *requires*: pytest>=8.4.1 + + Handy fixtues to access your fixtures from your _pytest tests. + :pypi:`pytest-fixture-tools` *last release*: Apr 30, 2025, *status*: 6 - Mature, @@ -5678,7 +6041,7 @@ This list contains 1641 plugins. pytest plugin to check FLAKE8 requirements :pypi:`pytest-flake8-path` - *last release*: Oct 25, 2024, + *last release*: Sep 09, 2025, *status*: 5 - Production/Stable, *requires*: pytest @@ -5769,7 +6132,7 @@ This list contains 1641 plugins. A pytest plugin in order to provide logs via fluentbit :pypi:`pytest-fly` - *last release*: May 19, 2025, + *last release*: Jun 07, 2025, *status*: 3 - Alpha, *requires*: pytest @@ -5782,6 +6145,13 @@ This list contains 1641 plugins. Pytest fixtures for simplifying Flyte integration testing + :pypi:`pytest-fmu-filter` + *last release*: Jun 23, 2025, + *status*: 4 - Beta, + *requires*: pytest>=7.0.0 + + A pytest plugin to filter fmus + :pypi:`pytest-focus` *last release*: May 04, 2019, *status*: 4 - Beta, @@ -5832,7 +6202,7 @@ This list contains 1641 plugins. pytest plugin for running parallel tests :pypi:`pytest-freezeblaster` - *last release*: Feb 11, 2025, + *last release*: Oct 13, 2025, *status*: N/A, *requires*: pytest>=6.2.5 @@ -5888,7 +6258,7 @@ This list contains 1641 plugins. An alternative way to parametrize test cases. 
:pypi:`pytest-fv` - *last release*: Feb 27, 2025, + *last release*: Jun 06, 2025, *status*: N/A, *requires*: pytest @@ -5965,7 +6335,7 @@ This list contains 1641 plugins. GCS fixtures and fixture factories for Pytest. :pypi:`pytest-gee` - *last release*: May 11, 2025, + *last release*: Oct 16, 2025, *status*: 3 - Alpha, *requires*: pytest @@ -6000,7 +6370,7 @@ This list contains 1641 plugins. For finding/executing Ghost Inspector tests :pypi:`pytest-girder` - *last release*: May 29, 2025, + *last release*: Sep 30, 2025, *status*: N/A, *requires*: pytest>=3.6 @@ -6014,7 +6384,7 @@ This list contains 1641 plugins. Git repository fixture for py.test :pypi:`pytest-gitconfig` - *last release*: Aug 11, 2024, + *last release*: Oct 12, 2025, *status*: 4 - Beta, *requires*: pytest>=7.1.2 @@ -6097,6 +6467,13 @@ This list contains 1641 plugins. Folds output sections in GitLab CI build log + :pypi:`pytest-gitscope` + *last release*: Sep 24, 2025, + *status*: 5 - Production/Stable, + *requires*: pytest>=7.0.0 + + A pragmatic pytest plugin that runs only the tests that matter, and ship faster + :pypi:`pytest-git-selector` *last release*: Nov 17, 2022, *status*: N/A, @@ -6105,9 +6482,9 @@ This list contains 1641 plugins. Utility to select tests that have had its dependencies modified (as identified by git diff) :pypi:`pytest-glamor-allure` - *last release*: Apr 30, 2024, + *last release*: Jul 20, 2025, *status*: 4 - Beta, - *requires*: pytest<=8.2.0 + *requires*: pytest<=8.4.1 Extends allure-pytest functionality @@ -6140,12 +6517,19 @@ This list contains 1641 plugins. Notify google chat channel for test results :pypi:`pytest-google-cloud-storage` - *last release*: May 22, 2025, + *last release*: Sep 11, 2025, *status*: N/A, - *requires*: pytest==8.3.5 + *requires*: pytest>=8.0.0 Pytest custom features, e.g. fixtures and various tests. 
Aimed to emulate Google Cloud Storage service + :pypi:`pytest-grader` + *last release*: Aug 25, 2025, + *status*: N/A, + *requires*: pytest>=8 + + Pytest extension for scoring programming assignments. + :pypi:`pytest-gradescope` *last release*: Apr 29, 2025, *status*: N/A, @@ -6167,6 +6551,20 @@ This list contains 1641 plugins. Green progress dots + :pypi:`pytest-greener` + *last release*: Oct 18, 2025, + *status*: N/A, + *requires*: pytest<9.0.0,>=8.3.3 + + Pytest plugin for Greener + + :pypi:`pytest-greet` + *last release*: Oct 21, 2025, + *status*: N/A, + *requires*: N/A + + + :pypi:`pytest-group-by-class` *last release*: Jun 27, 2023, *status*: 5 - Production/Stable, @@ -6188,6 +6586,13 @@ This list contains 1641 plugins. pytest plugin for grpc + :pypi:`pytest-grpc-aio` + *last release*: Oct 28, 2025, + *status*: N/A, + *requires*: pytest>=3.6.0 + + pytest plugin for grpc.aio + :pypi:`pytest-grunnur` *last release*: Jul 26, 2024, *status*: N/A, @@ -6322,9 +6727,9 @@ This list contains 1641 plugins. A pytest plugin for use with homeassistant custom components. :pypi:`pytest-homeassistant-custom-component` - *last release*: May 30, 2025, + *last release*: Oct 31, 2025, *status*: 3 - Alpha, - *requires*: pytest==8.3.5 + *requires*: pytest==8.4.2 Experimental package to automatically extract test plugins for Home Assistant custom components @@ -6391,6 +6796,13 @@ This list contains 1641 plugins. pytest plugin for generating HTML reports + :pypi:`pytest-html5` + *last release*: Oct 11, 2025, + *status*: N/A, + *requires*: N/A + + the best report for pytest + :pypi:`pytest-html-cn` *last release*: Aug 19, 2024, *status*: 5 - Production/Stable, @@ -6412,6 +6824,13 @@ This list contains 1641 plugins. Pytest HTML reports merging utility + :pypi:`pytest-html-nova-act` + *last release*: Sep 05, 2025, + *status*: N/A, + *requires*: N/A + + A Pytest Plugin for Amazon Nova Act Python SDK. 
+ :pypi:`pytest-html-object-storage` *last release*: Jan 17, 2024, *status*: 5 - Production/Stable, @@ -6419,6 +6838,13 @@ This list contains 1641 plugins. Pytest report plugin for send HTML report on object-storage + :pypi:`pytest-html-plus` + *last release*: Oct 30, 2025, + *status*: N/A, + *requires*: N/A + + Get started with rich pytest reports in under 3 seconds. Just install the plugin — no setup required. The simplest, fastest reporter for pytest. + :pypi:`pytest-html-profiling` *last release*: Feb 11, 2020, *status*: 5 - Production/Stable, @@ -6426,6 +6852,13 @@ This list contains 1641 plugins. Pytest plugin for generating HTML reports with per-test profiling and optionally call graph visualizations. Based on pytest-html by Dave Hunt. + :pypi:`pytest-html-report` + *last release*: Jun 24, 2025, + *status*: 4 - Beta, + *requires*: pytest>=6.0 + + Enhanced HTML reporting for pytest with categories, specifications, and detailed logging + :pypi:`pytest-html-reporter` *last release*: Feb 13, 2022, *status*: N/A, @@ -6447,6 +6880,13 @@ This list contains 1641 plugins. pytest plugin for generating HTML reports + :pypi:`pytest-htmlx` + *last release*: Sep 09, 2025, + *status*: 4 - Beta, + *requires*: pytest + + Custom HTML report plugin for Pytest with charts and tables + :pypi:`pytest-http` *last release*: Aug 22, 2024, *status*: N/A, @@ -6461,8 +6901,50 @@ This list contains 1641 plugins. 
Easily test your HTTP library against a local copy of httpbin + :pypi:`pytest-httpchain` + *last release*: Aug 16, 2025, + *status*: 5 - Production/Stable, + *requires*: N/A + + pytest plugin for HTTP testing using JSON files + + :pypi:`pytest-httpchain-jsonref` + *last release*: Aug 16, 2025, + *status*: N/A, + *requires*: N/A + + JSON reference ($ref) support for pytest-httpchain + + :pypi:`pytest-httpchain-mcp` + *last release*: Aug 16, 2025, + *status*: N/A, + *requires*: N/A + + MCP server for pytest-httpchain + + :pypi:`pytest-httpchain-models` + *last release*: Aug 16, 2025, + *status*: N/A, + *requires*: N/A + + Pydantic models for pytest-httpchain + + :pypi:`pytest-httpchain-templates` + *last release*: Aug 16, 2025, + *status*: N/A, + *requires*: N/A + + Templating support for pytest-httpchain + + :pypi:`pytest-httpchain-userfunc` + *last release*: Aug 16, 2025, + *status*: N/A, + *requires*: N/A + + User functions support for pytest-httpchain + :pypi:`pytest-httpdbg` - *last release*: May 08, 2025, + *last release*: Oct 26, 2025, *status*: 4 - Beta, *requires*: pytest>=7.0.0 @@ -6539,16 +7021,16 @@ This list contains 1641 plugins. help hypo module for pytest :pypi:`pytest-iam` - *last release*: Apr 24, 2025, + *last release*: Jul 25, 2025, *status*: 4 - Beta, *requires*: pytest>=7.0.0 A fully functional OAUTH2 / OpenID Connect (OIDC) / SCIM server to be used in your testsuite :pypi:`pytest-ibutsu` - *last release*: Feb 06, 2025, + *last release*: Oct 21, 2025, *status*: 4 - Beta, - *requires*: pytest>=7.1 + *requires*: pytest A plugin to sent pytest results to an Ibutsu server @@ -6602,14 +7084,14 @@ This list contains 1641 plugins. :pypi:`pytest-image-snapshot` - *last release*: Jul 01, 2024, + *last release*: Jul 16, 2025, *status*: 4 - Beta, *requires*: pytest>=3.5.0 A pytest plugin for image snapshot management and comparison. 
:pypi:`pytest-impacted` - *last release*: May 31, 2025, + *last release*: Sep 11, 2025, *status*: 4 - Beta, *requires*: pytest>=8.0.0 @@ -6672,7 +7154,7 @@ This list contains 1641 plugins. display more node ininformation. :pypi:`pytest-infrahouse` - *last release*: Mar 18, 2025, + *last release*: Oct 29, 2025, *status*: 4 - Beta, *requires*: pytest~=8.3 @@ -6714,14 +7196,14 @@ This list contains 1641 plugins. A py.test plugin providing fixtures to simplify inmanta modules testing. :pypi:`pytest-inmanta-extensions` - *last release*: May 27, 2025, + *last release*: Jul 04, 2025, *status*: 5 - Production/Stable, *requires*: N/A Inmanta tests package :pypi:`pytest-inmanta-lsm` - *last release*: Apr 09, 2025, + *last release*: Aug 26, 2025, *status*: 5 - Production/Stable, *requires*: N/A @@ -6735,7 +7217,7 @@ This list contains 1641 plugins. Pytest library to facilitate end to end testing of inmanta projects :pypi:`pytest-inmanta-yang` - *last release*: Feb 22, 2024, + *last release*: Oct 28, 2025, *status*: 4 - Beta, *requires*: pytest @@ -6783,6 +7265,13 @@ This list contains 1641 plugins. pytest plugin to instrument tests + :pypi:`pytest-insubprocess` + *last release*: Jul 01, 2025, + *status*: 4 - Beta, + *requires*: pytest>=7.4 + + A pytest plugin to execute test cases in a subprocess + :pypi:`pytest-integration` *last release*: Nov 17, 2022, *status*: N/A, @@ -6812,14 +7301,14 @@ This list contains 1641 plugins. Pytest plugin for intercepting outgoing connection requests during pytest run. :pypi:`pytest-interface-tester` - *last release*: Feb 13, 2025, + *last release*: Oct 09, 2025, *status*: 4 - Beta, *requires*: pytest Pytest plugin for checking charm relation interface protocol compliance. :pypi:`pytest-invenio` - *last release*: May 08, 2025, + *last release*: Jul 09, 2025, *status*: 5 - Production/Stable, *requires*: pytest<9.0.0,>=6 @@ -6861,21 +7350,14 @@ This list contains 1641 plugins. 
Pytest plugin to run tests in Jupyter Notebooks :pypi:`pytest-ipywidgets` - *last release*: May 30, 2025, + *last release*: Oct 24, 2025, *status*: N/A, *requires*: pytest - :pypi:`pytest-iso` - *last release*: May 15, 2025, - *status*: 4 - Beta, - *requires*: pytest<9.0.0,>=7.4.0 - - Plugin for pytest to produce test documentation for code audits. - :pypi:`pytest-isolate` - *last release*: May 22, 2025, + *last release*: Sep 08, 2025, *status*: 4 - Beta, *requires*: pytest @@ -6980,7 +7462,7 @@ This list contains 1641 plugins. Plugin skips (xfail) tests if unresolved Jira issue(s) linked :pypi:`pytest-jira-xray` - *last release*: May 24, 2025, + *last release*: Oct 11, 2025, *status*: 4 - Beta, *requires*: pytest>=6.2.4 @@ -7043,7 +7525,7 @@ This list contains 1641 plugins. A pytest plugin to report test results as JSON files :pypi:`pytest-json-report-wip` - *last release*: Oct 28, 2023, + *last release*: Jul 23, 2025, *status*: 4 - Beta, *requires*: pytest >=3.8.0 @@ -7056,6 +7538,13 @@ This list contains 1641 plugins. A pytest plugin to perform JSONSchema validations + :pypi:`pytest-jsonschema-snapshot` + *last release*: Sep 13, 2025, + *status*: N/A, + *requires*: pytest + + Pytest plugin for automatic JSON Schema generation and validation from examples + :pypi:`pytest-jtr` *last release*: Jul 21, 2024, *status*: N/A, @@ -7064,7 +7553,7 @@ This list contains 1641 plugins. pytest plugin supporting json test report output :pypi:`pytest-jubilant` - *last release*: May 14, 2025, + *last release*: Jul 28, 2025, *status*: N/A, *requires*: pytest>=8.3.5 @@ -7078,7 +7567,7 @@ This list contains 1641 plugins. Export test results in an augmented JUnit format for usage with Xray () :pypi:`pytest-jupyter` - *last release*: Apr 04, 2024, + *last release*: Oct 16, 2025, *status*: 4 - Beta, *requires*: pytest>=7.0 @@ -7091,6 +7580,20 @@ This list contains 1641 plugins. 
A reusable JupyterHub pytest plugin + :pypi:`pytest-jux` + *last release*: Oct 24, 2025, + *status*: 3 - Alpha, + *requires*: pytest>=7.4 + + A pytest plugin for signing and publishing JUnit XML test reports to the Jux REST API + + :pypi:`pytest-k8s` + *last release*: Jul 07, 2025, + *status*: N/A, + *requires*: pytest>=8.4.1 + + Kubernetes-based testing for pytest + :pypi:`pytest-kafka` *last release*: Aug 14, 2024, *status*: N/A, @@ -7190,12 +7693,19 @@ This list contains 1641 plugins. pytest krtech common library :pypi:`pytest-kubernetes` - *last release*: Feb 04, 2025, + *last release*: Oct 23, 2025, *status*: N/A, *requires*: pytest<9.0.0,>=8.3.0 + :pypi:`pytest_kustomize` + *last release*: Oct 02, 2025, + *status*: N/A, + *requires*: N/A + + Parse and validate kustomize output + :pypi:`pytest-kuunda` *last release*: Feb 25, 2024, *status*: 4 - Beta, @@ -7239,7 +7749,7 @@ This list contains 1641 plugins. Create fancy and clear HTML test reports. :pypi:`pytest-latin-hypercube` - *last release*: Feb 27, 2025, + *last release*: Jun 26, 2025, *status*: N/A, *requires*: pytest @@ -7267,7 +7777,7 @@ This list contains 1641 plugins. It helps to use fixtures in pytest.mark.parametrize :pypi:`pytest-lazy-fixtures` - *last release*: May 27, 2025, + *last release*: Sep 16, 2025, *status*: N/A, *requires*: pytest>=7 @@ -7316,7 +7826,7 @@ This list contains 1641 plugins. Select tests of a given level or lower :pypi:`pytest-lf-skip` - *last release*: May 26, 2025, + *last release*: Oct 14, 2025, *status*: 4 - Beta, *requires*: pytest>=8.3.5 @@ -7330,11 +7840,11 @@ This list contains 1641 plugins. 
A python-libfaketime plugin for pytest :pypi:`pytest-libiio` - *last release*: Oct 01, 2024, - *status*: 4 - Beta, - *requires*: N/A + *last release*: Aug 15, 2025, + *status*: N/A, + *requires*: pytest>=3.5.0 - A pytest plugin to manage interfacing with libiio contexts + A pytest plugin for testing libiio based devices :pypi:`pytest-libnotify` *last release*: Apr 02, 2021, @@ -7406,6 +7916,13 @@ This list contains 1641 plugins. Live results for pytest + :pypi:`pytest-llm` + *last release*: Oct 03, 2025, + *status*: 3 - Alpha, + *requires*: pytest>=7.0.0 + + pytest-llm: A pytest plugin for testing LLM outputs with success rate thresholds. + :pypi:`pytest-llmeval` *last release*: Mar 19, 2025, *status*: 4 - Beta, @@ -7413,6 +7930,13 @@ This list contains 1641 plugins. A pytest plugin to evaluate/benchmark LLM prompts + :pypi:`pytest-lobster` + *last release*: Jul 26, 2025, + *status*: N/A, + *requires*: pytest>=7.0 + + Pytest to generate lobster tracing files + :pypi:`pytest-local-badge` *last release*: Jan 15, 2023, *status*: N/A, @@ -7449,7 +7973,7 @@ This list contains 1641 plugins. pytest-lock is a pytest plugin that allows you to "lock" the results of unit tests, storing them in a local cache. This is particularly useful for tests that are resource-intensive or don't need to be run every time. When the tests are run subsequently, pytest-lock will compare the current results with the locked results and issue a warning if there are any discrepancies. :pypi:`pytest-lockable` - *last release*: Jan 24, 2024, + *last release*: Sep 08, 2025, *status*: 5 - Production/Stable, *requires*: pytest @@ -7497,6 +8021,13 @@ This list contains 1641 plugins. Plugin configuring handlers for loggers from Python logging module. + :pypi:`pytest-logger-db` + *last release*: Sep 14, 2025, + *status*: N/A, + *requires*: N/A + + Add your description here + :pypi:`pytest-logging` *last release*: Nov 04, 2015, *status*: 4 - Beta, @@ -7519,9 +8050,9 @@ This list contains 1641 plugins. 
pytest fixture logging configured from packaged YAML :pypi:`pytest-logikal` - *last release*: Apr 30, 2025, + *last release*: Sep 11, 2025, *status*: 5 - Production/Stable, - *requires*: pytest==8.3.5 + *requires*: pytest==8.4.2 Common testing environment @@ -7554,9 +8085,9 @@ This list contains 1641 plugins. pytest plugin for looping tests :pypi:`pytest-lsp` - *last release*: Nov 23, 2024, - *status*: 3 - Alpha, - *requires*: pytest + *last release*: Oct 25, 2025, + *status*: 5 - Production/Stable, + *requires*: pytest>=8.0 A pytest plugin for end-to-end testing of language servers @@ -7666,7 +8197,7 @@ This list contains 1641 plugins. Pytest plugin to hide sensitive data in test reports :pypi:`pytest-matcher` - *last release*: Aug 01, 2024, + *last release*: Aug 07, 2025, *status*: 5 - Production/Stable, *requires*: pytest @@ -7735,6 +8266,13 @@ This list contains 1641 plugins. pytest plugin to run the mccabe code complexity checker. + :pypi:`pytest-mcp` + *last release*: Jul 07, 2025, + *status*: N/A, + *requires*: pytest>=8.4.0 + + Pytest-style framework for evaluating Model Context Protocol (MCP) servers. + :pypi:`pytest-md` *last release*: Jul 11, 2019, *status*: 3 - Alpha, @@ -7771,7 +8309,7 @@ This list contains 1641 plugins. Estimates memory consumption of test functions :pypi:`pytest-memray` - *last release*: Jul 25, 2024, + *last release*: Aug 18, 2025, *status*: N/A, *requires*: pytest>=7.2 @@ -7792,7 +8330,7 @@ This list contains 1641 plugins. pytest plugin to write integration tests for projects using Mercurial Python internals :pypi:`pytest-mergify` - *last release*: May 13, 2025, + *last release*: Oct 23, 2025, *status*: N/A, *requires*: pytest>=6.0.0 @@ -7824,7 +8362,14 @@ This list contains 1641 plugins. 
*status*: 5 - Production/Stable, *requires*: pytest>=7.0.0 - pytest plugin for test session metadata + pytest plugin for test session metadata + + :pypi:`pytest-metaexport` + *last release*: Jun 24, 2025, + *status*: N/A, + *requires*: pytest>=7.1.0 + + Pytest plugin for exporting custom test metadata to JSON. :pypi:`pytest-metrics` *last release*: Apr 04, 2020, @@ -7833,8 +8378,22 @@ This list contains 1641 plugins. Custom metrics report for pytest + :pypi:`pytest-mfd-config` + *last release*: Jul 11, 2025, + *status*: N/A, + *requires*: pytest<9,>=7.2.1 + + Pytest Plugin that handles test and topology configs and all their belongings like helper fixtures. + + :pypi:`pytest-mfd-logging` + *last release*: Jul 09, 2025, + *status*: N/A, + *requires*: pytest<9,>=7.2.1 + + Module for handling PyTest logging. + :pypi:`pytest-mh` - *last release*: May 15, 2025, + *last release*: Oct 16, 2025, *status*: N/A, *requires*: pytest @@ -7869,12 +8428,19 @@ This list contains 1641 plugins. A plugin to test mp :pypi:`pytest-minio-mock` - *last release*: May 06, 2025, + *last release*: Aug 06, 2025, *status*: N/A, *requires*: pytest>=5.0.0 A pytest plugin for mocking Minio S3 interactions + :pypi:`pytest-mirror` + *last release*: Jul 30, 2025, + *status*: 4 - Beta, + *requires*: N/A + + A pluggy-based pytest plugin and CLI tool for ensuring your test suite mirrors your source code structure + :pypi:`pytest-missing-fixtures` *last release*: Oct 14, 2020, *status*: 4 - Beta, @@ -7918,7 +8484,7 @@ This list contains 1641 plugins. pytest plugin to display test execution output like a mochajs :pypi:`pytest-mock` - *last release*: May 26, 2025, + *last release*: Sep 16, 2025, *status*: 5 - Production/Stable, *requires*: pytest>=6.2.5 @@ -7960,7 +8526,7 @@ This list contains 1641 plugins. An in-memory mock of a Redis server that runs in a separate thread. This is to be used for unit-tests that require a Redis database. 
:pypi:`pytest-mock-resources` - *last release*: Mar 10, 2025, + *last release*: Sep 17, 2025, *status*: N/A, *requires*: pytest>=1.0 @@ -7994,6 +8560,13 @@ This list contains 1641 plugins. Massively distributed pytest runs using modal.com + :pypi:`pytest-modern` + *last release*: Aug 19, 2025, + *status*: 4 - Beta, + *requires*: pytest>=8 + + A more modern pytest + :pypi:`pytest-modified-env` *last release*: Jan 29, 2022, *status*: 4 - Beta, @@ -8023,7 +8596,7 @@ This list contains 1641 plugins. PyTest Molecule Plugin :: discover and run molecule tests :pypi:`pytest-mongo` - *last release*: Feb 28, 2025, + *last release*: Aug 01, 2025, *status*: 5 - Production/Stable, *requires*: pytest>=6.2 @@ -8044,7 +8617,7 @@ This list contains 1641 plugins. pytest plugin for MongoDB :pypi:`pytest-mongodb-ry` - *last release*: Jan 21, 2025, + *last release*: Sep 25, 2025, *status*: N/A, *requires*: N/A @@ -8128,7 +8701,7 @@ This list contains 1641 plugins. low-startup-overhead, scalable, distributed-testing pytest plugin :pypi:`pytest-mqtt` - *last release*: Jan 07, 2025, + *last release*: Sep 10, 2025, *status*: 5 - Production/Stable, *requires*: pytest<9; extra == "test" @@ -8142,7 +8715,7 @@ This list contains 1641 plugins. Utility for writing multi-host tests for pytest :pypi:`pytest-multilog` - *last release*: Jan 17, 2023, + *last release*: Sep 21, 2025, *status*: N/A, *requires*: pytest @@ -8232,6 +8805,13 @@ This list contains 1641 plugins. MySQL process and client fixtures for pytest + :pypi:`pytest-nb` + *last release*: Jul 26, 2025, + *status*: N/A, + *requires*: pytest==8.4.1 + + Seedable Jupyter Notebook testing tool + :pypi:`pytest-ndb` *last release*: Apr 28, 2024, *status*: N/A, @@ -8268,7 +8848,7 @@ This list contains 1641 plugins. A pytest plugin that provides a mock NETCONF (RFC6241/RFC6242) server for local testing. 
:pypi:`pytest-netdut` - *last release*: Apr 11, 2025, + *last release*: Oct 09, 2025, *status*: N/A, *requires*: pytest>=3.5.0 @@ -8324,7 +8904,7 @@ This list contains 1641 plugins. pytest ngs fixtures :pypi:`pytest-nhsd-apim` - *last release*: Apr 01, 2025, + *last release*: Oct 29, 2025, *status*: N/A, *requires*: pytest<9.0.0,>=8.2.0 @@ -8380,7 +8960,7 @@ This list contains 1641 plugins. Ensure a test produces no garbage :pypi:`pytest-no-problem` - *last release*: Apr 05, 2025, + *last release*: Oct 18, 2025, *status*: N/A, *requires*: pytest>=7.0 @@ -8457,9 +9037,9 @@ This list contains 1641 plugins. PyTest plugin for the OAR testing framework :pypi:`pytest-oarepo` - *last release*: Feb 14, 2025, + *last release*: Oct 23, 2025, *status*: N/A, - *requires*: pytest>=7.1.2; extra == "base" + *requires*: pytest>=7.1.2; extra == "dev" @@ -8498,6 +9078,13 @@ This list contains 1641 plugins. Project description + :pypi:`pytest-oduit` + *last release*: Oct 06, 2025, + *status*: 5 - Production/Stable, + *requires*: pytest>=8 + + py.test plugin to run Odoo tests + :pypi:`pytest-oerp` *last release*: Feb 28, 2012, *status*: 3 - Alpha, @@ -8526,6 +9113,13 @@ This list contains 1641 plugins. The ultimate pytest output plugin + :pypi:`pytest-once` + *last release*: Oct 10, 2025, + *status*: 3 - Alpha, + *requires*: pytest>=8.4.0 + + xdist-safe 'run once' fixture decorator for pytest (setup/teardown across workers) + :pypi:`pytest-only` *last release*: May 27, 2024, *status*: 5 - Production/Stable, @@ -8580,7 +9174,7 @@ This list contains 1641 plugins. *status*: N/A, *requires*: pytest - Fixtures for Operators + Fixtures for Charmed Operators :pypi:`pytest-optional` *last release*: Oct 07, 2015, @@ -8590,7 +9184,7 @@ This list contains 1641 plugins. 
include/exclude values of fixtures in pytest :pypi:`pytest-optional-tests` - *last release*: Apr 15, 2025, + *last release*: Jul 21, 2025, *status*: 4 - Beta, *requires*: pytest; extra == "dev" @@ -8652,6 +9246,13 @@ This list contains 1641 plugins. OpenTelemetry plugin for Pytest + :pypi:`pytest-otelmark` + *last release*: Sep 14, 2025, + *status*: 3 - Alpha, + *requires*: pytest>=8.3.5 + + Pytest plugin for otelmark. + :pypi:`pytest-override-env-var` *last release*: Feb 25, 2023, *status*: N/A, @@ -8730,7 +9331,7 @@ This list contains 1641 plugins. A pytest library for parametrizing tests using type hints. :pypi:`pytest-parametrize` - *last release*: Nov 10, 2024, + *last release*: Sep 25, 2025, *status*: 5 - Production/Stable, *requires*: pytest<9.0.0,>=8.3.0 @@ -8884,7 +9485,7 @@ This list contains 1641 plugins. A simple plugin to ensure the execution of critical sections of code has not been impacted :pypi:`pytest-performancetotal` - *last release*: Feb 01, 2025, + *last release*: Aug 05, 2025, *status*: 5 - Production/Stable, *requires*: N/A @@ -8898,7 +9499,7 @@ This list contains 1641 plugins. Pytest tool for persistent objects :pypi:`pytest-pexpect` - *last release*: Aug 13, 2024, + *last release*: Sep 10, 2025, *status*: 4 - Beta, *requires*: pytest>=6.2.0 @@ -9038,7 +9639,7 @@ This list contains 1641 plugins. Pytest plugin for reading playbooks. :pypi:`pytest-playwright` - *last release*: Jan 31, 2025, + *last release*: Sep 08, 2025, *status*: N/A, *requires*: pytest<9.0.0,>=6.2.4 @@ -9052,15 +9653,15 @@ This list contains 1641 plugins. 
ASYNC Pytest plugin for Playwright :pypi:`pytest-playwright-asyncio` - *last release*: Jan 31, 2025, + *last release*: Sep 08, 2025, *status*: N/A, *requires*: pytest<9.0.0,>=6.2.4 A pytest wrapper with async fixtures for Playwright to automate web browsers :pypi:`pytest-playwright-axe` - *last release*: Mar 27, 2025, - *status*: 4 - Beta, + *last release*: Nov 01, 2025, + *status*: 5 - Production/Stable, *requires*: N/A An axe-core integration for accessibility testing using Playwright Python. @@ -9094,14 +9695,21 @@ This list contains 1641 plugins. A pytest fixture for visual testing with Playwright :pypi:`pytest-playwright-visual-snapshot` - *last release*: Apr 15, 2025, + *last release*: Jul 02, 2025, *status*: N/A, *requires*: N/A Easy pytest visual regression testing using playwright + :pypi:`pytest-pl-grader` + *last release*: Nov 01, 2025, + *status*: 3 - Alpha, + *requires*: pytest + + A pytest plugin for autograding Python code. Designed for use with the PrairieLearn platform. + :pypi:`pytest-plone` - *last release*: Mar 27, 2025, + *last release*: Jun 11, 2025, *status*: 3 - Alpha, *requires*: pytest<8.0.0 @@ -9121,6 +9729,13 @@ This list contains 1641 plugins. A plugin to help developing and testing other plugins + :pypi:`pytest-plugins` + *last release*: Oct 23, 2025, + *status*: N/A, + *requires*: pytest + + A Python package for managing pytest plugins. + :pypi:`pytest-plus` *last release*: Feb 02, 2025, *status*: 5 - Production/Stable, @@ -9177,6 +9792,13 @@ This list contains 1641 plugins. Provides Polecat pytest fixtures + :pypi:`pytest-polymeric-report` + *last release*: Oct 20, 2025, + *status*: N/A, + *requires*: N/A + + A polymeric test report plugin for pytest + :pypi:`pytest-ponyorm` *last release*: Oct 31, 2018, *status*: N/A, @@ -9262,7 +9884,7 @@ This list contains 1641 plugins. A Pytest plugin to drop duplicated tests during collection, but will prefer keeping nested packages. 
:pypi:`pytest-pretty` - *last release*: Apr 05, 2023, + *last release*: Jun 04, 2025, *status*: 5 - Production/Stable, *requires*: pytest>=7 @@ -9283,9 +9905,9 @@ This list contains 1641 plugins. Minitest-style test colors :pypi:`pytest-print` - *last release*: Feb 25, 2025, + *last release*: Oct 09, 2025, *status*: 5 - Production/Stable, - *requires*: pytest>=8.3.2 + *requires*: pytest>=8.4.2 pytest-print adds the printer fixture you can use to print messages to the user (directly to the pytest runner, not stdout) @@ -9345,6 +9967,13 @@ This list contains 1641 plugins. Pytest plugin to export test metrics to Prometheus Pushgateway + :pypi:`pytest-proofy` + *last release*: Oct 17, 2025, + *status*: 4 - Beta, + *requires*: pytest>=7.0.0 + + Pytest plugin for Proofy test reporting + :pypi:`pytest-prosper` *last release*: Sep 24, 2018, *status*: N/A, @@ -9437,7 +10066,7 @@ This list contains 1641 plugins. Plugin for py.test to enter PyCharm debugger on uncaught exceptions :pypi:`pytest-pycodestyle` - *last release*: Oct 10, 2024, + *last release*: Jul 20, 2025, *status*: 3 - Alpha, *requires*: pytest>=7.0 @@ -9464,6 +10093,13 @@ This list contains 1641 plugins. pytest plugin to run pydocstyle + :pypi:`pytest-pylembic` + *last release*: Jul 22, 2025, + *status*: 3 - Alpha, + *requires*: N/A + + This package provides pytest plugin for validating Alembic migrations using the pylembic package. + :pypi:`pytest-pylint` *last release*: Oct 06, 2023, *status*: 5 - Production/Stable, @@ -9486,7 +10122,7 @@ This list contains 1641 plugins. Record PyMySQL queries and mock with the stored data. :pypi:`pytest-pyodide` - *last release*: Nov 23, 2024, + *last release*: Oct 24, 2025, *status*: N/A, *requires*: pytest @@ -9521,7 +10157,7 @@ This list contains 1641 plugins. 
Pytest fixture "q" for pyq :pypi:`pytest-pyramid` - *last release*: Oct 24, 2024, + *last release*: Sep 30, 2025, *status*: 5 - Production/Stable, *requires*: pytest @@ -9548,6 +10184,13 @@ This list contains 1641 plugins. Pytest plugin for type checking code with Pyright + :pypi:`pytest-pyspark-plugin` + *last release*: Jul 28, 2025, + *status*: 4 - Beta, + *requires*: pytest>=8.0.0 + + Pytest pyspark plugin (p3) + :pypi:`pytest-pyspec` *last release*: Aug 17, 2024, *status*: N/A, @@ -9562,6 +10205,13 @@ This list contains 1641 plugins. Plugin to run pystack after a timeout for a test suite. + :pypi:`pytest-pytestdb` + *last release*: Sep 14, 2025, + *status*: N/A, + *requires*: N/A + + Add your description here + :pypi:`pytest-pytestrail` *last release*: Aug 27, 2020, *status*: 4 - Beta, @@ -9569,8 +10219,15 @@ This list contains 1641 plugins. Pytest plugin for interaction with TestRail + :pypi:`pytest-pytestrail-internal` + *last release*: Jun 12, 2025, + *status*: 4 - Beta, + *requires*: pytest>=3.8.0 + + Pytest plugin for interaction with TestRail, Pytest plugin for TestRail (internal fork from: https://github.com/tolstislon/pytest-pytestrail with PR #25 fix) + :pypi:`pytest-pythonhashseed` - *last release*: Feb 25, 2024, + *last release*: Sep 28, 2025, *status*: 4 - Beta, *requires*: pytest>=3.0.0 @@ -9605,11 +10262,11 @@ This list contains 1641 plugins. A package for create venv in tests :pypi:`pytest-pyvista` - *last release*: Sep 29, 2023, + *last release*: Oct 06, 2025, *status*: 4 - Beta, - *requires*: pytest>=3.5.0 + *requires*: pytest>=6.2.0 - Pytest-pyvista package + Pytest-pyvista package. :pypi:`pytest-qanova` *last release*: Sep 05, 2024, @@ -9619,7 +10276,7 @@ This list contains 1641 plugins. A pytest plugin to collect test information :pypi:`pytest-qaseio` - *last release*: Mar 18, 2025, + *last release*: Oct 01, 2025, *status*: 5 - Production/Stable, *requires*: pytest<9.0.0,>=7.2.2 @@ -9661,7 +10318,7 @@ This list contains 1641 plugins. 
pytest plugin to generate test result QR codes :pypi:`pytest-qt` - *last release*: Feb 07, 2024, + *last release*: Jul 01, 2025, *status*: 5 - Production/Stable, *requires*: pytest @@ -9759,7 +10416,7 @@ This list contains 1641 plugins. py.test plugin to randomize tests :pypi:`pytest-randomly` - *last release*: Oct 25, 2024, + *last release*: Sep 12, 2025, *status*: 5 - Production/Stable, *requires*: pytest @@ -9780,9 +10437,9 @@ This list contains 1641 plugins. Randomise the order in which pytest tests are run with some control over the randomness :pypi:`pytest-random-order` - *last release*: Jan 20, 2024, + *last release*: Jun 22, 2025, *status*: 5 - Production/Stable, - *requires*: pytest >=3.0.0 + *requires*: pytest Randomise the order in which pytest tests are run with some control over the randomness @@ -9793,24 +10450,38 @@ This list contains 1641 plugins. A Pytest plugin for faster fault detection via regression test prioritization + :pypi:`pytest-rca-report` + *last release*: Aug 04, 2025, + *status*: N/A, + *requires*: N/A + + Interactive RCA report generator for pytest runs, with AI-based analysis and visual dashboard + :pypi:`pytest-readme` - *last release*: Sep 02, 2022, + *last release*: Aug 01, 2025, *status*: 5 - Production/Stable, - *requires*: N/A + *requires*: pytest Test your README.md file :pypi:`pytest-reana` - *last release*: Sep 04, 2024, + *last release*: Oct 10, 2025, *status*: 3 - Alpha, *requires*: N/A Pytest fixtures for REANA. + :pypi:`pytest-recap` + *last release*: Jun 16, 2025, + *status*: N/A, + *requires*: pytest>=6.2.0 + + Capture your test sessions. Recap the results. + :pypi:`pytest-recorder` - *last release*: Mar 31, 2025, + *last release*: Oct 28, 2025, *status*: N/A, - *requires*: N/A + *requires*: pytest>=8.4.1 Pytest plugin, meant to facilitate unit tests writing for tools consumming Web APIs. @@ -9885,14 +10556,14 @@ This list contains 1641 plugins. 
Management of Pytest dependencies via regex patterns :pypi:`pytest-regressions` - *last release*: May 30, 2025, + *last release*: Sep 05, 2025, *status*: 5 - Production/Stable, *requires*: pytest>=6.2.0 Easy to use fixtures to write regression tests. :pypi:`pytest-regtest` - *last release*: Nov 12, 2024, + *last release*: Oct 11, 2025, *status*: N/A, *requires*: pytest>7.2 @@ -9997,7 +10668,7 @@ This list contains 1641 plugins. Generate Pytest reports with templates :pypi:`pytest-reporter-html1` - *last release*: May 06, 2025, + *last release*: Oct 10, 2025, *status*: 4 - Beta, *requires*: N/A @@ -10011,16 +10682,16 @@ This list contains 1641 plugins. A basic HTML report for pytest using Jinja2 template engine. :pypi:`pytest-reporter-plus` - *last release*: May 31, 2025, + *last release*: Jul 16, 2025, *status*: N/A, *requires*: N/A Lightweight enhanced HTML reporter for Pytest :pypi:`pytest-report-extras` - *last release*: Apr 04, 2025, + *last release*: Aug 08, 2025, *status*: N/A, - *requires*: pytest>=8.0.0 + *requires*: pytest>=8.4.0 Pytest plugin to enhance pytest-html and allure reports by adding comments, screenshots, webpage sources and attachments. @@ -10060,7 +10731,7 @@ This list contains 1641 plugins. pytest plugin for adding tests' parameters to junit report :pypi:`pytest-reportportal` - *last release*: Feb 28, 2025, + *last release*: Jul 08, 2025, *status*: N/A, *requires*: pytest>=4.6.10 @@ -10081,12 +10752,19 @@ This list contains 1641 plugins. Pytest Repo Structure :pypi:`pytest-req` - *last release*: Aug 31, 2024, + *last release*: Sep 08, 2025, *status*: 5 - Production/Stable, - *requires*: pytest<9.0.0,>=8.3.2 + *requires*: pytest>=8.4.2 pytest requests plugin + :pypi:`pytest-reqcov` + *last release*: Jul 04, 2025, + *status*: 3 - Alpha, + *requires*: pytest>=6.0 + + A pytest plugin for requirement coverage tracking + :pypi:`pytest-reqs` *last release*: May 12, 2019, *status*: N/A, @@ -10129,6 +10807,13 @@ This list contains 1641 plugins. 
A pytest plugin to elegantly skip tests with optional requirements + :pypi:`pytest-reqyaml` + *last release*: Aug 16, 2025, + *status*: N/A, + *requires*: pytest>=8.4.1 + + This is a plugin where generate requests test cases from yaml. + :pypi:`pytest-reraise` *last release*: Sep 20, 2022, *status*: 5 - Production/Stable, @@ -10144,9 +10829,9 @@ This list contains 1641 plugins. Re-run only changed files in specified branch :pypi:`pytest-rerun-all` - *last release*: Nov 16, 2023, + *last release*: Jul 30, 2025, *status*: 3 - Alpha, - *requires*: pytest (>=7.0.0) + *requires*: pytest>=7.0.0 Rerun testsuite for a certain time or iterations @@ -10158,7 +10843,7 @@ This list contains 1641 plugins. pytest rerun class failures plugin :pypi:`pytest-rerunfailures` - *last release*: May 08, 2025, + *last release*: Oct 10, 2025, *status*: 5 - Production/Stable, *requires*: pytest!=8.2.2,>=7.4 @@ -10179,7 +10864,7 @@ This list contains 1641 plugins. Pytest fixture for recording and replaying serial port traffic. :pypi:`pytest-resilient-circuits` - *last release*: Feb 28, 2025, + *last release*: Jul 29, 2025, *status*: N/A, *requires*: pytest~=7.0 @@ -10193,7 +10878,7 @@ This list contains 1641 plugins. Load resource fixture plugin to use with pytest :pypi:`pytest-resource-path` - *last release*: May 15, 2025, + *last release*: Sep 18, 2025, *status*: 5 - Production/Stable, *requires*: pytest>=3.5.0 @@ -10206,6 +10891,13 @@ This list contains 1641 plugins. Pytest plugin for reporting running time and peak memory usage + :pypi:`pytest-respect` + *last release*: Oct 21, 2025, + *status*: 5 - Production/Stable, + *requires*: pytest>=8.0.0 + + Pytest plugin to load resource files relative to test code and to expect values to match them. + :pypi:`pytest-responsemock` *last release*: Mar 10, 2022, *status*: 5 - Production/Stable, @@ -10228,7 +10920,7 @@ This list contains 1641 plugins. 
:pypi:`pytest-restrict` - *last release*: Oct 24, 2024, + *last release*: Sep 09, 2025, *status*: 5 - Production/Stable, *requires*: pytest @@ -10249,7 +10941,7 @@ This list contains 1641 plugins. Default template for PDM package :pypi:`pytest-results` - *last release*: May 06, 2025, + *last release*: Oct 08, 2025, *status*: 4 - Beta, *requires*: pytest @@ -10326,14 +11018,14 @@ This list contains 1641 plugins. :pypi:`pytest-revealtype-injector` - *last release*: Mar 18, 2025, + *last release*: Oct 23, 2025, *status*: 4 - Beta, *requires*: pytest<9,>=7.0 Pytest plugin for replacing reveal_type() calls inside test functions with static and runtime type checking result comparison, for confirming type annotation validity. :pypi:`pytest-reverse` - *last release*: Oct 25, 2024, + *last release*: Sep 09, 2025, *status*: 5 - Production/Stable, *requires*: pytest @@ -10381,6 +11073,13 @@ This list contains 1641 plugins. Sycronise pytest results to Jira RMsis + :pypi:`pytest-rmysql` + *last release*: Aug 17, 2025, + *status*: N/A, + *requires*: pytest>=8.4.1 + + This is a plugin which is able to connet MySQL easyly. + :pypi:`pytest-rng` *last release*: Aug 08, 2019, *status*: 5 - Production/Stable, @@ -10395,8 +11094,8 @@ This list contains 1641 plugins. pytest plugin for ROAST configuration override and fixtures - :pypi:`pytest_robotframework` - *last release*: Apr 13, 2025, + :pypi:`pytest-robotframework` + *last release*: Oct 06, 2025, *status*: N/A, *requires*: pytest<9,>=7 @@ -10445,7 +11144,7 @@ This list contains 1641 plugins. Coverage-based regression test selection (RTS) plugin for pytest :pypi:`pytest-ruff` - *last release*: Jul 21, 2024, + *last release*: Jun 19, 2025, *status*: 4 - Beta, *requires*: pytest>=5 @@ -10466,7 +11165,7 @@ This list contains 1641 plugins. 
implement a --failed option for pytest :pypi:`pytest-run-parallel` - *last release*: May 27, 2025, + *last release*: Oct 23, 2025, *status*: 4 - Beta, *requires*: pytest>=6.2.0 @@ -10487,8 +11186,8 @@ This list contains 1641 plugins. Checks type annotations on runtime while running tests. :pypi:`pytest-runtime-xfail` - *last release*: Aug 26, 2021, - *status*: N/A, + *last release*: Oct 10, 2025, + *status*: 5 - Production/Stable, *requires*: pytest>=5.0.0 Call runtime_xfail() to mark running test as xfail. @@ -10522,7 +11221,7 @@ This list contains 1641 plugins. A Pytest plugin that builds and creates docker containers :pypi:`pytest-salt-factories` - *last release*: Oct 22, 2024, + *last release*: Jul 08, 2025, *status*: 5 - Production/Stable, *requires*: pytest>=7.4.0 @@ -10585,7 +11284,7 @@ This list contains 1641 plugins. pytest_sauce provides sane and helpful methods worked out in clearcode to run py.test tests with selenium/saucelabs :pypi:`pytest-sbase` - *last release*: May 27, 2025, + *last release*: Nov 01, 2025, *status*: 5 - Production/Stable, *requires*: N/A @@ -10599,12 +11298,19 @@ This list contains 1641 plugins. pytest plugin for test scenarios :pypi:`pytest-scenario-files` - *last release*: May 21, 2025, + *last release*: Sep 03, 2025, *status*: 5 - Production/Stable, - *requires*: pytest>=7.0 + *requires*: pytest<9,>=7.4 A pytest plugin that generates unit test scenarios from data files. + :pypi:`pytest-scenarios` + *last release*: Oct 29, 2025, + *status*: N/A, + *requires*: N/A + + Add your description here + :pypi:`pytest-schedule` *last release*: Oct 31, 2024, *status*: N/A, @@ -10669,7 +11375,7 @@ This list contains 1641 plugins. pytest plugin to automatically capture screenshots upon selenium webdriver events :pypi:`pytest-seleniumbase` - *last release*: May 27, 2025, + *last release*: Nov 01, 2025, *status*: 5 - Production/Stable, *requires*: N/A @@ -10704,7 +11410,7 @@ This list contains 1641 plugins. 
Send pytest execution result email :pypi:`pytest-sentry` - *last release*: May 23, 2025, + *last release*: Jul 01, 2025, *status*: N/A, *requires*: pytest @@ -10739,7 +11445,7 @@ This list contains 1641 plugins. Automatically mocks resources from serverless.yml in pytest using moto. :pypi:`pytest-servers` - *last release*: Mar 12, 2025, + *last release*: Aug 04, 2025, *status*: 3 - Alpha, *requires*: pytest>=6.2 @@ -10753,9 +11459,9 @@ This list contains 1641 plugins. :pypi:`pytest-services` - *last release*: Oct 30, 2020, + *last release*: Jul 16, 2025, *status*: 6 - Mature, - *requires*: N/A + *requires*: pytest Services plugin for pytest testing framework @@ -10802,14 +11508,14 @@ This list contains 1641 plugins. :pypi:`pytest-shard-fork` - *last release*: May 17, 2025, + *last release*: Jun 13, 2025, *status*: 4 - Beta, *requires*: pytest Shard tests to support parallelism across multiple machines :pypi:`pytest-shared-session-scope` - *last release*: Sep 22, 2024, + *last release*: Oct 31, 2025, *status*: N/A, *requires*: pytest>=7.0.0 @@ -10871,6 +11577,13 @@ This list contains 1641 plugins. A goodie-bag of unix shell and environment tools for py.test + :pypi:`pytest-sigil` + *last release*: Oct 21, 2025, + *status*: N/A, + *requires*: pytest<9.0.0,>=7.0.0 + + Proper fixture resource cleanup by handling signals + :pypi:`pytest-simbind` *last release*: Mar 28, 2024, *status*: N/A, @@ -10907,7 +11620,7 @@ This list contains 1641 plugins. Allow for multiple processes to log to a single file :pypi:`pytest-skip` - *last release*: Apr 04, 2025, + *last release*: Sep 12, 2025, *status*: 3 - Alpha, *requires*: pytest @@ -10997,6 +11710,20 @@ This list contains 1641 plugins. Smart coverage plugin for pytest. 
+ :pypi:`pytest-smart-debugger-backend` + *last release*: Sep 17, 2025, + *status*: N/A, + *requires*: N/A + + Backend server for Pytest Smart Debugger + + :pypi:`pytest-smart-rerun` + *last release*: Oct 12, 2025, + *status*: 3 - Alpha, + *requires*: N/A + + A Pytest plugin for intelligent retrying of flaky tests. + :pypi:`pytest-smell` *last release*: Jun 26, 2022, *status*: N/A, @@ -11005,7 +11732,7 @@ This list contains 1641 plugins. Automated bad smell detection tool for Pytest :pypi:`pytest-smoke` - *last release*: May 23, 2025, + *last release*: Oct 08, 2025, *status*: 4 - Beta, *requires*: pytest<9,>=7.0.0 @@ -11046,6 +11773,20 @@ This list contains 1641 plugins. Plugin for adding a marker to slow running tests. 🐌 + :pypi:`pytest-snap` + *last release*: Aug 25, 2025, + *status*: N/A, + *requires*: pytest>=8.0.0 + + A text-based snapshot testing library implemented as a pytest plugin + + :pypi:`pytest-snapcheck` + *last release*: Sep 07, 2025, + *status*: N/A, + *requires*: pytest>=8.0 + + Minimal deterministic test-run snapshot capture for pytest. + :pypi:`pytest-snapci` *last release*: Nov 12, 2015, *status*: N/A, @@ -11173,7 +11914,7 @@ This list contains 1641 plugins. py.test plugin to spawn process and communicate with them. :pypi:`pytest-spec` - *last release*: Aug 04, 2024, + *last release*: Oct 08, 2025, *status*: N/A, *requires*: pytest; extra == "test" @@ -11257,14 +11998,14 @@ This list contains 1641 plugins. :pypi:`pytest-splunk-addon` - *last release*: May 14, 2025, + *last release*: Aug 19, 2025, *status*: N/A, *requires*: pytest<8,>5.4.0 A Dynamic test tool for Splunk Apps and Add-ons :pypi:`pytest-splunk-addon-ui-smartx` - *last release*: Mar 19, 2025, + *last release*: Aug 28, 2025, *status*: N/A, *requires*: N/A @@ -11320,7 +12061,7 @@ This list contains 1641 plugins. A pytest plugin to use sqlfluff to enable format checking of sql files. 
:pypi:`pytest-sqlguard` - *last release*: Mar 11, 2025, + *last release*: Jun 06, 2025, *status*: 4 - Beta, *requires*: pytest>=7 @@ -11382,6 +12123,20 @@ This list contains 1641 plugins. Add status mark for tests + :pypi:`pytest-stderr-db` + *last release*: Sep 14, 2025, + *status*: N/A, + *requires*: N/A + + Add your description here + + :pypi:`pytest-stdout-db` + *last release*: Sep 14, 2025, + *status*: N/A, + *requires*: N/A + + Add your description here + :pypi:`pytest-stepfunctions` *last release*: May 08, 2021, *status*: 4 - Beta, @@ -11396,6 +12151,13 @@ This list contains 1641 plugins. Create step-wise / incremental tests in pytest. + :pypi:`pytest-stepthrough` + *last release*: Aug 14, 2025, + *status*: N/A, + *requires*: N/A + + Pause and wait for Enter after each test with --step + :pypi:`pytest-stepwise` *last release*: Dec 01, 2015, *status*: 4 - Beta, @@ -11404,7 +12166,7 @@ This list contains 1641 plugins. Run a test suite one failing test at a time. :pypi:`pytest-stf` - *last release*: Sep 24, 2024, + *last release*: Sep 23, 2025, *status*: N/A, *requires*: pytest>=5.0 @@ -11424,8 +12186,15 @@ This list contains 1641 plugins. A plugin to pytest stoq + :pypi:`pytest-storage` + *last release*: Sep 12, 2025, + *status*: 3 - Alpha, + *requires*: pytest>=8.4.2 + + Pytest plugin to store test artifacts + :pypi:`pytest-store` - *last release*: Sep 04, 2024, + *last release*: Jul 30, 2025, *status*: 3 - Alpha, *requires*: pytest>=7.0.0 @@ -11446,7 +12215,7 @@ This list contains 1641 plugins. A Pytest plugin that allows you to loop tests for a user defined amount of time. :pypi:`pytest-structlog` - *last release*: Jul 25, 2024, + *last release*: Sep 10, 2025, *status*: N/A, *requires*: pytest @@ -11487,6 +12256,13 @@ This list contains 1641 plugins. 
Run pytest in a subinterpreter + :pypi:`pytest-subket` + *last release*: Jul 31, 2025, + *status*: 4 - Beta, + *requires*: N/A + + Pytest Plugin to disable socket calls during tests + :pypi:`pytest-subprocess` *last release*: Jan 04, 2025, *status*: 5 - Production/Stable, @@ -11502,7 +12278,7 @@ This list contains 1641 plugins. A hack to explicitly set up and tear down fixtures. :pypi:`pytest-subtests` - *last release*: Dec 10, 2024, + *last release*: Oct 20, 2025, *status*: 4 - Beta, *requires*: pytest>=7.4 @@ -11516,9 +12292,9 @@ This list contains 1641 plugins. pytest-subunit is a plugin for py.test which outputs testsresult in subunit format. :pypi:`pytest-sugar` - *last release*: Feb 01, 2024, + *last release*: Aug 23, 2025, *status*: 4 - Beta, - *requires*: pytest >=6.2.0 + *requires*: pytest>=6.2.0 pytest-sugar is a plugin for pytest that changes the default look and feel of pytest (e.g. progressbar, show tests that fail instantly). @@ -11704,6 +12480,13 @@ This list contains 1641 plugins. generate terraform resources to use with pytest + :pypi:`pytest-test-analyzer` + *last release*: Jun 14, 2025, + *status*: 4 - Beta, + *requires*: N/A + + A powerful tool for analyzing pytest test files and generating detailed reports + :pypi:`pytest-testbook` *last release*: Dec 11, 2016, *status*: 3 - Alpha, @@ -12082,6 +12865,13 @@ This list contains 1641 plugins. A simple plugin to view timestamps for each test + :pypi:`pytest-timing-plugin` + *last release*: Jul 21, 2025, + *status*: N/A, + *requires*: N/A + + pytest插件开发demo + :pypi:`pytest-tiny-api-client` *last release*: Jan 04, 2024, *status*: 5 - Production/Stable, @@ -12153,7 +12943,7 @@ This list contains 1641 plugins. this is a vue-element ui report for pytest :pypi:`pytest-tmux` - *last release*: Apr 22, 2023, + *last release*: Sep 01, 2025, *status*: 4 - Beta, *requires*: N/A @@ -12272,9 +13062,9 @@ This list contains 1641 plugins. 
Plugin for py.test that integrates trello using markers :pypi:`pytest-trepan` - *last release*: Jul 28, 2018, + *last release*: Sep 11, 2025, *status*: 5 - Production/Stable, - *requires*: N/A + *requires*: pytest>=4.0.0 Pytest plugin for trepan debugger. @@ -12327,6 +13117,13 @@ This list contains 1641 plugins. Text User Interface (TUI) and HTML report for Pytest test runs + :pypi:`pytest-tui-runner` + *last release*: Oct 23, 2025, + *status*: N/A, + *requires*: pytest>=8.3.5 + + Textual-based terminal UI for running pytest tests + :pypi:`pytest-tuitest` *last release*: Apr 11, 2025, *status*: N/A, @@ -12355,6 +13152,13 @@ This list contains 1641 plugins. A twisted plugin for pytest. + :pypi:`pytest-ty` + *last release*: Oct 10, 2025, + *status*: 3 - Alpha, + *requires*: pytest>=7.0.0 + + A pytest plugin to run the ty type checker + :pypi:`pytest-typechecker` *last release*: Feb 04, 2022, *status*: N/A, @@ -12363,7 +13167,7 @@ This list contains 1641 plugins. Run type checkers on specified test files :pypi:`pytest-typed-schema-shot` - *last release*: May 24, 2025, + *last release*: Jun 14, 2025, *status*: N/A, *requires*: pytest @@ -12404,6 +13208,13 @@ This list contains 1641 plugins. Typhoon HIL plugin for pytest + :pypi:`pytest-tzshift` + *last release*: Jun 25, 2025, + *status*: 4 - Beta, + *requires*: pytest>=7.0 + + A Pytest plugin that transparently re-runs tests under a matrix of timezones and locales. + :pypi:`pytest-ubersmith` *last release*: Apr 13, 2015, *status*: N/A, @@ -12454,9 +13265,9 @@ This list contains 1641 plugins. Plugin for py.test set a different exit code on uncaught exceptions :pypi:`pytest-unique` - *last release*: Mar 23, 2025, + *last release*: Jun 10, 2025, *status*: N/A, - *requires*: pytest<8.0.0,>=7.4.2 + *requires*: pytest<9.0.0,>=8.0.0 Pytest fixture to generate unique values. @@ -12475,7 +13286,7 @@ This list contains 1641 plugins. 
A pytest plugin to run tests using unittest-style test IDs :pypi:`pytest-unmagic` - *last release*: Oct 22, 2024, + *last release*: Jul 14, 2025, *status*: 5 - Production/Stable, *requires*: pytest @@ -12489,7 +13300,7 @@ This list contains 1641 plugins. Run only unmarked tests :pypi:`pytest-unordered` - *last release*: Jul 05, 2024, + *last release*: Jun 03, 2025, *status*: 4 - Beta, *requires*: pytest>=7.0.0 @@ -12509,6 +13320,13 @@ This list contains 1641 plugins. A pytest plugin to list unused fixtures after a test run. + :pypi:`pytest-unused-port` + *last release*: Oct 22, 2025, + *status*: N/A, + *requires*: pytest + + pytest fixture finding an unused local port + :pypi:`pytest-upload-report` *last release*: Jun 18, 2021, *status*: 5 - Production/Stable, @@ -12593,6 +13411,13 @@ This list contains 1641 plugins. More descriptive output for parametrized py.test tests + :pypi:`pytest-verify` + *last release*: Oct 25, 2025, + *status*: 5 - Production/Stable, + *requires*: N/A + + A pytest plugin for snapshot verification with optional visual diff viewer. + :pypi:`pytest-vimqf` *last release*: Feb 08, 2021, *status*: 4 - Beta, @@ -12747,13 +13572,6 @@ This list contains 1641 plugins. Test web apps with pytest - :pypi:`pytest-webtest-extras` - *last release*: Dec 28, 2024, - *status*: N/A, - *requires*: pytest>=7.0.0 - - Pytest plugin to enhance pytest-html and allure reports of webtest projects by adding screenshots, comments and webpage sources. - :pypi:`pytest-wetest` *last release*: Nov 10, 2018, *status*: 4 - Beta, @@ -12762,7 +13580,7 @@ This list contains 1641 plugins. Welian API Automation test framework pytest plugin :pypi:`pytest-when` - *last release*: Nov 29, 2024, + *last release*: Sep 25, 2025, *status*: N/A, *requires*: pytest>=7.3.1 @@ -12832,7 +13650,7 @@ This list contains 1641 plugins. 
A pytest plugin for configuring workflow/pipeline tests using YAML files :pypi:`pytest-xdist` - *last release*: May 26, 2025, + *last release*: Jul 01, 2025, *status*: 5 - Production/Stable, *requires*: pytest>=7.0.0 @@ -12852,12 +13670,12 @@ This list contains 1641 plugins. forked from pytest-xdist - :pypi:`pytest-xdist-lock` - *last release*: Apr 26, 2025, + :pypi:`pytest-xdist-gnumake` + *last release*: Jun 22, 2025, *status*: N/A, - *requires*: pytest>=6.0 + *requires*: pytest - Extension for pytest-xdist adding test and resource group locks for local and distributed runs + A small example package :pypi:`pytest-xdist-tracker` *last release*: Nov 18, 2021, @@ -12874,9 +13692,9 @@ This list contains 1641 plugins. A pytest plugin to list worker statistics after a xdist run. :pypi:`pytest-xdocker` - *last release*: Mar 23, 2025, + *last release*: Jun 10, 2025, *status*: N/A, - *requires*: pytest<8.0.0,>=7.4.2 + *requires*: pytest<9.0.0,>=8.0.0 Pytest fixture to run docker across test runs. @@ -12901,6 +13719,13 @@ This list contains 1641 plugins. A simple plugin to use with pytest + :pypi:`pytest-xhtml` + *last release*: Oct 18, 2025, + *status*: 5 - Production/Stable, + *requires*: pytest>=7 + + pytest plugin for generating HTML reports + :pypi:`pytest-xiuyu` *last release*: Jul 25, 2023, *status*: 5 - Production/Stable, @@ -12978,6 +13803,13 @@ This list contains 1641 plugins. + :pypi:`pytest-xtime` + *last release*: Jun 05, 2025, + *status*: 4 - Beta, + *requires*: pytest + + pytest plugin for recording execution time + :pypi:`pytest-xvfb` *last release*: Mar 12, 2025, *status*: 4 - Beta, @@ -13000,14 +13832,14 @@ This list contains 1641 plugins. This plugin is used to load yaml output to your test using pytest framework. 
:pypi:`pytest-yaml-fei` - *last release*: Feb 09, 2025, + *last release*: Aug 03, 2025, *status*: N/A, *requires*: pytest a pytest yaml allure package :pypi:`pytest-yaml-sanmu` - *last release*: Jan 03, 2025, + *last release*: Sep 16, 2025, *status*: N/A, *requires*: pytest>=8.2.2 @@ -13112,7 +13944,7 @@ This list contains 1641 plugins. Pytest fixtures for testing Camunda 8 processes using a Zeebe test engine. :pypi:`pytest-zephyr-scale-integration` - *last release*: May 15, 2025, + *last release*: Jun 26, 2025, *status*: N/A, *requires*: pytest diff --git a/doc/en/reference/reference.rst b/doc/en/reference/reference.rst index d3dd14a8681..3760add53cf 100644 --- a/doc/en/reference/reference.rst +++ b/doc/en/reference/reference.rst @@ -18,7 +18,7 @@ The current pytest version, as a string:: >>> import pytest >>> pytest.__version__ - '7.0.0' + '9.0.2' .. _`hidden-param`: @@ -261,7 +261,7 @@ pytest.mark.xfail Marks a test function as *expected to fail*. -.. py:function:: pytest.mark.xfail(condition=False, *, reason=None, raises=None, run=True, strict=xfail_strict) +.. py:function:: pytest.mark.xfail(condition=False, *, reason=None, raises=None, run=True, strict=strict_xfail) :keyword Union[bool, str] condition: Condition for marking the test function as xfail (``True/False`` or a @@ -286,7 +286,7 @@ Marks a test function as *expected to fail*. that are always failing and there should be a clear indication if they unexpectedly start to pass (for example a new release of a library fixes a known bug). - Defaults to :confval:`xfail_strict`, which is ``False`` by default. + Defaults to :confval:`strict_xfail`, which is ``False`` by default. Custom marks @@ -572,6 +572,19 @@ The ``request`` fixture is a special fixture providing information of the reques :members: +.. fixture:: subtests + +subtests +~~~~~~~~ + +The ``subtests`` fixture enables declaring subtests inside test functions. + +**Tutorial**: :ref:`subtests` + +.. autoclass:: pytest.Subtests() + :members: + + .. 
fixture:: testdir testdir @@ -744,6 +757,7 @@ items, delete or otherwise amend the test items: If this hook is implemented in ``conftest.py`` files, it always receives all collected items, not only those under the ``conftest.py`` where it is implemented. +.. hook:: pytest_collection_finish .. autofunction:: pytest_collection_finish Test running (runtest) hooks @@ -1165,77 +1179,77 @@ Environment variables that can be used to change pytest's behavior. .. envvar:: CI -When set (regardless of value), pytest acknowledges that is running in a CI process. Alternative to ``BUILD_NUMBER`` variable. See also :ref:`ci-pipelines`. + When set to a non-empty value, pytest acknowledges that it is running in a CI process. See also :ref:`ci-pipelines`. .. envvar:: BUILD_NUMBER -When set (regardless of value), pytest acknowledges that is running in a CI process. Alternative to CI variable. See also :ref:`ci-pipelines`. + When set to a non-empty value, pytest acknowledges that it is running in a CI process. Alternative to :envvar:`CI`. See also :ref:`ci-pipelines`. .. envvar:: PYTEST_ADDOPTS -This contains a command-line (parsed by the py:mod:`shlex` module) that will be **prepended** to the command line given -by the user, see :ref:`adding default options` for more information. + This contains a command-line (parsed by the py:mod:`shlex` module) that will be **prepended** to the command line given + by the user, see :ref:`adding default options` for more information. .. envvar:: PYTEST_VERSION -This environment variable is defined at the start of the pytest session and is undefined afterwards. -It contains the value of ``pytest.__version__``, and among other things can be used to easily check if a code is running from within a pytest run. + This environment variable is defined at the start of the pytest session and is undefined afterwards. 
+ It contains the value of ``pytest.__version__``, and among other things can be used to easily check if a code is running from within a pytest run. .. envvar:: PYTEST_CURRENT_TEST -This is not meant to be set by users, but is set by pytest internally with the name of the current test so other -processes can inspect it, see :ref:`pytest current test env` for more information. + This is not meant to be set by users, but is set by pytest internally with the name of the current test so other + processes can inspect it, see :ref:`pytest current test env` for more information. .. envvar:: PYTEST_DEBUG -When set, pytest will print tracing and debug information. + When set, pytest will print tracing and debug information. .. envvar:: PYTEST_DEBUG_TEMPROOT -Root for temporary directories produced by fixtures like :fixture:`tmp_path` -as discussed in :ref:`temporary directory location and retention`. + Root for temporary directories produced by fixtures like :fixture:`tmp_path` + as discussed in :ref:`temporary directory location and retention`. .. envvar:: PYTEST_DISABLE_PLUGIN_AUTOLOAD -When set, disables plugin auto-loading through :std:doc:`entry point packaging -metadata `. Only plugins -explicitly specified in :envvar:`PYTEST_PLUGINS` or with ``-p`` will be loaded. -See also :ref:`--disable-plugin-autoload `. + When set, disables plugin auto-loading through :std:doc:`entry point packaging + metadata `. Only plugins + explicitly specified in :envvar:`PYTEST_PLUGINS` or with :option:`-p` will be loaded. + See also :ref:`--disable-plugin-autoload `. .. envvar:: PYTEST_PLUGINS -Contains comma-separated list of modules that should be loaded as plugins: + Contains comma-separated list of modules that should be loaded as plugins: -.. code-block:: bash + .. code-block:: bash - export PYTEST_PLUGINS=mymodule.plugin,xdist + export PYTEST_PLUGINS=mymodule.plugin,xdist -See also ``-p``. + See also :option:`-p`. .. 
envvar:: PYTEST_THEME -Sets a `pygment style `_ to use for the code output. + Sets a `pygment style `_ to use for the code output. .. envvar:: PYTEST_THEME_MODE -Sets the :envvar:`PYTEST_THEME` to be either *dark* or *light*. + Sets the :envvar:`PYTEST_THEME` to be either *dark* or *light*. .. envvar:: PY_COLORS -When set to ``1``, pytest will use color in terminal output. -When set to ``0``, pytest will not use color. -``PY_COLORS`` takes precedence over ``NO_COLOR`` and ``FORCE_COLOR``. + When set to ``1``, pytest will use color in terminal output. + When set to ``0``, pytest will not use color. + ``PY_COLORS`` takes precedence over ``NO_COLOR`` and ``FORCE_COLOR``. .. envvar:: NO_COLOR -When set to a non-empty string (regardless of value), pytest will not use color in terminal output. -``PY_COLORS`` takes precedence over ``NO_COLOR``, which takes precedence over ``FORCE_COLOR``. -See `no-color.org `__ for other libraries supporting this community standard. + When set to a non-empty string (regardless of value), pytest will not use color in terminal output. + ``PY_COLORS`` takes precedence over ``NO_COLOR``, which takes precedence over ``FORCE_COLOR``. + See `no-color.org `__ for other libraries supporting this community standard. .. envvar:: FORCE_COLOR -When set to a non-empty string (regardless of value), pytest will use color in terminal output. -``PY_COLORS`` and ``NO_COLOR`` take precedence over ``FORCE_COLOR``. + When set to a non-empty string (regardless of value), pytest will use color in terminal output. + ``PY_COLORS`` and ``NO_COLOR`` take precedence over ``FORCE_COLOR``. Exceptions ---------- @@ -1274,6 +1288,9 @@ Custom warnings generated in some situations such as improper usage or deprecate .. autoclass:: pytest.PytestExperimentalApiWarning :show-inheritance: +.. autoclass:: pytest.PytestReturnNotNoneWarning + :show-inheritance: + .. 
autoclass:: pytest.PytestRemovedIn9Warning :show-inheritance: @@ -1298,13 +1315,13 @@ Configuration Options Here is a list of builtin configuration options that may be written in a ``pytest.ini`` (or ``.pytest.ini``), ``pyproject.toml``, ``tox.ini``, or ``setup.cfg`` file, usually located at the root of your repository. -To see each file format in details, see :ref:`config file formats`. +To see each file format in detail, see :ref:`config file formats`. .. warning:: Usage of ``setup.cfg`` is not recommended except for very simple use cases. ``.cfg`` files use a different parser than ``pytest.ini`` and ``tox.ini`` which might cause hard to track down problems. - When possible, it is recommended to use the latter files, or ``pyproject.toml``, to hold your pytest configuration. + When possible, it is recommended to use the latter files, or ``pytest.toml`` or ``pyproject.toml``, to hold your pytest configuration. Configuration options may be overwritten in the command-line by using ``-o/--override-ini``, which can also be passed multiple times. The expected format is ``name=value``. For example:: @@ -1315,13 +1332,13 @@ passed multiple times. The expected format is ``name=value``. For example:: .. confval:: addopts Add the specified ``OPTS`` to the set of command line arguments as if they - had been specified by the user. Example: if you have this ini file content: + had been specified by the user. Example: if you have this configuration file content: - .. code-block:: ini + .. code-block:: toml - # content of pytest.ini + # content of pytest.toml [pytest] - addopts = --maxfail=2 -rf # exit after 2 failures, report fail info + addopts = ["--maxfail=2", "-rf"] # exit after 2 failures, report fail info issuing ``pytest test_hello.py`` actually means: @@ -1348,10 +1365,19 @@ passed multiple times. The expected format is ``name=value``. 
For example:: Setting this to ``false`` will make pytest collect classes/functions from test files **only** if they are defined in that file (as opposed to imported there). - .. code-block:: ini + .. tab:: toml - [pytest] - collect_imported_tests = false + .. code-block:: toml + + [pytest] + collect_imported_tests = false + + .. tab:: ini + + .. code-block:: ini + + [pytest] + collect_imported_tests = false Default: ``true`` @@ -1381,6 +1407,7 @@ passed multiple times. The expected format is ``name=value``. For example:: when collecting Python modules. Default is ``False``. Set to ``True`` if the package you are testing is part of a namespace package. + Namespace packages are also supported as :option:`--pyargs` target. Only `native namespace packages `__ are supported, with no plans to support `legacy namespace packages `__. @@ -1400,11 +1427,19 @@ passed multiple times. The expected format is ``name=value``. For example:: The default is ``progress``, but you can fallback to ``classic`` if you prefer or the new mode is causing unexpected problems: - .. code-block:: ini + .. tab:: toml - # content of pytest.ini - [pytest] - console_output_style = classic + .. code-block:: toml + + [pytest] + console_output_style = "classic" + + .. tab:: ini + + .. code-block:: ini + + [pytest] + console_output_style = classic .. confval:: disable_test_id_escaping_and_forfeit_all_rights_to_community_support @@ -1415,12 +1450,21 @@ passed multiple times. The expected format is ``name=value``. For example:: for the parametrization because it has several downsides. If however you would like to use unicode strings in parametrization and see them in the terminal as is (non-escaped), use this option - in your ``pytest.ini``: + in your configuration file: + + .. tab:: toml + + .. code-block:: toml + + [pytest] + disable_test_id_escaping_and_forfeit_all_rights_to_community_support = true - .. code-block:: ini + .. 
tab:: ini - [pytest] - disable_test_id_escaping_and_forfeit_all_rights_to_community_support = True + .. code-block:: ini + + [pytest] + disable_test_id_escaping_and_forfeit_all_rights_to_community_support = true Keep in mind however that this might cause unwanted side effects and even bugs depending on the OS used and plugins currently installed, @@ -1454,11 +1498,19 @@ passed multiple times. The expected format is ``name=value``. For example:: * ``xfail`` marks tests with an empty parameterset as xfail(run=False) * ``fail_at_collect`` raises an exception if parametrize collects an empty parameter set - .. code-block:: ini + .. tab:: toml + + .. code-block:: toml + + [pytest] + empty_parameter_set_mark = "xfail" + + .. tab:: ini - # content of pytest.ini - [pytest] - empty_parameter_set_mark = xfail + .. code-block:: ini + + [pytest] + empty_parameter_set_mark = xfail .. note:: @@ -1466,17 +1518,72 @@ passed multiple times. The expected format is ``name=value``. For example:: as this is considered less error prone, see :issue:`3155` for more details. +.. confval:: enable_assertion_pass_hook + + Enables the :hook:`pytest_assertion_pass` hook. + Make sure to delete any previously generated ``.pyc`` cache files. + + .. tab:: toml + + .. code-block:: toml + + [pytest] + enable_assertion_pass_hook = true + + .. tab:: ini + + .. code-block:: ini + + [pytest] + enable_assertion_pass_hook = true + + +.. confval:: faulthandler_exit_on_timeout + + Exit the pytest process after the per-test timeout is reached by passing + `exit=True` to the :func:`faulthandler.dump_traceback_later` function. This + is particularly useful to avoid wasting CI resources for test suites that + are prone to putting the main Python interpreter into a deadlock state. + + This option is set to 'false' by default. + + .. tab:: toml + + .. code-block:: toml + + [pytest] + faulthandler_timeout = 5 + faulthandler_exit_on_timeout = true + + .. tab:: ini + + .. 
code-block:: ini + + [pytest] + faulthandler_timeout = 5 + faulthandler_exit_on_timeout = true + + + .. confval:: faulthandler_timeout Dumps the tracebacks of all threads if a test takes longer than ``X`` seconds to run (including fixture setup and teardown). Implemented using the :func:`faulthandler.dump_traceback_later` function, so all caveats there apply. - .. code-block:: ini + .. tab:: toml - # content of pytest.ini - [pytest] - faulthandler_timeout=5 + .. code-block:: toml + + [pytest] + faulthandler_timeout = 5 + + .. tab:: ini + + .. code-block:: ini + + [pytest] + faulthandler_timeout = 5 For more information please refer to :ref:`faulthandler`. @@ -1488,13 +1595,21 @@ passed multiple times. The expected format is ``name=value``. For example:: warnings. By default all warnings emitted during the test session will be displayed in a summary at the end of the test session. - .. code-block:: ini + .. tab:: toml - # content of pytest.ini - [pytest] - filterwarnings = - error - ignore::DeprecationWarning + .. code-block:: toml + + [pytest] + filterwarnings = ["error", "ignore::DeprecationWarning"] + + .. tab:: ini + + .. code-block:: ini + + [pytest] + filterwarnings = + error + ignore::DeprecationWarning This tells pytest to ignore deprecation warnings and turn all other warnings into errors. For more information please refer to :ref:`warnings`. @@ -1509,10 +1624,19 @@ passed multiple times. The expected format is ``name=value``. For example:: * ``total`` (the default): duration times reported include setup, call, and teardown times. * ``call``: duration times reported include only call times, excluding setup and teardown. - .. code-block:: ini + .. tab:: toml - [pytest] - junit_duration_report = call + .. code-block:: toml + + [pytest] + junit_duration_report = "call" + + .. tab:: ini + + .. code-block:: ini + + [pytest] + junit_duration_report = call .. confval:: junit_family @@ -1526,10 +1650,41 @@ passed multiple times. 
The expected format is ``name=value``. For example:: * ``xunit1`` (or ``legacy``): produces old style output, compatible with the xunit 1.0 format. * ``xunit2``: produces `xunit 2.0 style output `__, which should be more compatible with latest Jenkins versions. **This is the default**. - .. code-block:: ini + .. tab:: toml - [pytest] - junit_family = xunit2 + .. code-block:: toml + + [pytest] + junit_family = "xunit2" + + .. tab:: ini + + .. code-block:: ini + + [pytest] + junit_family = xunit2 + + +.. confval:: junit_log_passing_tests + + .. versionadded:: 4.6 + + If ``junit_logging != "no"``, configures if the captured output should be written + to the JUnit XML file for **passing** tests. Default is ``True``. + + .. tab:: toml + + .. code-block:: toml + + [pytest] + junit_log_passing_tests = false + + .. tab:: ini + + .. code-block:: ini + + [pytest] + junit_log_passing_tests = False .. confval:: junit_logging @@ -1547,39 +1702,44 @@ passed multiple times. The expected format is ``name=value``. For example:: * ``all``: write captured ``logging``, ``stdout`` and ``stderr`` contents. * ``no`` (the default): no captured output is written. - .. code-block:: ini - - [pytest] - junit_logging = system-out - + .. tab:: toml -.. confval:: junit_log_passing_tests + .. code-block:: toml - .. versionadded:: 4.6 + [pytest] + junit_logging = "system-out" - If ``junit_logging != "no"``, configures if the captured output should be written - to the JUnit XML file for **passing** tests. Default is ``True``. + .. tab:: ini - .. code-block:: ini + .. code-block:: ini - [pytest] - junit_log_passing_tests = False + [pytest] + junit_logging = system-out .. confval:: junit_suite_name To set the name of the root test suite xml item, you can configure the ``junit_suite_name`` option in your config file: - .. code-block:: ini + .. tab:: toml - [pytest] - junit_suite_name = my_suite + .. code-block:: toml + + [pytest] + junit_suite_name = "my_suite" + + .. tab:: ini + + .. 
code-block:: ini + + [pytest] + junit_suite_name = my_suite .. confval:: log_auto_indent Allow selective auto-indentation of multiline log messages. - Supports command line option ``--log-auto-indent [value]`` + Supports command line option :option:`--log-auto-indent=[value]` and config option ``log_auto_indent = [value]`` to set the auto-indentation behavior for all logging. @@ -1588,10 +1748,19 @@ passed multiple times. The expected format is ``name=value``. For example:: * False or "Off" or 0 - Do not auto-indent multiline log messages (the default behavior) * [positive integer] - auto-indent multiline log messages by [value] spaces - .. code-block:: ini + .. tab:: toml - [pytest] - log_auto_indent = False + .. code-block:: toml + + [pytest] + log_auto_indent = false + + .. tab:: ini + + .. code-block:: ini + + [pytest] + log_auto_indent = false Supports passing kwarg ``extra={"auto_indent": [value]}`` to calls to ``logging.log()`` to specify auto-indentation behavior for @@ -1603,10 +1772,19 @@ passed multiple times. The expected format is ``name=value``. For example:: Enable log display during test run (also known as :ref:`"live logging" `). The default is ``False``. - .. code-block:: ini + .. tab:: toml - [pytest] - log_cli = True + .. code-block:: toml + + [pytest] + log_cli = true + + .. tab:: ini + + .. code-block:: ini + + [pytest] + log_cli = true .. confval:: log_cli_date_format @@ -1614,10 +1792,19 @@ passed multiple times. The expected format is ``name=value``. For example:: Sets a :py:func:`time.strftime`-compatible string that will be used when formatting dates for live logging. - .. code-block:: ini + .. tab:: toml - [pytest] - log_cli_date_format = %Y-%m-%d %H:%M:%S + .. code-block:: toml + + [pytest] + log_cli_date_format = "%Y-%m-%d %H:%M:%S" + + .. tab:: ini + + .. code-block:: ini + + [pytest] + log_cli_date_format = %Y-%m-%d %H:%M:%S For more information, see :ref:`live_logs`. @@ -1627,10 +1814,19 @@ passed multiple times. 
The expected format is ``name=value``. For example:: Sets a :py:mod:`logging`-compatible string used to format live logging messages. - .. code-block:: ini + .. tab:: toml - [pytest] - log_cli_format = %(asctime)s %(levelname)s %(message)s + .. code-block:: toml + + [pytest] + log_cli_format = "%(asctime)s %(levelname)s %(message)s" + + .. tab:: ini + + .. code-block:: ini + + [pytest] + log_cli_format = %(asctime)s %(levelname)s %(message)s For more information, see :ref:`live_logs`. @@ -1640,12 +1836,24 @@ passed multiple times. The expected format is ``name=value``. For example:: Sets the minimum log message level that should be captured for live logging. The integer value or - the names of the levels can be used. + the names of the levels can be used. Note in TOML the integer must be quoted, as there is no support + for config parameters of mixed type. - .. code-block:: ini + .. tab:: toml - [pytest] - log_cli_level = INFO + .. code-block:: toml + + [pytest] + log_cli_level = "INFO" + log_cli_level = "10" + + .. tab:: ini + + .. code-block:: ini + + [pytest] + log_cli_level = INFO + log_cli_level = 10 For more information, see :ref:`live_logs`. @@ -1656,10 +1864,19 @@ passed multiple times. The expected format is ``name=value``. For example:: Sets a :py:func:`time.strftime`-compatible string that will be used when formatting dates for logging capture. - .. code-block:: ini + .. tab:: toml - [pytest] - log_date_format = %Y-%m-%d %H:%M:%S + .. code-block:: toml + + [pytest] + log_date_format = "%Y-%m-%d %H:%M:%S" + + .. tab:: ini + + .. code-block:: ini + + [pytest] + log_date_format = %Y-%m-%d %H:%M:%S For more information, see :ref:`logging`. @@ -1671,10 +1888,19 @@ passed multiple times. The expected format is ``name=value``. For example:: Sets a file name relative to the current working directory where log messages should be written to, in addition to the other logging facilities that are active. - .. code-block:: ini + .. 
tab:: toml - [pytest] - log_file = logs/pytest-logs.txt + .. code-block:: toml + + [pytest] + log_file = "logs/pytest-logs.txt" + + .. tab:: ini + + .. code-block:: ini + + [pytest] + log_file = logs/pytest-logs.txt For more information, see :ref:`logging`. @@ -1685,10 +1911,19 @@ passed multiple times. The expected format is ``name=value``. For example:: Sets a :py:func:`time.strftime`-compatible string that will be used when formatting dates for the logging file. - .. code-block:: ini + .. tab:: toml - [pytest] - log_file_date_format = %Y-%m-%d %H:%M:%S + .. code-block:: toml + + [pytest] + log_file_date_format = "%Y-%m-%d %H:%M:%S" + + .. tab:: ini + + .. code-block:: ini + + [pytest] + log_file_date_format = %Y-%m-%d %H:%M:%S For more information, see :ref:`logging`. @@ -1698,10 +1933,19 @@ passed multiple times. The expected format is ``name=value``. For example:: Sets a :py:mod:`logging`-compatible string used to format logging messages redirected to the logging file. - .. code-block:: ini + .. tab:: toml - [pytest] - log_file_format = %(asctime)s %(levelname)s %(message)s + .. code-block:: toml + + [pytest] + log_file_format = "%(asctime)s %(levelname)s %(message)s" + + .. tab:: ini + + .. code-block:: ini + + [pytest] + log_file_format = %(asctime)s %(levelname)s %(message)s For more information, see :ref:`logging`. @@ -1710,12 +1954,46 @@ passed multiple times. The expected format is ``name=value``. For example:: Sets the minimum log message level that should be captured for the logging file. The integer value or - the names of the levels can be used. + the names of the levels can be used. Note in TOML the integer must be quoted, as there is no support + for config parameters of mixed type. - .. code-block:: ini + .. tab:: toml - [pytest] - log_file_level = INFO + .. code-block:: toml + + [pytest] + log_file_level = "INFO" + log_file_level = "10" + + .. tab:: ini + + ..
code-block:: ini + + [pytest] + log_file_level = INFO + log_file_level = 10 + + For more information, see :ref:`logging`. + + +.. confval:: log_file_mode + + Sets the mode that the logging file is opened with. + The options are ``"w"`` to recreate the file (the default) or ``"a"`` to append to the file. + + .. tab:: toml + + .. code-block:: toml + + [pytest] + log_file_mode = "a" + + .. tab:: ini + + .. code-block:: ini + + [pytest] + log_file_mode = a For more information, see :ref:`logging`. @@ -1726,10 +2004,19 @@ passed multiple times. The expected format is ``name=value``. For example:: Sets a :py:mod:`logging`-compatible string used to format captured logging messages. - .. code-block:: ini + .. tab:: toml - [pytest] - log_format = %(asctime)s %(levelname)s %(message)s + .. code-block:: toml + + [pytest] + log_format = "%(asctime)s %(levelname)s %(message)s" + + .. tab:: ini + + .. code-block:: ini + + [pytest] + log_format = %(asctime)s %(levelname)s %(message)s For more information, see :ref:`logging`. @@ -1739,47 +2026,73 @@ passed multiple times. The expected format is ``name=value``. For example:: Sets the minimum log message level that should be captured for logging capture. The integer value or - the names of the levels can be used. + the names of the levels can be used. Note in TOML the integer must be quoted, as there is no support + for config parameters of mixed type. - .. code-block:: ini + .. tab:: toml - [pytest] - log_level = INFO + .. code-block:: toml + + [pytest] + log_level = "INFO" + log_level = "10" + + .. tab:: ini + + .. code-block:: ini + + [pytest] + log_level = INFO + log_level = 10 For more information, see :ref:`logging`. .. confval:: markers - When the ``--strict-markers`` or ``--strict`` command-line arguments are used, + When the :confval:`strict_markers` configuration option is set, only known markers - defined in code by core pytest or some plugin - are allowed.
You can list additional markers in this setting to add them to the whitelist, - in which case you probably want to add ``--strict-markers`` to ``addopts`` + in which case you probably want to set :confval:`strict_markers` to ``true`` to avoid future regressions: - .. code-block:: ini + .. tab:: toml - [pytest] - addopts = --strict-markers - markers = - slow - serial + .. code-block:: toml + + [pytest] + strict_markers = true + markers = ["slow", "serial"] + + .. tab:: ini + + .. code-block:: ini + + [pytest] + strict_markers = true + markers = + slow + serial - .. note:: - The use of ``--strict-markers`` is highly preferred. ``--strict`` was kept for - backward compatibility only and may be confusing for others as it only applies to - markers and not to other options. .. confval:: minversion Specifies a minimal pytest version required for running tests. - .. code-block:: ini + .. tab:: toml - # content of pytest.ini - [pytest] - minversion = 3.0 # will fail if we run with pytest-2.8 + .. code-block:: toml + + [pytest] + minversion = "3.0" # will fail if we run with pytest-2.8 + + .. tab:: ini + + .. code-block:: ini + + [pytest] + minversion = 3.0 # will fail if we run with pytest-2.8 .. confval:: norecursedirs @@ -1799,10 +2112,19 @@ passed multiple times. The expected format is ``name=value``. For example:: Setting a ``norecursedirs`` replaces the default. Here is an example of how to avoid certain directories: - .. code-block:: ini + .. tab:: toml - [pytest] - norecursedirs = .svn _build tmp* + .. code-block:: toml + + [pytest] + norecursedirs = [".svn", "_build", "tmp*"] + + .. tab:: ini + + .. code-block:: ini + + [pytest] + norecursedirs = .svn _build tmp* This would tell ``pytest`` to not look into typical subversion or sphinx-build directories or into any ``tmp`` prefixed directory. @@ -1810,7 +2132,7 @@ passed multiple times. The expected format is ``name=value``.
For example:: Additionally, ``pytest`` will attempt to intelligently identify and ignore a virtualenv. Any directory deemed to be the root of a virtual environment will not be considered during test collection unless - ``--collect-in-virtualenv`` is given. Note also that ``norecursedirs`` + :option:`--collect-in-virtualenv` is given. Note also that ``norecursedirs`` takes precedence over ``--collect-in-virtualenv``; e.g. if you intend to run tests in a virtualenv with a base directory that matches ``'.*'`` you *must* override ``norecursedirs`` in addition to using the @@ -1825,10 +2147,19 @@ passed multiple times. The expected format is ``name=value``. For example:: class prefixed with ``Test`` as a test collection. Here is an example of how to collect tests from classes that end in ``Suite``: - .. code-block:: ini + .. tab:: toml - [pytest] - python_classes = *Suite + .. code-block:: toml + + [pytest] + python_classes = ["*Suite"] + + .. tab:: ini + + .. code-block:: ini + + [pytest] + python_classes = *Suite Note that ``unittest.TestCase`` derived classes are always collected regardless of this option, as ``unittest``'s own collection framework is used @@ -1841,20 +2172,29 @@ passed multiple times. The expected format is ``name=value``. For example:: are considered as test modules. Search for multiple glob patterns by adding a space between patterns: - .. code-block:: ini + .. tab:: toml - [pytest] - python_files = test_*.py check_*.py example_*.py + .. code-block:: toml - Or one per line: + [pytest] + python_files = ["test_*.py", "check_*.py", "example_*.py"] - .. code-block:: ini + .. tab:: ini - [pytest] - python_files = - test_*.py - check_*.py - example_*.py + .. code-block:: ini + + [pytest] + python_files = test_*.py check_*.py example_*.py + + Or one per line: + + .. code-block:: ini + + [pytest] + python_files = + test_*.py + check_*.py + example_*.py By default, files matching ``test_*.py`` and ``*_test.py`` will be considered test modules. 
@@ -1868,10 +2208,19 @@ passed multiple times. The expected format is ``name=value``. For example:: function prefixed with ``test`` as a test. Here is an example of how to collect test functions and methods that end in ``_test``: - .. code-block:: ini + .. tab:: toml - [pytest] - python_functions = *_test + .. code-block:: toml + + [pytest] + python_functions = ["*_test"] + + .. tab:: ini + + .. code-block:: ini + + [pytest] + python_functions = *_test Note that this has no effect on methods that live on a ``unittest.TestCase`` derived class, as ``unittest``'s own collection framework is used @@ -1889,10 +2238,19 @@ passed multiple times. The expected format is ``name=value``. For example:: Paths are relative to the :ref:`rootdir ` directory. Directories remain in path for the duration of the test session. - .. code-block:: ini + .. tab:: toml - [pytest] - pythonpath = src1 src2 + .. code-block:: toml + + [pytest] + pythonpath = ["src1", "src2"] + + .. tab:: ini + + .. code-block:: ini + + [pytest] + pythonpath = src1 src2 .. confval:: required_plugins @@ -1902,162 +2260,411 @@ passed multiple times. The expected format is ``name=value``. For example:: their name. Whitespace between different version specifiers is not allowed. If any one of the plugins is not found, emit an error. - .. code-block:: ini + .. tab:: toml - [pytest] - required_plugins = pytest-django>=3.0.0,<4.0.0 pytest-html pytest-xdist>=1.0.0 + .. code-block:: toml + + [pytest] + required_plugins = ["pytest-django>=3.0.0,<4.0.0", "pytest-html", "pytest-xdist>=1.0.0"] -.. confval:: testpaths - - Sets list of directories that should be searched for tests when - no specific directories, files or test ids are given in the command line when - executing pytest from the :ref:`rootdir ` directory. - File system paths may use shell-style wildcards, including the recursive - ``**`` pattern. + ..
tab:: ini - Useful when all project tests are in a known location to speed up - test collection and to avoid picking up undesired tests by accident. + .. code-block:: ini - .. code-block:: ini + [pytest] + required_plugins = pytest-django>=3.0.0,<4.0.0 pytest-html pytest-xdist>=1.0.0 - [pytest] - testpaths = testing doc - This configuration means that executing: +.. confval:: strict - .. code-block:: console + If set to ``true``, enable "strict mode", which enables the following options: - pytest + * :confval:`strict_config` + * :confval:`strict_markers` + * :confval:`strict_parametrization_ids` + * :confval:`strict_xfail` - has the same practical effects as executing: + Plugins may also enable their own strictness options. - .. code-block:: console + If you explicitly set an individual strictness option, it takes precedence over ``strict``. - pytest testing doc + .. note:: + If pytest adds new strictness options in the future, they will also be enabled in strict mode. + Therefore, you should only enable strict mode if you use a pinned/locked version of pytest, + or if you want to proactively adopt new strictness options as they are added. -.. confval:: tmp_path_retention_count + .. tab:: toml - How many sessions should we keep the `tmp_path` directories, - according to `tmp_path_retention_policy`. + .. code-block:: toml - .. code-block:: ini + [pytest] + strict = true - [pytest] - tmp_path_retention_count = 3 + .. tab:: ini - Default: ``3`` + .. code-block:: ini + [pytest] + strict = true -.. confval:: tmp_path_retention_policy + .. versionadded:: 9.0 +.. confval:: strict_config - Controls which directories created by the `tmp_path` fixture are kept around, - based on test outcome. + If set to ``true``, any warnings encountered while parsing the ``pytest`` section of the configuration file will raise errors. - * `all`: retains directories for all tests, regardless of the outcome. - * `failed`: retains directories only for tests with outcome `error` or `failed`. 
- * `none`: directories are always removed after each test ends, regardless of the outcome. + .. tab:: toml - .. code-block:: ini + .. code-block:: toml - [pytest] - tmp_path_retention_policy = all + [pytest] + strict_config = true - Default: ``all`` + .. tab:: ini + .. code-block:: ini -.. confval:: truncation_limit_chars + [pytest] + strict_config = true - Controls maximum number of characters to truncate assertion message contents. + You can also enable this option via the :confval:`strict` option. - Setting value to ``0`` disables the character limit for truncation. - .. code-block:: ini +.. confval:: strict_markers - [pytest] - truncation_limit_chars = 640 + If set to ``true``, markers not registered in the ``markers`` section of the configuration file will raise errors. - pytest truncates the assert messages to a certain limit by default to prevent comparison with large data to overload the console output. + .. tab:: toml - Default: ``640`` + .. code-block:: toml - .. note:: + [pytest] + strict_markers = true - If pytest detects it is :ref:`running on CI `, truncation is disabled automatically. + .. tab:: ini + .. code-block:: ini -.. confval:: truncation_limit_lines + [pytest] + strict_markers = true - Controls maximum number of linesto truncate assertion message contents. + You can also enable this option via the :confval:`strict` option. - Setting value to ``0`` disables the lines limit for truncation. - .. code-block:: ini +.. confval:: strict_parametrization_ids - [pytest] - truncation_limit_lines = 8 + If set to ``true``, pytest emits an error if it detects non-unique parameter set IDs. - pytest truncates the assert messages to a certain limit by default to prevent comparison with large data to overload the console output. + If not set (the default), pytest automatically handles this by adding `0`, `1`, ... to duplicate IDs, + making them unique. - Default: ``8`` + .. tab:: toml - .. note:: + .. 
code-block:: toml - If pytest detects it is :ref:`running on CI `, truncation is disabled automatically. + [pytest] + strict_parametrization_ids = true + .. tab:: ini -.. confval:: usefixtures + .. code-block:: ini - List of fixtures that will be applied to all test functions; this is semantically the same to apply - the ``@pytest.mark.usefixtures`` marker to all test functions. + [pytest] + strict_parametrization_ids = true + You can also enable this option via the :confval:`strict` option. - .. code-block:: ini + For example, - [pytest] - usefixtures = - clean_db + .. code-block:: python + import pytest -.. confval:: verbosity_assertions - Set a verbosity level specifically for assertion related output, overriding the application wide level. + @pytest.mark.parametrize("letter", ["a", "a"]) + def test_letter_is_ascii(letter): + assert letter.isascii() - .. code-block:: ini + will emit an error because both cases (parameter sets) have the same auto-generated ID "a". - [pytest] - verbosity_assertions = 2 + To fix the error, if you decide to keep the duplicates, explicitly assign unique IDs: - Defaults to application wide verbosity level (via the ``-v`` command-line option). A special value of - "auto" can be used to explicitly use the global verbosity level. + .. code-block:: python + import pytest -.. confval:: verbosity_test_cases - Set a verbosity level specifically for test case execution related output, overriding the application wide level. + @pytest.mark.parametrize("letter", ["a", "a"], ids=["a0", "a1"]) + def test_letter_is_ascii(letter): + assert letter.isascii() - .. code-block:: ini + See :func:`parametrize ` and :func:`pytest.param` for other ways to set IDs. - [pytest] - verbosity_test_cases = 2 - Defaults to application wide verbosity level (via the ``-v`` command-line option). A special value of - "auto" can be used to explicitly use the global verbosity level. +.. confval:: strict_xfail - -.. 
confval:: xfail_strict - - If set to ``True``, tests marked with ``@pytest.mark.xfail`` that actually succeed will by default fail the + If set to ``true``, tests marked with ``@pytest.mark.xfail`` that actually succeed will by default fail the test suite. For more information, see :ref:`xfail strict tutorial`. + .. tab:: toml - .. code-block:: ini + .. code-block:: toml - [pytest] - xfail_strict = True + [pytest] + strict_xfail = true + + .. tab:: ini + + .. code-block:: ini + + [pytest] + strict_xfail = true + + You can also enable this option via the :confval:`strict` option. + + .. versionchanged:: 9.0 + Renamed from ``xfail_strict`` to ``strict_xfail``. + ``xfail_strict`` is accepted as an alias for ``strict_xfail``. + + +.. confval:: testpaths + + Sets list of directories that should be searched for tests when + no specific directories, files or test ids are given in the command line when + executing pytest from the :ref:`rootdir ` directory. + File system paths may use shell-style wildcards, including the recursive + ``**`` pattern. + + Useful when all project tests are in a known location to speed up + test collection and to avoid picking up undesired tests by accident. + + .. tab:: toml + + .. code-block:: toml + + [pytest] + testpaths = ["testing", "doc"] + + .. tab:: ini + + .. code-block:: ini + + [pytest] + testpaths = testing doc + + This configuration means that executing: + + .. code-block:: console + + pytest + + has the same practical effects as executing: + + .. code-block:: console + + pytest testing doc + +.. confval:: tmp_path_retention_count + + How many sessions should we keep the `tmp_path` directories, + according to :confval:`tmp_path_retention_policy`. + + .. tab:: toml + + .. code-block:: toml + + [pytest] + tmp_path_retention_count = "3" + + .. tab:: ini + + .. code-block:: ini + + [pytest] + tmp_path_retention_count = 3 + + Default: ``3`` + + +.. 
confval:: tmp_path_retention_policy + + + + Controls which directories created by the `tmp_path` fixture are kept around, + based on test outcome. + + * `all`: retains directories for all tests, regardless of the outcome. + * `failed`: retains directories only for tests with outcome `error` or `failed`. + * `none`: directories are always removed after each test ends, regardless of the outcome. + + .. tab:: toml + + .. code-block:: toml + + [pytest] + tmp_path_retention_policy = "all" + + .. tab:: ini + + .. code-block:: ini + + [pytest] + tmp_path_retention_policy = all + + Default: ``all`` + + +.. confval:: truncation_limit_chars + + Controls maximum number of characters to truncate assertion message contents. + + Setting value to ``0`` disables the character limit for truncation. + + .. tab:: toml + + .. code-block:: toml + + [pytest] + truncation_limit_chars = 640 + + .. tab:: ini + + .. code-block:: ini + + [pytest] + truncation_limit_chars = 640 + + pytest truncates the assert messages to a certain limit by default to prevent comparison with large data to overload the console output. + + Default: ``640`` + + .. note:: + + If pytest detects it is :ref:`running on CI `, truncation is disabled automatically. + + +.. confval:: truncation_limit_lines + + Controls maximum number of lines to truncate assertion message contents. + + Setting value to ``0`` disables the lines limit for truncation. + + .. tab:: toml + + .. code-block:: toml + + [pytest] + truncation_limit_lines = 8 + + .. tab:: ini + + .. code-block:: ini + + [pytest] + truncation_limit_lines = 8 + + pytest truncates the assert messages to a certain limit by default to prevent comparison with large data to overload the console output. + + Default: ``8`` + + .. note:: + + If pytest detects it is :ref:`running on CI `, truncation is disabled automatically. + + +.. 
confval:: usefixtures + + List of fixtures that will be applied to all test functions; this is semantically the same to apply + the ``@pytest.mark.usefixtures`` marker to all test functions. + + + .. tab:: toml + + .. code-block:: toml + + [pytest] + usefixtures = ["clean_db"] + + .. tab:: ini + + .. code-block:: ini + + [pytest] + usefixtures = + clean_db + + +.. confval:: verbosity_assertions + + Set a verbosity level specifically for assertion related output, overriding the application wide level. + + .. tab:: toml + + .. code-block:: toml + + [pytest] + verbosity_assertions = "2" + + .. tab:: ini + + .. code-block:: ini + + [pytest] + verbosity_assertions = 2 + + If not set, defaults to application wide verbosity level (via the :option:`-v` command-line option). A special value of + ``"auto"`` can be used to explicitly use the global verbosity level. + + +.. confval:: verbosity_subtests + + Set the verbosity level specifically for **passed** subtests. + + .. tab:: toml + + .. code-block:: toml + + [pytest] + verbosity_subtests = "1" + + .. tab:: ini + + .. code-block:: ini + + [pytest] + verbosity_subtests = 1 + + A value of ``1`` or higher will show output for **passed** subtests (**failed** subtests are always reported). + Passed subtests output can be suppressed with the value ``0``, which overwrites the :option:`-v` command-line option. + + If not set, defaults to application wide verbosity level (via the :option:`-v` command-line option). A special value of + ``"auto"`` can be used to explicitly use the global verbosity level. + + See also: :ref:`subtests`. + + +.. confval:: verbosity_test_cases + + Set a verbosity level specifically for test case execution related output, overriding the application wide level. + + .. tab:: toml + + .. code-block:: toml + + [pytest] + verbosity_test_cases = "2" + + .. tab:: ini + + .. 
code-block:: ini + + [pytest] + verbosity_test_cases = 2 + + If not set, defaults to application wide verbosity level (via the :option:`-v` command-line option). A special value of + ``"auto"`` can be used to explicitly use the global verbosity level. .. _`command-line-flags`: @@ -2065,7 +2672,576 @@ passed multiple times. The expected format is ``name=value``. For example:: Command-line Flags ------------------ -All the command-line flags can be obtained by running ``pytest --help``:: +This section documents all command-line options provided by pytest's core plugins. + +.. note:: + + External plugins can add their own command-line options. + This reference documents only the options from pytest's core plugins. + To see all available options including those from installed plugins, run ``pytest --help``. + +Test Selection +~~~~~~~~~~~~~~ + +.. option:: -k EXPRESSION + + Only run tests which match the given substring expression. + An expression is a Python evaluable expression where all names are substring-matched against test names and their parent classes. + + Examples:: + + pytest -k "test_method or test_other" # matches names containing 'test_method' OR 'test_other' + pytest -k "not test_method" # matches names NOT containing 'test_method' + pytest -k "not test_method and not test_other" # excludes both + + The matching is case-insensitive. + Keywords are also matched to classes and functions containing extra names in their ``extra_keyword_matches`` set. + + See :ref:`select-tests` for more information and examples. + +.. option:: -m MARKEXPR + + Only run tests matching given mark expression. + Supports ``and``, ``or``, and ``not`` operators. + + Examples:: + + pytest -m slow # run tests marked with @pytest.mark.slow + pytest -m "not slow" # run tests NOT marked slow + pytest -m "mark1 and not mark2" # run tests marked mark1 but not mark2 + + See :ref:`mark` for more information on markers. + +.. 
option:: --markers + + Show all available markers (builtin, plugin, and per-project markers defined in configuration). + +Test Execution Control +~~~~~~~~~~~~~~~~~~~~~~~ + +.. option:: -x, --exitfirst + + Exit instantly on first error or failed test. + +.. option:: --maxfail=NUM + + Exit after first ``num`` failures or errors. + Useful for CI environments where you want to fail fast but see a few failures. + +.. option:: --last-failed, --lf + + Rerun only the tests that failed at the last run. + If no tests failed (or no cached data exists), all tests are run. + See also :confval:`cache_dir` and :ref:`cache`. + +.. option:: --failed-first, --ff + + Run all tests, but run the last failures first. + This may re-order tests and thus lead to repeated fixture setup/teardown. + +.. option:: --new-first, --nf + + Run tests from new files first, then the rest of the tests sorted by file modification time. + +.. option:: --stepwise, --sw + + Exit on test failure and continue from last failing test next time. + Useful for fixing multiple test failures one at a time. + + See :ref:`cache stepwise` for more information. + +.. option:: --stepwise-skip, --sw-skip + + Ignore the first failing test but stop on the next failing test. + Implicitly enables :option:`--stepwise`. + +.. option:: --stepwise-reset, --sw-reset + + Resets stepwise state, restarting the stepwise workflow. + Implicitly enables :option:`--stepwise`. + +.. option:: --last-failed-no-failures, --lfnf + + With :option:`--last-failed`, determines whether to execute tests when there are no previously known failures or when no cached ``lastfailed`` data was found. + + * ``all`` (default): runs the full test suite again + * ``none``: just emits a message about no known failures and exits successfully + +.. option:: --runxfail + + Report the results of xfail tests as if they were not marked. + Useful for debugging xfailed tests. + See :ref:`xfail`. + +Collection +~~~~~~~~~~ + +.. 
option:: --collect-only, --co + + Only collect tests, don't execute them. + Shows which tests would be collected and run. + +.. option:: --pyargs + + Try to interpret all arguments as Python packages. + Useful for running tests of installed packages:: + + pytest --pyargs pkg.testing + +.. option:: --ignore=PATH + + Ignore path during collection (multi-allowed). + Can be specified multiple times. + +.. option:: --ignore-glob=PATTERN + + Ignore path pattern during collection (multi-allowed). + Supports glob patterns. + +.. option:: --deselect=NODEID_PREFIX + + Deselect item (via node id prefix) during collection (multi-allowed). + +.. option:: --confcutdir=DIR + + Only load ``conftest.py`` files relative to specified directory. + +.. option:: --noconftest + + Don't load any ``conftest.py`` files. + +.. option:: --keep-duplicates + + Keep duplicate tests. By default, pytest removes duplicate test items. + +.. option:: --collect-in-virtualenv + + Don't ignore tests in a local virtualenv directory. + By default, pytest skips tests in virtualenv directories. + +.. option:: --continue-on-collection-errors + + Force test execution even if collection errors occur. + +.. option:: --import-mode + + Prepend/append to sys.path when importing test modules and conftest files. + + * ``prepend`` (default): prepend to sys.path + * ``append``: append to sys.path + * ``importlib``: use importlib to import test modules + + See :ref:`pythonpath` for more information. + +Fixtures +~~~~~~~~ + +.. option:: --fixtures, --funcargs + + Show available fixtures, sorted by plugin appearance. + Fixtures with leading ``_`` are only shown with :option:`--verbose`. + +.. option:: --fixtures-per-test + + Show fixtures per test. + +.. option:: --setup-only + + Only setup fixtures, do not execute tests. + See :ref:`how-to-fixtures`. + +.. option:: --setup-show + + Show setup of fixtures while executing tests. + +.. 
option:: --setup-plan + + Show what fixtures and tests would be executed but don't execute anything. + +Debugging +~~~~~~~~~ + +.. option:: --pdb + + Start the interactive Python debugger on errors or KeyboardInterrupt. + See :ref:`pdb-option`. + +.. option:: --pdbcls=MODULENAME:CLASSNAME + + Specify a custom interactive Python debugger for use with :option:`--pdb`. + + Example:: + + pytest --pdbcls=IPython.terminal.debugger:TerminalPdb + +.. option:: --trace + + Immediately break when running each test. + + See :ref:`trace-option` for more information. + +.. option:: --full-trace + + Don't cut any tracebacks (default is to cut). + + See :ref:`how-to-modifying-python-tb-printing` for more information. + +.. option:: --debug, --debug=DEBUG_FILE_NAME + + Store internal tracing debug information in this log file. + This file is opened with ``'w'`` and truncated as a result, care advised. + Default file name if not specified: ``pytestdebug.log``. + +.. option:: --trace-config + + Trace considerations of conftest.py files. + +Output and Reporting +~~~~~~~~~~~~~~~~~~~~ + +.. option:: -v, --verbose + + Increase verbosity. + Can be specified multiple times (e.g., ``-vv``) for even more verbose output. + + See :ref:`pytest.fine_grained_verbosity` for fine-grained control over verbosity. + +.. option:: -q, --quiet + + Decrease verbosity. + +.. option:: --verbosity=NUM + + Set verbosity level explicitly. Default: 0. + +.. 
option:: -r CHARS + + Show extra test summary info as specified by chars: + + * ``f``: failed + * ``E``: error + * ``s``: skipped + * ``x``: xfailed + * ``X``: xpassed + * ``p``: passed + * ``P``: passed with output + * ``a``: all except passed (p/P) + * ``A``: all + * ``w``: warnings (enabled by default) + * ``N``: resets the list + + Default: ``'fE'`` + + Examples:: + + pytest -rA # show all outcomes + pytest -rfE # show only failed and errors (default) + pytest -rfs # show failed and skipped + + See :ref:`pytest.detailed_failed_tests_usage` for more information. + +.. option:: --no-header + + Disable header. + +.. option:: --no-summary + + Disable summary. + +.. option:: --no-fold-skipped + + Do not fold skipped tests in short summary. + +.. option:: --force-short-summary + + Force condensed summary output regardless of verbosity level. + +.. option:: -l, --showlocals + + Show locals in tracebacks (disabled by default). + +.. option:: --no-showlocals + + Hide locals in tracebacks (negate :option:`--showlocals` passed through addopts). + +.. option:: --tb=STYLE + + Traceback print mode: + + * ``auto``: intelligent traceback formatting (default) + * ``long``: exhaustive, informative traceback formatting + * ``short``: shorter traceback format + * ``line``: only the failing line + * ``native``: Python's standard traceback + * ``no``: no traceback + + See :ref:`how-to-modifying-python-tb-printing` for examples. + +.. option:: --xfail-tb + + Show tracebacks for xfail (as long as :option:`--tb` != ``no``). + +.. option:: --show-capture + + Controls how captured stdout/stderr/log is shown on failed tests. + + * ``no``: don't show captured output + * ``stdout``: show captured stdout + * ``stderr``: show captured stderr + * ``log``: show captured logging + * ``all`` (default): show all captured output + +.. 
option:: --color=WHEN + + Color terminal output: + + * ``yes``: always use color + * ``no``: never use color + * ``auto`` (default): use color if terminal supports it + +.. option:: --code-highlight={yes,no} + + Whether code should be highlighted (only if :option:`--color` is also enabled). + Default: ``yes``. + +.. option:: --pastebin=MODE + + Send failed|all info to bpaste.net pastebin service. + +.. option:: --durations=NUM + + Show N slowest setup/test durations (N=0 for all). + See :ref:`durations`. + +.. option:: --durations-min=NUM + + Minimal duration in seconds for inclusion in slowest list. + Default: 0.005 (or 0.0 if ``-vv`` is given). + +Output Capture +~~~~~~~~~~~~~~ + +.. option:: --capture=METHOD + + Per-test capturing method: + + * ``fd``: capture at file descriptor level (default) + * ``sys``: capture at sys level + * ``no``: don't capture output + * ``tee-sys``: capture but also show output on terminal + + See :ref:`captures`. + +.. option:: -s + + Shortcut for :option:`--capture=no`. + +JUnit XML +~~~~~~~~~ + +.. option:: --junit-xml=PATH, --junitxml=PATH + + Create junit-xml style report file at given path. + +.. option:: --junit-prefix=STR, --junitprefix=STR + + Prepend prefix to classnames in junit-xml output. + +Cache +~~~~~ + +.. option:: --cache-show[=PATTERN] + + Show cache contents, don't perform collection or tests. + Default glob pattern: ``'*'``. + +.. option:: --cache-clear + + Remove all cache contents at start of test run. + See :ref:`cache`. + +Warnings +~~~~~~~~ + +.. option:: --disable-pytest-warnings, --disable-warnings + + Disable warnings summary. + +.. option:: -W WARNING, --pythonwarnings=WARNING + + Set which warnings to report, see ``-W`` option of Python itself. + Can be specified multiple times. + +Doctest +~~~~~~~ + +.. option:: --doctest-modules + + Run doctests in all .py modules. + + See :ref:`doctest` for more information on using doctests with pytest. + +.. 
option:: --doctest-report + + Choose another output format for diffs on doctest failure: + + * ``none`` + * ``cdiff`` + * ``ndiff`` + * ``udiff`` + * ``only_first_failure`` + +.. option:: --doctest-glob=PATTERN + + Doctests file matching pattern. + Default: ``test*.txt``. + +.. option:: --doctest-ignore-import-errors + + Ignore doctest collection errors. + +.. option:: --doctest-continue-on-failure + + For a given doctest, continue to run after the first failure. + +Configuration +~~~~~~~~~~~~~ + +.. option:: -c FILE, --config-file=FILE + + Load configuration from ``FILE`` instead of trying to locate one of the implicit configuration files. + +.. option:: --rootdir=ROOTDIR + + Define root directory for tests. + Can be relative path: ``'root_dir'``, ``'./root_dir'``, ``'root_dir/another_dir/'``; absolute path: ``'/home/user/root_dir'``; path with variables: ``'$HOME/root_dir'``. + +.. option:: --basetemp=DIR + + Base temporary directory for this test run. + Warning: this directory is removed if it exists. + + See :ref:`temporary directory location and retention` for more information. + +.. option:: -o OPTION=VALUE, --override-ini=OPTION=VALUE + + Override configuration option with ``option=value`` style. + Can be specified multiple times. + + Example:: + + pytest -o strict_xfail=true -o cache_dir=cache + +.. option:: --strict-config + + Enables the :confval:`strict_config` option. + +.. option:: --strict-markers + + Enables the :confval:`strict_markers` option. + +.. option:: --strict + + Enables the :confval:`strict` option (which enables all strictness options). + +.. option:: --assert=MODE + + Control assertion debugging tools: + + * ``plain``: performs no assertion debugging + * ``rewrite`` (default): rewrites assert statements in test modules on import to provide assert expression information + +Logging +~~~~~~~ + +See :ref:`logging` for a guide on using these flags. + +.. option:: --log-level=LEVEL + + Level of messages to catch/display. 
+ Not set by default, so it depends on the root/parent log handler's effective level, where it is ``WARNING`` by default. + +.. option:: --log-format=FORMAT + + Log format used by the logging module. + +.. option:: --log-date-format=FORMAT + + Log date format used by the logging module. + +.. option:: --log-cli-level=LEVEL + + CLI logging level. See :ref:`live_logs`. + +.. option:: --log-cli-format=FORMAT + + Log format used by the logging module for CLI output. + +.. option:: --log-cli-date-format=FORMAT + + Log date format used by the logging module for CLI output. + +.. option:: --log-file=PATH + + Path to a file logging will be written to. + +.. option:: --log-file-mode + + Log file open mode: + + * ``w`` (default): recreate the file + * ``a``: append to the file + +.. option:: --log-file-level=LEVEL + + Log file logging level. + +.. option:: --log-file-format=FORMAT + + Log format used by the logging module for the log file. + +.. option:: --log-file-date-format=FORMAT + + Log date format used by the logging module for the log file. + +.. option:: --log-auto-indent=VALUE + + Auto-indent multiline messages passed to the logging module. + Accepts ``true|on``, ``false|off`` or an integer. + +.. option:: --log-disable=LOGGER + + Disable a logger by name. Can be passed multiple times. + +Plugin and Extension Management +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. option:: -p NAME + + Early-load given plugin module name or entry point (multi-allowed). + To avoid loading of plugins, use the ``no:`` prefix, e.g. ``no:doctest``. + See also :option:`--disable-plugin-autoload`. + +.. option:: --disable-plugin-autoload + + Disable plugin auto-loading through entry point packaging metadata. + Only plugins explicitly specified in :option:`-p` or env var :envvar:`PYTEST_PLUGINS` will be loaded. + +Version and Help +~~~~~~~~~~~~~~~~ + +.. option:: -V, --version + + Display pytest version and information about plugins. When given twice, also display information about plugins. + +.. 
option:: -h, --help + + Show help message and configuration info. + +Complete Help Output +~~~~~~~~~~~~~~~~~~~~ + +All the command-line flags can also be obtained by running ``pytest --help``:: $ pytest --help usage: pytest [options] [file_or_dir] [file_or_dir] [...] @@ -2093,6 +3269,10 @@ All the command-line flags can be obtained by running ``pytest --help``:: example: -m 'mark1 and not mark2'. --markers show markers (builtin, plugin and per-project ones). -x, --exitfirst Exit instantly on first error or failed test + --maxfail=num Exit after first num failures or errors + --strict-config Enables the strict_config option + --strict-markers Enables the strict_markers option + --strict Enables the strict option --fixtures, --funcargs Show available fixtures, sorted by plugin appearance (fixtures with leading '_' are only shown with '-v') @@ -2131,15 +3311,21 @@ All the command-line flags can be obtained by running ``pytest --help``:: --sw-skip, --stepwise-skip Ignore the first failing test but stop on the next failing test. Implicitly enables --stepwise. + --sw-reset, --stepwise-reset + Resets stepwise state, restarting the stepwise + workflow. Implicitly enables --stepwise. Reporting: --durations=N Show N slowest setup/test durations (N=0 for all) --durations-min=N Minimal duration in seconds for inclusion in slowest - list. Default: 0.005. + list. Default: 0.005 (or 0.0 if -vv is given). -v, --verbose Increase verbosity --no-header Disable header --no-summary Disable summary --no-fold-skipped Do not fold skipped tests in short summary. + --force-short-summary + Force condensed summary output regardless of + verbosity level. -q, --quiet Decrease verbosity --verbosity=VERBOSE Set verbosity. Default: 0. 
-r chars Show extra test summary info as specified by chars: @@ -2174,22 +3360,6 @@ All the command-line flags can be obtained by running ``pytest --help``:: -W, --pythonwarnings PYTHONWARNINGS Set which warnings to report, see -W option of Python itself - --maxfail=num Exit after first num failures or errors - --strict-config Any warnings encountered while parsing the `pytest` - section of the configuration file raise errors - --strict-markers Markers not registered in the `markers` section of - the configuration file raise errors - --strict (Deprecated) alias to --strict-markers - -c, --config-file FILE - Load configuration from `FILE` instead of trying to - locate one of the implicit configuration files. - --continue-on-collection-errors - Force test execution even if collection errors occur - --rootdir=ROOTDIR Define root directory for tests. Can be relative - path: 'root_dir', './root_dir', - 'root_dir/another_dir/'; absolute path: - '/home/user/root_dir'; path with variables: - '$HOME/root_dir'. collection: --collect-only, --co Only collect tests, don't execute them @@ -2205,6 +3375,8 @@ All the command-line flags can be obtained by running ``pytest --help``:: --keep-duplicates Keep duplicate tests --collect-in-virtualenv Don't ignore tests in a local virtualenv directory + --continue-on-collection-errors + Force test execution even if collection errors occur --import-mode={prepend,append,importlib} Prepend/append to sys.path when importing test modules and conftest files. Default: prepend. @@ -2220,6 +3392,14 @@ All the command-line flags can be obtained by running ``pytest --help``:: failure test session debugging and configuration: + -c, --config-file FILE + Load configuration from `FILE` instead of trying to + locate one of the implicit configuration files. + --rootdir=ROOTDIR Define root directory for tests. 
Can be relative + path: 'root_dir', './root_dir', + 'root_dir/another_dir/'; absolute path: + '/home/user/root_dir'; path with variables: + '$HOME/root_dir'. --basetemp=dir Base temporary directory for this test run. (Warning: this directory is removed if it exists.) -V, --version Display pytest version and information about @@ -2228,15 +3408,22 @@ All the command-line flags can be obtained by running ``pytest --help``:: -h, --help Show help message and configuration info -p name Early-load given plugin module name or entry point (multi-allowed). To avoid loading of plugins, use - the `no:` prefix, e.g. `no:doctest`. + the `no:` prefix, e.g. `no:doctest`. See also + --disable-plugin-autoload. + --disable-plugin-autoload + Disable plugin auto-loading through entry point + packaging metadata. Only plugins explicitly + specified in -p or env var PYTEST_PLUGINS will be + loaded. --trace-config Trace considerations of conftest.py files --debug=[DEBUG_FILE_NAME] Store internal tracing debug information in this log file. This file is opened with 'w' and truncated as a result, care advised. Default: pytestdebug.log. -o, --override-ini OVERRIDE_INI - Override ini option with "option=value" style, e.g. - `-o xfail_strict=True -o cache_dir=cache`. + Override configuration option with "option=value" + style, e.g. `-o strict_xfail=True -o + cache_dir=cache`. --assert=MODE Control assertion debugging tools. 'plain' performs no assertion debugging. 'rewrite' (the default) rewrites assert statements @@ -2278,18 +3465,29 @@ All the command-line flags can be obtained by running ``pytest --help``:: Disable a logger by name. Can be passed multiple times. 
- [pytest] ini-options in the first pytest.ini|tox.ini|setup.cfg|pyproject.toml file found: + [pytest] configuration options in the first pytest.toml|pytest.ini|tox.ini|setup.cfg|pyproject.toml file found: markers (linelist): Register new markers for test functions empty_parameter_set_mark (string): Default marker for empty parametersets - norecursedirs (args): Directory patterns to avoid for recursion - testpaths (args): Directories to search for tests when no files or - directories are given on the command line + strict_config (bool): Any warnings encountered while parsing the `pytest` + section of the configuration file raise errors + strict_markers (bool): + Markers not registered in the `markers` section of + the configuration file raise errors + strict (bool): Enables all strictness options, currently: + strict_config, strict_markers, strict_xfail, + strict_parametrization_ids filterwarnings (linelist): Each line specifies a pattern for warnings.filterwarnings. Processed after -W/--pythonwarnings. 
+ norecursedirs (args): Directory patterns to avoid for recursion + testpaths (args): Directories to search for tests when no files or + directories are given on the command line + collect_imported_tests (bool): + Whether to collect tests in imported modules outside + `testpaths` consider_namespace_packages (bool): Consider namespace packages when resolving module names during import @@ -2306,6 +3504,9 @@ All the command-line flags can be obtained by running ``pytest --help``:: disable_test_id_escaping_and_forfeit_all_rights_to_community_support (bool): Disable string escape non-ASCII characters, might cause unwanted side effects(use at your own risk) + strict_parametrization_ids (bool): + Emit an error if non-unique parameter set IDs are + detected console_output_style (string): Console output: "classic", or with additional progress information ("progress" (percentage) | @@ -2316,8 +3517,9 @@ All the command-line flags can be obtained by running ``pytest --help``:: overriding the main level. Higher levels will provide more detailed information about each test case executed. - xfail_strict (bool): Default for the strict parameter of xfail markers - when not given explicitly (default: False) + strict_xfail (bool): Default for the strict parameter of xfail markers + when not given explicitly (default: False) (alias: + xfail_strict) tmp_path_retention_count (string): How many sessions should we keep the `tmp_path` directories, according to @@ -2329,6 +3531,12 @@ All the command-line flags can be obtained by running ``pytest --help``:: enable_assertion_pass_hook (bool): Enables the pytest_assertion_pass hook. Make sure to delete any previously generated pyc cache files. + truncation_limit_lines (string): + Set threshold of LINES after which truncation will + take effect + truncation_limit_chars (string): + Set threshold of CHARS after which truncation will + take effect verbosity_assertions (string): Specify a verbosity level for assertions, overriding the main level. 
Higher levels will provide more @@ -2373,22 +3581,32 @@ All the command-line flags can be obtained by running ``pytest --help``:: Default value for --log-file-date-format log_auto_indent (string): Default value for --log-auto-indent - pythonpath (paths): Add paths to sys.path faulthandler_timeout (string): Dump the traceback of all threads if a test takes more than TIMEOUT seconds to finish + faulthandler_exit_on_timeout (bool): + Exit the test process if a test takes more than + faulthandler_timeout seconds to finish + verbosity_subtests (string): + Specify verbosity level for subtests. Higher levels + will generate output for passed subtests. Failed + subtests are always reported. addopts (args): Extra command line options minversion (string): Minimally required pytest version + pythonpath (paths): Add paths to sys.path required_plugins (args): Plugins that must be present for pytest to run Environment variables: - CI When set (regardless of value), pytest knows it is running in a CI process and does not truncate summary info + CI When set to a non-empty value, pytest knows it is running in a CI process and does not truncate summary info BUILD_NUMBER Equivalent to CI PYTEST_ADDOPTS Extra command line options PYTEST_PLUGINS Comma-separated plugins to load during startup PYTEST_DISABLE_PLUGIN_AUTOLOAD Set to disable plugin auto-loading PYTEST_DEBUG Set to enable debug tracing of pytest's internals + PYTEST_DEBUG_TEMPROOT Override the system temporary directory + PYTEST_THEME The Pygments style to use for code output + PYTEST_THEME_MODE Set the PYTEST_THEME to be either 'dark' or 'light' to see available markers type: pytest --markers diff --git a/doc/en/requirements.txt b/doc/en/requirements.txt index ddcb7efb99b..d672a9d7e15 100644 --- a/doc/en/requirements.txt +++ b/doc/en/requirements.txt @@ -1,10 +1,12 @@ -c broken-dep-constraints.txt pluggy>=1.5.0 -pygments-pytest>=2.3.0 +pygments-pytest>=2.5.0 sphinx-removed-in>=0.2.0 -sphinx>=7 +# Pinning to <9.0 due to 
https://github.com/python-trio/sphinxcontrib-trio/issues/399. +sphinx>=7,<9.0 sphinxcontrib-trio sphinxcontrib-svg2pdfconverter furo sphinxcontrib-towncrier sphinx-issues +sphinx-inline-tabs diff --git a/doc/en/sponsor.rst b/doc/en/sponsor.rst index 8362a7f0a3a..6ad722be94c 100644 --- a/doc/en/sponsor.rst +++ b/doc/en/sponsor.rst @@ -2,7 +2,7 @@ Sponsor ======= pytest is maintained by a team of volunteers from all around the world in their free time. While -we work on pytest because we love the project and use it daily at our daily jobs, monetary +we work on pytest because we love the project and use it daily in our jobs, monetary compensation when possible is welcome to justify time away from friends, family and personal time. Money is also used to fund local sprints, merchandising (stickers to distribute in conferences for example) @@ -12,7 +12,7 @@ OpenCollective -------------- `Open Collective`_ is an online funding platform for open and transparent communities. -It provide tools to raise money and share your finances in full transparency. +It provides tools to raise money and share your finances in full transparency. It is the platform of choice for individuals and companies that want to make one-time or monthly donations directly to the project. 
diff --git a/doc/en/talks.rst b/doc/en/talks.rst index b9b153a792e..a45c05c6f2f 100644 --- a/doc/en/talks.rst +++ b/doc/en/talks.rst @@ -17,19 +17,19 @@ Books Talks and blog postings --------------------------------------------- -- Training: `pytest - simple, rapid and fun testing with Python `_, Florian Bruhin, PyConDE 2022 +- Training: `pytest - simple, rapid and fun testing with Python `_, Freya Bruhin, PyConDE 2022 -- `pytest: Simple, rapid and fun testing with Python, `_ (@ 4:22:32), Florian Bruhin, WeAreDevelopers World Congress 2021 +- `pytest: Simple, rapid and fun testing with Python, `_ (@ 4:22:32), Freya Bruhin, WeAreDevelopers World Congress 2021 -- Webinar: `pytest: Test Driven Development für Python (German) `_, Florian Bruhin, via mylearning.ch, 2020 +- Webinar: `pytest: Test Driven Development für Python (German) `_, Freya Bruhin, via mylearning.ch, 2020 - Webinar: `Simplify Your Tests with Fixtures `_, Oliver Bestwalter, via JetBrains, 2020 -- Training: `Introduction to pytest - simple, rapid and fun testing with Python `_, Florian Bruhin, PyConDE 2019 +- Training: `Introduction to pytest - simple, rapid and fun testing with Python `_, Freya Bruhin, PyConDE 2019 - Abridged metaprogramming classics - this episode: pytest, Oliver Bestwalter, PyConDE 2019 (`repository `__, `recording `__) -- Testing PySide/PyQt code easily using the pytest framework, Florian Bruhin, Qt World Summit 2019 (`slides `__, `recording `__) +- Testing PySide/PyQt code easily using the pytest framework, Freya Bruhin, Qt World Summit 2019 (`slides `__, `recording `__) - `pytest: recommendations, basic packages for testing in Python and Django, Andreu Vallbona, PyBCN June 2019 `_. @@ -41,7 +41,7 @@ Talks and blog postings - `Pythonic testing, Igor Starikov (Russian, PyNsk, November 2016) `_. -- `pytest - Rapid Simple Testing, Florian Bruhin, Swiss Python Summit 2016 +- `pytest - Rapid Simple Testing, Freya Bruhin, Swiss Python Summit 2016 `_. 
- `Improve your testing with Pytest and Mock, Gabe Hollombe, PyCon SG 2015 diff --git a/pyproject.toml b/pyproject.toml index 1e9665add02..31b8a029ec5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ [build-system] build-backend = "setuptools.build_meta" requires = [ - "setuptools>=61", + "setuptools>=77", "setuptools-scm[toml]>=6.2.3", ] @@ -13,27 +13,26 @@ keywords = [ "test", "unittest", ] -license = { text = "MIT" } +license = "MIT" +license-files = [ "LICENSE" ] authors = [ { name = "Holger Krekel" }, { name = "Bruno Oliveira" }, { name = "Ronny Pfannschmidt" }, { name = "Floris Bruynooghe" }, { name = "Brianna Laugher" }, - { name = "Florian Bruhin" }, + { name = "Freya Bruhin" }, { name = "Others (See AUTHORS)" }, ] -requires-python = ">=3.9" +requires-python = ">=3.10" classifiers = [ "Development Status :: 6 - Mature", "Intended Audience :: Developers", - "License :: OSI Approved :: MIT License", "Operating System :: MacOS", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Operating System :: Unix", "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", @@ -49,8 +48,8 @@ dynamic = [ dependencies = [ "colorama>=0.4; sys_platform=='win32'", "exceptiongroup>=1; python_version<'3.11'", - "iniconfig>=1", - "packaging>=20", + "iniconfig>=1.0.1", + "packaging>=22", "pluggy>=1.5,<2", "pygments>=2.7.2", "tomli>=1; python_version<'3.11'", @@ -86,10 +85,10 @@ write_to = "src/_pytest/_version.py" [tool.black] # See https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html#t-target-version -target-version = [ "py39", "py310", "py311", "py312", "py313" ] +target-version = [ "py310", "py311", "py312", "py313" ] [tool.ruff] -target-version = "py39" +target-version = "py310" line-length = 88 src = [ "src", @@ -145,6 +144,7 @@ lint.ignore = [ # pylint 
ignore "PLC0105", # `TypeVar` name "E" does not reflect its covariance; "PLC0414", # Import alias does not rename original package + "PLC0415", # import should be at top level of package "PLR0124", # Name compared with itself "PLR0133", # Two constants compared in a comparison (lots of those in tests) "PLR0402", # Use `from x.y import z` in lieu of alias @@ -157,6 +157,7 @@ lint.ignore = [ "PLR5501", # Use `elif` instead of `else` then `if` "PLW0120", # remove the else and dedent its contents "PLW0603", # Using the global statement + "PLW1641", # Does not implement the __hash__ method "PLW2901", # for loop variable overwritten by assignment target # ruff ignore "RUF012", # Mutable class attributes should be annotated with `typing.ClassVar` @@ -240,13 +241,13 @@ disable = [ "else-if-used", # not activated by default, PLR5501 disabled in ruff "empty-comment", # not activated by default, PLR2044 disabled in ruff "eval-used", - "eq-without-hash", + "eq-without-hash", # PLW1641 disabled in ruff "exec-used", "expression-not-assigned", "fixme", "global-statement", # PLW0603 disabled in ruff "import-error", - "import-outside-toplevel", + "import-outside-toplevel", # PLC0415 disabled in ruff "import-private-name", "inconsistent-return-statements", "invalid-bool-returned", @@ -339,7 +340,7 @@ disable = [ ] [tool.codespell] -ignore-words-list = "afile,asend,asser,assertio,feld,hove,ned,noes,notin,paramete,parth,socio-economic,tesults,varius,wil" +ignore-words-list = "afile,asend,asser,assertio,feld,hove,ned,noes,notin,paramete,parth,tesults,varius,wil" skip = "AUTHORS,*/plugin_list.rst" write-changes = true @@ -352,9 +353,9 @@ ignore = "W009" indent = 4 max_supported_python = "3.14" -[tool.pytest.ini_options] +[tool.pytest] minversion = "2.0" -addopts = "-rfEX -p pytester --strict-markers" +addopts = [ "-rfEX", "-p", "pytester" ] python_files = [ "test_*.py", "*_test.py", @@ -377,7 +378,7 @@ norecursedirs = [ "build", "dist", ] -xfail_strict = true +strict = true 
filterwarnings = [ "error", "default:Using or importing the ABCs:DeprecationWarning:unittest2.*", @@ -426,6 +427,9 @@ markers = [ "slow", # experimental mark for all tests using pexpect "uses_pexpect", + # Disables the `remove_ci_env_var` autouse fixture on a given test that + # actually inspects whether the CI environment variable is set. + "keep_ci_var", ] [tool.towncrier] @@ -515,7 +519,7 @@ files = [ mypy_path = [ "src", ] -python_version = "3.9" +python_version = "3.10" check_untyped_defs = true disallow_any_generics = true disallow_untyped_defs = true @@ -538,7 +542,7 @@ include = [ extraPaths = [ "src", ] -pythonVersion = "3.9" +pythonVersion = "3.10" typeCheckingMode = "basic" reportMissingImports = "none" reportMissingModuleSource = "none" diff --git a/scripts/generate-gh-release-notes.py b/scripts/generate-gh-release-notes.py index b6d92d085e1..d293a3bb695 100644 --- a/scripts/generate-gh-release-notes.py +++ b/scripts/generate-gh-release-notes.py @@ -43,7 +43,7 @@ def extract_changelog_entries_for(version: str) -> str: def convert_rst_to_md(text: str) -> str: result = pypandoc.convert_text( - text, "md", format="rst", extra_args=["--wrap=preserve"] + text, "gfm", format="rst", extra_args=["--wrap=preserve"] ) assert isinstance(result, str), repr(result) return result diff --git a/scripts/prepare-release-pr.py b/scripts/prepare-release-pr.py index c420a80b3d2..eb4f19f8386 100644 --- a/scripts/prepare-release-pr.py +++ b/scripts/prepare-release-pr.py @@ -130,11 +130,12 @@ def prepare_release_pr(base_branch: str, is_major: bool, prerelease: str) -> Non [ "gh", "pr", - "new", + "create", f"--base={base_branch}", f"--head={release_branch}", f"--title=Release {version}", f"--body={body}", + "--draft", ], check=True, ) diff --git a/scripts/update-plugin-list.py b/scripts/update-plugin-list.py index 61debb44043..be57d436966 100644 --- a/scripts/update-plugin-list.py +++ b/scripts/update-plugin-list.py @@ -30,7 +30,7 @@ Pytest Plugin List ================== 
-Below is an automated compilation of ``pytest``` plugins available on `PyPI `_. +Below is an automated compilation of ``pytest`` plugins available on `PyPI `_. It includes PyPI projects whose names begin with ``pytest-`` or ``pytest_`` and a handful of manually selected projects. Packages classified as inactive are excluded. diff --git a/src/_pytest/_code/code.py b/src/_pytest/_code/code.py index f1241f14136..4cf99a77340 100644 --- a/src/_pytest/_code/code.py +++ b/src/_pytest/_code/code.py @@ -30,9 +30,8 @@ from typing import Literal from typing import overload from typing import SupportsIndex -from typing import TYPE_CHECKING +from typing import TypeAlias from typing import TypeVar -from typing import Union import pluggy @@ -55,7 +54,7 @@ TracebackStyle = Literal["long", "short", "line", "no", "native", "value", "auto"] -EXCEPTION_OR_MORE = Union[type[BaseException], tuple[type[BaseException], ...]] +EXCEPTION_OR_MORE = type[BaseException] | tuple[type[BaseException], ...] class Code: @@ -469,7 +468,7 @@ def stringify_exception( notes = getattr(exc, "__notes__", []) except KeyError: # Workaround for https://github.com/python/cpython/issues/98778 on - # Python <= 3.9, and some 3.10 and 3.11 patch versions. + # some 3.10 and 3.11 patch versions. HTTPError = getattr(sys.modules.get("urllib.error", None), "HTTPError", ()) if sys.version_info < (3, 12) and isinstance(exc, HTTPError): notes = [] @@ -773,7 +772,11 @@ def match(self, regexp: str | re.Pattern[str]) -> Literal[True]: """ __tracebackhide__ = True value = stringify_exception(self.value) - msg = f"Regex pattern did not match.\n Regex: {regexp!r}\n Input: {value!r}" + msg = ( + f"Regex pattern did not match.\n" + f" Expected regex: {regexp!r}\n" + f" Actual message: {value!r}" + ) if regexp == value: msg += "\n Did you mean to `re.escape()` the regex?" 
assert re.search(regexp, value), msg @@ -853,15 +856,10 @@ def group_contains( return self._group_contains(self.value, expected_exception, match, depth) -if TYPE_CHECKING: - from typing_extensions import TypeAlias - - # Type alias for the `tbfilter` setting: - # bool: If True, it should be filtered using Traceback.filter() - # callable: A callable that takes an ExceptionInfo and returns the filtered traceback. - TracebackFilter: TypeAlias = Union[ - bool, Callable[[ExceptionInfo[BaseException]], Traceback] - ] +# Type alias for the `tbfilter` setting: +# bool: If True, it should be filtered using Traceback.filter() +# callable: A callable that takes an ExceptionInfo and returns the filtered traceback. +TracebackFilter: TypeAlias = bool | Callable[[ExceptionInfo[BaseException]], Traceback] @dataclasses.dataclass @@ -1195,9 +1193,15 @@ def repr_excinfo(self, excinfo: ExceptionInfo[BaseException]) -> ExceptionChainR format_exception( type(excinfo.value), excinfo.value, - traceback[0]._rawentry, + traceback[0]._rawentry if traceback else None, ) ) + if not traceback: + reprtraceback.extraline = ( + "All traceback entries are hidden. " + "Pass `--full-trace` to see hidden and internal frames." 
+ ) + else: reprtraceback = self.repr_traceback(excinfo_) reprcrash = excinfo_._getreprcrash() diff --git a/src/_pytest/_code/source.py b/src/_pytest/_code/source.py index a8f7201a40f..99c242dd98e 100644 --- a/src/_pytest/_code/source.py +++ b/src/_pytest/_code/source.py @@ -26,7 +26,7 @@ def __init__(self, obj: object = None) -> None: elif isinstance(obj, Source): self.lines = obj.lines self.raw_lines = obj.raw_lines - elif isinstance(obj, (tuple, list)): + elif isinstance(obj, tuple | list): self.lines = deindent(x.rstrip("\n") for x in obj) self.raw_lines = list(x.rstrip("\n") for x in obj) elif isinstance(obj, str): @@ -103,7 +103,7 @@ def getstatementrange(self, lineno: int) -> tuple[int, int]: which containing the given lineno.""" if not (0 <= lineno < len(self)): raise IndexError("lineno out of range") - ast, start, end = getstatementrange_ast(lineno, self) + _ast, start, end = getstatementrange_ast(lineno, self) return start, end def deindent(self) -> Source: @@ -155,9 +155,9 @@ def get_statement_startend2(lineno: int, node: ast.AST) -> tuple[int, int | None # AST's line numbers start indexing at 1. values: list[int] = [] for x in ast.walk(node): - if isinstance(x, (ast.stmt, ast.ExceptHandler)): + if isinstance(x, ast.stmt | ast.ExceptHandler): # The lineno points to the class/def, so need to include the decorators. 
- if isinstance(x, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)): + if isinstance(x, ast.ClassDef | ast.FunctionDef | ast.AsyncFunctionDef): for d in x.decorator_list: values.append(d.lineno - 1) values.append(x.lineno - 1) diff --git a/src/_pytest/_io/terminalwriter.py b/src/_pytest/_io/terminalwriter.py index fd808f8b3b7..9191b4edace 100644 --- a/src/_pytest/_io/terminalwriter.py +++ b/src/_pytest/_io/terminalwriter.py @@ -161,20 +161,23 @@ def write(self, msg: str, *, flush: bool = False, **markup: bool) -> None: msg = self.markup(msg, **markup) - try: - self._file.write(msg) - except UnicodeEncodeError: - # Some environments don't support printing general Unicode - # strings, due to misconfiguration or otherwise; in that case, - # print the string escaped to ASCII. - # When the Unicode situation improves we should consider - # letting the error propagate instead of masking it (see #7475 - # for one brief attempt). - msg = msg.encode("unicode-escape").decode("ascii") - self._file.write(msg) - - if flush: - self.flush() + self.write_raw(msg, flush=flush) + + def write_raw(self, msg: str, *, flush: bool = False) -> None: + try: + self._file.write(msg) + except UnicodeEncodeError: + # Some environments don't support printing general Unicode + # strings, due to misconfiguration or otherwise; in that case, + # print the string escaped to ASCII. + # When the Unicode situation improves we should consider + # letting the error propagate instead of masking it (see #7475 + # for one brief attempt). 
+ msg = msg.encode("unicode-escape").decode("ascii") + self._file.write(msg) + + if flush: + self.flush() def line(self, s: str = "", **markup: bool) -> None: self.write(s, **markup) @@ -198,7 +201,8 @@ def _write_source(self, lines: Sequence[str], indents: Sequence[str] = ()) -> No indents = [""] * len(lines) source = "\n".join(lines) new_lines = self._highlight(source).splitlines() - for indent, new_line in zip(indents, new_lines): + # Would be better to strict=True but that fails some CI jobs. + for indent, new_line in zip(indents, new_lines, strict=False): self.line(indent + new_line) def _get_pygments_lexer(self, lexer: Literal["python", "diff"]) -> Lexer: diff --git a/src/_pytest/_py/path.py b/src/_pytest/_py/path.py index e353c1a9b52..998a7819972 100644 --- a/src/_pytest/_py/path.py +++ b/src/_pytest/_py/path.py @@ -137,7 +137,7 @@ class NeverRaised(Exception): class Visitor: def __init__(self, fil, rec, ignore, bf, sort): - if isinstance(fil, str): + if isinstance(fil, (str, bytes)): fil = FNMatcher(fil) if isinstance(rec, str): self.rec: Callable[[LocalPath], bool] = FNMatcher(rec) @@ -432,7 +432,7 @@ def relto(self, relpath): """Return a string which is the relative part of the path to the given 'relpath'. 
""" - if not isinstance(relpath, (str, LocalPath)): + if not isinstance(relpath, str | LocalPath): raise TypeError(f"{relpath!r}: not a string or path object") strrelpath = str(relpath) if strrelpath and strrelpath[-1] != self.sep: @@ -652,7 +652,7 @@ def new(self, **kw): if not kw: obj.strpath = self.strpath return obj - drive, dirname, basename, purebasename, ext = self._getbyspec( + drive, dirname, _basename, purebasename, ext = self._getbyspec( "drive,dirname,basename,purebasename,ext" ) if "basename" in kw: diff --git a/src/_pytest/assertion/rewrite.py b/src/_pytest/assertion/rewrite.py index c4782c7c5a8..566549d66f2 100644 --- a/src/_pytest/assertion/rewrite.py +++ b/src/_pytest/assertion/rewrite.py @@ -26,6 +26,17 @@ from typing import IO from typing import TYPE_CHECKING + +if sys.version_info >= (3, 12): + from importlib.resources.abc import TraversableResources +else: + from importlib.abc import TraversableResources +if sys.version_info < (3, 11): + from importlib.readers import FileReader +else: + from importlib.resources.readers import FileReader + + from _pytest._io.saferepr import DEFAULT_REPR_MAX_SIZE from _pytest._io.saferepr import saferepr from _pytest._io.saferepr import saferepr_unlimited @@ -291,19 +302,8 @@ def get_data(self, pathname: str | bytes) -> bytes: with open(pathname, "rb") as f: return f.read() - if sys.version_info >= (3, 10): - if sys.version_info >= (3, 12): - from importlib.resources.abc import TraversableResources - else: - from importlib.abc import TraversableResources - - def get_resource_reader(self, name: str) -> TraversableResources: - if sys.version_info < (3, 11): - from importlib.readers import FileReader - else: - from importlib.resources.readers import FileReader - - return FileReader(types.SimpleNamespace(path=self._rewritten_names[name])) + def get_resource_reader(self, name: str) -> TraversableResources: + return FileReader(types.SimpleNamespace(path=self._rewritten_names[name])) # type: ignore[arg-type] def 
_write_pyc_fp( @@ -496,7 +496,7 @@ def _call_reprcompare( expls: Sequence[str], each_obj: Sequence[object], ) -> str: - for i, res, expl in zip(range(len(ops)), results, expls): + for i, res, expl in zip(range(len(ops)), results, expls, strict=True): try: done = not res except Exception: @@ -702,26 +702,18 @@ def run(self, mod: ast.Module) -> None: if doc is not None and self.is_rewrite_disabled(doc): return pos = 0 - item = None for item in mod.body: - if ( - expect_docstring - and isinstance(item, ast.Expr) - and isinstance(item.value, ast.Constant) - and isinstance(item.value.value, str) - ): - doc = item.value.value - if self.is_rewrite_disabled(doc): - return - expect_docstring = False - elif ( - isinstance(item, ast.ImportFrom) - and item.level == 0 - and item.module == "__future__" - ): - pass - else: - break + match item: + case ast.Expr(value=ast.Constant(value=str() as doc)) if ( + expect_docstring + ): + if self.is_rewrite_disabled(doc): + return + expect_docstring = False + case ast.ImportFrom(level=0, module="__future__"): + pass + case _: + break pos += 1 # Special case: for a decorated function, set the lineno to that of the # first decorator, not the `def`. Issue #4984. @@ -730,21 +722,15 @@ def run(self, mod: ast.Module) -> None: else: lineno = item.lineno # Now actually insert the special imports. 
- if sys.version_info >= (3, 10): - aliases = [ - ast.alias("builtins", "@py_builtins", lineno=lineno, col_offset=0), - ast.alias( - "_pytest.assertion.rewrite", - "@pytest_ar", - lineno=lineno, - col_offset=0, - ), - ] - else: - aliases = [ - ast.alias("builtins", "@py_builtins"), - ast.alias("_pytest.assertion.rewrite", "@pytest_ar"), - ] + aliases = [ + ast.alias("builtins", "@py_builtins", lineno=lineno, col_offset=0), + ast.alias( + "_pytest.assertion.rewrite", + "@pytest_ar", + lineno=lineno, + col_offset=0, + ), + ] imports = [ ast.Import([alias], lineno=lineno, col_offset=0) for alias in aliases ] @@ -755,7 +741,7 @@ def run(self, mod: ast.Module) -> None: nodes: list[ast.AST | Sentinel] = [mod] while nodes: node = nodes.pop() - if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)): + if isinstance(node, ast.FunctionDef | ast.AsyncFunctionDef | ast.ClassDef): self.scope = tuple((*self.scope, node)) nodes.append(_SCOPE_END_MARKER) if node == _SCOPE_END_MARKER: @@ -1024,20 +1010,17 @@ def visit_BoolOp(self, boolop: ast.BoolOp) -> tuple[ast.Name, str]: # cond is set in a prior loop iteration below self.expl_stmts.append(ast.If(cond, fail_inner, [])) # noqa: F821 self.expl_stmts = fail_inner - # Check if the left operand is a ast.NamedExpr and the value has already been visited - if ( - isinstance(v, ast.Compare) - and isinstance(v.left, ast.NamedExpr) - and v.left.target.id - in [ - ast_expr.id - for ast_expr in boolop.values[:i] - if hasattr(ast_expr, "id") - ] - ): - pytest_temp = self.variable() - self.variables_overwrite[self.scope][v.left.target.id] = v.left # type:ignore[assignment] - v.left.target.id = pytest_temp + match v: + # Check if the left operand is an ast.NamedExpr and the value has already been visited + case ast.Compare( + left=ast.NamedExpr(target=ast.Name(id=target_id)) + ) if target_id in [ + e.id for e in boolop.values[:i] if hasattr(e, "id") + ]: + pytest_temp = self.variable() + 
self.variables_overwrite[self.scope][target_id] = v.left # type:ignore[assignment] + # mypy's false positive, we're checking that the 'target' attribute exists. + v.left.target.id = pytest_temp # type:ignore[attr-defined] self.push_format_context() res, expl = self.visit(v) body.append(ast.Assign([ast.Name(res_var, ast.Store())], res)) @@ -1087,10 +1070,11 @@ def visit_Call(self, call: ast.Call) -> tuple[ast.Name, str]: arg_expls.append(expl) new_args.append(res) for keyword in call.keywords: - if isinstance( - keyword.value, ast.Name - ) and keyword.value.id in self.variables_overwrite.get(self.scope, {}): - keyword.value = self.variables_overwrite[self.scope][keyword.value.id] # type:ignore[assignment] + match keyword.value: + case ast.Name(id=id) if id in self.variables_overwrite.get( + self.scope, {} + ): + keyword.value = self.variables_overwrite[self.scope][id] # type:ignore[assignment] res, expl = self.visit(keyword.value) new_kwargs.append(ast.keyword(keyword.arg, res)) if keyword.arg: @@ -1126,32 +1110,34 @@ def visit_Attribute(self, attr: ast.Attribute) -> tuple[ast.Name, str]: def visit_Compare(self, comp: ast.Compare) -> tuple[ast.expr, str]: self.push_format_context() # We first check if we have overwritten a variable in the previous assert - if isinstance( - comp.left, ast.Name - ) and comp.left.id in self.variables_overwrite.get(self.scope, {}): - comp.left = self.variables_overwrite[self.scope][comp.left.id] # type:ignore[assignment] - if isinstance(comp.left, ast.NamedExpr): - self.variables_overwrite[self.scope][comp.left.target.id] = comp.left # type:ignore[assignment] + match comp.left: + case ast.Name(id=name_id) if name_id in self.variables_overwrite.get( + self.scope, {} + ): + comp.left = self.variables_overwrite[self.scope][name_id] # type: ignore[assignment] + case ast.NamedExpr(target=ast.Name(id=target_id)): + self.variables_overwrite[self.scope][target_id] = comp.left # type: ignore[assignment] left_res, left_expl = 
self.visit(comp.left) - if isinstance(comp.left, (ast.Compare, ast.BoolOp)): + if isinstance(comp.left, ast.Compare | ast.BoolOp): left_expl = f"({left_expl})" res_variables = [self.variable() for i in range(len(comp.ops))] load_names: list[ast.expr] = [ast.Name(v, ast.Load()) for v in res_variables] store_names = [ast.Name(v, ast.Store()) for v in res_variables] - it = zip(range(len(comp.ops)), comp.ops, comp.comparators) + it = zip(range(len(comp.ops)), comp.ops, comp.comparators, strict=True) expls: list[ast.expr] = [] syms: list[ast.expr] = [] results = [left_res] for i, op, next_operand in it: - if ( - isinstance(next_operand, ast.NamedExpr) - and isinstance(left_res, ast.Name) - and next_operand.target.id == left_res.id - ): - next_operand.target.id = self.variable() - self.variables_overwrite[self.scope][left_res.id] = next_operand # type:ignore[assignment] + match (next_operand, left_res): + case ( + ast.NamedExpr(target=ast.Name(id=target_id)), + ast.Name(id=name_id), + ) if target_id == name_id: + next_operand.target.id = self.variable() + self.variables_overwrite[self.scope][name_id] = next_operand # type: ignore[assignment] + next_res, next_expl = self.visit(next_operand) - if isinstance(next_operand, (ast.Compare, ast.BoolOp)): + if isinstance(next_operand, ast.Compare | ast.BoolOp): next_expl = f"({next_expl})" results.append(next_res) sym = BINOP_MAP[op.__class__] diff --git a/src/_pytest/assertion/truncate.py b/src/_pytest/assertion/truncate.py index 4854a62ba6b..5820e6e8a80 100644 --- a/src/_pytest/assertion/truncate.py +++ b/src/_pytest/assertion/truncate.py @@ -6,7 +6,7 @@ from __future__ import annotations -from _pytest.assertion import util +from _pytest.compat import running_on_ci from _pytest.config import Config from _pytest.nodes import Item @@ -43,7 +43,7 @@ def _get_truncation_parameters(item: Item) -> tuple[bool, int, int]: verbose = item.config.get_verbosity(Config.VERBOSITY_ASSERTIONS) - should_truncate = verbose < 2 and not 
util.running_on_ci() + should_truncate = verbose < 2 and not running_on_ci() should_truncate = should_truncate and (max_lines > 0 or max_chars > 0) return should_truncate, max_lines, max_chars diff --git a/src/_pytest/assertion/util.py b/src/_pytest/assertion/util.py index c545e6cd20c..f35d83a6fe4 100644 --- a/src/_pytest/assertion/util.py +++ b/src/_pytest/assertion/util.py @@ -9,7 +9,6 @@ from collections.abc import Mapping from collections.abc import Sequence from collections.abc import Set as AbstractSet -import os import pprint from typing import Any from typing import Literal @@ -21,6 +20,7 @@ from _pytest._io.pprint import PrettyPrinter from _pytest._io.saferepr import saferepr from _pytest._io.saferepr import saferepr_unlimited +from _pytest.compat import running_on_ci from _pytest.config import Config @@ -131,7 +131,7 @@ def isdict(x: Any) -> bool: def isset(x: Any) -> bool: - return isinstance(x, (set, frozenset)) + return isinstance(x, set | frozenset) def isnamedtuple(obj: Any) -> bool: @@ -613,9 +613,3 @@ def _notin_text(term: str, text: str, verbose: int = 0) -> list[str]: else: newdiff.append(line) return newdiff - - -def running_on_ci() -> bool: - """Check if we're currently running on a CI system.""" - env_vars = ["CI", "BUILD_NUMBER"] - return any(var in os.environ for var in env_vars) diff --git a/src/_pytest/cacheprovider.py b/src/_pytest/cacheprovider.py old mode 100755 new mode 100644 index dea60109b51..4383f105af6 --- a/src/_pytest/cacheprovider.py +++ b/src/_pytest/cacheprovider.py @@ -256,7 +256,7 @@ def pytest_make_collect_report( self, collector: nodes.Collector ) -> Generator[None, CollectReport, CollectReport]: res = yield - if isinstance(collector, (Session, Directory)): + if isinstance(collector, Session | Directory): # Sort any lf-paths to the beginning. 
lf_paths = self.lfplugin._last_failed_paths @@ -476,6 +476,10 @@ def pytest_sessionfinish(self) -> None: def pytest_addoption(parser: Parser) -> None: + """Add command-line options for cache functionality. + + :param parser: Parser object to add command-line options to. + """ group = parser.getgroup("general") group.addoption( "--lf", @@ -546,6 +550,13 @@ def pytest_cmdline_main(config: Config) -> int | ExitCode | None: @hookimpl(tryfirst=True) def pytest_configure(config: Config) -> None: + """Configure cache system and register related plugins. + + Creates the Cache instance and registers the last-failed (LFPlugin) + and new-first (NFPlugin) plugins with the plugin manager. + + :param config: pytest configuration object. + """ config.cache = Cache.for_config(config, _ispytest=True) config.pluginmanager.register(LFPlugin(config), "lfplugin") config.pluginmanager.register(NFPlugin(config), "nfplugin") @@ -584,6 +595,16 @@ def pytest_report_header(config: Config) -> str | None: def cacheshow(config: Config, session: Session) -> int: + """Display cache contents when --cache-show is used. + + Shows cached values and directories matching the specified glob pattern + (default: '*'). Displays cache location, cached test results, and + any cached directories created by plugins. + + :param config: pytest configuration object. + :param session: pytest session object. + :returns: Exit code (0 for success). + """ from pprint import pformat assert config.cache is not None diff --git a/src/_pytest/capture.py b/src/_pytest/capture.py index 3812d88176a..6d98676be5f 100644 --- a/src/_pytest/capture.py +++ b/src/_pytest/capture.py @@ -1043,7 +1043,7 @@ def capteesys(request: SubRequest) -> Generator[CaptureFixture[str]]: .. 
code-block:: python - def test_output(capsys): + def test_output(capteesys): print("hello") captured = capteesys.readouterr() assert captured.out == "hello\n" diff --git a/src/_pytest/compat.py b/src/_pytest/compat.py index f113a2197f3..72c3d0918fb 100644 --- a/src/_pytest/compat.py +++ b/src/_pytest/compat.py @@ -1,5 +1,5 @@ # mypy: allow-untyped-defs -"""Python version compatibility code.""" +"""Python version compatibility code and random general utilities.""" from __future__ import annotations @@ -8,7 +8,7 @@ import functools import inspect from inspect import Parameter -from inspect import signature +from inspect import Signature import os from pathlib import Path import sys @@ -19,6 +19,10 @@ import py +if sys.version_info >= (3, 14): + from annotationlib import Format + + #: constant to prepare valuing pylib path replacements/lazy proxies later on # intended for removal in pytest 8.0 or 9.0 @@ -60,6 +64,13 @@ def is_async_function(func: object) -> bool: return iscoroutinefunction(func) or inspect.isasyncgenfunction(func) +def signature(obj: Callable[..., Any]) -> Signature: + """Return signature without evaluating annotations.""" + if sys.version_info >= (3, 14): + return inspect.signature(obj, annotation_format=Format.STRING) + return inspect.signature(obj) + + def getlocation(function, curdir: str | os.PathLike[str] | None = None) -> str: function = get_real_func(function) fn = Path(inspect.getfile(function)) @@ -267,36 +278,37 @@ def get_user_id() -> int | None: return uid if uid != ERROR else None -# Perform exhaustiveness checking. -# -# Consider this example: -# -# MyUnion = Union[int, str] -# -# def handle(x: MyUnion) -> int { -# if isinstance(x, int): -# return 1 -# elif isinstance(x, str): -# return 2 -# else: -# raise Exception('unreachable') -# -# Now suppose we add a new variant: -# -# MyUnion = Union[int, str, bytes] -# -# After doing this, we must remember ourselves to go and update the handle -# function to handle the new variant. 
-# -# With `assert_never` we can do better: -# -# // raise Exception('unreachable') -# return assert_never(x) -# -# Now, if we forget to handle the new variant, the type-checker will emit a -# compile-time error, instead of the runtime error we would have gotten -# previously. -# -# This also work for Enums (if you use `is` to compare) and Literals. -def assert_never(value: NoReturn) -> NoReturn: - assert False, f"Unhandled value: {value} ({type(value).__name__})" +if sys.version_info >= (3, 11): + from typing import assert_never +else: + + def assert_never(value: NoReturn) -> NoReturn: + assert False, f"Unhandled value: {value} ({type(value).__name__})" + + +class CallableBool: + """ + A bool-like object that can also be called, returning its true/false value. + + Used for backwards compatibility in cases where something was supposed to be a method + but was implemented as a simple attribute by mistake (see `TerminalReporter.isatty`). + + Do not use in new code. + """ + + def __init__(self, value: bool) -> None: + self._value = value + + def __bool__(self) -> bool: + return self._value + + def __call__(self) -> bool: + return self._value + + +def running_on_ci() -> bool: + """Check if we're currently running on a CI system.""" + # Only enable CI mode if one of these env variables is defined and non-empty. + # Note: review `regendoc` tox env in case this list is changed. 
+ env_vars = ["CI", "BUILD_NUMBER"] + return any(os.environ.get(var) for var in env_vars) diff --git a/src/_pytest/config/__init__.py b/src/_pytest/config/__init__.py index 468018fadc0..a027dbc02a4 100644 --- a/src/_pytest/config/__init__.py +++ b/src/_pytest/config/__init__.py @@ -1,14 +1,17 @@ # mypy: allow-untyped-defs -"""Command line options, ini-file and conftest.py processing.""" +"""Command line options, config-file and conftest.py processing.""" from __future__ import annotations import argparse +import builtins import collections.abc from collections.abc import Callable from collections.abc import Generator from collections.abc import Iterable from collections.abc import Iterator +from collections.abc import Mapping +from collections.abc import MutableMapping from collections.abc import Sequence import contextlib import copy @@ -45,6 +48,7 @@ from .compat import PathAwareHookProxy from .exceptions import PrintHelp as PrintHelp from .exceptions import UsageError as UsageError +from .findpaths import ConfigValue from .findpaths import determine_setup from _pytest import __version__ import _pytest._code @@ -52,7 +56,9 @@ from _pytest._code import filter_traceback from _pytest._code.code import TracebackStyle from _pytest._io import TerminalWriter +from _pytest.compat import assert_never from _pytest.config.argparsing import Argument +from _pytest.config.argparsing import FILE_OR_DIR from _pytest.config.argparsing import Parser import _pytest.deprecated import _pytest.hookspec @@ -110,6 +116,8 @@ class ExitCode(enum.IntEnum): #: pytest couldn't find tests. 
NO_TESTS_COLLECTED = 5 + __module__ = "pytest" + class ConftestImportFailure(Exception): def __init__( @@ -136,6 +144,29 @@ def filter_traceback_for_conftest_import_failure( return filter_traceback(entry) and "importlib" not in str(entry.path).split(os.sep) +def print_conftest_import_error(e: ConftestImportFailure, file: TextIO) -> None: + exc_info = ExceptionInfo.from_exception(e.cause) + tw = TerminalWriter(file) + tw.line(f"ImportError while loading conftest '{e.path}'.", red=True) + exc_info.traceback = exc_info.traceback.filter( + filter_traceback_for_conftest_import_failure + ) + exc_repr = ( + exc_info.getrepr(style="short", chain=False) + if exc_info.traceback + else exc_info.exconly() + ) + formatted_tb = str(exc_repr) + for line in formatted_tb.splitlines(): + tw.line(line.rstrip(), red=True) + + +def print_usage_error(e: UsageError, file: TextIO) -> None: + tw = TerminalWriter(file) + for msg in e.args: + tw.line(f"ERROR: {msg}\n", red=True) + + def main( args: list[str] | os.PathLike[str] | None = None, plugins: Sequence[str | _PluggyPlugin] | None = None, @@ -149,40 +180,31 @@ def main( :returns: An exit code. """ + # Handle a single `--version` argument early to avoid starting up the entire pytest infrastructure. 
+ new_args = sys.argv[1:] if args is None else args + if isinstance(new_args, Sequence) and new_args.count("--version") == 1: + sys.stdout.write(f"pytest {__version__}\n") + return ExitCode.OK + old_pytest_version = os.environ.get("PYTEST_VERSION") try: os.environ["PYTEST_VERSION"] = __version__ try: - config = _prepareconfig(args, plugins) + config = _prepareconfig(new_args, plugins) except ConftestImportFailure as e: - exc_info = ExceptionInfo.from_exception(e.cause) - tw = TerminalWriter(sys.stderr) - tw.line(f"ImportError while loading conftest '{e.path}'.", red=True) - exc_info.traceback = exc_info.traceback.filter( - filter_traceback_for_conftest_import_failure - ) - exc_repr = ( - exc_info.getrepr(style="short", chain=False) - if exc_info.traceback - else exc_info.exconly() - ) - formatted_tb = str(exc_repr) - for line in formatted_tb.splitlines(): - tw.line(line.rstrip(), red=True) + print_conftest_import_error(e, file=sys.stderr) return ExitCode.USAGE_ERROR - else: + + try: + ret: ExitCode | int = config.hook.pytest_cmdline_main(config=config) try: - ret: ExitCode | int = config.hook.pytest_cmdline_main(config=config) - try: - return ExitCode(ret) - except ValueError: - return ret - finally: - config._ensure_unconfigure() + return ExitCode(ret) + except ValueError: + return ret + finally: + config._ensure_unconfigure() except UsageError as e: - tw = TerminalWriter(sys.stderr) - for msg in e.args: - tw.line(f"ERROR: {msg}\n", red=True) + print_usage_error(e, file=sys.stderr) return ExitCode.USAGE_ERROR finally: if old_pytest_version is None: @@ -261,7 +283,6 @@ def directory_arg(path: str, optname: str) -> str: "junitxml", "doctest", "cacheprovider", - "freeze_support", "setuponly", "setupplan", "stepwise", @@ -271,33 +292,33 @@ def directory_arg(path: str, optname: str) -> str: "logging", "reports", "faulthandler", + "subtests", ) builtin_plugins = { *default_plugins, "pytester", "pytester_assertions", + "terminalprogress", } def get_config( - args: 
list[str] | None = None, + args: Iterable[str] | None = None, plugins: Sequence[str | _PluggyPlugin] | None = None, ) -> Config: - # subsequent calls to main will create a fresh instance + # Subsequent calls to main will create a fresh instance. pluginmanager = PytestPluginManager() - config = Config( - pluginmanager, - invocation_params=Config.InvocationParams( - args=args or (), - plugins=plugins, - dir=pathlib.Path.cwd(), - ), + invocation_params = Config.InvocationParams( + args=args or (), + plugins=plugins, + dir=pathlib.Path.cwd(), ) + config = Config(pluginmanager, invocation_params=invocation_params) - if args is not None: + if invocation_params.args: # Handle any "-p no:plugin" args. - pluginmanager.consider_preparse(args, exclude_only=True) + pluginmanager.consider_preparse(invocation_params.args, exclude_only=True) for spec in default_plugins: pluginmanager.import_plugin(spec) @@ -317,12 +338,10 @@ def get_plugin_manager() -> PytestPluginManager: def _prepareconfig( - args: list[str] | os.PathLike[str] | None = None, + args: list[str] | os.PathLike[str], plugins: Sequence[str | _PluggyPlugin] | None = None, ) -> Config: - if args is None: - args = sys.argv[1:] - elif isinstance(args, os.PathLike): + if isinstance(args, os.PathLike): args = [os.fspath(args)] elif not isinstance(args, list): msg = ( # type:ignore[unreachable] @@ -330,8 +349,8 @@ def _prepareconfig( ) raise TypeError(msg.format(args, type(args))) - config = get_config(args, plugins) - pluginmanager = config.pluginmanager + initial_config = get_config(args, plugins) + pluginmanager = initial_config.pluginmanager try: if plugins: for plugin in plugins: @@ -339,12 +358,12 @@ def _prepareconfig( pluginmanager.consider_pluginarg(plugin) else: pluginmanager.register(plugin) - config = pluginmanager.hook.pytest_cmdline_parse( + config: Config = pluginmanager.hook.pytest_cmdline_parse( pluginmanager=pluginmanager, args=args ) return config except BaseException: - config._ensure_unconfigure() + 
initial_config._ensure_unconfigure() raise @@ -795,6 +814,12 @@ def consider_pluginarg(self, arg: str) -> None: if name in essential_plugins: raise UsageError(f"plugin {name} cannot be disabled") + if name.endswith("conftest.py"): + raise UsageError( + f"Blocking conftest files using -p is not supported: -p no:{name}\n" + "conftest.py files are not plugins and cannot be disabled via -p.\n" + ) + # PR #4304: remove stepwise if cacheprovider is blocked. if name == "cacheprovider": self.set_blocked("stepwise") @@ -964,6 +989,30 @@ def _iter_rewritable_modules(package_files: Iterable[str]) -> Iterator[str]: yield from _iter_rewritable_modules(new_package_files) +class _DeprecatedInicfgProxy(MutableMapping[str, Any]): + """Compatibility proxy for the deprecated Config.inicfg.""" + + __slots__ = ("_config",) + + def __init__(self, config: Config) -> None: + self._config = config + + def __getitem__(self, key: str) -> Any: + return self._config._inicfg[key].value + + def __setitem__(self, key: str, value: Any) -> None: + self._config._inicfg[key] = ConfigValue(value, origin="override", mode="toml") + + def __delitem__(self, key: str) -> None: + del self._config._inicfg[key] + + def __iter__(self) -> Iterator[str]: + return iter(self._config._inicfg) + + def __len__(self) -> int: + return len(self._config._inicfg) + + @final class Config: """Access to configuration values, pluginmanager and plugin hooks. @@ -988,7 +1037,7 @@ class InvocationParams: .. note:: Note that the environment variable ``PYTEST_ADDOPTS`` and the ``addopts`` - ini option are handled by pytest, not being included in the ``args`` attribute. + configuration option are handled by pytest, not being included in the ``args`` attribute. Plugins accessing ``InvocationParams`` must be aware of that. """ @@ -998,7 +1047,7 @@ class InvocationParams: plugins: Sequence[str | _PluggyPlugin] | None """Extra plugins, might be `None`.""" dir: pathlib.Path - """The directory from which :func:`pytest.main` was invoked. 
:type: pathlib.Path""" + """The directory from which :func:`pytest.main` was invoked.""" def __init__( self, @@ -1034,9 +1083,6 @@ def __init__( *, invocation_params: InvocationParams | None = None, ) -> None: - from .argparsing import FILE_OR_DIR - from .argparsing import Parser - if invocation_params is None: invocation_params = self.InvocationParams( args=(), plugins=None, dir=pathlib.Path.cwd() @@ -1054,9 +1100,8 @@ def __init__( :type: InvocationParams """ - _a = FILE_OR_DIR self._parser = Parser( - usage=f"%(prog)s [options] [{_a}] [{_a}] [...]", + usage=f"%(prog)s [options] [{FILE_OR_DIR}] [{FILE_OR_DIR}] [...]", processopt=self._processopt, _ispytest=True, ) @@ -1078,7 +1123,6 @@ def __init__( self.trace = self.pluginmanager.trace.root.get("config") self.hook: pluggy.HookRelay = PathAwareHookProxy(self.pluginmanager.hook) # type: ignore[assignment] self._inicache: dict[str, Any] = {} - self._override_ini: Sequence[str] = () self._opt2dest: dict[str, str] = {} self._cleanup_stack = contextlib.ExitStack() self.pluginmanager.register(self, "pytestconfig") @@ -1089,12 +1133,14 @@ def __init__( self.args_source = Config.ArgsSource.ARGS self.args: list[str] = [] + @property + def inicfg(self) -> _DeprecatedInicfgProxy: + return _DeprecatedInicfgProxy(self) + @property def rootpath(self) -> pathlib.Path: """The path to the :ref:`rootdir `. - :type: pathlib.Path - .. versionadded:: 6.1 """ return self._rootpath @@ -1145,17 +1191,19 @@ def pytest_cmdline_parse( try: self.parse(args) except UsageError: - # Handle --version and --help here in a minimal fashion. + # Handle `--version --version` and `--help` here in a minimal fashion. # This gets done via helpconfig normally, but its # pytest_cmdline_main is not called in case of errors. 
if getattr(self.option, "version", False) or "--version" in args: - from _pytest.helpconfig import showversion + from _pytest.helpconfig import show_version_verbose - showversion(self) + # Note that `--version` (single argument) is handled early by `Config.main()`, so the only + # way we are reaching this point is via `--version --version`. + show_version_verbose(self) elif ( getattr(self.option, "help", False) or "--help" in args or "-h" in args ): - self._parser._getparser().print_help() + self._parser.optparser.print_help() sys.stdout.write( "\nNOTE: displaying only minimal help due to UsageError.\n\n" ) @@ -1194,7 +1242,7 @@ def cwd_relative_nodeid(self, nodeid: str) -> str: return nodeid @classmethod - def fromdictargs(cls, option_dict, args) -> Config: + def fromdictargs(cls, option_dict: Mapping[str, Any], args: list[str]) -> Config: """Constructor usable for subprocesses.""" config = get_config(args) config.option.__dict__.update(option_dict) @@ -1217,7 +1265,7 @@ def pytest_load_initial_conftests(self, early_config: Config) -> None: # early_config.args it not set yet. But we need it for # discovering the initial conftests. So "pre-run" the logic here. # It will be done for real in `parse()`. 
- args, args_source = early_config._decide_args( + args, _args_source = early_config._decide_args( args=early_config.known_args_namespace.file_or_dir, pyargs=early_config.known_args_namespace.pyargs, testpaths=early_config.getini("testpaths"), @@ -1238,47 +1286,18 @@ def pytest_load_initial_conftests(self, early_config: Config) -> None: ), ) - def _initini(self, args: Sequence[str]) -> None: - ns, unknown_args = self._parser.parse_known_and_unknown_args( - args, namespace=copy.copy(self.option) - ) - rootpath, inipath, inicfg = determine_setup( - inifile=ns.inifilename, - args=ns.file_or_dir + unknown_args, - rootdir_cmd_arg=ns.rootdir or None, - invocation_dir=self.invocation_params.dir, - ) - self._rootpath = rootpath - self._inipath = inipath - self.inicfg = inicfg - self._parser.extra_info["rootdir"] = str(self.rootpath) - self._parser.extra_info["inifile"] = str(self.inipath) - self._parser.addini("addopts", "Extra command line options", "args") - self._parser.addini("minversion", "Minimally required pytest version") - self._parser.addini( - "pythonpath", type="paths", help="Add paths to sys.path", default=[] - ) - self._parser.addini( - "required_plugins", - "Plugins that must be present for pytest to run", - type="args", - default=[], - ) - self._override_ini = ns.override_ini or () - - def _consider_importhook(self, args: Sequence[str]) -> None: + def _consider_importhook(self) -> None: """Install the PEP 302 import hook if using assertion rewriting. Needs to parse the --assert= option from the commandline and find all the installed plugins to mark them for rewriting by the importhook. 
""" - ns, unknown_args = self._parser.parse_known_and_unknown_args(args) - mode = getattr(ns, "assertmode", "plain") + mode = getattr(self.known_args_namespace, "assertmode", "plain") - disable_autoload = getattr(ns, "disable_plugin_autoload", False) or bool( - os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD") - ) + disable_autoload = getattr( + self.known_args_namespace, "disable_plugin_autoload", False + ) or bool(os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD")) if mode == "rewrite": import _pytest.assertion @@ -1327,13 +1346,13 @@ def _unconfigure_python_path(self) -> None: def _validate_args(self, args: list[str], via: str) -> list[str]: """Validate known args.""" - self._parser._config_source_hint = via # type: ignore + self._parser.extra_info["config source"] = via try: self._parser.parse_known_and_unknown_args( args, namespace=copy.copy(self.option) ) finally: - del self._parser._config_source_hint # type: ignore + self._parser.extra_info.pop("config source", None) return args @@ -1382,70 +1401,10 @@ def _decide_args( result = [str(invocation_dir)] return result, source - def _preparse(self, args: list[str], addopts: bool = True) -> None: - if addopts: - env_addopts = os.environ.get("PYTEST_ADDOPTS", "") - if len(env_addopts): - args[:] = ( - self._validate_args(shlex.split(env_addopts), "via PYTEST_ADDOPTS") - + args - ) - self._initini(args) - if addopts: - args[:] = ( - self._validate_args(self.getini("addopts"), "via addopts config") + args - ) - - self.known_args_namespace = self._parser.parse_known_args( - args, namespace=copy.copy(self.option) - ) - self._checkversion() - self._consider_importhook(args) - self._configure_python_path() - self.pluginmanager.consider_preparse(args, exclude_only=False) - if ( - not os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD") - and not self.known_args_namespace.disable_plugin_autoload - ): - # Autoloading from distribution package entry point has - # not been disabled. 
- self.pluginmanager.load_setuptools_entrypoints("pytest11") - # Otherwise only plugins explicitly specified in PYTEST_PLUGINS - # are going to be loaded. - self.pluginmanager.consider_env() - - self.known_args_namespace = self._parser.parse_known_args( - args, namespace=copy.copy(self.known_args_namespace) - ) - - self._validate_plugins() - self._warn_about_skipped_plugins() - - if self.known_args_namespace.confcutdir is None: - if self.inipath is not None: - confcutdir = str(self.inipath.parent) - else: - confcutdir = str(self.rootpath) - self.known_args_namespace.confcutdir = confcutdir - try: - self.hook.pytest_load_initial_conftests( - early_config=self, args=args, parser=self._parser - ) - except ConftestImportFailure as e: - if self.known_args_namespace.help or self.known_args_namespace.version: - # we don't want to prevent --help/--version to work - # so just let it pass and print a warning at the end - self.issue_config_time_warning( - PytestConfigWarning(f"could not load initial conftests: {e.path}"), - stacklevel=2, - ) - else: - raise - @hookimpl(wrapper=True) def pytest_collection(self) -> Generator[None, object, object]: - # Validate invalid ini keys after collection is done so we take in account - # options added by late-loading conftest files. + # Validate invalid configuration keys after collection is done so we + # take in account options added by late-loading conftest files. try: return (yield) finally: @@ -1454,7 +1413,8 @@ def pytest_collection(self) -> Generator[None, object, object]: def _checkversion(self) -> None: import pytest - minver = self.inicfg.get("minversion", None) + minver_ini_value = self._inicfg.get("minversion", None) + minver = minver_ini_value.value if minver_ini_value is not None else None if minver: # Imported lazily to improve start-up time. 
from packaging.version import Version @@ -1507,39 +1467,125 @@ def _validate_plugins(self) -> None: ) def _warn_or_fail_if_strict(self, message: str) -> None: - if self.known_args_namespace.strict_config: + strict_config = self.getini("strict_config") + if strict_config is None: + strict_config = self.getini("strict") + if strict_config: raise UsageError(message) self.issue_config_time_warning(PytestConfigWarning(message), stacklevel=3) - def _get_unknown_ini_keys(self) -> list[str]: - parser_inicfg = self._parser._inidict - return [name for name in self.inicfg if name not in parser_inicfg] + def _get_unknown_ini_keys(self) -> set[str]: + known_keys = self._parser._inidict.keys() | self._parser._ini_aliases.keys() + return self._inicfg.keys() - known_keys def parse(self, args: list[str], addopts: bool = True) -> None: # Parse given cmdline arguments into this config object. assert self.args == [], ( "can only parse cmdline args at most once per Config object" ) + self.hook.pytest_addhooks.call_historic( kwargs=dict(pluginmanager=self.pluginmanager) ) - self._preparse(args, addopts=addopts) - self._parser.after_preparse = True # type: ignore - try: - args = self._parser.parse_setoption( - args, self.option, namespace=self.option + + if addopts: + env_addopts = os.environ.get("PYTEST_ADDOPTS", "") + if len(env_addopts): + args[:] = ( + self._validate_args(shlex.split(env_addopts), "via PYTEST_ADDOPTS") + + args + ) + + ns = self._parser.parse_known_args(args, namespace=copy.copy(self.option)) + rootpath, inipath, inicfg, ignored_config_files = determine_setup( + inifile=ns.inifilename, + override_ini=ns.override_ini, + args=ns.file_or_dir, + rootdir_cmd_arg=ns.rootdir or None, + invocation_dir=self.invocation_params.dir, + ) + self._rootpath = rootpath + self._inipath = inipath + self._ignored_config_files = ignored_config_files + self._inicfg = inicfg + self._parser.extra_info["rootdir"] = str(self.rootpath) + self._parser.extra_info["inifile"] = str(self.inipath) + 
+ self._parser.addini("addopts", "Extra command line options", "args") + self._parser.addini("minversion", "Minimally required pytest version") + self._parser.addini( + "pythonpath", type="paths", help="Add paths to sys.path", default=[] + ) + self._parser.addini( + "required_plugins", + "Plugins that must be present for pytest to run", + type="args", + default=[], + ) + + if addopts: + args[:] = ( + self._validate_args(self.getini("addopts"), "via addopts config") + args ) - self.args, self.args_source = self._decide_args( - args=args, - pyargs=self.known_args_namespace.pyargs, - testpaths=self.getini("testpaths"), - invocation_dir=self.invocation_params.dir, - rootpath=self.rootpath, - warn=True, + + self.known_args_namespace = self._parser.parse_known_args( + args, namespace=copy.copy(self.option) + ) + self._checkversion() + self._consider_importhook() + self._configure_python_path() + self.pluginmanager.consider_preparse(args, exclude_only=False) + if ( + not os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD") + and not self.known_args_namespace.disable_plugin_autoload + ): + # Autoloading from distribution package entry point has + # not been disabled. + self.pluginmanager.load_setuptools_entrypoints("pytest11") + # Otherwise only plugins explicitly specified in PYTEST_PLUGINS + # are going to be loaded. 
+ self.pluginmanager.consider_env() + + self._parser.parse_known_args(args, namespace=self.known_args_namespace) + + self._validate_plugins() + self._warn_about_skipped_plugins() + + if self.known_args_namespace.confcutdir is None: + if self.inipath is not None: + confcutdir = str(self.inipath.parent) + else: + confcutdir = str(self.rootpath) + self.known_args_namespace.confcutdir = confcutdir + try: + self.hook.pytest_load_initial_conftests( + early_config=self, args=args, parser=self._parser ) + except ConftestImportFailure as e: + if self.known_args_namespace.help or self.known_args_namespace.version: + # we don't want to prevent --help/--version to work + # so just let it pass and print a warning at the end + self.issue_config_time_warning( + PytestConfigWarning(f"could not load initial conftests: {e.path}"), + stacklevel=2, + ) + else: + raise + + try: + self._parser.parse(args, namespace=self.option) except PrintHelp: - pass + return + + self.args, self.args_source = self._decide_args( + args=getattr(self.option, FILE_OR_DIR), + pyargs=self.option.pyargs, + testpaths=self.getini("testpaths"), + invocation_dir=self.invocation_params.dir, + rootpath=self.rootpath, + warn=True, + ) def issue_config_time_warning(self, warning: Warning, stacklevel: int) -> None: """Issue and handle a warning during the "configure" stage. @@ -1577,7 +1623,7 @@ def issue_config_time_warning(self, warning: Warning, stacklevel: int) -> None: ) def addinivalue_line(self, name: str, line: str) -> None: - """Add a line to an ini-file option. The option must have been + """Add a line to a configuration option. The option must have been declared but might not yet be set in which case the line becomes the first line in its value.""" x = self.getini(name) @@ -1585,11 +1631,11 @@ def addinivalue_line(self, name: str, line: str) -> None: x.append(line) # modifies the cached list inline def getini(self, name: str) -> Any: - """Return configuration value from an :ref:`ini file `. 
+ """Return configuration value the an :ref:`configuration file `. - If a configuration value is not defined in an - :ref:`ini file `, then the ``default`` value provided while - registering the configuration through + If a configuration value is not defined in a + :ref:`configuration file `, then the ``default`` value + provided while registering the configuration through :func:`parser.addini ` will be returned. Please note that you can even provide ``None`` as a valid default value. @@ -1614,11 +1660,13 @@ def getini(self, name: str) -> Any: :func:`parser.addini ` call (usually from a plugin), a ValueError is raised. """ + canonical_name = self._parser._ini_aliases.get(name, name) try: - return self._inicache[name] + return self._inicache[canonical_name] except KeyError: - self._inicache[name] = val = self._getini(name) - return val + pass + self._inicache[canonical_name] = val = self._getini(canonical_name) + return val # Meant for easy monkeypatching by legacypath plugin. # Can be inlined back (with no cover removed) once legacypath is gone. @@ -1629,33 +1677,69 @@ def _getini_unknown_type(self, name: str, type: str, value: object): raise ValueError(msg) # pragma: no cover def _getini(self, name: str): + # If this is an alias, resolve to canonical name. + canonical_name = self._parser._ini_aliases.get(name, name) + try: - description, type, default = self._parser._inidict[name] + _description, type, default = self._parser._inidict[canonical_name] except KeyError as e: raise ValueError(f"unknown configuration value: {name!r}") from e - override_value = self._get_override_ini_value(name) - if override_value is None: - try: - value = self.inicfg[name] - except KeyError: - return default + + # Collect all possible values (canonical name + aliases) from _inicfg. + # Each candidate is (ConfigValue, is_canonical). 
+ candidates = [] + if canonical_name in self._inicfg: + candidates.append((self._inicfg[canonical_name], True)) + for alias, target in self._parser._ini_aliases.items(): + if target == canonical_name and alias in self._inicfg: + candidates.append((self._inicfg[alias], False)) + + if not candidates: + return default + + # Pick the best candidate based on precedence: + # 1. CLI override takes precedence over file, then + # 2. Canonical name takes precedence over alias. + selected = max(candidates, key=lambda x: (x[0].origin == "override", x[1]))[0] + value = selected.value + mode = selected.mode + + if mode == "ini": + # In ini mode, values are always str | list[str]. + assert isinstance(value, (str, list)) + return self._getini_ini(name, canonical_name, type, value, default) + elif mode == "toml": + return self._getini_toml(name, canonical_name, type, value, default) else: - value = override_value - # Coerce the values based on types. - # - # Note: some coercions are only required if we are reading from .ini files, because - # the file format doesn't contain type information, but when reading from toml we will - # get either str or list of str values (see _parse_ini_config_from_pyproject_toml). - # For example: + assert_never(mode) + + def _getini_ini( + self, + name: str, + canonical_name: str, + type: str, + value: str | list[str], + default: Any, + ): + """Handle config values read in INI mode. + + In INI mode, values are stored as str or list[str] only, and coerced + from string based on the registered type. + """ + # Note: some coercions are only required if we are reading from .ini + # files, because the file format doesn't contain type information, but + # when reading from toml (in ini mode) we will get either str or list of + # str values (see load_config_dict_from_file). For example: # # ini: # a_line_list = "tests acceptance" - # in this case, we need to split the string to obtain a list of strings. 
# - # toml: + # in this case, we need to split the string to obtain a list of strings. + # + # toml (ini mode): # a_line_list = ["tests", "acceptance"] - # in this case, we already have a list ready to use. # + # in this case, we already have a list ready to use. if type == "paths": dp = ( self.inipath.parent @@ -1687,7 +1771,89 @@ def _getini(self, name: str): f"Expected a float string for option {name} of type float, but got: {value!r}" ) from None return float(value) - elif type is None: + else: + return self._getini_unknown_type(name, type, value) + + def _getini_toml( + self, + name: str, + canonical_name: str, + type: str, + value: object, + default: Any, + ): + """Handle TOML config values with strict type validation and no coercion. + + In TOML mode, values already have native types from TOML parsing. + We validate types match expectations exactly, including list items. + """ + value_type = builtins.type(value).__name__ + if type == "paths": + # Expect a list of strings. + if not isinstance(value, list): + raise TypeError( + f"{self.inipath}: config option '{name}' expects a list for type 'paths', " + f"got {value_type}: {value!r}" + ) + for i, item in enumerate(value): + if not isinstance(item, str): + item_type = builtins.type(item).__name__ + raise TypeError( + f"{self.inipath}: config option '{name}' expects a list of strings, " + f"but item at index {i} is {item_type}: {item!r}" + ) + dp = ( + self.inipath.parent + if self.inipath is not None + else self.invocation_params.dir + ) + return [dp / x for x in value] + elif type in {"args", "linelist"}: + # Expect a list of strings. 
+ if not isinstance(value, list): + raise TypeError( + f"{self.inipath}: config option '{name}' expects a list for type '{type}', " + f"got {value_type}: {value!r}" + ) + for i, item in enumerate(value): + if not isinstance(item, str): + item_type = builtins.type(item).__name__ + raise TypeError( + f"{self.inipath}: config option '{name}' expects a list of strings, " + f"but item at index {i} is {item_type}: {item!r}" + ) + return list(value) + elif type == "bool": + # Expect a boolean. + if not isinstance(value, bool): + raise TypeError( + f"{self.inipath}: config option '{name}' expects a bool, " + f"got {value_type}: {value!r}" + ) + return value + elif type == "int": + # Expect an integer (but not bool, which is a subclass of int). + if not isinstance(value, int) or isinstance(value, bool): + raise TypeError( + f"{self.inipath}: config option '{name}' expects an int, " + f"got {value_type}: {value!r}" + ) + return value + elif type == "float": + # Expect a float or integer only. + if not isinstance(value, (float, int)) or isinstance(value, bool): + raise TypeError( + f"{self.inipath}: config option '{name}' expects a float, " + f"got {value_type}: {value!r}" + ) + return value + elif type == "string": + # Expect a string. + if not isinstance(value, str): + raise TypeError( + f"{self.inipath}: config option '{name}' expects a string, " + f"got {value_type}: {value!r}" + ) return value else: return self._getini_unknown_type(name, type, value) @@ -1711,23 +1877,6 @@ def _getconftest_pathlist( values.append(relroot) return values - def _get_override_ini_value(self, name: str) -> str | None: - value = None - # override_ini is a list of "ini=value" options. - # Always use the last item if multiple values are set for same ini-name, - # e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2. 
- for ini_config in self._override_ini: - try: - key, user_ini_value = ini_config.split("=", 1) - except ValueError as e: - raise UsageError( - f"-o/--override-ini expects option=value style (got: {ini_config!r})." - ) from e - else: - if key == name: - value = user_ini_value - return value - def getoption(self, name: str, default: Any = notset, skip: bool = False): """Return command line option value. @@ -1765,6 +1914,9 @@ def getvalueorskip(self, name: str, path=None): VERBOSITY_ASSERTIONS: Final = "assertions" #: Verbosity type for test case execution (see :confval:`verbosity_test_cases`). VERBOSITY_TEST_CASES: Final = "test_cases" + #: Verbosity type for failed subtests (see :confval:`verbosity_subtests`). + VERBOSITY_SUBTESTS: Final = "subtests" + _VERBOSITY_INI_DEFAULT: Final = "auto" def get_verbosity(self, verbosity_type: str | None = None) -> int: @@ -1783,11 +1935,19 @@ def get_verbosity(self, verbosity_type: str | None = None) -> int: Example: - .. code-block:: ini + .. tab:: toml + + .. code-block:: toml - # content of pytest.ini - [pytest] - verbosity_assertions = 2 + [tool.pytest] + verbosity_assertions = 2 + + .. tab:: ini + + .. code-block:: ini + + [pytest] + verbosity_assertions = 2 .. code-block:: console @@ -1821,7 +1981,7 @@ def _verbosity_ini_name(verbosity_type: str) -> str: def _add_verbosity_ini(parser: Parser, verbosity_type: str, help: str) -> None: """Add a output verbosity configuration option for the given output type. - :param parser: Parser for command line arguments and ini-file values. + :param parser: Parser for command line arguments and config-file values. :param verbosity_type: Fine-grained verbosity category. :param help: Description of the output this type controls. 
@@ -1965,6 +2125,8 @@ def parse_warning_filter( raise UsageError(error_template.format(error=str(e))) from None try: category: type[Warning] = _resolve_warning_category(category_) + except ImportError: + raise except Exception: exc_info = ExceptionInfo.from_current() exception_text = exc_info.getrepr(style="native") @@ -2023,7 +2185,19 @@ def apply_warning_filters( # Filters should have this precedence: cmdline options, config. # Filters should be applied in the inverse order of precedence. for arg in config_filters: - warnings.filterwarnings(*parse_warning_filter(arg, escape=False)) + try: + warnings.filterwarnings(*parse_warning_filter(arg, escape=False)) + except ImportError as e: + warnings.warn( + f"Failed to import filter module '{e.name}': {arg}", PytestConfigWarning + ) + continue for arg in cmdline_filters: - warnings.filterwarnings(*parse_warning_filter(arg, escape=True)) + try: + warnings.filterwarnings(*parse_warning_filter(arg, escape=True)) + except ImportError as e: + warnings.warn( + f"Failed to import filter module '{e.name}': {arg}", PytestConfigWarning + ) + continue diff --git a/src/_pytest/config/argparsing.py b/src/_pytest/config/argparsing.py index 948dfe8a510..8216ad8b226 100644 --- a/src/_pytest/config/argparsing.py +++ b/src/_pytest/config/argparsing.py @@ -6,14 +6,14 @@ from collections.abc import Mapping from collections.abc import Sequence import os +import sys from typing import Any -from typing import cast from typing import final from typing import Literal from typing import NoReturn +from .exceptions import UsageError import _pytest._io -from _pytest.config.exceptions import UsageError from _pytest.deprecated import check_ispytest @@ -30,14 +30,12 @@ def __repr__(self) -> str: @final class Parser: - """Parser for command line arguments and ini-file values. + """Parser for command line arguments and config-file values. 
:ivar extra_info: Dict of generic param -> value to display in case there's an error processing the command line arguments. """ - prog: str | None = None - def __init__( self, usage: str | None = None, @@ -46,13 +44,31 @@ def __init__( _ispytest: bool = False, ) -> None: check_ispytest(_ispytest) - self._anonymous = OptionGroup("Custom options", parser=self, _ispytest=True) - self._groups: list[OptionGroup] = [] + + from _pytest._argcomplete import filescompleter + self._processopt = processopt - self._usage = usage - self._inidict: dict[str, tuple[str, str | None, Any]] = {} - self._ininames: list[str] = [] self.extra_info: dict[str, Any] = {} + self.optparser = PytestArgumentParser(self, usage, self.extra_info) + anonymous_arggroup = self.optparser.add_argument_group("Custom options") + self._anonymous = OptionGroup( + anonymous_arggroup, "_anonymous", self, _ispytest=True + ) + self._groups = [self._anonymous] + file_or_dir_arg = self.optparser.add_argument(FILE_OR_DIR, nargs="*") + file_or_dir_arg.completer = filescompleter # type: ignore + + self._inidict: dict[str, tuple[str, str, Any]] = {} + # Maps alias -> canonical name. + self._ini_aliases: dict[str, str] = {} + + @property + def prog(self) -> str: + return self.optparser.prog + + @prog.setter + def prog(self, value: str) -> None: + self.optparser.prog = value def processoption(self, option: Argument) -> None: if self._processopt: @@ -77,12 +93,17 @@ def getgroup( for group in self._groups: if group.name == name: return group - group = OptionGroup(name, description, parser=self, _ispytest=True) + + arggroup = self.optparser.add_argument_group(description or name) + group = OptionGroup(arggroup, name, self, _ispytest=True) i = 0 for i, grp in enumerate(self._groups): if grp.name == after: break self._groups.insert(i + 1, group) + # argparse doesn't provide a way to control `--help` order, so must + # access its internals ☹. 
+ self.optparser._action_groups.insert(i + 1, self.optparser._action_groups.pop()) return group def addoption(self, *opts: str, **attrs: Any) -> None: @@ -106,42 +127,24 @@ def parse( args: Sequence[str | os.PathLike[str]], namespace: argparse.Namespace | None = None, ) -> argparse.Namespace: + """Parse the arguments. + + Unlike ``parse_known_args`` and ``parse_known_and_unknown_args``, + raises PrintHelp on `--help` and UsageError on unknown flags + + :meta private: + """ from _pytest._argcomplete import try_argcomplete - self.optparser = self._getparser() try_argcomplete(self.optparser) strargs = [os.fspath(x) for x in args] - return self.optparser.parse_args(strargs, namespace=namespace) - - def _getparser(self) -> MyOptionParser: - from _pytest._argcomplete import filescompleter - - optparser = MyOptionParser(self, self.extra_info, prog=self.prog) - groups = [*self._groups, self._anonymous] - for group in groups: - if group.options: - desc = group.description or group.name - arggroup = optparser.add_argument_group(desc) - for option in group.options: - n = option.names() - a = option.attrs() - arggroup.add_argument(*n, **a) - file_or_dir_arg = optparser.add_argument(FILE_OR_DIR, nargs="*") - # bash like autocompletion for dirs (appending '/') - # Type ignored because typeshed doesn't know about argcomplete. 
- file_or_dir_arg.completer = filescompleter # type: ignore - return optparser - - def parse_setoption( - self, - args: Sequence[str | os.PathLike[str]], - option: argparse.Namespace, - namespace: argparse.Namespace | None = None, - ) -> list[str]: - parsedoption = self.parse(args, namespace=namespace) - for name, value in parsedoption.__dict__.items(): - setattr(option, name, value) - return cast(list[str], getattr(parsedoption, FILE_OR_DIR)) + if namespace is None: + namespace = argparse.Namespace() + try: + namespace._raise_print_help = True + return self.optparser.parse_intermixed_args(strargs, namespace=namespace) + finally: + del namespace._raise_print_help def parse_known_args( self, @@ -160,30 +163,43 @@ def parse_known_and_unknown_args( namespace: argparse.Namespace | None = None, ) -> tuple[argparse.Namespace, list[str]]: """Parse the known arguments at this point, and also return the - remaining unknown arguments. + remaining unknown flag arguments. :returns: A tuple containing an argparse namespace object for the known - arguments, and a list of the unknown arguments. + arguments, and a list of unknown flag arguments. """ - optparser = self._getparser() strargs = [os.fspath(x) for x in args] - return optparser.parse_known_args(strargs, namespace=namespace) + if sys.version_info < (3, 12, 8) or (3, 13) <= sys.version_info < (3, 13, 1): + # Older argparse have a bugged parse_known_intermixed_args. 
+ namespace, unknown = self.optparser.parse_known_args(strargs, namespace) + assert namespace is not None + file_or_dir = getattr(namespace, FILE_OR_DIR) + unknown_flags: list[str] = [] + for arg in unknown: + (unknown_flags if arg.startswith("-") else file_or_dir).append(arg) + return namespace, unknown_flags + else: + return self.optparser.parse_known_intermixed_args(strargs, namespace) def addini( self, name: str, help: str, - type: Literal["string", "paths", "pathlist", "args", "linelist", "bool"] + type: Literal[ + "string", "paths", "pathlist", "args", "linelist", "bool", "int", "float" + ] | None = None, default: Any = NOT_SET, + *, + aliases: Sequence[str] = (), ) -> None: - """Register an ini-file option. + """Register a configuration file option. :param name: - Name of the ini-variable. + Name of the configuration. :param type: - Type of the variable. Can be: + Type of the configuration. Can be: * ``string``: a string * ``bool``: a boolean @@ -198,21 +214,27 @@ def addini( The ``float`` and ``int`` types. - For ``paths`` and ``pathlist`` types, they are considered relative to the ini-file. - In case the execution is happening without an ini-file defined, + For ``paths`` and ``pathlist`` types, they are considered relative to the config-file. + In case the execution is happening without a config-file defined, they will be considered relative to the current working directory (for example with ``--override-ini``). .. versionadded:: 7.0 The ``paths`` variable type. .. versionadded:: 8.1 - Use the current working directory to resolve ``paths`` and ``pathlist`` in the absence of an ini-file. + Use the current working directory to resolve ``paths`` and ``pathlist`` in the absence of a config-file. Defaults to ``string`` if ``None`` or not passed. :param default: - Default value if no ini-file option exists but is queried. + Default value if no config-file option exists but is queried. + :param aliases: + Additional names by which this option can be referenced. 
+ Aliases resolve to the canonical name. - The value of ini-variables can be retrieved via a call to + .. versionadded:: 9.0 + The ``aliases`` parameter. + + The value of configuration keys can be retrieved via a call to :py:func:`config.getini(name) `. """ assert type in ( @@ -226,26 +248,33 @@ def addini( "int", "float", ) + if type is None: + type = "string" if default is NOT_SET: default = get_ini_default_for_type(type) self._inidict[name] = (help, type, default) - self._ininames.append(name) + + for alias in aliases: + if alias in self._inidict: + raise ValueError( + f"alias {alias!r} conflicts with existing configuration option" + ) + if (already := self._ini_aliases.get(alias)) is not None: + raise ValueError(f"{alias!r} is already an alias of {already!r}") + self._ini_aliases[alias] = name def get_ini_default_for_type( type: Literal[ "string", "paths", "pathlist", "args", "linelist", "bool", "int", "float" - ] - | None, + ], ) -> Any: """ - Used by addini to get the default value for a given ini-option type, when + Used by addini to get the default value for a given config option type, when default is not supplied. """ - if type is None: - return "" - elif type in ("paths", "pathlist", "args", "linelist"): + if type in ("paths", "pathlist", "args", "linelist"): return [] elif type == "bool": return False @@ -313,9 +342,7 @@ def names(self) -> list[str]: def attrs(self) -> Mapping[str, Any]: # Update any attributes set by processopt. 
- attrs = "default dest help".split() - attrs.append(self.dest) - for attr in attrs: + for attr in ("default", "dest", "help", self.dest): try: self._attrs[attr] = getattr(self, attr) except AttributeError: @@ -370,15 +397,14 @@ class OptionGroup: def __init__( self, + arggroup: argparse._ArgumentGroup, name: str, - description: str = "", - parser: Parser | None = None, - *, + parser: Parser | None, _ispytest: bool = False, ) -> None: check_ispytest(_ispytest) + self._arggroup = arggroup self.name = name - self.description = description self.options: list[Argument] = [] self.parser = parser @@ -413,22 +439,24 @@ def _addoption_instance(self, option: Argument, shortupper: bool = False) -> Non for opt in option._short_opts: if opt[0] == "-" and opt[1].islower(): raise ValueError("lowercase shortoptions reserved") + if self.parser: self.parser.processoption(option) + + self._arggroup.add_argument(*option.names(), **option.attrs()) self.options.append(option) -class MyOptionParser(argparse.ArgumentParser): +class PytestArgumentParser(argparse.ArgumentParser): def __init__( self, parser: Parser, - extra_info: dict[str, Any] | None = None, - prog: str | None = None, + usage: str | None, + extra_info: dict[str, str], ) -> None: self._parser = parser super().__init__( - prog=prog, - usage=parser._usage, + usage=usage, add_help=False, formatter_class=DropShorterLongHelpFormatter, allow_abbrev=False, @@ -436,37 +464,17 @@ def __init__( ) # extra_info is a dict of (param -> value) to display if there's # an usage error to provide more contextual information to the user. 
- self.extra_info = extra_info if extra_info else {} + self.extra_info = extra_info def error(self, message: str) -> NoReturn: """Transform argparse error message into UsageError.""" msg = f"{self.prog}: error: {message}" - - if hasattr(self._parser, "_config_source_hint"): - msg = f"{msg} ({self._parser._config_source_hint})" - + if self.extra_info: + msg += "\n" + "\n".join( + f" {k}: {v}" for k, v in sorted(self.extra_info.items()) + ) raise UsageError(self.format_usage() + msg) - # Type ignored because typeshed has a very complex type in the superclass. - def parse_args( # type: ignore - self, - args: Sequence[str] | None = None, - namespace: argparse.Namespace | None = None, - ) -> argparse.Namespace: - """Allow splitting of positional arguments.""" - parsed, unrecognized = self.parse_known_args(args, namespace) - if unrecognized: - for arg in unrecognized: - if arg and arg[0] == "-": - lines = [ - "unrecognized arguments: {}".format(" ".join(unrecognized)) - ] - for k, v in sorted(self.extra_info.items()): - lines.append(f" {k}: {v}") - self.error("\n".join(lines)) - getattr(parsed, FILE_OR_DIR).extend(unrecognized) - return parsed - class DropShorterLongHelpFormatter(argparse.HelpFormatter): """Shorten help for long options that differ only in extra hyphens. @@ -531,3 +539,40 @@ def _split_lines(self, text, width): for line in text.splitlines(): lines.extend(textwrap.wrap(line.strip(), width)) return lines + + +class OverrideIniAction(argparse.Action): + """Custom argparse action that makes a CLI flag equivalent to overriding an + option, in addition to behaving like `store_true`. + + This can simplify things since code only needs to inspect the config option + and not consider the CLI flag. 
+ """ + + def __init__( + self, + option_strings: Sequence[str], + dest: str, + nargs: int | str | None = None, + *args, + ini_option: str, + ini_value: str, + **kwargs, + ) -> None: + super().__init__(option_strings, dest, 0, *args, **kwargs) + self.ini_option = ini_option + self.ini_value = ini_value + + def __call__( + self, + parser: argparse.ArgumentParser, + namespace: argparse.Namespace, + *args, + **kwargs, + ) -> None: + setattr(namespace, self.dest, True) + current_overrides = getattr(namespace, "override_ini", None) + if current_overrides is None: + current_overrides = [] + current_overrides.append(f"{self.ini_option}={self.ini_value}") + setattr(namespace, "override_ini", current_overrides) diff --git a/src/_pytest/config/exceptions.py b/src/_pytest/config/exceptions.py index 90108eca904..d84a9ea67e0 100644 --- a/src/_pytest/config/exceptions.py +++ b/src/_pytest/config/exceptions.py @@ -7,6 +7,8 @@ class UsageError(Exception): """Error in pytest usage or invocation.""" + __module__ = "pytest" + class PrintHelp(Exception): """Raised when pytest should print its help to skip the rest of the diff --git a/src/_pytest/config/findpaths.py b/src/_pytest/config/findpaths.py index 15bfbb0613e..3c628a09c2d 100644 --- a/src/_pytest/config/findpaths.py +++ b/src/_pytest/config/findpaths.py @@ -2,10 +2,13 @@ from collections.abc import Iterable from collections.abc import Sequence +from dataclasses import dataclass +from dataclasses import KW_ONLY import os from pathlib import Path import sys -from typing import TYPE_CHECKING +from typing import Literal +from typing import TypeAlias import iniconfig @@ -16,14 +19,28 @@ from _pytest.pathlib import safe_exists -if TYPE_CHECKING: - from typing import Union +@dataclass(frozen=True) +class ConfigValue: + """Represents a configuration value with its origin and parsing mode. 
- from typing_extensions import TypeAlias + This allows tracking whether a value came from a configuration file + or from a CLI override (--override-ini), which is important for + determining precedence when dealing with ini option aliases. + + The mode tracks the parsing mode/data model used for the value: + - "ini": from INI files or [tool.pytest.ini_options], where the only + supported value types are `str` or `list[str]`. + - "toml": from TOML files (not in INI mode), where native TOML types + are preserved. + """ + + value: object + _: KW_ONLY + origin: Literal["file", "override"] + mode: Literal["ini", "toml"] - # Even though TOML supports richer data types, all values are converted to str/list[str] during - # parsing to maintain compatibility with the rest of the configuration system. - ConfigDict: TypeAlias = dict[str, Union[str, list[str]]] + +ConfigDict: TypeAlias = dict[str, ConfigValue] def _parse_ini_config(path: Path) -> iniconfig.IniConfig: @@ -50,10 +67,13 @@ def load_config_dict_from_file( iniconfig = _parse_ini_config(filepath) if "pytest" in iniconfig: - return dict(iniconfig["pytest"].items()) + return { + k: ConfigValue(v, origin="file", mode="ini") + for k, v in iniconfig["pytest"].items() + } else: # "pytest.ini" files are always the source of configuration, even if empty. - if filepath.name == "pytest.ini": + if filepath.name in {"pytest.ini", ".pytest.ini"}: return {} # '.cfg' files are considered if they contain a "[tool:pytest]" section. @@ -61,13 +81,18 @@ def load_config_dict_from_file( iniconfig = _parse_ini_config(filepath) if "tool:pytest" in iniconfig.sections: - return dict(iniconfig["tool:pytest"].items()) + return { + k: ConfigValue(v, origin="file", mode="ini") + for k, v in iniconfig["tool:pytest"].items() + } elif "pytest" in iniconfig.sections: # If a setup.cfg contains a "[pytest]" section, we raise a failure to indicate users that # plain "[pytest]" sections in setup.cfg files is no longer supported (#3086). 
fail(CFG_PYTEST_SECTION.format(filename="setup.cfg"), pytrace=False) - # '.toml' files are considered if they contain a [tool.pytest.ini_options] table. + # '.toml' files are considered if they contain a [tool.pytest] table (toml mode) + # or [tool.pytest.ini_options] table (ini mode) for pyproject.toml, + # or [pytest] table (toml mode) for pytest.toml/.pytest.toml. elif filepath.suffix == ".toml": if sys.version_info >= (3, 11): import tomllib @@ -80,15 +105,52 @@ def load_config_dict_from_file( except tomllib.TOMLDecodeError as exc: raise UsageError(f"{filepath}: {exc}") from exc - result = config.get("tool", {}).get("pytest", {}).get("ini_options", None) - if result is not None: - # TOML supports richer data types than ini files (strings, arrays, floats, ints, etc), - # however we need to convert all scalar values to str for compatibility with the rest - # of the configuration system, which expects strings only. - def make_scalar(v: object) -> str | list[str]: - return v if isinstance(v, list) else str(v) - - return {k: make_scalar(v) for k, v in result.items()} + # pytest.toml and .pytest.toml use [pytest] table directly. + if filepath.name in ("pytest.toml", ".pytest.toml"): + pytest_config = config.get("pytest", {}) + if pytest_config: + # TOML mode - preserve native TOML types. + return { + k: ConfigValue(v, origin="file", mode="toml") + for k, v in pytest_config.items() + } + # "pytest.toml" files are always the source of configuration, even if empty. + return {} + + # pyproject.toml uses [tool.pytest] or [tool.pytest.ini_options]. + else: + tool_pytest = config.get("tool", {}).get("pytest", {}) + + # Check for toml mode config: [tool.pytest] with content outside of ini_options. + toml_config = {k: v for k, v in tool_pytest.items() if k != "ini_options"} + # Check for ini mode config: [tool.pytest.ini_options]. 
+ ini_config = tool_pytest.get("ini_options", None) + + if toml_config and ini_config: + raise UsageError( + f"{filepath}: Cannot use both [tool.pytest] (native TOML types) and " + "[tool.pytest.ini_options] (string-based INI format) simultaneously. " + "Please use [tool.pytest] with native TOML types (recommended) " + "or [tool.pytest.ini_options] for backwards compatibility." + ) + + if toml_config: + # TOML mode - preserve native TOML types. + return { + k: ConfigValue(v, origin="file", mode="toml") + for k, v in toml_config.items() + } + + elif ini_config is not None: + # INI mode - TOML supports richer data types than INI files, but we need to + # convert all scalar values to str for compatibility with the INI system. + def make_scalar(v: object) -> str | list[str]: + return v if isinstance(v, list) else str(v) + + return { + k: ConfigValue(make_scalar(v), origin="file", mode="ini") + for k, v in ini_config.items() + } return None @@ -96,10 +158,14 @@ def make_scalar(v: object) -> str | list[str]: def locate_config( invocation_dir: Path, args: Iterable[Path], -) -> tuple[Path | None, Path | None, ConfigDict]: +) -> tuple[Path | None, Path | None, ConfigDict, Sequence[str]]: """Search in the list of arguments for a valid ini-file for pytest, - and return a tuple of (rootdir, inifile, cfg-dict).""" + and return a tuple of (rootdir, inifile, cfg-dict, ignored-config-files), where + ignored-config-files is a list of config basenames found that contain + pytest configuration but were ignored.""" config_names = [ + "pytest.toml", + ".pytest.toml", "pytest.ini", ".pytest.ini", "pyproject.toml", @@ -110,6 +176,8 @@ def locate_config( if not args: args = [invocation_dir] found_pyproject_toml: Path | None = None + ignored_config_files: list[str] = [] + for arg in args: argpath = absolutepath(arg) for base in (argpath, *argpath.parents): @@ -120,10 +188,18 @@ def locate_config( found_pyproject_toml = p ini_config = load_config_dict_from_file(p) if ini_config is not None: 
- return base, p, ini_config + index = config_names.index(config_name) + for remainder in config_names[index + 1 :]: + p2 = base / remainder + if ( + p2.is_file() + and load_config_dict_from_file(p2) is not None + ): + ignored_config_files.append(remainder) + return base, p, ini_config, ignored_config_files if found_pyproject_toml is not None: - return found_pyproject_toml.parent, found_pyproject_toml, {} - return None, None, {} + return found_pyproject_toml.parent, found_pyproject_toml, {}, [] + return None, None, {}, [] def get_common_ancestor( @@ -174,30 +250,59 @@ def get_dir_from_path(path: Path) -> Path: return [get_dir_from_path(path) for path in possible_paths if safe_exists(path)] +def parse_override_ini(override_ini: Sequence[str] | None) -> ConfigDict: + """Parse the -o/--override-ini command line arguments and return the overrides. + + :raises UsageError: + If one of the values is malformed. + """ + overrides = {} + # override_ini is a list of "ini=value" options. + # Always use the last item if multiple values are set for same ini-name, + # e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2. + for ini_config in override_ini or (): + try: + key, user_ini_value = ini_config.split("=", 1) + except ValueError as e: + raise UsageError( + f"-o/--override-ini expects option=value style (got: {ini_config!r})." + ) from e + else: + overrides[key] = ConfigValue(user_ini_value, origin="override", mode="ini") + return overrides + + CFG_PYTEST_SECTION = "[pytest] section in {filename} files is no longer supported, change to [tool:pytest] instead." def determine_setup( *, inifile: str | None, + override_ini: Sequence[str] | None, args: Sequence[str], rootdir_cmd_arg: str | None, invocation_dir: Path, -) -> tuple[Path, Path | None, ConfigDict]: +) -> tuple[Path, Path | None, ConfigDict, Sequence[str]]: """Determine the rootdir, inifile and ini configuration values from the command line arguments. :param inifile: The `--inifile` command line argument, if given.
+ :param override_ini: + The -o/--override-ini command line arguments, if given. :param args: The free command line arguments. :param rootdir_cmd_arg: The `--rootdir` command line argument, if given. :param invocation_dir: The working directory when pytest was invoked. + + :raises UsageError: """ rootdir = None dirs = get_dirs_from_args(args) + ignored_config_files: Sequence[str] = [] + if inifile: inipath_ = absolutepath(inifile) inipath: Path | None = inipath_ @@ -206,7 +311,9 @@ def determine_setup( rootdir = inipath_.parent else: ancestor = get_common_ancestor(invocation_dir, dirs) - rootdir, inipath, inicfg = locate_config(invocation_dir, [ancestor]) + rootdir, inipath, inicfg, ignored_config_files = locate_config( + invocation_dir, [ancestor] + ) if rootdir is None and rootdir_cmd_arg is None: for possible_rootdir in (ancestor, *ancestor.parents): if (possible_rootdir / "setup.py").is_file(): @@ -214,7 +321,7 @@ def determine_setup( break else: if dirs != [ancestor]: - rootdir, inipath, inicfg = locate_config(invocation_dir, dirs) + rootdir, inipath, inicfg, _ = locate_config(invocation_dir, dirs) if rootdir is None: rootdir = get_common_ancestor( invocation_dir, [invocation_dir, ancestor] @@ -227,8 +334,12 @@ def determine_setup( raise UsageError( f"Directory '{rootdir}' not found. Check your '--rootdir' option." 
) + + ini_overrides = parse_override_ini(override_ini) + inicfg.update(ini_overrides) + assert rootdir is not None - return rootdir, inipath, inicfg or {} + return rootdir, inipath, inicfg, ignored_config_files def is_fs_root(p: Path) -> bool: diff --git a/src/_pytest/deprecated.py b/src/_pytest/deprecated.py index a605c24e58f..cb5d2e93e93 100644 --- a/src/_pytest/deprecated.py +++ b/src/_pytest/deprecated.py @@ -15,6 +15,7 @@ from _pytest.warning_types import PytestDeprecationWarning from _pytest.warning_types import PytestRemovedIn9Warning +from _pytest.warning_types import PytestRemovedIn10Warning from _pytest.warning_types import UnformattedWarning @@ -24,11 +25,11 @@ "pytest_catchlog", "pytest_capturelog", "pytest_faulthandler", + "pytest_subtests", } -# This can be* removed pytest 8, but it's harmless and common, so no rush to remove. -# * If you're in the future: "could have been". +# This could have been removed pytest 8, but it's harmless and common, so no rush to remove. YIELD_FIXTURE = PytestDeprecationWarning( "@pytest.yield_fixture is deprecated.\n" "Use @pytest.fixture instead; they are the same." @@ -67,6 +68,13 @@ "See docs: https://docs.pytest.org/en/stable/deprecations.html#applying-a-mark-to-a-fixture-function" ) +MONKEYPATCH_LEGACY_NAMESPACE_PACKAGES = PytestRemovedIn10Warning( + "monkeypatch.syspath_prepend() called with pkg_resources legacy namespace packages detected.\n" + "Legacy namespace packages (using pkg_resources.declare_namespace) are deprecated.\n" + "Please use native namespace packages (PEP 420) instead.\n" + "See https://docs.pytest.org/en/stable/deprecations.html#monkeypatch-fixup-namespace-packages" +) + # You want to make some `__init__` or function "private". 
# # def my_private_function(some, args): diff --git a/src/_pytest/doctest.py b/src/_pytest/doctest.py index 0dbef6056d7..cd255f5eeb6 100644 --- a/src/_pytest/doctest.py +++ b/src/_pytest/doctest.py @@ -324,7 +324,7 @@ def repr_failure( # type: ignore[override] Sequence[doctest.DocTestFailure | doctest.UnexpectedException] | None ) = None if isinstance( - excinfo.value, (doctest.DocTestFailure, doctest.UnexpectedException) + excinfo.value, doctest.DocTestFailure | doctest.UnexpectedException ): failures = [excinfo.value] elif isinstance(excinfo.value, MultipleDoctestFailures): @@ -530,24 +530,6 @@ def _find_lineno(self, obj, source_lines): source_lines, ) - if sys.version_info < (3, 10): - - def _find( - self, tests, obj, name, module, source_lines, globs, seen - ) -> None: - """Override _find to work around issue in stdlib. - - https://github.com/pytest-dev/pytest/issues/3456 - https://github.com/python/cpython/issues/69718 - """ - if _is_mocked(obj): - return # pragma: no cover - with _patch_unwrap_mock_aware(): - # Type ignored because this is a private function. 
- super()._find( # type:ignore[misc] - tests, obj, name, module, source_lines, globs, seen - ) - if sys.version_info < (3, 13): def _from_module(self, module, object): @@ -657,7 +639,7 @@ def _remove_unwanted_precision(self, want: str, got: str) -> str: if len(wants) != len(gots): return got offset = 0 - for w, g in zip(wants, gots): + for w, g in zip(wants, gots, strict=True): fraction: str | None = w.group("fraction") exponent: str | None = w.group("exponent1") if exponent is None: diff --git a/src/_pytest/faulthandler.py b/src/_pytest/faulthandler.py index 79efc1d1704..080cf583813 100644 --- a/src/_pytest/faulthandler.py +++ b/src/_pytest/faulthandler.py @@ -16,11 +16,18 @@ def pytest_addoption(parser: Parser) -> None: - help = ( + help_timeout = ( "Dump the traceback of all threads if a test takes " "more than TIMEOUT seconds to finish" ) - parser.addini("faulthandler_timeout", help, default=0.0) + help_exit_on_timeout = ( + "Exit the test process if a test takes more than " + "faulthandler_timeout seconds to finish" + ) + parser.addini("faulthandler_timeout", help_timeout, default=0.0) + parser.addini( + "faulthandler_exit_on_timeout", help_exit_on_timeout, type="bool", default=False + ) def pytest_configure(config: Config) -> None: @@ -72,14 +79,21 @@ def get_timeout_config_value(config: Config) -> float: return float(config.getini("faulthandler_timeout") or 0.0) +def get_exit_on_timeout_config_value(config: Config) -> bool: + exit_on_timeout = config.getini("faulthandler_exit_on_timeout") + assert isinstance(exit_on_timeout, bool) + return exit_on_timeout + + @pytest.hookimpl(wrapper=True, trylast=True) def pytest_runtest_protocol(item: Item) -> Generator[None, object, object]: timeout = get_timeout_config_value(item.config) + exit_on_timeout = get_exit_on_timeout_config_value(item.config) if timeout > 0: import faulthandler stderr = item.config.stash[fault_handler_stderr_fd_key] - faulthandler.dump_traceback_later(timeout, file=stderr) + 
faulthandler.dump_traceback_later(timeout, file=stderr, exit=exit_on_timeout) try: return (yield) finally: diff --git a/src/_pytest/fixtures.py b/src/_pytest/fixtures.py index 92a301e79db..27846db13a4 100644 --- a/src/_pytest/fixtures.py +++ b/src/_pytest/fixtures.py @@ -26,11 +26,9 @@ from typing import final from typing import Generic from typing import NoReturn -from typing import Optional from typing import overload from typing import TYPE_CHECKING from typing import TypeVar -from typing import Union import warnings import _pytest @@ -49,6 +47,7 @@ from _pytest.compat import NotSetType from _pytest.compat import safe_getattr from _pytest.compat import safe_isclass +from _pytest.compat import signature from _pytest.config import _PluggyPlugin from _pytest.config import Config from _pytest.config import ExitCode @@ -83,36 +82,28 @@ # The value of the fixture -- return/yield of the fixture function (type variable). -FixtureValue = TypeVar("FixtureValue") +FixtureValue = TypeVar("FixtureValue", covariant=True) # The type of the fixture function (type variable). FixtureFunction = TypeVar("FixtureFunction", bound=Callable[..., object]) # The type of a fixture function (type alias generic in fixture value). -_FixtureFunc = Union[ - Callable[..., FixtureValue], Callable[..., Generator[FixtureValue]] -] +_FixtureFunc = Callable[..., FixtureValue] | Callable[..., Generator[FixtureValue]] # The type of FixtureDef.cached_result (type alias generic in fixture value). -_FixtureCachedResult = Union[ +_FixtureCachedResult = ( tuple[ # The result. FixtureValue, # Cache key. object, None, - ], - tuple[ + ] + | tuple[ None, # Cache key. object, # The exception and the original traceback. 
- tuple[BaseException, Optional[types.TracebackType]], - ], -] - - -@dataclasses.dataclass(frozen=True) -class PseudoFixtureDef(Generic[FixtureValue]): - cached_result: _FixtureCachedResult[FixtureValue] - _scope: Scope + tuple[BaseException, types.TracebackType | None], + ] +) def pytest_sessionstart(session: Session) -> None: @@ -423,7 +414,7 @@ def scope(self) -> _ScopeName: @abc.abstractmethod def _check_scope( self, - requested_fixturedef: FixtureDef[object] | PseudoFixtureDef[object], + requested_fixturedef: FixtureDef[object], requested_scope: Scope, ) -> None: raise NotImplementedError() @@ -562,12 +553,9 @@ def _iter_chain(self) -> Iterator[SubRequest]: yield current current = current._parent_request - def _get_active_fixturedef( - self, argname: str - ) -> FixtureDef[object] | PseudoFixtureDef[object]: + def _get_active_fixturedef(self, argname: str) -> FixtureDef[object]: if argname == "request": - cached_result = (self, [0], None) - return PseudoFixtureDef(cached_result, Scope.Function) + return RequestFixtureDef(self) # If we already finished computing a fixture by this name in this item, # return it. @@ -699,7 +687,7 @@ def _scope(self) -> Scope: def _check_scope( self, - requested_fixturedef: FixtureDef[object] | PseudoFixtureDef[object], + requested_fixturedef: FixtureDef[object], requested_scope: Scope, ) -> None: # TopRequest always has function scope so always valid. @@ -778,11 +766,9 @@ def node(self): def _check_scope( self, - requested_fixturedef: FixtureDef[object] | PseudoFixtureDef[object], + requested_fixturedef: FixtureDef[object], requested_scope: Scope, ) -> None: - if isinstance(requested_fixturedef, PseudoFixtureDef): - return if self._scope > requested_scope: # Try to report something helpful. 
argname = requested_fixturedef.argname @@ -804,8 +790,8 @@ def _format_fixturedef_line(self, fixturedef: FixtureDef[object]) -> str: path, lineno = getfslineno(factory) if isinstance(path, Path): path = bestrelpath(self._pyfuncitem.session.path, path) - signature = inspect.signature(factory) - return f"{path}:{lineno + 1}: def {factory.__name__}{signature}" + sig = signature(factory) + return f"{path}:{lineno + 1}: def {factory.__name__}{sig}" def addfinalizer(self, finalizer: Callable[[], object]) -> None: self._fixturedef.addfinalizer(finalizer) @@ -971,7 +957,6 @@ def _eval_scope_callable( return result -@final class FixtureDef(Generic[FixtureValue]): """A container for a fixture definition. @@ -1086,8 +1071,7 @@ def execute(self, request: SubRequest) -> FixtureValue: # down first. This is generally handled by SetupState, but still currently # needed when this fixture is not parametrized but depends on a parametrized # fixture. - if not isinstance(fixturedef, PseudoFixtureDef): - requested_fixtures_that_should_finalize_us.append(fixturedef) + requested_fixtures_that_should_finalize_us.append(fixturedef) # Check for (and return) cached value/exception. if self.cached_result is not None: @@ -1106,8 +1090,7 @@ def execute(self, request: SubRequest) -> FixtureValue: exc, exc_tb = self.cached_result[2] raise exc.with_traceback(exc_tb) else: - result = self.cached_result[0] - return result + return self.cached_result[0] # We have a previous but differently parametrized fixture instance # so we need to tear it down before creating a new one. self.finish(request) @@ -1123,10 +1106,12 @@ def execute(self, request: SubRequest) -> FixtureValue: ihook = request.node.ihook try: # Setup the fixture, run the code in it, and cache the value - # in self.cached_result - result = ihook.pytest_fixture_setup(fixturedef=self, request=request) + # in self.cached_result. 
+ result: FixtureValue = ihook.pytest_fixture_setup( + fixturedef=self, request=request + ) finally: - # schedule our finalizer, even if the setup failed + # Schedule our finalizer, even if the setup failed. request.node.addfinalizer(finalizer) return result @@ -1138,6 +1123,28 @@ def __repr__(self) -> str: return f"" +class RequestFixtureDef(FixtureDef[FixtureRequest]): + """A custom FixtureDef for the special "request" fixture. + + A new one is generated on-demand whenever "request" is requested. + """ + + def __init__(self, request: FixtureRequest) -> None: + super().__init__( + config=request.config, + baseid=None, + argname="request", + func=lambda: request, + scope=Scope.Function, + params=None, + _ispytest=True, + ) + self.cached_result = (request, [0], None) + + def addfinalizer(self, finalizer: Callable[[], object]) -> None: + pass + + def resolve_fixture_function( fixturedef: FixtureDef[FixtureValue], request: FixtureRequest ) -> _FixtureFunc[FixtureValue]: @@ -1513,7 +1520,7 @@ class FixtureManager: relevant for a particular function. An initial list of fixtures is assembled like this: - - ini-defined usefixtures + - config-defined usefixtures - autouse-marked fixtures along the collection chain up from the function - usefixtures markers at module/class/function level - test function funcargs @@ -1636,20 +1643,44 @@ def getfixtureclosure( fixturenames_closure = list(initialnames) arg2fixturedefs: dict[str, Sequence[FixtureDef[Any]]] = {} - lastlen = -1 - while lastlen != len(fixturenames_closure): - lastlen = len(fixturenames_closure) - for argname in fixturenames_closure: - if argname in ignore_args: - continue - if argname in arg2fixturedefs: - continue + + # Track the index for each fixture name in the simulated stack. + # Needed for handling override chains correctly, similar to _get_active_fixturedef. + # Using negative indices: -1 is the most specific (last), -2 is second to last, etc. 
+ current_indices: dict[str, int] = {} + + def process_argname(argname: str) -> None: + # Optimization: already processed this argname. + if current_indices.get(argname) == -1: + return + + if argname not in fixturenames_closure: + fixturenames_closure.append(argname) + + if argname in ignore_args: + return + + fixturedefs = arg2fixturedefs.get(argname) + if not fixturedefs: fixturedefs = self.getfixturedefs(argname, parentnode) - if fixturedefs: - arg2fixturedefs[argname] = fixturedefs - for arg in fixturedefs[-1].argnames: - if arg not in fixturenames_closure: - fixturenames_closure.append(arg) + if not fixturedefs: + # Fixture not defined or not visible (will error during runtest). + return + arg2fixturedefs[argname] = fixturedefs + + index = current_indices.get(argname, -1) + if -index > len(fixturedefs): + # Exhausted the override chain (will error during runtest). + return + fixturedef = fixturedefs[index] + + current_indices[argname] = index - 1 + for dep in fixturedef.argnames: + process_argname(dep) + current_indices[argname] = index + + for name in initialnames: + process_argname(name) def sort_by_scope(arg_name: str) -> Scope: try: diff --git a/src/_pytest/helpconfig.py b/src/_pytest/helpconfig.py index b5ac0e6a50c..6a22c9f58ac 100644 --- a/src/_pytest/helpconfig.py +++ b/src/_pytest/helpconfig.py @@ -3,10 +3,12 @@ from __future__ import annotations -from argparse import Action +import argparse from collections.abc import Generator +from collections.abc import Sequence import os import sys +from typing import Any from _pytest.config import Config from _pytest.config import ExitCode @@ -16,31 +18,41 @@ import pytest -class HelpAction(Action): - """An argparse Action that will raise an exception in order to skip the - rest of the argument parsing when --help is passed. +class HelpAction(argparse.Action): + """An argparse Action that will raise a PrintHelp exception in order to skip + the rest of the argument parsing when --help is passed. 
- This prevents argparse from quitting due to missing required arguments - when any are defined, for example by ``pytest_addoption``. - This is similar to the way that the builtin argparse --help option is - implemented by raising SystemExit. + This prevents argparse from raising UsageError when `--help` is used along + with missing required arguments when any are defined, for example by + ``pytest_addoption``. This is similar to the way that the builtin argparse + --help option is implemented by raising SystemExit. + + To opt in to this behavior, the parse caller must set + `namespace._raise_print_help = True`. Otherwise it just sets the option. """ - def __init__(self, option_strings, dest=None, default=False, help=None): + def __init__( + self, option_strings: Sequence[str], dest: str, *, help: str | None = None + ) -> None: super().__init__( option_strings=option_strings, dest=dest, - const=True, - default=default, nargs=0, + const=True, + default=False, help=help, ) - def __call__(self, parser, namespace, values, option_string=None): + def __call__( + self, + parser: argparse.ArgumentParser, + namespace: argparse.Namespace, + values: str | Sequence[Any] | None, + option_string: str | None = None, + ) -> None: setattr(namespace, self.dest, self.const) - # We should only skip the rest of the parsing after preparse is done. - if getattr(parser._parser, "after_preparse", False): + if getattr(namespace, "_raise_print_help", False): raise PrintHelp @@ -102,8 +114,8 @@ def pytest_addoption(parser: Parser) -> None: "--override-ini", dest="override_ini", action="append", - help='Override ini option with "option=value" style, ' - "e.g. `-o xfail_strict=True -o cache_dir=cache`.", + help='Override configuration option with "option=value" style, ' + "e.g. 
`-o strict_xfail=True -o cache_dir=cache`.", ) @@ -140,28 +152,28 @@ def unset_tracing() -> None: return config -def showversion(config: Config) -> None: - if config.option.version > 1: - sys.stdout.write( - f"This is pytest version {pytest.__version__}, imported from {pytest.__file__}\n" - ) - plugininfo = getpluginversioninfo(config) - if plugininfo: - for line in plugininfo: - sys.stdout.write(line + "\n") - else: - sys.stdout.write(f"pytest {pytest.__version__}\n") +def show_version_verbose(config: Config) -> None: + """Show verbose pytest version installation, including plugins.""" + sys.stdout.write( + f"This is pytest version {pytest.__version__}, imported from {pytest.__file__}\n" + ) + plugininfo = getpluginversioninfo(config) + if plugininfo: + for line in plugininfo: + sys.stdout.write(line + "\n") def pytest_cmdline_main(config: Config) -> int | ExitCode | None: - if config.option.version > 0: - showversion(config) - return 0 + # Note: a single `--version` argument is handled directly by `Config.main()` to avoid starting up the entire + # pytest infrastructure just to display the version (#13574). 
+ if config.option.version > 1: + show_version_verbose(config) + return ExitCode.OK elif config.option.help: config._do_configure() showhelp(config) config._ensure_unconfigure() - return 0 + return ExitCode.OK return None @@ -176,18 +188,16 @@ def showhelp(config: Config) -> None: tw.write(config._parser.optparser.format_help()) tw.line() tw.line( - "[pytest] ini-options in the first " - "pytest.ini|tox.ini|setup.cfg|pyproject.toml file found:" + "[pytest] configuration options in the first " + "pytest.toml|pytest.ini|tox.ini|setup.cfg|pyproject.toml file found:" ) tw.line() columns = tw.fullwidth # costly call indent_len = 24 # based on argparse's max_help_position=24 indent = " " * indent_len - for name in config._parser._ininames: - help, type, default = config._parser._inidict[name] - if type is None: - type = "string" + for name in config._parser._inidict: + help, type, _default = config._parser._inidict[name] if help is None: raise TypeError(f"help argument cannot be None for {name}") spec = f"{name} ({type}):" @@ -221,7 +231,7 @@ def showhelp(config: Config) -> None: vars = [ ( "CI", - "When set (regardless of value), pytest knows it is running in a " + "When set to a non-empty value, pytest knows it is running in a " "CI process and does not truncate summary info", ), ("BUILD_NUMBER", "Equivalent to CI"), @@ -229,6 +239,9 @@ def showhelp(config: Config) -> None: ("PYTEST_PLUGINS", "Comma-separated plugins to load during startup"), ("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "Set to disable plugin auto-loading"), ("PYTEST_DEBUG", "Set to enable debug tracing of pytest's internals"), + ("PYTEST_DEBUG_TEMPROOT", "Override the system temporary directory"), + ("PYTEST_THEME", "The Pygments style to use for code output"), + ("PYTEST_THEME_MODE", "Set the PYTEST_THEME to be either 'dark' or 'light'"), ] for name, help in vars: tw.line(f" {name:<24} {help}") @@ -247,9 +260,6 @@ def showhelp(config: Config) -> None: tw.line("warning : " + warningreport.message, red=True) 
-conftest_options = [("pytest_plugins", "list of plugin names to load")] - - def getpluginversioninfo(config: Config) -> list[str]: lines = [] plugininfo = config.pluginmanager.list_plugin_distinfo() diff --git a/src/_pytest/hookspec.py b/src/_pytest/hookspec.py index 12653ea11fe..dab3fb698a2 100644 --- a/src/_pytest/hookspec.py +++ b/src/_pytest/hookspec.py @@ -98,13 +98,13 @@ def pytest_plugin_registered( @hookspec(historic=True) def pytest_addoption(parser: Parser, pluginmanager: PytestPluginManager) -> None: - """Register argparse-style options and ini-style config values, + """Register argparse-style options and config-style config values, called once at the beginning of a test run. :param parser: To add command line options, call :py:func:`parser.addoption(...) `. - To add ini-file values call :py:func:`parser.addini(...) + To add config-file values call :py:func:`parser.addini(...) `. :param pluginmanager: @@ -119,7 +119,7 @@ def pytest_addoption(parser: Parser, pluginmanager: PytestPluginManager) -> None retrieve the value of a command line option. - :py:func:`config.getini(name) ` to retrieve - a value read from an ini-style file. + a value read from a configuration file. The config object is passed around on many internal objects via the ``.config`` attribute or can be retrieved as the ``pytestconfig`` fixture. @@ -251,8 +251,8 @@ def pytest_collection(session: Session) -> object | None: 1. ``pytest_deselected(items)`` for any deselected items (may be called multiple times) - 3. ``pytest_collection_finish(session)`` - 4. Set ``session.items`` to the list of collected items + 3. Set ``session.items`` to the list of collected items + 4. ``pytest_collection_finish(session)`` 5. 
Set ``session.testscollected`` to the number of collected items You can implement this hook to only perform some action before collection, @@ -998,13 +998,22 @@ def pytest_assertion_pass(item: Item, lineno: int, orig: str, expl: str) -> None and the pytest introspected assertion information is available in the `expl` string. - This hook must be explicitly enabled by the ``enable_assertion_pass_hook`` - ini-file option: + This hook must be explicitly enabled by the :confval:`enable_assertion_pass_hook` + configuration option: - .. code-block:: ini + .. tab:: toml - [pytest] - enable_assertion_pass_hook=true + .. code-block:: toml + + [pytest] + enable_assertion_pass_hook = true + + .. tab:: ini + + .. code-block:: ini + + [pytest] + enable_assertion_pass_hook = true You need to **clean the .pyc** files in your project directory and interpreter libraries when enabling this option, as assertions will require to be re-written. diff --git a/src/_pytest/junitxml.py b/src/_pytest/junitxml.py index dc35e3aac15..ae8d2b94d36 100644 --- a/src/_pytest/junitxml.py +++ b/src/_pytest/junitxml.py @@ -674,8 +674,11 @@ def pytest_sessionfinish(self) -> None: testsuites.append(suite_node) logfile.write(ET.tostring(testsuites, encoding="unicode")) - def pytest_terminal_summary(self, terminalreporter: TerminalReporter) -> None: - terminalreporter.write_sep("-", f"generated xml file: {self.logfile}") + def pytest_terminal_summary( + self, terminalreporter: TerminalReporter, config: pytest.Config + ) -> None: + if config.get_verbosity() >= 0: + terminalreporter.write_sep("-", f"generated xml file: {self.logfile}") def add_global_property(self, name: str, value: object) -> None: __tracebackhide__ = True diff --git a/src/_pytest/main.py b/src/_pytest/main.py index dac084b553a..9bc930df8e8 100644 --- a/src/_pytest/main.py +++ b/src/_pytest/main.py @@ -32,6 +32,7 @@ from _pytest.config import hookimpl from _pytest.config import PytestPluginManager from _pytest.config import UsageError +from 
_pytest.config.argparsing import OverrideIniAction from _pytest.config.argparsing import Parser from _pytest.config.compat import PathAwareHookProxy from _pytest.outcomes import exit @@ -39,6 +40,7 @@ from _pytest.pathlib import bestrelpath from _pytest.pathlib import fnmatch_ex from _pytest.pathlib import safe_exists +from _pytest.pathlib import samefile_nofollow from _pytest.pathlib import scandir from _pytest.reports import CollectReport from _pytest.reports import TestReport @@ -54,7 +56,7 @@ def pytest_addoption(parser: Parser) -> None: - group = parser.getgroup("general", "Running and selection options") + group = parser.getgroup("general") group._addoption( # private to use reserved lower-case short option "-x", "--exitfirst", @@ -74,20 +76,47 @@ def pytest_addoption(parser: Parser) -> None: ) group.addoption( "--strict-config", - action="store_true", - help="Any warnings encountered while parsing the `pytest` section of the " - "configuration file raise errors", + action=OverrideIniAction, + ini_option="strict_config", + ini_value="true", + help="Enables the strict_config option", ) group.addoption( "--strict-markers", - action="store_true", - help="Markers not registered in the `markers` section of the configuration " - "file raise errors", + action=OverrideIniAction, + ini_option="strict_markers", + ini_value="true", + help="Enables the strict_markers option", ) group.addoption( "--strict", - action="store_true", - help="(Deprecated) alias to --strict-markers", + action=OverrideIniAction, + ini_option="strict", + ini_value="true", + help="Enables the strict option", + ) + parser.addini( + "strict_config", + "Any warnings encountered while parsing the `pytest` section of the " + "configuration file raise errors", + type="bool", + # None => fallback to `strict`. 
+ default=None, + ) + parser.addini( + "strict_markers", + "Markers not registered in the `markers` section of the configuration " + "file raise errors", + type="bool", + # None => fallback to `strict`. + default=None, + ) + parser.addini( + "strict", + "Enables all strictness options, currently: " + "strict_config, strict_markers, strict_xfail, strict_parametrization_ids", + type="bool", + default=False, ) group = parser.getgroup("pytest-warnings") @@ -774,16 +803,31 @@ def perform_collect( self._collection_cache = {} self.items = [] items: Sequence[nodes.Item | nodes.Collector] = self.items + consider_namespace_packages: bool = self.config.getini( + "consider_namespace_packages" + ) try: initialpaths: list[Path] = [] initialpaths_with_parents: list[Path] = [] - for arg in args: - collection_argument = resolve_collection_argument( + + collection_args = [ + resolve_collection_argument( self.config.invocation_params.dir, arg, + i, as_pypath=self.config.option.pyargs, + consider_namespace_packages=consider_namespace_packages, ) - self._initial_parts.append(collection_argument) + for i, arg in enumerate(args) + ] + + if not self.config.getoption("keepduplicates"): + # Normalize the collection arguments -- remove duplicates and overlaps. + self._initial_parts = normalize_collection_arguments(collection_args) + else: + self._initial_parts = collection_args + + for collection_argument in self._initial_parts: initialpaths.append(collection_argument.path) initialpaths_with_parents.append(collection_argument.path) initialpaths_with_parents.extend(collection_argument.path.parents) @@ -854,6 +898,7 @@ def collect(self) -> Iterator[nodes.Item | nodes.Collector]: argpath = collection_argument.path names = collection_argument.parts + parametrization = collection_argument.parametrization module_name = collection_argument.module_name # resolve_collection_argument() ensures this. 
@@ -931,23 +976,25 @@ def collect(self) -> Iterator[nodes.Item | nodes.Collector]: is_match = node.path == matchparts[0] if sys.platform == "win32" and not is_match: # In case the file paths do not match, fallback to samefile() to - # account for short-paths on Windows (#11895). - same_file = os.path.samefile(node.path, matchparts[0]) - # We don't want to match links to the current node, - # otherwise we would match the same file more than once (#12039). - is_match = same_file and ( - os.path.islink(node.path) - == os.path.islink(matchparts[0]) - ) + # account for short-paths on Windows (#11895). But use a version + # which doesn't resolve symlinks, otherwise we might match the + # same file more than once (#12039). + is_match = samefile_nofollow(node.path, matchparts[0]) # Name part e.g. `TestIt` in `/a/b/test_file.py::TestIt::test_it`. else: - # TODO: Remove parametrized workaround once collection structure contains - # parametrization. - is_match = ( - node.name == matchparts[0] - or node.name.split("[")[0] == matchparts[0] - ) + if len(matchparts) == 1: + # This the last part, one parametrization goes. + if parametrization is not None: + # A parametrized arg must match exactly. + is_match = node.name == matchparts[0] + parametrization + else: + # A non-parameterized arg matches all parametrizations (if any). + # TODO: Remove the hacky split once the collection structure + # contains parametrization. + is_match = node.name.split("[")[0] == matchparts[0] + else: + is_match = node.name == matchparts[0] if is_match: work.append((node, matchparts[1:])) any_matched_in_collector = True @@ -968,12 +1015,9 @@ def genitems(self, node: nodes.Item | nodes.Collector) -> Iterator[nodes.Item]: yield node else: assert isinstance(node, nodes.Collector) - keepduplicates = self.config.getoption("keepduplicates") # For backward compat, dedup only applies to files. 
- handle_dupes = not (keepduplicates and isinstance(node, nodes.File)) + handle_dupes = not isinstance(node, nodes.File) rep, duplicate = self._collect_one_node(node, handle_dupes) - if duplicate and not keepduplicates: - return if rep.passed: for subnode in rep.result: yield from self.genitems(subnode) @@ -981,7 +1025,9 @@ def genitems(self, node: nodes.Item | nodes.Collector) -> Iterator[nodes.Item]: node.ihook.pytest_collectreport(report=rep) -def search_pypath(module_name: str) -> str | None: +def search_pypath( + module_name: str, *, consider_namespace_packages: bool = False +) -> str | None: """Search sys.path for the given a dotted module name, and return its file system path if found.""" try: @@ -991,13 +1037,29 @@ def search_pypath(module_name: str) -> str | None: # ValueError: not a module name except (AttributeError, ImportError, ValueError): return None - if spec is None or spec.origin is None or spec.origin == "namespace": + + if spec is None: return None - elif spec.submodule_search_locations: - return os.path.dirname(spec.origin) - else: + + if ( + spec.submodule_search_locations is None + or len(spec.submodule_search_locations) == 0 + ): + # Must be a simple module. return spec.origin + if consider_namespace_packages: + # If submodule_search_locations is set, it's a package (regular or namespace). + # Typically there is a single entry, but documentation claims it can be empty too + # (e.g. if the package has no physical location). 
+ return spec.submodule_search_locations[0] + + if spec.origin is None: + # This is only the case for namespace packages + return None + + return os.path.dirname(spec.origin) + @dataclasses.dataclass(frozen=True) class CollectionArgument: @@ -1005,11 +1067,18 @@ class CollectionArgument: path: Path parts: Sequence[str] + parametrization: str | None module_name: str | None + original_index: int def resolve_collection_argument( - invocation_path: Path, arg: str, *, as_pypath: bool = False + invocation_path: Path, + arg: str, + arg_index: int, + *, + as_pypath: bool = False, + consider_namespace_packages: bool = False, ) -> CollectionArgument: """Parse path arguments optionally containing selection parts and return (fspath, names). @@ -1029,7 +1098,7 @@ def resolve_collection_argument( When as_pypath is True, expects that the command-line argument actually contains module paths instead of file-system paths: - "pkg.tests.test_foo::TestClass::test_foo" + "pkg.tests.test_foo::TestClass::test_foo[a,b]" In which case we search sys.path for a matching module, and then return the *path* to the found module, which may look like this: @@ -1037,19 +1106,23 @@ def resolve_collection_argument( CollectionArgument( path=Path("/home/u/myvenv/lib/site-packages/pkg/tests/test_foo.py"), parts=["TestClass", "test_foo"], + parametrization="[a,b]", module_name="pkg.tests.test_foo", ) If the path doesn't exist, raise UsageError. If the path is a directory and selection parts are present, raise UsageError. 
""" - base, squacket, rest = str(arg).partition("[") + base, squacket, rest = arg.partition("[") strpath, *parts = base.split("::") - if parts: - parts[-1] = f"{parts[-1]}{squacket}{rest}" + if squacket and not parts: + raise UsageError(f"path cannot contain [] parametrization: {arg}") + parametrization = f"{squacket}{rest}" if squacket else None module_name = None if as_pypath: - pyarg_strpath = search_pypath(strpath) + pyarg_strpath = search_pypath( + strpath, consider_namespace_packages=consider_namespace_packages + ) if pyarg_strpath is not None: module_name = strpath strpath = pyarg_strpath @@ -1072,5 +1145,59 @@ def resolve_collection_argument( return CollectionArgument( path=fspath, parts=parts, + parametrization=parametrization, module_name=module_name, + original_index=arg_index, + ) + + +def is_collection_argument_subsumed_by( + arg: CollectionArgument, by: CollectionArgument +) -> bool: + """Check if `arg` is subsumed (contained) by `by`.""" + # First check path subsumption. + if by.path != arg.path: + # `by` subsumes `arg` if `by` is a parent directory of `arg` and has no + # parts (collects everything in that directory). + if not by.parts: + return arg.path.is_relative_to(by.path) + return False + # Paths are equal, check parts. + # For example: ("TestClass",) is a prefix of ("TestClass", "test_method"). + if len(by.parts) > len(arg.parts) or arg.parts[: len(by.parts)] != by.parts: + return False + # Paths and parts are equal, check parametrization. + # A `by` without parametrization (None) matches everything, e.g. + # `pytest x.py::test_it` matches `x.py::test_it[0]`. Otherwise must be + # exactly equal. + if by.parametrization is not None and by.parametrization != arg.parametrization: + return False + return True + + +def normalize_collection_arguments( + collection_args: Sequence[CollectionArgument], +) -> list[CollectionArgument]: + """Normalize collection arguments to eliminate overlapping paths and parts. 
+ + Detects when collection arguments overlap in either paths or parts and only + keeps the shorter prefix, or the earliest argument if duplicate, preserving + order. The result is prefix-free. + """ + # A quadratic algorithm is not acceptable since large inputs are possible. + # So this uses an O(n*log(n)) algorithm which takes advantage of the + # property that after sorting, a collection argument will immediately + # precede collection arguments it subsumes. An O(n) algorithm is not worth + # it. + collection_args_sorted = sorted( + collection_args, + key=lambda arg: (arg.path, arg.parts, arg.parametrization or ""), ) + normalized: list[CollectionArgument] = [] + last_kept = None + for arg in collection_args_sorted: + if last_kept is None or not is_collection_argument_subsumed_by(arg, last_kept): + normalized.append(arg) + last_kept = arg + normalized.sort(key=lambda arg: arg.original_index) + return normalized diff --git a/src/_pytest/mark/__init__.py b/src/_pytest/mark/__init__.py index 068c7410a46..841d7811fdd 100644 --- a/src/_pytest/mark/__init__.py +++ b/src/_pytest/mark/__init__.py @@ -7,11 +7,9 @@ from collections.abc import Iterable from collections.abc import Set as AbstractSet import dataclasses -from typing import Optional from typing import TYPE_CHECKING from .expression import Expression -from .expression import ParseError from .structures import _HiddenParam from .structures import EMPTY_PARAMETERSET_OPTION from .structures import get_empty_parameterset_mark @@ -45,7 +43,7 @@ ] -old_mark_config_key = StashKey[Optional[Config]]() +old_mark_config_key = StashKey[Config | None]() def param( @@ -275,8 +273,10 @@ def deselect_by_mark(items: list[Item], config: Config) -> None: def _parse_expression(expr: str, exc_message: str) -> Expression: try: return Expression.compile(expr) - except ParseError as e: - raise UsageError(f"{exc_message}: {expr}: {e}") from None + except SyntaxError as e: + raise UsageError( + f"{exc_message}: {e.text}: at column 
{e.offset}: {e.msg}" + ) from None def pytest_collection_modifyitems(items: list[Item], config: Config) -> None: diff --git a/src/_pytest/mark/expression.py b/src/_pytest/mark/expression.py index 743a46bcc17..3bdbd03c2b5 100644 --- a/src/_pytest/mark/expression.py +++ b/src/_pytest/mark/expression.py @@ -16,8 +16,8 @@ - Empty expression evaluates to False. - ident evaluates to True or False according to a provided matcher function. -- or/and/not evaluate according to the usual boolean semantics. - ident with parentheses and keyword arguments evaluates to True or False according to a provided matcher function. +- or/and/not evaluate according to the usual boolean semantics. """ from __future__ import annotations @@ -31,6 +31,8 @@ import keyword import re import types +from typing import Final +from typing import final from typing import Literal from typing import NoReturn from typing import overload @@ -39,10 +41,13 @@ __all__ = [ "Expression", - "ParseError", + "ExpressionMatcher", ] +FILE_NAME: Final = "" + + class TokenType(enum.Enum): LPAREN = "left parenthesis" RPAREN = "right parenthesis" @@ -64,25 +69,11 @@ class Token: pos: int -class ParseError(Exception): - """The expression contains invalid syntax. - - :param column: The column in the line where the error occurred (1-based). - :param message: A description of the error. 
- """ - - def __init__(self, column: int, message: str) -> None: - self.column = column - self.message = message - - def __str__(self) -> str: - return f"at column {self.column}: {self.message}" - - class Scanner: - __slots__ = ("current", "tokens") + __slots__ = ("current", "input", "tokens") def __init__(self, input: str) -> None: + self.input = input self.tokens = self.lex(input) self.current = next(self.tokens) @@ -106,15 +97,15 @@ def lex(self, input: str) -> Iterator[Token]: elif (quote_char := input[pos]) in ("'", '"'): end_quote_pos = input.find(quote_char, pos + 1) if end_quote_pos == -1: - raise ParseError( - pos + 1, + raise SyntaxError( f'closing quote "{quote_char}" is missing', + (FILE_NAME, 1, pos + 1, input), ) value = input[pos : end_quote_pos + 1] if (backslash_pos := input.find("\\")) != -1: - raise ParseError( - backslash_pos + 1, + raise SyntaxError( r'escaping with "\" not supported in marker expression', + (FILE_NAME, 1, backslash_pos + 1, input), ) yield Token(TokenType.STRING, value, pos) pos += len(value) @@ -132,9 +123,9 @@ def lex(self, input: str) -> Iterator[Token]: yield Token(TokenType.IDENT, value, pos) pos += len(value) else: - raise ParseError( - pos + 1, + raise SyntaxError( f'unexpected character "{input[pos]}"', + (FILE_NAME, 1, pos + 1, input), ) yield Token(TokenType.EOF, "", pos) @@ -157,12 +148,12 @@ def accept(self, type: TokenType, *, reject: bool = False) -> Token | None: return None def reject(self, expected: Sequence[TokenType]) -> NoReturn: - raise ParseError( - self.current.pos + 1, + raise SyntaxError( "expected {}; got {}".format( " OR ".join(type.value for type in expected), self.current.type.value, ), + (FILE_NAME, 1, self.current.pos + 1, self.input), ) @@ -223,14 +214,14 @@ def not_expr(s: Scanner) -> ast.expr: def single_kwarg(s: Scanner) -> ast.keyword: keyword_name = s.accept(TokenType.IDENT, reject=True) if not keyword_name.value.isidentifier(): - raise ParseError( - keyword_name.pos + 1, + raise 
SyntaxError( f"not a valid python identifier {keyword_name.value}", + (FILE_NAME, 1, keyword_name.pos + 1, s.input), ) if keyword.iskeyword(keyword_name.value): - raise ParseError( - keyword_name.pos + 1, + raise SyntaxError( f"unexpected reserved python keyword `{keyword_name.value}`", + (FILE_NAME, 1, keyword_name.pos + 1, s.input), ) s.accept(TokenType.EQUAL, reject=True) @@ -245,9 +236,9 @@ def single_kwarg(s: Scanner) -> ast.keyword: elif value_token.value in BUILTIN_MATCHERS: value = BUILTIN_MATCHERS[value_token.value] else: - raise ParseError( - value_token.pos + 1, + raise SyntaxError( f'unexpected character/s "{value_token.value}"', + (FILE_NAME, 1, value_token.pos + 1, s.input), ) ret = ast.keyword(keyword_name.value, ast.Constant(value)) @@ -261,13 +252,36 @@ def all_kwargs(s: Scanner) -> list[ast.keyword]: return ret -class MatcherCall(Protocol): +class ExpressionMatcher(Protocol): + """A callable which, given an identifier and optional kwargs, should return + whether it matches in an :class:`Expression` evaluation. + + Should be prepared to handle arbitrary strings as input. + + If no kwargs are provided, the expression of the form `foo`. + If kwargs are provided, the expression is of the form `foo(1, b=True, "s")`. + + If the expression is not supported (e.g. don't want to accept the kwargs + syntax variant), should raise :class:`~pytest.UsageError`. + + Example:: + + def matcher(name: str, /, **kwargs: str | int | bool | None) -> bool: + # Match `cat`. + if name == "cat" and not kwargs: + return True + # Match `dog(barks=True)`. + if name == "dog" and kwargs == {"barks": False}: + return True + return False + """ + def __call__(self, name: str, /, **kwargs: str | int | bool | None) -> bool: ... 
@dataclasses.dataclass class MatcherNameAdapter: - matcher: MatcherCall + matcher: ExpressionMatcher name: str def __bool__(self) -> bool: @@ -280,7 +294,7 @@ def __call__(self, **kwargs: str | int | bool | None) -> bool: class MatcherAdapter(Mapping[str, MatcherNameAdapter]): """Adapts a matcher function to a locals mapping as required by eval().""" - def __init__(self, matcher: MatcherCall) -> None: + def __init__(self, matcher: ExpressionMatcher) -> None: self.matcher = matcher def __getitem__(self, key: str) -> MatcherNameAdapter: @@ -293,39 +307,47 @@ def __len__(self) -> int: raise NotImplementedError() +@final class Expression: """A compiled match expression as used by -k and -m. The expression can be evaluated against different matchers. """ - __slots__ = ("code",) + __slots__ = ("_code", "input") - def __init__(self, code: types.CodeType) -> None: - self.code = code + def __init__(self, input: str, code: types.CodeType) -> None: + #: The original input line, as a string. + self.input: Final = input + self._code: Final = code @classmethod def compile(cls, input: str) -> Expression: """Compile a match expression. :param input: The input expression - one line. + + :raises SyntaxError: If the expression is malformed. """ astexpr = expression(Scanner(input)) - code: types.CodeType = compile( + code = compile( astexpr, filename="", mode="eval", ) - return Expression(code) + return Expression(input, code) - def evaluate(self, matcher: MatcherCall) -> bool: + def evaluate(self, matcher: ExpressionMatcher) -> bool: """Evaluate the match expression. :param matcher: - Given an identifier, should return whether it matches or not. - Should be prepared to handle arbitrary strings as input. + A callback which determines whether an identifier matches or not. + See the :class:`ExpressionMatcher` protocol for details and example. :returns: Whether the expression matches or not. + + :raises UsageError: + If the matcher doesn't support the expression. 
Cannot happen if the + matcher supports all expressions. """ - ret: bool = bool(eval(self.code, {"__builtins__": {}}, MatcherAdapter(matcher))) - return ret + return bool(eval(self._code, {"__builtins__": {}}, MatcherAdapter(matcher))) diff --git a/src/_pytest/mark/structures.py b/src/_pytest/mark/structures.py index f9261076ad0..97842fc5704 100644 --- a/src/_pytest/mark/structures.py +++ b/src/_pytest/mark/structures.py @@ -18,7 +18,6 @@ from typing import overload from typing import TYPE_CHECKING from typing import TypeVar -from typing import Union import warnings from .._code import getfslineno @@ -61,7 +60,7 @@ def get_empty_parameterset_mark( argslisting = ", ".join(argnames) - fs, lineno = getfslineno(func) + _fs, lineno = getfslineno(func) reason = f"got empty parameter set for ({argslisting})" requested_mark = config.getini(EMPTY_PARAMETERSET_OPTION) if requested_mark in ("", None, "skip"): @@ -102,7 +101,7 @@ class ParameterSet(NamedTuple): ], ) # ParameterSet(values=(1, 2, 3), marks=(), id=None) - # ParameterSet(values=(2, 2, 3), marks=(), id="everything") + # ParameterSet(values=(40, 2, 42), marks=(), id="everything") """ values: Sequence[object | NotSetType] @@ -302,7 +301,7 @@ def combined_with(self, other: Mark) -> Mark: # A generic parameter designating an object to which a Mark may # be applied -- a test function (callable) or class. # Note: a lambda is not allowed, but this can't be represented. -Markable = TypeVar("Markable", bound=Union[Callable[..., object], type]) +Markable = TypeVar("Markable", bound=Callable[..., object] | type) @dataclasses.dataclass @@ -396,7 +395,7 @@ def __call__(self, *args: object, **kwargs: object): # For staticmethods/classmethods, the marks are eventually fetched from the # function object, not the descriptor, so unwrap. 
unwrapped_func = func - if isinstance(func, (staticmethod, classmethod)): + if isinstance(func, staticmethod | classmethod): unwrapped_func = func.__func__ if len(args) == 1 and (istestfunc(unwrapped_func) or is_class): store_mark(unwrapped_func, self.mark, stacklevel=3) @@ -497,7 +496,7 @@ def __call__(self, arg: Markable) -> Markable: ... @overload def __call__( self, - condition: str | bool = False, + condition: str | bool = True, *conditions: str | bool, reason: str = ..., run: bool = ..., @@ -581,17 +580,20 @@ def __getattr__(self, name: str) -> MarkDecorator: # If the name is not in the set of known marks after updating, # then it really is time to issue a warning or an error. if name not in self._markers: - if self._config.option.strict_markers or self._config.option.strict: - fail( - f"{name!r} not found in `markers` configuration option", - pytrace=False, - ) - # Raise a specific error for common misspellings of "parametrize". if name in ["parameterize", "parametrise", "parameterise"]: __tracebackhide__ = True fail(f"Unknown '{name}' mark, did you mean 'parametrize'?") + strict_markers = self._config.getini("strict_markers") + if strict_markers is None: + strict_markers = self._config.getini("strict") + if strict_markers: + fail( + f"{name!r} not found in `markers` configuration option", + pytrace=False, + ) + warnings.warn( f"Unknown pytest.mark.{name} - is this a typo? 
You can register " "custom marks to avoid this warning - for details, see " diff --git a/src/_pytest/monkeypatch.py b/src/_pytest/monkeypatch.py index 1285e571551..07cc3fc4b0f 100644 --- a/src/_pytest/monkeypatch.py +++ b/src/_pytest/monkeypatch.py @@ -8,6 +8,7 @@ from collections.abc import MutableMapping from contextlib import contextmanager import os +from pathlib import Path import re import sys from typing import Any @@ -16,6 +17,7 @@ from typing import TypeVar import warnings +from _pytest.deprecated import MONKEYPATCH_LEGACY_NAMESPACE_PACKAGES from _pytest.fixtures import fixture from _pytest.warning_types import PytestWarning @@ -346,8 +348,26 @@ def syspath_prepend(self, path) -> None: # https://github.com/pypa/setuptools/blob/d8b901bc/docs/pkg_resources.txt#L162-L171 # this is only needed when pkg_resources was already loaded by the namespace package if "pkg_resources" in sys.modules: + import pkg_resources from pkg_resources import fixup_namespace_packages + # Only issue deprecation warning if this call would actually have an + # effect for this specific path. 
+ if ( + hasattr(pkg_resources, "_namespace_packages") + and pkg_resources._namespace_packages + ): + path_obj = Path(str(path)) + for ns_pkg in pkg_resources._namespace_packages: + if ns_pkg is None: + continue + ns_pkg_path = path_obj / ns_pkg.replace(".", os.sep) + if ns_pkg_path.is_dir(): + warnings.warn( + MONKEYPATCH_LEGACY_NAMESPACE_PACKAGES, stacklevel=2 + ) + break + fixup_namespace_packages(str(path)) # A call to syspathinsert() usually means that the caller wants to diff --git a/src/_pytest/nodes.py b/src/_pytest/nodes.py index 6d39de95f5b..6690f6ab1f8 100644 --- a/src/_pytest/nodes.py +++ b/src/_pytest/nodes.py @@ -8,7 +8,6 @@ from collections.abc import MutableMapping from functools import cached_property from functools import lru_cache -from inspect import signature import os import pathlib from pathlib import Path @@ -29,6 +28,7 @@ from _pytest._code.code import Traceback from _pytest._code.code import TracebackStyle from _pytest.compat import LEGACY_PATH +from _pytest.compat import signature from _pytest.config import Config from _pytest.config import ConftestImportFailure from _pytest.config.compat import _check_path diff --git a/src/_pytest/outcomes.py b/src/_pytest/outcomes.py index 68ba0543365..766be95c0f7 100644 --- a/src/_pytest/outcomes.py +++ b/src/_pytest/outcomes.py @@ -3,13 +3,10 @@ from __future__ import annotations -from collections.abc import Callable import sys from typing import Any -from typing import cast +from typing import ClassVar from typing import NoReturn -from typing import Protocol -from typing import TypeVar from .warning_types import PytestDeprecationWarning @@ -77,35 +74,11 @@ def __init__( super().__init__(msg) -# We need a callable protocol to add attributes, for discussion see -# https://github.com/python/mypy/issues/2087. 
- -_F = TypeVar("_F", bound=Callable[..., object]) -_ET = TypeVar("_ET", bound=type[BaseException]) - - -class _WithException(Protocol[_F, _ET]): - Exception: _ET - __call__: _F - - -def _with_exception(exception_type: _ET) -> Callable[[_F], _WithException[_F, _ET]]: - def decorate(func: _F) -> _WithException[_F, _ET]: - func_with_exception = cast(_WithException[_F, _ET], func) - func_with_exception.Exception = exception_type - return func_with_exception - - return decorate - - -# Exposed helper methods. +class XFailed(Failed): + """Raised from an explicit call to pytest.xfail().""" -@_with_exception(Exit) -def exit( - reason: str = "", - returncode: int | None = None, -) -> NoReturn: +class _Exit: """Exit testing process. :param reason: @@ -113,21 +86,24 @@ def exit( only because `msg` is deprecated. :param returncode: - Return code to be used when exiting pytest. None means the same as ``0`` (no error), same as :func:`sys.exit`. + Return code to be used when exiting pytest. None means the same as ``0`` (no error), + same as :func:`sys.exit`. :raises pytest.exit.Exception: The exception that is raised. """ - __tracebackhide__ = True - raise Exit(reason, returncode) + Exception: ClassVar[type[Exit]] = Exit -@_with_exception(Skipped) -def skip( - reason: str = "", - *, - allow_module_level: bool = False, -) -> NoReturn: + def __call__(self, reason: str = "", returncode: int | None = None) -> NoReturn: + __tracebackhide__ = True + raise Exit(msg=reason, returncode=returncode) + + +exit: _Exit = _Exit() + + +class _Skip: """Skip an executing test with the given message. This function should be called only during testing (setup, call or teardown) or @@ -155,12 +131,18 @@ def skip( Similarly, use the ``# doctest: +SKIP`` directive (see :py:data:`doctest.SKIP`) to skip a doctest statically. 
""" - __tracebackhide__ = True - raise Skipped(msg=reason, allow_module_level=allow_module_level) + Exception: ClassVar[type[Skipped]] = Skipped + + def __call__(self, reason: str = "", allow_module_level: bool = False) -> NoReturn: + __tracebackhide__ = True + raise Skipped(msg=reason, allow_module_level=allow_module_level) + + +skip: _Skip = _Skip() -@_with_exception(Failed) -def fail(reason: str = "", pytrace: bool = True) -> NoReturn: + +class _Fail: """Explicitly fail an executing test with the given message. :param reason: @@ -173,16 +155,18 @@ def fail(reason: str = "", pytrace: bool = True) -> NoReturn: :raises pytest.fail.Exception: The exception that is raised. """ - __tracebackhide__ = True - raise Failed(msg=reason, pytrace=pytrace) + Exception: ClassVar[type[Failed]] = Failed -class XFailed(Failed): - """Raised from an explicit call to pytest.xfail().""" + def __call__(self, reason: str = "", pytrace: bool = True) -> NoReturn: + __tracebackhide__ = True + raise Failed(msg=reason, pytrace=pytrace) -@_with_exception(XFailed) -def xfail(reason: str = "") -> NoReturn: +fail: _Fail = _Fail() + + +class _XFail: """Imperatively xfail an executing test or setup function with the given reason. This function should be called only during testing (setup, call or teardown). @@ -201,8 +185,15 @@ def xfail(reason: str = "") -> NoReturn: :raises pytest.xfail.Exception: The exception that is raised. 
""" - __tracebackhide__ = True - raise XFailed(reason) + + Exception: ClassVar[type[XFailed]] = XFailed + + def __call__(self, reason: str = "") -> NoReturn: + __tracebackhide__ = True + raise XFailed(msg=reason) + + +xfail: _XFail = _XFail() def importorskip( diff --git a/src/_pytest/pathlib.py b/src/_pytest/pathlib.py index b69e85404e7..cd15434605d 100644 --- a/src/_pytest/pathlib.py +++ b/src/_pytest/pathlib.py @@ -348,7 +348,7 @@ def cleanup_candidates(root: Path, prefix: str, keep: int) -> Iterator[Path]: entries = find_prefixed(root, prefix) entries, entries2 = itertools.tee(entries) numbers = map(parse_num, extract_suffixes(entries2, prefix)) - for entry, number in zip(entries, numbers): + for entry, number in zip(entries, numbers, strict=True): if number <= max_delete: yield Path(entry) @@ -1053,3 +1053,11 @@ def safe_exists(p: Path) -> bool: # ValueError: stat: path too long for Windows # OSError: [WinError 123] The filename, directory name, or volume label syntax is incorrect return False + + +def samefile_nofollow(p1: Path, p2: Path) -> bool: + """Test whether two paths reference the same actual file or directory. + + Unlike Path.samefile(), does not resolve symlinks. + """ + return os.path.samestat(p1.lstat(), p2.lstat()) diff --git a/src/_pytest/pytester.py b/src/_pytest/pytester.py index 38f4643bd8b..1cd5f05dd7e 100644 --- a/src/_pytest/pytester.py +++ b/src/_pytest/pytester.py @@ -682,9 +682,11 @@ def __init__( self._name = name self._path: Path = tmp_path_factory.mktemp(name, numbered=True) #: A list of plugins to use with :py:meth:`parseconfig` and - #: :py:meth:`runpytest`. Initially this is an empty list but plugins can - #: be added to the list. The type of items to add to the list depends on - #: the method using them so refer to them for details. + #: :py:meth:`runpytest`. Initially this is an empty list but plugins can + #: be added to the list. 
+ #: + #: When running in subprocess mode, specify plugins by name (str) - adding + #: plugin objects directly is not supported. self.plugins: list[str | _PluggyPlugin] = [] self._sys_path_snapshot = SysPathsSnapshot() self._sys_modules_snapshot = self.__take_sys_modules_snapshot() @@ -835,6 +837,16 @@ def makeini(self, source: str) -> Path: """ return self.makefile(".ini", tox=source) + def maketoml(self, source: str) -> Path: + """Write a pytest.toml file. + + :param source: The contents. + :returns: The pytest.toml file. + + .. versionadded:: 9.0 + """ + return self.makefile(".toml", pytest=source) + def getinicfg(self, source: str) -> SectionWrapper: """Return the pytest section from the tox.ini config file.""" p = self.makeini(source) @@ -1092,6 +1104,8 @@ def inline_run( Typically we reraise keyboard interrupts from the child run. If True, the KeyboardInterrupt exception is captured. """ + from _pytest.unraisableexception import gc_collect_iterations_key + # (maybe a cpython bug?) the importlib cache sometimes isn't updated # properly between file creation and inline_run (especially if imports # are interspersed with file creation) @@ -1115,12 +1129,16 @@ def inline_run( rec = [] - class Collect: + class PytesterHelperPlugin: @staticmethod def pytest_configure(config: Config) -> None: rec.append(self.make_hook_recorder(config.pluginmanager)) - plugins.append(Collect()) + # The unraisable plugin GC collect slows down inline + # pytester runs too much. 
+ config.stash[gc_collect_iterations_key] = 0 + + plugins.append(PytesterHelperPlugin()) ret = main([str(x) for x in args], plugins=plugins) if len(rec) == 1: reprec = rec.pop() @@ -1223,10 +1241,9 @@ def parseconfig(self, *args: str | os.PathLike[str]) -> Config: """ import _pytest.config - new_args = self._ensure_basetemp(args) - new_args = [str(x) for x in new_args] + new_args = [str(x) for x in self._ensure_basetemp(args)] - config = _pytest.config._prepareconfig(new_args, self.plugins) # type: ignore[arg-type] + config = _pytest.config._prepareconfig(new_args, self.plugins) # we don't know what the test will do with this half-setup config # object and thus we make sure it gets unconfigured properly in any # case (otherwise capturing could still be active, for example) @@ -1415,7 +1432,6 @@ def run( stdin=stdin, stdout=f1, stderr=f2, - close_fds=(sys.platform != "win32"), ) if popen.stdin is not None: popen.stdin.close() @@ -1436,6 +1452,8 @@ def handle_timeout() -> None: ret = popen.wait(timeout) except subprocess.TimeoutExpired: handle_timeout() + f1.flush() + f2.flush() with p1.open(encoding="utf8") as f1, p2.open(encoding="utf8") as f2: out = f1.read().splitlines() @@ -1488,9 +1506,13 @@ def runpytest_subprocess( __tracebackhide__ = True p = make_numbered_dir(root=self.path, prefix="runpytest-", mode=0o700) args = (f"--basetemp={p}", *args) - plugins = [x for x in self.plugins if isinstance(x, str)] - if plugins: - args = ("-p", plugins[0], *args) + for plugin in self.plugins: + if not isinstance(plugin, str): + raise ValueError( + f"Specifying plugins as objects is not supported in pytester subprocess mode; " + f"specify by name instead: {plugin}" + ) + args = ("-p", plugin, *args) args = self._getpytestargs() + args return self.run(*args, timeout=timeout) diff --git a/src/_pytest/python.py b/src/_pytest/python.py index 82aab85a300..e63751877a4 100644 --- a/src/_pytest/python.py +++ b/src/_pytest/python.py @@ -21,8 +21,10 @@ import os from pathlib import 
Path import re +import textwrap import types from typing import Any +from typing import cast from typing import final from typing import Literal from typing import NoReturn @@ -73,6 +75,7 @@ from _pytest.scope import Scope from _pytest.stash import StashKey from _pytest.warning_types import PytestCollectionWarning +from _pytest.warning_types import PytestReturnNotNoneWarning if TYPE_CHECKING: @@ -106,6 +109,13 @@ def pytest_addoption(parser: Parser) -> None: help="Disable string escape non-ASCII characters, might cause unwanted " "side effects(use at your own risk)", ) + parser.addini( + "strict_parametrization_ids", + type="bool", + # None => fallback to `strict`. + default=None, + help="Emit an error if non-unique parameter set IDs are detected", + ) def pytest_generate_tests(metafunc: Metafunc) -> None: @@ -157,12 +167,12 @@ def pytest_pyfunc_call(pyfuncitem: Function) -> object | None: if hasattr(result, "__await__") or hasattr(result, "__aiter__"): async_fail(pyfuncitem.nodeid) elif result is not None: - fail( - ( - f"Expected None, but test returned {result!r}. " - "Did you mean to use `assert` instead of `return`?" - ), - pytrace=False, + warnings.warn( + PytestReturnNotNoneWarning( + f"Test functions should return None, but {pyfuncitem.nodeid} returned {type(result)!r}.\n" + "Did you mean to use `assert` instead of `return`?\n" + "See https://docs.pytest.org/en/stable/how-to/assert.html#return-not-none for more information." + ) ) return True @@ -209,7 +219,7 @@ def pytest_pycollect_makemodule(module_path: Path, parent) -> Module: def pytest_pycollect_makeitem( collector: Module | Class, name: str, obj: object ) -> None | nodes.Item | nodes.Collector | list[nodes.Item | nodes.Collector]: - assert isinstance(collector, (Class, Module)), type(collector) + assert isinstance(collector, Class | Module), type(collector) # Nothing was collected elsewhere, let's do it here. 
if safe_isclass(obj): if collector.istestclass(obj, name): @@ -357,7 +367,7 @@ def classnamefilter(self, name: str) -> bool: def istestfunction(self, obj: object, name: str) -> bool: if self.funcnamefilter(name) or self.isnosetest(obj): - if isinstance(obj, (staticmethod, classmethod)): + if isinstance(obj, staticmethod | classmethod): # staticmethods and classmethods need to be unwrapped. obj = safe_getattr(obj, "__func__", False) return callable(obj) and fixtures.getfixturemarker(obj) is None @@ -373,7 +383,7 @@ def istestclass(self, obj: object, name: str) -> bool: def _matches_prefix_or_glob_option(self, option_name: str, name: str) -> bool: """Check if the given name matches the prefix or glob-pattern defined - in ini configuration.""" + in configuration.""" for option in self.config.getini(option_name): if name.startswith(option): return True @@ -877,8 +887,8 @@ class IdMaker: # Optionally, explicit IDs for ParameterSets by index. ids: Sequence[object | None] | None # Optionally, the pytest config. - # Used for controlling ASCII escaping, and for calling the - # :hook:`pytest_make_parametrize_id` hook. + # Used for controlling ASCII escaping, determining parametrization ID + # strictness, and for calling the :hook:`pytest_make_parametrize_id` hook. config: Config | None # Optionally, the ID of the node being parametrized. # Used only for clearer error messages. @@ -891,6 +901,9 @@ def make_unique_parameterset_ids(self) -> list[str | _HiddenParam]: """Make a unique identifier for each ParameterSet, that may be used to identify the parametrization in a node ID. + If strict_parametrization_ids is enabled, and duplicates are detected, + raises CollectError. 
Otherwise makes the IDs unique as follows: + Format is -...-[counter], where prm_x_token is - user-provided id, if given - else an id derived from the value, applicable for certain types @@ -903,6 +916,33 @@ def make_unique_parameterset_ids(self) -> list[str | _HiddenParam]: if len(resolved_ids) != len(set(resolved_ids)): # Record the number of occurrences of each ID. id_counts = Counter(resolved_ids) + + if self._strict_parametrization_ids_enabled(): + parameters = ", ".join(self.argnames) + parametersets = ", ".join( + [saferepr(list(param.values)) for param in self.parametersets] + ) + ids = ", ".join( + id if id is not HIDDEN_PARAM else "" for id in resolved_ids + ) + duplicates = ", ".join( + id if id is not HIDDEN_PARAM else "" + for id, count in id_counts.items() + if count > 1 + ) + msg = textwrap.dedent(f""" + Duplicate parametrization IDs detected, but strict_parametrization_ids is set. + + Test name: {self.nodeid} + Parameters: {parameters} + Parameter sets: {parametersets} + IDs: {ids} + Duplicates: {duplicates} + + You can fix this problem using `@pytest.mark.parametrize(..., ids=...)` or `pytest.param(..., id=...)`. + """).strip() # noqa: E501 + raise nodes.Collector.CollectError(msg) + # Map the ID to its next suffix. id_suffixes: dict[str, int] = defaultdict(int) # Suffix non-unique IDs to make them unique. 
@@ -924,6 +964,14 @@ def make_unique_parameterset_ids(self) -> list[str | _HiddenParam]: ) return resolved_ids + def _strict_parametrization_ids_enabled(self) -> bool: + if self.config is None: + return False + strict_parametrization_ids = self.config.getini("strict_parametrization_ids") + if strict_parametrization_ids is None: + strict_parametrization_ids = self.config.getini("strict") + return cast(bool, strict_parametrization_ids) + def _resolve_ids(self) -> Iterable[str | _HiddenParam]: """Resolve IDs for all ParameterSets (may contain duplicates).""" for idx, parameterset in enumerate(self.parametersets): @@ -943,7 +991,9 @@ def _resolve_ids(self) -> Iterable[str | _HiddenParam]: # ID not provided - generate it. yield "-".join( self._idval(val, argname, idx) - for val, argname in zip(parameterset.values, self.argnames) + for val, argname in zip( + parameterset.values, self.argnames, strict=True + ) ) def _idval(self, val: object, argname: str, idx: int) -> str: @@ -988,9 +1038,9 @@ def _idval_from_hook(self, val: object, argname: str) -> str | None: def _idval_from_value(self, val: object) -> str | None: """Try to make an ID for a parameter in a ParameterSet from its value, if the value type is supported.""" - if isinstance(val, (str, bytes)): + if isinstance(val, str | bytes): return _ascii_escaped_by_config(val, self.config) - elif val is None or isinstance(val, (float, int, bool, complex)): + elif val is None or isinstance(val, float | int | bool | complex): return str(val) elif isinstance(val, re.Pattern): return ascii_escaped(val.pattern) @@ -1078,7 +1128,7 @@ def setmulti( params = self.params.copy() indices = self.indices.copy() arg2scope = dict(self._arg2scope) - for arg, val in zip(argnames, valset): + for arg, val in zip(argnames, valset, strict=True): if arg in params: raise nodes.Collector.CollectError( f"{nodeid}: duplicate parametrization of {arg!r}" @@ -1335,7 +1385,7 @@ def parametrize( newcalls = [] for callspec in self._calls or 
[CallSpec2()]: for param_index, (param_id, param_set) in enumerate( - zip(ids, parametersets) + zip(ids, parametersets, strict=True) ): newcallspec = callspec.setmulti( argnames=argnames, diff --git a/src/_pytest/python_api.py b/src/_pytest/python_api.py index 77b0edc0ac5..bab70aa4a8c 100644 --- a/src/_pytest/python_api.py +++ b/src/_pytest/python_api.py @@ -113,7 +113,7 @@ def _check_type(self) -> None: def _recursive_sequence_map(f, x): """Recursively map a function over a sequence of arbitrary depth""" - if isinstance(x, (list, tuple)): + if isinstance(x, list | tuple): seq_type = type(x) return seq_type(_recursive_sequence_map(f, xi) for xi in x) elif _is_sequence_like(x): @@ -236,6 +236,18 @@ def __repr__(self) -> str: def _repr_compare(self, other_side: Mapping[object, float]) -> list[str]: import math + if len(self.expected) != len(other_side): + return [ + "Impossible to compare mappings with different sizes.", + f"Lengths: {len(self.expected)} and {len(other_side)}", + ] + + if self.expected.keys() != other_side.keys(): + return [ + "comparison failed.", + f"Mappings has different keys: expected {self.expected.keys()} but got {other_side.keys()}", + ] + approx_side_as_map = { k: self._approx_scalar(v) for k, v in self.expected.items() } @@ -244,9 +256,8 @@ def _repr_compare(self, other_side: Mapping[object, float]) -> list[str]: max_abs_diff = -math.inf max_rel_diff = -math.inf different_ids = [] - for (approx_key, approx_value), other_value in zip( - approx_side_as_map.items(), other_side.values() - ): + for approx_key, approx_value in approx_side_as_map.items(): + other_value = other_side[approx_key] if approx_value != other_value: if approx_value.expected is not None and other_value is not None: try: @@ -327,7 +338,7 @@ def _repr_compare(self, other_side: Sequence[float]) -> list[str]: max_rel_diff = -math.inf different_ids = [] for i, (approx_value, other_value) in enumerate( - zip(approx_side_as_map, other_side) + zip(approx_side_as_map, other_side, 
strict=True) ): if approx_value != other_value: try: @@ -365,7 +376,7 @@ def __eq__(self, actual) -> bool: return super().__eq__(actual) def _yield_comparisons(self, actual): - return zip(actual, self.expected) + return zip(actual, self.expected, strict=True) def _check_type(self) -> None: __tracebackhide__ = True @@ -394,7 +405,7 @@ def __repr__(self) -> str: # handle complex numbers, e.g. (inf + 1j). if ( isinstance(self.expected, bool) - or (not isinstance(self.expected, (Complex, Decimal))) + or (not isinstance(self.expected, Complex | Decimal)) or math.isinf(abs(self.expected) or isinstance(self.expected, bool)) ): return str(self.expected) @@ -426,12 +437,9 @@ def is_bool(val: Any) -> bool: # Check if `val` is a native bool or numpy bool. if isinstance(val, bool): return True - try: - import numpy as np - + if np := sys.modules.get("numpy"): return isinstance(val, np.bool_) - except ImportError: - return False + return False asarray = _as_numpy_array(actual) if asarray is not None: @@ -450,8 +458,8 @@ def is_bool(val: Any) -> bool: # __sub__, and __float__ are defined. Also, consider bool to be # non-numeric, even though it has the required arithmetic. if is_bool(self.expected) or not ( - isinstance(self.expected, (Complex, Decimal)) - and isinstance(actual, (Complex, Decimal)) + isinstance(self.expected, Complex | Decimal) + and isinstance(actual, Complex | Decimal) ): return False @@ -474,8 +482,7 @@ def is_bool(val: Any) -> bool: result: bool = abs(self.expected - actual) <= self.tolerance return result - # Ignore type because of https://github.com/python/mypy/issues/4266. 
- __hash__ = None # type: ignore + __hash__ = None @property def tolerance(self): @@ -531,6 +538,25 @@ class ApproxDecimal(ApproxScalar): DEFAULT_ABSOLUTE_TOLERANCE = Decimal("1e-12") DEFAULT_RELATIVE_TOLERANCE = Decimal("1e-6") + def __repr__(self) -> str: + if isinstance(self.rel, float): + rel = Decimal.from_float(self.rel) + else: + rel = self.rel + + if isinstance(self.abs, float): + abs_ = Decimal.from_float(self.abs) + else: + abs_ = self.abs + + tol_str = "???" + if rel is not None and Decimal("1e-3") <= rel <= Decimal("1e3"): + tol_str = f"{rel:.1e}" + elif abs_ is not None: + tol_str = f"{abs_:.1e}" + + return f"{self.expected} ± {tol_str}" + def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase: """Assert that two numbers (or two ordered sequences of numbers) are equal to each other @@ -751,7 +777,7 @@ def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase: cls = ApproxNumpy elif _is_sequence_like(expected): cls = ApproxSequenceLike - elif isinstance(expected, Collection) and not isinstance(expected, (str, bytes)): + elif isinstance(expected, Collection) and not isinstance(expected, str | bytes): msg = f"pytest.approx() only supports ordered sequences, but got: {expected!r}" raise TypeError(msg) else: @@ -764,7 +790,7 @@ def _is_sequence_like(expected: object) -> bool: return ( hasattr(expected, "__getitem__") and isinstance(expected, Sized) - and not isinstance(expected, (str, bytes)) + and not isinstance(expected, str | bytes) ) diff --git a/src/_pytest/raises.py b/src/_pytest/raises.py index 480ae33647f..7c246fde280 100644 --- a/src/_pytest/raises.py +++ b/src/_pytest/raises.py @@ -29,9 +29,9 @@ # for some reason Sphinx does not play well with 'from types import TracebackType' import types + from typing import TypeGuard from typing_extensions import ParamSpec - from typing_extensions import TypeGuard from typing_extensions import TypeVar P = ParamSpec("P") @@ -452,23 +452,23 @@ def _parse_exc( 
issubclass(origin_exc, BaseExceptionGroup) and exc_type in (BaseException, Any) ): - if not isinstance(exc, Exception): + if not issubclass(origin_exc, ExceptionGroup): self.is_baseexception = True return cast(type[BaseExcT_1], origin_exc) else: raise ValueError( - f"Only `ExceptionGroup[Exception]` or `BaseExceptionGroup[BaseExeption]` " + f"Only `ExceptionGroup[Exception]` or `BaseExceptionGroup[BaseException]` " f"are accepted as generic types but got `{exc}`. " f"As `raises` will catch all instances of the specified group regardless of the " f"generic argument specific nested exceptions has to be checked " f"with `RaisesGroup`." ) # unclear if the Type/ValueError distinction is even helpful here - msg = f"expected exception must be {expected}, not " - if isinstance(exc, type): + msg = f"Expected {expected}, but got " + if isinstance(exc, type): # type: ignore[unreachable] raise ValueError(msg + f"{exc.__name__!r}") - if isinstance(exc, BaseException): - raise TypeError(msg + f"an exception instance ({type(exc).__name__})") + if isinstance(exc, BaseException): # type: ignore[unreachable] + raise TypeError(msg + f"an exception instance: {type(exc).__name__}") raise TypeError(msg + repr(type(exc).__name__)) @property @@ -519,12 +519,10 @@ def _check_match(self, e: BaseException) -> bool: self._fail_reason = ("\n" if diff[0][0] == "-" else "") + "\n".join(diff) return False - # I don't love "Regex"+"Input" vs something like "expected regex"+"exception message" - # when they're similar it's not always obvious which is which self._fail_reason = ( f"Regex pattern did not match{maybe_specify_type}.\n" - f" Regex: {_match_pattern(self.match)!r}\n" - f" Input: {stringified_exception!r}" + f" Expected regex: {_match_pattern(self.match)!r}\n" + f" Actual message: {stringified_exception!r}" ) if _match_pattern(self.match) == stringified_exception: self._fail_reason += "\n Did you mean to `re.escape()` the regex?" 
@@ -559,7 +557,7 @@ class RaisesExc(AbstractRaises[BaseExcT_co_default]): The type is checked with :func:`isinstance`, and does not need to be an exact match. If that is wanted you can use the ``check`` parameter. - :kwparam str | Pattern[str] match + :kwparam str | Pattern[str] match: A regex to match. :kwparam Callable[[BaseException], bool] check: @@ -1038,7 +1036,7 @@ def _parse_excgroup( return exc elif isinstance(exc, tuple): raise TypeError( - f"expected exception must be {expected}, not {type(exc).__name__!r}.\n" + f"Expected {expected}, but got {type(exc).__name__!r}.\n" "RaisesGroup does not support tuples of exception types when expecting one of " "several possible exception types like RaisesExc.\n" "If you meant to expect a group with multiple exceptions, list them as separate arguments." diff --git a/src/_pytest/recwarn.py b/src/_pytest/recwarn.py index 440e3efac8a..e3db717bfe4 100644 --- a/src/_pytest/recwarn.py +++ b/src/_pytest/recwarn.py @@ -167,7 +167,7 @@ def warns( return func(*args[1:], **kwargs) -class WarningsRecorder(warnings.catch_warnings): # type:ignore[type-arg] +class WarningsRecorder(warnings.catch_warnings): """A context manager to record raised warnings. Each recorded warning is an instance of :class:`warnings.WarningMessage`. @@ -226,7 +226,9 @@ def clear(self) -> None: """Clear the list of recorded warnings.""" self._list[:] = [] - def __enter__(self) -> Self: + # Type ignored because we basically want the `catch_warnings` generic type + # parameter to be ourselves but that is not possible(?). 
+ def __enter__(self) -> Self: # type: ignore[override] if self._entered: __tracebackhide__ = True raise RuntimeError(f"Cannot enter {self!r} twice") diff --git a/src/_pytest/reports.py b/src/_pytest/reports.py index 480ffae1f9c..011a69db001 100644 --- a/src/_pytest/reports.py +++ b/src/_pytest/reports.py @@ -9,6 +9,7 @@ from io import StringIO import os from pprint import pprint +import sys from typing import Any from typing import cast from typing import final @@ -35,6 +36,10 @@ from _pytest.outcomes import skip +if sys.version_info < (3, 11): + from exceptiongroup import BaseExceptionGroup + + if TYPE_CHECKING: from typing_extensions import Self @@ -188,7 +193,7 @@ def head_line(self) -> str | None: even in patch releases. """ if self.location is not None: - fspath, lineno, domain = self.location + _fspath, _lineno, domain = self.location return domain return None @@ -251,7 +256,52 @@ def _report_unserialization_failure( raise RuntimeError(stream.getvalue()) -@final +def _format_failed_longrepr( + item: Item, call: CallInfo[None], excinfo: ExceptionInfo[BaseException] +): + if call.when == "call": + longrepr = item.repr_failure(excinfo) + else: + # Exception in setup or teardown. + longrepr = item._repr_failure_py( + excinfo, style=item.config.getoption("tbstyle", "auto") + ) + return longrepr + + +def _format_exception_group_all_skipped_longrepr( + item: Item, + excinfo: ExceptionInfo[BaseExceptionGroup[BaseException | BaseExceptionGroup]], +) -> tuple[str, int, str]: + r = excinfo._getreprcrash() + assert r is not None, ( + "There should always be a traceback entry for skipping a test." + ) + if all( + getattr(skip, "_use_item_location", False) for skip in excinfo.value.exceptions + ): + path, line = item.reportinfo()[:2] + assert line is not None + loc = (os.fspath(path), line + 1) + default_msg = "skipped" + else: + loc = (str(r.path), r.lineno) + default_msg = r.message + + # Get all unique skip messages. 
+ msgs: list[str] = [] + for exception in excinfo.value.exceptions: + m = getattr(exception, "msg", None) or ( + exception.args[0] if exception.args else None + ) + if m and m not in msgs: + msgs.append(m) + + reason = "; ".join(msgs) if msgs else default_msg + longrepr = (*loc, reason) + return longrepr + + class TestReport(BaseReport): """Basic test report object (also used for setup and teardown calls if they fail). @@ -368,17 +418,24 @@ def from_item_and_call(cls, item: Item, call: CallInfo[None]) -> TestReport: if excinfo.value._use_item_location: path, line = item.reportinfo()[:2] assert line is not None - longrepr = os.fspath(path), line + 1, r.message + longrepr = (os.fspath(path), line + 1, r.message) else: longrepr = (str(r.path), r.lineno, r.message) + elif isinstance(excinfo.value, BaseExceptionGroup) and ( + excinfo.value.split(skip.Exception)[1] is None + ): + # All exceptions in the group are skip exceptions. + outcome = "skipped" + excinfo = cast( + ExceptionInfo[ + BaseExceptionGroup[BaseException | BaseExceptionGroup] + ], + excinfo, + ) + longrepr = _format_exception_group_all_skipped_longrepr(item, excinfo) else: outcome = "failed" - if call.when == "call": - longrepr = item.repr_failure(excinfo) - else: # exception in setup or teardown - longrepr = item._repr_failure_py( - excinfo, style=item.config.getoption("tbstyle", "auto") - ) + longrepr = _format_failed_longrepr(item, call, excinfo) for rwhen, key, content in item._report_sections: sections.append((f"Captured {key} {rwhen}", content)) return cls( @@ -459,7 +516,7 @@ def toterminal(self, out: TerminalWriter) -> None: def pytest_report_to_serializable( report: CollectReport | TestReport, ) -> dict[str, Any] | None: - if isinstance(report, (TestReport, CollectReport)): + if isinstance(report, TestReport | CollectReport): data = report._to_json() data["$report_type"] = report.__class__.__name__ return data diff --git a/src/_pytest/runner.py b/src/_pytest/runner.py index 
26e4e838b77..9c20ff9e638 100644 --- a/src/_pytest/runner.py +++ b/src/_pytest/runner.py @@ -16,6 +16,7 @@ from typing import TYPE_CHECKING from typing import TypeVar +from .config import Config from .reports import BaseReport from .reports import CollectErrorRepr from .reports import CollectReport @@ -239,11 +240,11 @@ def call_and_report( runtest_hook = ihook.pytest_runtest_teardown else: assert False, f"Unhandled runtest hook case: {when}" - reraise: tuple[type[BaseException], ...] = (Exit,) - if not item.config.getoption("usepdb", False): - reraise += (KeyboardInterrupt,) + call = CallInfo.from_call( - lambda: runtest_hook(item=item, **kwds), when=when, reraise=reraise + lambda: runtest_hook(item=item, **kwds), + when=when, + reraise=get_reraise_exceptions(item.config), ) report: TestReport = ihook.pytest_runtest_makereport(item=item, call=call) if log: @@ -253,6 +254,14 @@ def call_and_report( return report +def get_reraise_exceptions(config: Config) -> tuple[type[BaseException], ...]: + """Return exception types that should not be suppressed in general.""" + reraise: tuple[type[BaseException], ...] = (Exit,) + if not config.getoption("usepdb", False): + reraise += (KeyboardInterrupt,) + return reraise + + def check_interactive_exception(call: CallInfo[object], report: BaseReport) -> bool: """Check whether the call raised an exception that should be reported as interactive.""" @@ -262,7 +271,7 @@ def check_interactive_exception(call: CallInfo[object], report: BaseReport) -> b if hasattr(report, "wasxfail"): # Exception was expected. return False - if isinstance(call.excinfo.value, (Skipped, bdb.BdbQuit)): + if isinstance(call.excinfo.value, Skipped | bdb.BdbQuit): # Special control flow exception. 
return False return True diff --git a/src/_pytest/skipping.py b/src/_pytest/skipping.py index ec118f2c92f..3b067629de0 100644 --- a/src/_pytest/skipping.py +++ b/src/_pytest/skipping.py @@ -10,7 +10,6 @@ import platform import sys import traceback -from typing import Optional from _pytest.config import Config from _pytest.config import hookimpl @@ -38,11 +37,13 @@ def pytest_addoption(parser: Parser) -> None: ) parser.addini( - "xfail_strict", + "strict_xfail", "Default for the strict parameter of xfail " - "markers when not given explicitly (default: False)", - default=False, + "markers when not given explicitly (default: False) (alias: xfail_strict)", type="bool", + # None => fallback to `strict`. + default=None, + aliases=["xfail_strict"], ) @@ -75,7 +76,7 @@ def nop(*args, **kwargs): ) config.addinivalue_line( "markers", - "xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): " + "xfail(condition, ..., *, reason=..., run=True, raises=None, strict=strict_xfail): " "mark the test function as an expected failure if any of the conditions " "evaluate to True. Optionally specify a reason for better reporting " "and run=False if you don't even want to execute the test function. " @@ -214,7 +215,11 @@ def evaluate_xfail_marks(item: Item) -> Xfail | None: """Evaluate xfail marks on item, returning Xfail if triggered.""" for mark in item.iter_markers(name="xfail"): run = mark.kwargs.get("run", True) - strict = mark.kwargs.get("strict", item.config.getini("xfail_strict")) + strict = mark.kwargs.get("strict") + if strict is None: + strict = item.config.getini("strict_xfail") + if strict is None: + strict = item.config.getini("strict") raises = mark.kwargs.get("raises", None) if "condition" not in mark.kwargs: conditions = mark.args @@ -236,7 +241,7 @@ def evaluate_xfail_marks(item: Item) -> Xfail | None: # Saves the xfail mark evaluation. Can be refreshed during call if None. 
-xfailed_key = StashKey[Optional[Xfail]]() +xfailed_key = StashKey[Xfail | None]() @hookimpl(tryfirst=True) @@ -285,7 +290,7 @@ def pytest_runtest_makereport( raises = xfailed.raises if raises is None or ( ( - isinstance(raises, (type, tuple)) + isinstance(raises, type | tuple) and isinstance(call.excinfo.value, raises) ) or ( diff --git a/src/_pytest/subtests.py b/src/_pytest/subtests.py new file mode 100644 index 00000000000..e0ceb27f4b1 --- /dev/null +++ b/src/_pytest/subtests.py @@ -0,0 +1,411 @@ +"""Builtin plugin that adds subtests support.""" + +from __future__ import annotations + +from collections import defaultdict +from collections.abc import Callable +from collections.abc import Iterator +from collections.abc import Mapping +from contextlib import AbstractContextManager +from contextlib import contextmanager +from contextlib import ExitStack +from contextlib import nullcontext +import dataclasses +import time +from types import TracebackType +from typing import Any +from typing import TYPE_CHECKING + +import pluggy + +from _pytest._code import ExceptionInfo +from _pytest._io.saferepr import saferepr +from _pytest.capture import CaptureFixture +from _pytest.capture import FDCapture +from _pytest.capture import SysCapture +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import SubRequest +from _pytest.logging import catching_logs +from _pytest.logging import LogCaptureHandler +from _pytest.logging import LoggingPlugin +from _pytest.reports import TestReport +from _pytest.runner import CallInfo +from _pytest.runner import check_interactive_exception +from _pytest.runner import get_reraise_exceptions +from _pytest.stash import StashKey + + +if TYPE_CHECKING: + from typing_extensions import Self + + +def pytest_addoption(parser: Parser) -> None: + Config._add_verbosity_ini( + parser, 
+ Config.VERBOSITY_SUBTESTS, + help=( + "Specify verbosity level for subtests. " + "Higher levels will generate output for passed subtests. Failed subtests are always reported." + ), + ) + + +@dataclasses.dataclass(frozen=True, slots=True, kw_only=True) +class SubtestContext: + """The values passed to Subtests.test() that are included in the test report.""" + + msg: str | None + kwargs: Mapping[str, Any] + + def _to_json(self) -> dict[str, Any]: + return dataclasses.asdict(self) + + @classmethod + def _from_json(cls, d: dict[str, Any]) -> Self: + return cls(msg=d["msg"], kwargs=d["kwargs"]) + + +@dataclasses.dataclass(init=False) +class SubtestReport(TestReport): + context: SubtestContext + + @property + def head_line(self) -> str: + _, _, domain = self.location + return f"{domain} {self._sub_test_description()}" + + def _sub_test_description(self) -> str: + parts = [] + if self.context.msg is not None: + parts.append(f"[{self.context.msg}]") + if self.context.kwargs: + params_desc = ", ".join( + f"{k}={saferepr(v)}" for (k, v) in self.context.kwargs.items() + ) + parts.append(f"({params_desc})") + return " ".join(parts) or "()" + + def _to_json(self) -> dict[str, Any]: + data = super()._to_json() + del data["context"] + data["_report_type"] = "SubTestReport" + data["_subtest.context"] = self.context._to_json() + return data + + @classmethod + def _from_json(cls, reportdict: dict[str, Any]) -> SubtestReport: + report = super()._from_json(reportdict) + report.context = SubtestContext._from_json(reportdict["_subtest.context"]) + return report + + @classmethod + def _new( + cls, + test_report: TestReport, + context: SubtestContext, + captured_output: Captured | None, + captured_logs: CapturedLogs | None, + ) -> Self: + result = super()._from_json(test_report._to_json()) + result.context = context + + if captured_output: + if captured_output.out: + result.sections.append(("Captured stdout call", captured_output.out)) + if captured_output.err: + 
result.sections.append(("Captured stderr call", captured_output.err)) + + if captured_logs and (log := captured_logs.handler.stream.getvalue()): + result.sections.append(("Captured log call", log)) + + return result + + +@fixture +def subtests(request: SubRequest) -> Subtests: + """Provides subtests functionality.""" + capmam = request.node.config.pluginmanager.get_plugin("capturemanager") + suspend_capture_ctx = ( + capmam.global_and_fixture_disabled if capmam is not None else nullcontext + ) + return Subtests(request.node.ihook, suspend_capture_ctx, request, _ispytest=True) + + +class Subtests: + """Subtests fixture, enables declaring subtests inside test functions via the :meth:`test` method.""" + + def __init__( + self, + ihook: pluggy.HookRelay, + suspend_capture_ctx: Callable[[], AbstractContextManager[None]], + request: SubRequest, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self._ihook = ihook + self._suspend_capture_ctx = suspend_capture_ctx + self._request = request + + def test( + self, + msg: str | None = None, + **kwargs: Any, + ) -> _SubTestContextManager: + """ + Context manager for subtests, capturing exceptions raised inside the subtest scope and + reporting assertion failures and errors individually. + + Usage + ----- + + .. code-block:: python + + def test(subtests): + for i in range(5): + with subtests.test("custom message", i=i): + assert i % 2 == 0 + + :param msg: + If given, the message will be shown in the test report in case of subtest failure. + + :param kwargs: + Arbitrary values that are also added to the subtest report. + """ + return _SubTestContextManager( + self._ihook, + msg, + kwargs, + request=self._request, + suspend_capture_ctx=self._suspend_capture_ctx, + config=self._request.config, + ) + + +@dataclasses.dataclass +class _SubTestContextManager: + """ + Context manager for subtests, capturing exceptions raised inside the subtest scope and handling + them through the pytest machinery. 
+ """ + + # Note: initially the logic for this context manager was implemented directly + # in Subtests.test() as a @contextmanager, however, it is not possible to control the output fully when + # exiting from it due to an exception when in `--exitfirst` mode, so this was refactored into an + # explicit context manager class (pytest-dev/pytest-subtests#134). + + ihook: pluggy.HookRelay + msg: str | None + kwargs: dict[str, Any] + suspend_capture_ctx: Callable[[], AbstractContextManager[None]] + request: SubRequest + config: Config + + def __enter__(self) -> None: + __tracebackhide__ = True + + self._start = time.time() + self._precise_start = time.perf_counter() + self._exc_info = None + + self._exit_stack = ExitStack() + self._captured_output = self._exit_stack.enter_context( + capturing_output(self.request) + ) + self._captured_logs = self._exit_stack.enter_context( + capturing_logs(self.request) + ) + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> bool: + __tracebackhide__ = True + if exc_val is not None: + exc_info = ExceptionInfo.from_exception(exc_val) + else: + exc_info = None + + self._exit_stack.close() + + precise_stop = time.perf_counter() + duration = precise_stop - self._precise_start + stop = time.time() + + call_info = CallInfo[None]( + None, + exc_info, + start=self._start, + stop=stop, + duration=duration, + when="call", + _ispytest=True, + ) + report = self.ihook.pytest_runtest_makereport( + item=self.request.node, call=call_info + ) + sub_report = SubtestReport._new( + report, + SubtestContext(msg=self.msg, kwargs=self.kwargs), + captured_output=self._captured_output, + captured_logs=self._captured_logs, + ) + + if sub_report.failed: + failed_subtests = self.config.stash[failed_subtests_key] + failed_subtests[self.request.node.nodeid] += 1 + + with self.suspend_capture_ctx(): + self.ihook.pytest_runtest_logreport(report=sub_report) + + if 
check_interactive_exception(call_info, sub_report): + self.ihook.pytest_exception_interact( + node=self.request.node, call=call_info, report=sub_report + ) + + if exc_val is not None: + if isinstance(exc_val, get_reraise_exceptions(self.config)): + return False + if self.request.session.shouldfail: + return False + return True + + +@contextmanager +def capturing_output(request: SubRequest) -> Iterator[Captured]: + option = request.config.getoption("capture", None) + + capman = request.config.pluginmanager.getplugin("capturemanager") + if getattr(capman, "_capture_fixture", None): + # capsys or capfd are active, subtest should not capture. + fixture = None + elif option == "sys": + fixture = CaptureFixture(SysCapture, request, _ispytest=True) + elif option == "fd": + fixture = CaptureFixture(FDCapture, request, _ispytest=True) + else: + fixture = None + + if fixture is not None: + fixture._start() + + captured = Captured() + try: + yield captured + finally: + if fixture is not None: + out, err = fixture.readouterr() + fixture.close() + captured.out = out + captured.err = err + + +@contextmanager +def capturing_logs( + request: SubRequest, +) -> Iterator[CapturedLogs | None]: + logging_plugin: LoggingPlugin | None = request.config.pluginmanager.getplugin( + "logging-plugin" + ) + if logging_plugin is None: + yield None + else: + handler = LogCaptureHandler() + handler.setFormatter(logging_plugin.formatter) + + captured_logs = CapturedLogs(handler) + with catching_logs(handler, level=logging_plugin.log_level): + yield captured_logs + + +@dataclasses.dataclass +class Captured: + out: str = "" + err: str = "" + + +@dataclasses.dataclass +class CapturedLogs: + handler: LogCaptureHandler + + +def pytest_report_to_serializable(report: TestReport) -> dict[str, Any] | None: + if isinstance(report, SubtestReport): + return report._to_json() + return None + + +def pytest_report_from_serializable(data: dict[str, Any]) -> SubtestReport | None: + if data.get("_report_type") == 
"SubTestReport": + return SubtestReport._from_json(data) + return None + + +# Dict of nodeid -> number of failed subtests. +# Used to fail top-level tests that passed but contain failed subtests. +failed_subtests_key = StashKey[defaultdict[str, int]]() + + +def pytest_configure(config: Config) -> None: + config.stash[failed_subtests_key] = defaultdict(lambda: 0) + + +@hookimpl(tryfirst=True) +def pytest_report_teststatus( + report: TestReport, + config: Config, +) -> tuple[str, str, str | Mapping[str, bool]] | None: + if report.when != "call": + return None + + quiet = config.get_verbosity(Config.VERBOSITY_SUBTESTS) == 0 + if isinstance(report, SubtestReport): + outcome = report.outcome + description = report._sub_test_description() + + if hasattr(report, "wasxfail"): + if quiet: + return "", "", "" + elif outcome == "skipped": + category = "xfailed" + short = "y" # x letter is used for regular xfail, y for subtest xfail + status = "SUBXFAIL" + # outcome == "passed" in an xfail is only possible via a @pytest.mark.xfail mark, which + # is not applicable to a subtest, which only handles pytest.xfail(). + else: # pragma: no cover + # This should not normally happen, unless some plugin is setting wasxfail without + # the correct outcome. Pytest expects the call outcome to be either skipped or + # passed in case of xfail. + # Let's pass this report to the next hook. + return None + return category, short, f"{status}{description}" + + if report.failed: + return outcome, "u", f"SUBFAILED{description}" + else: + if report.passed: + if quiet: + return "", "", "" + else: + return f"subtests {outcome}", "u", f"SUBPASSED{description}" + elif report.skipped: + if quiet: + return "", "", "" + else: + return outcome, "-", f"SUBSKIPPED{description}" + + else: + failed_subtests_count = config.stash[failed_subtests_key][report.nodeid] + # Top-level test, fail if it contains failed subtests and it has passed. 
+ if report.passed and failed_subtests_count > 0: + report.outcome = "failed" + suffix = "s" if failed_subtests_count > 1 else "" + report.longrepr = f"contains {failed_subtests_count} failed subtest{suffix}" + + return None diff --git a/src/_pytest/terminal.py b/src/_pytest/terminal.py index d5ccc4e4900..e66e4f48dd6 100644 --- a/src/_pytest/terminal.py +++ b/src/_pytest/terminal.py @@ -31,6 +31,7 @@ import pluggy +from _pytest import compat from _pytest import nodes from _pytest import timing from _pytest._code import ExceptionInfo @@ -38,7 +39,7 @@ from _pytest._io import TerminalWriter from _pytest._io.wcwidth import wcswidth import _pytest._version -from _pytest.assertion.util import running_on_ci +from _pytest.compat import running_on_ci from _pytest.config import _PluggyPlugin from _pytest.config import Config from _pytest.config import ExitCode @@ -68,6 +69,9 @@ "xpassed", "warnings", "error", + "subtests passed", + "subtests failed", + "subtests skipped", ) _REPORTCHARS_DEFAULT = "fE" @@ -294,6 +298,11 @@ def mywriter(tags, args): config.trace.root.setprocessor("pytest:config", mywriter) + # See terminalprogress.py. + # On Windows it's safe to load by default. + if sys.platform == "win32": + config.pluginmanager.import_plugin("terminalprogress") + def getreportopt(config: Config) -> str: reportchars: str = config.option.reportchars @@ -387,7 +396,9 @@ def __init__(self, config: Config, file: TextIO | None = None) -> None: self.reportchars = getreportopt(config) self.foldskipped = config.option.fold_skipped self.hasmarkup = self._tw.hasmarkup - self.isatty = file.isatty() + # isatty should be a method but was wrongly implemented as a boolean. + # We use CallableBool here to support both. 
+ self.isatty = compat.CallableBool(file.isatty()) self._progress_nodeids_reported: set[str] = set() self._timing_nodeids_reported: set[str] = set() self._show_progress_info = self._determine_show_progress_info() @@ -451,6 +462,14 @@ def showfspath(self, value: bool | None) -> None: def showlongtestinfo(self) -> bool: return self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) > 0 + @property + def reported_progress(self) -> int: + """The amount of items reported in the progress so far. + + :meta private: + """ + return len(self._progress_nodeids_reported) + def hasopt(self, char: str) -> bool: char = {"xfailed": "x", "skipped": "s"}.get(char, char) return char in self.reportchars @@ -505,6 +524,9 @@ def wrap_write( def write(self, content: str, *, flush: bool = False, **markup: bool) -> None: self._tw.write(content, flush=flush, **markup) + def write_raw(self, content: str, *, flush: bool = False) -> None: + self._tw.write_raw(content, flush=flush) + def flush(self) -> None: self._tw.flush() @@ -678,7 +700,7 @@ def pytest_runtest_logreport(self, report: TestReport) -> None: @property def _is_last_item(self) -> bool: assert self._session is not None - return len(self._progress_nodeids_reported) == self._session.testscollected + return self.reported_progress == self._session.testscollected @hookimpl(wrapper=True) def pytest_runtestloop(self) -> Generator[None, object, object]: @@ -688,7 +710,7 @@ def pytest_runtestloop(self) -> Generator[None, object, object]: if ( self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) <= 0 and self._show_progress_info - and self._progress_nodeids_reported + and self.reported_progress ): self._write_progress_information_filling_space() @@ -699,7 +721,7 @@ def _get_progress_information_message(self) -> str: collected = self._session.testscollected if self._show_progress_info == "count": if collected: - progress = len(self._progress_nodeids_reported) + progress = self.reported_progress counter_format = f"{{:{len(str(collected))}d}}" 
format_string = f" [{counter_format}/{{}}]" return format_string.format(progress, collected) @@ -731,10 +753,12 @@ def _get_progress_information_message(self) -> str: last_in_module = tests_completed == tests_in_module if self.showlongtestinfo or last_in_module: self._timing_nodeids_reported.update(r.nodeid for r in not_reported) - return format_node_duration(sum(r.duration for r in not_reported)) + return format_node_duration( + sum(r.duration for r in not_reported if isinstance(r, TestReport)) + ) return "" if collected: - return f" [{len(self._progress_nodeids_reported) * 100 // collected:3d}%]" + return f" [{self.reported_progress * 100 // collected:3d}%]" return " [100%]" def _write_progress_information_if_past_edge(self) -> None: @@ -766,7 +790,7 @@ def _width_of_current_line(self) -> int: return self._tw.width_of_current_line def pytest_collection(self) -> None: - if self.isatty: + if self.isatty(): if self.config.option.verbose >= 0: self.write("collecting ... ", flush=True, bold=True) elif self.config.option.verbose >= 1: @@ -779,7 +803,7 @@ def pytest_collectreport(self, report: CollectReport) -> None: self._add_stats("skipped", [report]) items = [x for x in report.result if isinstance(x, Item)] self._numcollected += len(items) - if self.isatty: + if self.isatty(): self.report_collect() def report_collect(self, final: bool = False) -> None: @@ -811,7 +835,7 @@ def report_collect(self, final: bool = False) -> None: line += f" / {skipped} skipped" if self._numcollected > selected: line += f" / {selected} selected" - if self.isatty: + if self.isatty(): self.rewrite(line, bold=True, erase=True) if final: self.write("\n") @@ -859,7 +883,12 @@ def pytest_report_header(self, config: Config) -> list[str]: result = [f"rootdir: {config.rootpath}"] if config.inipath: - result.append("configfile: " + bestrelpath(config.rootpath, config.inipath)) + warning = "" + if config._ignored_config_files: + warning = f" (WARNING: ignoring pytest config in {', 
'.join(config._ignored_config_files)}!)" + result.append( + "configfile: " + bestrelpath(config.rootpath, config.inipath) + warning + ) if config.args_source == Config.ArgsSource.TESTPATHS: testpaths: list[str] = config.getini("testpaths") @@ -1160,6 +1189,7 @@ def summary_failures_combined( if style == "line": for rep in reports: line = self._getcrashline(rep) + self._outrep_summary(rep) self.write_line(line) else: for rep in reports: @@ -1499,9 +1529,13 @@ def _get_line_with_reprcrash_message( line = f"{word} {node}" line_width = wcswidth(line) + msg: str | None try: - # Type ignored intentionally -- possible AttributeError expected. - msg = rep.longrepr.reprcrash.message # type: ignore[union-attr] + if isinstance(rep.longrepr, str): + msg = rep.longrepr + else: + # Type ignored intentionally -- possible AttributeError expected. + msg = rep.longrepr.reprcrash.message # type: ignore[union-attr] except AttributeError: pass else: @@ -1554,6 +1588,8 @@ def _folded_skips( "error": "red", "warnings": "yellow", "passed": "green", + "subtests passed": "green", + "subtests failed": "red", } _color_for_type_default = "yellow" @@ -1636,3 +1672,92 @@ def _get_raw_skip_reason(report: TestReport) -> str: elif reason == "Skipped": reason = "" return reason + + +class TerminalProgressPlugin: + """Terminal progress reporting plugin using OSC 9;4 ANSI sequences. + + Emits OSC 9;4 sequences to indicate test progress to terminal + tabs/windows/etc. + + Not all terminal emulators support this feature. + + Ref: https://conemu.github.io/en/AnsiEscapeCodes.html#ConEmu_specific_OSC + """ + + def __init__(self, tr: TerminalReporter) -> None: + self._tr = tr + self._session: Session | None = None + self._has_failures = False + + def _emit_progress( + self, + state: Literal["remove", "normal", "error", "indeterminate", "paused"], + progress: int | None = None, + ) -> None: + """Emit OSC 9;4 sequence for indicating progress to the terminal. + + :param state: + Progress state to set. 
+ :param progress: + Progress value 0-100. Required for "normal", optional for "error" + and "paused", otherwise ignored. + """ + assert progress is None or 0 <= progress <= 100 + + # OSC 9;4 sequence: ESC ] 9 ; 4 ; state ; progress ST + # ST can be ESC \ or BEL. ESC \ seems better supported. + match state: + case "remove": + sequence = "\x1b]9;4;0;\x1b\\" + case "normal": + assert progress is not None + sequence = f"\x1b]9;4;1;{progress}\x1b\\" + case "error": + if progress is not None: + sequence = f"\x1b]9;4;2;{progress}\x1b\\" + else: + sequence = "\x1b]9;4;2;\x1b\\" + case "indeterminate": + sequence = "\x1b]9;4;3;\x1b\\" + case "paused": + if progress is not None: + sequence = f"\x1b]9;4;4;{progress}\x1b\\" + else: + sequence = "\x1b]9;4;4;\x1b\\" + + self._tr.write_raw(sequence, flush=True) + + @hookimpl + def pytest_sessionstart(self, session: Session) -> None: + self._session = session + # Show indeterminate progress during collection. + self._emit_progress("indeterminate") + + @hookimpl + def pytest_collection_finish(self) -> None: + assert self._session is not None + if self._session.testscollected > 0: + # Switch from indeterminate to 0% progress. + self._emit_progress("normal", 0) + + @hookimpl + def pytest_runtest_logreport(self, report: TestReport) -> None: + if report.failed: + self._has_failures = True + + # Let's consider the "call" phase for progress. + if report.when != "call": + return + + # Calculate and emit progress. 
+ assert self._session is not None + collected = self._session.testscollected + if collected > 0: + reported = self._tr.reported_progress + progress = min(reported * 100 // collected, 100) + self._emit_progress("error" if self._has_failures else "normal", progress) + + @hookimpl + def pytest_sessionfinish(self) -> None: + self._emit_progress("remove") diff --git a/src/_pytest/terminalprogress.py b/src/_pytest/terminalprogress.py new file mode 100644 index 00000000000..287f0d569ff --- /dev/null +++ b/src/_pytest/terminalprogress.py @@ -0,0 +1,30 @@ +# A plugin to register the TerminalProgressPlugin plugin. +# +# This plugin is not loaded by default due to compatibility issues (#13896), +# but can be enabled in one of these ways: +# - The terminal plugin enables it in a few cases where it's safe, and not +# blocked by the user (using e.g. `-p no:terminalprogress`). +# - The user explicitly requests it, e.g. using `-p terminalprogress`. +# +# In a few years, if it's safe, we can consider enabling it by default. Then, +# this file will become unnecessary and can be inlined into terminal.py. 
+ +from __future__ import annotations + +import os + +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.terminal import TerminalProgressPlugin +from _pytest.terminal import TerminalReporter + + +@hookimpl(trylast=True) +def pytest_configure(config: Config) -> None: + reporter: TerminalReporter | None = config.pluginmanager.get_plugin( + "terminalreporter" + ) + + if reporter is not None and reporter.isatty() and os.environ.get("TERM") != "dumb": + plugin = TerminalProgressPlugin(reporter) + config.pluginmanager.register(plugin, name="terminalprogress-plugin") diff --git a/src/_pytest/timing.py b/src/_pytest/timing.py index 221eeffc4fd..51c3db23f6f 100644 --- a/src/_pytest/timing.py +++ b/src/_pytest/timing.py @@ -84,6 +84,7 @@ def time(self) -> float: return self._current_time def patch(self, monkeypatch: MonkeyPatch) -> None: + # pylint: disable-next=import-self from _pytest import timing # noqa: PLW0406 monkeypatch.setattr(timing, "sleep", self.sleep) diff --git a/src/_pytest/tmpdir.py b/src/_pytest/tmpdir.py index dcd5784f88f..66ca9f190e3 100644 --- a/src/_pytest/tmpdir.py +++ b/src/_pytest/tmpdir.py @@ -9,6 +9,7 @@ from pathlib import Path import re from shutil import rmtree +import stat import tempfile from typing import Any from typing import final @@ -170,16 +171,37 @@ def getbasetemp(self) -> Path: # Also, to keep things private, fixup any world-readable temp # rootdir's permissions. Historically 0o755 was used, so we can't # just error out on this, at least for a while. + # Don't follow symlinks, otherwise we're open to symlink-swapping + # TOCTOU vulnerability. + # This check makes us vulnerable to a DoS - a user can `mkdir + # /tmp/pytest-of-otheruser` and then `otheruser` will fail this + # check. For now we don't consider it a real problem. otheruser can + # change their TMPDIR or --basetemp, and maybe give the prankster a + # good scolding. 
uid = get_user_id() if uid is not None: - rootdir_stat = rootdir.stat() + stat_follow_symlinks = ( + False if os.stat in os.supports_follow_symlinks else True + ) + rootdir_stat = rootdir.stat(follow_symlinks=stat_follow_symlinks) + if stat.S_ISLNK(rootdir_stat.st_mode): + raise OSError( + f"The temporary directory {rootdir} is a symbolic link. " + "Fix this and try again." + ) if rootdir_stat.st_uid != uid: raise OSError( f"The temporary directory {rootdir} is not owned by the current user. " "Fix this and try again." ) if (rootdir_stat.st_mode & 0o077) != 0: - os.chmod(rootdir, rootdir_stat.st_mode & ~0o077) + chmod_follow_symlinks = ( + False if os.chmod in os.supports_follow_symlinks else True + ) + rootdir.chmod( + rootdir_stat.st_mode & ~0o077, + follow_symlinks=chmod_follow_symlinks, + ) keep = self._retention_count if self._retention_policy == "none": keep = 0 @@ -225,13 +247,16 @@ def pytest_addoption(parser: Parser) -> None: parser.addini( "tmp_path_retention_count", help="How many sessions should we keep the `tmp_path` directories, according to `tmp_path_retention_policy`.", - default=3, + default="3", + # NOTE: Would have been better as an `int` but can't change it now. + type="string", ) parser.addini( "tmp_path_retention_policy", help="Controls which directories created by the `tmp_path` fixture are kept around, based on test outcome. 
" "(all/failed/none)", + type="string", default="all", ) diff --git a/src/_pytest/unittest.py b/src/_pytest/unittest.py index 04d50b53090..31be8847821 100644 --- a/src/_pytest/unittest.py +++ b/src/_pytest/unittest.py @@ -6,18 +6,24 @@ from collections.abc import Callable from collections.abc import Generator from collections.abc import Iterable +from collections.abc import Iterator +from enum import auto +from enum import Enum import inspect import sys import traceback import types from typing import Any from typing import TYPE_CHECKING -from typing import Union +from unittest import TestCase import _pytest._code +from _pytest._code import ExceptionInfo +from _pytest.compat import assert_never from _pytest.compat import is_async_function from _pytest.config import hookimpl from _pytest.fixtures import FixtureRequest +from _pytest.monkeypatch import MonkeyPatch from _pytest.nodes import Collector from _pytest.nodes import Item from _pytest.outcomes import exit @@ -28,22 +34,25 @@ from _pytest.python import Function from _pytest.python import Module from _pytest.runner import CallInfo -import pytest +from _pytest.runner import check_interactive_exception +from _pytest.subtests import SubtestContext +from _pytest.subtests import SubtestReport if sys.version_info[:2] < (3, 11): from exceptiongroup import ExceptionGroup if TYPE_CHECKING: + from types import TracebackType import unittest import twisted.trial.unittest -_SysExcInfoType = Union[ - tuple[type[BaseException], BaseException, types.TracebackType], - tuple[None, None, None], -] +_SysExcInfoType = ( + tuple[type[BaseException], BaseException, types.TracebackType] + | tuple[None, None, None] +) def pytest_pycollect_makeitem( @@ -139,7 +148,7 @@ def unittest_setup_class_fixture( cls = request.cls if _is_skipped(cls): reason = cls.__unittest_skip_why__ - raise pytest.skip.Exception(reason, _use_item_location=True) + raise skip.Exception(reason, _use_item_location=True) if setup is not None: try: setup() @@ -180,7 
+189,7 @@ def unittest_setup_method_fixture( self = request.instance if _is_skipped(self): reason = self.__unittest_skip_why__ - raise pytest.skip.Exception(reason, _use_item_location=True) + raise skip.Exception(reason, _use_item_location=True) if setup is not None: setup(self, request.function) yield @@ -199,6 +208,7 @@ def unittest_setup_method_fixture( class TestCaseFunction(Function): nofuncargs = True + failfast = False _excinfo: list[_pytest._code.ExceptionInfo[BaseException]] | None = None def _getinstance(self): @@ -215,6 +225,10 @@ def setup(self) -> None: # A bound method to be called during teardown() if set (see 'runtest()'). self._explicit_tearDown: Callable[[], None] | None = None super().setup() + if sys.version_info < (3, 11): + # A cache of the subTest errors and non-subtest skips in self._outcome. + # Compute and cache these lists once, instead of computing them again and again for each subtest (#13965). + self._cached_errors_and_skips: tuple[list[Any], list[Any]] | None = None def teardown(self) -> None: if self._explicit_tearDown is not None: @@ -228,8 +242,7 @@ def startTest(self, testcase: unittest.TestCase) -> None: pass def _addexcinfo(self, rawexcinfo: _SysExcInfoType) -> None: - # Unwrap potential exception info (see twisted trial support below). 
- rawexcinfo = getattr(rawexcinfo, "_rawexcinfo", rawexcinfo) + rawexcinfo = _handle_twisted_exc_info(rawexcinfo) try: excinfo = _pytest._code.ExceptionInfo[BaseException].from_exc_info( rawexcinfo # type: ignore[arg-type] @@ -277,11 +290,38 @@ def addFailure( ) -> None: self._addexcinfo(rawexcinfo) - def addSkip(self, testcase: unittest.TestCase, reason: str) -> None: - try: - raise pytest.skip.Exception(reason, _use_item_location=True) - except skip.Exception: - self._addexcinfo(sys.exc_info()) + def addSkip( + self, testcase: unittest.TestCase, reason: str, *, handle_subtests: bool = True + ) -> None: + from unittest.case import _SubTest # type: ignore[attr-defined] + + def add_skip() -> None: + try: + raise skip.Exception(reason, _use_item_location=True) + except skip.Exception: + self._addexcinfo(sys.exc_info()) + + if not handle_subtests: + add_skip() + return + + if isinstance(testcase, _SubTest): + add_skip() + if self._excinfo is not None: + exc_info = self._excinfo[-1] + self.addSubTest(testcase.test_case, testcase, exc_info) + else: + # For python < 3.11: the non-subtest skips have to be added by `add_skip` only after all subtest + # failures are processed by `_addSubTest`: `self.instance._outcome` has no attribute + # `skipped/errors` anymore. + # We also need to check if `self.instance._outcome` is `None` (this happens if the test + # class/method is decorated with `unittest.skip`, see pytest-dev/pytest-subtests#173). 
+ if sys.version_info < (3, 11) and self.instance._outcome is not None: + subtest_errors, _ = self._obtain_errors_and_skips() + if len(subtest_errors) == 0: + add_skip() + else: + add_skip() def addExpectedFailure( self, @@ -361,6 +401,88 @@ def _traceback_filter( ntraceback = traceback return ntraceback + def addSubTest( + self, + test_case: Any, + test: TestCase, + exc_info: ExceptionInfo[BaseException] + | tuple[type[BaseException], BaseException, TracebackType] + | None, + ) -> None: + # Importing this private symbol locally in case this symbol is renamed/removed in the future; importing + # it globally would break pytest entirely, importing it locally only will break unittests using `addSubTest`. + from unittest.case import _subtest_msg_sentinel # type: ignore[attr-defined] + + exception_info: ExceptionInfo[BaseException] | None + match exc_info: + case tuple(): + exception_info = ExceptionInfo(exc_info, _ispytest=True) + case ExceptionInfo() | None: + exception_info = exc_info + case unreachable: + assert_never(unreachable) + + call_info = CallInfo[None]( + None, + exception_info, + start=0, + stop=0, + duration=0, + when="call", + _ispytest=True, + ) + msg = None if test._message is _subtest_msg_sentinel else str(test._message) # type: ignore[attr-defined] + report = self.ihook.pytest_runtest_makereport(item=self, call=call_info) + sub_report = SubtestReport._new( + report, + SubtestContext(msg=msg, kwargs=dict(test.params)), # type: ignore[attr-defined] + captured_output=None, + captured_logs=None, + ) + self.ihook.pytest_runtest_logreport(report=sub_report) + if check_interactive_exception(call_info, sub_report): + self.ihook.pytest_exception_interact( + node=self, call=call_info, report=sub_report + ) + + # For python < 3.11: add non-subtest skips once all subtest failures are processed by # `_addSubTest`. 
+ if sys.version_info < (3, 11): + subtest_errors, non_subtest_skip = self._obtain_errors_and_skips() + + # Check if we have non-subtest skips: if there are also sub failures, non-subtest skips are not treated in + # `_addSubTest` and have to be added using `add_skip` after all subtest failures are processed. + if len(non_subtest_skip) > 0 and len(subtest_errors) > 0: + # Make sure we have processed the last subtest failure + last_subset_error = subtest_errors[-1] + if exc_info is last_subset_error[-1]: + # Add non-subtest skips (as they could not be treated in `_addSkip`) + for testcase, reason in non_subtest_skip: + self.addSkip(testcase, reason, handle_subtests=False) + + def _obtain_errors_and_skips(self) -> tuple[list[Any], list[Any]]: + """Compute or obtain the cached values for subtest errors and non-subtest skips.""" + from unittest.case import _SubTest # type: ignore[attr-defined] + + assert sys.version_info < (3, 11), ( + "This workaround only should be used in Python 3.10" + ) + if self._cached_errors_and_skips is not None: + return self._cached_errors_and_skips + + subtest_errors = [ + (x, y) + for x, y in self.instance._outcome.errors + if isinstance(x, _SubTest) and y is not None + ] + + non_subtest_skips = [ + (x, y) + for x, y in self.instance._outcome.skipped + if not isinstance(x, _SubTest) + ] + self._cached_errors_and_skips = (subtest_errors, non_subtest_skips) + return subtest_errors, non_subtest_skips + @hookimpl(tryfirst=True) def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> None: @@ -373,61 +495,138 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> None: pass # Convert unittest.SkipTest to pytest.skip. - # This is actually only needed for nose, which reuses unittest.SkipTest for - # its own nose.SkipTest. For unittest TestCases, SkipTest is already - # handled internally, and doesn't reach here. + # This covers explicit `raise unittest.SkipTest`. 
unittest = sys.modules.get("unittest") if unittest and call.excinfo and isinstance(call.excinfo.value, unittest.SkipTest): excinfo = call.excinfo - call2 = CallInfo[None].from_call( - lambda: pytest.skip(str(excinfo.value)), call.when - ) + call2 = CallInfo[None].from_call(lambda: skip(str(excinfo.value)), call.when) call.excinfo = call2.excinfo -# Twisted trial support. -classImplements_has_run = False +def _is_skipped(obj) -> bool: + """Return True if the given object has been marked with @unittest.skip.""" + return bool(getattr(obj, "__unittest_skip__", False)) + + +def pytest_configure() -> None: + """Register the TestCaseFunction class as an IReporter if twisted.trial is available.""" + if _get_twisted_version() is not TwistedVersion.NotInstalled: + from twisted.trial.itrial import IReporter + from zope.interface import classImplements + + classImplements(TestCaseFunction, IReporter) + + +class TwistedVersion(Enum): + """ + The Twisted version installed in the environment. + + We have different workarounds in place for different versions of Twisted. + """ + + # Twisted version 24 or prior. + Version24 = auto() + # Twisted version 25 or later. + Version25 = auto() + # Twisted version is not available. + NotInstalled = auto() + + +def _get_twisted_version() -> TwistedVersion: + # We need to check if "twisted.trial.unittest" is specifically present in sys.modules. + # This is because we intend to integrate with Trial only when it's actively running + # the test suite, but not needed when only other Twisted components are in use. 
+ if "twisted.trial.unittest" not in sys.modules: + return TwistedVersion.NotInstalled + + import importlib.metadata + + import packaging.version + + version_str = importlib.metadata.version("twisted") + version = packaging.version.parse(version_str) + if version.major <= 24: + return TwistedVersion.Version24 + else: + return TwistedVersion.Version25 + + +# Name of the attribute in `twisted.python.Failure` instances that stores +# the `sys.exc_info()` tuple. +# See twisted.trial support in `pytest_runtest_protocol`. +TWISTED_RAW_EXCINFO_ATTR = "_twisted_raw_excinfo" @hookimpl(wrapper=True) -def pytest_runtest_protocol(item: Item) -> Generator[None, object, object]: - if isinstance(item, TestCaseFunction) and "twisted.trial.unittest" in sys.modules: - ut: Any = sys.modules["twisted.python.failure"] - global classImplements_has_run - Failure__init__ = ut.Failure.__init__ - if not classImplements_has_run: - from twisted.trial.itrial import IReporter - from zope.interface import classImplements - - classImplements(TestCaseFunction, IReporter) - classImplements_has_run = True - - def excstore( +def pytest_runtest_protocol(item: Item) -> Iterator[None]: + if _get_twisted_version() is TwistedVersion.Version24: + import twisted.python.failure as ut + + # Monkeypatch `Failure.__init__` to store the raw exception info. 
+ original__init__ = ut.Failure.__init__ + + def store_raw_exception_info( self, exc_value=None, exc_type=None, exc_tb=None, captureVars=None - ): + ): # pragma: no cover if exc_value is None: - self._rawexcinfo = sys.exc_info() + raw_exc_info = sys.exc_info() else: if exc_type is None: exc_type = type(exc_value) - self._rawexcinfo = (exc_type, exc_value, exc_tb) + if exc_tb is None: + exc_tb = sys.exc_info()[2] + raw_exc_info = (exc_type, exc_value, exc_tb) + setattr(self, TWISTED_RAW_EXCINFO_ATTR, tuple(raw_exc_info)) try: - Failure__init__( + original__init__( self, exc_value, exc_type, exc_tb, captureVars=captureVars ) - except TypeError: - Failure__init__(self, exc_value, exc_type, exc_tb) + except TypeError: # pragma: no cover + original__init__(self, exc_value, exc_type, exc_tb) - ut.Failure.__init__ = excstore - try: - res = yield - finally: - ut.Failure.__init__ = Failure__init__ + with MonkeyPatch.context() as patcher: + patcher.setattr(ut.Failure, "__init__", store_raw_exception_info) + return (yield) else: - res = yield - return res - - -def _is_skipped(obj) -> bool: - """Return True if the given object has been marked with @unittest.skip.""" - return bool(getattr(obj, "__unittest_skip__", False)) + return (yield) + + +def _handle_twisted_exc_info( + rawexcinfo: _SysExcInfoType | BaseException, +) -> _SysExcInfoType: + """ + Twisted passes a custom Failure instance to `addError()` instead of using `sys.exc_info()`. + Therefore, if `rawexcinfo` is a `Failure` instance, convert it into the equivalent `sys.exc_info()` tuple + as expected by pytest. + """ + twisted_version = _get_twisted_version() + if twisted_version is TwistedVersion.NotInstalled: + # Unfortunately, because we cannot import `twisted.python.failure` at the top of the file + # and use it in the signature, we need to use `type:ignore` here because we cannot narrow + # the type properly in the `if` statement above. 
+ return rawexcinfo # type:ignore[return-value] + elif twisted_version is TwistedVersion.Version24: + # Twisted calls addError() passing its own classes (like `twisted.python.Failure`), which violates + # the `addError()` signature, so we extract the original `sys.exc_info()` tuple which is stored + # in the object. + if hasattr(rawexcinfo, TWISTED_RAW_EXCINFO_ATTR): + saved_exc_info = getattr(rawexcinfo, TWISTED_RAW_EXCINFO_ATTR) + # Delete the attribute from the original object to avoid leaks. + delattr(rawexcinfo, TWISTED_RAW_EXCINFO_ATTR) + return saved_exc_info # type:ignore[no-any-return] + return rawexcinfo # type:ignore[return-value] + elif twisted_version is TwistedVersion.Version25: + if isinstance(rawexcinfo, BaseException): + import twisted.python.failure + + if isinstance(rawexcinfo, twisted.python.failure.Failure): + tb = rawexcinfo.__traceback__ + if tb is None: + tb = sys.exc_info()[2] + return type(rawexcinfo.value), rawexcinfo.value, tb + + return rawexcinfo # type:ignore[return-value] + else: + # Ideally we would use assert_never() here, but it is not available in all Python versions + # we support, plus we do not require `type_extensions` currently. + assert False, f"Unexpected Twisted version: {twisted_version}" diff --git a/src/_pytest/unraisableexception.py b/src/_pytest/unraisableexception.py index 7826aeccd12..0faca36aa00 100644 --- a/src/_pytest/unraisableexception.py +++ b/src/_pytest/unraisableexception.py @@ -24,10 +24,12 @@ from exceptiongroup import ExceptionGroup -def gc_collect_harder() -> None: - # A single collection doesn't necessarily collect everything. - # Constant determined experimentally by the Trio project. - for _ in range(5): +# This is a stash item and not a simple constant to allow pytester to override it. 
+gc_collect_iterations_key = StashKey[int]() + + +def gc_collect_harder(iterations: int) -> None: + for _ in range(iterations): gc.collect() @@ -84,9 +86,12 @@ def collect_unraisable(config: Config) -> None: def cleanup( *, config: Config, prev_hook: Callable[[sys.UnraisableHookArgs], object] ) -> None: + # A single collection doesn't necessarily collect everything. + # Constant determined experimentally by the Trio project. + gc_collect_iterations = config.stash.get(gc_collect_iterations_key, 5) try: try: - gc_collect_harder() + gc_collect_harder(gc_collect_iterations) collect_unraisable(config) finally: sys.unraisablehook = prev_hook diff --git a/src/_pytest/warning_types.py b/src/_pytest/warning_types.py index 8c9ff2d9a36..93071b4a1b2 100644 --- a/src/_pytest/warning_types.py +++ b/src/_pytest/warning_types.py @@ -56,6 +56,12 @@ class PytestRemovedIn9Warning(PytestDeprecationWarning): __module__ = "pytest" +class PytestRemovedIn10Warning(PytestDeprecationWarning): + """Warning class for features that will be removed in pytest 10.""" + + __module__ = "pytest" + + @final class PytestExperimentalApiWarning(PytestWarning, FutureWarning): """Warning category used to denote experiments in pytest. @@ -71,6 +77,17 @@ def simple(cls, apiname: str) -> PytestExperimentalApiWarning: return cls(f"{apiname} is an experimental api that may change over time") +@final +class PytestReturnNotNoneWarning(PytestWarning): + """ + Warning emitted when a test function returns a value other than ``None``. + + See :ref:`return-not-none` for details. + """ + + __module__ = "pytest" + + @final class PytestUnknownMarkWarning(PytestWarning): """Warning emitted on use of unknown markers. 
diff --git a/src/_pytest/warnings.py b/src/_pytest/warnings.py index 806681a5020..1dbf0025a31 100644 --- a/src/_pytest/warnings.py +++ b/src/_pytest/warnings.py @@ -41,8 +41,7 @@ def catch_warnings_for_item( warnings.filterwarnings("always", category=DeprecationWarning) warnings.filterwarnings("always", category=PendingDeprecationWarning) - # To be enabled in pytest 9.0.0. - # warnings.filterwarnings("error", category=pytest.PytestRemovedIn9Warning) + warnings.filterwarnings("error", category=pytest.PytestRemovedIn9Warning) apply_warning_filters(config_filters, cmdline_filters) diff --git a/src/pytest/__init__.py b/src/pytest/__init__.py index e36d3e704c1..3e6281ac388 100644 --- a/src/pytest/__init__.py +++ b/src/pytest/__init__.py @@ -71,6 +71,8 @@ from _pytest.runner import CallInfo from _pytest.stash import Stash from _pytest.stash import StashKey +from _pytest.subtests import SubtestReport +from _pytest.subtests import Subtests from _pytest.terminal import TerminalReporter from _pytest.terminal import TestShortLogReport from _pytest.tmpdir import TempPathFactory @@ -82,6 +84,8 @@ from _pytest.warning_types import PytestExperimentalApiWarning from _pytest.warning_types import PytestFDWarning from _pytest.warning_types import PytestRemovedIn9Warning +from _pytest.warning_types import PytestRemovedIn10Warning +from _pytest.warning_types import PytestReturnNotNoneWarning from _pytest.warning_types import PytestUnhandledThreadExceptionWarning from _pytest.warning_types import PytestUnknownMarkWarning from _pytest.warning_types import PytestUnraisableExceptionWarning @@ -132,6 +136,8 @@ "PytestFDWarning", "PytestPluginManager", "PytestRemovedIn9Warning", + "PytestRemovedIn10Warning", + "PytestReturnNotNoneWarning", "PytestUnhandledThreadExceptionWarning", "PytestUnknownMarkWarning", "PytestUnraisableExceptionWarning", @@ -144,6 +150,8 @@ "Session", "Stash", "StashKey", + "SubtestReport", + "Subtests", "TempPathFactory", "TempdirFactory", "TerminalReporter", diff 
--git a/testing/_py/test_local.py b/testing/_py/test_local.py index 7064d1daa9b..592058a54a5 100644 --- a/testing/_py/test_local.py +++ b/testing/_py/test_local.py @@ -9,17 +9,17 @@ from unittest import mock import warnings -from py import error from py.path import local +from py import error + import pytest @contextlib.contextmanager def ignore_encoding_warning(): with warnings.catch_warnings(): - if sys.version_info >= (3, 10): - warnings.simplefilter("ignore", EncodingWarning) # noqa: F821 + warnings.simplefilter("ignore", EncodingWarning) yield @@ -207,13 +207,9 @@ def test_visit_norecurse(self, path1): assert "sampledir" in lst assert path1.sep.join(["sampledir", "otherfile"]) not in lst - @pytest.mark.parametrize( - "fil", - ["*dir", "*dir", pytest.mark.skip("sys.version_info < (3,6)")(b"*dir")], - ) - def test_visit_filterfunc_is_string(self, path1, fil): + def test_visit_filterfunc_is_string(self, path1): lst = [] - for i in path1.visit(fil): + for i in path1.visit("*dir"): lst.append(i.relto(path1)) assert len(lst), 2 # noqa: PLC1802,RUF040 assert "sampledir" in lst @@ -463,12 +459,11 @@ def test_fspath_func_match_strpath(self, path1): assert fspath(path1) == path1.strpath - @pytest.mark.skip("sys.version_info < (3,6)") def test_fspath_open(self, path1): - f = path1.join("opentestfile") - open(f) + f = path1.join("samplefile") + stream = open(f, encoding="utf-8") + stream.close() - @pytest.mark.skip("sys.version_info < (3,6)") def test_fspath_fsencode(self, path1): from os import fsencode diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 4948e3ff8ae..b9384008483 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -644,10 +644,9 @@ def test_invoke_with_invalid_type(self) -> None: ): pytest.main("-h") # type: ignore[arg-type] - def test_invoke_with_path(self, pytester: Pytester, capsys) -> None: + def test_invoke_with_path(self, pytester: Pytester) -> None: retcode = pytest.main([str(pytester.path)]) assert 
retcode == ExitCode.NO_TESTS_COLLECTED - out, err = capsys.readouterr() def test_invoke_plugin_api(self, capsys) -> None: class MyPlugin: @@ -655,7 +654,7 @@ def pytest_addoption(self, parser): parser.addoption("--myopt") pytest.main(["-h"], plugins=[MyPlugin()]) - out, err = capsys.readouterr() + out, _err = capsys.readouterr() assert "--myopt" in out def test_pyargs_importerror(self, pytester: Pytester, monkeypatch) -> None: @@ -1325,7 +1324,7 @@ def test_foo(async_fixture): pass """ ) - result = pytester.runpytest() + result = pytester.runpytest("-Wdefault::pytest.PytestRemovedIn9Warning") result.stdout.fnmatch_lines( [ "*== warnings summary ==*", @@ -1355,7 +1354,7 @@ def test_foo(async_fixture): ... """ ) - result = pytester.runpytest() + result = pytester.runpytest("-Wdefault::pytest.PytestRemovedIn9Warning") result.stdout.fnmatch_lines( [ "*== warnings summary ==*", @@ -1389,7 +1388,7 @@ def test_foo(async_fixture): pass """ ) - result = pytester.runpytest() + result = pytester.runpytest("-Wdefault::pytest.PytestRemovedIn9Warning") result.stdout.fnmatch_lines( [ "*== warnings summary ==*", @@ -1489,7 +1488,8 @@ def test_no_brokenpipeerror_message(pytester: Pytester) -> None: popen.stderr.close() -def test_function_return_non_none_error(pytester: Pytester) -> None: +@pytest.mark.filterwarnings("default") +def test_function_return_non_none_warning(pytester: Pytester) -> None: pytester.makepyfile( """ def test_stuff(): @@ -1497,7 +1497,6 @@ def test_stuff(): """ ) res = pytester.runpytest() - res.assert_outcomes(failed=1) res.stdout.fnmatch_lines(["*Did you mean to use `assert` instead of `return`?*"]) diff --git a/testing/code/test_code.py b/testing/code/test_code.py index 7ae5ad46100..ae5e0e949cf 100644 --- a/testing/code/test_code.py +++ b/testing/code/test_code.py @@ -86,9 +86,9 @@ def test_unicode_handling() -> None: value = "ąć".encode() def f() -> None: - raise Exception(value) + raise ValueError(value) - excinfo = pytest.raises(Exception, f) + excinfo = 
pytest.raises(ValueError, f) str(excinfo) diff --git a/testing/code/test_excinfo.py b/testing/code/test_excinfo.py index 555645030fc..70499fec893 100644 --- a/testing/code/test_excinfo.py +++ b/testing/code/test_excinfo.py @@ -263,7 +263,7 @@ def do_stuff() -> None: def reraise_me() -> None: import sys - exc, val, tb = sys.exc_info() + _exc, val, tb = sys.exc_info() assert val is not None raise val.with_traceback(tb) @@ -442,9 +442,9 @@ def test_division_zero(): assert result.ret != 0 match = [ - r"E .* AssertionError: Regex pattern did not match.", - r"E .* Regex: '\[123\]\+'", - r"E .* Input: 'division by zero'", + r"E\s+AssertionError: Regex pattern did not match.", + r"E\s+Expected regex: '\[123\]\+'", + r"E\s+Actual message: 'division by zero'", ] result.stdout.re_match_lines(match) result.stdout.no_fnmatch_line("*__tracebackhide__ = True*") @@ -1897,19 +1897,23 @@ def test_nested_multiple() -> None: @pytest.mark.parametrize("tbstyle", ("long", "short", "auto", "line", "native")) -def test_all_entries_hidden(pytester: Pytester, tbstyle: str) -> None: +@pytest.mark.parametrize("group", (True, False), ids=("group", "bare")) +def test_all_entries_hidden(pytester: Pytester, tbstyle: str, group: bool) -> None: """Regression test for #10903.""" pytester.makepyfile( - """ + f""" + import sys + if sys.version_info < (3, 11): + from exceptiongroup import ExceptionGroup def test(): __tracebackhide__ = True - 1 / 0 + raise {'ExceptionGroup("", [ValueError("bar")])' if group else 'ValueError("bar")'} """ ) result = pytester.runpytest("--tb", tbstyle) assert result.ret == 1 if tbstyle != "line": - result.stdout.fnmatch_lines(["*ZeroDivisionError: division by zero"]) + result.stdout.fnmatch_lines(["*ValueError: bar"]) if tbstyle not in ("line", "native"): result.stdout.fnmatch_lines(["All traceback entries are hidden.*"]) diff --git a/testing/code/test_source.py b/testing/code/test_source.py index 321372d4b59..e413af3766e 100644 --- a/testing/code/test_source.py +++ 
b/testing/code/test_source.py @@ -399,7 +399,7 @@ def getstatement(lineno: int, source) -> Source: from _pytest._code.source import getstatementrange_ast src = Source(source) - ast, start, end = getstatementrange_ast(lineno, src) + _ast, start, end = getstatementrange_ast(lineno, src) return src[start:end] @@ -418,7 +418,7 @@ def test_comment_and_no_newline_at_end() -> None: "# vim: filetype=pyopencl:fdm=marker", ] ) - ast, start, end = getstatementrange_ast(1, source) + _ast, _start, end = getstatementrange_ast(1, source) assert end == 2 diff --git a/testing/conftest.py b/testing/conftest.py index 251b430e9cd..663c9d80b3e 100644 --- a/testing/conftest.py +++ b/testing/conftest.py @@ -111,7 +111,7 @@ def write(self, msg, **kw): def _write_source(self, lines, indents=()): if not indents: indents = [""] * len(lines) - for indent, line in zip(indents, lines): + for indent, line in zip(indents, lines, strict=True): self.line(indent + line) def line(self, line, **kw): @@ -237,3 +237,15 @@ def mock_timing(monkeypatch: MonkeyPatch): result = MockTiming() result.patch(monkeypatch) return result + + +@pytest.fixture(autouse=True) +def remove_ci_env_var(monkeypatch: MonkeyPatch, request: pytest.FixtureRequest) -> None: + """Make the test insensitive if it is running in CI or not. + + Use `@pytest.mark.keep_ci_var` in a test to avoid applying this fixture, letting the test + see the real `CI` variable (if present). 
+ """ + has_keep_ci_mark = request.node.get_closest_marker("keep_ci_var") is not None + if not has_keep_ci_mark: + monkeypatch.delenv("CI", raising=False) diff --git a/testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub2/conftest.py b/testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub2/conftest.py index 112d1e05f27..0185628c3a0 100644 --- a/testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub2/conftest.py +++ b/testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub2/conftest.py @@ -1,9 +1,11 @@ # mypy: allow-untyped-defs from __future__ import annotations +from _pytest.fixtures import FixtureLookupError import pytest @pytest.fixture def arg2(request): - pytest.raises(Exception, request.getfixturevalue, "arg1") + with pytest.raises(FixtureLookupError): + request.getfixturevalue("arg1") diff --git a/testing/io/test_terminalwriter.py b/testing/io/test_terminalwriter.py index 1f38d6f15d9..9aa89da0e41 100644 --- a/testing/io/test_terminalwriter.py +++ b/testing/io/test_terminalwriter.py @@ -224,6 +224,7 @@ def test_NO_COLOR_and_FORCE_COLOR( def test_empty_NO_COLOR_and_FORCE_COLOR_ignored(monkeypatch: MonkeyPatch) -> None: + monkeypatch.setenv("TERM", "xterm-256color") monkeypatch.setitem(os.environ, "NO_COLOR", "") monkeypatch.setitem(os.environ, "FORCE_COLOR", "") assert_color(True, True) diff --git a/testing/logging/test_reporting.py b/testing/logging/test_reporting.py index cf54788e246..4974532e888 100644 --- a/testing/logging/test_reporting.py +++ b/testing/logging/test_reporting.py @@ -589,7 +589,8 @@ def test_log_cli(request): ) def test_log_cli_auto_enable(pytester: Pytester, cli_args: str) -> None: """Check that live logs are enabled if --log-level or --log-cli-level is passed on the CLI. - It should not be auto enabled if the same configs are set on the INI file. 
+ + It should not be auto enabled if the same configs are set on the configuration file. """ pytester.makepyfile( """ diff --git a/testing/plugins_integration/pytest.ini b/testing/plugins_integration/pytest.ini index 86058fbbac8..b0eb9c3806f 100644 --- a/testing/plugins_integration/pytest.ini +++ b/testing/plugins_integration/pytest.ini @@ -1,5 +1,5 @@ [pytest] -addopts = --strict-markers +strict_markers = True asyncio_mode = strict filterwarnings = error::pytest.PytestWarning diff --git a/testing/plugins_integration/requirements.txt b/testing/plugins_integration/requirements.txt index e8010b23113..f33ac01f848 100644 --- a/testing/plugins_integration/requirements.txt +++ b/testing/plugins_integration/requirements.txt @@ -1,15 +1,15 @@ -anyio[trio]==4.9.0 -django==5.2.1 -pytest-asyncio==0.26.0 +anyio[trio]==4.11.0 +django==5.2.8 +pytest-asyncio==1.3.0 pytest-bdd==8.1.0 -pytest-cov==6.1.1 +pytest-cov==7.0.0 pytest-django==4.11.1 pytest-flakes==4.0.5 pytest-html==4.1.1 -pytest-mock==3.14.1 -pytest-rerunfailures==15.1 -pytest-sugar==1.0.0 +pytest-mock==3.15.1 +pytest-rerunfailures==16.1 +pytest-sugar==1.1.1 pytest-trio==0.8.0 pytest-twisted==1.14.3 -twisted==24.11.0 +twisted==25.5.0 pytest-xvfb==3.1.1 diff --git a/testing/python/approx.py b/testing/python/approx.py index 75b57b6965c..481df80565c 100644 --- a/testing/python/approx.py +++ b/testing/python/approx.py @@ -2,12 +2,16 @@ from __future__ import annotations from contextlib import contextmanager +import decimal from decimal import Decimal from fractions import Fraction +from math import inf +from math import nan from math import sqrt import operator from operator import eq from operator import ne +import re from _pytest.pytester import Pytester from _pytest.python_api import _recursive_sequence_map @@ -15,9 +19,6 @@ from pytest import approx -inf, nan = float("inf"), float("nan") - - @pytest.fixture def mocked_doctest_runner(monkeypatch): import doctest @@ -76,7 +77,7 @@ def do_assert(lhs, rhs, expected_message, 
verbosity_level=0): ) for i, (obtained_line, expected_line) in enumerate( - zip(obtained_message, expected_message) + zip(obtained_message, expected_message, strict=True) ): regex = re.compile(expected_line) assert regex.match(obtained_line) is not None, ( @@ -740,6 +741,17 @@ def test_dict_for_div_by_zero(self, assert_approx_raises_regex): ], ) + def test_dict_differing_lengths(self, assert_approx_raises_regex): + assert_approx_raises_regex( + {"a": 0}, + {"a": 0, "b": 1}, + [ + " ", + r" Impossible to compare mappings with different sizes\.", + r" Lengths: 2 and 1", + ], + ) + def test_numpy_array(self): np = pytest.importorskip("numpy") @@ -1015,6 +1027,11 @@ def __len__(self): expected_repr = "approx([1 ± 1.0e-06, 2 ± 2.0e-06, 3 ± 3.0e-06, 4 ± 4.0e-06])" assert repr(approx(expected)) == expected_repr + def test_decimal_approx_repr(self, monkeypatch) -> None: + monkeypatch.setitem(decimal.getcontext().traps, decimal.FloatOperation, True) + approx_obj = pytest.approx(decimal.Decimal("2.60")) + assert decimal.Decimal("2.600001") == approx_obj + def test_allow_ordered_sequences_only(self) -> None: """pytest.approx() should raise an error on unordered sequences (#9692).""" with pytest.raises(TypeError, match="only supports ordered sequences"): @@ -1031,6 +1048,60 @@ def test_strange_sequence(self): assert b == pytest.approx(a, abs=2) assert b != pytest.approx(a, abs=0.5) + def test_approx_dicts_with_mismatch_on_keys(self) -> None: + """https://github.com/pytest-dev/pytest/issues/13816""" + expected = {"a": 1, "b": 3} + actual = {"a": 1, "c": 3} + + with pytest.raises( + AssertionError, + match=re.escape( + "comparison failed.\n Mappings has different keys: " + "expected dict_keys(['a', 'b']) but got dict_keys(['a', 'c'])" + ), + ): + assert actual == approx(expected) + + def test_approx_on_unordered_mapping_with_mismatch( + self, pytester: Pytester + ) -> None: + """https://github.com/pytest-dev/pytest/issues/12444""" + pytester.makepyfile( + """ + import pytest + + 
def test_approx_on_unordered_mapping_with_mismatch(): + expected = {"a": 1, "b": 2, "c": 3, "d": 4} + actual = {"d": 4, "c": 5, "a": 8, "b": 2} + assert actual == pytest.approx(expected) + """ + ) + result = pytester.runpytest() + result.assert_outcomes(failed=1) + result.stdout.fnmatch_lines( + [ + "*comparison failed.**Mismatched elements: 2 / 4:*", + "*Max absolute difference: 7*", + "*Index | Obtained | Expected *", + "* a * | 8 * | 1 *", + "* c * | 5 * | 3 *", + ] + ) + + def test_approx_on_unordered_mapping_matching(self, pytester: Pytester) -> None: + """https://github.com/pytest-dev/pytest/issues/12444""" + pytester.makepyfile( + """ + import pytest + def test_approx_on_unordered_mapping_matching(): + expected = {"a": 1, "b": 2, "c": 3, "d": 4} + actual = {"d": 4, "c": 3, "a": 1, "b": 2} + assert actual == pytest.approx(expected) + """ + ) + result = pytester.runpytest() + result.assert_outcomes(passed=1) + class MyVec3: # incomplete """sequence like""" @@ -1079,10 +1150,10 @@ def test_map_over_nested_lists(self): ] def test_map_over_mixed_sequence(self): - assert _recursive_sequence_map(sqrt, [4, (25, 64), [(49)]]) == [ + assert _recursive_sequence_map(sqrt, [4, (25, 64), [49]]) == [ 2, (5, 8), - [(7)], + [7], ] def test_map_over_sequence_like(self): diff --git a/testing/python/collect.py b/testing/python/collect.py index 530f1c340ff..b26931007d9 100644 --- a/testing/python/collect.py +++ b/testing/python/collect.py @@ -1075,7 +1075,8 @@ class TestTracebackCutting: def test_skip_simple(self): with pytest.raises(pytest.skip.Exception) as excinfo: pytest.skip("xxx") - assert excinfo.traceback[-1].frame.code.name == "skip" + if sys.version_info >= (3, 11): + assert excinfo.traceback[-1].frame.code.raw.co_qualname == "_Skip.__call__" assert excinfo.traceback[-1].ishidden(excinfo) assert excinfo.traceback[-2].frame.code.name == "test_skip_simple" assert not excinfo.traceback[-2].ishidden(excinfo) @@ -1272,10 +1273,10 @@ def test_bar(self): ) classcol = 
pytester.collect_by_name(modcol, "TestClass") assert isinstance(classcol, Class) - path, lineno, msg = classcol.reportinfo() + _path, _lineno, _msg = classcol.reportinfo() func = next(iter(classcol.collect())) assert isinstance(func, Function) - path, lineno, msg = func.reportinfo() + _path, _lineno, _msg = func.reportinfo() def test_customized_python_discovery(pytester: Pytester) -> None: @@ -1488,7 +1489,7 @@ def test_package_collection_init_given_as_argument(pytester: Pytester) -> None: Module, not the entire package. """ p = pytester.copy_example("collect/package_init_given_as_arg") - items, hookrecorder = pytester.inline_genitems(p / "pkg" / "__init__.py") + items, _hookrecorder = pytester.inline_genitems(p / "pkg" / "__init__.py") assert len(items) == 1 assert items[0].name == "test_init" diff --git a/testing/python/fixtures.py b/testing/python/fixtures.py index fb76fe6cf96..6a65dce3c4d 100644 --- a/testing/python/fixtures.py +++ b/testing/python/fixtures.py @@ -1,6 +1,7 @@ # mypy: allow-untyped-defs from __future__ import annotations +from itertools import zip_longest import os from pathlib import Path import sys @@ -749,7 +750,7 @@ def test_request_garbage(self, pytester: Pytester) -> None: """ import sys import pytest - from _pytest.fixtures import PseudoFixtureDef + from _pytest.fixtures import RequestFixtureDef import gc @pytest.fixture(autouse=True) @@ -762,7 +763,7 @@ def something(request): try: gc.collect() - leaked = [x for _ in gc.garbage if isinstance(_, PseudoFixtureDef)] + leaked = [x for _ in gc.garbage if isinstance(_, RequestFixtureDef)] assert leaked == [] finally: gc.set_debug(original) @@ -1149,7 +1150,7 @@ def test_session_scoped_unavailable_attributes(self, session_request, name): class TestRequestMarking: def test_applymarker(self, pytester: Pytester) -> None: - item1, item2 = pytester.getitems( + item1, _item2 = pytester.getitems( """ import pytest @@ -3043,7 +3044,7 @@ def test_4(modarg, arg): ] import pprint - 
pprint.pprint(list(zip(values, expected))) + pprint.pprint(list(zip_longest(values, expected))) assert values == expected def test_parametrized_fixture_teardown_order(self, pytester: Pytester) -> None: @@ -5068,3 +5069,299 @@ def test_method(self, /, fix): ) result = pytester.runpytest() result.assert_outcomes(passed=1) + + +def test_parametrization_dependency_pruning(pytester: Pytester) -> None: + """Test that when a fixture is dynamically shadowed by parameterization, it + is properly pruned and not executed.""" + pytester.makepyfile( + """ + import pytest + + + # This fixture should never run because shadowed_fixture is parametrized. + @pytest.fixture + def boom(): + raise RuntimeError("BOOM!") + + + # This fixture is shadowed by metafunc.parametrize in pytest_generate_tests. + @pytest.fixture + def shadowed_fixture(boom): + return "fixture_value" + + + # Dynamically parametrize shadowed_fixture, replacing the fixture with direct values. + def pytest_generate_tests(metafunc): + if "shadowed_fixture" in metafunc.fixturenames: + metafunc.parametrize("shadowed_fixture", ["param1", "param2"]) + + + # This test should receive shadowed_fixture as a parametrized value, and + # boom should not explode. + def test_shadowed(shadowed_fixture): + assert shadowed_fixture in ["param1", "param2"] + """ + ) + result = pytester.runpytest() + result.assert_outcomes(passed=2) + + +def test_fixture_closure_with_overrides(pytester: Pytester) -> None: + """Test that an item's static fixture closure properly includes transitive + dependencies through overridden fixtures (#13773).""" + pytester.makeconftest( + """ + import pytest + + @pytest.fixture + def db(): pass + + @pytest.fixture + def app(db): pass + """ + ) + pytester.makepyfile( + """ + import pytest + + # Overrides conftest-level `app` and requests it. + @pytest.fixture + def app(app): pass + + class TestClass: + # Overrides module-level `app` and requests it. 
+ @pytest.fixture + def app(self, app): pass + + def test_something(self, request, app): + # Both dynamic and static fixture closures should include 'db'. + assert 'db' in request.fixturenames + assert 'db' in request.node.fixturenames + # No dynamic dependencies, should be equal. + assert set(request.fixturenames) == set(request.node.fixturenames) + """ + ) + result = pytester.runpytest("-v") + result.assert_outcomes(passed=1) + + +def test_fixture_closure_with_overrides_and_intermediary(pytester: Pytester) -> None: + """Test that an item's static fixture closure properly includes transitive + dependencies through overridden fixtures (#13773). + + A more complicated case than test_fixture_closure_with_overrides, adds an + intermediary so the override chain is not direct. + """ + pytester.makeconftest( + """ + import pytest + + @pytest.fixture + def db(): pass + + @pytest.fixture + def app(db): pass + + @pytest.fixture + def intermediate(app): pass + """ + ) + pytester.makepyfile( + """ + import pytest + + # Overrides conftest-level `app` and requests it. + @pytest.fixture + def app(intermediate): pass + + class TestClass: + # Overrides module-level `app` and requests it. + @pytest.fixture + def app(self, app): pass + + def test_something(self, request, app): + # Both dynamic and static fixture closures should include 'db'. + assert 'db' in request.fixturenames + assert 'db' in request.node.fixturenames + # No dynamic dependencies, should be equal. + assert set(request.fixturenames) == set(request.node.fixturenames) + """ + ) + result = pytester.runpytest("-v") + result.assert_outcomes(passed=1) + + +def test_fixture_closure_with_broken_override_chain(pytester: Pytester) -> None: + """Test that an item's static fixture closure properly includes transitive + dependencies through overridden fixtures (#13773). + + A more complicated case than test_fixture_closure_with_overrides, one of the + fixtures in the chain doesn't call its super, so it shouldn't be included. 
+ """ + pytester.makeconftest( + """ + import pytest + + @pytest.fixture + def db(): pass + + @pytest.fixture + def app(db): pass + """ + ) + pytester.makepyfile( + """ + import pytest + + # Overrides conftest-level `app` and *doesn't* request it. + @pytest.fixture + def app(): pass + + class TestClass: + # Overrides module-level `app` and requests it. + @pytest.fixture + def app(self, app): pass + + def test_something(self, request, app): + # Both dynamic and static fixture closures should include 'db'. + assert 'db' not in request.fixturenames + assert 'db' not in request.node.fixturenames + # No dynamic dependencies, should be equal. + assert set(request.fixturenames) == set(request.node.fixturenames) + """ + ) + result = pytester.runpytest("-v") + result.assert_outcomes(passed=1) + + +def test_fixture_closure_handles_circular_dependencies(pytester: Pytester) -> None: + """Test that getfixtureclosure properly handles circular dependencies. + + The test will error in the runtest phase due to the fixture loop, + but the closure computation still completes. + """ + pytester.makepyfile( + """ + import pytest + + # Direct circular dependency. + @pytest.fixture + def fix_a(fix_b): pass + + @pytest.fixture + def fix_b(fix_a): pass + + # Indirect circular dependency through multiple fixtures. 
+ @pytest.fixture + def fix_x(fix_y): pass + + @pytest.fixture + def fix_y(fix_z): pass + + @pytest.fixture + def fix_z(fix_x): pass + + def test_circular_deps(fix_a, fix_x): + pass + """ + ) + items, _hookrec = pytester.inline_genitems() + assert isinstance(items[0], Function) + assert items[0].fixturenames == ["fix_a", "fix_x", "fix_b", "fix_y", "fix_z"] + + +def test_fixture_closure_handles_diamond_dependencies(pytester: Pytester) -> None: + """Test that getfixtureclosure properly handles diamond dependencies.""" + pytester.makepyfile( + """ + import pytest + + @pytest.fixture + def db(): pass + + @pytest.fixture + def user(db): pass + + @pytest.fixture + def session(db): pass + + @pytest.fixture + def app(user, session): pass + + def test_diamond_deps(request, app): + assert request.node.fixturenames == ["request", "app", "user", "db", "session"] + assert request.fixturenames == ["request", "app", "user", "db", "session"] + """ + ) + result = pytester.runpytest("-v") + result.assert_outcomes(passed=1) + + +def test_fixture_closure_with_complex_override_and_shared_deps( + pytester: Pytester, +) -> None: + """Test that shared dependencies in override chains are processed only once.""" + pytester.makeconftest( + """ + import pytest + + @pytest.fixture + def db(): pass + + @pytest.fixture + def cache(): pass + + @pytest.fixture + def settings(): pass + + @pytest.fixture + def app(db, cache, settings): pass + """ + ) + pytester.makepyfile( + """ + import pytest + + # Override app, but also directly use cache and settings. + # This creates multiple paths to the same fixtures. + @pytest.fixture + def app(app, cache, settings): pass + + class TestClass: + # Another override that uses both app and cache. 
+ @pytest.fixture + def app(self, app, cache): pass + + def test_shared_deps(self, request, app): + assert request.node.fixturenames == ["request", "app", "db", "cache", "settings"] + """ + ) + result = pytester.runpytest("-v") + result.assert_outcomes(passed=1) + + +def test_fixture_closure_with_parametrize_ignore(pytester: Pytester) -> None: + """Test that getfixtureclosure properly handles parametrization argnames + which override a fixture.""" + pytester.makepyfile( + """ + import pytest + + @pytest.fixture + def fix1(fix2): pass + + @pytest.fixture + def fix2(fix3): pass + + @pytest.fixture + def fix3(): pass + + @pytest.mark.parametrize('fix2', ['2']) + def test_it(request, fix1): + assert request.node.fixturenames == ["request", "fix1", "fix2"] + assert request.fixturenames == ["request", "fix1", "fix2"] + """ + ) + result = pytester.runpytest("-v") + result.assert_outcomes(passed=1) diff --git a/testing/python/integration.py b/testing/python/integration.py index c96b6e4c260..d8f8d0ffae9 100644 --- a/testing/python/integration.py +++ b/testing/python/integration.py @@ -21,8 +21,8 @@ def wrap(f): def wrapped_func(x, y, z): pass - fs, lineno = getfslineno(wrapped_func) - fs2, lineno2 = getfslineno(wrap) + _fs, lineno = getfslineno(wrapped_func) + _fs2, lineno2 = getfslineno(wrap) assert lineno > lineno2, "getfslineno does not unwrap correctly" diff --git a/testing/python/metafunc.py b/testing/python/metafunc.py index 7ae26de3a18..20ccacf4b73 100644 --- a/testing/python/metafunc.py +++ b/testing/python/metafunc.py @@ -92,7 +92,7 @@ def func(x, y): with pytest.raises(pytest.Collector.CollectError): metafunc.parametrize("y", [5, 6]) - with pytest.raises(TypeError, match="^ids must be a callable or an iterable$"): + with pytest.raises(TypeError, match=r"^ids must be a callable or an iterable$"): metafunc.parametrize("y", [5, 6], ids=42) # type: ignore[arg-type] def test_parametrize_error_iterator(self) -> None: @@ -429,7 +429,7 @@ def test_idmaker_autoname(self) 
-> None: def test_idmaker_with_bytes_regex(self) -> None: result = IdMaker( - ("a"), [pytest.param(re.compile(b"foo"), 1.0)], None, None, None, None, None + ("a"), [pytest.param(re.compile(b"foo"))], None, None, None, None, None ).make_unique_parameterset_ids() assert result == ["foo"] diff --git a/testing/python/raises.py b/testing/python/raises.py index 40f9afea3ba..c9d57918a83 100644 --- a/testing/python/raises.py +++ b/testing/python/raises.py @@ -253,8 +253,8 @@ def test_raises_match(self) -> None: msg = "with base 16" expr = ( "Regex pattern did not match.\n" - f" Regex: {msg!r}\n" - " Input: \"invalid literal for int() with base 10: 'asdf'\"" + f" Expected regex: {msg!r}\n" + f" Actual message: \"invalid literal for int() with base 10: 'asdf'\"" ) with pytest.raises(AssertionError, match="^" + re.escape(expr) + "$"): with pytest.raises(ValueError, match=msg): @@ -289,7 +289,10 @@ def test_match_failure_string_quoting(self): with pytest.raises(AssertionError, match="'foo"): raise AssertionError("'bar") (msg,) = excinfo.value.args - assert msg == '''Regex pattern did not match.\n Regex: "'foo"\n Input: "'bar"''' + assert ( + msg + == '''Regex pattern did not match.\n Expected regex: "'foo"\n Actual message: "'bar"''' + ) def test_match_failure_exact_string_message(self): message = "Oh here is a message with (42) numbers in parameters" @@ -299,8 +302,8 @@ def test_match_failure_exact_string_message(self): (msg,) = excinfo.value.args assert msg == ( "Regex pattern did not match.\n" - " Regex: 'Oh here is a message with (42) numbers in parameters'\n" - " Input: 'Oh here is a message with (42) numbers in parameters'\n" + " Expected regex: 'Oh here is a message with (42) numbers in parameters'\n" + " Actual message: 'Oh here is a message with (42) numbers in parameters'\n" " Did you mean to `re.escape()` the regex?" 
) @@ -364,9 +367,7 @@ def test_raises_context_manager_with_kwargs(self): def test_expected_exception_is_not_a_baseexception(self) -> None: with pytest.raises( TypeError, - match=wrap_escape( - "expected exception must be a BaseException type, not 'str'" - ), + match=wrap_escape("Expected a BaseException type, but got 'str'"), ): with pytest.raises("hello"): # type: ignore[call-overload] pass # pragma: no cover @@ -377,7 +378,7 @@ class NotAnException: with pytest.raises( ValueError, match=wrap_escape( - "expected exception must be a BaseException type, not 'NotAnException'" + "Expected a BaseException type, but got 'NotAnException'" ), ): with pytest.raises(NotAnException): # type: ignore[type-var] @@ -385,9 +386,7 @@ class NotAnException: with pytest.raises( TypeError, - match=wrap_escape( - "expected exception must be a BaseException type, not 'str'" - ), + match=wrap_escape("Expected a BaseException type, but got 'str'"), ): with pytest.raises(("hello", NotAnException)): # type: ignore[arg-type] pass # pragma: no cover @@ -395,8 +394,8 @@ class NotAnException: def test_issue_11872(self) -> None: """Regression test for #11872. - urllib.error.HTTPError on Python<=3.9 raises KeyError instead of - AttributeError on invalid attribute access. + urllib.error.HTTPError on some Python 3.10/11 minor releases raises + KeyError instead of AttributeError on invalid attribute access. 
https://github.com/python/cpython/issues/98778 """ diff --git a/testing/python/raises_group.py b/testing/python/raises_group.py index 04979c32e98..e5e3b5cd2dc 100644 --- a/testing/python/raises_group.py +++ b/testing/python/raises_group.py @@ -36,19 +36,18 @@ def fails_raises_group(msg: str, add_prefix: bool = True) -> RaisesExc[Failed]: def test_raises_group() -> None: with pytest.raises( TypeError, - match=wrap_escape("expected exception must be a BaseException type, not 'int'"), + match=wrap_escape("Expected a BaseException type, but got 'int'"), ): RaisesExc(5) # type: ignore[call-overload] with pytest.raises( ValueError, - match=wrap_escape("expected exception must be a BaseException type, not 'int'"), + match=wrap_escape("Expected a BaseException type, but got 'int'"), ): RaisesExc(int) # type: ignore[type-var] with pytest.raises( TypeError, - # TODO: bad sentence structure match=wrap_escape( - "expected exception must be a BaseException type, RaisesExc, or RaisesGroup, not an exception instance (ValueError)", + "Expected a BaseException type, RaisesExc, or RaisesGroup, but got an exception instance: ValueError", ), ): RaisesGroup(ValueError()) # type: ignore[call-overload] @@ -292,7 +291,7 @@ def test_catch_unwrapped_exceptions() -> None: # if users want one of several exception types they need to use a RaisesExc # (which the error message suggests) with RaisesGroup( - RaisesExc(check=lambda e: isinstance(e, (SyntaxError, ValueError))), + RaisesExc(check=lambda e: isinstance(e, SyntaxError | ValueError)), allow_unwrapped=True, ): raise ValueError @@ -382,8 +381,8 @@ def test_match() -> None: with ( fails_raises_group( "Regex pattern did not match the `ExceptionGroup()`.\n" - " Regex: 'foo'\n" - " Input: 'bar'" + " Expected regex: 'foo'\n" + " Actual message: 'bar'" ), RaisesGroup(ValueError, match="foo"), ): @@ -396,8 +395,8 @@ def test_match() -> None: with ( fails_raises_group( "Regex pattern did not match the `ExceptionGroup()`.\n" - " Regex: 'foo'\n" - " 
Input: 'bar'\n" + " Expected regex: 'foo'\n" + " Actual message: 'bar'\n" " but matched the expected `ValueError`.\n" " You might want `RaisesGroup(RaisesExc(ValueError, match='foo'))`" ), @@ -570,8 +569,8 @@ def test_assert_message() -> None: " ExceptionGroup('', [RuntimeError()]):\n" " RaisesGroup(ValueError): `RuntimeError()` is not an instance of `ValueError`\n" " RaisesGroup(ValueError, match='a'): Regex pattern did not match the `ExceptionGroup()`.\n" - " Regex: 'a'\n" - " Input: ''\n" + " Expected regex: 'a'\n" + " Actual message: ''\n" " RuntimeError():\n" " RaisesGroup(ValueError): `RuntimeError()` is not an exception group\n" " RaisesGroup(ValueError, match='a'): `RuntimeError()` is not an exception group", @@ -634,8 +633,8 @@ def test_assert_message() -> None: fails_raises_group( # TODO: did not match Exceptiongroup('h(ell)o', ...) ? "Raised exception group did not match: Regex pattern did not match the `ExceptionGroup()`.\n" - " Regex: 'h(ell)o'\n" - " Input: 'h(ell)o'\n" + " Expected regex: 'h(ell)o'\n" + " Actual message: 'h(ell)o'\n" " Did you mean to `re.escape()` the regex?", add_prefix=False, # to see the full structure ), @@ -645,8 +644,8 @@ def test_assert_message() -> None: with ( fails_raises_group( "RaisesExc(match='h(ell)o'): Regex pattern did not match.\n" - " Regex: 'h(ell)o'\n" - " Input: 'h(ell)o'\n" + " Expected regex: 'h(ell)o'\n" + " Actual message: 'h(ell)o'\n" " Did you mean to `re.escape()` the regex?", ), RaisesGroup(RaisesExc(match="h(ell)o")), @@ -799,8 +798,8 @@ def test_suggestion_on_nested_and_brief_error() -> None: "The following raised exceptions did not find a match\n" " ExceptionGroup('^hello', [Exception()]):\n" " RaisesGroup(Exception, match='^hello'): Regex pattern did not match the `ExceptionGroup()`.\n" - " Regex: '^hello'\n" - " Input: '^hello'\n" + " Expected regex: '^hello'\n" + " Actual message: '^hello'\n" " Did you mean to `re.escape()` the regex?\n" " Unexpected nested `ExceptionGroup()`, expected 
`ValueError`" ), @@ -830,8 +829,8 @@ def test_assert_message_nested() -> None: " RaisesGroup(ValueError): `TypeError()` is not an instance of `ValueError`\n" " RaisesGroup(RaisesGroup(ValueError)): RaisesGroup(ValueError): `TypeError()` is not an exception group\n" " RaisesGroup(RaisesExc(TypeError, match='foo')): RaisesExc(TypeError, match='foo'): Regex pattern did not match.\n" - " Regex: 'foo'\n" - " Input: 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'\n" + " Expected regex: 'foo'\n" + " Actual message: 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'\n" " RaisesGroup(TypeError, ValueError): 1 matched exception. Too few exceptions raised, found no match for: [ValueError]\n" " ExceptionGroup('Exceptions from Trio nursery', [TypeError('cccccccccccccccccccccccccccccc'), TypeError('dddddddddddddddddddddddddddddd')]):\n" " RaisesGroup(ValueError): \n" @@ -856,12 +855,12 @@ def test_assert_message_nested() -> None: " The following raised exceptions did not find a match\n" " TypeError('cccccccccccccccccccccccccccccc'):\n" " RaisesExc(TypeError, match='foo'): Regex pattern did not match.\n" - " Regex: 'foo'\n" - " Input: 'cccccccccccccccccccccccccccccc'\n" + " Expected regex: 'foo'\n" + " Actual message: 'cccccccccccccccccccccccccccccc'\n" " TypeError('dddddddddddddddddddddddddddddd'):\n" " RaisesExc(TypeError, match='foo'): Regex pattern did not match.\n" - " Regex: 'foo'\n" - " Input: 'dddddddddddddddddddddddddddddd'\n" + " Expected regex: 'foo'\n" + " Actual message: 'dddddddddddddddddddddddddddddd'\n" " RaisesGroup(TypeError, ValueError): \n" " 1 matched exception. 
\n" " The following expected exceptions did not find a match:\n" @@ -945,8 +944,8 @@ def test_misordering_example() -> None: " It matches `ValueError` which was paired with `ValueError('foo')`\n" " It matches `ValueError` which was paired with `ValueError('foo')`\n" " RaisesExc(ValueError, match='foo'): Regex pattern did not match.\n" - " Regex: 'foo'\n" - " Input: 'bar'\n" + " Expected regex: 'foo'\n" + " Actual message: 'bar'\n" "There exist a possible match when attempting an exhaustive check, but RaisesGroup uses a greedy algorithm. Please make your expected exceptions more stringent with `RaisesExc` etc so the greedy algorithm can function." ), RaisesGroup( @@ -1036,34 +1035,34 @@ def test_identity_oopsies() -> None: "The following raised exceptions did not find a match\n" " ValueError('foo'):\n" " RaisesExc(match='bar'): Regex pattern did not match.\n" - " Regex: 'bar'\n" - " Input: 'foo'\n" + " Expected regex: 'bar'\n" + " Actual message: 'foo'\n" " RaisesExc(match='bar'): Regex pattern did not match.\n" - " Regex: 'bar'\n" - " Input: 'foo'\n" + " Expected regex: 'bar'\n" + " Actual message: 'foo'\n" " RaisesExc(match='bar'): Regex pattern did not match.\n" - " Regex: 'bar'\n" - " Input: 'foo'\n" + " Expected regex: 'bar'\n" + " Actual message: 'foo'\n" " ValueError('foo'):\n" " RaisesExc(match='bar'): Regex pattern did not match.\n" - " Regex: 'bar'\n" - " Input: 'foo'\n" + " Expected regex: 'bar'\n" + " Actual message: 'foo'\n" " RaisesExc(match='bar'): Regex pattern did not match.\n" - " Regex: 'bar'\n" - " Input: 'foo'\n" + " Expected regex: 'bar'\n" + " Actual message: 'foo'\n" " RaisesExc(match='bar'): Regex pattern did not match.\n" - " Regex: 'bar'\n" - " Input: 'foo'\n" + " Expected regex: 'bar'\n" + " Actual message: 'foo'\n" " ValueError('foo'):\n" " RaisesExc(match='bar'): Regex pattern did not match.\n" - " Regex: 'bar'\n" - " Input: 'foo'\n" + " Expected regex: 'bar'\n" + " Actual message: 'foo'\n" " RaisesExc(match='bar'): Regex pattern did 
not match.\n" - " Regex: 'bar'\n" - " Input: 'foo'\n" + " Expected regex: 'bar'\n" + " Actual message: 'foo'\n" " RaisesExc(match='bar'): Regex pattern did not match.\n" - " Regex: 'bar'\n" - " Input: 'foo'" + " Expected regex: 'bar'\n" + " Actual message: 'foo'" ), RaisesGroup(m, m, m), ): @@ -1078,9 +1077,7 @@ def test_raisesexc() -> None: RaisesExc() # type: ignore[call-overload] with pytest.raises( ValueError, - match=wrap_escape( - "expected exception must be a BaseException type, not 'object'" - ), + match=wrap_escape("Expected a BaseException type, but got 'object'"), ): RaisesExc(object) # type: ignore[type-var] @@ -1120,7 +1117,9 @@ def test_raisesexc() -> None: # currently RaisesGroup says "Raised exception did not match" but RaisesExc doesn't... with pytest.raises( AssertionError, - match=wrap_escape("Regex pattern did not match.\n Regex: 'foo'\n Input: 'bar'"), + match=wrap_escape( + "Regex pattern did not match.\n Expected regex: 'foo'\n Actual message: 'bar'" + ), ): with RaisesExc(TypeError, match="foo"): raise TypeError("bar") @@ -1132,8 +1131,8 @@ def test_raisesexc_match() -> None: with ( fails_raises_group( "RaisesExc(ValueError, match='foo'): Regex pattern did not match.\n" - " Regex: 'foo'\n" - " Input: 'bar'" + " Expected regex: 'foo'\n" + " Actual message: 'bar'" ), RaisesGroup(RaisesExc(ValueError, match="foo")), ): @@ -1145,8 +1144,8 @@ def test_raisesexc_match() -> None: with ( fails_raises_group( "RaisesExc(match='foo'): Regex pattern did not match.\n" - " Regex: 'foo'\n" - " Input: 'bar'" + " Expected regex: 'foo'\n" + " Actual message: 'bar'" ), RaisesGroup(RaisesExc(match="foo")), ): @@ -1304,7 +1303,7 @@ def test_parametrizing_conditional_raisesgroup( def test_annotated_group() -> None: # repr depends on if exceptiongroup backport is being used or not t = repr(ExceptionGroup[ValueError]) - msg = "Only `ExceptionGroup[Exception]` or `BaseExceptionGroup[BaseExeption]` are accepted as generic types but got `{}`. 
As `raises` will catch all instances of the specified group regardless of the generic argument specific nested exceptions has to be checked with `RaisesGroup`." + msg = "Only `ExceptionGroup[Exception]` or `BaseExceptionGroup[BaseException]` are accepted as generic types but got `{}`. As `raises` will catch all instances of the specified group regardless of the generic argument specific nested exceptions has to be checked with `RaisesGroup`." fail_msg = wrap_escape(msg.format(t)) with pytest.raises(ValueError, match=fail_msg): @@ -1325,6 +1324,16 @@ def test_annotated_group() -> None: with RaisesExc(BaseExceptionGroup[BaseException]): raise BaseExceptionGroup("", [KeyboardInterrupt()]) + # assure AbstractRaises.is_baseexception is set properly + assert ( + RaisesGroup(ExceptionGroup[Exception]).expected_type() + == "ExceptionGroup(ExceptionGroup)" + ) + assert ( + RaisesGroup(BaseExceptionGroup[BaseException]).expected_type() + == "BaseExceptionGroup(BaseExceptionGroup)" + ) + def test_tuples() -> None: # raises has historically supported one of several exceptions being raised @@ -1339,7 +1348,7 @@ def test_tuples() -> None: with pytest.raises( TypeError, match=wrap_escape( - "expected exception must be a BaseException type, RaisesExc, or RaisesGroup, not 'tuple'.\n" + "Expected a BaseException type, RaisesExc, or RaisesGroup, but got 'tuple'.\n" "RaisesGroup does not support tuples of exception types when expecting one of " "several possible exception types like RaisesExc.\n" "If you meant to expect a group with multiple exceptions, list them as separate arguments." 
diff --git a/testing/test_assertion.py b/testing/test_assertion.py index 2c2830eb929..5179b13b0e9 100644 --- a/testing/test_assertion.py +++ b/testing/test_assertion.py @@ -567,6 +567,11 @@ def test_full_diff(): result = pytester.runpytest() result.stdout.fnmatch_lines(["E Full diff:"]) + # Setting CI to empty string is same as having it undefined + monkeypatch.setenv("CI", "") + result = pytester.runpytest() + result.stdout.fnmatch_lines(["E Use -v to get more diff"]) + monkeypatch.delenv("CI", raising=False) result = pytester.runpytest() result.stdout.fnmatch_lines(["E Use -v to get more diff"]) @@ -1465,6 +1470,17 @@ def test_many_lines(): result = pytester.runpytest("-vv") result.stdout.fnmatch_lines(["* 6*"]) + # Setting CI to empty string is same as having it undefined + monkeypatch.setenv("CI", "") + result = pytester.runpytest() + result.stdout.fnmatch_lines( + [ + "*+ 1*", + "*+ 3*", + f"*truncated ({expected_truncated_lines} lines hidden)*use*-vv*", + ] + ) + monkeypatch.setenv("CI", "1") result = pytester.runpytest() result.stdout.fnmatch_lines(["* 6*"]) diff --git a/testing/test_assertrewrite.py b/testing/test_assertrewrite.py index e2e448fe5e6..92664354470 100644 --- a/testing/test_assertrewrite.py +++ b/testing/test_assertrewrite.py @@ -132,7 +132,7 @@ def test_location_is_set(self) -> None: if isinstance(node, ast.Import): continue for n in [node, *ast.iter_child_nodes(node)]: - assert isinstance(n, (ast.stmt, ast.expr)) + assert isinstance(n, ast.stmt | ast.expr) for location in [ (n.lineno, n.col_offset), (n.end_lineno, n.end_col_offset), @@ -1552,7 +1552,9 @@ def test_simple_failure(): result.stdout.fnmatch_lines(["*E*assert (1 + 1) == 3"]) -class TestIssue10743: +class TestAssertionRewriteWalrusOperator: + """See #10743""" + def test_assertion_walrus_operator(self, pytester: Pytester) -> None: pytester.makepyfile( """ @@ -1719,6 +1721,22 @@ def test_walrus_operator_not_override_value(): result = pytester.runpytest() assert result.ret == 0 + def 
test_assertion_namedexpr_compare_left_overwrite( + self, pytester: Pytester + ) -> None: + pytester.makepyfile( + """ + def test_namedexpr_compare_left_overwrite(): + a = "Hello" + b = "World" + c = "Test" + assert (a := b) == c and (a := "Test") == "Test" + """ + ) + result = pytester.runpytest() + assert result.ret == 1 + result.stdout.fnmatch_lines(["*assert ('World' == 'Test'*"]) + class TestIssue11028: def test_assertion_walrus_operator_in_operand(self, pytester: Pytester) -> None: @@ -2197,9 +2215,9 @@ def test_simple(): ), ), ) -# fmt: on def test_get_assertion_exprs(src, expected) -> None: assert _get_assertion_exprs(src) == expected +# fmt: on def test_try_makedirs(monkeypatch, tmp_path: Path) -> None: @@ -2263,10 +2281,6 @@ def test_get_cache_dir(self, monkeypatch, prefix, source, expected) -> None: assert get_cache_dir(Path(source)) == Path(expected) - @pytest.mark.skipif( - sys.version_info[:2] == (3, 9) and sys.platform.startswith("win"), - reason="#9298", - ) def test_sys_pycache_prefix_integration( self, tmp_path, monkeypatch, pytester: Pytester ) -> None: diff --git a/testing/test_capture.py b/testing/test_capture.py index d9dacebd938..11fd18f08ff 100644 --- a/testing/test_capture.py +++ b/testing/test_capture.py @@ -76,7 +76,7 @@ def test_capturing_basic_api(self, method) -> None: assert outerr == ("", "") print("hello") capman.suspend_global_capture() - out, err = capman.read_global_capture() + out, _err = capman.read_global_capture() if method == "no": assert old == (sys.stdout, sys.stderr, sys.stdin) else: @@ -84,7 +84,7 @@ def test_capturing_basic_api(self, method) -> None: capman.resume_global_capture() print("hello") capman.suspend_global_capture() - out, err = capman.read_global_capture() + out, _err = capman.read_global_capture() if method != "no": assert out == "hello\n" capman.stop_global_capturing() @@ -563,7 +563,7 @@ def test_hello(capfd): @pytest.mark.parametrize("nl", ("\n", "\r\n", "\r")) def test_cafd_preserves_newlines(self, 
capfd, nl) -> None: print("test", end=nl) - out, err = capfd.readouterr() + out, _err = capfd.readouterr() assert out.endswith(nl) def test_capfdbinary(self, pytester: Pytester) -> None: @@ -868,7 +868,7 @@ def bad_snap(self): FDCapture.snap = bad_snap """ ) - result = pytester.runpytest_subprocess("-p", "pytest_xyz", "--version") + result = pytester.runpytest_subprocess("-p", "pytest_xyz") result.stderr.fnmatch_lines( ["*in bad_snap", " raise Exception('boom')", "Exception: boom"] ) @@ -983,8 +983,13 @@ def tmpfile(pytester: Pytester) -> Generator[BinaryIO]: def lsof_check(): pid = os.getpid() try: - out = subprocess.check_output(("lsof", "-p", str(pid))).decode() - except (OSError, subprocess.CalledProcessError, UnicodeDecodeError) as exc: + out = subprocess.check_output(("lsof", "-p", str(pid)), timeout=10).decode() + except ( + OSError, + UnicodeDecodeError, + subprocess.CalledProcessError, + subprocess.TimeoutExpired, + ) as exc: # about UnicodeDecodeError, see note on pytester pytest.skip(f"could not run 'lsof' ({exc!r})") yield @@ -1148,7 +1153,7 @@ def test_capture_results_accessible_by_attribute(self) -> None: def test_capturing_readouterr_unicode(self) -> None: with self.getcapture() as cap: print("hxąć") - out, err = cap.readouterr() + out, _err = cap.readouterr() assert out == "hxąć\n" def test_reset_twice_error(self) -> None: @@ -1180,8 +1185,8 @@ def test_capturing_error_recursive(self) -> None: print("cap1") with self.getcapture() as cap2: print("cap2") - out2, err2 = cap2.readouterr() - out1, err1 = cap1.readouterr() + out2, _err2 = cap2.readouterr() + out1, _err1 = cap1.readouterr() assert out1 == "cap1\n" assert out2 == "cap2\n" @@ -1226,8 +1231,8 @@ def test_capturing_error_recursive(self) -> None: print("cap1") with self.getcapture() as cap2: print("cap2") - out2, err2 = cap2.readouterr() - out1, err1 = cap1.readouterr() + out2, _err2 = cap2.readouterr() + out1, _err1 = cap1.readouterr() assert out1 == "cap1\ncap2\n" assert out2 == "cap2\n" diff 
--git a/testing/test_collection.py b/testing/test_collection.py index ccd57eeef43..39753d80cac 100644 --- a/testing/test_collection.py +++ b/testing/test_collection.py @@ -1,6 +1,7 @@ # mypy: allow-untyped-defs from __future__ import annotations +from collections.abc import Sequence import os from pathlib import Path from pathlib import PurePath @@ -10,7 +11,7 @@ import tempfile import textwrap -from _pytest.assertion.util import running_on_ci +from _pytest.compat import running_on_ci from _pytest.config import ExitCode from _pytest.fixtures import FixtureRequest from _pytest.main import _in_venv @@ -243,20 +244,20 @@ def test_testpaths_ini(self, pytester: Pytester, monkeypatch: MonkeyPatch) -> No # executing from rootdir only tests from `testpaths` directories # are collected - items, reprec = pytester.inline_genitems("-v") + items, _reprec = pytester.inline_genitems("-v") assert [x.name for x in items] == ["test_b", "test_c"] # check that explicitly passing directories in the command-line # collects the tests for dirname in ("a", "b", "c"): - items, reprec = pytester.inline_genitems(tmp_path.joinpath(dirname)) + items, _reprec = pytester.inline_genitems(tmp_path.joinpath(dirname)) assert [x.name for x in items] == [f"test_{dirname}"] # changing cwd to each subdirectory and running pytest without # arguments collects the tests in that directory normally for dirname in ("a", "b", "c"): monkeypatch.chdir(pytester.path.joinpath(dirname)) - items, reprec = pytester.inline_genitems() + items, _reprec = pytester.inline_genitems() assert [x.name for x in items] == [f"test_{dirname}"] def test_missing_permissions_on_unselected_directory_doesnt_crash( @@ -640,10 +641,10 @@ def test_collect_two_commandline_args(self, pytester: Pytester) -> None: def test_serialization_byid(self, pytester: Pytester) -> None: pytester.makepyfile("def test_func(): pass") - items, hookrec = pytester.inline_genitems() + items, _hookrec = pytester.inline_genitems() assert len(items) == 1 (item,) 
= items - items2, hookrec = pytester.inline_genitems(item.nodeid) + items2, _hookrec = pytester.inline_genitems(item.nodeid) (item2,) = items2 assert item2.name == item.name assert item2.path == item.path @@ -673,7 +674,7 @@ def test_collect_parametrized_order(self, pytester: Pytester) -> None: def test_param(i): ... """ ) - items, hookrec = pytester.inline_genitems(f"{p}::test_param") + items, _hookrec = pytester.inline_genitems(f"{p}::test_param") assert len(items) == 3 assert [item.nodeid for item in items] == [ "test_collect_parametrized_order.py::test_param[0]", @@ -732,7 +733,7 @@ def test_2(): """ ) shutil.copy(p, p.parent / (p.stem + "2" + ".py")) - items, reprec = pytester.inline_genitems(p.parent) + items, _reprec = pytester.inline_genitems(p.parent) assert len(items) == 4 for numi, i in enumerate(items): for numj, j in enumerate(items): @@ -758,7 +759,7 @@ def testmethod_two(self, arg0): pass """ ) - items, reprec = pytester.inline_genitems(p) + items, _reprec = pytester.inline_genitems(p) assert len(items) == 4 assert items[0].name == "testone" assert items[1].name == "testmethod_one" @@ -786,7 +787,7 @@ def test_classmethod(cls) -> None: pass """ ) - items, reprec = pytester.inline_genitems(p) + items, _reprec = pytester.inline_genitems(p) ids = [x.getmodpath() for x in items] # type: ignore[attr-defined] assert ids == ["TestCase.test_classmethod"] @@ -811,7 +812,7 @@ def test_y(self): pass """ ) - items, reprec = pytester.inline_genitems(p) + items, _reprec = pytester.inline_genitems(p) ids = [x.getmodpath() for x in items] # type: ignore[attr-defined] assert ids == ["MyTestSuite.x_test", "TestCase.test_y"] @@ -1352,7 +1353,7 @@ def test_collect_pyargs_with_testpaths( def test_initial_conftests_with_testpaths(pytester: Pytester) -> None: - """The testpaths ini option should load conftests in those paths as 'initial' (#10987).""" + """The testpaths config option should load conftests in those paths as 'initial' (#10987).""" p = 
pytester.mkdir("some_path") p.joinpath("conftest.py").write_text( textwrap.dedent( @@ -1615,7 +1616,7 @@ def __init__(self, name, parent, x): self.x = x @classmethod - def from_parent(cls, parent, *, name, x): + def from_parent(cls, parent, *, name, x): # type: ignore[override] return super().from_parent(parent=parent, name=name, x=x) collector = MyCollector.from_parent(parent=request.session, name="foo", x=10) @@ -1780,6 +1781,41 @@ def test_collect_short_file_windows(pytester: Pytester) -> None: assert result.parseoutcomes() == {"passed": 1} +def test_collect_short_file_windows_multi_level_symlink( + pytester: Pytester, + request: FixtureRequest, +) -> None: + """Regression test for multi-level Windows short-path comparison with + symlinks. + + Previously, when matching collection arguments against collected nodes on + Windows, the short path fallback resolved symlinks. With a chain a -> b -> + target, comparing 'a' against 'b' would incorrectly succeed because both + resolved to 'target', which could cause incorrect matching or duplicate + collection. + """ + # Prepare target directory with a test file. + short_path = Path(tempfile.mkdtemp()) + request.addfinalizer(lambda: shutil.rmtree(short_path, ignore_errors=True)) + target = short_path / "target" + target.mkdir() + (target / "test_chain.py").write_text("def test_chain(): pass", encoding="UTF-8") + + # Create multi-level symlink chain: a -> b -> target. + b = short_path / "b" + a = short_path / "a" + symlink_or_skip(target, b, target_is_directory=True) + symlink_or_skip(b, a, target_is_directory=True) + + # Collect via the first symlink; should find exactly one test. + result = pytester.runpytest(a) + result.assert_outcomes(passed=1) + + # Collect via the intermediate symlink; also exactly one test. 
+ result = pytester.runpytest(b) + result.assert_outcomes(passed=1) + + def test_pyargs_collection_tree(pytester: Pytester, monkeypatch: MonkeyPatch) -> None: """When using `--pyargs`, the collection tree of a pyargs collection argument should only include parents in the import path, not up to confcutdir. @@ -1836,7 +1872,8 @@ def test_do_not_collect_symlink_siblings( """ # Use tmp_path because it creates a symlink with the name "current" next to the directory it creates. symlink_path = tmp_path.parent / (tmp_path.name[:-1] + "current") - assert symlink_path.is_symlink() is True + if not symlink_path.is_symlink(): # pragma: no cover + pytest.skip("Symlinks not supported in this environment") # Create test file. tmp_path.joinpath("test_foo.py").write_text("def test(): pass", encoding="UTF-8") @@ -1895,3 +1932,867 @@ def test_with_yield(): ) # Assert that no tests were collected result.stdout.fnmatch_lines(["*collected 0 items*"]) + + +def test_annotations_deferred_future(pytester: Pytester): + """Ensure stringified annotations don't raise any errors.""" + pytester.makepyfile( + """ + from __future__ import annotations + import pytest + + @pytest.fixture + def func() -> X: ... # X is undefined + + def test_func(): + assert True + """ + ) + result = pytester.runpytest() + assert result.ret == 0 + result.stdout.fnmatch_lines(["*1 passed*"]) + + +@pytest.mark.skipif( + sys.version_info < (3, 14), reason="Annotations are only skipped on 3.14+" +) +def test_annotations_deferred_314(pytester: Pytester): + """Ensure annotation eval is deferred.""" + pytester.makepyfile( + """ + import pytest + + @pytest.fixture + def func() -> X: ... 
# X is undefined + + def test_func(): + assert True + """ + ) + result = pytester.runpytest() + assert result.ret == 0 + result.stdout.fnmatch_lines(["*1 passed*"]) + + +@pytest.mark.parametrize("import_mode", ["prepend", "importlib", "append"]) +def test_namespace_packages(pytester: Pytester, import_mode: str): + pytester.makeini( + f""" + [pytest] + consider_namespace_packages = true + pythonpath = . + python_files = *.py + addopts = --import-mode {import_mode} + """ + ) + pytester.makepyfile( + **{ + "pkg/module1.py": "def test_module1(): pass", + "pkg/subpkg_namespace/module2.py": "def test_module1(): pass", + "pkg/subpkg_regular/__init__.py": "", + "pkg/subpkg_regular/module3": "def test_module3(): pass", + } + ) + + # should collect when called with top-level package correctly + result = pytester.runpytest("--collect-only", "--pyargs", "pkg") + result.stdout.fnmatch_lines( + [ + "collected 3 items", + "", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + ] + ) + + # should also work when called against a more specific subpackage/module + result = pytester.runpytest("--collect-only", "--pyargs", "pkg.subpkg_namespace") + result.stdout.fnmatch_lines( + [ + "collected 1 item", + "", + " ", + " ", + " ", + ] + ) + + result = pytester.runpytest("--collect-only", "--pyargs", "pkg.subpkg_regular") + result.stdout.fnmatch_lines( + [ + "collected 1 item", + "", + " ", + " ", + " ", + ] + ) + + +class TestOverlappingCollectionArguments: + """Test that overlapping collection arguments (e.g. 
`pytest a/b a + a/c::TestIt) are handled correctly (#12083).""" + + @pytest.mark.parametrize("args", [("a", "a/b"), ("a/b", "a")]) + def test_parent_child(self, pytester: Pytester, args: tuple[str, ...]) -> None: + """Test that 'pytest a a/b' and `pytest a/b a` collects all tests from 'a'.""" + pytester.makepyfile( + **{ + "a/test_a.py": """ + def test_a1(): pass + def test_a2(): pass + """, + "a/b/test_b.py": """ + def test_b1(): pass + def test_b2(): pass + """, + } + ) + + result = pytester.runpytest("--collect-only", *args) + + result.stdout.fnmatch_lines( + [ + "", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + "", + ], + consecutive=True, + ) + + def test_multiple_nested_paths(self, pytester: Pytester) -> None: + """Test that 'pytest a/b a a/b/c' collects all tests from 'a'.""" + pytester.makepyfile( + **{ + "a/test_a.py": """ + def test_a(): pass + """, + "a/b/test_b.py": """ + def test_b(): pass + """, + "a/b/c/test_c.py": """ + def test_c(): pass + """, + } + ) + + result = pytester.runpytest("--collect-only", "a/b", "a", "a/b/c") + + result.stdout.fnmatch_lines( + [ + "", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + "", + ], + consecutive=True, + ) + + def test_same_path_twice(self, pytester: Pytester) -> None: + """Test that 'pytest a a' doesn't duplicate tests.""" + pytester.makepyfile( + **{ + "a/test_a.py": """ + def test_a(): pass + """, + } + ) + + result = pytester.runpytest("--collect-only", "a", "a") + + result.stdout.fnmatch_lines( + [ + "", + " ", + " ", + " ", + "", + ], + consecutive=True, + ) + + def test_keep_duplicates_flag(self, pytester: Pytester) -> None: + """Test that --keep-duplicates allows duplication.""" + pytester.makepyfile( + **{ + "a/test_a.py": """ + def test_a(): pass + """, + "a/b/test_b.py": """ + def test_b(): pass + """, + } + ) + + result = pytester.runpytest("--collect-only", "--keep-duplicates", "a", "a/b") + + result.stdout.fnmatch_lines( + [ + "", + " ", + " ", + " ", + " ", + " ", + " 
", + " ", + " ", + " ", + "", + ], + consecutive=True, + ) + + def test_specific_file_then_parent_dir(self, pytester: Pytester) -> None: + """Test that 'pytest a/test_a.py a' collects all tests from 'a'.""" + pytester.makepyfile( + **{ + "a/test_a.py": """ + def test_a(): pass + """, + "a/test_other.py": """ + def test_other(): pass + """, + } + ) + + result = pytester.runpytest("--collect-only", "a/test_a.py", "a") + + result.stdout.fnmatch_lines( + [ + "", + " ", + " ", + " ", + " ", + " ", + "", + ], + consecutive=True, + ) + + def test_package_scope_fixture_with_overlapping_paths( + self, pytester: Pytester + ) -> None: + """Test that package-scoped fixtures work correctly with overlapping paths.""" + pytester.makepyfile( + **{ + "pkg/__init__.py": "", + "pkg/test_pkg.py": """ + import pytest + + counter = {"value": 0} + + @pytest.fixture(scope="package") + def pkg_fixture(): + counter["value"] += 1 + return counter["value"] + + def test_pkg1(pkg_fixture): + assert pkg_fixture == 1 + + def test_pkg2(pkg_fixture): + assert pkg_fixture == 1 + """, + "pkg/sub/__init__.py": "", + "pkg/sub/test_sub.py": """ + def test_sub(): pass + """, + } + ) + + # Package fixture should run only once even with overlapping paths. 
+ result = pytester.runpytest("pkg", "pkg/sub", "pkg", "-v") + result.assert_outcomes(passed=3) + + def test_execution_order_preserved(self, pytester: Pytester) -> None: + """Test that test execution order follows argument order.""" + pytester.makepyfile( + **{ + "a/test_a.py": """ + def test_a(): pass + """, + "b/test_b.py": """ + def test_b(): pass + """, + } + ) + + result = pytester.runpytest("--collect-only", "b", "a", "b/test_b.py::test_b") + + result.stdout.fnmatch_lines( + [ + "", + " ", + " ", + " ", + " ", + " ", + " ", + "", + ], + consecutive=True, + ) + + def test_overlapping_node_ids_class_and_method(self, pytester: Pytester) -> None: + """Test that overlapping node IDs are handled correctly.""" + pytester.makepyfile( + test_nodeids=""" + class TestClass: + def test_method1(self): pass + def test_method2(self): pass + def test_method3(self): pass + + def test_function(): pass + """ + ) + + # Class then specific method. + result = pytester.runpytest( + "--collect-only", + "test_nodeids.py::TestClass", + "test_nodeids.py::TestClass::test_method2", + ) + result.stdout.fnmatch_lines( + [ + "", + " ", + " ", + " ", + " ", + " ", + "", + ], + consecutive=True, + ) + + # Specific method then class. + result = pytester.runpytest( + "--collect-only", + "test_nodeids.py::TestClass::test_method3", + "test_nodeids.py::TestClass", + ) + result.stdout.fnmatch_lines( + [ + "", + " ", + " ", + " ", + " ", + " ", + "", + ], + consecutive=True, + ) + + def test_overlapping_node_ids_file_and_class(self, pytester: Pytester) -> None: + """Test that file-level and class-level selections work correctly.""" + pytester.makepyfile( + test_file=""" + class TestClass: + def test_method(self): pass + + class TestOther: + def test_other(self): pass + + def test_function(): pass + """ + ) + + # File then class. 
+ result = pytester.runpytest( + "--collect-only", "test_file.py", "test_file.py::TestClass" + ) + result.stdout.fnmatch_lines( + [ + "", + " ", + " ", + " ", + " ", + " ", + " ", + "", + ], + consecutive=True, + ) + + # Class then file. + result = pytester.runpytest( + "--collect-only", "test_file.py::TestClass", "test_file.py" + ) + result.stdout.fnmatch_lines( + [ + "", + " ", + " ", + " ", + " ", + " ", + " ", + "", + ], + consecutive=True, + ) + + def test_same_node_id_twice(self, pytester: Pytester) -> None: + """Test that the same node ID specified twice is collected only once.""" + pytester.makepyfile( + test_dup=""" + def test_one(): pass + def test_two(): pass + """ + ) + + result = pytester.runpytest( + "--collect-only", + "test_dup.py::test_one", + "test_dup.py::test_one", + ) + result.stdout.fnmatch_lines( + [ + "", + " ", + " ", + "", + ], + consecutive=True, + ) + + def test_overlapping_with_parametrization(self, pytester: Pytester) -> None: + """Test overlapping with parametrized tests.""" + pytester.makepyfile( + test_param=""" + import pytest + + @pytest.mark.parametrize("n", [1, 2]) + def test_param(n): pass + + class TestClass: + @pytest.mark.parametrize("x", ["a", "b"]) + def test_method(self, x): pass + """ + ) + + result = pytester.runpytest( + "--collect-only", + "test_param.py::test_param[2]", + "test_param.py::TestClass::test_method[a]", + "test_param.py", + ) + result.stdout.fnmatch_lines( + [ + "", + " ", + " ", + " ", + " ", + " ", + " ", + "", + ], + consecutive=True, + ) + + result = pytester.runpytest( + "--collect-only", + "test_param.py::test_param[2]", + "test_param.py::test_param", + ) + result.stdout.fnmatch_lines( + [ + "", + " ", + " ", + " ", + "", + ], + consecutive=True, + ) + + @pytest.mark.parametrize("order", [(".", "a"), ("a", ".")]) + def test_root_and_subdir(self, pytester: Pytester, order: tuple[str, ...]) -> None: + """Test that '. a' and 'a .' 
both collect all tests.""" + pytester.makepyfile( + test_root=""" + def test_root(): pass + """, + **{ + "a/test_a.py": """ + def test_a(): pass + """, + }, + ) + + result = pytester.runpytest("--collect-only", *order) + + result.stdout.fnmatch_lines( + [ + "", + " ", + " ", + " ", + " ", + " ", + "", + ], + consecutive=True, + ) + + def test_complex_combined_handling(self, pytester: Pytester) -> None: + """Test some scenarios in a complex hierarchy.""" + pytester.makepyfile( + **{ + "top1/__init__.py": "", + "top1/test_1.py": ( + """ + def test_1(): pass + + class TestIt: + def test_2(): pass + + def test_3(): pass + """ + ), + "top1/test_2.py": ( + """ + def test_1(): pass + """ + ), + "top2/__init__.py": "", + "top2/test_1.py": ( + """ + def test_1(): pass + """ + ), + }, + ) + + result = pytester.runpytest_inprocess("--collect-only", ".") + result.stdout.fnmatch_lines( + [ + "", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + "", + ], + consecutive=True, + ) + + result = pytester.runpytest_inprocess("--collect-only", "top2", "top1") + result.stdout.fnmatch_lines( + [ + "", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + "", + ], + consecutive=True, + ) + + result = pytester.runpytest_inprocess( + "--collect-only", "top1", "top1/test_2.py" + ) + result.stdout.fnmatch_lines( + [ + "", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + # NOTE: Also sensible arguably even without --keep-duplicates. + # " ", + # " ", + "", + ], + consecutive=True, + ) + + result = pytester.runpytest_inprocess( + "--collect-only", "top1/test_2.py", "top1" + ) + result.stdout.fnmatch_lines( + [ + "", + " ", + # NOTE: Ideally test_2 would come before test_1 here. 
+ " ", + " ", + " ", + " ", + " ", + " ", + " ", + "", + ], + consecutive=True, + ) + + result = pytester.runpytest_inprocess( + "--collect-only", "--keep-duplicates", "top1/test_2.py", "top1" + ) + result.stdout.fnmatch_lines( + [ + "", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + "", + ], + consecutive=True, + ) + + result = pytester.runpytest_inprocess( + "--collect-only", "top1/test_2.py", "top1/test_2.py" + ) + result.stdout.fnmatch_lines( + [ + "", + " ", + " ", + " ", + # NOTE: Also sensible arguably even without --keep-duplicates. + # " ", + # " ", + "", + ], + consecutive=True, + ) + + result = pytester.runpytest_inprocess("--collect-only", "top2/", "top2/") + result.stdout.fnmatch_lines( + [ + "", + " ", + " ", + " ", + # NOTE: Also sensible arguably even without --keep-duplicates. + # " ", + # " ", + # " ", + "", + ], + consecutive=True, + ) + + result = pytester.runpytest_inprocess( + "--collect-only", "top2/", "top2/", "top2/test_1.py" + ) + result.stdout.fnmatch_lines( + [ + "", + " ", + " ", + " ", + # NOTE: Also sensible arguably even without --keep-duplicates. + # " ", + # " ", + # " ", + # " ", + # " ", + "", + ], + consecutive=True, + ) + + result = pytester.runpytest_inprocess( + "--collect-only", "top1/test_1.py", "top1/test_1.py::test_3" + ) + result.stdout.fnmatch_lines( + [ + "", + " ", + " ", + " ", + " ", + " ", + " ", + # NOTE: Also sensible arguably even without --keep-duplicates. + # " ", + "", + ], + consecutive=True, + ) + + result = pytester.runpytest_inprocess( + "--collect-only", "top1/test_1.py::test_3", "top1/test_1.py" + ) + result.stdout.fnmatch_lines( + [ + "", + " ", + " ", + # NOTE: Ideally test_3 would come before the others here. 
+ " ", + " ", + " ", + " ", + "", + ], + consecutive=True, + ) + + result = pytester.runpytest_inprocess( + "--collect-only", + "--keep-duplicates", + "top1/test_1.py::test_3", + "top1/test_1.py", + ) + result.stdout.fnmatch_lines( + [ + "", + " ", + # NOTE: That is duplicated here is not great. + " ", + " ", + " ", + " ", + " ", + " ", + " ", + "", + ], + consecutive=True, + ) + + +@pytest.mark.parametrize( + ["x_y", "expected_duplicates"], + [ + ( + [(1, 1), (1, 1)], + ["1-1"], + ), + ( + [(1, 1), (1, 2), (1, 1)], + ["1-1"], + ), + ( + [(1, 1), (2, 2), (1, 1)], + ["1-1"], + ), + ( + [(1, 1), (2, 2), (1, 2), (2, 1), (1, 1), (2, 1)], + ["1-1", "2-1"], + ), + ], +) +@pytest.mark.parametrize("option_name", ["strict_parametrization_ids", "strict"]) +def test_strict_parametrization_ids( + pytester: Pytester, + x_y: Sequence[tuple[int, int]], + expected_duplicates: Sequence[str], + option_name: str, +) -> None: + pytester.makeini( + f""" + [pytest] + {option_name} = true + """ + ) + pytester.makepyfile( + f""" + import pytest + + @pytest.mark.parametrize(["x", "y"], {x_y}) + def test1(x, y): + pass + """ + ) + + result = pytester.runpytest() + + assert result.ret == ExitCode.INTERRUPTED + expected_parametersets = ", ".join(str(list(p)) for p in x_y) + expected_ids = ", ".join(f"{x}-{y}" for x, y in x_y) + result.stdout.fnmatch_lines( + [ + "Duplicate parametrization IDs detected*", + "", + "Test name: *::test1", + "Parameters: x, y", + f"Parameter sets: {expected_parametersets}", + f"IDs: {expected_ids}", + f"Duplicates: {', '.join(expected_duplicates)}", + "", + "You can fix this problem using *", + ] + ) + + +def test_strict_parametrization_ids_with_hidden_param(pytester: Pytester) -> None: + pytester.makeini( + """ + [pytest] + strict_parametrization_ids = true + """ + ) + pytester.makepyfile( + """ + import pytest + + @pytest.mark.parametrize(["x"], ["a", pytest.param("a", id=pytest.HIDDEN_PARAM), "a"]) + def test1(x): + pass + """ + ) + + result = 
pytester.runpytest() + + assert result.ret == ExitCode.INTERRUPTED + result.stdout.fnmatch_lines( + [ + "Duplicate parametrization IDs detected*", + "IDs: a, , a", + "Duplicates: a", + ] + ) diff --git a/testing/test_compat.py b/testing/test_compat.py index 3722bfcfb40..fa9e259647f 100644 --- a/testing/test_compat.py +++ b/testing/test_compat.py @@ -16,7 +16,7 @@ if TYPE_CHECKING: - from typing_extensions import Literal + from typing import Literal def test_real_func_loop_limit() -> None: diff --git a/testing/test_config.py b/testing/test_config.py index bb08c40fef4..f086778ad1e 100644 --- a/testing/test_config.py +++ b/testing/test_config.py @@ -23,6 +23,7 @@ from _pytest.config.argparsing import get_ini_default_for_type from _pytest.config.argparsing import Parser from _pytest.config.exceptions import UsageError +from _pytest.config.findpaths import ConfigValue from _pytest.config.findpaths import determine_setup from _pytest.config.findpaths import get_common_ancestor from _pytest.config.findpaths import locate_config @@ -56,10 +57,10 @@ def test_getcfg_and_config( ), encoding="utf-8", ) - _, _, cfg = locate_config(Path.cwd(), [sub]) - assert cfg["name"] == "value" + _, _, cfg, _ = locate_config(Path.cwd(), [sub]) + assert cfg["name"] == ConfigValue("value", origin="file", mode="ini") config = pytester.parseconfigure(str(sub)) - assert config.inicfg["name"] == "value" + assert config._inicfg["name"] == ConfigValue("value", origin="file", mode="ini") def test_setupcfg_uses_toolpytest_with_pytest(self, pytester: Pytester) -> None: p1 = pytester.makepyfile("def test(): pass") @@ -131,6 +132,20 @@ def test_ini_names(self, pytester: Pytester, name, section) -> None: config = pytester.parseconfig() assert config.getini("minversion") == "3.36" + @pytest.mark.parametrize("name", ["pytest.toml", ".pytest.toml"]) + def test_toml_config_names(self, pytester: Pytester, name: str) -> None: + pytester.path.joinpath(name).write_text( + textwrap.dedent( + """ + [pytest] + 
minversion = "3.36" + """ + ), + encoding="utf-8", + ) + config = pytester.parseconfig() + assert config.getini("minversion") == "3.36" + def test_pyproject_toml(self, pytester: Pytester) -> None: pyproject_toml = pytester.makepyprojecttoml( """ @@ -150,7 +165,7 @@ def test_empty_pyproject_toml(self, pytester: Pytester) -> None: def test_empty_pyproject_toml_found_many(self, pytester: Pytester) -> None: """ - In case we find multiple pyproject.toml files in our search, without a [tool.pytest.ini_options] + In case we find multiple pyproject.toml files in our search, without a [tool.pytest] table and without finding other candidates, the closest to where we started wins. """ pytester.makefile( @@ -164,9 +179,88 @@ def test_empty_pyproject_toml_found_many(self, pytester: Pytester) -> None: config = pytester.parseconfig(pytester.path / "foo/bar") assert config.inipath == pytester.path / "foo/bar/pyproject.toml" + def test_pytest_toml(self, pytester: Pytester) -> None: + pytest_toml = pytester.path.joinpath("pytest.toml") + pytest_toml = pytester.maketoml( + """ + [pytest] + minversion = "1.0" + """ + ) + config = pytester.parseconfig() + assert config.inipath == pytest_toml + assert config.getini("minversion") == "1.0" + + @pytest.mark.parametrize("name", ["pytest.toml", ".pytest.toml"]) + def test_empty_pytest_toml(self, pytester: Pytester, name: str) -> None: + """An empty pytest.toml is considered as config if no other option is found.""" + pytest_toml = pytester.path / name + pytest_toml.write_text("", encoding="utf-8") + config = pytester.parseconfig() + assert config.inipath == pytest_toml + + def test_pytest_toml_trumps_pyproject_toml(self, pytester: Pytester) -> None: + """A pytest.toml always takes precedence over a pyproject.toml file.""" + pytester.makepyprojecttoml( + """ + [tool.pytest] + minversion = "1.0" + """ + ) + pytest_toml = pytester.maketoml( + """ + [pytest] + minversion = "2.0" + """ + ) + config = pytester.parseconfig() + assert config.inipath 
== pytest_toml + assert config.getini("minversion") == "2.0" + + def test_pytest_toml_trumps_pytest_ini(self, pytester: Pytester) -> None: + """A pytest.toml always takes precedence over a pytest.ini file.""" + pytester.makeini( + """ + [pytest] + minversion = 1.0 + """, + ) + pytest_toml = pytester.maketoml( + """ + [pytest] + minversion = "2.0" + """, + ) + config = pytester.parseconfig() + assert config.inipath == pytest_toml + assert config.getini("minversion") == "2.0" + + def test_dot_pytest_toml_trumps_pytest_ini(self, pytester: Pytester) -> None: + """A .pytest.toml always takes precedence over a pytest.ini file.""" + pytester.makeini( + """ + [pytest] + minversion = 1.0 + """, + ) + pytest_toml = pytester.maketoml( + """ + [pytest] + minversion = "2.0" + """ + ) + config = pytester.parseconfig() + assert config.inipath == pytest_toml + assert config.getini("minversion") == "2.0" + def test_pytest_ini_trumps_pyproject_toml(self, pytester: Pytester) -> None: """A pytest.ini always take precedence over a pyproject.toml file.""" - pytester.makepyprojecttoml("[tool.pytest.ini_options]") + pytester.makepyprojecttoml( + """ + [tool.pytest] + minversion = "1.0" + """ + ) pytest_ini = pytester.makefile(".ini", pytest="") config = pytester.parseconfig() assert config.inipath == pytest_ini @@ -212,6 +306,17 @@ def test_toml_parse_error(self, pytester: Pytester) -> None: assert result.ret != 0 result.stderr.fnmatch_lines("ERROR: *pyproject.toml: Invalid statement*") + def test_pytest_toml_parse_error(self, pytester: Pytester) -> None: + pytester.path.joinpath("pytest.toml").write_text( + """ + \\" + """, + encoding="utf-8", + ) + result = pytester.runpytest() + assert result.ret != 0 + result.stderr.fnmatch_lines("ERROR: *pytest.toml: Invalid statement*") + def test_confcutdir_default_without_configfile(self, pytester: Pytester) -> None: # If --confcutdir is not specified, and there is no configfile, default # to the rootpath. 
@@ -354,6 +459,22 @@ def test_silence_unknown_key_warning(self, pytester: Pytester) -> None: result = pytester.runpytest() result.stdout.no_fnmatch_line("*PytestConfigWarning*") + @pytest.mark.parametrize("option_name", ["strict_config", "strict"]) + def test_strict_config_ini_option( + self, pytester: Pytester, option_name: str + ) -> None: + """Test that strict_config and strict ini options enable strict config checking.""" + pytester.makeini( + f""" + [pytest] + unknown_option = 1 + {option_name} = True + """ + ) + result = pytester.runpytest() + result.stderr.fnmatch_lines("ERROR: Unknown config option: unknown_option") + assert result.ret == pytest.ExitCode.USAGE_ERROR + @pytest.mark.filterwarnings("default::pytest.PytestConfigWarning") def test_disable_warnings_plugin_disables_config_warnings( self, pytester: Pytester @@ -612,20 +733,14 @@ def pytest_addoption(parser): assert config.getini("custom") == "1" def test_absolute_win32_path(self, pytester: Pytester) -> None: - temp_ini_file = pytester.makefile( - ".ini", - custom=""" - [pytest] - addopts = --version - """, - ) + temp_ini_file = pytester.makeini("[pytest]") from os.path import normpath temp_ini_file_norm = normpath(str(temp_ini_file)) ret = pytest.main(["-c", temp_ini_file_norm]) - assert ret == ExitCode.OK + assert ret == ExitCode.NO_TESTS_COLLECTED ret = pytest.main(["--config-file", temp_ini_file_norm]) - assert ret == ExitCode.OK + assert ret == ExitCode.NO_TESTS_COLLECTED class TestConfigAPI: @@ -1011,6 +1126,166 @@ def pytest_addoption(parser): value = config.getini("no_type") assert value == "" + def test_addini_with_aliases(self, pytester: Pytester) -> None: + """Test that ini options can have aliases.""" + pytester.makeconftest( + """ + def pytest_addoption(parser): + parser.addini("new_name", "my option", aliases=["old_name"]) + """ + ) + pytester.makeini( + """ + [pytest] + old_name = hello + """ + ) + config = pytester.parseconfig() + # Should be able to access via canonical name. 
+ assert config.getini("new_name") == "hello" + # Should also be able to access via alias. + assert config.getini("old_name") == "hello" + + def test_addini_aliases_with_canonical_in_file(self, pytester: Pytester) -> None: + """Test that canonical name takes precedence over alias in configuration file.""" + pytester.makeconftest( + """ + def pytest_addoption(parser): + parser.addini("new_name", "my option", aliases=["old_name"]) + """ + ) + pytester.makeini( + """ + [pytest] + old_name = from_alias + new_name = from_canonical + """ + ) + config = pytester.parseconfig() + # Canonical name should take precedence. + assert config.getini("new_name") == "from_canonical" + assert config.getini("old_name") == "from_canonical" + + def test_addini_aliases_multiple(self, pytester: Pytester) -> None: + """Test that ini option can have multiple aliases.""" + pytester.makeconftest( + """ + def pytest_addoption(parser): + parser.addini("current_name", "my option", aliases=["old_name", "legacy_name"]) + """ + ) + pytester.makeini( + """ + [pytest] + old_name = value1 + """ + ) + config = pytester.parseconfig() + assert config.getini("current_name") == "value1" + assert config.getini("old_name") == "value1" + assert config.getini("legacy_name") == "value1" + + def test_addini_aliases_with_override_of_old(self, pytester: Pytester) -> None: + """Test that aliases work with --override-ini -- ini sets old.""" + pytester.makeconftest( + """ + def pytest_addoption(parser): + parser.addini("new_name", "my option", aliases=["old_name"]) + """ + ) + pytester.makeini( + """ + [pytest] + old_name = from_file + """ + ) + # Override using alias. + config = pytester.parseconfig("-o", "old_name=overridden") + assert config.getini("new_name") == "overridden" + assert config.getini("old_name") == "overridden" + + # Override using canonical name. 
+ config = pytester.parseconfig("-o", "new_name=overridden2") + assert config.getini("new_name") == "overridden2" + + def test_addini_aliases_with_override_of_new(self, pytester: Pytester) -> None: + """Test that aliases work with --override-ini -- ini sets new.""" + pytester.makeconftest( + """ + def pytest_addoption(parser): + parser.addini("new_name", "my option", aliases=["old_name"]) + """ + ) + pytester.makeini( + """ + [pytest] + new_name = from_file + """ + ) + # Override using alias. + config = pytester.parseconfig("-o", "old_name=overridden") + assert config.getini("new_name") == "overridden" + assert config.getini("old_name") == "overridden" + + # Override using canonical name. + config = pytester.parseconfig("-o", "new_name=overridden2") + assert config.getini("new_name") == "overridden2" + + def test_addini_aliases_with_types(self, pytester: Pytester) -> None: + """Test that aliases work with different types.""" + pytester.makeconftest( + """ + def pytest_addoption(parser): + parser.addini("mylist", "list option", type="linelist", aliases=["oldlist"]) + parser.addini("mybool", "bool option", type="bool", aliases=["oldbool"]) + """ + ) + pytester.makeini( + """ + [pytest] + oldlist = line1 + line2 + oldbool = true + """ + ) + config = pytester.parseconfig() + assert config.getini("mylist") == ["line1", "line2"] + assert config.getini("oldlist") == ["line1", "line2"] + assert config.getini("mybool") is True + assert config.getini("oldbool") is True + + def test_addini_aliases_conflict_error(self, pytester: Pytester) -> None: + """Test that registering an alias that conflicts with an existing option raises an error.""" + pytester.makeconftest( + """ + def pytest_addoption(parser): + parser.addini("existing", "first option") + + try: + parser.addini("new_option", "second option", aliases=["existing"]) + except ValueError as e: + assert "alias 'existing' conflicts with existing configuration option" in str(e) + else: + assert False, "Should have raised 
ValueError" + """ + ) + pytester.parseconfig() + + def test_addini_aliases_duplicate_error(self, pytester: Pytester) -> None: + """Test that registering the same alias twice raises an error.""" + pytester.makeconftest( + """ + def pytest_addoption(parser): + parser.addini("option1", "first option", aliases=["shared_alias"]) + try: + parser.addini("option2", "second option", aliases=["shared_alias"]) + raise AssertionError("Should have raised ValueError") + except ValueError as e: + assert "'shared_alias' is already an alias of 'option1'" in str(e) + """ + ) + pytester.parseconfig() + @pytest.mark.parametrize( "type, expected", [ @@ -1150,7 +1425,7 @@ def test_inifilename(self, tmp_path: Path) -> None: ) with MonkeyPatch.context() as mp: mp.chdir(cwd) - config = Config.fromdictargs(option_dict, ()) + config = Config.fromdictargs(option_dict, []) inipath = absolutepath(inifilename) assert config.args == [str(cwd)] @@ -1159,8 +1434,10 @@ def test_inifilename(self, tmp_path: Path) -> None: # this indicates this is the file used for getting configuration values assert config.inipath == inipath - assert config.inicfg.get("name") == "value" - assert config.inicfg.get("should_not_be_set") is None + assert config._inicfg.get("name") == ConfigValue( + "value", origin="file", mode="ini" + ) + assert config._inicfg.get("should_not_be_set") is None def test_options_on_small_file_do_not_blow_up(pytester: Pytester) -> None: @@ -1635,33 +1912,39 @@ def test_with_ini(self, tmp_path: Path, name: str, contents: str) -> None: b = a / "b" b.mkdir() for args in ([str(tmp_path)], [str(a)], [str(b)]): - rootpath, parsed_inipath, _ = determine_setup( + rootpath, parsed_inipath, *_ = determine_setup( inifile=None, + override_ini=None, args=args, rootdir_cmd_arg=None, invocation_dir=Path.cwd(), ) assert rootpath == tmp_path assert parsed_inipath == inipath - rootpath, parsed_inipath, ini_config = determine_setup( + rootpath, parsed_inipath, ini_config, _ = determine_setup( inifile=None, + 
override_ini=None, args=[str(b), str(a)], rootdir_cmd_arg=None, invocation_dir=Path.cwd(), ) assert rootpath == tmp_path assert parsed_inipath == inipath - assert ini_config == {"x": "10"} + assert ini_config["x"] == ConfigValue("10", origin="file", mode="ini") - @pytest.mark.parametrize("name", ["setup.cfg", "tox.ini"]) - def test_pytestini_overrides_empty_other(self, tmp_path: Path, name: str) -> None: - inipath = tmp_path / "pytest.ini" + @pytest.mark.parametrize("pytest_ini", ["pytest.ini", ".pytest.ini"]) + @pytest.mark.parametrize("other", ["setup.cfg", "tox.ini"]) + def test_pytestini_overrides_empty_other( + self, tmp_path: Path, pytest_ini: str, other: str + ) -> None: + inipath = tmp_path / pytest_ini inipath.touch() a = tmp_path / "a" a.mkdir() - (a / name).touch() - rootpath, parsed_inipath, _ = determine_setup( + (a / other).touch() + rootpath, parsed_inipath, *_ = determine_setup( inifile=None, + override_ini=None, args=[str(a)], rootdir_cmd_arg=None, invocation_dir=Path.cwd(), @@ -1674,8 +1957,9 @@ def test_setuppy_fallback(self, tmp_path: Path) -> None: a.mkdir() (a / "setup.cfg").touch() (tmp_path / "setup.py").touch() - rootpath, inipath, inicfg = determine_setup( + rootpath, inipath, inicfg, _ = determine_setup( inifile=None, + override_ini=None, args=[str(a)], rootdir_cmd_arg=None, invocation_dir=Path.cwd(), @@ -1686,8 +1970,9 @@ def test_setuppy_fallback(self, tmp_path: Path) -> None: def test_nothing(self, tmp_path: Path, monkeypatch: MonkeyPatch) -> None: monkeypatch.chdir(tmp_path) - rootpath, inipath, inicfg = determine_setup( + rootpath, inipath, inicfg, _ = determine_setup( inifile=None, + override_ini=None, args=[str(tmp_path)], rootdir_cmd_arg=None, invocation_dir=Path.cwd(), @@ -1713,15 +1998,16 @@ def test_with_specific_inifile( p = tmp_path / name p.touch() p.write_text(contents, encoding="utf-8") - rootpath, inipath, ini_config = determine_setup( + rootpath, inipath, ini_config, _ = determine_setup( inifile=str(p), + 
override_ini=None, args=[str(tmp_path)], rootdir_cmd_arg=None, invocation_dir=Path.cwd(), ) assert rootpath == tmp_path assert inipath == p - assert ini_config == {"x": "10"} + assert ini_config["x"] == ConfigValue("10", origin="file", mode="ini") def test_explicit_config_file_sets_rootdir( self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch @@ -1734,6 +2020,7 @@ def test_explicit_config_file_sets_rootdir( # No config file is explicitly given: rootdir is determined to be cwd. rootpath, found_inipath, *_ = determine_setup( inifile=None, + override_ini=None, args=[str(tests_dir)], rootdir_cmd_arg=None, invocation_dir=Path.cwd(), @@ -1746,6 +2033,7 @@ def test_explicit_config_file_sets_rootdir( inipath.touch() rootpath, found_inipath, *_ = determine_setup( inifile=str(inipath), + override_ini=None, args=[str(tests_dir)], rootdir_cmd_arg=None, invocation_dir=Path.cwd(), @@ -1761,8 +2049,9 @@ def test_with_arg_outside_cwd_without_inifile( a.mkdir() b = tmp_path / "b" b.mkdir() - rootpath, inifile, _ = determine_setup( + rootpath, inifile, *_ = determine_setup( inifile=None, + override_ini=None, args=[str(a), str(b)], rootdir_cmd_arg=None, invocation_dir=Path.cwd(), @@ -1777,8 +2066,9 @@ def test_with_arg_outside_cwd_with_inifile(self, tmp_path: Path) -> None: b.mkdir() inipath = a / "pytest.ini" inipath.touch() - rootpath, parsed_inipath, _ = determine_setup( + rootpath, parsed_inipath, *_ = determine_setup( inifile=None, + override_ini=None, args=[str(a), str(b)], rootdir_cmd_arg=None, invocation_dir=Path.cwd(), @@ -1791,8 +2081,9 @@ def test_with_non_dir_arg( self, dirs: Sequence[str], tmp_path: Path, monkeypatch: MonkeyPatch ) -> None: monkeypatch.chdir(tmp_path) - rootpath, inipath, _ = determine_setup( + rootpath, inipath, *_ = determine_setup( inifile=None, + override_ini=None, args=dirs, rootdir_cmd_arg=None, invocation_dir=Path.cwd(), @@ -1807,8 +2098,9 @@ def test_with_existing_file_in_subdir( a.mkdir() (a / "exists").touch() monkeypatch.chdir(tmp_path) - 
rootpath, inipath, _ = determine_setup( + rootpath, inipath, *_ = determine_setup( inifile=None, + override_ini=None, args=["a/exist"], rootdir_cmd_arg=None, invocation_dir=Path.cwd(), @@ -1826,8 +2118,9 @@ def test_with_config_also_in_parent_directory( (tmp_path / "myproject" / "tests").mkdir() monkeypatch.chdir(tmp_path / "myproject") - rootpath, inipath, _ = determine_setup( + rootpath, inipath, *_ = determine_setup( inifile=None, + override_ini=None, args=["tests/"], rootdir_cmd_arg=None, invocation_dir=Path.cwd(), @@ -1960,7 +2253,7 @@ def test_override_ini_usage_error_bad_style(self, pytester: Pytester) -> None: def test_override_ini_handled_asap( self, pytester: Pytester, with_ini: bool ) -> None: - """-o should be handled as soon as possible and always override what's in ini files (#2238)""" + """-o should be handled as soon as possible and always override what's in config files (#2238)""" if with_ini: pytester.makeini( """ @@ -1983,8 +2276,10 @@ def test_addopts_before_initini( cache_dir = ".custom_cache" monkeypatch.setenv("PYTEST_ADDOPTS", f"-o cache_dir={cache_dir}") config = _config_for_test - config._preparse([], addopts=True) - assert config._override_ini == [f"cache_dir={cache_dir}"] + config.parse([], addopts=True) + assert config._inicfg.get("cache_dir") == ConfigValue( + cache_dir, origin="override", mode="ini" + ) def test_addopts_from_env_not_concatenated( self, monkeypatch: MonkeyPatch, _config_for_test @@ -1993,14 +2288,15 @@ def test_addopts_from_env_not_concatenated( monkeypatch.setenv("PYTEST_ADDOPTS", "-o") config = _config_for_test with pytest.raises(UsageError) as excinfo: - config._preparse(["cache_dir=ignored"], addopts=True) + config.parse(["cache_dir=ignored"], addopts=True) assert ( - "error: argument -o/--override-ini: expected one argument (via PYTEST_ADDOPTS)" + "error: argument -o/--override-ini: expected one argument" in excinfo.value.args[0] ) + assert "via PYTEST_ADDOPTS" in excinfo.value.args[0] def 
test_addopts_from_ini_not_concatenated(self, pytester: Pytester) -> None: - """`addopts` from ini should not take values from normal args (#4265).""" + """`addopts` from configuration should not take values from normal args (#4265).""" pytester.makeini( """ [pytest] @@ -2010,8 +2306,8 @@ def test_addopts_from_ini_not_concatenated(self, pytester: Pytester) -> None: result = pytester.runpytest("cache_dir=ignored") result.stderr.fnmatch_lines( [ - f"{pytester._request.config._parser.optparser.prog}: error: " - f"argument -o/--override-ini: expected one argument (via addopts config)" + "*: error: argument -o/--override-ini: expected one argument", + " config source: via addopts config", ] ) assert result.ret == _pytest.config.ExitCode.USAGE_ERROR @@ -2021,8 +2317,10 @@ def test_override_ini_does_not_contain_paths( ) -> None: """Check that -o no longer swallows all options after it (#3103)""" config = _config_for_test - config._preparse(["-o", "cache_dir=/cache", "/some/test/path"]) - assert config._override_ini == ["cache_dir=/cache"] + config.parse(["-o", "cache_dir=/cache", "/some/test/path"]) + assert config._inicfg.get("cache_dir") == ConfigValue( + "/cache", origin="override", mode="ini" + ) def test_multiple_override_ini_options(self, pytester: Pytester) -> None: """Ensure a file path following a '-o' option does not generate an error (#3103)""" @@ -2058,7 +2356,16 @@ def test_override_ini_without_config_file(self, pytester: Pytester) -> None: } ) result = pytester.runpytest("--override-ini", "pythonpath=src") - assert result.parseoutcomes() == {"passed": 1} + result.assert_outcomes(passed=1) + + def test_override_ini_invalid_option(self, pytester: Pytester) -> None: + result = pytester.runpytest("--override-ini", "doesnotexist=true") + result.stdout.fnmatch_lines( + [ + "=*= warnings summary =*=", + "*PytestConfigWarning:*Unknown config option: doesnotexist", + ] + ) def test_help_via_addopts(pytester: Pytester) -> None: @@ -2111,8 +2418,7 @@ def 
pytest_addoption(parser): result.stderr.fnmatch_lines( [ "ERROR: usage: *", - f"{pytester._request.config._parser.optparser.prog}: error: " - f"argument --invalid-option-should-allow-for-help: expected one argument", + "*: error: argument --invalid-option-should-allow-for-help: expected one argument", ] ) # Does not display full/default help. @@ -2121,7 +2427,7 @@ def pytest_addoption(parser): result = pytester.runpytest("--version") result.stdout.fnmatch_lines([f"pytest {pytest.__version__}"]) - assert result.ret == ExitCode.USAGE_ERROR + assert result.ret == ExitCode.OK def test_help_formatter_uses_py_get_terminal_width(monkeypatch: MonkeyPatch) -> None: @@ -2154,6 +2460,10 @@ def test_config_does_not_load_blocked_plugin_from_args(pytester: Pytester) -> No result.stderr.fnmatch_lines(["*: error: unrecognized arguments: -s"]) assert result.ret == ExitCode.USAGE_ERROR + result = pytester.runpytest(str(p), "-p no:/path/to/conftest.py", "-s") + result.stderr.fnmatch_lines(["ERROR:*Blocking conftest files*"]) + assert result.ret == ExitCode.USAGE_ERROR + def test_invocation_args(pytester: Pytester) -> None: """Ensure that Config.invocation_* arguments are correctly defined""" @@ -2175,7 +2485,8 @@ class DummyPlugin: plugins = config.invocation_params.plugins assert len(plugins) == 2 assert plugins[0] is plugin - assert type(plugins[1]).__name__ == "Collect" # installed by pytester.inline_run() + # Installed by pytester.inline_run(). + assert type(plugins[1]).__name__ == "PytesterHelperPlugin" # args cannot be None with pytest.raises(TypeError): @@ -2399,8 +2710,6 @@ def test_parse_warning_filter( ":" * 5, # Invalid action. "FOO::", - # ImportError when importing the warning class. - "::test_parse_warning_filter_failure.NonExistentClass::", # Class is not a Warning subclass. "::list::", # Negative line number. 
@@ -2537,3 +2846,211 @@ def test_level_matches_specified_override( config.get_verbosity(TestVerbosity.SOME_OUTPUT_TYPE) == TestVerbosity.SOME_OUTPUT_VERBOSITY_LEVEL ) + + +class TestNativeTomlConfig: + """Test native TOML configuration parsing.""" + + def test_values(self, pytester: Pytester) -> None: + """Test that values are parsed as expected in TOML mode.""" + pytester.makepyprojecttoml( + """ + [tool.pytest] + test_bool = true + test_int = 5 + test_float = 30.5 + test_args = ["tests", "integration"] + test_paths = ["src", "lib"] + """ + ) + pytester.makeconftest( + """ + def pytest_addoption(parser): + parser.addini("test_bool", "Test boolean config", type="bool", default=False) + parser.addini("test_int", "Test integer config", type="int", default=0) + parser.addini("test_float", "Test float config", type="float", default=0.0) + parser.addini("test_args", "Test args config", type="args") + parser.addini("test_paths", "Test paths config", type="paths") + """ + ) + config = pytester.parseconfig() + assert config.getini("test_bool") is True + assert config.getini("test_int") == 5 + assert config.getini("test_float") == 30.5 + assert config.getini("test_args") == ["tests", "integration"] + paths = config.getini("test_paths") + assert len(paths) == 2 + # Paths should be resolved relative to pyproject.toml location. + assert all(isinstance(p, Path) for p in paths) + + def test_override_with_list(self, pytester: Pytester) -> None: + """Test that -o overrides work with INI-style list syntax even when + config uses TOML mode.""" + pytester.makepyprojecttoml( + """ + [tool.pytest] + test_override_list = ["tests"] + """ + ) + pytester.makeconftest( + """ + def pytest_addoption(parser): + parser.addini("test_override_list", "Test override list", type="args") + """ + ) + # -o uses INI mode, so uses space-separated syntax. 
+ config = pytester.parseconfig("-o", "test_override_list=tests integration") + assert config.getini("test_override_list") == ["tests", "integration"] + + def test_conflict_between_native_and_ini_options(self, pytester: Pytester) -> None: + """Test that using both [tool.pytest] and [tool.pytest.ini_options] fails.""" + pytester.makepyprojecttoml( + """ + [tool.pytest] + test_conflict_1 = true + + [tool.pytest.ini_options] + test_conflict_2 = true + """, + ) + pytester.makeconftest( + """ + def pytest_addoption(parser): + parser.addini("test_conflict_1", "Test conflict config 1", type="bool") + parser.addini("test_conflict_2", "Test conflict config 2", type="bool") + """ + ) + with pytest.raises(UsageError, match="Cannot use both"): + pytester.parseconfig() + + def test_type_errors(self, pytester: Pytester) -> None: + """Test all possible TypeError cases in getini.""" + pytester.maketoml( + """ + [pytest] + paths_not_list = "should_be_list" + paths_list_with_int = [1, 2] + + args_not_list = 123 + args_list_with_int = ["valid", 456] + + linelist_not_list = true + linelist_list_with_bool = ["valid", false] + + bool_not_bool = "true" + + int_not_int = "123" + int_is_bool = true + + float_not_float = "3.14" + float_is_bool = false + + string_not_string = 123 + """ + ) + pytester.makeconftest( + """ + def pytest_addoption(parser): + parser.addini("paths_not_list", "test", type="paths") + parser.addini("paths_list_with_int", "test", type="paths") + parser.addini("args_not_list", "test", type="args") + parser.addini("args_list_with_int", "test", type="args") + parser.addini("linelist_not_list", "test", type="linelist") + parser.addini("linelist_list_with_bool", "test", type="linelist") + parser.addini("bool_not_bool", "test", type="bool") + parser.addini("int_not_int", "test", type="int") + parser.addini("int_is_bool", "test", type="int") + parser.addini("float_not_float", "test", type="float") + parser.addini("float_is_bool", "test", type="float") + 
parser.addini("string_not_string", "test", type="string") + """ + ) + config = pytester.parseconfig() + + with pytest.raises( + TypeError, match=r"expects a list for type 'paths'.*got str" + ): + config.getini("paths_not_list") + + with pytest.raises( + TypeError, match=r"expects a list of strings.*item at index 0 is int" + ): + config.getini("paths_list_with_int") + + with pytest.raises(TypeError, match=r"expects a list for type 'args'.*got int"): + config.getini("args_not_list") + + with pytest.raises( + TypeError, match=r"expects a list of strings.*item at index 1 is int" + ): + config.getini("args_list_with_int") + + with pytest.raises( + TypeError, match=r"expects a list for type 'linelist'.*got bool" + ): + config.getini("linelist_not_list") + + with pytest.raises( + TypeError, match=r"expects a list of strings.*item at index 1 is bool" + ): + config.getini("linelist_list_with_bool") + + with pytest.raises(TypeError, match=r"expects a bool.*got str"): + config.getini("bool_not_bool") + + with pytest.raises(TypeError, match=r"expects an int.*got str"): + config.getini("int_not_int") + + with pytest.raises(TypeError, match=r"expects an int.*got bool"): + config.getini("int_is_bool") + + with pytest.raises(TypeError, match=r"expects a float.*got str"): + config.getini("float_not_float") + + with pytest.raises(TypeError, match=r"expects a float.*got bool"): + config.getini("float_is_bool") + + with pytest.raises(TypeError, match=r"expects a string.*got int"): + config.getini("string_not_string") + + +class TestInicfgDeprecation: + """Tests for the upcoming deprecation of config.inicfg.""" + + def test_inicfg_deprecated(self, pytester: Pytester) -> None: + """Test that accessing config.inicfg issues a deprecation warning (not yet).""" + pytester.makeini( + """ + [pytest] + minversion = 3.0 + """ + ) + config = pytester.parseconfig() + + inicfg = config.inicfg + + assert config.getini("minversion") == "3.0" + assert inicfg["minversion"] == "3.0" + assert 
inicfg.get("minversion") == "3.0" + del inicfg["minversion"] + inicfg["minversion"] = "4.0" + assert list(inicfg.keys()) == ["minversion"] + assert list(inicfg.items()) == [("minversion", "4.0")] + assert len(inicfg) == 1 + + def test_issue_13946_setting_bool_no_longer_crashes( + self, pytester: Pytester + ) -> None: + """Regression test for #13946 - setting inicfg doesn't cause a crash.""" + pytester.makepyfile( + """ + def pytest_configure(config): + config.inicfg["xfail_strict"] = True + + def test(): + pass + """ + ) + + result = pytester.runpytest() + assert result.ret == 0 diff --git a/testing/test_conftest.py b/testing/test_conftest.py index bd083574ffc..4de61bceb90 100644 --- a/testing/test_conftest.py +++ b/testing/test_conftest.py @@ -654,7 +654,7 @@ def test_parsefactories_relative_node_ids( def test_search_conftest_up_to_inifile( pytester: Pytester, confcutdir: str, passed: int, error: int ) -> None: - """Test that conftest files are detected only up to an ini file, unless + """Test that conftest files are detected only up to a configuration file, unless an explicit --confcutdir option is given. """ root = pytester.path diff --git a/testing/test_doctest.py b/testing/test_doctest.py index 1a852ebc82a..8b71dabbc77 100644 --- a/testing/test_doctest.py +++ b/testing/test_doctest.py @@ -33,24 +33,24 @@ def test_collect_testtextfile(self, pytester: Pytester): for x in (pytester.path, checkfile): # print "checking that %s returns custom items" % (x,) - items, reprec = pytester.inline_genitems(x) + items, _reprec = pytester.inline_genitems(x) assert len(items) == 1 assert isinstance(items[0], DoctestItem) assert isinstance(items[0].parent, DoctestTextfile) # Empty file has no items. 
- items, reprec = pytester.inline_genitems(w) + items, _reprec = pytester.inline_genitems(w) assert len(items) == 0 def test_collect_module_empty(self, pytester: Pytester): path = pytester.makepyfile(whatever="#") for p in (path, pytester.path): - items, reprec = pytester.inline_genitems(p, "--doctest-modules") + items, _reprec = pytester.inline_genitems(p, "--doctest-modules") assert len(items) == 0 def test_collect_module_single_modulelevel_doctest(self, pytester: Pytester): path = pytester.makepyfile(whatever='""">>> pass"""') for p in (path, pytester.path): - items, reprec = pytester.inline_genitems(p, "--doctest-modules") + items, _reprec = pytester.inline_genitems(p, "--doctest-modules") assert len(items) == 1 assert isinstance(items[0], DoctestItem) assert isinstance(items[0].parent, DoctestModule) @@ -64,7 +64,7 @@ def my_func(): """ ) for p in (path, pytester.path): - items, reprec = pytester.inline_genitems(p, "--doctest-modules") + items, _reprec = pytester.inline_genitems(p, "--doctest-modules") assert len(items) == 2 assert isinstance(items[0], DoctestItem) assert isinstance(items[1], DoctestItem) @@ -97,7 +97,7 @@ def another(): }, ) for p in (path, pytester.path): - items, reprec = pytester.inline_genitems(p, "--doctest-modules") + items, _reprec = pytester.inline_genitems(p, "--doctest-modules") assert len(items) == 2 assert isinstance(items[0], DoctestItem) assert isinstance(items[1], DoctestItem) @@ -223,9 +223,12 @@ def test_doctest_unexpected_exception(self, pytester: Pytester): "002 >>> 0 / i", "UNEXPECTED EXCEPTION: ZeroDivisionError*", "Traceback (most recent call last):", - ' File "*/doctest.py", line *, in __run', - " *", - *((" *^^^^*", " *", " *") if sys.version_info >= (3, 13) else ()), + *( + (' File "*/doctest.py", line *, in __run', " *") + if sys.version_info <= (3, 14) + else () + ), + *((" *^^^^*", " *", " *") if sys.version_info[:2] == (3, 13) else ()), ' File "", line 1, in ', "ZeroDivisionError: division by zero", 
"*/test_doctest_unexpected_exception.txt:2: UnexpectedException", @@ -837,7 +840,7 @@ def foo(x): return 'c' """ ) - items, reprec = pytester.inline_genitems(p, "--doctest-modules") + items, _reprec = pytester.inline_genitems(p, "--doctest-modules") reportinfo = items[0].reportinfo() assert reportinfo[1] == 1 @@ -904,7 +907,7 @@ class TestLiterals: def test_allow_unicode(self, pytester, config_mode): """Test that doctests which output unicode work in all python versions tested by pytest when the ALLOW_UNICODE option is used (either in - the ini file or by an inline comment). + the configuration file or by an inline comment). """ if config_mode == "ini": pytester.makeini( @@ -939,7 +942,7 @@ def foo(): def test_allow_bytes(self, pytester, config_mode): """Test that doctests which output bytes work in all python versions tested by pytest when the ALLOW_BYTES option is used (either in - the ini file or by an inline comment)(#1287). + the configuration file or by an inline comment)(#1287). """ if config_mode == "ini": pytester.makeini( @@ -1593,7 +1596,14 @@ def __getattr__(self, _): @pytest.mark.parametrize( # pragma: no branch (lambdas are not called) - "stop", [None, _is_mocked, lambda f: None, lambda f: False, lambda f: True] + "stop", + [ + None, + pytest.param(_is_mocked, id="is_mocked"), + pytest.param(lambda f: None, id="lambda_none"), + pytest.param(lambda f: False, id="lambda_false"), + pytest.param(lambda f: True, id="lambda_true"), + ], ) def test_warning_on_unwrap_of_broken_object( stop: Callable[[object], object] | None, diff --git a/testing/test_faulthandler.py b/testing/test_faulthandler.py index c416e81d2d9..67ca221f3f2 100644 --- a/testing/test_faulthandler.py +++ b/testing/test_faulthandler.py @@ -2,6 +2,7 @@ from __future__ import annotations import io +import os import sys from _pytest.pytester import Pytester @@ -71,11 +72,18 @@ def test_disabled(): assert result.ret == 0 +@pytest.mark.keep_ci_var @pytest.mark.parametrize( "enabled", [ 
pytest.param( - True, marks=pytest.mark.skip(reason="sometimes crashes on CI (#7022)") + True, + marks=pytest.mark.skipif( + bool(os.environ.get("CI")) + and sys.platform == "linux" + and sys.version_info >= (3, 14), + reason="sometimes crashes on CI because of truncated outputs (#7022)", + ), ), False, ], @@ -110,6 +118,43 @@ def test_timeout(): assert result.ret == 0 +@pytest.mark.keep_ci_var +@pytest.mark.skipif( + "CI" in os.environ and sys.platform == "linux" and sys.version_info >= (3, 14), + reason="sometimes crashes on CI because of truncated outputs (#7022)", +) +@pytest.mark.parametrize("exit_on_timeout", [True, False]) +def test_timeout_and_exit(pytester: Pytester, exit_on_timeout: bool) -> None: + """Test option to force exit pytest process after a certain timeout.""" + pytester.makepyfile( + """ + import os, time + def test_long_sleep_and_raise(): + time.sleep(1 if "CI" in os.environ else 0.1) + raise AssertionError( + "This test should have been interrupted before reaching this point." 
+ ) + """ + ) + pytester.makeini( + f""" + [pytest] + faulthandler_timeout = 0.01 + faulthandler_exit_on_timeout = {"true" if exit_on_timeout else "false"} + """ + ) + result = pytester.runpytest_subprocess() + tb_output = "most recent call first" + result.stderr.fnmatch_lines([f"*{tb_output}*"]) + if exit_on_timeout: + result.stdout.no_fnmatch_line("*1 failed*") + result.stdout.no_fnmatch_line("*AssertionError*") + else: + result.stdout.fnmatch_lines(["*1 failed*"]) + result.stdout.fnmatch_lines(["*AssertionError*"]) + assert result.ret == 1 + + @pytest.mark.parametrize("hook_name", ["pytest_enter_pdb", "pytest_exception_interact"]) def test_cancel_timeout_on_hook(monkeypatch, hook_name) -> None: """Make sure that we are cancelling any scheduled traceback dumping due diff --git a/testing/test_findpaths.py b/testing/test_findpaths.py index 9532f1eef75..aea7b1f9a4d 100644 --- a/testing/test_findpaths.py +++ b/testing/test_findpaths.py @@ -6,6 +6,7 @@ from textwrap import dedent from _pytest.config import UsageError +from _pytest.config.findpaths import ConfigValue from _pytest.config.findpaths import get_common_ancestor from _pytest.config.findpaths import get_dirs_from_args from _pytest.config.findpaths import is_fs_root @@ -14,9 +15,10 @@ class TestLoadConfigDictFromFile: - def test_empty_pytest_ini(self, tmp_path: Path) -> None: + @pytest.mark.parametrize("filename", ["pytest.ini", ".pytest.ini"]) + def test_empty_pytest_ini(self, tmp_path: Path, filename: str) -> None: """pytest.ini files are always considered for configuration, even if empty""" - fn = tmp_path / "pytest.ini" + fn = tmp_path / filename fn.write_text("", encoding="utf-8") assert load_config_dict_from_file(fn) == {} @@ -24,13 +26,17 @@ def test_pytest_ini(self, tmp_path: Path) -> None: """[pytest] section in pytest.ini files is read correctly""" fn = tmp_path / "pytest.ini" fn.write_text("[pytest]\nx=1", encoding="utf-8") - assert load_config_dict_from_file(fn) == {"x": "1"} + assert 
load_config_dict_from_file(fn) == { + "x": ConfigValue("1", origin="file", mode="ini") + } def test_custom_ini(self, tmp_path: Path) -> None: """[pytest] section in any .ini file is read correctly""" fn = tmp_path / "custom.ini" fn.write_text("[pytest]\nx=1", encoding="utf-8") - assert load_config_dict_from_file(fn) == {"x": "1"} + assert load_config_dict_from_file(fn) == { + "x": ConfigValue("1", origin="file", mode="ini") + } def test_custom_ini_without_section(self, tmp_path: Path) -> None: """Custom .ini files without [pytest] section are not considered for configuration""" @@ -48,7 +54,9 @@ def test_valid_cfg_file(self, tmp_path: Path) -> None: """Custom .cfg files with [tool:pytest] section are read correctly""" fn = tmp_path / "custom.cfg" fn.write_text("[tool:pytest]\nx=1", encoding="utf-8") - assert load_config_dict_from_file(fn) == {"x": "1"} + assert load_config_dict_from_file(fn) == { + "x": ConfigValue("1", origin="file", mode="ini") + } def test_unsupported_pytest_section_in_cfg_file(self, tmp_path: Path) -> None: """.cfg files with [pytest] section are no longer supported and should fail to alert users""" @@ -65,7 +73,7 @@ def test_invalid_toml_file(self, tmp_path: Path) -> None: load_config_dict_from_file(fn) def test_custom_toml_file(self, tmp_path: Path) -> None: - """.toml files without [tool.pytest.ini_options] are not considered for configuration.""" + """.toml files without [tool.pytest] are not considered for configuration.""" fn = tmp_path / "myconfig.toml" fn.write_text( dedent( @@ -96,13 +104,70 @@ def test_valid_toml_file(self, tmp_path: Path) -> None: encoding="utf-8", ) assert load_config_dict_from_file(fn) == { - "x": "1", - "y": "20.0", - "values": ["tests", "integration"], - "name": "foo", - "heterogeneous_array": [1, "str"], + "x": ConfigValue("1", origin="file", mode="ini"), + "y": ConfigValue("20.0", origin="file", mode="ini"), + "values": ConfigValue(["tests", "integration"], origin="file", mode="ini"), + "name": 
ConfigValue("foo", origin="file", mode="ini"), + "heterogeneous_array": ConfigValue([1, "str"], origin="file", mode="ini"), + } + + def test_native_toml_config(self, tmp_path: Path) -> None: + """[tool.pytest] sections with native types are parsed correctly without coercion.""" + fn = tmp_path / "pyproject.toml" + fn.write_text( + dedent( + """ + [tool.pytest] + minversion = "7.0" + xfail_strict = true + testpaths = ["tests", "integration"] + python_files = ["test_*.py", "*_test.py"] + verbosity_assertions = 2 + maxfail = 5 + timeout = 300.5 + """ + ), + encoding="utf-8", + ) + result = load_config_dict_from_file(fn) + assert result == { + "minversion": ConfigValue("7.0", origin="file", mode="toml"), + "xfail_strict": ConfigValue(True, origin="file", mode="toml"), + "testpaths": ConfigValue( + ["tests", "integration"], origin="file", mode="toml" + ), + "python_files": ConfigValue( + ["test_*.py", "*_test.py"], origin="file", mode="toml" + ), + "verbosity_assertions": ConfigValue(2, origin="file", mode="toml"), + "maxfail": ConfigValue(5, origin="file", mode="toml"), + "timeout": ConfigValue(300.5, origin="file", mode="toml"), } + def test_native_and_ini_conflict(self, tmp_path: Path) -> None: + """Using both [tool.pytest] and [tool.pytest.ini_options] should raise an error.""" + fn = tmp_path / "pyproject.toml" + fn.write_text( + dedent( + """ + [tool.pytest] + xfail_strict = true + + [tool.pytest.ini_options] + minversion = "7.0" + """ + ), + encoding="utf-8", + ) + with pytest.raises(UsageError, match="Cannot use both"): + load_config_dict_from_file(fn) + + def test_invalid_suffix(self, tmp_path: Path) -> None: + """A file with an unknown suffix is ignored.""" + fn = tmp_path / "pytest.config" + fn.write_text("", encoding="utf-8") + assert load_config_dict_from_file(fn) is None + class TestCommonAncestor: def test_has_ancestor(self, tmp_path: Path) -> None: diff --git a/testing/test_helpconfig.py b/testing/test_helpconfig.py index dc7e709b65d..b01a6fa1559 100644 
--- a/testing/test_helpconfig.py +++ b/testing/test_helpconfig.py @@ -10,21 +10,21 @@ def test_version_verbose(pytester: Pytester, pytestconfig, monkeypatch) -> None: monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD") monkeypatch.delenv("PYTEST_PLUGINS", raising=False) result = pytester.runpytest("--version", "--version") - assert result.ret == 0 + assert result.ret == ExitCode.OK result.stdout.fnmatch_lines([f"*pytest*{pytest.__version__}*imported from*"]) if pytestconfig.pluginmanager.list_plugin_distinfo(): result.stdout.fnmatch_lines(["*registered third-party plugins:", "*at*"]) -def test_version_less_verbose(pytester: Pytester, pytestconfig, monkeypatch) -> None: - monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD") - monkeypatch.delenv("PYTEST_PLUGINS", raising=False) - result = pytester.runpytest("--version") - assert result.ret == 0 - result.stdout.fnmatch_lines([f"pytest {pytest.__version__}"]) +def test_version_less_verbose(pytester: Pytester) -> None: + """Single ``--version`` parameter should display only the pytest version, without loading plugins (#13574).""" + pytester.makeconftest("print('This should not be printed')") + result = pytester.runpytest_subprocess("--version") + assert result.ret == ExitCode.OK + assert result.stdout.str().strip() == f"pytest {pytest.__version__}" -def test_versions(): +def test_versions() -> None: """Regression check for the public version attributes in pytest.""" assert isinstance(pytest.__version__, str) assert isinstance(pytest.version_tuple, tuple) @@ -32,7 +32,7 @@ def test_versions(): def test_help(pytester: Pytester) -> None: result = pytester.runpytest("--help") - assert result.ret == 0 + assert result.ret == ExitCode.OK result.stdout.fnmatch_lines( """ -m MARKEXPR Only run tests matching given mark expression. 
For @@ -73,7 +73,7 @@ def pytest_addoption(parser): """ ) result = pytester.runpytest("--help") - assert result.ret == 0 + assert result.ret == ExitCode.OK lines = [ " required_plugins (args):", " Plugins that must be present for pytest to run*", @@ -83,6 +83,14 @@ def pytest_addoption(parser): result.stdout.fnmatch_lines(lines, consecutive=True) +def test_parse_known_args_doesnt_quit_on_help(pytester: Pytester) -> None: + """`parse_known_args` shouldn't exit on `--help`, unlike `parse`.""" + config = pytester.parseconfig() + # Doesn't raise or exit! + config._parser.parse_known_args(["--help"]) + config._parser.parse_known_and_unknown_args(["--help"]) + + def test_hookvalidation_unknown(pytester: Pytester) -> None: pytester.makeconftest( """ @@ -91,7 +99,7 @@ def pytest_hello(xyz): """ ) result = pytester.runpytest() - assert result.ret != 0 + assert result.ret != ExitCode.OK result.stdout.fnmatch_lines(["*unknown hook*pytest_hello*"]) diff --git a/testing/test_junitxml.py b/testing/test_junitxml.py index 504e4969039..5a603c05bc8 100644 --- a/testing/test_junitxml.py +++ b/testing/test_junitxml.py @@ -76,11 +76,11 @@ def nodeval(node: minidom.Element, name: str) -> str | None: class DomDocument: - def __init__(self, dom: minidom.Document): - self._node = dom - _node: minidom.Document | minidom.Element + def __init__(self, dom: minidom.Document) -> None: + self._node = dom + def find_first_by_tag(self, tag: str) -> DomNode | None: return self.find_nth_by_tag(tag, 0) @@ -105,7 +105,9 @@ def find_by_tag(self, tag: str) -> list[DomNode]: @property def children(self) -> list[DomNode]: - return [DomNode(x) for x in self._node.childNodes] + return [ + DomNode(x) for x in self._node.childNodes if isinstance(x, minidom.Element) + ] @property def get_unique_child(self) -> DomNode: @@ -120,7 +122,7 @@ def toxml(self) -> str: class DomNode(DomDocument): _node: minidom.Element - def __init__(self, dom: minidom.Element): + def __init__(self, dom: minidom.Element) -> None: 
self._node = dom def __repr__(self) -> str: @@ -129,7 +131,7 @@ def __repr__(self) -> str: def __getitem__(self, key: str) -> str: node = self._node.getAttributeNode(key) if node is not None: - return cast(str, node.value) + return node.value else: raise KeyError(key) @@ -139,7 +141,9 @@ def assert_attr(self, **kwargs: object) -> None: @property def text(self) -> str: - return cast(str, self._node.childNodes[0].wholeText) + text = self._node.childNodes[0] + assert isinstance(text, minidom.Text) + return text.wholeText @property def tag(self) -> str: @@ -257,7 +261,7 @@ def test_pass(): pass """ ) - result, dom = run_and_parse(family=xunit_family) + _result, dom = run_and_parse(family=xunit_family) node = dom.get_first_by_tag("testsuite") node.assert_attr(hostname=platform.node()) @@ -272,7 +276,7 @@ def test_pass(): """ ) start_time = datetime.now(timezone.utc) - result, dom = run_and_parse(family=xunit_family) + _result, dom = run_and_parse(family=xunit_family) node = dom.get_first_by_tag("testsuite") timestamp = datetime.fromisoformat(node["timestamp"]) assert start_time <= timestamp < datetime.now(timezone.utc) @@ -294,7 +298,7 @@ def test_sleep(): timing.sleep(4) """ ) - result, dom = run_and_parse() + _result, dom = run_and_parse() node = dom.get_first_by_tag("testsuite") tnode = node.get_first_by_tag("testcase") val = tnode["time"] @@ -325,7 +329,7 @@ def test_foo(): pass """ ) - result, dom = run_and_parse("-o", f"junit_duration_report={duration_report}") + _result, dom = run_and_parse("-o", f"junit_duration_report={duration_report}") node = dom.get_first_by_tag("testsuite") tnode = node.get_first_by_tag("testcase") val = float(tnode["time"]) @@ -631,7 +635,7 @@ def test_fail(): assert 0, "An error" """ ) - result, dom = run_and_parse(family=xunit_family) + _result, dom = run_and_parse(family=xunit_family) node = dom.get_first_by_tag("testsuite") tnode = node.get_first_by_tag("testcase") fnode = tnode.get_first_by_tag("failure") @@ -657,8 +661,7 @@ def 
test_func(arg1): node = dom.get_first_by_tag("testsuite") node.assert_attr(failures=3, tests=3) tnodes = node.find_by_tag("testcase") - assert len(tnodes) == 3 - for tnode, char in zip(tnodes, "<&'"): + for tnode, char in zip(tnodes, "<&'", strict=True): tnode.assert_attr( classname="test_failure_escape", name=f"test_func[{char}]" ) @@ -749,7 +752,7 @@ def test_fail(): assert 0 """ ) - result, dom = run_and_parse("-o", f"junit_logging={junit_logging}") + _result, dom = run_and_parse("-o", f"junit_logging={junit_logging}") node = dom.get_first_by_tag("testsuite") tnode = node.get_first_by_tag("testcase") @@ -774,7 +777,7 @@ def test_xpass(): pass """ ) - result, dom = run_and_parse(family=xunit_family) + _result, dom = run_and_parse(family=xunit_family) # assert result.ret node = dom.get_first_by_tag("testsuite") node.assert_attr(skipped=0, tests=1) @@ -793,7 +796,7 @@ def test_xpass(): pass """ ) - result, dom = run_and_parse(family=xunit_family) + _result, dom = run_and_parse(family=xunit_family) # assert result.ret node = dom.get_first_by_tag("testsuite") node.assert_attr(skipped=0, tests=1) @@ -846,7 +849,7 @@ def test_str_compare(): assert M1 == M2 """ ) - result, dom = run_and_parse() + _result, dom = run_and_parse() print(dom.toxml()) @pytest.mark.parametrize("junit_logging", ["no", "system-out"]) @@ -859,7 +862,7 @@ def test_pass(): print('hello-stdout') """ ) - result, dom = run_and_parse("-o", f"junit_logging={junit_logging}") + _result, dom = run_and_parse("-o", f"junit_logging={junit_logging}") node = dom.get_first_by_tag("testsuite") pnode = node.get_first_by_tag("testcase") if junit_logging == "no": @@ -883,7 +886,7 @@ def test_pass(): sys.stderr.write('hello-stderr') """ ) - result, dom = run_and_parse("-o", f"junit_logging={junit_logging}") + _result, dom = run_and_parse("-o", f"junit_logging={junit_logging}") node = dom.get_first_by_tag("testsuite") pnode = node.get_first_by_tag("testcase") if junit_logging == "no": @@ -912,7 +915,7 @@ def 
test_function(arg): pass """ ) - result, dom = run_and_parse("-o", f"junit_logging={junit_logging}") + _result, dom = run_and_parse("-o", f"junit_logging={junit_logging}") node = dom.get_first_by_tag("testsuite") pnode = node.get_first_by_tag("testcase") if junit_logging == "no": @@ -942,7 +945,7 @@ def test_function(arg): pass """ ) - result, dom = run_and_parse("-o", f"junit_logging={junit_logging}") + _result, dom = run_and_parse("-o", f"junit_logging={junit_logging}") node = dom.get_first_by_tag("testsuite") pnode = node.get_first_by_tag("testcase") if junit_logging == "no": @@ -973,7 +976,7 @@ def test_function(arg): sys.stdout.write('hello-stdout call') """ ) - result, dom = run_and_parse("-o", f"junit_logging={junit_logging}") + _result, dom = run_and_parse("-o", f"junit_logging={junit_logging}") node = dom.get_first_by_tag("testsuite") pnode = node.get_first_by_tag("testcase") if junit_logging == "no": @@ -1323,7 +1326,7 @@ def test_record_with_same_name(record_property): record_property("foo", "baz") """ ) - result, dom = run_and_parse() + _result, dom = run_and_parse() node = dom.get_first_by_tag("testsuite") tnode = node.get_first_by_tag("testcase") psnode = tnode.get_first_by_tag("properties") @@ -1399,7 +1402,7 @@ def test_record({fixture_name}, other): """ ) - result, dom = run_and_parse(family=None) + result, _dom = run_and_parse(family=None) expected_lines = [] if fixture_name == "record_xml_attribute": expected_lines.append( @@ -1464,7 +1467,7 @@ def test_pass(): """ ) - result, dom = run_and_parse(f, f) + result, dom = run_and_parse("--keep-duplicates", f, f) result.stdout.no_fnmatch_line("*INTERNALERROR*") first, second = (x["classname"] for x in dom.find_by_tag("testcase")) assert first == second @@ -1818,3 +1821,13 @@ def test_func(): assert junit_logging == "no" assert len(node.find_by_tag("system-err")) == 0 assert len(node.find_by_tag("system-out")) == 0 + + +def test_no_message_quiet(pytester: Pytester) -> None: + """Do not show the summary 
banner when --quiet is given (#13700).""" + pytester.makepyfile("def test(): pass") + result = pytester.runpytest("--junitxml=pytest.xml") + result.stdout.fnmatch_lines("* generated xml file: *") + + result = pytester.runpytest("--junitxml=pytest.xml", "--quiet") + result.stdout.no_fnmatch_line("* generated xml file: *") diff --git a/testing/test_legacypath.py b/testing/test_legacypath.py index 72854e4e5c0..d1f2255f30f 100644 --- a/testing/test_legacypath.py +++ b/testing/test_legacypath.py @@ -12,10 +12,10 @@ def test_item_fspath(pytester: pytest.Pytester) -> None: pytester.makepyfile("def test_func(): pass") - items, hookrec = pytester.inline_genitems() + items, _hookrec = pytester.inline_genitems() assert len(items) == 1 (item,) = items - items2, hookrec = pytester.inline_genitems(item.nodeid) + items2, _hookrec = pytester.inline_genitems(item.nodeid) (item2,) = items2 assert item2.name == item.name assert item2.fspath == item.fspath @@ -113,7 +113,7 @@ def test_session_scoped_unavailable_attributes(self, session_request): _ = session_request.fspath -@pytest.mark.parametrize("config_type", ["ini", "pyproject"]) +@pytest.mark.parametrize("config_type", ["ini", "toml"]) def test_addini_paths(pytester: pytest.Pytester, config_type: str) -> None: pytester.makeconftest( """ @@ -126,15 +126,15 @@ def pytest_addoption(parser): inipath = pytester.makeini( """ [pytest] - paths=hello world/sub.py - """ + paths = hello world/sub.py + """ ) - elif config_type == "pyproject": - inipath = pytester.makepyprojecttoml( + else: + inipath = pytester.maketoml( + """ + [pytest] + paths = ["hello", "world/sub.py"] """ - [tool.pytest.ini_options] - paths=["hello", "world/sub.py"] - """ ) config = pytester.parseconfig() values = config.getini("paths") diff --git a/testing/test_main.py b/testing/test_main.py index 94eac02ce63..41d7055df26 100644 --- a/testing/test_main.py +++ b/testing/test_main.py @@ -121,100 +121,151 @@ def invocation_path(self, pytester: Pytester) -> Path: def 
test_file(self, invocation_path: Path) -> None: """File and parts.""" assert resolve_collection_argument( - invocation_path, "src/pkg/test.py" + invocation_path, "src/pkg/test.py", 0 ) == CollectionArgument( path=invocation_path / "src/pkg/test.py", parts=[], + parametrization=None, module_name=None, + original_index=0, ) assert resolve_collection_argument( - invocation_path, "src/pkg/test.py::" + invocation_path, "src/pkg/test.py::", 10 ) == CollectionArgument( path=invocation_path / "src/pkg/test.py", parts=[""], + parametrization=None, module_name=None, + original_index=10, ) assert resolve_collection_argument( - invocation_path, "src/pkg/test.py::foo::bar" + invocation_path, "src/pkg/test.py::foo::bar", 20 ) == CollectionArgument( path=invocation_path / "src/pkg/test.py", parts=["foo", "bar"], + parametrization=None, module_name=None, + original_index=20, ) assert resolve_collection_argument( - invocation_path, "src/pkg/test.py::foo::bar::" + invocation_path, "src/pkg/test.py::foo::bar::", 30 ) == CollectionArgument( path=invocation_path / "src/pkg/test.py", parts=["foo", "bar", ""], + parametrization=None, module_name=None, + original_index=30, + ) + assert resolve_collection_argument( + invocation_path, "src/pkg/test.py::foo::bar[a,b,c]", 40 + ) == CollectionArgument( + path=invocation_path / "src/pkg/test.py", + parts=["foo", "bar"], + parametrization="[a,b,c]", + module_name=None, + original_index=40, ) def test_dir(self, invocation_path: Path) -> None: """Directory and parts.""" assert resolve_collection_argument( - invocation_path, "src/pkg" + invocation_path, "src/pkg", 0 ) == CollectionArgument( path=invocation_path / "src/pkg", parts=[], + parametrization=None, module_name=None, + original_index=0, ) with pytest.raises( UsageError, match=r"directory argument cannot contain :: selection parts" ): - resolve_collection_argument(invocation_path, "src/pkg::") + resolve_collection_argument(invocation_path, "src/pkg::", 0) with pytest.raises( UsageError, 
match=r"directory argument cannot contain :: selection parts" ): - resolve_collection_argument(invocation_path, "src/pkg::foo::bar") + resolve_collection_argument(invocation_path, "src/pkg::foo::bar", 0) - def test_pypath(self, invocation_path: Path) -> None: + @pytest.mark.parametrize("namespace_package", [False, True]) + def test_pypath(self, namespace_package: bool, invocation_path: Path) -> None: """Dotted name and parts.""" + if namespace_package: + # Namespace package doesn't have to contain __init__py + (invocation_path / "src/pkg/__init__.py").unlink() + assert resolve_collection_argument( - invocation_path, "pkg.test", as_pypath=True + invocation_path, "pkg.test", 0, as_pypath=True ) == CollectionArgument( path=invocation_path / "src/pkg/test.py", parts=[], + parametrization=None, module_name="pkg.test", + original_index=0, ) assert resolve_collection_argument( - invocation_path, "pkg.test::foo::bar", as_pypath=True + invocation_path, "pkg.test::foo::bar", 0, as_pypath=True ) == CollectionArgument( path=invocation_path / "src/pkg/test.py", parts=["foo", "bar"], + parametrization=None, module_name="pkg.test", + original_index=0, ) assert resolve_collection_argument( - invocation_path, "pkg", as_pypath=True + invocation_path, + "pkg", + 0, + as_pypath=True, + consider_namespace_packages=namespace_package, ) == CollectionArgument( path=invocation_path / "src/pkg", parts=[], + parametrization=None, module_name="pkg", + original_index=0, ) with pytest.raises( UsageError, match=r"package argument cannot contain :: selection parts" ): resolve_collection_argument( - invocation_path, "pkg::foo::bar", as_pypath=True + invocation_path, + "pkg::foo::bar", + 0, + as_pypath=True, + consider_namespace_packages=namespace_package, ) def test_parametrized_name_with_colons(self, invocation_path: Path) -> None: assert resolve_collection_argument( - invocation_path, "src/pkg/test.py::test[a::b]" + invocation_path, "src/pkg/test.py::test[a::b]", 0 ) == CollectionArgument( 
path=invocation_path / "src/pkg/test.py", - parts=["test[a::b]"], + parts=["test"], + parametrization="[a::b]", module_name=None, + original_index=0, ) + @pytest.mark.parametrize( + "arg", ["x.py[a]", "x.py[a]::foo", "x/y.py[a]::foo::bar", "x.py[a]::foo[b]"] + ) + def test_path_parametrization_not_allowed( + self, invocation_path: Path, arg: str + ) -> None: + with pytest.raises( + UsageError, match=r"path cannot contain \[\] parametrization" + ): + resolve_collection_argument(invocation_path, arg, 0) + def test_does_not_exist(self, invocation_path: Path) -> None: """Given a file/module that does not exist raises UsageError.""" with pytest.raises( UsageError, match=re.escape("file or directory not found: foobar") ): - resolve_collection_argument(invocation_path, "foobar") + resolve_collection_argument(invocation_path, "foobar", 0) with pytest.raises( UsageError, @@ -222,28 +273,32 @@ def test_does_not_exist(self, invocation_path: Path) -> None: "module or package not found: foobar (missing __init__.py?)" ), ): - resolve_collection_argument(invocation_path, "foobar", as_pypath=True) + resolve_collection_argument(invocation_path, "foobar", 0, as_pypath=True) def test_absolute_paths_are_resolved_correctly(self, invocation_path: Path) -> None: """Absolute paths resolve back to absolute paths.""" full_path = str(invocation_path / "src") assert resolve_collection_argument( - invocation_path, full_path + invocation_path, full_path, 0 ) == CollectionArgument( path=Path(os.path.abspath("src")), parts=[], + parametrization=None, module_name=None, + original_index=0, ) # ensure full paths given in the command-line without the drive letter resolve # to the full path correctly (#7628) - drive, full_path_without_drive = os.path.splitdrive(full_path) + _drive, full_path_without_drive = os.path.splitdrive(full_path) assert resolve_collection_argument( - invocation_path, full_path_without_drive + invocation_path, full_path_without_drive, 0 ) == CollectionArgument( 
path=Path(os.path.abspath("src")), parts=[], + parametrization=None, module_name=None, + original_index=0, ) @@ -275,7 +330,7 @@ def test(fix): fn = pytester.path.joinpath("project/tests/dummy_test.py") assert fn.is_file() - drive, path = os.path.splitdrive(str(fn)) + _drive, path = os.path.splitdrive(str(fn)) result = pytester.runpytest(path, "-v") result.stdout.fnmatch_lines( diff --git a/testing/test_mark.py b/testing/test_mark.py index 1e51f9db18f..8d76ea310eb 100644 --- a/testing/test_mark.py +++ b/testing/test_mark.py @@ -59,7 +59,7 @@ def test_1(self, abc): """ ) file_name = os.path.basename(py_file) - rec = pytester.inline_run(file_name, file_name) + rec = pytester.inline_run("--keep-duplicates", file_name, file_name) rec.assertoutcome(passed=6) @@ -183,7 +183,9 @@ def test_hello(): reprec.assertoutcome(passed=1) -@pytest.mark.parametrize("option_name", ["--strict-markers", "--strict"]) +@pytest.mark.parametrize( + "option_name", ["--strict-markers", "--strict", "strict_markers", "strict"] +) def test_strict_prohibits_unregistered_markers( pytester: Pytester, option_name: str ) -> None: @@ -195,7 +197,16 @@ def test_hello(): pass """ ) - result = pytester.runpytest(option_name) + if option_name in ("strict_markers", "strict"): + pytester.makeini( + f""" + [pytest] + {option_name} = true + """ + ) + result = pytester.runpytest() + else: + result = pytester.runpytest(option_name) assert result.ret != 0 result.stdout.fnmatch_lines( ["'unregisteredmark' not found in `markers` configuration option"] @@ -228,7 +239,7 @@ def test_two(): """ ) rec = pytester.inline_run("-m", expr) - passed, skipped, fail = rec.listoutcomes() + passed, _skipped, _fail = rec.listoutcomes() passed_str = [x.nodeid.split("::")[-1] for x in passed] assert passed_str == expected_passed @@ -276,7 +287,7 @@ def test_three(): """ ) rec = pytester.inline_run("-m", expr) - passed, skipped, fail = rec.listoutcomes() + passed, _skipped, _fail = rec.listoutcomes() passed_str = 
[x.nodeid.split("::")[-1] for x in passed] assert passed_str == expected_passed @@ -306,7 +317,7 @@ def test_nointer(): """ ) rec = pytester.inline_run("-m", expr) - passed, skipped, fail = rec.listoutcomes() + passed, _skipped, _fail = rec.listoutcomes() passed_str = [x.nodeid.split("::")[-1] for x in passed] assert passed_str == expected_passed @@ -341,7 +352,7 @@ def test_2(): """ ) rec = pytester.inline_run("-k", expr) - passed, skipped, fail = rec.listoutcomes() + passed, _skipped, _fail = rec.listoutcomes() passed_str = [x.nodeid.split("::")[-1] for x in passed] assert passed_str == expected_passed @@ -373,7 +384,7 @@ def test_func(arg): """ ) rec = pytester.inline_run("-k", expr) - passed, skipped, fail = rec.listoutcomes() + passed, _skipped, _fail = rec.listoutcomes() passed_str = [x.nodeid.split("::")[-1] for x in passed] assert passed_str == expected_passed @@ -388,7 +399,7 @@ def test_func(arg): """ ) rec = pytester.inline_run() - passed, skipped, fail = rec.listoutcomes() + passed, _skipped, _fail = rec.listoutcomes() expected_id = "test_func[" + pytest.__name__ + "]" assert passed[0].nodeid.split("::")[-1] == expected_id @@ -537,7 +548,7 @@ def test_d(self): assert True """ ) - items, rec = pytester.inline_genitems(p) + items, _rec = pytester.inline_genitems(p) for item in items: print(item, item.keywords) assert [x for x in item.iter_markers() if x.name == "a"] @@ -560,7 +571,7 @@ class Test2(Base): def test_bar(self): pass """ ) - items, rec = pytester.inline_genitems(p) + items, _rec = pytester.inline_genitems(p) self.assert_markers(items, test_foo=("a", "b"), test_bar=("a",)) def test_mark_should_not_pass_to_siebling_class(self, pytester: Pytester) -> None: @@ -583,7 +594,7 @@ class TestOtherSub(TestBase): """ ) - items, rec = pytester.inline_genitems(p) + items, _rec = pytester.inline_genitems(p) base_item, sub_item, sub_item_other = items print(items, [x.nodeid for x in items]) # new api segregates @@ -611,7 +622,7 @@ class Test2(Base2): def 
test_bar(self): pass """ ) - items, rec = pytester.inline_genitems(p) + items, _rec = pytester.inline_genitems(p) self.assert_markers(items, test_foo=("a", "b", "c"), test_bar=("a", "b", "d")) def test_mark_closest(self, pytester: Pytester) -> None: @@ -630,7 +641,7 @@ def test_has_inherited(self): """ ) - items, rec = pytester.inline_genitems(p) + items, _rec = pytester.inline_genitems(p) has_own, has_inherited = items has_own_marker = has_own.get_closest_marker("c") has_inherited_marker = has_inherited.get_closest_marker("c") @@ -826,7 +837,7 @@ def test_method_one(self): def check(keyword, name): reprec = pytester.inline_run("-s", "-k", keyword, file_test) - passed, skipped, failed = reprec.listoutcomes() + _passed, _skipped, failed = reprec.listoutcomes() assert len(failed) == 1 assert failed[0].nodeid.split("::")[-1] == name assert len(reprec.getcalls("pytest_deselected")) == 1 @@ -869,7 +880,7 @@ def pytest_pycollect_makeitem(name): ) reprec = pytester.inline_run(p.parent, "-s", "-k", keyword) print("keyword", repr(keyword)) - passed, skipped, failed = reprec.listoutcomes() + passed, _skipped, _failed = reprec.listoutcomes() assert len(passed) == 1 assert passed[0].nodeid.endswith("test_2") dlist = reprec.getcalls("pytest_deselected") @@ -885,7 +896,7 @@ def test_one(): """ ) reprec = pytester.inline_run("-k", "mykeyword", p) - passed, skipped, failed = reprec.countoutcomes() + _passed, _skipped, failed = reprec.countoutcomes() assert failed == 1 @pytest.mark.xfail diff --git a/testing/test_mark_expression.py b/testing/test_mark_expression.py index 884c4b5af88..1e3c769347c 100644 --- a/testing/test_mark_expression.py +++ b/testing/test_mark_expression.py @@ -1,30 +1,25 @@ from __future__ import annotations -from collections.abc import Callable -from typing import cast - from _pytest.mark import MarkMatcher from _pytest.mark.expression import Expression -from _pytest.mark.expression import MatcherCall -from _pytest.mark.expression import ParseError +from 
_pytest.mark.expression import ExpressionMatcher import pytest -def evaluate(input: str, matcher: Callable[[str], bool]) -> bool: - return Expression.compile(input).evaluate(cast(MatcherCall, matcher)) +def evaluate(input: str, matcher: ExpressionMatcher) -> bool: + return Expression.compile(input).evaluate(matcher) def test_empty_is_false() -> None: - assert not evaluate("", lambda ident: False) - assert not evaluate("", lambda ident: True) - assert not evaluate(" ", lambda ident: False) - assert not evaluate("\t", lambda ident: False) + assert not evaluate("", lambda ident, /, **kwargs: False) + assert not evaluate("", lambda ident, /, **kwargs: True) + assert not evaluate(" ", lambda ident, /, **kwargs: False) + assert not evaluate("\t", lambda ident, /, **kwargs: False) @pytest.mark.parametrize( ("expr", "expected"), ( - ("true", True), ("true", True), ("false", False), ("not true", False), @@ -51,7 +46,9 @@ def test_empty_is_false() -> None: ), ) def test_basic(expr: str, expected: bool) -> None: - matcher = {"true": True, "false": False}.__getitem__ + def matcher(name: str, /, **kwargs: str | int | bool | None) -> bool: + return {"true": True, "false": False}[name] + assert evaluate(expr, matcher) is expected @@ -67,7 +64,9 @@ def test_basic(expr: str, expected: bool) -> None: ), ) def test_syntax_oddities(expr: str, expected: bool) -> None: - matcher = {"true": True, "false": False}.__getitem__ + def matcher(name: str, /, **kwargs: str | int | bool | None) -> bool: + return {"true": True, "false": False}[name] + assert evaluate(expr, matcher) is expected @@ -77,11 +76,13 @@ def test_backslash_not_treated_specially() -> None: user will never need to insert a literal newline, only \n (two chars). 
So mark expressions themselves do not support escaping, instead they treat backslashes as regular identifier characters.""" - matcher = {r"\nfoo\n"}.__contains__ + + def matcher(name: str, /, **kwargs: str | int | bool | None) -> bool: + return {r"\nfoo\n"}.__contains__(name) assert evaluate(r"\nfoo\n", matcher) assert not evaluate(r"foo", matcher) - with pytest.raises(ParseError): + with pytest.raises(SyntaxError): evaluate("\nfoo\n", matcher) @@ -134,10 +135,10 @@ def test_backslash_not_treated_specially() -> None: ), ) def test_syntax_errors(expr: str, column: int, message: str) -> None: - with pytest.raises(ParseError) as excinfo: - evaluate(expr, lambda ident: True) - assert excinfo.value.column == column - assert excinfo.value.message == message + with pytest.raises(SyntaxError) as excinfo: + evaluate(expr, lambda ident, /, **kwargs: True) + assert excinfo.value.offset == column + assert excinfo.value.msg == message @pytest.mark.parametrize( @@ -172,7 +173,10 @@ def test_syntax_errors(expr: str, column: int, message: str) -> None: ), ) def test_valid_idents(ident: str) -> None: - assert evaluate(ident, {ident: True}.__getitem__) + def matcher(name: str, /, **kwargs: str | int | bool | None) -> bool: + return name == ident + + assert evaluate(ident, matcher) @pytest.mark.parametrize( @@ -198,13 +202,14 @@ def test_valid_idents(ident: str) -> None: ), ) def test_invalid_idents(ident: str) -> None: - with pytest.raises(ParseError): - evaluate(ident, lambda ident: True) + with pytest.raises(SyntaxError): + evaluate(ident, lambda ident, /, **kwargs: True) @pytest.mark.parametrize( "expr, expected_error_msg", ( + ("mark()", "expected identifier; got right parenthesis"), ("mark(True=False)", "unexpected reserved python keyword `True`"), ("mark(def=False)", "unexpected reserved python keyword `def`"), ("mark(class=False)", "unexpected reserved python keyword `class`"), @@ -234,7 +239,7 @@ def test_invalid_idents(ident: str) -> None: def 
test_invalid_kwarg_name_or_value( expr: str, expected_error_msg: str, mark_matcher: MarkMatcher ) -> None: - with pytest.raises(ParseError, match=expected_error_msg): + with pytest.raises(SyntaxError, match=expected_error_msg): assert evaluate(expr, mark_matcher) diff --git a/testing/test_monkeypatch.py b/testing/test_monkeypatch.py index 0e992e298ec..c321439e398 100644 --- a/testing/test_monkeypatch.py +++ b/testing/test_monkeypatch.py @@ -7,8 +7,7 @@ import re import sys import textwrap - -import setuptools +import warnings from _pytest.monkeypatch import MonkeyPatch from _pytest.pytester import Pytester @@ -417,7 +416,7 @@ def test_context() -> None: with monkeypatch.context() as m: m.setattr(functools, "partial", 3) assert not inspect.isclass(functools.partial) - assert inspect.isclass(functools.partial) # type:ignore[unreachable] + assert inspect.isclass(functools.partial) def test_context_classmethod() -> None: @@ -430,14 +429,16 @@ class A: assert A.x == 1 -@pytest.mark.filterwarnings(r"ignore:.*\bpkg_resources\b:DeprecationWarning") -@pytest.mark.skipif( - int(setuptools.__version__.split(".")[0]) >= 80, - reason="modern setuptools removing pkg_resources", +@pytest.mark.filterwarnings( + r"ignore:.*\bpkg_resources\b:DeprecationWarning", + r"ignore:.*\bpkg_resources\b:UserWarning", ) def test_syspath_prepend_with_namespace_packages( pytester: Pytester, monkeypatch: MonkeyPatch ) -> None: + # Needs to be in sys.modules. + pytest.importorskip("pkg_resources") + for dirname in "hello", "world": d = pytester.mkdir(dirname) ns = d.joinpath("ns_pkg") @@ -451,7 +452,9 @@ def test_syspath_prepend_with_namespace_packages( f"def check(): return {dirname!r}", encoding="utf-8" ) + # First call should not warn - namespace package not registered yet. monkeypatch.syspath_prepend("hello") + # This registers ns_pkg as a namespace package. 
import ns_pkg.hello assert ns_pkg.hello.check() == "hello" @@ -460,13 +463,19 @@ def test_syspath_prepend_with_namespace_packages( import ns_pkg.world # Prepending should call fixup_namespace_packages. - monkeypatch.syspath_prepend("world") + # This call should warn - ns_pkg is now registered and "world" contains it + with pytest.warns(pytest.PytestRemovedIn10Warning, match="legacy namespace"): + monkeypatch.syspath_prepend("world") import ns_pkg.world assert ns_pkg.world.check() == "world" # Should invalidate caches via importlib.invalidate_caches. + # Should not warn for path without namespace packages. modules_tmpdir = pytester.mkdir("modules_tmpdir") - monkeypatch.syspath_prepend(str(modules_tmpdir)) + with warnings.catch_warnings(): + warnings.simplefilter("error") + monkeypatch.syspath_prepend(str(modules_tmpdir)) + modules_tmpdir.joinpath("main_app.py").write_text("app = True", encoding="utf-8") from main_app import app # noqa: F401 diff --git a/testing/test_nodes.py b/testing/test_nodes.py index f5f21e9775c..de7875ca427 100644 --- a/testing/test_nodes.py +++ b/testing/test_nodes.py @@ -24,10 +24,10 @@ def test_node_direct_construction_deprecated() -> None: with pytest.raises( OutcomeException, match=( - "Direct construction of _pytest.nodes.Node has been deprecated, please " - "use _pytest.nodes.Node.from_parent.\nSee " - "https://docs.pytest.org/en/stable/deprecations.html#node-construction-changed-to-node-from-parent" - " for more details." + r"Direct construction of _pytest\.nodes\.Node has been deprecated, please " + r"use _pytest\.nodes\.Node\.from_parent.\nSee " + r"https://docs\.pytest\.org/en/stable/deprecations\.html#node-construction-changed-to-node-from-parent" + r" for more details\." 
), ): nodes.Node(None, session=None) # type: ignore[arg-type] diff --git a/testing/test_parseopt.py b/testing/test_parseopt.py index 36db7b13989..30370d3d673 100644 --- a/testing/test_parseopt.py +++ b/testing/test_parseopt.py @@ -28,9 +28,10 @@ def test_no_help_by_default(self) -> None: def test_custom_prog(self, parser: parseopt.Parser) -> None: """Custom prog can be set for `argparse.ArgumentParser`.""" - assert parser._getparser().prog == argparse.ArgumentParser().prog + assert parser.optparser.prog == argparse.ArgumentParser().prog parser.prog = "custom-prog" - assert parser._getparser().prog == "custom-prog" + assert parser.prog == "custom-prog" + assert parser.optparser.prog == "custom-prog" def test_argument(self) -> None: with pytest.raises(parseopt.ArgumentError): @@ -71,14 +72,12 @@ def test_argument_processopt(self) -> None: assert res["dest"] == "abc" def test_group_add_and_get(self, parser: parseopt.Parser) -> None: - group = parser.getgroup("hello", description="desc") + group = parser.getgroup("hello") assert group.name == "hello" - assert group.description == "desc" def test_getgroup_simple(self, parser: parseopt.Parser) -> None: - group = parser.getgroup("hello", description="desc") + group = parser.getgroup("hello") assert group.name == "hello" - assert group.description == "desc" group2 = parser.getgroup("hello") assert group2 is group @@ -88,16 +87,20 @@ def test_group_ordering(self, parser: parseopt.Parser) -> None: parser.getgroup("3", after="1") groups = parser._groups groups_names = [x.name for x in groups] - assert groups_names == list("132") + assert groups_names == ["_anonymous", "1", "3", "2"] def test_group_addoption(self) -> None: - group = parseopt.OptionGroup("hello", _ispytest=True) + optparser = argparse.ArgumentParser() + arggroup = optparser.add_argument_group("hello") + group = parseopt.OptionGroup(arggroup, "hello", None, _ispytest=True) group.addoption("--option1", action="store_true") assert len(group.options) == 1 assert 
isinstance(group.options[0], parseopt.Argument) def test_group_addoption_conflict(self) -> None: - group = parseopt.OptionGroup("hello again", _ispytest=True) + optparser = argparse.ArgumentParser() + arggroup = optparser.add_argument_group("hello again") + group = parseopt.OptionGroup(arggroup, "hello again", None, _ispytest=True) group.addoption("--option1", "--option-1", action="store_true") with pytest.raises(ValueError) as err: group.addoption("--option1", "--option-one", action="store_true") @@ -142,35 +145,32 @@ def test_parse_known_args(self, parser: parseopt.Parser) -> None: parser.parse_known_args([Path(".")]) parser.addoption("--hello", action="store_true") ns = parser.parse_known_args(["x", "--y", "--hello", "this"]) - assert ns.hello - assert ns.file_or_dir == ["x"] + assert ns.hello is True + assert ns.file_or_dir == ["x", "this"] def test_parse_known_and_unknown_args(self, parser: parseopt.Parser) -> None: parser.addoption("--hello", action="store_true") ns, unknown = parser.parse_known_and_unknown_args( ["x", "--y", "--hello", "this"] ) - assert ns.hello - assert ns.file_or_dir == ["x"] - assert unknown == ["--y", "this"] + assert ns.hello is True + assert ns.file_or_dir == ["x", "this"] + assert unknown == ["--y"] def test_parse_will_set_default(self, parser: parseopt.Parser) -> None: parser.addoption("--hello", dest="hello", default="x", action="store") option = parser.parse([]) assert option.hello == "x" - del option.hello - parser.parse_setoption([], option) - assert option.hello == "x" - def test_parse_setoption(self, parser: parseopt.Parser) -> None: + def test_parse_set_options(self, parser: parseopt.Parser) -> None: parser.addoption("--hello", dest="hello", action="store") parser.addoption("--world", dest="world", default=42) option = argparse.Namespace() - args = parser.parse_setoption(["--hello", "world"], option) + parser.parse(["--hello", "world"], option) assert option.hello == "world" assert option.world == 42 - assert not args + 
assert getattr(option, parseopt.FILE_OR_DIR) == [] def test_parse_special_destination(self, parser: parseopt.Parser) -> None: parser.addoption("--ultimate-answer", type=int) diff --git a/testing/test_pathlib.py b/testing/test_pathlib.py index 65a4117812f..0880c355557 100644 --- a/testing/test_pathlib.py +++ b/testing/test_pathlib.py @@ -1624,7 +1624,7 @@ def find_spec( return None # Setup directories without configuring sys.path. - models_py, algorithms_py = self.setup_directories( + models_py, _algorithms_py = self.setup_directories( tmp_path, monkeypatch=None, pytester=pytester ) com_root_1 = tmp_path / "src/dist1/com" diff --git a/testing/test_pluginmanager.py b/testing/test_pluginmanager.py index db85124bf0d..24700c07c80 100644 --- a/testing/test_pluginmanager.py +++ b/testing/test_pluginmanager.py @@ -436,7 +436,7 @@ def test_preparse_args(self, pytestpm: PytestPluginManager) -> None: # Handles -p without following arg (when used without argparse). pytestpm.consider_preparse(["-p"]) - with pytest.raises(UsageError, match="^plugin main cannot be disabled$"): + with pytest.raises(UsageError, match=r"^plugin main cannot be disabled$"): pytestpm.consider_preparse(["-p", "no:main"]) def test_plugin_prevent_register(self, pytestpm: PytestPluginManager) -> None: diff --git a/testing/test_pytester.py b/testing/test_pytester.py index 721e8c19d8b..5e2e22f111b 100644 --- a/testing/test_pytester.py +++ b/testing/test_pytester.py @@ -799,7 +799,7 @@ def test_pytester_makefile_dot_prefixes_extension_with_warning( ) -> None: with pytest.raises( ValueError, - match="pytester.makefile expects a file extension, try .foo.bar instead of foo.bar", + match=r"pytester\.makefile expects a file extension, try \.foo\.bar instead of foo\.bar", ): pytester.makefile("foo.bar", "") @@ -834,3 +834,25 @@ def test_two(): result.assert_outcomes(passed=1, deselected=1) # If deselected is not passed, it is not checked at all. 
result.assert_outcomes(passed=1) + + +def test_pytester_subprocess_with_string_plugins(pytester: Pytester) -> None: + """Test that pytester.runpytest_subprocess is OK with named (string) + `.plugins`.""" + pytester.plugins = ["pytester"] + + result = pytester.runpytest_subprocess() + assert result.ret == ExitCode.NO_TESTS_COLLECTED + + +def test_pytester_subprocess_with_non_string_plugins(pytester: Pytester) -> None: + """Test that pytester.runpytest_subprocess fails with a proper error given + non-string `.plugins`.""" + + class MyPlugin: + pass + + pytester.plugins = [MyPlugin()] + + with pytest.raises(ValueError, match="plugins as objects is not supported"): + pytester.runpytest_subprocess() diff --git a/testing/test_python_path.py b/testing/test_python_path.py index d12ef96115f..f75bcb6bb57 100644 --- a/testing/test_python_path.py +++ b/testing/test_python_path.py @@ -92,8 +92,8 @@ def test_module_not_found(pytester: Pytester, file_structure) -> None: result.stdout.fnmatch_lines([expected_error]) -def test_no_ini(pytester: Pytester, file_structure) -> None: - """If no ini file, test should error.""" +def test_no_config_file(pytester: Pytester, file_structure) -> None: + """If no configuration file, test should error.""" result = pytester.runpytest("test_foo.py") assert result.ret == pytest.ExitCode.INTERRUPTED result.assert_outcomes(errors=1) diff --git a/testing/test_reports.py b/testing/test_reports.py index 7a893981838..b81371587d9 100644 --- a/testing/test_reports.py +++ b/testing/test_reports.py @@ -101,8 +101,7 @@ def test_repr_entry(): rep_entries = rep.longrepr.reprtraceback.reprentries a_entries = a.longrepr.reprtraceback.reprentries - assert len(rep_entries) == len(a_entries) # python < 3.10 zip(strict=True) - for a_entry, rep_entry in zip(a_entries, rep_entries): + for a_entry, rep_entry in zip(a_entries, rep_entries, strict=True): assert isinstance(rep_entry, ReprEntry) assert rep_entry.reprfileloc is not None assert rep_entry.reprfuncargs is not 
None @@ -146,8 +145,7 @@ def test_repr_entry_native(): rep_entries = rep.longrepr.reprtraceback.reprentries a_entries = a.longrepr.reprtraceback.reprentries - assert len(rep_entries) == len(a_entries) # python < 3.10 zip(strict=True) - for rep_entry, a_entry in zip(rep_entries, a_entries): + for rep_entry, a_entry in zip(rep_entries, a_entries, strict=True): assert isinstance(rep_entry, ReprEntryNative) assert rep_entry.lines == a_entry.lines @@ -319,8 +317,8 @@ def check_longrepr(longrepr: ExceptionChainRepr) -> None: assert longrepr.sections == [("title", "contents", "=")] assert len(longrepr.chain) == 2 entry1, entry2 = longrepr.chain - tb1, fileloc1, desc1 = entry1 - tb2, fileloc2, desc2 = entry2 + tb1, _fileloc1, desc1 = entry1 + tb2, _fileloc2, desc2 = entry2 assert "ValueError('value error')" in str(tb1) assert "RuntimeError('runtime error')" in str(tb2) @@ -377,8 +375,8 @@ def check_longrepr(longrepr: object) -> None: assert isinstance(longrepr, ExceptionChainRepr) assert len(longrepr.chain) == 2 entry1, entry2 = longrepr.chain - tb1, fileloc1, desc1 = entry1 - tb2, fileloc2, desc2 = entry2 + tb1, fileloc1, _desc1 = entry1 + tb2, fileloc2, _desc2 = entry2 assert "RemoteTraceback" in str(tb1) assert "ValueError: value error" in str(tb2) @@ -436,6 +434,83 @@ def test_1(fixture_): timing.sleep(10) loaded_report = TestReport._from_json(data) assert loaded_report.stop - loaded_report.start == approx(report.duration) + @pytest.mark.parametrize( + "first_skip_reason, second_skip_reason, skip_reason_output", + [("A", "B", "(A; B)"), ("A", "A", "(A)")], + ) + def test_exception_group_with_only_skips( + self, + pytester: Pytester, + first_skip_reason: str, + second_skip_reason: str, + skip_reason_output: str, + ): + """ + Test that when an ExceptionGroup with only Skipped exceptions is raised in teardown, + it is reported as a single skipped test, not as an error. + This is a regression test for issue #13537. 
+ """ + pytester.makepyfile( + test_it=f""" + import pytest + @pytest.fixture + def fixA(): + yield + pytest.skip(reason="{first_skip_reason}") + @pytest.fixture + def fixB(): + yield + pytest.skip(reason="{second_skip_reason}") + def test_skip(fixA, fixB): + assert True + """ + ) + result = pytester.runpytest("-v") + result.assert_outcomes(passed=1, skipped=1) + out = result.stdout.str() + assert skip_reason_output in out + assert "ERROR at teardown" not in out + + @pytest.mark.parametrize( + "use_item_location, skip_file_location", + [(True, "test_it.py"), (False, "runner.py")], + ) + def test_exception_group_skips_use_item_location( + self, pytester: Pytester, use_item_location: bool, skip_file_location: str + ): + """ + Regression for #13537: + If any skip inside an ExceptionGroup has _use_item_location=True, + the report location should point to the test item, not the fixture teardown. + """ + pytester.makepyfile( + test_it=f""" + import pytest + @pytest.fixture + def fix_item1(): + yield + exc = pytest.skip.Exception("A") + exc._use_item_location = True + raise exc + @pytest.fixture + def fix_item2(): + yield + exc = pytest.skip.Exception("B") + exc._use_item_location = {use_item_location} + raise exc + def test_both(fix_item1, fix_item2): + assert True + """ + ) + result = pytester.runpytest("-rs") + result.assert_outcomes(passed=1, skipped=1) + + out = result.stdout.str() + # Both reasons should appear + assert "A" and "B" in out + # Crucially, the skip should be attributed to the test item, not teardown + assert skip_file_location in out + class TestHooks: """Test that the hooks are working correctly for plugins""" diff --git a/testing/test_session.py b/testing/test_session.py index ba904916033..e3db9a1b690 100644 --- a/testing/test_session.py +++ b/testing/test_session.py @@ -66,7 +66,7 @@ def test_raises_doesnt(): pytest.raises(ValueError, int, "3") """ ) - passed, skipped, failed = reprec.listoutcomes() + _passed, _skipped, failed = 
reprec.listoutcomes() assert len(failed) == 1 out = failed[0].longrepr.reprcrash.message # type: ignore[union-attr] assert "DID NOT RAISE" in out diff --git a/testing/test_skipping.py b/testing/test_skipping.py index 9a6c2c4b6aa..e1e25e45468 100644 --- a/testing/test_skipping.py +++ b/testing/test_skipping.py @@ -1,7 +1,6 @@ # mypy: allow-untyped-defs from __future__ import annotations -import sys import textwrap from _pytest.pytester import Pytester @@ -685,13 +684,14 @@ def test_foo(): assert result.ret == 0 @pytest.mark.parametrize("strict_val", ["true", "false"]) + @pytest.mark.parametrize("option_name", ["strict_xfail", "strict"]) def test_strict_xfail_default_from_file( - self, pytester: Pytester, strict_val + self, pytester: Pytester, strict_val: str, option_name: str ) -> None: pytester.makeini( f""" [pytest] - xfail_strict = {strict_val} + {option_name} = {strict_val} """ ) p = pytester.makepyfile( @@ -1136,22 +1136,13 @@ def test_func(): """ ) result = pytester.runpytest() - markline = " ^" - pypy_version_info = getattr(sys, "pypy_version_info", None) - if pypy_version_info is not None: - markline = markline[7:] - - if sys.version_info >= (3, 10): - expected = [ - "*ERROR*test_nameerror*", - "*asd*", - "", - "During handling of the above exception, another exception occurred:", - ] - else: - expected = [ - "*ERROR*test_nameerror*", - ] + + expected = [ + "*ERROR*test_nameerror*", + "*asd*", + "", + "During handling of the above exception, another exception occurred:", + ] expected += [ "*evaluating*skipif*condition*", @@ -1159,7 +1150,7 @@ def test_func(): "*ERROR*test_syntax*", "*evaluating*xfail*condition*", " syntax error", - markline, + " ^", "SyntaxError: invalid syntax", "*1 pass*2 errors*", ] @@ -1188,7 +1179,7 @@ def test_default_markers(pytester: Pytester) -> None: result.stdout.fnmatch_lines( [ "*skipif(condition, ..., [*], reason=...)*skip*", - "*xfail(condition, ..., [*], reason=..., run=True, raises=None, strict=xfail_strict)*expected 
failure*", + "*xfail(condition, ..., [*], reason=..., run=True, raises=None, strict=strict_xfail)*expected failure*", ] ) @@ -1314,7 +1305,7 @@ def pytest_collect_file(file_path, parent): """ ) result = pytester.inline_run() - passed, skipped, failed = result.listoutcomes() + _passed, skipped, failed = result.listoutcomes() assert not failed xfailed = [r for r in skipped if hasattr(r, "wasxfail")] assert xfailed @@ -1388,7 +1379,7 @@ def pytest_collect_file(file_path, parent): """ ) result = pytester.inline_run() - passed, skipped, failed = result.listoutcomes() + _passed, skipped, failed = result.listoutcomes() assert not failed xfailed = [r for r in skipped if hasattr(r, "wasxfail")] assert xfailed @@ -1416,7 +1407,7 @@ def test_fail(): def test_importorskip() -> None: with pytest.raises( pytest.skip.Exception, - match="^could not import 'doesnotexist': No module named .*", + match=r"^could not import 'doesnotexist': No module named .*", ): pytest.importorskip("doesnotexist") diff --git a/testing/test_subtests.py b/testing/test_subtests.py new file mode 100644 index 00000000000..06de9f009d8 --- /dev/null +++ b/testing/test_subtests.py @@ -0,0 +1,1034 @@ +from __future__ import annotations + +import sys +from typing import Literal + +from _pytest.subtests import SubtestContext +from _pytest.subtests import SubtestReport +import pytest + + +IS_PY311 = sys.version_info[:2] >= (3, 11) + + +def test_failures(pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("COLUMNS", "120") + pytester.makepyfile( + """ + def test_foo(subtests): + with subtests.test("foo subtest"): + assert False, "foo subtest failure" + + def test_bar(subtests): + with subtests.test("bar subtest"): + assert False, "bar subtest failure" + assert False, "test_bar also failed" + + def test_zaz(subtests): + with subtests.test("zaz subtest"): + pass + """ + ) + summary_lines = [ + "*=== FAILURES ===*", + # + "*___ test_foo [[]foo subtest[]] ___*", + 
"*AssertionError: foo subtest failure", + # + "*___ test_foo ___*", + "contains 1 failed subtest", + # + "*___ test_bar [[]bar subtest[]] ___*", + "*AssertionError: bar subtest failure", + # + "*___ test_bar ___*", + "*AssertionError: test_bar also failed", + # + "*=== short test summary info ===*", + "SUBFAILED[[]foo subtest[]] test_*.py::test_foo - AssertionError*", + "FAILED test_*.py::test_foo - contains 1 failed subtest", + "SUBFAILED[[]bar subtest[]] test_*.py::test_bar - AssertionError*", + "FAILED test_*.py::test_bar - AssertionError*", + ] + result = pytester.runpytest() + result.stdout.fnmatch_lines( + [ + "test_*.py uFuF. * [[]100%[]]", + *summary_lines, + "* 4 failed, 1 passed in *", + ] + ) + + result = pytester.runpytest("-v") + result.stdout.fnmatch_lines( + [ + "test_*.py::test_foo SUBFAILED[[]foo subtest[]] * [[] 33%[]]", + "test_*.py::test_foo FAILED * [[] 33%[]]", + "test_*.py::test_bar SUBFAILED[[]bar subtest[]] * [[] 66%[]]", + "test_*.py::test_bar FAILED * [[] 66%[]]", + "test_*.py::test_zaz SUBPASSED[[]zaz subtest[]] * [[]100%[]]", + "test_*.py::test_zaz PASSED * [[]100%[]]", + *summary_lines, + "* 4 failed, 1 passed, 1 subtests passed in *", + ] + ) + pytester.makeini( + """ + [pytest] + verbosity_subtests = 0 + """ + ) + result = pytester.runpytest("-v") + result.stdout.fnmatch_lines( + [ + "test_*.py::test_foo SUBFAILED[[]foo subtest[]] * [[] 33%[]]", + "test_*.py::test_foo FAILED * [[] 33%[]]", + "test_*.py::test_bar SUBFAILED[[]bar subtest[]] * [[] 66%[]]", + "test_*.py::test_bar FAILED * [[] 66%[]]", + "test_*.py::test_zaz PASSED * [[]100%[]]", + *summary_lines, + "* 4 failed, 1 passed in *", + ] + ) + result.stdout.no_fnmatch_line("test_*.py::test_zaz SUBPASSED[[]zaz subtest[]]*") + + +def test_passes(pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("COLUMNS", "120") + pytester.makepyfile( + """ + def test_foo(subtests): + with subtests.test("foo subtest"): + pass + + def test_bar(subtests): + 
with subtests.test("bar subtest"): + pass + """ + ) + result = pytester.runpytest() + result.stdout.fnmatch_lines( + [ + "test_*.py .. * [[]100%[]]", + "* 2 passed in *", + ] + ) + + result = pytester.runpytest("-v") + result.stdout.fnmatch_lines( + [ + "*.py::test_foo SUBPASSED[[]foo subtest[]] * [[] 50%[]]", + "*.py::test_foo PASSED * [[] 50%[]]", + "*.py::test_bar SUBPASSED[[]bar subtest[]] * [[]100%[]]", + "*.py::test_bar PASSED * [[]100%[]]", + "* 2 passed, 2 subtests passed in *", + ] + ) + + pytester.makeini( + """ + [pytest] + verbosity_subtests = 0 + """ + ) + result = pytester.runpytest("-v") + result.stdout.fnmatch_lines( + [ + "*.py::test_foo PASSED * [[] 50%[]]", + "*.py::test_bar PASSED * [[]100%[]]", + "* 2 passed in *", + ] + ) + result.stdout.no_fnmatch_line("*.py::test_foo SUBPASSED[[]foo subtest[]]*") + result.stdout.no_fnmatch_line("*.py::test_bar SUBPASSED[[]bar subtest[]]*") + + +def test_skip(pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("COLUMNS", "120") + pytester.makepyfile( + """ + import pytest + def test_foo(subtests): + with subtests.test("foo subtest"): + pytest.skip("skip foo subtest") + + def test_bar(subtests): + with subtests.test("bar subtest"): + pytest.skip("skip bar subtest") + pytest.skip("skip test_bar") + """ + ) + result = pytester.runpytest("-ra") + result.stdout.fnmatch_lines( + [ + "test_*.py .s * [[]100%[]]", + "*=== short test summary info ===*", + "SKIPPED [[]1[]] test_skip.py:9: skip test_bar", + "* 1 passed, 1 skipped in *", + ] + ) + + result = pytester.runpytest("-v", "-ra") + result.stdout.fnmatch_lines( + [ + "*.py::test_foo SUBSKIPPED[[]foo subtest[]] (skip foo subtest) * [[] 50%[]]", + "*.py::test_foo PASSED * [[] 50%[]]", + "*.py::test_bar SUBSKIPPED[[]bar subtest[]] (skip bar subtest) * [[]100%[]]", + "*.py::test_bar SKIPPED (skip test_bar) * [[]100%[]]", + "*=== short test summary info ===*", + "SUBSKIPPED[[]foo subtest[]] [[]1[]] *.py:*: skip foo subtest", + 
"SUBSKIPPED[[]foo subtest[]] [[]1[]] *.py:*: skip bar subtest", + "SUBSKIPPED[[]foo subtest[]] [[]1[]] *.py:*: skip test_bar", + "* 1 passed, 3 skipped in *", + ] + ) + + pytester.makeini( + """ + [pytest] + verbosity_subtests = 0 + """ + ) + result = pytester.runpytest("-v", "-ra") + result.stdout.fnmatch_lines( + [ + "*.py::test_foo PASSED * [[] 50%[]]", + "*.py::test_bar SKIPPED (skip test_bar) * [[]100%[]]", + "*=== short test summary info ===*", + "* 1 passed, 1 skipped in *", + ] + ) + result.stdout.no_fnmatch_line("*.py::test_foo SUBPASSED[[]foo subtest[]]*") + result.stdout.no_fnmatch_line("*.py::test_bar SUBPASSED[[]bar subtest[]]*") + result.stdout.no_fnmatch_line( + "SUBSKIPPED[[]foo subtest[]] [[]1[]] *.py:*: skip foo subtest" + ) + result.stdout.no_fnmatch_line( + "SUBSKIPPED[[]foo subtest[]] [[]1[]] *.py:*: skip test_bar" + ) + + +def test_xfail(pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("COLUMNS", "120") + pytester.makepyfile( + """ + import pytest + def test_foo(subtests): + with subtests.test("foo subtest"): + pytest.xfail("xfail foo subtest") + + def test_bar(subtests): + with subtests.test("bar subtest"): + pytest.xfail("xfail bar subtest") + pytest.xfail("xfail test_bar") + """ + ) + result = pytester.runpytest("-ra") + result.stdout.fnmatch_lines( + [ + "test_*.py .x * [[]100%[]]", + "*=== short test summary info ===*", + "* 1 passed, 1 xfailed in *", + ] + ) + + result = pytester.runpytest("-v", "-ra") + result.stdout.fnmatch_lines( + [ + "*.py::test_foo SUBXFAIL[[]foo subtest[]] (xfail foo subtest) * [[] 50%[]]", + "*.py::test_foo PASSED * [[] 50%[]]", + "*.py::test_bar SUBXFAIL[[]bar subtest[]] (xfail bar subtest) * [[]100%[]]", + "*.py::test_bar XFAIL (xfail test_bar) * [[]100%[]]", + "*=== short test summary info ===*", + "SUBXFAIL[[]foo subtest[]] *.py::test_foo - xfail foo subtest", + "SUBXFAIL[[]bar subtest[]] *.py::test_bar - xfail bar subtest", + "XFAIL *.py::test_bar - xfail test_bar", + 
"* 1 passed, 3 xfailed in *", + ] + ) + + pytester.makeini( + """ + [pytest] + verbosity_subtests = 0 + """ + ) + result = pytester.runpytest("-v", "-ra") + result.stdout.fnmatch_lines( + [ + "*.py::test_foo PASSED * [[] 50%[]]", + "*.py::test_bar XFAIL (xfail test_bar) * [[]100%[]]", + "*=== short test summary info ===*", + "* 1 passed, 1 xfailed in *", + ] + ) + result.stdout.no_fnmatch_line( + "SUBXFAIL[[]foo subtest[]] *.py::test_foo - xfail foo subtest" + ) + result.stdout.no_fnmatch_line( + "SUBXFAIL[[]bar subtest[]] *.py::test_bar - xfail bar subtest" + ) + + +def test_typing_exported(pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + from pytest import Subtests + + def test_typing_exported(subtests: Subtests) -> None: + assert isinstance(subtests, Subtests) + """ + ) + result = pytester.runpytest() + result.stdout.fnmatch_lines(["*1 passed*"]) + + +def test_subtests_and_parametrization( + pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch +) -> None: + monkeypatch.setenv("COLUMNS", "120") + pytester.makepyfile( + """ + import pytest + + @pytest.mark.parametrize("x", [0, 1]) + def test_foo(subtests, x): + for i in range(3): + with subtests.test("custom", i=i): + assert i % 2 == 0 + assert x == 0 + """ + ) + result = pytester.runpytest("-v") + result.stdout.fnmatch_lines( + [ + "*.py::test_foo[[]0[]] SUBFAILED[[]custom[]] (i=1) *[[] 50%[]]", + "*.py::test_foo[[]0[]] FAILED *[[] 50%[]]", + "*.py::test_foo[[]1[]] SUBFAILED[[]custom[]] (i=1) *[[]100%[]]", + "*.py::test_foo[[]1[]] FAILED *[[]100%[]]", + "contains 1 failed subtest", + "* 4 failed, 4 subtests passed in *", + ] + ) + + pytester.makeini( + """ + [pytest] + verbosity_subtests = 0 + """ + ) + result = pytester.runpytest("-v") + result.stdout.fnmatch_lines( + [ + "*.py::test_foo[[]0[]] SUBFAILED[[]custom[]] (i=1) *[[] 50%[]]", + "*.py::test_foo[[]0[]] FAILED *[[] 50%[]]", + "*.py::test_foo[[]1[]] SUBFAILED[[]custom[]] (i=1) *[[]100%[]]", + "*.py::test_foo[[]1[]] FAILED 
*[[]100%[]]", + "contains 1 failed subtest", + "* 4 failed in *", + ] + ) + + +def test_subtests_fail_top_level_test(pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import pytest + + def test_foo(subtests): + for i in range(3): + with subtests.test("custom", i=i): + assert i % 2 == 0 + """ + ) + result = pytester.runpytest("-v") + result.stdout.fnmatch_lines( + [ + "* 2 failed, 2 subtests passed in *", + ] + ) + + +def test_subtests_do_not_overwrite_top_level_failure(pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import pytest + + def test_foo(subtests): + for i in range(3): + with subtests.test("custom", i=i): + assert i % 2 == 0 + assert False, "top-level failure" + """ + ) + result = pytester.runpytest("-v") + result.stdout.fnmatch_lines( + [ + "*AssertionError: top-level failure", + "* 2 failed, 2 subtests passed in *", + ] + ) + + +def test_msg_not_a_string( + pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch +) -> None: + """ + Using a non-string in subtests.test() should still show it in the terminal (#14195). + + Note: this was not a problem originally with the subtests fixture, only with TestCase.subTest; this test + was added for symmetry. 
+ """ + monkeypatch.setenv("COLUMNS", "120") + pytester.makepyfile( + """ + def test_int_msg(subtests): + with subtests.test(42): + assert False, "subtest failure" + + def test_no_msg(subtests): + with subtests.test(): + assert False, "subtest failure" + """ + ) + result = pytester.runpytest() + result.stdout.fnmatch_lines( + [ + "SUBFAILED[[]42[]] test_msg_not_a_string.py::test_int_msg - AssertionError: subtest failure", + "SUBFAILED() test_msg_not_a_string.py::test_no_msg - AssertionError: subtest failure", + ] + ) + + +@pytest.mark.parametrize("flag", ["--last-failed", "--stepwise"]) +def test_subtests_last_failed_step_wise(pytester: pytest.Pytester, flag: str) -> None: + """Check that --last-failed and --step-wise correctly rerun tests with failed subtests.""" + pytester.makepyfile( + """ + import pytest + + def test_foo(subtests): + for i in range(3): + with subtests.test("custom", i=i): + assert i % 2 == 0 + """ + ) + result = pytester.runpytest("-v") + result.stdout.fnmatch_lines( + [ + "* 2 failed, 2 subtests passed in *", + ] + ) + + result = pytester.runpytest("-v", flag) + result.stdout.fnmatch_lines( + [ + "* 2 failed, 2 subtests passed in *", + ] + ) + + +class TestUnittestSubTest: + """Test unittest.TestCase.subTest functionality.""" + + def test_failures( + self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch + ) -> None: + monkeypatch.setenv("COLUMNS", "120") + pytester.makepyfile( + """ + from unittest import TestCase + + class T(TestCase): + def test_foo(self): + with self.subTest("foo subtest"): + assert False, "foo subtest failure" + + def test_bar(self): + with self.subTest("bar subtest"): + assert False, "bar subtest failure" + assert False, "test_bar also failed" + + def test_zaz(self): + with self.subTest("zaz subtest"): + pass + """ + ) + result = pytester.runpytest() + result.stdout.fnmatch_lines( + [ + "* 3 failed, 2 passed in *", + ] + ) + + def test_passes( + self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch + 
) -> None: + monkeypatch.setenv("COLUMNS", "120") + pytester.makepyfile( + """ + from unittest import TestCase + + class T(TestCase): + def test_foo(self): + with self.subTest("foo subtest"): + pass + + def test_bar(self): + with self.subTest("bar subtest"): + pass + + def test_zaz(self): + with self.subTest("zaz subtest"): + pass + """ + ) + result = pytester.runpytest() + result.stdout.fnmatch_lines( + [ + "* 3 passed in *", + ] + ) + + def test_skip( + self, + pytester: pytest.Pytester, + ) -> None: + pytester.makepyfile( + """ + from unittest import TestCase, main + + class T(TestCase): + + def test_foo(self): + for i in range(5): + with self.subTest(msg="custom", i=i): + if i % 2 == 0: + self.skipTest('even number') + """ + ) + # This output might change #13756. + result = pytester.runpytest() + result.stdout.fnmatch_lines(["* 1 passed in *"]) + + def test_non_subtest_skip( + self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch + ) -> None: + monkeypatch.setenv("COLUMNS", "120") + pytester.makepyfile( + """ + from unittest import TestCase, main + + class T(TestCase): + + def test_foo(self): + with self.subTest(msg="subtest"): + assert False, "failed subtest" + self.skipTest('non-subtest skip') + """ + ) + # This output might change #13756. + result = pytester.runpytest() + result.stdout.fnmatch_lines( + [ + "SUBFAILED[[]subtest[]] test_non_subtest_skip.py::T::test_foo*", + "* 1 failed, 1 skipped in *", + ] + ) + + def test_xfail( + self, + pytester: pytest.Pytester, + ) -> None: + pytester.makepyfile( + """ + import pytest + from unittest import expectedFailure, TestCase + + class T(TestCase): + @expectedFailure + def test_foo(self): + for i in range(5): + with self.subTest(msg="custom", i=i): + if i % 2 == 0: + raise pytest.xfail('even number') + + if __name__ == '__main__': + main() + """ + ) + # This output might change #13756. 
+ result = pytester.runpytest() + result.stdout.fnmatch_lines(["* 1 xfailed in *"]) + + def test_only_original_skip_is_called( + self, + pytester: pytest.Pytester, + monkeypatch: pytest.MonkeyPatch, + ) -> None: + """Regression test for pytest-dev/pytest-subtests#173.""" + monkeypatch.setenv("COLUMNS", "120") + pytester.makepyfile( + """ + import unittest + from unittest import TestCase + + @unittest.skip("skip this test") + class T(unittest.TestCase): + def test_foo(self): + assert 1 == 2 + """ + ) + result = pytester.runpytest("-v", "-rsf") + result.stdout.fnmatch_lines( + ["SKIPPED [1] test_only_original_skip_is_called.py:6: skip this test"] + ) + + def test_skip_with_failure( + self, + pytester: pytest.Pytester, + monkeypatch: pytest.MonkeyPatch, + ) -> None: + monkeypatch.setenv("COLUMNS", "120") + pytester.makepyfile( + """ + import pytest + from unittest import TestCase + + class T(TestCase): + def test_foo(self): + with self.subTest("subtest 1"): + self.skipTest(f"skip subtest 1") + with self.subTest("subtest 2"): + assert False, "fail subtest 2" + """ + ) + + result = pytester.runpytest("-ra") + result.stdout.fnmatch_lines( + [ + "*.py u. 
* [[]100%[]]", + "*=== short test summary info ===*", + "SUBFAILED[[]subtest 2[]] *.py::T::test_foo - AssertionError: fail subtest 2", + "* 1 failed, 1 passed in *", + ] + ) + + result = pytester.runpytest("-v", "-ra") + result.stdout.fnmatch_lines( + [ + "*.py::T::test_foo SUBSKIPPED[[]subtest 1[]] (skip subtest 1) * [[]100%[]]", + "*.py::T::test_foo SUBFAILED[[]subtest 2[]] * [[]100%[]]", + "*.py::T::test_foo PASSED * [[]100%[]]", + "SUBSKIPPED[[]subtest 1[]] [[]1[]] *.py:*: skip subtest 1", + "SUBFAILED[[]subtest 2[]] *.py::T::test_foo - AssertionError: fail subtest 2", + "* 1 failed, 1 passed, 1 skipped in *", + ] + ) + + pytester.makeini( + """ + [pytest] + verbosity_subtests = 0 + """ + ) + result = pytester.runpytest("-v", "-ra") + result.stdout.fnmatch_lines( + [ + "*.py::T::test_foo SUBFAILED[[]subtest 2[]] * [[]100%[]]", + "*.py::T::test_foo PASSED * [[]100%[]]", + "*=== short test summary info ===*", + r"SUBFAILED[[]subtest 2[]] *.py::T::test_foo - AssertionError: fail subtest 2", + r"* 1 failed, 1 passed in *", + ] + ) + result.stdout.no_fnmatch_line( + "*.py::T::test_foo SUBSKIPPED[[]subtest 1[]] (skip subtest 1) * [[]100%[]]" + ) + result.stdout.no_fnmatch_line( + "SUBSKIPPED[[]subtest 1[]] [[]1[]] *.py:*: skip subtest 1" + ) + + def test_msg_not_a_string( + self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch + ) -> None: + """Using a non-string in TestCase.subTest should still show it in the terminal (#14195).""" + monkeypatch.setenv("COLUMNS", "120") + pytester.makepyfile( + """ + from unittest import TestCase + + class T(TestCase): + def test_int_msg(self): + with self.subTest(42): + assert False, "subtest failure" + + def test_no_msg(self): + with self.subTest(): + assert False, "subtest failure" + """ + ) + result = pytester.runpytest() + result.stdout.fnmatch_lines( + [ + "SUBFAILED[[]42[]] test_msg_not_a_string.py::T::test_int_msg - AssertionError: subtest failure", + "SUBFAILED() test_msg_not_a_string.py::T::test_no_msg - 
AssertionError: subtest failure", + ] + ) + + +class TestCapture: + def create_file(self, pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import sys + def test(subtests): + print() + print('start test') + + with subtests.test(i='A'): + print("hello stdout A") + print("hello stderr A", file=sys.stderr) + assert 0 + + with subtests.test(i='B'): + print("hello stdout B") + print("hello stderr B", file=sys.stderr) + assert 0 + + print('end test') + assert 0 + """ + ) + + @pytest.mark.parametrize("mode", ["fd", "sys"]) + def test_capturing(self, pytester: pytest.Pytester, mode: str) -> None: + self.create_file(pytester) + result = pytester.runpytest(f"--capture={mode}") + result.stdout.fnmatch_lines( + [ + "*__ test (i='A') __*", + "*Captured stdout call*", + "hello stdout A", + "*Captured stderr call*", + "hello stderr A", + "*__ test (i='B') __*", + "*Captured stdout call*", + "hello stdout B", + "*Captured stderr call*", + "hello stderr B", + "*__ test __*", + "*Captured stdout call*", + "start test", + "end test", + ] + ) + + def test_no_capture(self, pytester: pytest.Pytester) -> None: + self.create_file(pytester) + result = pytester.runpytest("-s") + result.stdout.fnmatch_lines( + [ + "start test", + "hello stdout A", + "uhello stdout B", + "uend test", + "*__ test (i='A') __*", + "*__ test (i='B') __*", + "*__ test __*", + ] + ) + result.stderr.fnmatch_lines(["hello stderr A", "hello stderr B"]) + + @pytest.mark.parametrize("fixture", ["capsys", "capfd"]) + def test_capture_with_fixture( + self, pytester: pytest.Pytester, fixture: Literal["capsys", "capfd"] + ) -> None: + pytester.makepyfile( + rf""" + import sys + + def test(subtests, {fixture}): + print('start test') + + with subtests.test(i='A'): + print("hello stdout A") + print("hello stderr A", file=sys.stderr) + + out, err = {fixture}.readouterr() + assert out == 'start test\nhello stdout A\n' + assert err == 'hello stderr A\n' + """ + ) + result = pytester.runpytest() + 
result.stdout.fnmatch_lines( + [ + "*1 passed*", + ] + ) + + +class TestLogging: + def create_file(self, pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import logging + + def test_foo(subtests): + logging.info("before") + + with subtests.test("sub1"): + print("sub1 stdout") + logging.info("sub1 logging") + logging.debug("sub1 logging debug") + + with subtests.test("sub2"): + print("sub2 stdout") + logging.info("sub2 logging") + logging.debug("sub2 logging debug") + assert False + """ + ) + + def test_capturing_info(self, pytester: pytest.Pytester) -> None: + self.create_file(pytester) + result = pytester.runpytest("--log-level=INFO") + result.stdout.fnmatch_lines( + [ + "*___ test_foo [[]sub2[]] __*", + "*-- Captured stdout call --*", + "sub2 stdout", + "*-- Captured log call ---*", + "INFO * before", + "INFO * sub1 logging", + "INFO * sub2 logging", + "*== short test summary info ==*", + ] + ) + result.stdout.no_fnmatch_line("sub1 logging debug") + result.stdout.no_fnmatch_line("sub2 logging debug") + + def test_capturing_debug(self, pytester: pytest.Pytester) -> None: + self.create_file(pytester) + result = pytester.runpytest("--log-level=DEBUG") + result.stdout.fnmatch_lines( + [ + "*___ test_foo [[]sub2[]] __*", + "*-- Captured stdout call --*", + "sub2 stdout", + "*-- Captured log call ---*", + "INFO * before", + "INFO * sub1 logging", + "DEBUG * sub1 logging debug", + "INFO * sub2 logging", + "DEBUG * sub2 logging debug", + "*== short test summary info ==*", + ] + ) + + def test_caplog(self, pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import logging + + def test(subtests, caplog): + caplog.set_level(logging.INFO) + logging.info("start test") + + with subtests.test("sub1"): + logging.info("inside %s", "subtest1") + + assert len(caplog.records) == 2 + assert caplog.records[0].getMessage() == "start test" + assert caplog.records[1].getMessage() == "inside subtest1" + """ + ) + result = pytester.runpytest() + 
result.stdout.fnmatch_lines( + [ + "*1 passed*", + ] + ) + + def test_no_logging(self, pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import logging + + def test(subtests): + logging.info("start log line") + + with subtests.test("sub passing"): + logging.info("inside %s", "passing log line") + + with subtests.test("sub failing"): + logging.info("inside %s", "failing log line") + assert False + + logging.info("end log line") + """ + ) + result = pytester.runpytest("-p no:logging") + result.stdout.fnmatch_lines( + [ + "*2 failed in*", + ] + ) + result.stdout.no_fnmatch_line("*root:test_no_logging.py*log line*") + + +class TestDebugging: + """Check --pdb support for subtests fixture and TestCase.subTest.""" + + class _FakePdb: + """Fake debugger class implementation that tracks which methods were called on it.""" + + quitting: bool = False + calls: list[str] = [] + + def __init__(self, *_: object, **__: object) -> None: + self.calls.append("init") + + def reset(self) -> None: + self.calls.append("reset") + + def interaction(self, *_: object) -> None: + self.calls.append("interaction") + + @pytest.fixture(autouse=True) + def cleanup_calls(self) -> None: + self._FakePdb.calls.clear() + + def test_pdb_fixture( + self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch + ) -> None: + pytester.makepyfile( + """ + def test(subtests): + with subtests.test(): + assert 0 + """ + ) + self.runpytest_and_check_pdb(pytester, monkeypatch) + + def test_pdb_unittest( + self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch + ) -> None: + pytester.makepyfile( + """ + from unittest import TestCase + class Test(TestCase): + def test(self): + with self.subTest(): + assert 0 + """ + ) + self.runpytest_and_check_pdb(pytester, monkeypatch) + + def runpytest_and_check_pdb( + self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch + ) -> None: + # Install the fake pdb implementation in _pytest.subtests so we can reference + # it in the command 
line (any module would do). + import _pytest.subtests + + monkeypatch.setattr( + _pytest.subtests, "_CustomPdb", self._FakePdb, raising=False + ) + result = pytester.runpytest("--pdb", "--pdbcls=_pytest.subtests:_CustomPdb") + + # Ensure pytest entered in debugging mode when encountering the failing + # assert. + result.stdout.fnmatch_lines("*entering PDB*") + assert self._FakePdb.calls == ["init", "reset", "interaction"] + + +def test_exitfirst(pytester: pytest.Pytester) -> None: + """Validate that when passing --exitfirst the test exits after the first failed subtest.""" + pytester.makepyfile( + """ + def test_foo(subtests): + with subtests.test("sub1"): + assert False + + with subtests.test("sub2"): + assert False + """ + ) + result = pytester.runpytest("--exitfirst") + assert result.parseoutcomes()["failed"] == 2 + result.stdout.fnmatch_lines( + [ + "SUBFAILED*[[]sub1[]] *.py::test_foo - assert False*", + "FAILED *.py::test_foo - assert False", + "* stopping after 2 failures*", + ], + consecutive=True, + ) + result.stdout.no_fnmatch_line("*sub2*") # sub2 not executed. + + +def test_do_not_swallow_pytest_exit(pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import pytest + def test(subtests): + with subtests.test(): + pytest.exit() + + def test2(): pass + """ + ) + result = pytester.runpytest_subprocess() + result.stdout.fnmatch_lines( + [ + "* _pytest.outcomes.Exit *", + "* 1 failed in *", + ] + ) + + +def test_nested(pytester: pytest.Pytester) -> None: + """ + Currently we do nothing special with nested subtests. + + This test only sediments how they work now, we might reconsider adding some kind of nesting support in the future. 
+ """ + pytester.makepyfile( + """ + import pytest + def test(subtests): + with subtests.test("a"): + with subtests.test("b"): + assert False, "b failed" + assert False, "a failed" + """ + ) + result = pytester.runpytest_subprocess() + result.stdout.fnmatch_lines( + [ + "SUBFAILED[b] test_nested.py::test - AssertionError: b failed", + "SUBFAILED[a] test_nested.py::test - AssertionError: a failed", + "* 3 failed in *", + ] + ) + + +def test_serialization() -> None: + from _pytest.subtests import pytest_report_from_serializable + from _pytest.subtests import pytest_report_to_serializable + + report = SubtestReport( + "test_foo::test_foo", + ("test_foo.py", 12, ""), + keywords={}, + outcome="passed", + when="call", + longrepr=None, + context=SubtestContext(msg="custom message", kwargs=dict(i=10)), + ) + data = pytest_report_to_serializable(report) + assert data is not None + new_report = pytest_report_from_serializable(data) + assert new_report is not None + assert new_report.context == SubtestContext(msg="custom message", kwargs=dict(i=10)) diff --git a/testing/test_terminal.py b/testing/test_terminal.py index 86feb33b3ec..3053f5ef9a1 100644 --- a/testing/test_terminal.py +++ b/testing/test_terminal.py @@ -10,7 +10,9 @@ import textwrap from types import SimpleNamespace from typing import cast +from typing import Literal from typing import NamedTuple +from unittest import mock import pluggy @@ -30,6 +32,7 @@ from _pytest.terminal import _get_raw_skip_reason from _pytest.terminal import _plugin_nameversions from _pytest.terminal import getreportopt +from _pytest.terminal import TerminalProgressPlugin from _pytest.terminal import TerminalReporter import pytest @@ -112,6 +115,31 @@ def test_func(): [" def test_func():", "> assert 0", "E assert 0"] ) + def test_console_output_style_times_with_skipped_and_passed( + self, pytester: Pytester + ) -> None: + pytester.makepyfile( + test_repro=""" + def test_hello(): + pass + """, + test_repro_skip=""" + import pytest + 
pytest.importorskip("fakepackage_does_not_exist") + """, + ) + result = pytester.runpytest( + "test_repro.py", + "test_repro_skip.py", + "-o", + "console_output_style=times", + ) + + result.stdout.fnmatch_lines("* 1 passed, 1 skipped in *") + + combined = "\n".join(result.stdout.lines + result.stderr.lines) + assert "INTERNALERROR" not in combined + def test_internalerror(self, pytester: Pytester, linecomp) -> None: modcol = pytester.getmodulecol("def test_one(): pass") rep = TerminalReporter(modcol.config, file=linecomp.stringio) @@ -442,6 +470,16 @@ def test_long_xfail(): ] ) + @pytest.mark.parametrize("isatty", [True, False]) + def test_isatty(self, pytester: Pytester, monkeypatch, isatty: bool) -> None: + config = pytester.parseconfig() + f = StringIO() + monkeypatch.setattr(f, "isatty", lambda: isatty) + tr = TerminalReporter(config, f) + assert tr.isatty() == isatty + # It was incorrectly implemented as a boolean so we still support using it as one. + assert bool(tr.isatty) == isatty + class TestCollectonly: def test_collectonly_basic(self, pytester: Pytester) -> None: @@ -901,7 +939,7 @@ def test_header(self, pytester: Pytester) -> None: pytester.path.joinpath("tests").mkdir() pytester.path.joinpath("gui").mkdir() - # no ini file + # no configuration file result = pytester.runpytest() result.stdout.fnmatch_lines(["rootdir: *test_header0"]) @@ -1548,6 +1586,19 @@ def test_func(): assert "--calling--" not in s assert "IndexError" not in s + def test_tb_line_show_capture(self, pytester: Pytester, option) -> None: + output_to_capture = "help! let me out!" 
+ pytester.makepyfile( + f""" + import pytest + def test_fail(): + print('{output_to_capture}') + assert False + """ + ) + result = pytester.runpytest("--tb=line") + result.stdout.fnmatch_lines(["*- Captured stdout call -*", output_to_capture]) + def test_tb_crashline(self, pytester: Pytester, option) -> None: p = pytester.makepyfile( """ @@ -2215,8 +2266,8 @@ def test_times_multiline( output.stdout.re_match_lines( [ r"test_bar.py ...................", - r"........... \s+ \d{1,3}[\.[a-z\ ]{1,2}\d{0,3}\w{1,2}$", - r"test_foo.py \.{5} \s+ \d{1,3}[\.[a-z\ ]{1,2}\d{0,3}\w{1,2}$", + r"........... \s+ \d{1,4}[\.[a-z\ ]{1,2}\d{0,3}\w{1,2}$", + r"test_foo.py \.{5} \s+ \d{1,4}[\.[a-z\ ]{1,2}\d{0,3}\w{1,2}$", ], consecutive=True, ) @@ -2642,7 +2693,7 @@ def test_len_dict(): [ "*short test summary info*", f"*{list(range(10))}*", - f"*{dict(zip(range(10), range(10)))}*", + f"*{dict(zip(range(10), range(10), strict=True))}*", ] ) @@ -2854,6 +2905,100 @@ def test_format_trimmed() -> None: assert _format_trimmed(" ({}) ", msg, len(msg) + 3) == " (unconditional ...) 
" +def test_warning_when_init_trumps_pyproject_toml( + pytester: Pytester, monkeypatch: MonkeyPatch +) -> None: + """Regression test for #7814.""" + tests = pytester.path.joinpath("tests") + tests.mkdir() + pytester.makepyprojecttoml( + f""" + [tool.pytest.ini_options] + testpaths = ['{tests}'] + """ + ) + pytester.makefile(".ini", pytest="") + result = pytester.runpytest() + result.stdout.fnmatch_lines( + [ + "configfile: pytest.ini (WARNING: ignoring pytest config in pyproject.toml!)", + ] + ) + + +def test_warning_when_init_trumps_multiple_files( + pytester: Pytester, monkeypatch: MonkeyPatch +) -> None: + """Regression test for #7814.""" + tests = pytester.path.joinpath("tests") + tests.mkdir() + pytester.makepyprojecttoml( + f""" + [tool.pytest.ini_options] + testpaths = ['{tests}'] + """ + ) + pytester.makefile(".ini", pytest="") + pytester.makeini( + """ + # tox.ini + [pytest] + minversion = 6.0 + addopts = -ra -q + testpaths = + tests + integration + """ + ) + result = pytester.runpytest() + result.stdout.fnmatch_lines( + [ + "configfile: pytest.ini (WARNING: ignoring pytest config in pyproject.toml, tox.ini!)", + ] + ) + + +def test_no_warning_when_init_but_pyproject_toml_has_no_entry( + pytester: Pytester, monkeypatch: MonkeyPatch +) -> None: + """Regression test for #7814.""" + tests = pytester.path.joinpath("tests") + tests.mkdir() + pytester.makepyprojecttoml( + f""" + [tool] + testpaths = ['{tests}'] + """ + ) + pytester.makefile(".ini", pytest="") + result = pytester.runpytest() + result.stdout.fnmatch_lines( + [ + "configfile: pytest.ini", + ] + ) + + +def test_no_warning_on_terminal_with_a_single_config_file( + pytester: Pytester, monkeypatch: MonkeyPatch +) -> None: + """Regression test for #7814.""" + tests = pytester.path.joinpath("tests") + tests.mkdir() + pytester.makepyprojecttoml( + f""" + [tool.pytest.ini_options] + testpaths = ['{tests}'] + """ + ) + result = pytester.runpytest() + result.stdout.fnmatch_lines( + [ + "configfile: 
pyproject.toml", + ] + ) + + class TestFineGrainedTestCase: DEFAULT_FILE_CONTENTS = """ import pytest @@ -3262,3 +3407,144 @@ def test_x(a): r".*test_foo.py::test_x\[a::b/\] .*FAILED.*", ] ) + + +class TestTerminalProgressPlugin: + """Tests for the TerminalProgressPlugin.""" + + @pytest.fixture + def mock_file(self) -> StringIO: + return StringIO() + + @pytest.fixture + def mock_tr(self, mock_file: StringIO) -> pytest.TerminalReporter: + tr: pytest.TerminalReporter = mock.create_autospec(pytest.TerminalReporter) + + def write_raw(content: str, *, flush: bool = False) -> None: + mock_file.write(content) + + tr.write_raw = write_raw # type: ignore[method-assign] + tr._progress_nodeids_reported = set() + return tr + + @pytest.mark.skipif(sys.platform != "win32", reason="#13896") + def test_plugin_registration_enabled_by_default( + self, pytester: pytest.Pytester, monkeypatch: MonkeyPatch + ) -> None: + """Test that the plugin registration is enabled by default. + + Currently only on Windows (#13896). + """ + monkeypatch.setattr(sys.stdout, "isatty", lambda: True) + # The plugin module should be registered as a default plugin. + config = pytester.parseconfigure() + plugin = config.pluginmanager.get_plugin("terminalprogress") + assert plugin is not None + + def test_plugin_registred_on_all_platforms_when_explicitly_requested( + self, pytester: pytest.Pytester, monkeypatch: MonkeyPatch + ) -> None: + """Test that the plugin is registered on any platform if explicitly requested.""" + monkeypatch.setattr(sys.stdout, "isatty", lambda: True) + # The plugin module should be registered as a default plugin. 
+ config = pytester.parseconfigure("-p", "terminalprogress") + plugin = config.pluginmanager.get_plugin("terminalprogress") + assert plugin is not None + + def test_disabled_for_non_tty( + self, pytester: pytest.Pytester, monkeypatch: MonkeyPatch + ) -> None: + """Test that plugin is disabled for non-TTY output.""" + monkeypatch.setattr(sys.stdout, "isatty", lambda: False) + config = pytester.parseconfigure("-p", "terminalprogress") + plugin = config.pluginmanager.get_plugin("terminalprogress-plugin") + assert plugin is None + + def test_disabled_for_dumb_terminal( + self, pytester: pytest.Pytester, monkeypatch: MonkeyPatch + ) -> None: + """Test that plugin is disabled when TERM=dumb.""" + monkeypatch.setenv("TERM", "dumb") + monkeypatch.setattr(sys.stdout, "isatty", lambda: True) + config = pytester.parseconfigure("-p", "terminalprogress") + plugin = config.pluginmanager.get_plugin("terminalprogress-plugin") + assert plugin is None + + @pytest.mark.parametrize( + ["state", "progress", "expected"], + [ + ("indeterminate", None, "\x1b]9;4;3;\x1b\\"), + ("normal", 50, "\x1b]9;4;1;50\x1b\\"), + ("error", 75, "\x1b]9;4;2;75\x1b\\"), + ("paused", None, "\x1b]9;4;4;\x1b\\"), + ("paused", 80, "\x1b]9;4;4;80\x1b\\"), + ("remove", None, "\x1b]9;4;0;\x1b\\"), + ], + ) + def test_emit_progress_sequences( + self, + mock_file: StringIO, + mock_tr: pytest.TerminalReporter, + state: Literal["remove", "normal", "error", "indeterminate", "paused"], + progress: int | None, + expected: str, + ) -> None: + """Test that progress sequences are emitted correctly.""" + plugin = TerminalProgressPlugin(mock_tr) + plugin._emit_progress(state, progress) + assert expected in mock_file.getvalue() + + def test_session_lifecycle( + self, mock_file: StringIO, mock_tr: pytest.TerminalReporter + ) -> None: + """Test progress updates during session lifecycle.""" + plugin = TerminalProgressPlugin(mock_tr) + + session = mock.create_autospec(pytest.Session) + session.testscollected = 3 + + # Session 
start - should emit indeterminate progress. + plugin.pytest_sessionstart(session) + assert "\x1b]9;4;3;\x1b\\" in mock_file.getvalue() + mock_file.truncate(0) + mock_file.seek(0) + + # Collection finish - should emit 0% progress. + plugin.pytest_collection_finish() + assert "\x1b]9;4;1;0\x1b\\" in mock_file.getvalue() + mock_file.truncate(0) + mock_file.seek(0) + + # First test - 33% progress. + report1 = pytest.TestReport( + nodeid="test_1", + location=("test.py", 0, "test_1"), + when="call", + outcome="passed", + keywords={}, + longrepr=None, + ) + mock_tr.reported_progress = 1 # type: ignore[misc] + plugin.pytest_runtest_logreport(report1) + assert "\x1b]9;4;1;33\x1b\\" in mock_file.getvalue() + mock_file.truncate(0) + mock_file.seek(0) + + # Second test with failure - 66% in error state. + report2 = pytest.TestReport( + nodeid="test_2", + location=("test.py", 1, "test_2"), + when="call", + outcome="failed", + keywords={}, + longrepr=None, + ) + mock_tr.reported_progress = 2 # type: ignore[misc] + plugin.pytest_runtest_logreport(report2) + assert "\x1b]9;4;2;66\x1b\\" in mock_file.getvalue() + mock_file.truncate(0) + mock_file.seek(0) + + # Session finish - should remove progress. 
+ plugin.pytest_sessionfinish() + assert "\x1b]9;4;0;\x1b\\" in mock_file.getvalue() diff --git a/testing/test_threadexception.py b/testing/test_threadexception.py index 5dad07b8b85..f4595ec435d 100644 --- a/testing/test_threadexception.py +++ b/testing/test_threadexception.py @@ -211,7 +211,7 @@ def test_it(request): """ ) - result = pytester.runpytest() + result = pytester.runpytest("-Werror") # TODO: should be a test failure or error assert result.ret == pytest.ExitCode.INTERNAL_ERROR diff --git a/testing/test_tmpdir.py b/testing/test_tmpdir.py index 016588a143d..789e8005184 100644 --- a/testing/test_tmpdir.py +++ b/testing/test_tmpdir.py @@ -5,6 +5,7 @@ import dataclasses import os from pathlib import Path +import shutil import stat import sys from typing import cast @@ -386,7 +387,7 @@ def test_cleanup_lock_create(self, tmp_path): d = tmp_path.joinpath("test") d.mkdir() lockfile = create_cleanup_lock(d) - with pytest.raises(OSError, match="cannot create lockfile in .*"): + with pytest.raises(OSError, match=r"cannot create lockfile in .*"): create_cleanup_lock(d) lockfile.unlink() @@ -619,3 +620,33 @@ def test_tmp_path_factory_fixes_up_world_readable_permissions( # After - fixed. assert (basetemp.parent.stat().st_mode & 0o077) == 0 + + +@pytest.mark.skipif( + not hasattr(os, "getuid") or os.stat not in os.supports_follow_symlinks, + reason="checks unix permissions and symlinks", +) +def test_tmp_path_factory_doesnt_follow_symlinks( + tmp_path: Path, monkeypatch: MonkeyPatch +) -> None: + """Verify that if a /tmp/pytest-of-foo directory is a symbolic link, + it is rejected (#13669, CVE-2025-71176).""" + attacker_controlled = tmp_path / "attacker_controlled" + attacker_controlled.mkdir() + + # Use the test's tmp_path as the system temproot (/tmp). + monkeypatch.setenv("PYTEST_DEBUG_TEMPROOT", str(tmp_path)) + + # First just get the pytest-of-user path. 
+ tmp_factory = TempPathFactory(None, 3, "all", lambda *args: None, _ispytest=True) + pytest_of_user = tmp_factory.getbasetemp().parent + # Just for safety in the test, before we nuke it. + assert "pytest-of-" in str(pytest_of_user) + shutil.rmtree(pytest_of_user) + + pytest_of_user.symlink_to(attacker_controlled) + + # This now tries to use the directory when it's a symlink. + tmp_factory = TempPathFactory(None, 3, "all", lambda *args: None, _ispytest=True) + with pytest.raises(OSError, match=r"temporary directory .* is a symbolic link"): + tmp_factory.getbasetemp() diff --git a/testing/test_unittest.py b/testing/test_unittest.py index 56224c08228..395c9fe647e 100644 --- a/testing/test_unittest.py +++ b/testing/test_unittest.py @@ -1094,6 +1094,49 @@ def test_two(self): result.assert_outcomes(passed=2) +def test_skip_setup_class(pytester: Pytester) -> None: + """Skipping tests in a class by raising unittest.SkipTest in `setUpClass` (#13985).""" + pytester.makepyfile( + """ + import unittest + + class Test(unittest.TestCase): + + @classmethod + def setUpClass(cls): + raise unittest.SkipTest('Skipping setupclass') + + def test_foo(self): + assert False + + def test_bar(self): + assert False + """ + ) + result = pytester.runpytest() + result.assert_outcomes(skipped=2) + + +def test_unittest_skip_function(pytester: Pytester) -> None: + """ + Ensure raising an explicit unittest.SkipTest skips standard pytest functions. + + Support for this is debatable -- technically we only support unittest.SkipTest in TestCase subclasses, + but stating this support here in this test because users currently expect this to work, + so if we ever break it we at least know we are breaking this use case (#13985). 
+ """ + pytester.makepyfile( + """ + import unittest + + def test_foo(): + raise unittest.SkipTest('Skipping test_foo') + """ + ) + result = pytester.runpytest() + result.assert_outcomes(skipped=1) + + def test_testcase_handles_init_exceptions(pytester: Pytester) -> None: """ Regression test to make sure exceptions in the __init__ method are bubbled up correctly. @@ -1322,10 +1365,12 @@ def test_async_support(pytester: Pytester) -> None: reprec.assertoutcome(failed=1, passed=2) +@pytest.mark.skipif( + sys.version_info >= (3, 11), reason="asynctest is not compatible with Python 3.11+" +) def test_asynctest_support(pytester: Pytester) -> None: """Check asynctest support (#7110)""" pytest.importorskip("asynctest") - pytester.copy_example("unittest/test_unittest_asynctest.py") reprec = pytester.inline_run() reprec.assertoutcome(failed=1, passed=2) @@ -1374,7 +1419,7 @@ def test_cleanup_called_exactly_once(): """ ) reprec = pytester.inline_run(testpath) - passed, skipped, failed = reprec.countoutcomes() + passed, _skipped, failed = reprec.countoutcomes() assert failed == 0 assert passed == 3 @@ -1398,7 +1443,7 @@ def test_cleanup_called_exactly_once(): """ ) reprec = pytester.inline_run(testpath) - passed, skipped, failed = reprec.countoutcomes() + passed, _skipped, failed = reprec.countoutcomes() assert failed == 1 assert passed == 1 @@ -1426,7 +1471,7 @@ def test_cleanup_called_exactly_once(): """ ) reprec = pytester.inline_run(testpath) - passed, skipped, failed = reprec.countoutcomes() + passed, _skipped, _failed = reprec.countoutcomes() assert passed == 3 @@ -1449,7 +1494,7 @@ def test_cleanup_called_the_right_number_of_times(): """ ) reprec = pytester.inline_run(testpath) - passed, skipped, failed = reprec.countoutcomes() + passed, _skipped, failed = reprec.countoutcomes() assert failed == 0 assert passed == 3 @@ -1474,7 +1519,7 @@ def test_cleanup_called_the_right_number_of_times(): """ ) reprec = pytester.inline_run(testpath) - passed, skipped, failed = 
reprec.countoutcomes() + passed, _skipped, failed = reprec.countoutcomes() assert failed == 2 assert passed == 1 @@ -1500,7 +1545,7 @@ def test_cleanup_called_the_right_number_of_times(): """ ) reprec = pytester.inline_run(testpath) - passed, skipped, failed = reprec.countoutcomes() + passed, _skipped, failed = reprec.countoutcomes() assert failed == 2 assert passed == 1 @@ -1614,7 +1659,7 @@ def test_it(self): """ ) reprec = pytester.inline_run() - passed, skipped, failed = reprec.countoutcomes() + passed, _skipped, failed = reprec.countoutcomes() assert passed == 1 assert failed == 1 assert reprec.ret == 1 diff --git a/testing/test_unraisableexception.py b/testing/test_unraisableexception.py index 6c0dc542e93..a6a4d6f35e8 100644 --- a/testing/test_unraisableexception.py +++ b/testing/test_unraisableexception.py @@ -1,7 +1,5 @@ from __future__ import annotations -from collections.abc import Generator -import contextlib import gc import sys from unittest import mock @@ -229,19 +227,13 @@ def _set_gc_state(enabled: bool) -> bool: return was_enabled -@contextlib.contextmanager -def _disable_gc() -> Generator[None]: - was_enabled = _set_gc_state(enabled=False) - try: - yield - finally: - _set_gc_state(enabled=was_enabled) - - def test_refcycle_unraisable(pytester: Pytester) -> None: # see: https://github.com/pytest-dev/pytest/issues/10404 pytester.makepyfile( test_it=""" + # Should catch the unraisable exception even if gc is disabled. 
+ import gc; gc.disable() + import pytest class BrokenDel: @@ -256,23 +248,22 @@ def test_it(): """ ) - with _disable_gc(): - result = pytester.runpytest() + result = pytester.runpytest_subprocess( + "-Wdefault::pytest.PytestUnraisableExceptionWarning" + ) - # TODO: should be a test failure or error - assert result.ret == pytest.ExitCode.INTERNAL_ERROR + assert result.ret == 0 result.assert_outcomes(passed=1) result.stderr.fnmatch_lines("ValueError: del is broken") -@pytest.mark.filterwarnings("default::pytest.PytestUnraisableExceptionWarning") def test_refcycle_unraisable_warning_filter(pytester: Pytester) -> None: - # note that the host pytest warning filter is disabled and the pytester - # warning filter applies during config teardown of unraisablehook. - # see: https://github.com/pytest-dev/pytest/issues/10404 pytester.makepyfile( test_it=""" + # Should catch the unraisable exception even if gc is disabled. + import gc; gc.disable() + import pytest class BrokenDel: @@ -287,17 +278,18 @@ def test_it(): """ ) - with _disable_gc(): - result = pytester.runpytest("-Werror") + result = pytester.runpytest_subprocess( + "-Werror::pytest.PytestUnraisableExceptionWarning" + ) - # TODO: should be a test failure or error - assert result.ret == pytest.ExitCode.INTERNAL_ERROR + # TODO: Should be a test failure or error. Currently the exception + # propagates all the way to the top resulting in exit code 1. + assert result.ret == 1 result.assert_outcomes(passed=1) result.stderr.fnmatch_lines("ValueError: del is broken") -@pytest.mark.filterwarnings("default::pytest.PytestUnraisableExceptionWarning") def test_create_task_raises_unraisable_warning_filter(pytester: Pytester) -> None: # note that the host pytest warning filter is disabled and the pytester # warning filter applies during config teardown of unraisablehook. 
@@ -306,6 +298,9 @@ def test_create_task_raises_unraisable_warning_filter(pytester: Pytester) -> Non # the issue pytester.makepyfile( test_it=""" + # Should catch the unraisable exception even if gc is disabled. + import gc; gc.disable() + import asyncio import pytest @@ -318,11 +313,11 @@ def test_scheduler_must_be_created_within_running_loop() -> None: """ ) - with _disable_gc(): - result = pytester.runpytest("-Werror") + result = pytester.runpytest_subprocess("-Werror") - # TODO: should be a test failure or error - assert result.ret == pytest.ExitCode.INTERNAL_ERROR + # TODO: Should be a test failure or error. Currently the exception + # propagates all the way to the top resulting in exit code 1. + assert result.ret == 1 result.assert_outcomes(passed=1) result.stderr.fnmatch_lines("RuntimeWarning: coroutine 'my_task' was never awaited") diff --git a/testing/test_warning_types.py b/testing/test_warning_types.py index 7cbc4703c26..81d8785733c 100644 --- a/testing/test_warning_types.py +++ b/testing/test_warning_types.py @@ -43,7 +43,7 @@ def test(): @pytest.mark.filterwarnings("error") def test_warn_explicit_for_annotates_errors_with_location(): - with pytest.raises(Warning, match="(?m)test\n at .*raises.py:\\d+"): + with pytest.raises(Warning, match=r"(?m)test\n at .*raises.py:\d+"): warning_types.warn_explicit_for( pytest.raises, # type: ignore[arg-type] warning_types.PytestWarning("test"), diff --git a/testing/test_warnings.py b/testing/test_warnings.py index c302e7c6e3c..e3221da7569 100644 --- a/testing/test_warnings.py +++ b/testing/test_warnings.py @@ -149,6 +149,7 @@ def test_func(fix): ) +@pytest.mark.skip("issue #13485") def test_works_with_filterwarnings(pytester: Pytester) -> None: """Ensure our warnings capture does not mess with pre-installed filters (#2430).""" pytester.makepyfile( @@ -279,8 +280,7 @@ def pytest_warning_recorded(self, warning_message, when, nodeid, location): ("call warning", "runtest", "test_warning_recorded_hook.py::test_func"), 
("teardown warning", "runtest", "test_warning_recorded_hook.py::test_func"), ] - assert len(collected) == len(expected) # python < 3.10 zip(strict=True) - for collected_result, expected_result in zip(collected, expected): + for collected_result, expected_result in zip(collected, expected, strict=True): assert collected_result[0] == expected_result[0], str(collected) assert collected_result[1] == expected_result[1], str(collected) assert collected_result[2] == expected_result[2], str(collected) @@ -382,7 +382,7 @@ def test_bar(): def test_option_precedence_cmdline_over_ini( pytester: Pytester, ignore_on_cmdline ) -> None: - """Filters defined in the command-line should take precedence over filters in ini files (#3946).""" + """Filters defined in the command-line should take precedence over filters in config files (#3946).""" pytester.makeini( """ [pytest] @@ -424,6 +424,33 @@ def test(): result.stdout.fnmatch_lines(["* 1 failed in*"]) +def test_accept_unknown_category(pytester: Pytester) -> None: + """Category types that can't be imported don't cause failure (#13732).""" + pytester.makeini( + """ + [pytest] + filterwarnings = + always:Failed to import filter module.*:pytest.PytestConfigWarning + ignore::foobar.Foobar + """ + ) + pytester.makepyfile( + """ + def test(): + pass + """ + ) + result = pytester.runpytest_subprocess("-W", "ignore::bizbaz.Bizbaz") + result.stdout.fnmatch_lines( + [ + f"*== {WARNINGS_SUMMARY_HEADER} ==*", + "*PytestConfigWarning: Failed to import filter module 'foobar': ignore::foobar.Foobar", + "*PytestConfigWarning: Failed to import filter module 'bizbaz': ignore::bizbaz.Bizbaz", + "* 1 passed, * warning*", + ] + ) + + class TestDeprecationWarningsByDefault: """ Note: all pytest runs are executed in a subprocess so we don't inherit warning filters @@ -535,7 +562,8 @@ def test_invalid_regex_in_filterwarning(self, pytester: Pytester) -> None: ) -@pytest.mark.skip("not relevant until pytest 9.0") +# In 9.1, uncomment below and change 
RemovedIn9 -> RemovedIn10. +# @pytest.mark.skip("not relevant until pytest 10.0") @pytest.mark.parametrize("change_default", [None, "ini", "cmdline"]) def test_removed_in_x_warning_as_error(pytester: Pytester, change_default) -> None: """This ensures that PytestRemovedInXWarnings raised by pytest are turned into errors. @@ -714,10 +742,8 @@ def test_issue4445_rewrite(self, pytester: Pytester, capwarn) -> None: assert func == "" # the above conftest.py assert lineno == 4 - def test_issue4445_preparse(self, pytester: Pytester, capwarn) -> None: - """#4445: Make sure the warning points to a reasonable location - See origin of _issue_warning_captured at: _pytest.config.__init__.py:910 - """ + def test_issue4445_initial_conftest(self, pytester: Pytester, capwarn) -> None: + """#4445: Make sure the warning points to a reasonable location.""" pytester.makeconftest( """ import nothing @@ -733,7 +759,7 @@ def test_issue4445_preparse(self, pytester: Pytester, capwarn) -> None: assert "could not load initial conftests" in str(warning.message) assert f"config{os.sep}__init__.py" in file - assert func == "_preparse" + assert func == "parse" @pytest.mark.filterwarnings("default") def test_conftest_warning_captured(self, pytester: Pytester) -> None: diff --git a/testing/typing_checks.py b/testing/typing_checks.py index 8a316580a25..3ee2dfb3019 100644 --- a/testing/typing_checks.py +++ b/testing/typing_checks.py @@ -9,7 +9,6 @@ import contextlib from typing import Literal -from typing import Optional from typing_extensions import assert_type @@ -52,10 +51,10 @@ class Foo(TypedDict): def check_raises_is_a_context_manager(val: bool) -> None: with pytest.raises(RuntimeError) if val else contextlib.nullcontext() as excinfo: pass - assert_type(excinfo, Optional[pytest.ExceptionInfo[RuntimeError]]) + assert_type(excinfo, pytest.ExceptionInfo[RuntimeError] | None) # Issue #12941. 
def check_testreport_attributes(report: TestReport) -> None: assert_type(report.when, Literal["setup", "call", "teardown"]) - assert_type(report.location, tuple[str, Optional[int], str]) + assert_type(report.location, tuple[str, int | None, str]) diff --git a/testing/typing_raises_group.py b/testing/typing_raises_group.py index c7dd16991ac..081ffd59bca 100644 --- a/testing/typing_raises_group.py +++ b/testing/typing_raises_group.py @@ -1,8 +1,7 @@ from __future__ import annotations +from collections.abc import Callable import sys -from typing import Callable -from typing import Union from typing_extensions import assert_type @@ -160,10 +159,7 @@ def check_nested_raisesgroups_contextmanager() -> None: assert_type( excinfo.value.exceptions[0], # this union is because of how typeshed defines .exceptions - Union[ - ExceptionGroup[ValueError], - ExceptionGroup[ExceptionGroup[ValueError]], - ], + ExceptionGroup[ValueError] | ExceptionGroup[ExceptionGroup[ValueError]], ) @@ -240,8 +236,5 @@ def check_check_typing() -> None: # `BaseExceptiongroup` should perhaps be `ExceptionGroup`, but close enough assert_type( RaisesGroup(ValueError).check, - Union[ - Callable[[BaseExceptionGroup[ValueError]], bool], - None, - ], + Callable[[BaseExceptionGroup[ValueError]], bool] | None, ) diff --git a/tox.ini b/tox.ini index 8f7d8495285..e2e09fa8b7f 100644 --- a/tox.ini +++ b/tox.ini @@ -1,21 +1,19 @@ [tox] -isolated_build = True -minversion = 3.20.0 -distshare = {homedir}/.tox/distshare +requires = + tox >= 4 envlist = linting - py39 py310 py311 py312 py313 py314 pypy3 - py39-{pexpect,xdist,unittestextras,numpy,pluggymain,pylib} + py310-{pexpect,xdist,twisted24,twisted25,asynctest,numpy,pluggymain,pylib} doctesting doctesting-coverage plugins - py39-freeze + py310-freeze docs docs-checklinks @@ -25,6 +23,26 @@ envlist = +[pkgenv] +# NOTE: This section tweaks how Tox manages the PEP 517 build +# NOTE: environment where it assembles wheels (editable and regular) +# NOTE: for further 
installing them into regular testenvs. +# +# NOTE: `[testenv:.pkg]` does not work due to a regression in tox v4.14.1 +# NOTE: so `[pkgenv]` is being used in place of it. +# Refs: +# * https://github.com/tox-dev/tox/pull/3237 +# * https://github.com/tox-dev/tox/issues/3238 +# * https://github.com/tox-dev/tox/issues/3292 +# * https://hynek.me/articles/turbo-charge-tox/ +# +# NOTE: The `SETUPTOOLS_SCM_PRETEND_VERSION_FOR_PYTEST` environment +# NOTE: variable allows enforcing a pre-determined version for use in +# NOTE: the wheel being installed into usual testenvs. +pass_env = + SETUPTOOLS_SCM_PRETEND_VERSION_FOR_PYTEST + + [testenv] description = run the tests @@ -36,29 +54,32 @@ description = pexpect: against `pexpect` pluggymain: against the bleeding edge `pluggy` from Git pylib: against `py` lib - unittestextras: against the unit test extras + twisted24: against the unit test extras with twisted prior to 24.0 + twisted25: against the unit test extras with twisted 25.0 or later + asynctest: against the unit test extras with asynctest xdist: with pytest in parallel mode under `{basepython}` doctesting: including doctests commands = {env:_PYTEST_TOX_COVERAGE_RUN:} pytest {posargs:{env:_PYTEST_TOX_DEFAULT_POSARGS:}} - doctesting: {env:_PYTEST_TOX_COVERAGE_RUN:} pytest --doctest-modules --pyargs _pytest + doctesting: {env:_PYTEST_TOX_COVERAGE_RUN:} pytest --doctest-modules {env:_PYTEST_TOX_POSARGS_JUNIT:} --pyargs _pytest coverage: coverage combine coverage: coverage report -m passenv = COVERAGE_* PYTEST_ADDOPTS TERM - SETUPTOOLS_SCM_PRETEND_VERSION_FOR_PYTEST + CI setenv = - _PYTEST_TOX_DEFAULT_POSARGS={env:_PYTEST_TOX_POSARGS_DOCTESTING:} {env:_PYTEST_TOX_POSARGS_LSOF:} {env:_PYTEST_TOX_POSARGS_XDIST:} + _PYTEST_TOX_DEFAULT_POSARGS={env:_PYTEST_TOX_POSARGS_DOCTESTING:} {env:_PYTEST_TOX_POSARGS_JUNIT:} {env:_PYTEST_TOX_POSARGS_LSOF:} {env:_PYTEST_TOX_POSARGS_XDIST:} {env:_PYTEST_FILES:} # See https://docs.python.org/3/library/io.html#io-encoding-warning # If we don't 
enable this, neither can any of our downstream users! - PYTHONWARNDEFAULTENCODING=1 + # pylib is not PYTHONWARNDEFAULTENCODING clean, so don't set for it. + !pylib: PYTHONWARNDEFAULTENCODING=1 # Configuration to run with coverage similar to CI, e.g. - # "tox -e py39-coverage". + # "tox -e py313-coverage". coverage: _PYTEST_TOX_COVERAGE_RUN=coverage run -m coverage: _PYTEST_TOX_EXTRA_DEP=coverage-enable-subprocess coverage: COVERAGE_FILE={toxinidir}/.coverage @@ -66,6 +87,12 @@ setenv = doctesting: _PYTEST_TOX_POSARGS_DOCTESTING=doc/en + # The configurations below are related only to standard unittest support. + # Run only tests from test_unittest.py. + asynctest: _PYTEST_FILES=testing/test_unittest.py + twisted24: _PYTEST_FILES=testing/test_unittest.py + twisted25: _PYTEST_FILES=testing/test_unittest.py + nobyte: PYTHONDONTWRITEBYTECODE=1 lsof: _PYTEST_TOX_POSARGS_LSOF=--lsof @@ -79,17 +106,20 @@ deps = pexpect: pexpect>=4.8.0 pluggymain: pluggy @ git+https://github.com/pytest-dev/pluggy.git pylib: py>=1.8.2 - unittestextras: twisted - unittestextras: asynctest + twisted24: twisted<25 + twisted25: twisted>=25 + asynctest: asynctest xdist: pytest-xdist>=2.1.0 xdist: -e . {env:_PYTEST_TOX_EXTRA_DEP:} +# Can use the same wheel for all environments. +package = wheel +wheel_build_env = .pkg [testenv:linting] description = run pre-commit-defined linters under `{basepython}` skip_install = True -basepython = python3 deps = pre-commit>=2.9.3 commands = pre-commit run --all-files --show-diff-on-failure {posargs:} setenv = @@ -100,7 +130,7 @@ setenv = description = build the documentation site under \ `{toxinidir}{/}doc{/}en{/}_build{/}html` with `{basepython}` -basepython = python3.12 # sync with rtd to get errors +basepython = python3.13 # Sync with .readthedocs.yaml to get errors. 
usedevelop = True deps = -r{toxinidir}/doc/en/requirements.txt @@ -117,7 +147,6 @@ setenv = [testenv:docs-checklinks] description = check the links in the documentation with `{basepython}` -basepython = python3 usedevelop = True changedir = doc/en deps = -r{toxinidir}/doc/en/requirements.txt @@ -131,9 +160,6 @@ setenv = description = regenerate documentation examples under `{basepython}` changedir = doc/en -basepython = python3 -passenv = - SETUPTOOLS_SCM_PRETEND_VERSION_FOR_PYTEST deps = PyYAML regendoc>=0.8.1 @@ -145,6 +171,10 @@ commands = setenv = # We don't want this warning to reach regen output. PYTHONWARNDEFAULTENCODING= + # Remove CI markers: pytest auto-detects those and uses more verbose output, which is undesirable + # for the example documentation. + CI= + BUILD_NUMBER= [testenv:plugins] description = @@ -172,7 +202,7 @@ commands = pytest pytest_twisted_integration.py pytest simple_integration.py --force-sugar --flakes -[testenv:py39-freeze] +[testenv:py310-freeze] description = test pytest frozen with `pyinstaller` under `{basepython}` changedir = testing/freeze @@ -184,7 +214,6 @@ commands = [testenv:release] description = do a release, required posarg of the version number -basepython = python3 usedevelop = True passenv = * deps = @@ -202,8 +231,19 @@ commands = python scripts/prepare-release-pr.py {posargs} [testenv:generate-gh-release-notes] description = generate release notes that can be published as GitHub Release -basepython = python3 usedevelop = True deps = - pypandoc + pypandoc_binary commands = python scripts/generate-gh-release-notes.py {posargs} + +[testenv:update-plugin-list] +description = update the plugin list +skip_install = True +deps = + packaging + requests + tabulate[widechars] + tqdm + requests-cache + platformdirs +commands = python scripts/update-plugin-list.py {posargs}