# tox.ini — tox configuration (configparser syntax); defines test, lint,
# docs, coverage, benchmark, and profiling environments for this project.
[tox]
minversion = 3.0
envlist = formatting, py38, py39, py310, py311, pypy, benchmark
skip_missing_interpreters = true
requires = pip >=21.3.1
isolated_build = true
[testenv:pylint]
deps =
-r {toxinidir}/requirements_test.txt
commands =
pre-commit run pylint --all-files
[testenv:formatting]
basepython = python3
deps =
-r {toxinidir}/requirements_test.txt
commands =
pre-commit run --all-files
[testenv:mypy]
basepython = python3
deps =
pre-commit~=2.20
commands =
pre-commit run mypy --all-files
[testenv]
setenv =
COVERAGE_FILE = {toxinidir}/.coverage.{envname}
deps =
!pypy: -r {toxinidir}/requirements_test.txt
pypy: -r {toxinidir}/requirements_test_min.txt
commands =
# Run tests, ensuring all benchmark tests do not run
pytest --benchmark-disable {toxinidir}/tests/ {posargs:}
[testenv:spelling]
deps =
-r {toxinidir}/requirements_test.txt
commands =
pytest {toxinidir}/tests/ {posargs:} -k unittest_spelling
[testenv:coverage-html]
setenv =
COVERAGE_FILE = {toxinidir}/.coverage
deps =
-r {toxinidir}/requirements_test.txt
skip_install = true
commands =
coverage combine
coverage html --ignore-errors --rcfile={toxinidir}/.coveragerc
[testenv:docs]
changedir = doc/
deps =
-r {toxinidir}/doc/requirements.txt
commands =
# Readthedocs launches a slightly different command, see '.readthedocs.yaml'
# sphinx-build -T -W -E --keep-going -b html -d _build/doctrees -D language=en . _build/html
# Changes were made for performance reasons, add or remove only if you can't reproduce.
sphinx-build -T -W -j auto --keep-going -b html -d _build/doctrees -D language=en . _build/html
# -E: don't use a saved environment, always read all files
# -j auto: build in parallel with N processes where possible (special value "auto" will set N to cpu-count)
[testenv:test_doc]
deps =
-r {toxinidir}/requirements_test.txt
commands =
pytest {toxinidir}/doc/test_messages_documentation.py
[testenv:benchmark]
deps =
-r {toxinidir}/requirements_test.txt
pygal
commands =
# Run only the benchmark tests, grouping output and forcing .json output so we
# can compare benchmark runs
pytest --exitfirst \
--failed-first \
--benchmark-only \
--benchmark-save=batch_files \
--benchmark-save-data \
--benchmark-autosave {toxinidir}/tests \
--benchmark-group-by="group" \
{posargs:}
[testenv:profile_against_external]
setenv =
PYTEST_PROFILE_EXTERNAL = 1
deps =
-r {toxinidir}/requirements_test.txt
gprof2dot
commands =
pytest --exitfirst \
--profile-svg \
{toxinidir}/tests/profile/test_profile_against_externals.py