diff --git a/.github/workflows/codspeed.yml b/.github/workflows/codspeed.yml
new file mode 100644
index 000000000..4466811af
--- /dev/null
+++ b/.github/workflows/codspeed.yml
@@ -0,0 +1,23 @@
+name: codspeed-benchmarks
+
+on:
+  push:
+    branches:
+      - "main"
+  pull_request:
+  workflow_dispatch:
+
+jobs:
+  benchmarks:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: '3.12'
+      - run: python -m pip install .[tests] pytest-codspeed
+      - name: Run benchmarks
+        uses: CodspeedHQ/action@v2
+        with:
+          token: ${{ secrets.CODSPEED_TOKEN }}
+          run: "pytest -vvv -r fEs --pyargs dkist --codspeed"
diff --git a/changelog/382.feature.rst b/changelog/382.feature.rst
new file mode 100644
index 000000000..7c96dd31e
--- /dev/null
+++ b/changelog/382.feature.rst
@@ -0,0 +1 @@
+Add a GitHub workflow and dependencies for CodSpeed, to benchmark PRs against main.
diff --git a/dkist/conftest.py b/dkist/conftest.py
index 21a216395..8c8e9e106 100644
--- a/dkist/conftest.py
+++ b/dkist/conftest.py
@@ -307,7 +307,17 @@ def small_visp_dataset():
 
 
 @pytest.fixture(scope="session")
-def large_visp_dataset(tmp_path_factory):
+def large_visp_dataset_file(tmp_path_factory):
+    # Decompress the bundled gzipped ASDF once per session and return its path
+    vispdir = tmp_path_factory.mktemp("data")
+    with gzip.open(Path(rootdir) / "large_visp.asdf.gz", mode="rb") as gfo:
+        with open(vispdir / "test_visp.asdf", mode="wb") as afo:
+            afo.write(gfo.read())
+    return vispdir / "test_visp.asdf"
+
+
+@pytest.fixture(scope="session")
+def large_visp_dataset(large_visp_dataset_file):
     # This dataset was generated by the following code:
     # from dkist_data_simulator.spec214.visp import SimpleVISPDataset
     # from dkist_inventory.asdf_generator import dataset_from_fits
@@ -319,8 +329,4 @@
     # ds.generate_files(vispdir)
     # dataset_from_fits(vispdir, "test_visp.asdf")
 
-    vispdir = tmp_path_factory.mktemp("data")
-    with gzip.open(Path(rootdir) / "large_visp.asdf.gz", mode="rb") as gfo:
-        with open(vispdir / "test_visp.asdf", mode="wb") as afo:
-            afo.write(gfo.read())
-    return load_dataset(vispdir / "test_visp.asdf")
+    return load_dataset(large_visp_dataset_file)
diff --git a/dkist/tests/test_benchmarks.py b/dkist/tests/test_benchmarks.py
new file mode 100644
index 000000000..44a2ecb78
--- /dev/null
+++ b/dkist/tests/test_benchmarks.py
@@ -0,0 +1,29 @@
+import matplotlib.pyplot as plt
+import pytest
+
+from dkist import load_dataset
+
+
+@pytest.mark.benchmark
+def test_load_asdf(benchmark, large_visp_dataset_file):
+    # Time a complete load of the large VISP ASDF file
+    benchmark(load_dataset, large_visp_dataset_file)
+
+
+@pytest.mark.benchmark
+@pytest.mark.parametrize("axes", [
+    ["y", "x", None, None],
+    ["y", None, "x", None],
+    ["y", None, None, "x"],
+    [None, "y", "x", None],
+    [None, "y", None, "x"],
+    [None, None, "y", "x"],
+])
+def test_plot_dataset(benchmark, axes, large_visp_dataset):
+    # Benchmark plotting each possible pair of the four dataset axes
+    @benchmark
+    def plot_and_save_fig(ds=large_visp_dataset, axes=axes):
+        ds.plot(plot_axes=axes)
+        # Saving the figure forces Matplotlib to fully draw it
+        plt.savefig("tmpplot")
+        plt.close()
diff --git a/pyproject.toml b/pyproject.toml
index 83b13da95..cea47f307 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -53,6 +53,7 @@ tests = [
   "pytest-mpl",
   "pytest-httpserver",
   "pytest-filter-subpackage",
+  "pytest-benchmark",
   "hypothesis",
   "tox",
   "pydot",
diff --git a/pytest.ini b/pytest.ini
index 5b440f409..732d176ea 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -20,6 +20,7 @@ addopts = --doctest-rst -p no:unraisableexception -p no:threadexception
 markers =
     online: marks this test function as needing online connectivity.
     figure: marks this test function as using hash-based Matplotlib figure verification. This mark is not meant to be directly applied, but is instead automatically applied when a test function uses the @sunpy.tests.helpers.figure_test decorator.
+    benchmark: marks this test as a benchmark.
 # Disable internet access for tests not marked remote_data
 remote_data_strict = True
 asdf_schema_root = dkist/io/asdf/resources/
diff --git a/tox.ini b/tox.ini
index a05a7de74..d66ef2836 100644
--- a/tox.ini
+++ b/tox.ini
@@ -10,6 +10,7 @@ envlist =
     py310-oldestdeps
     build_docs{,-notebooks}
     codestyle
+    benchmarks
 
 [testenv]
 pypi_filter = https://raw.githubusercontent.com/sunpy/sunpy/main/.test_package_pins.txt
@@ -34,7 +35,7 @@ set_env =
     COLUMNS = 180
     devdeps: PIP_EXTRA_INDEX_URL = https://pypi.anaconda.org/astropy/simple https://pypi.anaconda.org/scientific-python-nightly-wheels/simple
     # Define the base test command here to allow us to add more flags for each tox factor
-    PYTEST_COMMAND = pytest -vvv -r fEs --pyargs dkist --cov-report=xml --cov=dkist --cov-config={toxinidir}/.coveragerc {toxinidir}/docs
+    PYTEST_COMMAND = pytest -vvv -r fEs --pyargs dkist --cov-report=xml --cov=dkist --cov-config={toxinidir}/.coveragerc {toxinidir}/docs --benchmark-skip
 deps =
     # For packages which publish nightly wheels this will pull the latest nightly
     devdeps: astropy>=0.0.dev0
@@ -89,3 +90,9 @@ commands =
     !notebooks: sphinx-build -j 1 --color -W --keep-going -b html -d _build/.doctrees . _build/html -D nb_execution_mode=off {posargs}
     notebooks: sphinx-build -j 1 --color -W --keep-going -b html -d _build/.doctrees . _build/html {posargs}
     python -c 'import pathlib; print("Documentation available under file://\{0\}".format(pathlib.Path(r"{toxinidir}") / "docs" / "_build" / "index.html"))'
+
+[testenv:benchmarks]
+description = Run benchmarks on a PR and compare against main to check for performance regressions
+allowlist_externals = git
+# PYTEST_COMMAND passes --benchmark-skip, which would skip every test selected by
+# -m benchmark, so spell out the pytest invocation explicitly here
+commands = pytest -vvv -r fEs --pyargs dkist -m benchmark --benchmark-autosave
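
A minimal sketch of how the new benchmarks could be exercised locally, assuming the `tests` extra from pyproject.toml is installed; the tox environment and the `-m benchmark` selection come from the tox.ini changes above, while `--codspeed` mirrors the CI workflow and additionally requires `pytest-codspeed`:

    # run the pytest-benchmark suite and autosave the results under .benchmarks
    tox -e benchmarks

    # or mirror the CI job directly
    pip install .[tests] pytest-codspeed
    pytest -vvv -r fEs --pyargs dkist --codspeed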