diff --git a/.coveragerc b/.coveragerc
deleted file mode 100644
index cbdf7bb42e..0000000000
--- a/.coveragerc
+++ /dev/null
@@ -1,16 +0,0 @@
-[run]
-branch = True
-source = src/pyhf
-
-[report]
-exclude_lines =
-    if self.debug:
-    pragma: no cover
-    raise NotImplementedError
-    if __name__ == .__main__.:
-ignore_errors = True
-omit =
-    binder/*
-    docker/*
-    tests/*
-    validation/*
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 977091c60b..925c08bd61 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -49,7 +49,7 @@ jobs:
 
     - name: Test with pytest
       run: |
-        pytest -r sx --ignore tests/benchmarks/ --ignore tests/contrib --ignore tests/test_notebooks.py
+        pytest --ignore tests/benchmarks/ --ignore tests/contrib --ignore tests/test_notebooks.py
 
     - name: Launch a tmate session if tests fail
       if: failure() && github.event_name == 'workflow_dispatch'
@@ -64,7 +64,7 @@ jobs:
 
     - name: Test Contrib module with pytest
       run: |
-        pytest -r sx tests/contrib --mpl --mpl-baseline-path tests/contrib/baseline
+        pytest tests/contrib --mpl --mpl-baseline-path tests/contrib/baseline
 
     - name: Report contrib coverage with Codecov
       if: github.event_name != 'schedule' && matrix.python-version == '3.9' && matrix.os == 'ubuntu-latest'
@@ -75,7 +75,7 @@ jobs:
 
     - name: Test docstring examples with doctest
       if: matrix.python-version == '3.9'
-      run: pytest -r sx src/ README.rst
+      run: pytest src/ README.rst
 
     - name: Report doctest coverage with Codecov
       if: github.event_name != 'schedule' && matrix.python-version == '3.9' && matrix.os == 'ubuntu-latest'
@@ -87,4 +87,4 @@ jobs:
     - name: Run benchmarks
       if: github.event_name == 'schedule' && matrix.python-version == '3.9'
       run: |
-        pytest -r sx --benchmark-sort=mean tests/benchmarks/test_benchmark.py
+        pytest --benchmark-sort=mean tests/benchmarks/test_benchmark.py
diff --git a/.github/workflows/dependencies-head.yml b/.github/workflows/dependencies-head.yml
index 90d5c2e44c..dc7b6a5e6d 100644
--- a/.github/workflows/dependencies-head.yml
+++ b/.github/workflows/dependencies-head.yml
@@ -31,7 +31,7 @@ jobs:
 
     - name: Test with pytest
      run: |
-        pytest -r sx --ignore tests/benchmarks/ --ignore tests/contrib --ignore tests/test_notebooks.py
+        pytest --ignore tests/benchmarks/ --ignore tests/contrib --ignore tests/test_notebooks.py
 
 
   scipy:
@@ -61,7 +61,7 @@ jobs:
 
     - name: Test with pytest
      run: |
-        pytest -r sx --ignore tests/benchmarks/ --ignore tests/contrib --ignore tests/test_notebooks.py
+        pytest --ignore tests/benchmarks/ --ignore tests/contrib --ignore tests/test_notebooks.py
 
 
   iminuit:
@@ -87,7 +87,7 @@ jobs:
         python -m pip list
     - name: Test with pytest
      run: |
-        pytest -r sx --ignore tests/benchmarks/ --ignore tests/contrib --ignore tests/test_notebooks.py
+        pytest --ignore tests/benchmarks/ --ignore tests/contrib --ignore tests/test_notebooks.py
 
 
   uproot4:
@@ -112,7 +112,7 @@ jobs:
         python -m pip list
     - name: Test with pytest
      run: |
-        pytest -r sx --ignore tests/benchmarks/ --ignore tests/contrib --ignore tests/test_notebooks.py
+        pytest --ignore tests/benchmarks/ --ignore tests/contrib --ignore tests/test_notebooks.py
 
 
   pytest:
@@ -137,4 +137,4 @@ jobs:
         python -m pip list
     - name: Test with pytest
      run: |
-        pytest -r sx --ignore tests/benchmarks/ --ignore tests/contrib --ignore tests/test_notebooks.py
+        pytest --ignore tests/benchmarks/ --ignore tests/contrib --ignore tests/test_notebooks.py
diff --git a/.github/workflows/lower-bound-requirements.yml b/.github/workflows/lower-bound-requirements.yml
index 7360723312..bbba51e0c2 100644
--- a/.github/workflows/lower-bound-requirements.yml
+++ b/.github/workflows/lower-bound-requirements.yml
@@ -34,5 +34,9 @@ jobs:
 
     - name: Test with pytest
      run: |
+        # Override the filterwarnings ini option with an empty list to disable erroring on warnings,
+        # as this job tests the oldest releases that work with the latest API, not the oldest
+        # releases that are warning free. Still show warnings by setting warning control to 'default'.
+        export PYTHONWARNINGS='default'
         # Run on tests/ to skip doctests of src given examples are for latest APIs
-        pytest -r sx --ignore tests/benchmarks/ --ignore tests/contrib --ignore tests/test_notebooks.py tests/
+        pytest --override-ini filterwarnings= --ignore tests/benchmarks/ --ignore tests/contrib --ignore tests/test_notebooks.py tests/
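The lower-bound job deliberately opts back out of the strict warning handling configured below in pyproject.toml: "--override-ini filterwarnings=" clears the ini-level filters so warnings no longer fail the run, while PYTHONWARNINGS='default' keeps them visible in the output. A minimal stand-alone sketch of that behaviour; the file and test names are illustrative and not part of pyhf:

    # test_legacy_api.py -- hypothetical example, not from the pyhf test suite
    import warnings


    def test_uses_deprecated_api():
        # With filterwarnings = ["error"] configured, this warning is promoted to an
        # exception and the test fails.  Running
        #   PYTHONWARNINGS='default' pytest --override-ini filterwarnings= test_legacy_api.py
        # lets the test pass while still reporting the warning.
        warnings.warn("this API is deprecated", DeprecationWarning)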
diff --git a/.github/workflows/notebooks.yml b/.github/workflows/notebooks.yml
index 112efbd530..81a1cdceaf 100644
--- a/.github/workflows/notebooks.yml
+++ b/.github/workflows/notebooks.yml
@@ -27,4 +27,4 @@ jobs:
         python -m pip list
     - name: Test example notebooks
       run: |
-        pytest -r sx tests/test_notebooks.py
+        pytest tests/test_notebooks.py
diff --git a/.github/workflows/release_tests.yml b/.github/workflows/release_tests.yml
index 53387ab112..74d64e7277 100644
--- a/.github/workflows/release_tests.yml
+++ b/.github/workflows/release_tests.yml
@@ -40,7 +40,7 @@ jobs:
 
     - name: Canary test public API
       run: |
-        pytest -r sx tests/test_public_api.py
+        pytest tests/test_public_api.py
 
     - name: Verify requirements in codemeta.json
       run: |
diff --git a/pyproject.toml b/pyproject.toml
index 285855442d..d36a10d305 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -42,18 +42,19 @@ ignore = [
 minversion = "6.0"
 xfail_strict = true
 addopts = [
-    "--ignore=setup.py",
-    "--ignore=validation/",
-    "--ignore=binder/",
-    "--ignore=docs/",
+    "-ra",
     "--cov=pyhf",
-    "--cov-config=.coveragerc",
+    "--cov-branch",
+    "--showlocals",
+    "--strict-markers",
+    "--strict-config",
     "--cov-report=term-missing",
     "--cov-report=xml",
     "--cov-report=html",
     "--doctest-modules",
-    "--doctest-glob='*.rst'"
+    "--doctest-glob='*.rst'",
 ]
+log_cli_level = "info"
 testpaths = "tests"
 markers = [
     "fail_jax",
@@ -75,12 +76,21 @@ markers = [
     "skip_pytorch64",
     "skip_tensorflow",
 ]
-
-[tool.nbqa.config]
-black = "pyproject.toml"
+filterwarnings = [
+    "error",
+    'ignore:the imp module is deprecated:DeprecationWarning', # tensorflow
+    'ignore:distutils Version classes are deprecated:DeprecationWarning', # tensorflow-probability
+    'ignore:the `interpolation=` argument to percentile was renamed to `method=`, which has additional options:DeprecationWarning', # Issue #1772
+    "ignore:The interpolation= argument to 'quantile' is deprecated. Use 'method=' instead:DeprecationWarning", # Issue #1772
+    'ignore: Exception ignored in:pytest.PytestUnraisableExceptionWarning', #FIXME: Exception ignored in: <_io.FileIO [closed]>
+    'ignore:invalid value encountered in true_divide:RuntimeWarning', #FIXME
+    'ignore:invalid value encountered in add:RuntimeWarning', #FIXME
+    "ignore:In future, it will be an error for 'np.bool_' scalars to be interpreted as an index:DeprecationWarning", #FIXME: tests/test_tensor.py::test_pdf_eval[pytorch]
+    'ignore:Creating a tensor from a list of numpy.ndarrays is extremely slow. Please consider converting the list to a single numpy.ndarray with:UserWarning', #FIXME: tests/test_optim.py::test_minimize[no_grad-scipy-pytorch-no_stitch]
+    'ignore:divide by zero encountered in true_divide:RuntimeWarning', #FIXME: pytest tests/test_tensor.py::test_pdf_calculations[numpy]
+]
 
 [tool.nbqa.mutate]
-black = 1
 pyupgrade = 1
 
 [tool.nbqa.addopts]
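With "error" first in the filterwarnings list, any warning raised during the test run fails the test unless a later, more specific ignore entry matches it (pytest applies the last matching filter). When only a single test needs an exemption, a filterwarnings marker is an alternative to adding another global ignore entry; a sketch with an invented test name:

    import warnings

    import pytest


    # Scoped equivalent of an 'ignore:...:RuntimeWarning' entry in pyproject.toml:
    # the exemption applies only to this test instead of the whole suite.
    @pytest.mark.filterwarnings(
        "ignore:divide by zero encountered in true_divide:RuntimeWarning"
    )
    def test_locally_exempted_warning():
        warnings.warn("divide by zero encountered in true_divide", RuntimeWarning)

Marker filters take precedence over the ini option, so such a test passes even while "error" stays in effect for the rest of the suite.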
diff --git a/tests/__init__.py b/tests/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/tests/test_export.py b/tests/test_export.py
index 241d7bdb62..6b9b21968c 100644
--- a/tests/test_export.py
+++ b/tests/test_export.py
@@ -352,17 +352,14 @@ def test_export_sample_zerodata(mocker, spec):
     sampledata = [0.0] * len(samplespec['data'])
 
     mocker.patch('pyhf.writexml._ROOT_DATA_FILE')
-    # make sure no RuntimeWarning, https://stackoverflow.com/a/45671804
-    with pytest.warns(None) as record:
-        for modifierspec in samplespec['modifiers']:
-            pyhf.writexml.build_modifier(
-                {'measurements': [{'config': {'parameters': []}}]},
-                modifierspec,
-                channelname,
-                samplename,
-                sampledata,
-            )
-    assert not record.list
+    for modifierspec in samplespec['modifiers']:
+        pyhf.writexml.build_modifier(
+            {'measurements': [{'config': {'parameters': []}}]},
+            modifierspec,
+            channelname,
+            samplename,
+            sampledata,
+        )
 
 
 @pytest.mark.parametrize(
@@ -424,7 +421,8 @@ def test_integer_data(datadir, mocker):
     """
     Test that a spec with only integer data will be written correctly
     """
-    spec = json.load(open(datadir.join("workspace_integer_data.json")))
+    with open(datadir.join("workspace_integer_data.json")) as spec_file:
+        spec = json.load(spec_file)
     channel_spec = spec["channels"][0]
 
     mocker.patch("pyhf.writexml._ROOT_DATA_FILE")
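pytest.warns(None) is deprecated in pytest 7, and with filterwarnings = ["error"] in effect an unexpected RuntimeWarning already fails the test, so the explicit no-warning check is simply dropped. If an explicit "no warnings were emitted" assertion were still wanted somewhere, one option is the recwarn fixture; a sketch using a placeholder function rather than the real pyhf.writexml.build_modifier call:

    def build_quietly():
        # Stand-in for the code under test; emits no warnings.
        return None


    def test_build_emits_no_warnings(recwarn):
        build_quietly()
        # recwarn records every warning raised during the test; none are expected here.
        assert len(recwarn) == 0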
diff --git a/tests/test_optim.py b/tests/test_optim.py
index 697547fa43..9e1ba0c8eb 100644
--- a/tests/test_optim.py
+++ b/tests/test_optim.py
@@ -4,6 +4,7 @@
 from pyhf.tensor.common import _TensorViewer
 import pytest
 from scipy.optimize import minimize, OptimizeResult
+from scipy.optimize import OptimizeWarning
 import iminuit
 import itertools
 import numpy as np
@@ -563,7 +564,8 @@ def test_solver_options_scipy(mocker):
 
 # Note: in this case, scipy won't usually raise errors for arbitrary options
-# so this test exists as a sanity reminder that scipy is not perfect
+# so this test exists as a sanity reminder that scipy is not perfect.
+# It does raise a scipy.optimize.OptimizeWarning though.
 def test_bad_solver_options_scipy(mocker):
     optimizer = pyhf.optimize.scipy_optimizer(
         solver_options={'arbitrary_option': 'foobar'}
     )
@@ -573,7 +575,11 @@ def test_bad_solver_options_scipy(mocker):
 
     model = pyhf.simplemodels.uncorrelated_background([50.0], [100.0], [10.0])
     data = pyhf.tensorlib.astensor([125.0] + model.config.auxdata)
-    assert pyhf.infer.mle.fit(data, model).tolist()
+
+    with pytest.warns(
+        OptimizeWarning, match="Unknown solver options: arbitrary_option"
+    ):
+        assert pyhf.infer.mle.fit(data, model).tolist()
 
 
 def test_minuit_param_names(mocker):
diff --git a/tests/test_tensor.py b/tests/test_tensor.py
index 81251660aa..68d615aeae 100644
--- a/tests/test_tensor.py
+++ b/tests/test_tensor.py
@@ -274,37 +274,39 @@ def test_shape(backend):
 @pytest.mark.fail_pytorch64
 def test_pdf_calculations(backend):
     tb = pyhf.tensorlib
-    assert tb.tolist(tb.normal_cdf(tb.astensor([0.8]))) == pytest.approx(
-        [0.7881446014166034], 1e-07
-    )
-    assert tb.tolist(
-        tb.normal_logpdf(
-            tb.astensor([0, 0, 1, 1, 0, 0, 1, 1]),
-            tb.astensor([0, 1, 0, 1, 0, 1, 0, 1]),
-            tb.astensor([0, 0, 0, 0, 1, 1, 1, 1]),
+    # FIXME
+    with pytest.warns(RuntimeWarning, match="divide by zero encountered in log"):
+        assert tb.tolist(tb.normal_cdf(tb.astensor([0.8]))) == pytest.approx(
+            [0.7881446014166034], 1e-07
+        )
+        assert tb.tolist(
+            tb.normal_logpdf(
+                tb.astensor([0, 0, 1, 1, 0, 0, 1, 1]),
+                tb.astensor([0, 1, 0, 1, 0, 1, 0, 1]),
+                tb.astensor([0, 0, 0, 0, 1, 1, 1, 1]),
+            )
+        ) == pytest.approx(
+            [
+                np.nan,
+                np.nan,
+                np.nan,
+                np.nan,
+                -0.91893853,
+                -1.41893853,
+                -1.41893853,
+                -0.91893853,
+            ],
+            nan_ok=True,
+        )
+        # Allow poisson(lambda=0) under limit Poisson(n = 0 | lambda -> 0) = 1
+        assert tb.tolist(
+            tb.poisson(tb.astensor([0, 0, 1, 1]), tb.astensor([0, 1, 0, 1]))
+        ) == pytest.approx([1.0, 0.3678794503211975, 0.0, 0.3678794503211975])
+        assert tb.tolist(
+            tb.poisson_logpdf(tb.astensor([0, 0, 1, 1]), tb.astensor([0, 1, 0, 1]))
+        ) == pytest.approx(
+            np.log([1.0, 0.3678794503211975, 0.0, 0.3678794503211975]).tolist()
         )
-    ) == pytest.approx(
-        [
-            np.nan,
-            np.nan,
-            np.nan,
-            np.nan,
-            -0.91893853,
-            -1.41893853,
-            -1.41893853,
-            -0.91893853,
-        ],
-        nan_ok=True,
-    )
-    # Allow poisson(lambda=0) under limit Poisson(n = 0 | lambda -> 0) = 1
-    assert tb.tolist(
-        tb.poisson(tb.astensor([0, 0, 1, 1]), tb.astensor([0, 1, 0, 1]))
-    ) == pytest.approx([1.0, 0.3678794503211975, 0.0, 0.3678794503211975])
-    assert tb.tolist(
-        tb.poisson_logpdf(tb.astensor([0, 0, 1, 1]), tb.astensor([0, 1, 0, 1]))
-    ) == pytest.approx(
-        np.log([1.0, 0.3678794503211975, 0.0, 0.3678794503211975]).tolist()
-    )
 
     # Ensure continuous approximation is valid
     assert tb.tolist(
@@ -343,11 +345,12 @@ def test_pdf_calculations_pytorch(backend):
     assert tb.tolist(
         tb.poisson(tb.astensor([0, 0, 1, 1]), tb.astensor([0, 1, 0, 1]))
     ) == pytest.approx([1.0, 0.3678794503211975, 0.0, 0.3678794503211975])
-    assert tb.tolist(
-        tb.poisson_logpdf(tb.astensor([0, 0, 1, 1]), tb.astensor([0, 1, 0, 1]))
-    ) == pytest.approx(
-        np.log([1.0, 0.3678794503211975, 0.0, 0.3678794503211975]).tolist()
-    )
+    with pytest.warns(RuntimeWarning, match="divide by zero encountered in log"):
+        assert tb.tolist(
+            tb.poisson_logpdf(tb.astensor([0, 0, 1, 1]), tb.astensor([0, 1, 0, 1]))
+        ) == pytest.approx(
+            np.log([1.0, 0.3678794503211975, 0.0, 0.3678794503211975]).tolist()
+        )
 
     # Ensure continuous approximation is valid
     assert tb.tolist(
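The new pytest.warns(RuntimeWarning, match="divide by zero encountered in log") blocks expect the warning NumPy emits when a logarithm is evaluated at zero, for example when the Poisson log-PDF is computed at a probability of zero. A stand-alone illustration of that contract, separate from the diff:

    import numpy as np
    import pytest


    def test_log_of_zero_warns():
        # Under NumPy's default error handling, np.log(0.0) emits
        # "divide by zero encountered in log" and evaluates to -inf.
        with pytest.warns(RuntimeWarning, match="divide by zero encountered in log"):
            assert np.log(0.0) == -np.inf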