Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Adding flaky reruns for unit tests
  • Loading branch information
Neeratyoy committed Mar 8, 2021
commit cd6053fcbe371e1cf5386d49e8152f6567f4f6cc
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@
"pyarrow",
"pre-commit",
"pytest-cov",
"mypy",
"pytest-rerunfailures",
"mypy",
],
"examples": [
"matplotlib",
Expand Down
4 changes: 2 additions & 2 deletions tests/test_runs/test_run_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -1572,7 +1572,7 @@ def test_format_prediction_task_regression(self):
res = format_prediction(regression, *ignored_input)
self.assertListEqual(res, [0] * 5)

@pytest.mark.flaky() # appears to fail stochastically on test server
@pytest.mark.flaky(reruns=3, reruns_delay=2) # appears to fail stochastically on test server
@unittest.skipIf(
LooseVersion(sklearn.__version__) < "0.21",
reason="couldn't perform local tests successfully w/o bloating RAM",
Expand Down Expand Up @@ -1626,7 +1626,7 @@ def test__run_task_get_arffcontent_2(self, parallel_mock):
self.assertSequenceEqual(scores, expected_scores, seq_type=list)
set_loky_pickler()

@pytest.mark.flaky() # appears to fail stochastically on test server
@pytest.mark.flaky(reruns=2, reruns_delay=2) # appears to fail stochastically on test server
@unittest.skipIf(
LooseVersion(sklearn.__version__) < "0.21",
reason="couldn't perform local tests successfully w/o bloating RAM",
Expand Down
2 changes: 1 addition & 1 deletion tests/test_study/test_study_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ def test_publish_benchmark_suite(self):
self.assertEqual(study_downloaded.status, "deactivated")
# can't delete study, now it's no longer in preparation

@pytest.mark.flaky() # appears to fail stochastically on test server
@pytest.mark.flaky(reruns=5, reruns_delay=2)
def test_publish_study(self):
# get some random runs to attach
run_list = openml.evaluations.list_evaluations("predictive_accuracy", size=10)
Expand Down