test_utils.py

from openml.testing import TestBase
import numpy as np
import openml
import sys

# `mock` is part of the standard library from Python 3.3 onward; on
# Python 2 it is provided by the external `mock` backport package.
if sys.version_info[0] >= 3:
    from unittest import mock
else:
    import mock


class OpenMLTaskTest(TestBase):
    _multiprocess_can_split_ = True
    _batch_size = 25

    def mocked_perform_api_call(call, request_method):
        # TODO: JvR: Why is this not a staticmethod?
        url = openml.config.server + '/' + call
        return openml._api_calls._download_text_file(url)

    def test_list_all(self):
        openml.utils._list_all(listing_call=openml.tasks.functions._list_tasks)
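
    # Note on the patching below: mock.patch replaces
    # openml._api_calls._perform_api_call with a Mock whose side_effect is
    # mocked_perform_api_call, so each API call is answered by a plain text
    # download of the same URL while the Mock still records its call_count.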
    @mock.patch('openml._api_calls._perform_api_call',
                side_effect=mocked_perform_api_call)
    def test_list_all_few_results_available(self, _perform_api_call):
        # We want to make sure that the number of API calls is exactly 1.
        # Although there are multiple versions of the iris dataset, only one
        # matches this name/version combination.
        datasets = openml.datasets.list_datasets(size=1000,
                                                 data_name='iris',
                                                 data_version=1)
        self.assertEqual(len(datasets), 1)
        self.assertEqual(_perform_api_call.call_count, 1)

    def test_list_all_for_datasets(self):
        required_size = 127  # default test server reset value
        datasets = openml.datasets.list_datasets(batch_size=self._batch_size,
                                                 size=required_size)
        self.assertEqual(len(datasets), required_size)
        for did in datasets:
            self._check_dataset(datasets[did])

    def test_list_datasets_with_high_size_parameter(self):
        # Test against the production server, since concurrent deletion of
        # uploaded datasets makes the test fail on the test server.
        openml.config.server = self.production_server
        datasets_a = openml.datasets.list_datasets()
        datasets_b = openml.datasets.list_datasets(size=np.inf)
        # Revert to the test server.
        openml.config.server = self.test_server
        self.assertEqual(len(datasets_a), len(datasets_b))

    def test_list_all_for_tasks(self):
        required_size = 1068  # default test server reset value
        tasks = openml.tasks.list_tasks(batch_size=self._batch_size,
                                        size=required_size)
        self.assertEqual(len(tasks), required_size)

    def test_list_all_for_flows(self):
        required_size = 15  # default test server reset value
        flows = openml.flows.list_flows(batch_size=self._batch_size,
                                        size=required_size)
        self.assertEqual(len(flows), required_size)

    def test_list_all_for_setups(self):
        required_size = 50
        # TODO: apparently the list_setups function does not support kwargs.
        setups = openml.setups.list_setups(size=required_size)
        # The setups might not be on the test server after a reset; rerun the
        # test at least once if it fails.
        self.assertEqual(len(setups), required_size)

    def test_list_all_for_runs(self):
        required_size = 21
        runs = openml.runs.list_runs(batch_size=self._batch_size,
                                     size=required_size)
        # The runs might not be on the test server after a reset; rerun the
        # test at least once if it fails.
        self.assertEqual(len(runs), required_size)

    def test_list_all_for_evaluations(self):
        required_size = 22
        # TODO: apparently the list_evaluations function does not support kwargs.
        evaluations = openml.evaluations.list_evaluations(
            function='predictive_accuracy', size=required_size)
        # The evaluations might not be on the test server after a reset; rerun
        # the test at least once if it fails.
        self.assertEqual(len(evaluations), required_size)
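

# A minimal sketch of the batching behaviour these tests exercise, assuming
# openml.utils._list_all pages through `listing_call` in chunks of
# `batch_size` until `size` results have been collected. The signature below
# is simplified and hypothetical; the real function also forwards extra
# filter kwargs to the listing call:
#
#     def _list_all(listing_call, batch_size=10000, size=None, **filters):
#         results, offset = {}, 0
#         while size is None or len(results) < size:
#             limit = (batch_size if size is None
#                      else min(batch_size, size - len(results)))
#             batch = listing_call(limit=limit, offset=offset, **filters)
#             if not batch:              # listing exhausted on the server
#                 break
#             results.update(batch)
#             if len(batch) < limit:     # fewer than requested: also done
#                 break
#             offset += limit
#         return results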