Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
test: Ensure context fixtures are session scoped
  • Loading branch information
eddiebergman committed Oct 17, 2024
commit c95ef39bae2a8168347fdeee8483ac713dc82f87
6 changes: 3 additions & 3 deletions openml/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -175,11 +175,11 @@ def get_server_base_url() -> str:
# Module-level OpenML configuration values, initialised from ``_defaults``.
# NOTE(review): several lines below appear twice (old version, then new) —
# residue of a pasted diff that adds type annotations; only the annotated
# versions are the intended final state.
apikey: str = _defaults["apikey"]
show_progress: bool = _defaults["show_progress"]
# The current cache directory (without the server name)
_root_cache_directory = Path(_defaults["cachedir"])
_root_cache_directory: Path = Path(_defaults["cachedir"])
avoid_duplicate_runs = _defaults["avoid_duplicate_runs"]

retry_policy = _defaults["retry_policy"]
connection_n_retries = _defaults["connection_n_retries"]
retry_policy: Literal["human", "robot"] = _defaults["retry_policy"]
connection_n_retries: int = _defaults["connection_n_retries"]


def set_retry_policy(value: Literal["human", "robot"], n_retries: int | None = None) -> None:
Expand Down
2 changes: 2 additions & 0 deletions openml/evaluations/functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -461,10 +461,12 @@ def list_evaluations_setups(
)
else:
parameters.append({})
print(parameters) # noqa: T201
Comment thread
LennartPurucker marked this conversation as resolved.
Outdated
setup_data["parameters"] = parameters
# Merge setups with evaluations
_df = evals.merge(setup_data, on="setup_id", how="left")

print(_df) # noqa: T201
Comment thread
LennartPurucker marked this conversation as resolved.
Outdated
if parameters_in_separate_columns:
_df = pd.concat(
[_df.drop("parameters", axis=1), _df["parameters"].apply(pd.Series)],
Expand Down
17 changes: 9 additions & 8 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
# License: BSD 3-Clause
from __future__ import annotations

from collections.abc import Iterator
import logging
import os
import shutil
Expand Down Expand Up @@ -228,41 +229,41 @@ def _expected_static_cache_state(root_dir: Path) -> list[Path]:

def assert_static_test_cache_correct(root_dir: Path) -> None:
    """Assert that every expected static-cache path under *root_dir* exists."""
    for p in _expected_static_cache_state(root_dir):
        # NOTE(review): the two asserts below are the old and new lines of a
        # pasted diff. The first message ("does not exist") reads correctly
        # when the assertion fails; the second does not. Keep only the first.
        assert p.exists(), f"Expected path {p} does not exist"
        assert p.exists(), f"Expected path {p} exists"


@pytest.fixture(scope="class")
def long_version(request):
    """Expose the ``--long`` command-line flag on the requesting test class.

    Class-scoped so every test method in the class can read
    ``self.long_version``.
    """
    run_long_tests = request.config.getoption("--long")
    request.cls.long_version = run_long_tests


# NOTE(review): the two stacked @pytest.fixture decorators below are the pre-
# and post-change lines of a pasted diff; only the session-scoped one is the
# intended final version.
@pytest.fixture()
@pytest.fixture(scope="session")
def test_files_directory() -> Path:
    """Return the directory holding the static test files (``tests/files``)."""
    return Path(__file__).parent / "files"


# NOTE(review): duplicated decorator lines are diff-paste residue (old
# function scope vs. new session scope); keep only the session-scoped one.
@pytest.fixture()
@pytest.fixture(scope="session")
def test_api_key() -> str:
    """Return the hard-coded API key used against the OpenML test server."""
    return "c0c42819af31e706efe1f4b88c23c6c1"


# NOTE(review): the next four lines interleave the old and new
# decorator/signature of this fixture from a pasted diff; as written this is
# not valid Python (a decorator follows a def header with no body). Only the
# second pair (explicit function scope, Iterator return type) is intended.
@pytest.fixture(autouse=True)
def verify_cache_state(test_files_directory) -> None:
@pytest.fixture(autouse=True, scope="function")
def verify_cache_state(test_files_directory) -> Iterator[None]:
    # Check the static cache both before and after every test so a test that
    # corrupts it is identified immediately.
    assert_static_test_cache_correct(test_files_directory)
    yield
    assert_static_test_cache_correct(test_files_directory)


# NOTE(review): old and new decorator/signature lines are interleaved here
# from a pasted diff; only the session-scoped pair is the intended version.
@pytest.fixture(autouse=True)
def as_robot():
@pytest.fixture(autouse=True, scope="session")
def as_robot() -> Iterator[None]:
    # Save the current retry configuration, switch to the "robot" policy with
    # 20 retries for the duration of the fixture, and restore the original
    # settings on teardown.
    policy = openml.config.retry_policy
    n_retries = openml.config.connection_n_retries
    openml.config.set_retry_policy("robot", n_retries=20)
    yield
    openml.config.set_retry_policy(policy, n_retries)


@pytest.fixture(autouse=True)
@pytest.fixture(autouse=True, scope="session")
def with_test_server():
openml.config.start_using_configuration_for_example()
yield
Expand Down
66 changes: 39 additions & 27 deletions tests/test_evaluations/test_evaluations_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,35 +3,47 @@

import unittest

from openml.config import overwrite_config_context


class TestEvaluationsExample(unittest.TestCase):
    """Smoke test for the example script from the OpenML-Python paper."""

    def test_example_python_paper(self):
        # Example script which will appear in the upcoming OpenML-Python paper
        # This test ensures that the example will keep running!

        # NOTE(review): this method contains BOTH the pre-change body and the
        # post-change body of a pasted diff, back to back. Only the second
        # (wrapped in overwrite_config_context) is the intended version.

        import matplotlib.pyplot as plt
        import numpy as np

        import openml

        df = openml.evaluations.list_evaluations_setups(
            "predictive_accuracy",
            flows=[8353],
            tasks=[6],
            output_format="dataframe",
            parameters_in_separate_columns=True,
        ) # Choose an SVM flow, for example 8353, and a task.

        hp_names = ["sklearn.svm.classes.SVC(16)_C", "sklearn.svm.classes.SVC(16)_gamma"]
        df[hp_names] = df[hp_names].astype(float).apply(np.log)
        C, gamma, score = df[hp_names[0]], df[hp_names[1]], df["value"]

        cntr = plt.tricontourf(C, gamma, score, levels=12, cmap="RdBu_r")
        plt.colorbar(cntr, label="accuracy")
        plt.xlim((min(C), max(C)))
        plt.ylim((min(gamma), max(gamma)))
        plt.xlabel("C (log10)", size=16)
        plt.ylabel("gamma (log10)", size=16)
        plt.title("SVM performance landscape", size=20)

        plt.tight_layout()
        # --- post-change body (diff paste) starts here; it pins the example
        # to the public production server via overwrite_config_context ---
        with overwrite_config_context(
            {
                "server": "https://www.openml.org/api/v1/xml",
                "apikey": None,
            }
        ):
            import matplotlib.pyplot as plt
            import numpy as np

            import openml

            df = openml.evaluations.list_evaluations_setups(
                "predictive_accuracy",
                flows=[8353],
                tasks=[6],
                output_format="dataframe",
                parameters_in_separate_columns=True,
            ) # Choose an SVM flow, for example 8353, and a task.

            assert len(df) > 0, (
                "No evaluation found for flow 8353 on task 6, could "
                "be that this task is not available on the test server."
            )

            hp_names = ["sklearn.svm.classes.SVC(16)_C", "sklearn.svm.classes.SVC(16)_gamma"]
            df[hp_names] = df[hp_names].astype(float).apply(np.log)
            C, gamma, score = df[hp_names[0]], df[hp_names[1]], df["value"]

            cntr = plt.tricontourf(C, gamma, score, levels=12, cmap="RdBu_r")
            plt.colorbar(cntr, label="accuracy")
            plt.xlim((min(C), max(C)))
            plt.ylim((min(gamma), max(gamma)))
            plt.xlabel("C (log10)", size=16)
            plt.ylabel("gamma (log10)", size=16)
            plt.title("SVM performance landscape", size=20)

            plt.tight_layout()