Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
36 commits
Select commit Hold shift + click to select a range
24faeb7
bump to 0.11.1dev to continue developing (#971)
mfeurer Oct 25, 2020
e84cdf9
update home page example to numerical dataset (pendigits) (#976)
a-moadel Oct 26, 2020
07e87ad
Speed up tests (#977)
PGijsbers Oct 29, 2020
4923e5b
Additional fixes to PR 777 (#967)
Neeratyoy Oct 29, 2020
f2af798
Improving the performance of check_datasets_active (#980)
ArlindKadra Oct 29, 2020
756e747
Add CI through Github Actions (#975)
PGijsbers Oct 29, 2020
3132dac
add validation for ignore_attributes and default_target_attribute at …
a-moadel Oct 29, 2020
6afc880
Updated the way 'image features' are stored, updated old unit tests, …
ArlindKadra Oct 29, 2020
5b6de8a
Retry on database error to reduce number of test failures (#984)
mfeurer Oct 30, 2020
63ec0ae
Transition other Travis jobs to Github Actions (#988)
PGijsbers Nov 2, 2020
9a3a6dd
update progress file (#991)
a-moadel Nov 2, 2020
81cc423
docs: add a-moadel as a contributor (#992)
allcontributors[bot] Nov 2, 2020
51eaff6
docs: add Neeratyoy as a contributor (#998)
allcontributors[bot] Nov 2, 2020
a629562
Improve unit tests (#985)
mfeurer Nov 3, 2020
accde88
Warning if fitted sklearn model being used (#989)
Neeratyoy Nov 3, 2020
560e952
Cache dataset features and qualities as pickle (#979)
mfeurer Nov 3, 2020
5d5a48e
Update string formatting (#1001)
PGijsbers Nov 17, 2020
16799ad
Specify encoding for README file (#1004)
PGijsbers Nov 18, 2020
fba6aab
Making some unit tests work (#1000)
Neeratyoy Dec 24, 2020
e074c14
Refactor data loading/storing (#1018)
PGijsbers Jan 19, 2021
ab793a6
Adding helper functions to support ColumnTransformer (#982)
Neeratyoy Jan 28, 2021
47cda65
Rework local openml directory (#987)
mfeurer Feb 10, 2021
80ae046
Feature/give possibility to not download the dataset qualities (#1017)
a-moadel Feb 11, 2021
d2945ba
Adding sklearn 0.24 support (#1016)
Neeratyoy Feb 11, 2021
3c680c1
improve path detection (#1021)
mfeurer Feb 12, 2021
7553281
Removing flaky decorator for study unit test (#1024)
Neeratyoy Feb 16, 2021
ff7a251
Adding sklearn min. dependencies for all versions (#1022)
Neeratyoy Feb 18, 2021
4ff66ed
Parallel evaluation of tasks (#1020)
Neeratyoy Feb 18, 2021
38f9bf0
Parquet Support (#1029)
PGijsbers Mar 4, 2021
6c609b8
API for topics (#1023)
sahithyaravi Mar 9, 2021
4aec00a
Remove nan-likes from category header (#1037)
PGijsbers Mar 12, 2021
f94672e
Measuring runtimes (#1031)
Neeratyoy Mar 12, 2021
bd8ae14
Fix 1013: Store run `setup_string` (#1015)
PGijsbers Mar 25, 2021
11e6235
Fix #1033: skip two unit tests on Windows (#1040)
mfeurer Mar 26, 2021
d9037e7
bump version for new release (#1041)
mfeurer Mar 29, 2021
5511fa0
fix loky/concurrency issue (#1042)
mfeurer Mar 30, 2021
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Cache dataset features and qualities as pickle (#979)
* cache dataset features and qualities as pickle

* incorporate feedback

* Fix unit tests

* black, pep8 etc

* Remove unused imports

Co-authored-by: PGijsbers <p.gijsbers@tue.nl>
  • Loading branch information
mfeurer and PGijsbers authored Nov 3, 2020
commit 560e952bb11d9a3ff39271198b0c2667c476e5f7
11 changes: 10 additions & 1 deletion openml/datasets/data_feature.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
# License: BSD 3-Clause

from typing import List


class OpenMLDataFeature(object):
"""
Expand All @@ -20,7 +22,14 @@ class OpenMLDataFeature(object):

LEGAL_DATA_TYPES = ["nominal", "numeric", "string", "date"]

def __init__(self, index, name, data_type, nominal_values, number_missing_values):
def __init__(
self,
index: int,
name: str,
data_type: str,
nominal_values: List[str],
number_missing_values: int,
):
if type(index) != int:
raise ValueError("Index is of wrong datatype")
if data_type not in self.LEGAL_DATA_TYPES:
Expand Down
143 changes: 96 additions & 47 deletions openml/datasets/dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@
from collections import OrderedDict
import re
import gzip
import io
import logging
import os
import pickle
Expand All @@ -13,6 +12,7 @@
import numpy as np
import pandas as pd
import scipy.sparse
import xmltodict

from openml.base import OpenMLBase
from .data_feature import OpenMLDataFeature
Expand Down Expand Up @@ -125,8 +125,8 @@ def __init__(
update_comment=None,
md5_checksum=None,
data_file=None,
features=None,
qualities=None,
features_file: Optional[str] = None,
qualities_file: Optional[str] = None,
dataset=None,
):
def find_invalid_characters(string, pattern):
Expand Down Expand Up @@ -188,7 +188,7 @@ def find_invalid_characters(string, pattern):
self.default_target_attribute = default_target_attribute
self.row_id_attribute = row_id_attribute
if isinstance(ignore_attribute, str):
self.ignore_attribute = [ignore_attribute]
self.ignore_attribute = [ignore_attribute] # type: Optional[List[str]]
elif isinstance(ignore_attribute, list) or ignore_attribute is None:
self.ignore_attribute = ignore_attribute
else:
Expand All @@ -202,33 +202,25 @@ def find_invalid_characters(string, pattern):
self.update_comment = update_comment
self.md5_checksum = md5_checksum
self.data_file = data_file
self.features = None
self.qualities = None
self._dataset = dataset

if features is not None:
self.features = {}
for idx, xmlfeature in enumerate(features["oml:feature"]):
nr_missing = xmlfeature.get("oml:number_of_missing_values", 0)
feature = OpenMLDataFeature(
int(xmlfeature["oml:index"]),
xmlfeature["oml:name"],
xmlfeature["oml:data_type"],
xmlfeature.get("oml:nominal_value"),
int(nr_missing),
)
if idx != feature.index:
raise ValueError("Data features not provided " "in right order")
self.features[feature.index] = feature
if features_file is not None:
self.features = _read_features(
features_file
) # type: Optional[Dict[int, OpenMLDataFeature]]
else:
self.features = None

self.qualities = _check_qualities(qualities)
if qualities_file:
self.qualities = _read_qualities(qualities_file) # type: Optional[Dict[str, float]]
else:
self.qualities = None

if data_file is not None:
(
self.data_pickle_file,
self.data_feather_file,
self.feather_attribute_file,
) = self._create_pickle_in_cache(data_file)
rval = self._create_pickle_in_cache(data_file)
self.data_pickle_file = rval[0] # type: Optional[str]
self.data_feather_file = rval[1] # type: Optional[str]
self.feather_attribute_file = rval[2] # type: Optional[str]
else:
self.data_pickle_file, self.data_feather_file, self.feather_attribute_file = (
None,
Expand Down Expand Up @@ -357,7 +349,7 @@ def decode_arff(fh):
with gzip.open(filename) as fh:
return decode_arff(fh)
else:
with io.open(filename, encoding="utf8") as fh:
with open(filename, encoding="utf8") as fh:
return decode_arff(fh)

def _parse_data_from_arff(
Expand Down Expand Up @@ -405,12 +397,10 @@ def _parse_data_from_arff(
# can be encoded into integers
pd.factorize(type_)[0]
except ValueError:
raise ValueError(
"Categorical data needs to be numeric when " "using sparse ARFF."
)
raise ValueError("Categorical data needs to be numeric when using sparse ARFF.")
# string can only be supported with pandas DataFrame
elif type_ == "STRING" and self.format.lower() == "sparse_arff":
raise ValueError("Dataset containing strings is not supported " "with sparse ARFF.")
raise ValueError("Dataset containing strings is not supported with sparse ARFF.")

# infer the dtype from the ARFF header
if isinstance(type_, list):
Expand Down Expand Up @@ -743,7 +733,7 @@ def get_data(
to_exclude.extend(self.ignore_attribute)

if len(to_exclude) > 0:
logger.info("Going to remove the following attributes:" " %s" % to_exclude)
logger.info("Going to remove the following attributes: %s" % to_exclude)
keep = np.array(
[True if column not in to_exclude else False for column in attribute_names]
)
Expand Down Expand Up @@ -810,6 +800,10 @@ def retrieve_class_labels(self, target_name: str = "class") -> Union[None, List[
-------
list
"""
if self.features is None:
raise ValueError(
"retrieve_class_labels can only be called if feature information is available."
)
for feature in self.features.values():
if (feature.name == target_name) and (feature.data_type == "nominal"):
return feature.nominal_values
Expand Down Expand Up @@ -938,18 +932,73 @@ def _to_dict(self) -> "OrderedDict[str, OrderedDict]":
return data_container


def _check_qualities(qualities):
if qualities is not None:
qualities_ = {}
for xmlquality in qualities:
name = xmlquality["oml:name"]
if xmlquality.get("oml:value", None) is None:
value = float("NaN")
elif xmlquality["oml:value"] == "null":
value = float("NaN")
else:
value = float(xmlquality["oml:value"])
qualities_[name] = value
return qualities_
else:
return None
def _read_features(features_file: str) -> Dict[int, OpenMLDataFeature]:
    """Load dataset features, preferring the pickle cache over the XML file.

    On a cache miss (or any failure to unpickle) the features XML file is
    parsed, converted into ``OpenMLDataFeature`` objects, and the result is
    re-cached as a pickle so subsequent loads are fast.

    Parameters
    ----------
    features_file : str
        Path to the features XML file.

    Returns
    -------
    Dict[int, OpenMLDataFeature]
        Mapping from feature index to feature, in server-provided order.

    Raises
    ------
    ValueError
        If the features in the XML are not listed in index order.
    """
    features_pickle_file = _get_features_pickle_file(features_file)
    try:
        with open(features_pickle_file, "rb") as fh_binary:
            features = pickle.load(fh_binary)
    except Exception:
        # Any load failure (missing, stale, or corrupt pickle) is treated as a
        # cache miss. ``except Exception`` instead of a bare ``except`` so that
        # KeyboardInterrupt/SystemExit are not swallowed here.
        with open(features_file, encoding="utf8") as fh:
            features_xml_string = fh.read()
        # force_list guarantees list-typed entries even for single elements.
        xml_dict = xmltodict.parse(
            features_xml_string, force_list=("oml:feature", "oml:nominal_value")
        )
        features_xml = xml_dict["oml:data_features"]

        features = {}
        for idx, xmlfeature in enumerate(features_xml["oml:feature"]):
            nr_missing = xmlfeature.get("oml:number_of_missing_values", 0)
            feature = OpenMLDataFeature(
                int(xmlfeature["oml:index"]),
                xmlfeature["oml:name"],
                xmlfeature["oml:data_type"],
                xmlfeature.get("oml:nominal_value"),
                int(nr_missing),
            )
            if idx != feature.index:
                raise ValueError("Data features not provided in right order")
            features[feature.index] = feature

        # Refresh the cache so the XML does not need to be parsed again.
        with open(features_pickle_file, "wb") as fh_binary:
            pickle.dump(features, fh_binary)
    return features


def _get_features_pickle_file(features_file: str) -> str:
"""This function only exists so it can be mocked during unit testing"""
return features_file + ".pkl"


def _read_qualities(qualities_file: str) -> Dict[str, float]:
    """Load dataset qualities, preferring the pickle cache over the XML file.

    On a cache miss (or any failure to unpickle) the qualities XML file is
    parsed and converted via ``_check_qualities``, and the result is re-cached
    as a pickle so subsequent loads are fast.

    Parameters
    ----------
    qualities_file : str
        Path to the qualities XML file.

    Returns
    -------
    Dict[str, float]
        Mapping from quality name to value (NaN for missing/"null" values).
    """
    qualities_pickle_file = _get_qualities_pickle_file(qualities_file)
    try:
        with open(qualities_pickle_file, "rb") as fh_binary:
            qualities = pickle.load(fh_binary)
    except Exception:
        # Any load failure (missing, stale, or corrupt pickle) is treated as a
        # cache miss. ``except Exception`` instead of a bare ``except`` so that
        # KeyboardInterrupt/SystemExit are not swallowed here.
        with open(qualities_file, encoding="utf8") as fh:
            qualities_xml = fh.read()
        # force_list guarantees a list even when only one quality is present.
        xml_as_dict = xmltodict.parse(qualities_xml, force_list=("oml:quality",))
        qualities = xml_as_dict["oml:data_qualities"]["oml:quality"]
        qualities = _check_qualities(qualities)
        # Refresh the cache so the XML does not need to be parsed again.
        with open(qualities_pickle_file, "wb") as fh_binary:
            pickle.dump(qualities, fh_binary)
    return qualities


def _get_qualities_pickle_file(qualities_file: str) -> str:
"""This function only exists so it can be mocked during unit testing"""
return qualities_file + ".pkl"


def _check_qualities(qualities: List[Dict[str, str]]) -> Dict[str, float]:
qualities_ = {}
for xmlquality in qualities:
name = xmlquality["oml:name"]
if xmlquality.get("oml:value", None) is None:
value = float("NaN")
elif xmlquality["oml:value"] == "null":
value = float("NaN")
else:
value = float(xmlquality["oml:value"])
qualities_[name] = value
return qualities_
Loading