Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Next Next commit
Preliminary changes
  • Loading branch information
Neeratyoy committed Jul 27, 2020
commit 46c25fdfdfac32eb44d7bcd1683d31176a7e75f9
77 changes: 55 additions & 22 deletions tests/test_flows/test_flow.py
Original file line number Diff line number Diff line change
Expand Up @@ -305,15 +305,27 @@ def test_publish_error(self, api_call_mock, flow_exists_mock, get_flow_mock):
"collected from {}: {}".format(__file__.split("/")[-1], flow.flow_id)
)

fixture = (
"The flow on the server is inconsistent with the local flow. "
"The server flow ID is 1. Please check manually and remove "
"the flow if necessary! Error is:\n"
"'Flow sklearn.ensemble.forest.RandomForestClassifier: "
"values for attribute 'name' differ: "
"'sklearn.ensemble.forest.RandomForestClassifier'"
"\nvs\n'sklearn.ensemble.forest.RandomForestClassifie'.'"
)
if LooseVersion(sklearn.__version__) < "0.22":
fixture = (
"The flow on the server is inconsistent with the local flow. "
"The server flow ID is 1. Please check manually and remove "
"the flow if necessary! Error is:\n"
"'Flow sklearn.ensemble.forest.RandomForestClassifier: "
"values for attribute 'name' differ: "
"'sklearn.ensemble.forest.RandomForestClassifier'"
"\nvs\n'sklearn.ensemble.forest.RandomForestClassifie'.'"
)
else:
# sklearn.ensemble.forest -> sklearn.ensemble._forest
fixture = (
"The flow on the server is inconsistent with the local flow. "
"The server flow ID is 1. Please check manually and remove "
"the flow if necessary! Error is:\n"
"'Flow sklearn.ensemble._forest.RandomForestClassifier: "
"values for attribute 'name' differ: "
"'sklearn.ensemble._forest.RandomForestClassifier'"
"\nvs\n'sklearn.ensemble._forest.RandomForestClassifie'.'"
)

self.assertEqual(context_manager.exception.args[0], fixture)
self.assertEqual(get_flow_mock.call_count, 2)
Expand Down Expand Up @@ -463,19 +475,40 @@ def test_sklearn_to_upload_to_flow(self):

# OneHotEncoder was moved to _encoders module in 0.20
module_name_encoder = "_encoders" if LooseVersion(sklearn.__version__) >= "0.20" else "data"
fixture_name = (
"%ssklearn.model_selection._search.RandomizedSearchCV("
"estimator=sklearn.pipeline.Pipeline("
"ohe=sklearn.preprocessing.%s.OneHotEncoder,"
"scaler=sklearn.preprocessing.data.StandardScaler,"
"fu=sklearn.pipeline.FeatureUnion("
"pca=sklearn.decomposition.truncated_svd.TruncatedSVD,"
"fs="
"sklearn.feature_selection.univariate_selection.SelectPercentile),"
"boosting=sklearn.ensemble.weight_boosting.AdaBoostClassifier("
"base_estimator=sklearn.tree.tree.DecisionTreeClassifier)))"
% (sentinel, module_name_encoder)
)
if LooseVersion(sklearn.__version__) < "0.22":
fixture_name = (
"%ssklearn.model_selection._search.RandomizedSearchCV("
"estimator=sklearn.pipeline.Pipeline("
"ohe=sklearn.preprocessing.%s.OneHotEncoder,"
"scaler=sklearn.preprocessing.data.StandardScaler,"
"fu=sklearn.pipeline.FeatureUnion("
"pca=sklearn.decomposition.truncated_svd.TruncatedSVD,"
"fs="
"sklearn.feature_selection.univariate_selection.SelectPercentile),"
"boosting=sklearn.ensemble.weight_boosting.AdaBoostClassifier("
"base_estimator=sklearn.tree.tree.DecisionTreeClassifier)))"
% (sentinel, module_name_encoder)
)
else:
# sklearn.preprocessing.data -> sklearn.preprocessing._data
# sklearn.decomposition.truncated_svd -> sklearn.decomposition._truncated_svd
# sklearn.feature_selection.univariate_selection ->
# sklearn.feature_selection._univariate_selection
# sklearn.ensemble.weight_boosting -> sklearn.ensemble._weight_boosting
# sklearn.tree.tree.DecisionTree... -> sklearn.tree._classes.DecisionTree...
fixture_name = (
"%ssklearn.model_selection._search.RandomizedSearchCV("
"estimator=sklearn.pipeline.Pipeline("
"ohe=sklearn.preprocessing.%s.OneHotEncoder,"
"scaler=sklearn.preprocessing._data.StandardScaler,"
"fu=sklearn.pipeline.FeatureUnion("
"pca=sklearn.decomposition._truncated_svd.TruncatedSVD,"
"fs="
"sklearn.feature_selection._univariate_selection.SelectPercentile),"
"boosting=sklearn.ensemble._weight_boosting.AdaBoostClassifier("
"base_estimator=sklearn.tree._classes.DecisionTreeClassifier)))"
% (sentinel, module_name_encoder)
)
self.assertEqual(new_flow.name, fixture_name)
new_flow.model.fit(X, y)

Expand Down
10 changes: 8 additions & 2 deletions tests/test_runs/test_run_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -199,8 +199,11 @@ def _perform_run(
classes_without_random_state = [
"sklearn.model_selection._search.GridSearchCV",
"sklearn.pipeline.Pipeline",
"sklearn.linear_model.base.LinearRegression",
]
if LooseVersion(sklearn.__version__) < "0.22":
classes_without_random_state.append("sklearn.linear_model.base.LinearRegression")
else:
classes_without_random_state.append("sklearn.linear_model._base.LinearRegression")

def _remove_random_state(flow):
if "random_state" in flow.parameters:
Expand Down Expand Up @@ -779,10 +782,13 @@ def _test_local_evaluations(self, run):
(sklearn.metrics.cohen_kappa_score, {"weights": None}),
(sklearn.metrics.roc_auc_score, {}),
(sklearn.metrics.average_precision_score, {}),
(sklearn.metrics.jaccard_similarity_score, {}),
(sklearn.metrics.precision_score, {"average": "macro"}),
(sklearn.metrics.brier_score_loss, {}),
]
if LooseVersion(sklearn.__version__) < "0.23":
tests.append((sklearn.metrics.jaccard_similarity_score, {}))
else:
tests.append((sklearn.metrics.jaccard_score, {}))
for test_idx, test in enumerate(tests):
alt_scores = run.get_metric_fn(sklearn_fn=test[0], kwargs=test[1],)
self.assertEqual(len(alt_scores), 10)
Expand Down