summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDebian Med Packaging Team <debian-med-packaging@lists.alioth.debian.org>2023-02-03 21:35:26 +0100
committerÉtienne Mollier <emollier@debian.org>2023-02-03 21:35:26 +0100
commit07dd462c05552ae2c48dd8ff419b6088663c3414 (patch)
treecc06aafd7c70427fd4875d8995bc8ea77660324b
parentc545752cabf5e705c298c7d930136b3dcaae7630 (diff)
Fix autopkgtest errors that were failing due to sklearn changed API and
assignment of multi-dimensional array to pandas Author: Mohammed Bilal <mdbilal@disroot.org> Last-Update: 2022-09-09 Gbp-Pq: Name fix-autopkgtest.patch
-rw-r--r--q2_sample_classifier/utilities.py8
1 files changed, 4 insertions, 4 deletions
diff --git a/q2_sample_classifier/utilities.py b/q2_sample_classifier/utilities.py
index 6f57456..d4e7477 100644
--- a/q2_sample_classifier/utilities.py
+++ b/q2_sample_classifier/utilities.py
@@ -253,7 +253,7 @@ def _extract_rfe_scores(rfecv):
for n in range(len(rfecv.grid_scores_)-1, -1, -1)]
if x[0] < 1:
x[0] = 1
- return pd.Series(rfecv.grid_scores_, index=x, name='Accuracy')
+ return pd.Series(rfecv.cv_results_['mean_test_score'], index=x, name='Accuracy')
def nested_cross_validation(table, metadata, cv, random_state, n_jobs,
@@ -512,13 +512,13 @@ def _extract_estimator_parameters(estimator):
# (drop pipeline params and individual base estimators)
estimator_params = {k: v for k, v in estimator.get_params().items() if
k.startswith('est__') and k != 'est__base_estimator'}
- return pd.Series(estimator_params, name='Parameter setting')
+ return pd.Series(list(estimator_params), name='Parameter setting')
def _summarize_estimator(output_dir, sample_estimator):
try:
rfep = _plot_RFE(
- x=sample_estimator.rfe_scores.index, y=sample_estimator.rfe_scores)
+ x=sample_estimator.rfe_scores.index, y=np.stack(sample_estimator.rfe_scores.values))
rfep.savefig(join(output_dir, 'rfe_plot.png'))
rfep.savefig(join(output_dir, 'rfe_plot.pdf'))
plt.close('all')
@@ -807,7 +807,7 @@ def _train_adaboost_base_estimator(table, metadata, column, n_estimators,
return Pipeline(
[('dv', base_estimator.named_steps.dv),
('est', adaboost_estimator(base_estimator.named_steps.est,
- n_estimators, random_state=random_state))])
+ n_estimators=n_estimators, random_state=random_state))])
def _disable_feature_selection(estimator, optimize_feature_selection):