BREAKING_CHANGE(xgboost): stricter checks on eval_metric
sebffischer authored Aug 17, 2024
2 parents ad78250 + fe9af67 commit 254488b
Showing 5 changed files with 44 additions and 2 deletions.
4 changes: 4 additions & 0 deletions NEWS.md
@@ -3,6 +3,10 @@
* bugfix: validation for learner `lrn("regr.xgboost")` now works properly. Previously the training data was used.
* feat: add weights for logistic regression again, which were incorrectly removed
in a previous release (#265)
* BREAKING_CHANGE: When using internal tuning for xgboost learners, the `eval_metric` must now be set explicitly.
This ensures that users make a conscious decision about which performance metric to use for
early stopping.


# mlr3learners 0.7.0

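For illustration, a minimal sketch of what the stricter check means when configuring internal tuning. It follows the pattern of the updated tests further down; attaching `paradox` for `to_tune()` is an assumption, and the exact error wording may differ slightly from this commit.

library(mlr3)
library(mlr3learners)
library(paradox)  # assumed to be needed here for to_tune()

learner = lrn("classif.xgboost")
learner$param_set$set_values(
  nrounds = to_tune(upper = 100, internal = TRUE),
  early_stopping_rounds = 10
)

# Without an explicit eval_metric, converting the internal search space
# now errors, asking for 'eval_metric' to be set explicitly.
try(learner$param_set$convert_internal_search_space(learner$param_set$search_space()))

# Choosing the metric consciously makes the conversion work again.
learner$param_set$set_values(eval_metric = "logloss")
learner$param_set$convert_internal_search_space(learner$param_set$search_space())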
3 changes: 3 additions & 0 deletions R/LearnerClassifXgboost.R
@@ -83,6 +83,9 @@ LearnerClassifXgboost = R6Class("LearnerClassifXgboost",
if (is.null(param_vals$early_stopping_rounds)) {
stop("Parameter 'early_stopping_rounds' must be set to use internal tuning.")
}
if (is.null(param_vals$eval_metric)) {
stop("Parameter 'eval_metric' must be set explicitly when using internal tuning.")
}
assert_integerish(domain$upper, len = 1L, any.missing = FALSE) }, .parent = topenv()),
disable_in_tune = list(early_stopping_rounds = NULL)
)
3 changes: 3 additions & 0 deletions R/LearnerRegrXgboost.R
@@ -62,6 +62,9 @@ LearnerRegrXgboost = R6Class("LearnerRegrXgboost",
if (is.null(param_vals$early_stopping_rounds)) {
stop("Parameter 'early_stopping_rounds' must be set to use internal tuning.")
}
if (is.null(param_vals$eval_metric)) {
stop("Parameter 'eval_metric' must be set explicitly when using internal tuning.")
}
assert_integerish(domain$upper, len = 1L, any.missing = FALSE) }, .parent = topenv()),
disable_in_tune = list(early_stopping_rounds = NULL)
)
18 changes: 17 additions & 1 deletion tests/testthat/test_classif_xgboost.R
@@ -76,7 +76,6 @@ test_that("validation and inner tuning", {
expect_named(learner$model$evaluation_log, c("iter", "test_logloss"))
expect_list(learner$internal_valid_scores, types = "numeric")
expect_equal(names(learner$internal_valid_scores), "logloss")
expect_equal(learner$internal_valid_scores$logloss, learner$model$evaluation_log[get("iter") == 10, "test_logloss"][[1L]])

expect_list(learner$internal_tuned_values, types = "integerish")
expect_equal(names(learner$internal_tuned_values), "nrounds")
@@ -123,4 +122,21 @@ test_that("validation and inner tuning", {
learner$train(task)
expect_equal(learner$internal_valid_scores$logloss, learner$model$evaluation_log$test_logloss[10L])
expect_true(is.null(learner$internal_tuned_values))

learner$param_set$set_values(
nrounds = to_tune(upper = 100, internal = TRUE),
early_stopping_rounds = 10
)
expect_error(
learner$param_set$convert_internal_search_space(learner$param_set$search_space()),
"eval_metric"
)

learner$param_set$set_values(
eval_metric = "logloss"
)
expect_error(
learner$param_set$convert_internal_search_space(learner$param_set$search_space()),
regexp = NA
)
})
18 changes: 17 additions & 1 deletion tests/testthat/test_regr_xgboost.R
@@ -57,7 +57,6 @@ test_that("validation and inner tuning", {
expect_named(learner$model$evaluation_log, c("iter", "test_rmse"))
expect_list(learner$internal_valid_scores, types = "numeric")
expect_equal(names(learner$internal_valid_scores), "rmse")
expect_equal(learner$internal_valid_scores$rmse, learner$model$evaluation_log[get("iter") == 10, "test_rmse"][[1L]])

expect_list(learner$internal_tuned_values, types = "integerish")
expect_equal(names(learner$internal_tuned_values), "nrounds")
@@ -104,4 +103,21 @@ test_that("validation and inner tuning", {
learner$train(task)
expect_equal(learner$internal_valid_scores$rmse, learner$model$evaluation_log$test_rmse[10L])
expect_true(is.null(learner$internal_tuned_values))

learner$param_set$set_values(
nrounds = to_tune(upper = 100, internal = TRUE),
early_stopping_rounds = 10
)
expect_error(
learner$param_set$convert_internal_search_space(learner$param_set$search_space()),
"eval_metric"
)

learner$param_set$set_values(
eval_metric = "rmse"
)
expect_error(
learner$param_set$convert_internal_search_space(learner$param_set$search_space()),
regexp = NA
)
})
