diff --git a/src/gbm/gblinear_model.cc b/src/gbm/gblinear_model.cc
index 7e702493352e..3a86cc07be07 100644
--- a/src/gbm/gblinear_model.cc
+++ b/src/gbm/gblinear_model.cc
@@ -18,6 +18,7 @@ void GBLinearModel::SaveModel(Json* p_out) const {
     j_weights[i] = weight[i];
   }
   out["weights"] = std::move(j_weights);
+  out["boosted_rounds"] = Json{this->num_boosted_rounds};
 }
 
 void GBLinearModel::LoadModel(Json const& in) {
@@ -27,6 +28,13 @@ void GBLinearModel::LoadModel(Json const& in) {
   for (size_t i = 0; i < n_weights; ++i) {
     weight[i] = get<Number const>(j_weights[i]);
  }
+  auto const& obj = get<Object const>(in);
+  auto boosted_rounds = obj.find("boosted_rounds");
+  if (boosted_rounds != obj.cend()) {
+    this->num_boosted_rounds = get<Integer const>(boosted_rounds->second);
+  } else {
+    this->num_boosted_rounds = 0;
+  }
 }
 
 DMLC_REGISTER_PARAMETER(DeprecatedGBLinearModelParam);
diff --git a/tests/python-gpu/test_gpu_linear.py b/tests/python-gpu/test_gpu_linear.py
index 262a4eb95153..e8ec23b72923 100644
--- a/tests/python-gpu/test_gpu_linear.py
+++ b/tests/python-gpu/test_gpu_linear.py
@@ -1,7 +1,6 @@
 import sys
-from hypothesis import strategies, given, settings, assume
+from hypothesis import strategies, given, settings, assume, note
 import pytest
-import numpy
 import xgboost as xgb
 sys.path.append("tests/python")
 import testing as tm
@@ -17,10 +16,14 @@
     'top_k': strategies.integers(1, 10),
 })
 
+
 def train_result(param, dmat, num_rounds):
     result = {}
-    xgb.train(param, dmat, num_rounds, [(dmat, 'train')], verbose_eval=False,
-              evals_result=result)
+    booster = xgb.train(
+        param, dmat, num_rounds, [(dmat, 'train')], verbose_eval=False,
+        evals_result=result
+    )
+    assert booster.num_boosted_rounds() == num_rounds
     return result
 
 
@@ -33,6 +36,7 @@ def test_gpu_coordinate(self, param, num_rounds, dataset):
         param['updater'] = 'gpu_coord_descent'
         param = dataset.set_params(param)
         result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
+        note(result)
         assert tm.non_increasing(result)
 
     # Loss is not guaranteed to always decrease because of regularisation parameters
@@ -49,6 +53,7 @@ def test_gpu_coordinate_regularised(self, param, num_rounds, dataset, alpha, lambd
         param['lambda'] = lambd
         param = dataset.set_params(param)
         result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
+        note(result)
         assert tm.non_increasing([result[0], result[-1]])
 
     @pytest.mark.skipif(**tm.no_cupy())
diff --git a/tests/python/test_linear.py b/tests/python/test_linear.py
index 5e1f6b8e914d..2ed5eea2ff34 100644
--- a/tests/python/test_linear.py
+++ b/tests/python/test_linear.py
@@ -32,6 +32,7 @@ def test_coordinate(self, param, num_rounds, dataset, coord_param):
         param.update(coord_param)
         param = dataset.set_params(param)
         result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
+        note(result)
         assert tm.non_increasing(result, 5e-4)
 
     # Loss is not guaranteed to always decrease because of regularisation parameters
@@ -48,6 +49,7 @@ def test_coordinate_regularised(self, param, num_rounds, dataset, coord_param, a
         param.update(coord_param)
         param = dataset.set_params(param)
         result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
+        note(result)
         assert tm.non_increasing([result[0], result[-1]])
 
     @given(parameter_strategy, strategies.integers(10, 50),
@@ -57,6 +59,7 @@ def test_shotgun(self, param, num_rounds, dataset):
         param['updater'] = 'shotgun'
         param = dataset.set_params(param)
         result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
+        note(result)
         # shotgun is non-deterministic, so we relax the test by only using first and last
         # iteration.
         if len(result) > 2:
@@ -75,4 +78,5 @@ def test_shotgun_regularised(self, param, num_rounds, dataset, alpha, lambd):
         param['lambda'] = lambd
         param = dataset.set_params(param)
         result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
+        note(result)
         assert tm.non_increasing([result[0], result[-1]])
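Not part of the patch — a minimal round-trip sketch of the behaviour it adds, assuming an xgboost build that includes this change (data and the model file name are illustrative): `boosted_rounds` is written into the gblinear JSON model on save and restored on load, with models predating the key loading as 0 rounds.

```python
import numpy as np
import xgboost as xgb

# Illustrative data; any regression dataset works here.
X = np.random.randn(128, 8)
y = np.random.randn(128)
dtrain = xgb.DMatrix(X, label=y)

booster = xgb.train({"booster": "gblinear"}, dtrain, num_boost_round=4)
assert booster.num_boosted_rounds() == 4

booster.save_model("gblinear.json")               # SaveModel writes "boosted_rounds"
loaded = xgb.Booster(model_file="gblinear.json")  # LoadModel reads it back
assert loaded.num_boosted_rounds() == 4           # falls back to 0 if the key is absent
```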