Skip to content

Commit

Permalink
[BP] Fix num_boosted_rounds for linear model. (#7538) (#7559)
Browse files (browse the repository at this point in the history)
* Add note.

* Fix n boosted rounds.
(branch information unavailable — page captured while still loading)
trivialfis committed Jan 13, 2022
1 parent 328d1e1 commit 1311a20
Show file tree
Hide file tree
Showing 3 changed files with 21 additions and 4 deletions.
8 changes: 8 additions & 0 deletions src/gbm/gblinear_model.cc
Expand Up @@ -18,6 +18,7 @@ void GBLinearModel::SaveModel(Json* p_out) const {
j_weights[i] = weight[i];
}
out["weights"] = std::move(j_weights);
out["boosted_rounds"] = Json{this->num_boosted_rounds};
}

void GBLinearModel::LoadModel(Json const& in) {
Expand All @@ -27,6 +28,13 @@ void GBLinearModel::LoadModel(Json const& in) {
for (size_t i = 0; i < n_weights; ++i) {
weight[i] = get<Number const>(j_weights[i]);
}
auto const& obj = get<Object const>(in);
auto boosted_rounds = obj.find("boosted_rounds");
if (boosted_rounds != obj.cend()) {
this->num_boosted_rounds = get<Integer const>(boosted_rounds->second);
} else {
this->num_boosted_rounds = 0;
}
}

DMLC_REGISTER_PARAMETER(DeprecatedGBLinearModelParam);
Expand Down
13 changes: 9 additions & 4 deletions tests/python-gpu/test_gpu_linear.py
@@ -1,7 +1,6 @@
import sys
from hypothesis import strategies, given, settings, assume
from hypothesis import strategies, given, settings, assume, note
import pytest
import numpy
import xgboost as xgb
sys.path.append("tests/python")
import testing as tm
Expand All @@ -17,10 +16,14 @@
'top_k': strategies.integers(1, 10),
})


def train_result(param, dmat, num_rounds):
    """Train a linear booster and return its per-iteration evaluation history.

    Parameters
    ----------
    param : dict
        Booster parameters forwarded unchanged to ``xgb.train``.
    dmat : xgb.DMatrix
        Training data; also used as the single evaluation set named ``'train'``.
    num_rounds : int
        Number of boosting rounds to run.

    Returns
    -------
    dict
        The ``evals_result`` mapping filled by training:
        ``{'train': {metric_name: [value per round]}}``.
    """
    result = {}
    booster = xgb.train(
        param, dmat, num_rounds, [(dmat, 'train')], verbose_eval=False,
        evals_result=result
    )
    # Regression guard for xgboost #7538: gblinear previously reported a
    # wrong boosted-round count after training.
    assert booster.num_boosted_rounds() == num_rounds
    return result


Expand All @@ -33,6 +36,7 @@ def test_gpu_coordinate(self, param, num_rounds, dataset):
param['updater'] = 'gpu_coord_descent'
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
assert tm.non_increasing(result)

# Loss is not guaranteed to always decrease because of regularisation parameters
Expand All @@ -49,6 +53,7 @@ def test_gpu_coordinate_regularised(self, param, num_rounds, dataset, alpha, lam
param['lambda'] = lambd
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
assert tm.non_increasing([result[0], result[-1]])

@pytest.mark.skipif(**tm.no_cupy())
Expand Down
4 changes: 4 additions & 0 deletions tests/python/test_linear.py
Expand Up @@ -32,6 +32,7 @@ def test_coordinate(self, param, num_rounds, dataset, coord_param):
param.update(coord_param)
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
assert tm.non_increasing(result, 5e-4)

# Loss is not guaranteed to always decrease because of regularisation parameters
Expand All @@ -48,6 +49,7 @@ def test_coordinate_regularised(self, param, num_rounds, dataset, coord_param, a
param.update(coord_param)
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
assert tm.non_increasing([result[0], result[-1]])

@given(parameter_strategy, strategies.integers(10, 50),
Expand All @@ -57,6 +59,7 @@ def test_shotgun(self, param, num_rounds, dataset):
param['updater'] = 'shotgun'
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
# shotgun is non-deterministic, so we relax the test by only using first and last
# iteration.
if len(result) > 2:
Expand All @@ -75,4 +78,5 @@ def test_shotgun_regularised(self, param, num_rounds, dataset, alpha, lambd):
param['lambda'] = lambd
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
assert tm.non_increasing([result[0], result[-1]])

0 comments on commit 1311a20

Please sign in to comment.