diff --git a/doc/parameter.rst b/doc/parameter.rst
index 8833c6eb7150..837a7d1b0b7a 100644
--- a/doc/parameter.rst
+++ b/doc/parameter.rst
@@ -370,9 +370,11 @@ Specify the learning task and the corresponding learning objective. The objectiv
   - ``reg:gamma``: gamma regression with log-link. Output is a mean of gamma distribution. It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be `gamma-distributed <https://en.wikipedia.org/wiki/Gamma_distribution#Occurrence_and_applications>`_.
   - ``reg:tweedie``: Tweedie regression with log-link. It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be `Tweedie-distributed <https://en.wikipedia.org/wiki/Tweedie_distribution#Occurrence_and_applications>`_.
 
-* ``base_score`` [default=0.5]
+* ``base_score``
 
   - The initial prediction score of all instances, global bias
+  - The parameter is automatically estimated for selected objectives before training. To
+    disable the estimation, specify a real number argument.
   - For sufficient number of iterations, changing this value will not have too much effect.
 
 * ``eval_metric`` [default according to objective]
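The doc change above captures the new behaviour: when ``base_score`` is left unset, selected objectives now estimate the intercept from the training data before the first boosting round, while supplying an explicit value opts out. A minimal sketch using the C++ ``Learner`` interface exercised throughout this patch (``Xy`` stands for any training ``DMatrix``; it is illustrative, not part of the patch):

    std::unique_ptr<Learner> learner{Learner::Create({Xy})};
    learner->SetParam("objective", "reg:absoluteerror");
    learner->SetParam("base_score", "0.5");  // explicit value disables the estimation
    learner->UpdateOneIter(0, Xy);           // trains without re-estimating the intercept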
diff --git a/include/xgboost/gbm.h b/include/xgboost/gbm.h
index 9951f7778ca2..07dd823716a1 100644
--- a/include/xgboost/gbm.h
+++ b/include/xgboost/gbm.h
@@ -75,6 +75,11 @@ class GradientBooster : public Model, public Configurable {
   /*! \brief Return number of boosted rounds. */
   virtual int32_t BoostedRounds() const = 0;
+  /**
+   * \brief Whether the model has already been trained. When the tree booster is chosen,
+   * this returns true when there are existing trees.
+   */
+  virtual bool ModelFitted() const = 0;
   /*!
    * \brief perform update to the model(boosting)
    * \param p_fmat feature matrix that provide access to features
diff --git a/include/xgboost/learner.h b/include/xgboost/learner.h
index 34ae5a4d53bb..6969c7d7dd48 100644
--- a/include/xgboost/learner.h
+++ b/include/xgboost/learner.h
@@ -328,7 +328,7 @@ struct LearnerModelParam {
   void Copy(LearnerModelParam const& that);
 
   /* \brief Whether this parameter is initialized with LearnerModelParamLegacy. */
-  bool Initialized() const { return num_feature != 0; }
+  bool Initialized() const { return num_feature != 0 && num_output_group != 0; }
 };
 }  // namespace xgboost
diff --git a/src/common/host_device_vector.cu b/src/common/host_device_vector.cu
index 456c60a67071..00f19230dc7e 100644
--- a/src/common/host_device_vector.cu
+++ b/src/common/host_device_vector.cu
@@ -162,6 +162,10 @@ class HostDeviceVectorImpl {
     if (device_ >= 0) {
       LazySyncHost(GPUAccess::kNone);
     }
+
+    if (device_ >= 0 && device >= 0) {
+      CHECK_EQ(device_, device) << "New device ordinal is different from previous one.";
+    }
     device_ = device;
     if (device_ >= 0) {
       LazyResizeDevice(data_h_.size());
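The new check makes the device-assignment contract of HostDeviceVector explicit: a vector may be bound to the host or to a single device ordinal, but silently re-targeting a different device is now an error. A sketch of the resulting contract (ordinals are illustrative):

    HostDeviceVector<float> vec;
    vec.SetDevice(0);   // bind to device 0
    vec.SetDevice(0);   // same ordinal: fine
    // vec.SetDevice(1);  // would trip the new CHECK_EQ: ordinal differs from previous one
    vec.SetDevice(-1);  // detach back to host; binding to another device is allowed again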
diff --git a/src/common/linalg_op.h b/src/common/linalg_op.h
index 0de173c8e73f..efb9cf300238 100644
--- a/src/common/linalg_op.h
+++ b/src/common/linalg_op.h
@@ -3,8 +3,8 @@
  */
 #ifndef XGBOOST_COMMON_LINALG_OP_H_
 #define XGBOOST_COMMON_LINALG_OP_H_
-#include <type_traits>
 #include <cstdint>  // std::int32_t
+#include <type_traits>
 
 #include "common.h"
 #include "threading_utils.h"
@@ -43,12 +43,12 @@ void ElementWiseKernelHost(linalg::TensorView<T, D> t, int32_t n_threads, Fn&& f
 
 #if !defined(XGBOOST_USE_CUDA)
 template <typename T, int32_t D, typename Fn>
-void ElementWiseKernelDevice(linalg::TensorView<T, D> t, Fn&& fn, void* s = nullptr) {
+void ElementWiseKernelDevice(linalg::TensorView<T, D>, Fn&&, void* = nullptr) {
   common::AssertGPUSupport();
 }
 
 template <typename T, int32_t D, typename Fn>
-void ElementWiseTransformDevice(linalg::TensorView<T, D> t, Fn&& fn, void* s = nullptr) {
+void ElementWiseTransformDevice(linalg::TensorView<T, D>, Fn&&, void* = nullptr) {
   common::AssertGPUSupport();
 }
diff --git a/src/gbm/gblinear.cc b/src/gbm/gblinear.cc
index c8cdfeb476b1..2498865e9110 100644
--- a/src/gbm/gblinear.cc
+++ b/src/gbm/gblinear.cc
@@ -95,6 +95,8 @@ class GBLinear : public GradientBooster {
     return model_.num_boosted_rounds;
   }
 
+  bool ModelFitted() const override { return BoostedRounds() != 0; }
+
   void Load(dmlc::Stream* fi) override {
     model_.Load(fi);
   }
diff --git a/src/gbm/gbtree.h b/src/gbm/gbtree.h
index ce82cb29629f..78224aba4408 100644
--- a/src/gbm/gbtree.h
+++ b/src/gbm/gbtree.h
@@ -252,6 +252,10 @@ class GBTree : public GradientBooster {
     return model_.trees.size() / this->LayerTrees();
   }
 
+  bool ModelFitted() const override {
+    return !model_.trees.empty() || !model_.trees_to_update.empty();
+  }
+
   void PredictBatch(DMatrix *p_fmat, PredictionCacheEntry *out_preds, bool training,
                     unsigned layer_begin, unsigned layer_end) override;
diff --git a/src/learner.cc b/src/learner.cc
index 4ba4f2f34a87..d2386b0062e9 100644
--- a/src/learner.cc
+++ b/src/learner.cc
@@ -12,6 +12,7 @@
 #include <algorithm>
 #include <array>
 #include <cctype>
+#include <cmath>
 #include <cstdint>
 #include <iomanip>
 #include <limits>  // std::numeric_limits
@@ -27,7 +28,6 @@
 #include "common/charconv.h"
 #include "common/common.h"
 #include "common/io.h"
-#include "common/linalg_op.h"
 #include "common/observer.h"
 #include "common/random.h"
 #include "common/threading_utils.h"
@@ -64,6 +64,15 @@ DECLARE_FIELD_ENUM_CLASS(xgboost::DataSplitMode);
 
 namespace xgboost {
 Learner::~Learner() = default;
+namespace {
+StringView ModelNotFitted() { return "Model is not yet initialized (not fitted)."; }
+
+template <typename T>
+T& UsePtr(T& ptr) {  // NOLINT
+  CHECK(ptr);
+  return ptr;
+}
+}  // anonymous namespace
 
 /*! \brief training parameter for regression
  *
@@ -75,20 +84,28 @@ struct LearnerModelParamLegacy : public dmlc::Parameter<LearnerModelParamLegacy>
   /* \brief global bias */
   bst_float base_score;
   /* \brief number of features */
-  uint32_t num_feature;
+  bst_feature_t num_feature;
   /* \brief number of classes, if it is multi-class classification */
-  int32_t num_class;
+  std::int32_t num_class;
   /*! \brief Model contain additional properties */
   int32_t contain_extra_attrs;
   /*! \brief Model contain eval metrics */
   int32_t contain_eval_metrics;
   /*! \brief the version of XGBoost. */
-  uint32_t major_version;
-  uint32_t minor_version;
+  std::uint32_t major_version;
+  std::uint32_t minor_version;
 
   uint32_t num_target{1};
-
-  int32_t base_score_estimated{0};
+  /**
+   * \brief Whether we should calculate the base score from training data.
+   *
+   * This is a private parameter as we can't expose it as a boolean due to the binary
+   * model format. Exposing it as an integer creates inconsistency with other parameters.
+   *
+   * Automatically disabled when base_score is specified by the user. int32 is used
+   * instead of bool for the ease of serialization.
+   */
+  std::int32_t boost_from_average{true};
   /*! \brief reserved field */
   int reserved[25];
   /*! \brief constructor */
@@ -98,14 +115,14 @@ struct LearnerModelParamLegacy : public dmlc::Parameter<LearnerModelParamLegacy>
     num_target = 1;
     major_version = std::get<0>(Version::Self());
     minor_version = std::get<1>(Version::Self());
-    base_score_estimated = 0;
+    boost_from_average = true;
     static_assert(sizeof(LearnerModelParamLegacy) == 136,
                   "Do not change the size of this struct, as it will break binary IO.");
   }
 
   // Skip other legacy fields.
   Json ToJson() const {
-    Object obj;
+    Json obj{Object{}};
     char floats[NumericLimits<float>::kToCharsSize];
     auto ret = to_chars(floats, floats + NumericLimits<float>::kToCharsSize, base_score);
     CHECK(ret.ec == std::errc{});
@@ -120,15 +137,19 @@ struct LearnerModelParamLegacy : public dmlc::Parameter<LearnerModelParamLegacy>
     ret = to_chars(integers, integers + NumericLimits<int64_t>::kToCharsSize,
                    static_cast<int64_t>(num_class));
     CHECK(ret.ec == std::errc());
-    obj["num_class"] =
-        std::string{integers, static_cast<size_t>(std::distance(integers, ret.ptr))};
+    obj["num_class"] = std::string{integers, static_cast<size_t>(std::distance(integers, ret.ptr))};
 
     ret = to_chars(integers, integers + NumericLimits<int64_t>::kToCharsSize,
                    static_cast<int64_t>(num_target));
     obj["num_target"] =
         std::string{integers, static_cast<size_t>(std::distance(integers, ret.ptr))};
-    return Json(std::move(obj));
+    ret = to_chars(integers, integers + NumericLimits<int64_t>::kToCharsSize,
+                   static_cast<int64_t>(boost_from_average));
+    obj["boost_from_average"] =
+        std::string{integers, static_cast<size_t>(std::distance(integers, ret.ptr))};
+
+    return obj;
   }
   void FromJson(Json const& obj) {
     auto const& j_param = get<Object const>(obj);
@@ -139,13 +160,15 @@ struct LearnerModelParamLegacy : public dmlc::Parameter<LearnerModelParamLegacy>
     if (n_targets_it != j_param.cend()) {
       m["num_target"] = get<String const>(n_targets_it->second);
     }
+    auto bse_it = j_param.find("boost_from_average");
+    if (bse_it != j_param.cend()) {
+      m["boost_from_average"] = get<String const>(bse_it->second);
+    }
 
     this->Init(m);
 
     std::string str = get<String const>(j_param.at("base_score"));
     from_chars(str.c_str(), str.c_str() + str.size(), base_score);
-
-    // It can only be estimated during the first training, we consider it estimated afterward
-    base_score_estimated = 1;
   }
 
   LearnerModelParamLegacy ByteSwap() const {
@@ -158,7 +181,7 @@ struct LearnerModelParamLegacy : public dmlc::Parameter<LearnerModelParamLegacy>
     dmlc::ByteSwap(&x.major_version, sizeof(x.major_version), 1);
     dmlc::ByteSwap(&x.minor_version, sizeof(x.minor_version), 1);
     dmlc::ByteSwap(&x.num_target, sizeof(x.num_target), 1);
-    dmlc::ByteSwap(&x.base_score_estimated, sizeof(x.base_score_estimated), 1);
+    dmlc::ByteSwap(&x.boost_from_average, sizeof(x.boost_from_average), 1);
     dmlc::ByteSwap(x.reserved, sizeof(x.reserved[0]), sizeof(x.reserved) / sizeof(x.reserved[0]));
     return x;
   }
@@ -166,14 +189,13 @@ struct LearnerModelParamLegacy : public dmlc::Parameter<LearnerModelParamLegacy>
   template <typename Container>
   Args UpdateAllowUnknown(Container const& kwargs) {
     // Detect whether user has made their own base score.
-    if (std::find_if(kwargs.cbegin(), kwargs.cend(),
-                     [](auto const& kv) { return kv.first == "base_score"; }) != kwargs.cend()) {
-      base_score_estimated = true;
-    }
-    if (std::find_if(kwargs.cbegin(), kwargs.cend(), [](auto const& kv) {
-          return kv.first == "base_score_estimated";
-        }) != kwargs.cend()) {
-      LOG(FATAL) << "`base_score_estimated` cannot be specified as hyper-parameter.";
+    auto find_key = [&kwargs](char const* key) {
+      return std::find_if(kwargs.cbegin(), kwargs.cend(),
+                          [key](auto const& kv) { return kv.first == key; });
+    };
+    auto it = find_key("base_score");
+    if (it != kwargs.cend()) {
+      boost_from_average = false;
     }
     return dmlc::Parameter<LearnerModelParamLegacy>::UpdateAllowUnknown(kwargs);
   }
@@ -195,7 +217,9 @@ struct LearnerModelParamLegacy : public dmlc::Parameter<LearnerModelParamLegacy>
         .set_default(1)
         .set_lower_bound(1)
         .describe("Number of target for multi-target regression.");
-    DMLC_DECLARE_FIELD(base_score_estimated).set_default(0);
+    DMLC_DECLARE_FIELD(boost_from_average)
+        .set_default(true)
+        .describe("Whether we should calculate the base score from training data.");
   }
 };
 
@@ -224,7 +248,7 @@ LearnerModelParam::LearnerModelParam(Context const* ctx, LearnerModelParamLegacy
 
 linalg::TensorView<float const, 1> LearnerModelParam::BaseScore(int32_t device) const {
   // multi-class is not yet supported.
-  CHECK_EQ(base_score_.Size(), 1);
+  CHECK_EQ(base_score_.Size(), 1) << ModelNotFitted();
   if (device == Context::kCpuId) {
     // Make sure that we won't run into race condition.
     CHECK(base_score_.Data()->HostCanRead());
@@ -385,6 +409,21 @@ class LearnerConfiguration : public Learner {
   // Initial prediction.
   std::vector<std::string> metric_names_;
 
+  void ConfigureModelParamWithoutBaseScore() {
+    // Convert mparam to learner_model_param
+    this->ConfigureTargets();
+
+    auto task = UsePtr(obj_)->Task();
+    linalg::Tensor<float, 1> base_score({1}, Ctx()->gpu_id);
+    auto h_base_score = base_score.HostView();
+
+    // transform to margin
+    h_base_score(0) = obj_->ProbToMargin(mparam_.base_score);
+    // move it to model param, which is shared with all other components.
+    learner_model_param_ = LearnerModelParam(Ctx(), mparam_, std::move(base_score), task);
+    CHECK(learner_model_param_.Initialized());
+    CHECK_NE(learner_model_param_.BaseScore(Ctx()).Size(), 0);
+  }
   /**
   * \brief Calculate the `base_score` based on input data.
   *
@@ -403,38 +442,24 @@ class LearnerConfiguration : public Learner {
     // - model loaded from new binary or JSON.
     // - model is created from scratch.
     // - model is configured second time due to change of parameter
-    CHECK(obj_);
-    if (!mparam_.base_score_estimated) {
+    if (!learner_model_param_.Initialized()) {
+      this->ConfigureModelParamWithoutBaseScore();
+    }
+    if (mparam_.boost_from_average && !UsePtr(gbm_)->ModelFitted()) {
       if (p_fmat) {
+        auto const& info = p_fmat->Info();
+        info.Validate(Ctx()->gpu_id);
         // We estimate it from input data.
         linalg::Tensor<float, 1> base_score;
-        obj_->InitEstimation(p_fmat->Info(), &base_score);
+        UsePtr(obj_)->InitEstimation(info, &base_score);
         mparam_.base_score = base_score(0);
         CHECK(!std::isnan(mparam_.base_score));
-      } else {
-        mparam_.base_score = ObjFunction::DefaultBaseScore();
       }
-      mparam_.base_score_estimated = true;
       // Update the shared model parameter
-      this->ConfigureModelParam();
+      this->ConfigureModelParamWithoutBaseScore();
     }
-  }
-
-  // Convert mparam to learner_model_param
-  void ConfigureModelParam() {
-    this->ConfigureTargets();
-
-    CHECK(obj_);
-    auto task = obj_->Task();
-    linalg::Tensor<float, 1> base_score({1}, Ctx()->gpu_id);
-    auto h_base_score = base_score.HostView();
-
-    // transform to margin
-    h_base_score(0) = obj_->ProbToMargin(mparam_.base_score);
-    // move it to model param, which is shared with all other components.
-    learner_model_param_ = LearnerModelParam(Ctx(), mparam_, std::move(base_score), task);
-    CHECK(learner_model_param_.Initialized());
-    CHECK_NE(learner_model_param_.BaseScore(Ctx()).Size(), 0);
+    CHECK(!std::isnan(mparam_.base_score));
+    CHECK(!std::isinf(mparam_.base_score));
   }
 
  public:
@@ -496,7 +521,8 @@ class LearnerConfiguration : public Learner {
     learner_model_param_.task = obj_->Task();  // required by gbm configuration.
     this->ConfigureGBM(old_tparam, args);
     ctx_.ConfigureGpuId(this->gbm_->UseGPU());
-    this->ConfigureModelParam();
+
+    this->ConfigureModelParamWithoutBaseScore();
 
     this->ConfigureMetrics(args);
 
@@ -510,8 +536,8 @@ class LearnerConfiguration : public Learner {
   }
 
   void CheckModelInitialized() const {
-    CHECK(learner_model_param_.Initialized()) << "Model not yet initialized.";
-    CHECK_NE(learner_model_param_.BaseScore(this->Ctx()).Size(), 0);
+    CHECK(learner_model_param_.Initialized()) << ModelNotFitted();
+    CHECK_NE(learner_model_param_.BaseScore(this->Ctx()).Size(), 0) << ModelNotFitted();
   }
 
   virtual PredictionContainer* GetPredictionCache() const {
@@ -1318,8 +1344,6 @@ class LearnerImpl : public LearnerIO {
                     HostDeviceVector<GradientPair>* in_gpair) override {
     monitor_.Start("BoostOneIter");
     this->Configure();
-    // Should have been set to default in the first prediction.
-    CHECK(mparam_.base_score_estimated);
 
     if (ctx_.seed_per_iteration) {
       common::GlobalRandom().seed(ctx_.seed * kRandSeedMagic + iter);
@@ -1380,7 +1404,9 @@ class LearnerImpl : public LearnerIO {
                                  static_cast<int>(pred_interactions) +
                                  static_cast<int>(pred_contribs);
     this->Configure();
-    this->InitBaseScore(nullptr);
+    if (training) {
+      this->InitBaseScore(nullptr);
+    }
     this->CheckModelInitialized();
 
     CHECK_LE(multiple_predictions, 1) << "Perform one kind of prediction at a time.";
@@ -1425,7 +1451,6 @@ class LearnerImpl : public LearnerIO {
                       HostDeviceVector<float>** out_preds, uint32_t iteration_begin,
                       uint32_t iteration_end) override {
     this->Configure();
-    this->InitBaseScore(nullptr);
    this->CheckModelInitialized();
 
     auto& out_predictions = this->GetThreadLocal().prediction_entry;
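Taken together, the learner changes pin down the base-score lifecycle: the intercept is estimated once, from the first training data, and only while boost_from_average is active and the booster reports ModelFitted() as false; afterwards it is frozen into the model, and prediction alone never triggers estimation. A sketch of the intended behaviour with the same API the tests below use (Xy and Xy1 stand for two arbitrary training DMatrix objects):

    std::unique_ptr<Learner> learner{Learner::Create({Xy})};
    learner->SetParam("objective", "reg:absoluteerror");
    learner->UpdateOneIter(0, Xy);   // first round: base_score is estimated from Xy
    learner->UpdateOneIter(1, Xy1);  // booster already fitted: base_score stays fixed
    // Predicting with an untrained model simply falls back to
    // ObjFunction::DefaultBaseScore() instead of running the estimation.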
diff --git a/src/objective/regression_obj.cu b/src/objective/regression_obj.cu
index 632a27778a89..9aff1c787180 100644
--- a/src/objective/regression_obj.cu
+++ b/src/objective/regression_obj.cu
@@ -723,10 +723,15 @@ class MeanAbsoluteError : public ObjFunction {
       out(0) = common::Median(ctx_, info.labels, info.weights_) * w;
     }
 
-    // Weighted average base score across all workers
     collective::Allreduce<collective::Operation::kSum>(out.Values().data(), out.Values().size());
     collective::Allreduce<collective::Operation::kSum>(&w, 1);
+
+    if (common::CloseTo(w, 0.0)) {
+      // Mostly for handling empty dataset test.
+      LOG(WARNING) << "Sum of weights is close to 0.0, skipping base score estimation.";
+      out(0) = ObjFunction::DefaultBaseScore();
+      return;
+    }
     std::transform(linalg::cbegin(out), linalg::cend(out), linalg::begin(out),
                    [w](float v) { return v / w; });
   }
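The guard protects the MAE intercept against a zero weight sum after the allreduce: on an empty or all-zero-weight shard the subsequent division would turn the base score into inf/NaN. A self-contained sketch of the idea; the epsilon and the signature used here are assumptions, not the actual definition of common::CloseTo:

    #include <cmath>

    // Hypothetical stand-in for common::CloseTo.
    inline bool CloseToSketch(double a, double b, double eps = 1e-15) {
      return std::abs(a - b) <= eps;
    }

    // Weighted mean that falls back to a default instead of dividing by ~0.
    inline float SafeWeightedMean(float sum, double weight, float fallback) {
      return CloseToSketch(weight, 0.0) ? fallback : static_cast<float>(sum / weight);
    }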
+ learner->SetParam("boost_from_average", "1"); + learner->Configure(); + learner->SaveConfig(&config); + from_avg = std::stoi( + get(config["learner"]["learner_model_param"]["boost_from_average"])); + ASSERT_EQ(from_avg, 1); } - { - std::unique_ptr learner{Learner::Create({Xy})}; + void TestInitAfterLoad() { + std::unique_ptr learner{Learner::Create({Xy_})}; learner->SetParam("objective", "reg:absoluteerror"); - learner->UpdateOneIter(0, Xy); + learner->Configure(); + + Json model{Object{}}; + learner->SaveModel(&model); + auto base_score = GetBaseScore(model); + ASSERT_EQ(base_score, ObjFunction::DefaultBaseScore()); + learner.reset(Learner::Create({Xy_})); + learner->LoadModel(model); + Json config(Object{}); + learner->Configure(); + learner->SaveConfig(&config); + base_score = GetBaseScore(config); + ASSERT_EQ(base_score, ObjFunction::DefaultBaseScore()); + + learner->UpdateOneIter(0, Xy_); + learner->SaveConfig(&config); + base_score = GetBaseScore(config); + ASSERT_NE(base_score, ObjFunction::DefaultBaseScore()); + } + + void TestInitWithPredt() { + std::unique_ptr learner{Learner::Create({Xy_})}; + learner->SetParam("objective", "reg:absoluteerror"); HostDeviceVector predt; - learner->Predict(Xy, false, &predt, 0, 0); + learner->Predict(Xy_, false, &predt, 0, 0); + auto h_predt = predt.ConstHostSpan(); for (auto v : h_predt) { - ASSERT_NE(v, ObjFunction::DefaultBaseScore()); + ASSERT_EQ(v, ObjFunction::DefaultBaseScore()); } - Json config{Object{}}; + Json config(Object{}); learner->SaveConfig(&config); - auto base_score = - std::stof(get(config["learner"]["learner_model_param"]["base_score"])); - ASSERT_NE(base_score, ObjFunction::DefaultBaseScore()); + auto base_score = GetBaseScore(config); + ASSERT_EQ(base_score, ObjFunction::DefaultBaseScore()); - ASSERT_THROW( - { - learner->SetParam("base_score_estimated", "1"); - learner->Configure(); - }, - dmlc::Error); + // since prediction is not used for trianing, the train procedure still runs estimation + learner->UpdateOneIter(0, Xy_); + learner->SaveConfig(&config); + base_score = GetBaseScore(config); + ASSERT_NE(base_score, ObjFunction::DefaultBaseScore()); } - { - std::unique_ptr learner{Learner::Create({Xy})}; + void TestUpdateProcess() { + // Check that when training continuation is performed with update, the base score is + // not re-evaluated. 
+    std::unique_ptr<Learner> learner{Learner::Create({Xy_})};
     learner->SetParam("objective", "reg:absoluteerror");
-    learner->SetParam("base_score", "1.3");
     learner->Configure();
-    HostDeviceVector<float> predt;
-    learner->Predict(Xy, false, &predt, 0, 0);
-    auto h_predt = predt.ConstHostSpan();
-    for (auto v : h_predt) {
-      ASSERT_FLOAT_EQ(v, 1.3);
-    }
-    learner->UpdateOneIter(0, Xy);
-    Json config{Object{}};
+
+    learner->UpdateOneIter(0, Xy_);
+    Json model{Object{}};
+    learner->SaveModel(&model);
+    auto base_score = GetBaseScore(model);
+
+    auto Xy1 = RandomDataGenerator{100, Cols(), 0}.Seed(321).GenerateDMatrix(true);
+    learner.reset(Learner::Create({Xy1}));
+    learner->LoadModel(model);
+    learner->SetParam("process_type", "update");
+    learner->SetParam("updater", "refresh");
+    learner->UpdateOneIter(1, Xy1);
+
+    Json config(Object{});
     learner->SaveConfig(&config);
-    auto base_score =
-        std::stof(get<String const>(config["learner"]["learner_model_param"]["base_score"]));
-    // no change
-    ASSERT_FLOAT_EQ(base_score, 1.3);
+    auto base_score1 = GetBaseScore(config);
+    ASSERT_EQ(base_score, base_score1);
   }
-}
+};
+
+TEST_F(InitBaseScore, TestUpdateConfig) { this->TestUpdateConfig(); }
+
+TEST_F(InitBaseScore, FromAvgParam) { this->TestBoostFromAvgParam(); }
+
+TEST_F(InitBaseScore, InitAfterLoad) { this->TestInitAfterLoad(); }
+
+TEST_F(InitBaseScore, InitWithPredict) { this->TestInitWithPredt(); }
+
+TEST_F(InitBaseScore, UpdateProcess) { this->TestUpdateProcess(); }
 }  // namespace xgboost
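The fixture replaces the single TEST(Learner, InitEstimation) with five focused cases. Assuming the standard CMake build of the C++ test suite (the binary name testxgboost is the usual layout, not something introduced by this patch), they can be run in isolation with gtest's filter:

    ./testxgboost --gtest_filter='InitBaseScore.*'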