Skip to content

Commit

Permalink
cleanup.
Browse files Browse the repository at this point in the history
  • Loading branch information
trivialfis committed Sep 14, 2022
1 parent e176289 commit b716145
Show file tree
Hide file tree
Showing 3 changed files with 16 additions and 16 deletions.
6 changes: 2 additions & 4 deletions include/xgboost/learner.h
Expand Up @@ -8,16 +8,14 @@
#ifndef XGBOOST_LEARNER_H_
#define XGBOOST_LEARNER_H_

#include <dmlc/any.h>
#include <xgboost/base.h>
#include <xgboost/feature_map.h>
#include <xgboost/generic_parameters.h>
#include <xgboost/generic_parameters.h> // Context
#include <xgboost/host_device_vector.h>
#include <xgboost/model.h>
#include <xgboost/predictor.h>
#include <xgboost/task.h>

#include <limits>  // std::numeric_limits
#include <map>
#include <memory>
#include <string>
Expand Down Expand Up @@ -275,7 +273,7 @@ class Learner : public Model, public Configurable, public dmlc::Serializable {
/**
* \brief Return the context object of this Booster.
*/
virtual GenericParameter const* Ctx() const = 0;
virtual Context const* Ctx() const = 0;
/*!
* \brief Get configuration arguments currently stored by the learner
* \return Key-value pairs representing configuration arguments
Expand Down
9 changes: 5 additions & 4 deletions src/common/linalg_op.h
Expand Up @@ -4,6 +4,7 @@
#ifndef XGBOOST_COMMON_LINALG_OP_H_
#define XGBOOST_COMMON_LINALG_OP_H_
#include <type_traits>
#include <cstdint> // std::int32_t

#include "common.h"
#include "threading_utils.h"
Expand Down Expand Up @@ -60,27 +61,27 @@ void ElementWiseKernel(GenericParameter const* ctx, linalg::TensorView<T, D> t,
}
#endif // !defined(XGBOOST_USE_CUDA)

template <typename T, int32_t kDim>
template <typename T, std::int32_t kDim>
auto cbegin(TensorView<T, kDim> v) { // NOLINT
auto it = common::MakeIndexTransformIter([&](size_t i) -> std::remove_cv_t<T> const& {
return linalg::detail::Apply(v, linalg::UnravelIndex(i, v.Shape()));
});
return it;
}

template <typename T, int32_t kDim>
template <typename T, std::int32_t kDim>
auto cend(TensorView<T, kDim> v) { // NOLINT
return cbegin(v) + v.Size();
}

template <typename T, int32_t kDim>
template <typename T, std::int32_t kDim>
auto begin(TensorView<T, kDim> v) { // NOLINT
auto it = common::MakeIndexTransformIter(
[&](size_t i) -> T& { return linalg::detail::Apply(v, linalg::UnravelIndex(i, v.Shape())); });
return it;
}

template <typename T, int32_t kDim>
template <typename T, std::int32_t kDim>
auto end(TensorView<T, kDim> v) { // NOLINT
return begin(v) + v.Size();
}
Expand Down
17 changes: 9 additions & 8 deletions src/learner.cc
Expand Up @@ -6,14 +6,15 @@
*/
#include "xgboost/learner.h"

#include <dmlc/any.h>
#include <dmlc/io.h>
#include <dmlc/parameter.h>
#include <dmlc/thread_local.h>

#include <algorithm>
#include <atomic>
#include <iomanip>
#include <limits>
#include <limits> // std::numeric_limits
#include <memory>
#include <mutex>
#include <sstream>
Expand All @@ -31,7 +32,6 @@
#include "common/threading_utils.h"
#include "common/timer.h"
#include "common/version.h"
#include "dmlc/any.h"
#include "xgboost/base.h"
#include "xgboost/c_api.h"
#include "xgboost/data.h"
Expand Down Expand Up @@ -180,7 +180,7 @@ struct LearnerModelParamLegacy : public dmlc::Parameter<LearnerModelParamLegacy>
// declare parameters
DMLC_DECLARE_PARAMETER(LearnerModelParamLegacy) {
DMLC_DECLARE_FIELD(base_score)
.set_default(std::numeric_limits<float>::quiet_NaN())
.set_default(ObjFunction::DefaultBaseScore())
.describe("Global bias of the model.");
DMLC_DECLARE_FIELD(num_feature)
.set_default(0)
Expand Down Expand Up @@ -252,7 +252,8 @@ void LearnerModelParam::Copy(LearnerModelParam const& that) {
CHECK(base_score_.Data()->HostCanRead());

num_feature = that.num_feature;
num_output_group = that.num_output_group, task = that.task;
num_output_group = that.num_output_group;
task = that.task;
}

struct LearnerTrainParam : public XGBoostParameter<LearnerTrainParam> {
Expand Down Expand Up @@ -422,15 +423,15 @@ class LearnerConfiguration : public Learner {

CHECK(obj_);
auto task = obj_->Task();
linalg::Tensor<float, 1> base_score({1}, ctx_.gpu_id);
linalg::Tensor<float, 1> base_score({1}, Ctx()->gpu_id);
auto h_base_score = base_score.HostView();

// transform to margin
h_base_score(0) = obj_->ProbToMargin(mparam_.base_score);
// move it to model param, which is shared with all other components.
learner_model_param_ = LearnerModelParam(&ctx_, mparam_, std::move(base_score), task);
learner_model_param_ = LearnerModelParam(Ctx(), mparam_, std::move(base_score), task);
CHECK(learner_model_param_.Initialized());
CHECK_NE(learner_model_param_.BaseScore(&ctx_).Size(), 0);
CHECK_NE(learner_model_param_.BaseScore(Ctx()).Size(), 0);
}

public:
Expand Down Expand Up @@ -665,7 +666,7 @@ class LearnerConfiguration : public Learner {
return cfg_;
}

GenericParameter const* Ctx() const override { return &ctx_; }
Context const* Ctx() const override { return &ctx_; }

private:
void ValidateParameters() {
Expand Down

0 comments on commit b716145

Please sign in to comment.