Reduce compiler warnings on CPU-only build. (#8483)
trivialfis committed Nov 28, 2022
1 parent d666ba7 commit 3fc1046
Showing 12 changed files with 71 additions and 85 deletions.
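
Most of the changes below target two warning sources in CPU-only builds: unused-parameter warnings from stub definitions whose bodies only call common::AssertGPUSupport(), fixed by leaving the parameters unnamed, and unqualified size_t/int32_t spellings, replaced by std::size_t/std::int32_t. The following is a rough, self-contained sketch of the first pattern; the XGBOOST_USE_CUDA guard mirrors the diff, but the function name and the stub body are invented for illustration.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Stand-in for common::AssertGPUSupport(), which reports a fatal error.
void AssertGPUSupport() {
  std::fprintf(stderr, "XGBoost is not compiled with CUDA.\n");
  std::abort();
}

#if defined(XGBOOST_USE_CUDA)
// GPU build: the real definition lives in a .cu translation unit.
void PushToDevice(int device, float const *data, std::size_t n);
#else
// CPU-only build: unnamed parameters keep -Wunused-parameter quiet,
// because the stub never touches them.
void PushToDevice(int, float const *, std::size_t) { AssertGPUSupport(); }
#endif

int main() {
  float values[3] = {1.0f, 2.0f, 3.0f};
  PushToDevice(0, values, 3);  // aborts on a CPU-only build
}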
2 changes: 1 addition & 1 deletion include/xgboost/data.h
@@ -417,7 +417,7 @@ class EllpackPage {
 size_t Size() const;
 
 /*! \brief Set the base row id for this page. */
-void SetBaseRowId(size_t row_id);
+void SetBaseRowId(std::size_t row_id);
 
 const EllpackPageImpl* Impl() const { return impl_.get(); }
 EllpackPageImpl* Impl() { return impl_.get(); }
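The data.h change above is the second recurring fix: size_t becomes std::size_t. Only the std::-qualified names are guaranteed to be declared by <cstddef> and <cstdint>; whether the unqualified global names exist is implementation-specific, so qualifying them keeps stricter toolchains quiet. A tiny illustrative sketch, not code from the repository:

#include <cstddef>  // declares std::size_t
#include <cstdint>  // declares std::int32_t, std::uint32_t
#include <cstdio>

// The std::-qualified spellings are portable; ::size_t is only sometimes
// injected into the global namespace by the C headers.
std::size_t BufferBytes(std::int32_t n_rows, std::int32_t n_cols) {
  return static_cast<std::size_t>(n_rows) * static_cast<std::size_t>(n_cols) * sizeof(float);
}

int main() { std::printf("%zu\n", BufferBytes(128, 16)); }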
4 changes: 1 addition & 3 deletions src/data/data.cc
@@ -734,9 +734,7 @@ void MetaInfo::Validate(int32_t device) const {
 }
 
 #if !defined(XGBOOST_USE_CUDA)
-void MetaInfo::SetInfoFromCUDA(Context const& ctx, StringView key, Json arr) {
-common::AssertGPUSupport();
-}
+void MetaInfo::SetInfoFromCUDA(Context const&, StringView, Json) { common::AssertGPUSupport(); }
 #endif // !defined(XGBOOST_USE_CUDA)
 
 using DMatrixThreadLocal =
4 changes: 2 additions & 2 deletions src/data/ellpack_page.cc
@@ -12,7 +12,7 @@ class EllpackPageImpl {};
 
 EllpackPage::EllpackPage() = default;
 
-EllpackPage::EllpackPage(DMatrix* dmat, const BatchParam& param) {
+EllpackPage::EllpackPage(DMatrix*, const BatchParam&) {
 LOG(FATAL) << "Internal Error: XGBoost is not compiled with CUDA but "
 "EllpackPage is required";
 }
@@ -22,7 +22,7 @@ EllpackPage::~EllpackPage() {
 "EllpackPage is required";
 }
 
-void EllpackPage::SetBaseRowId(size_t row_id) {
+void EllpackPage::SetBaseRowId(std::size_t) {
 LOG(FATAL) << "Internal Error: XGBoost is not compiled with CUDA but "
 "EllpackPage is required";
 }
2 changes: 1 addition & 1 deletion src/data/ellpack_page.cu
@@ -25,7 +25,7 @@ EllpackPage::EllpackPage(EllpackPage&& that) { std::swap(impl_, that.impl_); }
 
 size_t EllpackPage::Size() const { return impl_->Size(); }
 
-void EllpackPage::SetBaseRowId(size_t row_id) { impl_->SetBaseRowId(row_id); }
+void EllpackPage::SetBaseRowId(std::size_t row_id) { impl_->SetBaseRowId(row_id); }
 
 // Bin each input data entry, store the bin indices in compressed form.
 __global__ void CompressBinEllpackKernel(
2 changes: 1 addition & 1 deletion src/data/ellpack_page.cuh
@@ -190,7 +190,7 @@ class EllpackPageImpl {
 size_t Size() const;
 
 /*! \brief Set the base row id for this page. */
-void SetBaseRowId(size_t row_id) {
+void SetBaseRowId(std::size_t row_id) {
 base_rowid = row_id;
 }
 
6 changes: 3 additions & 3 deletions src/data/sparse_page_dmatrix.cc
@@ -19,7 +19,7 @@ const MetaInfo &SparsePageDMatrix::Info() const { return info_; }
 
 namespace detail {
 // Use device dispatch
-size_t NSamplesDevice(DMatrixProxy *proxy)
+std::size_t NSamplesDevice(DMatrixProxy *)
 #if defined(XGBOOST_USE_CUDA)
 ; // NOLINT
 #else
@@ -28,7 +28,7 @@ size_t NSamplesDevice(DMatrixProxy *proxy)
 return 0;
 }
 #endif
-size_t NFeaturesDevice(DMatrixProxy *proxy)
+std::size_t NFeaturesDevice(DMatrixProxy *)
 #if defined(XGBOOST_USE_CUDA)
 ; // NOLINT
 #else
@@ -189,7 +189,7 @@ BatchSet<GHistIndexMatrix> SparsePageDMatrix::GetGradientIndex(const BatchParam
 }
 
 #if !defined(XGBOOST_USE_CUDA)
-BatchSet<EllpackPage> SparsePageDMatrix::GetEllpackBatches(const BatchParam& param) {
+BatchSet<EllpackPage> SparsePageDMatrix::GetEllpackBatches(const BatchParam &) {
 common::AssertGPUSupport();
 auto begin_iter = BatchIterator<EllpackPage>(ellpack_page_source_);
 return BatchSet<EllpackPage>(BatchIterator<EllpackPage>(begin_iter));
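sparse_page_dmatrix.cc above (and gbtree.cc below) rely on a compact trick: the signature is written once, and the preprocessor decides whether it is followed by a bare ';' (CUDA build, with the definition supplied from a .cu file) or by an inline CPU fallback body. A condensed, self-contained sketch of that shape follows; the function name mirrors the diff, but the proxy type and the assertion stub are simplified placeholders.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

struct DMatrixProxy {};  // placeholder for the real proxy type

void AssertGPUSupport() {  // placeholder for common::AssertGPUSupport()
  std::fprintf(stderr, "XGBoost is not compiled with CUDA.\n");
  std::abort();
}

// One signature, then either a declaration (GPU build) or a stub definition
// (CPU-only build).
std::size_t NSamplesDevice(DMatrixProxy *)
#if defined(XGBOOST_USE_CUDA)
    ;  // NOLINT  (the definition comes from a .cu translation unit)
#else
{
  AssertGPUSupport();
  return 0;
}
#endif

int main() {
  DMatrixProxy proxy;
  return static_cast<int>(NSamplesDevice(&proxy));  // aborts on CPU-only builds
}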
4 changes: 2 additions & 2 deletions src/data/sparse_page_source.cu
@@ -9,11 +9,11 @@ namespace xgboost {
 namespace data {
 
 namespace detail {
-size_t NSamplesDevice(DMatrixProxy *proxy) {
+std::size_t NSamplesDevice(DMatrixProxy *proxy) {
 return Dispatch(proxy, [](auto const &value) { return value.NumRows(); });
 }
 
-size_t NFeaturesDevice(DMatrixProxy *proxy) {
+std::size_t NFeaturesDevice(DMatrixProxy *proxy) {
 return Dispatch(proxy, [](auto const &value) { return value.NumCols(); });
 }
 } // namespace detail
14 changes: 6 additions & 8 deletions src/gbm/gbtree.cc
@@ -191,11 +191,10 @@ void GBTree::ConfigureUpdaters() {
 }
 }
 
-void GPUCopyGradient(HostDeviceVector<GradientPair> const *in_gpair,
-bst_group_t n_groups, bst_group_t group_id,
-HostDeviceVector<GradientPair> *out_gpair)
+void GPUCopyGradient(HostDeviceVector<GradientPair> const*, bst_group_t, bst_group_t,
+HostDeviceVector<GradientPair>*)
 #if defined(XGBOOST_USE_CUDA)
-; // NOLINT
+; // NOLINT
 #else
 {
 common::AssertGPUSupport();
@@ -627,11 +626,10 @@ GBTree::GetPredictor(HostDeviceVector<float> const *out_pred,
 * \param predts Prediction for current tree.
 * \param tree_w Tree weight.
 */
-void GPUDartPredictInc(common::Span<float> out_predts,
-common::Span<float> predts, float tree_w, size_t n_rows,
-bst_group_t n_groups, bst_group_t group)
+void GPUDartPredictInc(common::Span<float>, common::Span<float>, float, size_t, bst_group_t,
+bst_group_t)
 #if defined(XGBOOST_USE_CUDA)
-; // NOLINT
+; // NOLINT
 #else
 {
 common::AssertGPUSupport();
1 change: 1 addition & 0 deletions src/learner.cc
@@ -343,6 +343,7 @@ void GenericParameter::ConfigureGpuId(bool require_gpu) {
 #else
 // Just set it to CPU, don't think about it.
 this->UpdateAllowUnknown(Args{{"gpu_id", std::to_string(kCpuId)}});
+(void)(require_gpu);
 #endif // defined(XGBOOST_USE_CUDA)
 
 common::SetDevice(this->gpu_id);
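learner.cc takes a different route for ConfigureGpuId: require_gpu is only consulted in the CUDA branch, so the CPU-only branch evaluates (void)(require_gpu); to mark the parameter as deliberately unused. A tiny sketch of the same idiom; the function body and messages here are illustrative, not the repository's code.

#include <cstdio>

void ConfigureGpuId(bool require_gpu) {
#if defined(XGBOOST_USE_CUDA)
  if (require_gpu) {
    std::puts("selecting a CUDA device");
  }
#else
  // CPU-only build: the parameter is not needed, so cast it to void
  // rather than leaving an unused-parameter warning behind.
  std::puts("CPU-only build: gpu_id stays at kCpuId");
  (void)(require_gpu);
#endif
}

int main() { ConfigureGpuId(true); }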
37 changes: 15 additions & 22 deletions src/metric/auc.cc
@@ -390,24 +390,21 @@ XGBOOST_REGISTER_METRIC(EvalAUC, "auc")
 .set_body([](const char*) { return new EvalROCAUC(); });
 
 #if !defined(XGBOOST_USE_CUDA)
-std::tuple<double, double, double>
-GPUBinaryROCAUC(common::Span<float const> predts, MetaInfo const &info,
-int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) {
+std::tuple<double, double, double> GPUBinaryROCAUC(common::Span<float const>, MetaInfo const &,
+std::int32_t,
+std::shared_ptr<DeviceAUCCache> *) {
 common::AssertGPUSupport();
 return {};
 }
 
-double GPUMultiClassROCAUC(common::Span<float const> predts,
-MetaInfo const &info, int32_t device,
-std::shared_ptr<DeviceAUCCache> *cache,
-size_t n_classes) {
+double GPUMultiClassROCAUC(common::Span<float const>, MetaInfo const &, std::int32_t,
+std::shared_ptr<DeviceAUCCache> *, std::size_t) {
 common::AssertGPUSupport();
 return 0.0;
 }
 
-std::pair<double, uint32_t>
-GPURankingAUC(common::Span<float const> predts, MetaInfo const &info,
-int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) {
+std::pair<double, std::uint32_t> GPURankingAUC(common::Span<float const>, MetaInfo const &,
+std::int32_t, std::shared_ptr<DeviceAUCCache> *) {
 common::AssertGPUSupport();
 return {};
 }
@@ -432,8 +429,8 @@ class EvalPRAUC : public EvalAUC<EvalPRAUC> {
 return std::make_tuple(pr, re, auc);
 }
 
-double EvalMultiClass(HostDeviceVector<float> const &predts,
-MetaInfo const &info, size_t n_classes) {
+double EvalMultiClass(HostDeviceVector<float> const &predts, MetaInfo const &info,
+size_t n_classes) {
 if (tparam_->gpu_id == GenericParameter::kCpuId) {
 auto n_threads = this->tparam_->Threads();
 return MultiClassOVR(predts.ConstHostSpan(), info, n_classes, n_threads,
@@ -472,24 +469,20 @@ XGBOOST_REGISTER_METRIC(AUCPR, "aucpr")
 .set_body([](char const *) { return new EvalPRAUC{}; });
 
 #if !defined(XGBOOST_USE_CUDA)
-std::tuple<double, double, double>
-GPUBinaryPRAUC(common::Span<float const> predts, MetaInfo const &info,
-int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) {
+std::tuple<double, double, double> GPUBinaryPRAUC(common::Span<float const>, MetaInfo const &,
+std::int32_t, std::shared_ptr<DeviceAUCCache> *) {
 common::AssertGPUSupport();
 return {};
 }
 
-double GPUMultiClassPRAUC(common::Span<float const> predts,
-MetaInfo const &info, int32_t device,
-std::shared_ptr<DeviceAUCCache> *cache,
-size_t n_classes) {
+double GPUMultiClassPRAUC(common::Span<float const>, MetaInfo const &, std::int32_t,
+std::shared_ptr<DeviceAUCCache> *, std::size_t) {
 common::AssertGPUSupport();
 return {};
 }
 
-std::pair<double, uint32_t>
-GPURankingPRAUC(common::Span<float const> predts, MetaInfo const &info,
-int32_t device, std::shared_ptr<DeviceAUCCache> *cache) {
+std::pair<double, std::uint32_t> GPURankingPRAUC(common::Span<float const>, MetaInfo const &,
+std::int32_t, std::shared_ptr<DeviceAUCCache> *) {
 common::AssertGPUSupport();
 return {};
 }
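One more detail of the CPU fallbacks above: common::AssertGPUSupport() never returns control in practice, but the compiler cannot assume that, so each stub still ends with 'return {};' or 'return 0.0;' to keep every control path returning a value. A reduced sketch of that shape; the function name and signature are simplified stand-ins, not the real declarations.

#include <cstdio>
#include <cstdlib>
#include <tuple>

void AssertGPUSupport() {  // stand-in for common::AssertGPUSupport()
  std::fprintf(stderr, "XGBoost is not compiled with CUDA.\n");
  std::abort();
}

#if !defined(XGBOOST_USE_CUDA)
// CPU-only stub: parameters stay unnamed, and a value-initialized result is
// returned so the function still has a return on every path.
std::tuple<double, double, double> GPUBinaryROCAUCStub(float const *, int) {
  AssertGPUSupport();
  return {};
}
#endif

int main() {
  auto result = GPUBinaryROCAUCStub(nullptr, 0);  // aborts before returning
  (void)result;
}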
38 changes: 18 additions & 20 deletions src/metric/auc.cu
@@ -162,9 +162,9 @@ GPUBinaryAUC(common::Span<float const> predts, MetaInfo const &info,
 return std::make_tuple(last.first, last.second, auc);
 }
 
-std::tuple<double, double, double>
-GPUBinaryROCAUC(common::Span<float const> predts, MetaInfo const &info,
-int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) {
+std::tuple<double, double, double> GPUBinaryROCAUC(common::Span<float const> predts,
+MetaInfo const &info, std::int32_t device,
+std::shared_ptr<DeviceAUCCache> *p_cache) {
 auto &cache = *p_cache;
 InitCacheOnce<false>(predts, p_cache);
 
@@ -451,10 +451,9 @@ void MultiClassSortedIdx(common::Span<float const> predts,
 dh::SegmentedArgSort<false>(d_predts_t, d_class_ptr, d_sorted_idx);
 }
 
-double GPUMultiClassROCAUC(common::Span<float const> predts,
-MetaInfo const &info, int32_t device,
-std::shared_ptr<DeviceAUCCache> *p_cache,
-size_t n_classes) {
+double GPUMultiClassROCAUC(common::Span<float const> predts, MetaInfo const &info,
+std::int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache,
+std::size_t n_classes) {
 auto& cache = *p_cache;
 InitCacheOnce<true>(predts, p_cache);
 
@@ -480,9 +479,9 @@ struct RankScanItem {
 };
 } // anonymous namespace
 
-std::pair<double, uint32_t>
-GPURankingAUC(common::Span<float const> predts, MetaInfo const &info,
-int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) {
+std::pair<double, std::uint32_t> GPURankingAUC(common::Span<float const> predts,
+MetaInfo const &info, std::int32_t device,
+std::shared_ptr<DeviceAUCCache> *p_cache) {
 auto& cache = *p_cache;
 InitCacheOnce<false>(predts, p_cache);
 
@@ -600,9 +599,9 @@ GPURankingAUC(common::Span<float const> predts, MetaInfo const &info,
 return std::make_pair(auc, n_valid);
 }
 
-std::tuple<double, double, double>
-GPUBinaryPRAUC(common::Span<float const> predts, MetaInfo const &info,
-int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) {
+std::tuple<double, double, double> GPUBinaryPRAUC(common::Span<float const> predts,
+MetaInfo const &info, std::int32_t device,
+std::shared_ptr<DeviceAUCCache> *p_cache) {
 auto& cache = *p_cache;
 InitCacheOnce<false>(predts, p_cache);
 
@@ -640,10 +639,9 @@ GPUBinaryPRAUC(common::Span<float const> predts, MetaInfo const &info,
 return std::make_tuple(1.0, 1.0, auc);
 }
 
-double GPUMultiClassPRAUC(common::Span<float const> predts,
-MetaInfo const &info, int32_t device,
-std::shared_ptr<DeviceAUCCache> *p_cache,
-size_t n_classes) {
+double GPUMultiClassPRAUC(common::Span<float const> predts, MetaInfo const &info,
+std::int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache,
+std::size_t n_classes) {
 auto& cache = *p_cache;
 InitCacheOnce<true>(predts, p_cache);
 
@@ -816,9 +814,9 @@ GPURankingPRAUCImpl(common::Span<float const> predts, MetaInfo const &info,
 return std::make_pair(auc, n_groups - invalid_groups);
 }
 
-std::pair<double, uint32_t>
-GPURankingPRAUC(common::Span<float const> predts, MetaInfo const &info,
-int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) {
+std::pair<double, std::uint32_t> GPURankingPRAUC(common::Span<float const> predts,
+MetaInfo const &info, std::int32_t device,
+std::shared_ptr<DeviceAUCCache> *p_cache) {
 dh::safe_cuda(cudaSetDevice(device));
 if (predts.empty()) {
 return std::make_pair(0.0, static_cast<uint32_t>(0));
42 changes: 20 additions & 22 deletions src/metric/auc.h
@@ -29,34 +29,32 @@ XGBOOST_DEVICE inline double TrapezoidArea(double x0, double x1, double y0, doub
 
 struct DeviceAUCCache;
 
-std::tuple<double, double, double>
-GPUBinaryROCAUC(common::Span<float const> predts, MetaInfo const &info,
-int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache);
+std::tuple<double, double, double> GPUBinaryROCAUC(common::Span<float const> predts,
+MetaInfo const &info, std::int32_t device,
+std::shared_ptr<DeviceAUCCache> *p_cache);
 
-double GPUMultiClassROCAUC(common::Span<float const> predts,
-MetaInfo const &info, int32_t device,
-std::shared_ptr<DeviceAUCCache> *cache,
-size_t n_classes);
+double GPUMultiClassROCAUC(common::Span<float const> predts, MetaInfo const &info,
+std::int32_t device, std::shared_ptr<DeviceAUCCache> *cache,
+std::size_t n_classes);
 
-std::pair<double, uint32_t>
-GPURankingAUC(common::Span<float const> predts, MetaInfo const &info,
-int32_t device, std::shared_ptr<DeviceAUCCache> *cache);
+std::pair<double, std::uint32_t> GPURankingAUC(common::Span<float const> predts,
+MetaInfo const &info, std::int32_t device,
+std::shared_ptr<DeviceAUCCache> *cache);
 
 /**********
 * PR AUC *
 **********/
-std::tuple<double, double, double>
-GPUBinaryPRAUC(common::Span<float const> predts, MetaInfo const &info,
-int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache);
-
-double GPUMultiClassPRAUC(common::Span<float const> predts,
-MetaInfo const &info, int32_t device,
-std::shared_ptr<DeviceAUCCache> *cache,
-size_t n_classes);
-
-std::pair<double, uint32_t>
-GPURankingPRAUC(common::Span<float const> predts, MetaInfo const &info,
-int32_t device, std::shared_ptr<DeviceAUCCache> *cache);
+std::tuple<double, double, double> GPUBinaryPRAUC(common::Span<float const> predts,
+MetaInfo const &info, std::int32_t device,
+std::shared_ptr<DeviceAUCCache> *p_cache);
+
+double GPUMultiClassPRAUC(common::Span<float const> predts, MetaInfo const &info,
+std::int32_t device, std::shared_ptr<DeviceAUCCache> *cache,
+std::size_t n_classes);
+
+std::pair<double, std::uint32_t> GPURankingPRAUC(common::Span<float const> predts,
+MetaInfo const &info, std::int32_t device,
+std::shared_ptr<DeviceAUCCache> *cache);
 
 namespace detail {
 XGBOOST_DEVICE inline double CalcH(double fp_a, double fp_b, double tp_a,
