From 794cbaa60ac3db2ceb369253d70e2c382c762140 Mon Sep 17 00:00:00 2001 From: Rory Mitchell Date: Tue, 5 Jul 2022 10:24:31 +0200 Subject: [PATCH] Fuse split evaluation kernels (#8026) --- src/tree/gpu_hist/evaluate_splits.cu | 251 ++++++++---------- src/tree/gpu_hist/evaluate_splits.cuh | 88 +++--- src/tree/gpu_hist/evaluator.cu | 37 +-- src/tree/updater_gpu_hist.cu | 114 ++++---- .../cpp/tree/gpu_hist/test_evaluate_splits.cu | 128 ++++----- tests/python-gpu/test_gpu_with_dask.py | 6 +- 6 files changed, 309 insertions(+), 315 deletions(-) diff --git a/src/tree/gpu_hist/evaluate_splits.cu b/src/tree/gpu_hist/evaluate_splits.cu index 5261c1b6ae03..249a2aca7834 100644 --- a/src/tree/gpu_hist/evaluate_splits.cu +++ b/src/tree/gpu_hist/evaluate_splits.cu @@ -2,6 +2,7 @@ * Copyright 2020-2022 by XGBoost Contributors */ #include // std::max +#include #include #include "../../common/categorical.h" @@ -22,11 +23,10 @@ XGBOOST_DEVICE float LossChangeMissing(const GradientPairPrecise &scan, TreeEvaluator::SplitEvaluator evaluator, bool &missing_left_out) { // NOLINT float parent_gain = CalcGain(param, parent_sum); - float missing_left_gain = - evaluator.CalcSplitGain(param, nidx, fidx, GradStats(scan + missing), - GradStats(parent_sum - (scan + missing))); - float missing_right_gain = evaluator.CalcSplitGain( - param, nidx, fidx, GradStats(scan), GradStats(parent_sum - scan)); + float missing_left_gain = evaluator.CalcSplitGain(param, nidx, fidx, GradStats(scan + missing), + GradStats(parent_sum - (scan + missing))); + float missing_right_gain = + evaluator.CalcSplitGain(param, nidx, fidx, GradStats(scan), GradStats(parent_sum - scan)); if (missing_left_gain > missing_right_gain) { missing_left_out = true; @@ -47,13 +47,11 @@ XGBOOST_DEVICE float LossChangeMissing(const GradientPairPrecise &scan, * \param end * \param temp_storage Shared memory for intermediate result. */ -template -__device__ GradientSumT -ReduceFeature(common::Span feature_histogram, - TempStorageT* temp_storage) { +template +__device__ GradientSumT ReduceFeature(common::Span feature_histogram, + TempStorageT *temp_storage) { __shared__ cub::Uninitialized uninitialized_sum; - GradientSumT& shared_sum = uninitialized_sum.Alias(); + GradientSumT &shared_sum = uninitialized_sum.Alias(); GradientSumT local_sum = GradientSumT(); // For loop sums features into one block size @@ -78,16 +76,15 @@ ReduceFeature(common::Span feature_histogram, template __device__ void EvaluateFeature( - int fidx, EvaluateSplitInputs inputs, + int fidx, const EvaluateSplitInputs &inputs, const EvaluateSplitSharedInputs &shared_inputs, TreeEvaluator::SplitEvaluator evaluator, common::Span sorted_idx, size_t offset, DeviceSplitCandidate *best_split, // shared memory storing best split TempStorageT *temp_storage // temp memory for cub operations ) { // Use pointer from cut to indicate begin and end of bins for each feature. 
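// [Editor's sketch, not part of the patch] These "cut pointers" are CSC-style
// offsets: feature_segments holds n_features + 1 monotone values, and feature
// fidx owns histogram bins [feature_segments[fidx], feature_segments[fidx + 1]).
// The hunk below only re-points the lookup from the per-node `inputs` to the new
// `shared_inputs`, since the cut matrix is the same for every node. A
// hypothetical standalone illustration of the lookup (names are not from this
// codebase):
#include <cstdint>
#include <utility>
#include <vector>
std::pair<uint32_t, uint32_t> FeatureBinRange(const std::vector<uint32_t> &feature_segments,
                                              int fidx) {
  // begin and one-past-the-end histogram bin owned by the fidx-th feature
  return {feature_segments[fidx], feature_segments[fidx + 1]};
}
// e.g. with feature_segments = {0, 4, 9}, FeatureBinRange(segs, 1) == {4, 9}.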
- uint32_t gidx_begin = inputs.feature_segments[fidx]; // beginning bin - uint32_t gidx_end = - inputs.feature_segments[fidx + 1]; // end bin for i^th feature + uint32_t gidx_begin = shared_inputs.feature_segments[fidx]; // beginning bin + uint32_t gidx_end = shared_inputs.feature_segments[fidx + 1]; // end bin for i^th feature auto feature_hist = inputs.gradient_histogram.subspan(gidx_begin, gidx_end - gidx_begin); // Sum histogram bins for current feature @@ -133,8 +130,8 @@ __device__ void EvaluateFeature( bool missing_left = true; float gain = null_gain; if (thread_active) { - gain = LossChangeMissing(GradientPairPrecise{bin}, missing, inputs.parent_sum, inputs.param, - inputs.nidx, fidx, evaluator, missing_left); + gain = LossChangeMissing(GradientPairPrecise{bin}, missing, inputs.parent_sum, + shared_inputs.param, inputs.nidx, fidx, evaluator, missing_left); } __syncthreads(); @@ -156,40 +153,40 @@ __device__ void EvaluateFeature( switch (type) { case kNum: { // Use pointer from cut to indicate begin and end of bins for each feature. - uint32_t gidx_begin = inputs.feature_segments[fidx]; // beginning bin + uint32_t gidx_begin = shared_inputs.feature_segments[fidx]; // beginning bin int split_gidx = (scan_begin + threadIdx.x) - 1; float fvalue; if (split_gidx < static_cast(gidx_begin)) { - fvalue = inputs.min_fvalue[fidx]; + fvalue = shared_inputs.min_fvalue[fidx]; } else { - fvalue = inputs.feature_values[split_gidx]; + fvalue = shared_inputs.feature_values[split_gidx]; } GradientPairPrecise left = missing_left ? GradientPairPrecise{bin} + missing : GradientPairPrecise{bin}; GradientPairPrecise right = inputs.parent_sum - left; best_split->Update(gain, missing_left ? kLeftDir : kRightDir, fvalue, fidx, left, right, - false, inputs.param); + false, shared_inputs.param); break; } case kOneHot: { int32_t split_gidx = (scan_begin + threadIdx.x); - float fvalue = inputs.feature_values[split_gidx]; + float fvalue = shared_inputs.feature_values[split_gidx]; GradientPairPrecise left = missing_left ? GradientPairPrecise{bin} + missing : GradientPairPrecise{bin}; GradientPairPrecise right = inputs.parent_sum - left; best_split->Update(gain, missing_left ? kLeftDir : kRightDir, fvalue, fidx, left, right, - true, inputs.param); + true, shared_inputs.param); break; } case kPart: { int32_t split_gidx = (scan_begin + threadIdx.x); - float fvalue = inputs.feature_values[split_gidx]; + float fvalue = shared_inputs.feature_values[split_gidx]; GradientPairPrecise left = missing_left ? GradientPairPrecise{bin} + missing : GradientPairPrecise{bin}; GradientPairPrecise right = inputs.parent_sum - left; auto best_thresh = block_max.key; // index of best threshold inside a feature. best_split->Update(gain, missing_left ? 
kLeftDir : kRightDir, best_thresh, fidx, left, - right, true, inputs.param); + right, true, shared_inputs.param); break; } } @@ -199,15 +196,14 @@ __device__ void EvaluateFeature( } template -__global__ void EvaluateSplitsKernel(EvaluateSplitInputs left, - EvaluateSplitInputs right, - common::Span sorted_idx, - TreeEvaluator::SplitEvaluator evaluator, - common::Span out_candidates) { +__global__ __launch_bounds__(BLOCK_THREADS) void EvaluateSplitsKernel( + bst_feature_t number_active_features, common::Span d_inputs, + const EvaluateSplitSharedInputs shared_inputs, common::Span sorted_idx, + TreeEvaluator::SplitEvaluator evaluator, + common::Span out_candidates) { // KeyValuePair here used as threadIdx.x -> gain_value using ArgMaxT = cub::KeyValuePair; - using BlockScanT = - cub::BlockScan; + using BlockScanT = cub::BlockScan; using MaxReduceT = cub::BlockReduce; using SumReduceT = cub::BlockReduce; @@ -220,7 +216,7 @@ __global__ void EvaluateSplitsKernel(EvaluateSplitInputs left, // Aligned && shared storage for best_split __shared__ cub::Uninitialized uninitialized_split; - DeviceSplitCandidate& best_split = uninitialized_split.Alias(); + DeviceSplitCandidate &best_split = uninitialized_split.Alias(); __shared__ TempStorage temp_storage; if (threadIdx.x == 0) { @@ -229,30 +225,32 @@ __global__ void EvaluateSplitsKernel(EvaluateSplitInputs left, __syncthreads(); - // If this block is working on the left or right node - bool is_left = blockIdx.x < left.feature_set.size(); - EvaluateSplitInputs& inputs = is_left ? left : right; - + // Allocate blocks to one feature of one node + const auto input_idx = blockIdx.x / number_active_features; + const EvaluateSplitInputs &inputs = d_inputs[input_idx]; // One block for each feature. Features are sampled, so fidx != blockIdx.x - int fidx = inputs.feature_set[is_left ? blockIdx.x - : blockIdx.x - left.feature_set.size()]; - if (common::IsCat(inputs.feature_types, fidx)) { - auto n_bins_in_feat = inputs.feature_segments[fidx + 1] - inputs.feature_segments[fidx]; - if (common::UseOneHot(n_bins_in_feat, inputs.param.max_cat_to_onehot)) { + int fidx = inputs.feature_set[blockIdx.x % number_active_features]; + + if (common::IsCat(shared_inputs.feature_types, fidx)) { + auto n_bins_in_feat = + shared_inputs.feature_segments[fidx + 1] - shared_inputs.feature_segments[fidx]; + if (common::UseOneHot(n_bins_in_feat, shared_inputs.param.max_cat_to_onehot)) { EvaluateFeature(fidx, inputs, evaluator, sorted_idx, 0, &best_split, &temp_storage); + kOneHot>(fidx, inputs, shared_inputs, evaluator, sorted_idx, 0, &best_split, + &temp_storage); } else { - auto node_sorted_idx = is_left ? sorted_idx.first(inputs.feature_values.size()) - : sorted_idx.last(inputs.feature_values.size()); - size_t offset = is_left ? 
0 : inputs.feature_values.size(); + auto total_bins = shared_inputs.feature_values.size(); + size_t offset = total_bins * input_idx; + auto node_sorted_idx = sorted_idx.subspan(offset, total_bins); EvaluateFeature(fidx, inputs, evaluator, node_sorted_idx, offset, &best_split, - &temp_storage); + kPart>(fidx, inputs, shared_inputs, evaluator, node_sorted_idx, offset, + &best_split, &temp_storage); } } else { EvaluateFeature(fidx, inputs, evaluator, sorted_idx, 0, &best_split, &temp_storage); + kNum>(fidx, inputs, shared_inputs, evaluator, sorted_idx, 0, &best_split, + &temp_storage); } cub::CTA_SYNC(); @@ -262,35 +260,34 @@ __global__ void EvaluateSplitsKernel(EvaluateSplitInputs left, } } -__device__ DeviceSplitCandidate operator+(const DeviceSplitCandidate& a, - const DeviceSplitCandidate& b) { +__device__ DeviceSplitCandidate operator+(const DeviceSplitCandidate &a, + const DeviceSplitCandidate &b) { return b.loss_chg > a.loss_chg ? b : a; } /** * \brief Set the bits for categorical splits based on the split threshold. */ -template -__device__ void SetCategoricalSplit(EvaluateSplitInputs const &input, - common::Span d_sorted_idx, bst_feature_t fidx, - bool is_left, common::Span out, - DeviceSplitCandidate *p_out_split) { +__device__ void SetCategoricalSplit(const EvaluateSplitSharedInputs &shared_inputs, + common::Span d_sorted_idx, + bst_feature_t fidx, std::size_t input_idx, + common::Span out, + DeviceSplitCandidate *p_out_split) { auto &out_split = *p_out_split; out_split.split_cats = common::CatBitField{out}; // Simple case for one hot split - if (common::UseOneHot(input.FeatureBins(fidx), input.param.max_cat_to_onehot)) { + if (common::UseOneHot(shared_inputs.FeatureBins(fidx), shared_inputs.param.max_cat_to_onehot)) { out_split.split_cats.Set(common::AsCat(out_split.fvalue)); return; } - auto node_sorted_idx = - is_left ? d_sorted_idx.subspan(0, input.feature_values.size()) - : d_sorted_idx.subspan(input.feature_values.size(), input.feature_values.size()); - size_t node_offset = is_left ? 
0 : input.feature_values.size(); + auto node_sorted_idx = d_sorted_idx.subspan(shared_inputs.feature_values.size() * input_idx, + shared_inputs.feature_values.size()); + size_t node_offset = input_idx * shared_inputs.feature_values.size(); auto best_thresh = out_split.PopBestThresh(); - auto f_sorted_idx = - node_sorted_idx.subspan(input.feature_segments[fidx], input.FeatureBins(fidx)); + auto f_sorted_idx = node_sorted_idx.subspan(shared_inputs.feature_segments[fidx], + shared_inputs.FeatureBins(fidx)); if (out_split.dir != kLeftDir) { // forward, missing on right auto beg = dh::tcbegin(f_sorted_idx); @@ -299,7 +296,7 @@ __device__ void SetCategoricalSplit(EvaluateSplitInputs const &inp boundary = std::max(boundary, static_cast(1ul)); auto end = beg + boundary; thrust::for_each(thrust::seq, beg, end, [&](auto c) { - auto cat = input.feature_values[c - node_offset]; + auto cat = shared_inputs.feature_values[c - node_offset]; assert(!out_split.split_cats.Check(cat) && "already set"); out_split.SetCat(cat); }); @@ -307,44 +304,36 @@ __device__ void SetCategoricalSplit(EvaluateSplitInputs const &inp assert((f_sorted_idx.size() - best_thresh + 1) != 0 && " == 0"); thrust::for_each(thrust::seq, dh::tcrbegin(f_sorted_idx), dh::tcrbegin(f_sorted_idx) + (f_sorted_idx.size() - best_thresh), [&](auto c) { - auto cat = input.feature_values[c - node_offset]; + auto cat = shared_inputs.feature_values[c - node_offset]; out_split.SetCat(cat); }); } } template -void GPUHistEvaluator::EvaluateSplits( - EvaluateSplitInputs left, EvaluateSplitInputs right, +void GPUHistEvaluator::LaunchEvaluateSplits( + bst_feature_t number_active_features, common::Span d_inputs, + EvaluateSplitSharedInputs shared_inputs, TreeEvaluator::SplitEvaluator evaluator, common::Span out_splits) { if (need_sort_histogram_) { - this->SortHistogram(left, right, evaluator); + this->SortHistogram(d_inputs, shared_inputs, evaluator); } - size_t combined_num_features = left.feature_set.size() + right.feature_set.size(); + size_t combined_num_features = number_active_features * d_inputs.size(); dh::TemporaryArray feature_best_splits(combined_num_features); // One block for each feature - uint32_t constexpr kBlockThreads = 256; + uint32_t constexpr kBlockThreads = 32; dh::LaunchKernel {static_cast(combined_num_features), kBlockThreads, 0}( - EvaluateSplitsKernel, left, right, this->SortedIdx(left), + EvaluateSplitsKernel, number_active_features, d_inputs, + shared_inputs, this->SortedIdx(d_inputs.size(), shared_inputs.feature_values.size()), evaluator, dh::ToSpan(feature_best_splits)); // Reduce to get best candidate for left and right child over all features - auto reduce_offset = dh::MakeTransformIterator(thrust::make_counting_iterator(0llu), - [=] __device__(size_t idx) -> size_t { - if (idx == 0) { - return 0; - } - if (idx == 1) { - return left.feature_set.size(); - } - if (idx == 2) { - return combined_num_features; - } - return 0; - }); + auto reduce_offset = dh::MakeTransformIterator( + thrust::make_counting_iterator(0llu), + [=] __device__(size_t idx) -> size_t { return idx * number_active_features; }); size_t temp_storage_bytes = 0; auto num_segments = out_splits.size(); cub::DeviceSegmentedReduce::Sum(nullptr, temp_storage_bytes, feature_best_splits.data(), @@ -357,89 +346,73 @@ void GPUHistEvaluator::EvaluateSplits( } template -void GPUHistEvaluator::CopyToHost(EvaluateSplitInputs const &input, - common::Span cats_out) { - if (cats_out.empty()) return; +void GPUHistEvaluator::CopyToHost(const std::vector &nidx) { + if 
(!has_categoricals_) return; + auto d_cats = this->DeviceCatStorage(nidx); + auto h_cats = this->HostCatStorage(nidx); dh::CUDAEvent event; event.Record(dh::DefaultStream()); - auto h_cats = this->HostCatStorage(input.nidx); - copy_stream_.View().Wait(event); - dh::safe_cuda(cudaMemcpyAsync(h_cats.data(), cats_out.data(), cats_out.size_bytes(), - cudaMemcpyDeviceToHost, copy_stream_.View())); + for (auto idx : nidx) { + copy_stream_.View().Wait(event); + dh::safe_cuda(cudaMemcpyAsync( + h_cats.GetNodeCatStorage(idx).data(), d_cats.GetNodeCatStorage(idx).data(), + d_cats.GetNodeCatStorage(idx).size_bytes(), cudaMemcpyDeviceToHost, copy_stream_.View())); + } } template -void GPUHistEvaluator::EvaluateSplits(GPUExpandEntry candidate, - EvaluateSplitInputs left, - EvaluateSplitInputs right, - common::Span out_entries) { +void GPUHistEvaluator::EvaluateSplits( + const std::vector &nidx, bst_feature_t number_active_features, + common::Span d_inputs, EvaluateSplitSharedInputs shared_inputs, + common::Span out_entries) { auto evaluator = this->tree_evaluator_.template GetEvaluator(); - dh::TemporaryArray splits_out_storage(2); + dh::TemporaryArray splits_out_storage(d_inputs.size()); auto out_splits = dh::ToSpan(splits_out_storage); - this->EvaluateSplits(left, right, evaluator, out_splits); + this->LaunchEvaluateSplits(number_active_features, d_inputs, shared_inputs, evaluator, + out_splits); - auto d_sorted_idx = this->SortedIdx(left); + auto d_sorted_idx = this->SortedIdx(d_inputs.size(), shared_inputs.feature_values.size()); auto d_entries = out_entries; - auto cats_out = this->DeviceCatStorage(left.nidx); + auto device_cats_accessor = this->DeviceCatStorage(nidx); // turn candidate into entry, along with handling sort based split. - dh::LaunchN(right.feature_set.empty() ? 1 : 2, [=] __device__(size_t i) { - auto const &input = i == 0 ? left : right; + dh::LaunchN(d_inputs.size(), [=] __device__(size_t i) mutable { + auto const input = d_inputs[i]; auto &split = out_splits[i]; auto fidx = out_splits[i].findex; if (split.is_cat) { - bool is_left = i == 0; - auto out = is_left ? 
cats_out.first(cats_out.size() / 2) : cats_out.last(cats_out.size() / 2); - SetCategoricalSplit(input, d_sorted_idx, fidx, is_left, out, &out_splits[i]); + SetCategoricalSplit(shared_inputs, d_sorted_idx, fidx, i, + device_cats_accessor.GetNodeCatStorage(input.nidx), &out_splits[i]); } - float base_weight = - evaluator.CalcWeight(input.nidx, input.param, GradStats{split.left_sum + split.right_sum}); - float left_weight = evaluator.CalcWeight(input.nidx, input.param, GradStats{split.left_sum}); - float right_weight = evaluator.CalcWeight(input.nidx, input.param, GradStats{split.right_sum}); + float base_weight = evaluator.CalcWeight(input.nidx, shared_inputs.param, + GradStats{split.left_sum + split.right_sum}); + float left_weight = + evaluator.CalcWeight(input.nidx, shared_inputs.param, GradStats{split.left_sum}); + float right_weight = + evaluator.CalcWeight(input.nidx, shared_inputs.param, GradStats{split.right_sum}); - d_entries[i] = GPUExpandEntry{input.nidx, candidate.depth + 1, out_splits[i], - base_weight, left_weight, right_weight}; + d_entries[i] = GPUExpandEntry{input.nidx, input.depth, out_splits[i], + base_weight, left_weight, right_weight}; }); - this->CopyToHost(left, cats_out); + this->CopyToHost(nidx); } template GPUExpandEntry GPUHistEvaluator::EvaluateSingleSplit( - EvaluateSplitInputs input, float weight) { - dh::TemporaryArray splits_out(1); - auto out_split = dh::ToSpan(splits_out); - auto evaluator = tree_evaluator_.GetEvaluator(); - this->EvaluateSplits(input, {}, evaluator, out_split); - - auto cats_out = this->DeviceCatStorage(input.nidx); - auto d_sorted_idx = this->SortedIdx(input); - - dh::TemporaryArray entries(1); - auto d_entries = entries.data().get(); - dh::LaunchN(1, [=] __device__(size_t i) { - auto &split = out_split[i]; - auto fidx = out_split[i].findex; - - if (split.is_cat) { - SetCategoricalSplit(input, d_sorted_idx, fidx, true, cats_out, &out_split[i]); - } - - float left_weight = evaluator.CalcWeight(0, input.param, GradStats{split.left_sum}); - float right_weight = evaluator.CalcWeight(0, input.param, GradStats{split.right_sum}); - d_entries[0] = GPUExpandEntry(0, 0, split, weight, left_weight, right_weight); - }); - this->CopyToHost(input, cats_out); - + EvaluateSplitInputs input, EvaluateSplitSharedInputs shared_inputs, float weight) { + dh::device_vector inputs = std::vector{input}; + dh::TemporaryArray out_entries(1); + this->EvaluateSplits({input.nidx}, input.feature_set.size(), dh::ToSpan(inputs), shared_inputs, + dh::ToSpan(out_entries)); GPUExpandEntry root_entry; - dh::safe_cuda(cudaMemcpyAsync(&root_entry, entries.data().get(), - sizeof(GPUExpandEntry) * entries.size(), cudaMemcpyDeviceToHost)); + dh::safe_cuda(cudaMemcpyAsync(&root_entry, out_entries.data().get(), sizeof(GPUExpandEntry), + cudaMemcpyDeviceToHost)); return root_entry; } -template class GPUHistEvaluator; template class GPUHistEvaluator; } // namespace tree } // namespace xgboost diff --git a/src/tree/gpu_hist/evaluate_splits.cuh b/src/tree/gpu_hist/evaluate_splits.cuh index 08b0270ee4d7..aebaa89b2e7b 100644 --- a/src/tree/gpu_hist/evaluate_splits.cuh +++ b/src/tree/gpu_hist/evaluate_splits.cuh @@ -17,24 +17,40 @@ class HistogramCuts; } namespace tree { -template + +// Inputs specific to each node struct EvaluateSplitInputs { int nidx; + int depth; GradientPairPrecise parent_sum; - GPUTrainingParam param; common::Span feature_set; + common::Span gradient_histogram; +}; + +// Inputs necessary for all nodes +struct EvaluateSplitSharedInputs { + GPUTrainingParam param; 
   common::Span<FeatureType const> feature_types;
   common::Span<const uint32_t> feature_segments;
   common::Span<const float> feature_values;
   common::Span<const float> min_fvalue;
-  common::Span<const GradientSumT> gradient_histogram;
-
   XGBOOST_DEVICE auto Features() const { return feature_segments.size() - 1; }
   __device__ auto FeatureBins(bst_feature_t fidx) const {
     return feature_segments[fidx + 1] - feature_segments[fidx];
   }
 };
 
+// Used to return internal storage regions for categoricals
+// Usable on device
+struct CatAccessor {
+  common::Span<common::CatBitField::value_type> cat_storage;
+  std::size_t node_categorical_storage_size;
+  XGBOOST_DEVICE common::Span<common::CatBitField::value_type> GetNodeCatStorage(bst_node_t nidx) {
+    return this->cat_storage.subspan(nidx * this->node_categorical_storage_size,
+                                     this->node_categorical_storage_size);
+  }
+};
+
 template <typename GradientSumT>
 class GPUHistEvaluator {
   using CatST = common::CatBitField::value_type;  // categorical storage type
@@ -61,61 +77,53 @@ class GPUHistEvaluator {
   // Do we have any categorical features that require sorting histograms?
   // use this to skip the expensive sort step
   bool need_sort_histogram_ = false;
+  bool has_categoricals_ = false;
   // Number of elements of categorical storage type
   // needed to hold categoricals for a single node
   std::size_t node_categorical_storage_size_ = 0;
 
   // Copy the categories from device to host asynchronously.
-  void CopyToHost(EvaluateSplitInputs<GradientSumT> const &input, common::Span<CatST> cats_out);
+  void CopyToHost(const std::vector<bst_node_t> &nidx);
   /**
    * \brief Get host category storage of nidx for internal calculation.
    */
-  auto HostCatStorage(bst_node_t nidx) {
-
-    std::size_t min_size=(nidx+2)*node_categorical_storage_size_;
-    if(h_split_cats_.size()<min_size) {
+  auto HostCatStorage(const std::vector<bst_node_t> &nidx) {
+    if (!has_categoricals_) return CatAccessor{};
+    auto max_nidx = *std::max_element(nidx.begin(), nidx.end());
+    std::size_t min_size = (max_nidx + 2) * node_categorical_storage_size_;
+    if (h_split_cats_.size() < min_size) {
       h_split_cats_.resize(min_size);
     }
-
-    if (nidx == RegTree::kRoot) {
-      auto cats_out = common::Span<CatST>{h_split_cats_}.subspan(nidx * node_categorical_storage_size_, node_categorical_storage_size_);
-      return cats_out;
-    }
-    auto cats_out = common::Span<CatST>{h_split_cats_}.subspan(nidx * node_categorical_storage_size_, node_categorical_storage_size_ * 2);
-    return cats_out;
+    return CatAccessor{{h_split_cats_.data(), h_split_cats_.size()},
+                       node_categorical_storage_size_};
   }
   /**
    * \brief Get device category storage of nidx for internal calculation.
    */
-  auto DeviceCatStorage(bst_node_t nidx) {
-    std::size_t min_size=(nidx+2)*node_categorical_storage_size_;
-    if(split_cats_.size()<min_size) {
+  auto DeviceCatStorage(const std::vector<bst_node_t> &nidx) {
+    if (!has_categoricals_) return CatAccessor{};
+    auto max_nidx = *std::max_element(nidx.begin(), nidx.end());
+    std::size_t min_size = (max_nidx + 2) * node_categorical_storage_size_;
+    if (split_cats_.size() < min_size) {
      split_cats_.resize(min_size);
    }
-    if (nidx == RegTree::kRoot) {
-      auto cats_out = dh::ToSpan(split_cats_).subspan(nidx * node_categorical_storage_size_, node_categorical_storage_size_);
-      return cats_out;
-    }
-    auto cats_out = dh::ToSpan(split_cats_).subspan(nidx * node_categorical_storage_size_, node_categorical_storage_size_ * 2);
-    return cats_out;
+    return CatAccessor{dh::ToSpan(split_cats_), node_categorical_storage_size_};
   }
 
   /**
   * \brief Get sorted index storage for the nodes whose splits are being evaluated.
   */
-  auto SortedIdx(EvaluateSplitInputs<GradientSumT> left) {
-    if (left.nidx == RegTree::kRoot && !cat_sorted_idx_.empty()) {
-      return dh::ToSpan(cat_sorted_idx_).first(left.feature_values.size());
-    }
+  auto SortedIdx(int num_nodes, bst_feature_t total_bins) {
+    if (!need_sort_histogram_) return common::Span<bst_feature_t>();
+    cat_sorted_idx_.resize(num_nodes * total_bins);
     return dh::ToSpan(cat_sorted_idx_);
   }
 
-  auto SortInput(EvaluateSplitInputs<GradientSumT> left) {
-    if (left.nidx == RegTree::kRoot && !cat_sorted_idx_.empty()) {
-      return dh::ToSpan(sort_input_).first(left.feature_values.size());
-    }
+  auto SortInput(int num_nodes, bst_feature_t total_bins) {
+    if (!need_sort_histogram_) return common::Span<SortPair>();
+    sort_input_.resize(num_nodes * total_bins);
     return dh::ToSpan(sort_input_);
   }
 
@@ -154,26 +162,24 @@ class GPUHistEvaluator {
   /**
    * \brief Sort the histogram based on output to obtain contiguous partitions.
    */
-  common::Span<bst_feature_t const> SortHistogram(
-      EvaluateSplitInputs<GradientSumT> const &left, EvaluateSplitInputs<GradientSumT> const &right,
+  common::Span<bst_feature_t const> SortHistogram(common::Span<const EvaluateSplitInputs> d_inputs,
+                                                  EvaluateSplitSharedInputs shared_inputs,
       TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator);
 
   // impl of evaluate splits, contains CUDA kernels so it's public
-  void EvaluateSplits(EvaluateSplitInputs<GradientSumT> left,
-                      EvaluateSplitInputs<GradientSumT> right,
+  void LaunchEvaluateSplits(bst_feature_t number_active_features,
+                            common::Span<const EvaluateSplitInputs> d_inputs,
+                            EvaluateSplitSharedInputs shared_inputs,
                       TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator,
                       common::Span<DeviceSplitCandidate> out_splits);
   /**
    * \brief Evaluate splits for left and right nodes.
    */
-  void EvaluateSplits(GPUExpandEntry candidate,
-                      EvaluateSplitInputs<GradientSumT> left,
-                      EvaluateSplitInputs<GradientSumT> right,
+  void EvaluateSplits(const std::vector<bst_node_t> &nidx, bst_feature_t number_active_features,
+                      common::Span<const EvaluateSplitInputs> d_inputs,
+                      EvaluateSplitSharedInputs shared_inputs,
                       common::Span<GPUExpandEntry> out_splits);
   /**
    * \brief Evaluate splits for root node.
*/ - GPUExpandEntry EvaluateSingleSplit(EvaluateSplitInputs input, float weight); + GPUExpandEntry EvaluateSingleSplit(EvaluateSplitInputs input,EvaluateSplitSharedInputs shared_inputs, float weight); }; } // namespace tree } // namespace xgboost diff --git a/src/tree/gpu_hist/evaluator.cu b/src/tree/gpu_hist/evaluator.cu index aaf35243b2f5..93d7215a2629 100644 --- a/src/tree/gpu_hist/evaluator.cu +++ b/src/tree/gpu_hist/evaluator.cu @@ -21,6 +21,7 @@ void GPUHistEvaluator::Reset(common::HistogramCuts const &cuts, int32_t device) { param_ = param; tree_evaluator_ = TreeEvaluator{param, n_features, device}; + has_categoricals_ = cuts.HasCategorical(); if (cuts.HasCategorical()) { dh::XGBCachingDeviceAllocator alloc; auto ptrs = cuts.cut_ptrs_.ConstDeviceSpan(); @@ -69,42 +70,46 @@ void GPUHistEvaluator::Reset(common::HistogramCuts const &cuts, template common::Span GPUHistEvaluator::SortHistogram( - EvaluateSplitInputs const &left, EvaluateSplitInputs const &right, + common::Span d_inputs, EvaluateSplitSharedInputs shared_inputs, TreeEvaluator::SplitEvaluator evaluator) { dh::XGBCachingDeviceAllocator alloc; - auto sorted_idx = this->SortedIdx(left); + auto sorted_idx = this->SortedIdx(d_inputs.size(), shared_inputs.feature_values.size()); dh::Iota(sorted_idx); - auto data = this->SortInput(left); + auto data = this->SortInput(d_inputs.size(), shared_inputs.feature_values.size()); auto it = thrust::make_counting_iterator(0u); auto d_feature_idx = dh::ToSpan(feature_idx_); + auto total_bins = shared_inputs.feature_values.size(); thrust::transform(thrust::cuda::par(alloc), it, it + data.size(), dh::tbegin(data), [=] XGBOOST_DEVICE(uint32_t i) { - auto is_left = i < left.feature_values.size(); - auto const &input = is_left ? left : right; - auto j = i - (is_left ? 0 : input.feature_values.size()); + auto const &input = d_inputs[i / total_bins]; + auto j = i % total_bins; auto fidx = d_feature_idx[j]; - if (common::IsCat(input.feature_types, fidx)) { - auto lw = evaluator.CalcWeightCat(input.param, input.gradient_histogram[j]); + if (common::IsCat(shared_inputs.feature_types, fidx)) { + auto lw = evaluator.CalcWeightCat(shared_inputs.param, + input.gradient_histogram[j]); return thrust::make_tuple(i, lw); } return thrust::make_tuple(i, 0.0); }); + // Sort an array segmented according to + // - nodes + // - features within each node + // - gradients within each feature thrust::stable_sort_by_key(thrust::cuda::par(alloc), dh::tbegin(data), dh::tend(data), dh::tbegin(sorted_idx), [=] XGBOOST_DEVICE(SortPair const &l, SortPair const &r) { auto li = thrust::get<0>(l); auto ri = thrust::get<0>(r); - auto l_is_left = li < left.feature_values.size(); - auto r_is_left = ri < left.feature_values.size(); + auto l_node = li / total_bins; + auto r_node = ri / total_bins; - if (l_is_left != r_is_left) { - return l_is_left; // not the same node + if (l_node != r_node) { + return l_node < r_node; // not the same node } - auto const &input = l_is_left ? left : right; - li -= (l_is_left ? 0 : input.feature_values.size()); - ri -= (r_is_left ? 
0 : input.feature_values.size()); + li = li % total_bins; + ri = ri % total_bins; auto lfidx = d_feature_idx[li]; auto rfidx = d_feature_idx[ri]; @@ -113,7 +118,7 @@ common::Span GPUHistEvaluator::SortHistogram( return lfidx < rfidx; // not the same feature } - if (common::IsCat(input.feature_types, lfidx)) { + if (common::IsCat(shared_inputs.feature_types, lfidx)) { auto lw = thrust::get<1>(l); auto rw = thrust::get<1>(r); return lw < rw; diff --git a/src/tree/updater_gpu_hist.cu b/src/tree/updater_gpu_hist.cu index 5eaaeecbadf6..aba4ca1ddfc8 100644 --- a/src/tree/updater_gpu_hist.cu +++ b/src/tree/updater_gpu_hist.cu @@ -196,6 +196,7 @@ struct GPUHistMakerDevice { HistRounding histogram_rounding; dh::PinnedMemory pinned; + dh::PinnedMemory pinned2; common::Monitor monitor; common::ColumnSampler column_sampler; @@ -279,58 +280,64 @@ struct GPUHistMakerDevice { common::Span feature_set = interaction_constraints.Query(sampled_features->DeviceSpan(), nidx); auto matrix = page->GetDeviceAccessor(ctx_->gpu_id); - EvaluateSplitInputs inputs{nidx, - root_sum, - gpu_param, - feature_set, - feature_types, - matrix.feature_segments, - matrix.gidx_fvalue_map, - matrix.min_fvalue, - hist.GetNodeHistogram(nidx)}; - auto split = this->evaluator_.EvaluateSingleSplit(inputs, weight); + EvaluateSplitInputs inputs{nidx, 0, root_sum, feature_set, hist.GetNodeHistogram(nidx)}; + EvaluateSplitSharedInputs shared_inputs{ + gpu_param, feature_types, matrix.feature_segments, matrix.gidx_fvalue_map, + matrix.min_fvalue, + }; + auto split = this->evaluator_.EvaluateSingleSplit(inputs, shared_inputs, weight); return split; } - void EvaluateLeftRightSplits(GPUExpandEntry candidate, int left_nidx, int right_nidx, - const RegTree& tree, + void EvaluateSplits(const std::vector& candidates, const RegTree& tree, common::Span pinned_candidates_out) { - dh::TemporaryArray splits_out(2); - GPUTrainingParam gpu_param(param); - auto left_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(left_nidx)); - left_sampled_features->SetDevice(ctx_->gpu_id); - common::Span left_feature_set = - interaction_constraints.Query(left_sampled_features->DeviceSpan(), left_nidx); - auto right_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(right_nidx)); - right_sampled_features->SetDevice(ctx_->gpu_id); - common::Span right_feature_set = - interaction_constraints.Query(right_sampled_features->DeviceSpan(), left_nidx); + if (candidates.empty()) return; + dh::TemporaryArray d_node_inputs(2 * candidates.size()); + dh::TemporaryArray splits_out(2 * candidates.size()); + std::vector nidx(2 * candidates.size()); + auto h_node_inputs = pinned2.GetSpan(2 * candidates.size()); auto matrix = page->GetDeviceAccessor(ctx_->gpu_id); - - EvaluateSplitInputs left{left_nidx, - candidate.split.left_sum, - gpu_param, - left_feature_set, - feature_types, - matrix.feature_segments, - matrix.gidx_fvalue_map, - matrix.min_fvalue, - hist.GetNodeHistogram(left_nidx)}; - EvaluateSplitInputs right{right_nidx, - candidate.split.right_sum, - gpu_param, - right_feature_set, - feature_types, - matrix.feature_segments, - matrix.gidx_fvalue_map, - matrix.min_fvalue, - hist.GetNodeHistogram(right_nidx)}; - - dh::TemporaryArray entries(2); - this->evaluator_.EvaluateSplits(candidate, left, right, dh::ToSpan(entries)); - dh::safe_cuda(cudaMemcpyAsync(pinned_candidates_out.data(), entries.data().get(), - sizeof(GPUExpandEntry) * entries.size(), cudaMemcpyDeviceToHost)); - } + EvaluateSplitSharedInputs shared_inputs{ + GPUTrainingParam(param), 
feature_types, matrix.feature_segments, + matrix.gidx_fvalue_map, matrix.min_fvalue, + }; + dh::TemporaryArray entries(2 * candidates.size()); + for (int i = 0; i < candidates.size(); i++) { + auto candidate = candidates.at(i); + int left_nidx = tree[candidate.nid].LeftChild(); + int right_nidx = tree[candidate.nid].RightChild(); + nidx[i * 2] = left_nidx; + nidx[i * 2 + 1] = right_nidx; + auto left_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(left_nidx)); + left_sampled_features->SetDevice(ctx_->gpu_id); + common::Span left_feature_set = + interaction_constraints.Query(left_sampled_features->DeviceSpan(), left_nidx); + auto right_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(right_nidx)); + right_sampled_features->SetDevice(ctx_->gpu_id); + common::Span right_feature_set = + interaction_constraints.Query(right_sampled_features->DeviceSpan(), left_nidx); + h_node_inputs[i * 2] = {left_nidx, candidate.depth + 1, candidate.split.left_sum, + left_feature_set, hist.GetNodeHistogram(left_nidx)}; + h_node_inputs[i * 2 + 1] = {right_nidx, candidate.depth + 1, candidate.split.right_sum, + right_feature_set, hist.GetNodeHistogram(right_nidx)}; + } + bst_feature_t number_active_features = h_node_inputs[0].feature_set.size(); + for (auto input : h_node_inputs) { + CHECK_EQ(input.feature_set.size(), number_active_features) + << "Current implementation assumes that the number of active features " + "(after sampling) in any node is the same"; + } + dh::safe_cuda(cudaMemcpyAsync(d_node_inputs.data().get(), h_node_inputs.data(), + h_node_inputs.size() * sizeof(EvaluateSplitInputs), + cudaMemcpyDefault)); + + this->evaluator_.EvaluateSplits(nidx, number_active_features, dh::ToSpan(d_node_inputs), + shared_inputs, dh::ToSpan(entries)); + dh::safe_cuda(cudaMemcpyAsync(pinned_candidates_out.data(), + entries.data().get(), sizeof(GPUExpandEntry) * entries.size(), + cudaMemcpyDeviceToHost)); + dh::DefaultStream().Sync(); + } void BuildHist(int nidx) { auto d_node_hist = hist.GetNodeHistogram(nidx); @@ -697,16 +704,9 @@ struct GPUHistMakerDevice { this->BuildHistLeftRight(filtered_expand_set, reducer, tree); monitor.Stop("BuildHist"); - for (auto i = 0ull; i < filtered_expand_set.size(); i++) { - auto candidate = filtered_expand_set.at(i); - int left_child_nidx = tree[candidate.nid].LeftChild(); - int right_child_nidx = tree[candidate.nid].RightChild(); - - monitor.Start("EvaluateSplits"); - this->EvaluateLeftRightSplits(candidate, left_child_nidx, right_child_nidx, *p_tree, - new_candidates.subspan(i * 2, 2)); - monitor.Stop("EvaluateSplits"); - } + monitor.Start("EvaluateSplits"); + this->EvaluateSplits(filtered_expand_set, *p_tree, new_candidates); + monitor.Stop("EvaluateSplits"); dh::DefaultStream().Sync(); driver.Push(new_candidates.begin(), new_candidates.end()); expand_set = driver.Pop(); diff --git a/tests/cpp/tree/gpu_hist/test_evaluate_splits.cu b/tests/cpp/tree/gpu_hist/test_evaluate_splits.cu index fb1726004a39..f2750ed67bcc 100644 --- a/tests/cpp/tree/gpu_hist/test_evaluate_splits.cu +++ b/tests/cpp/tree/gpu_hist/test_evaluate_splits.cu @@ -35,8 +35,8 @@ void TestEvaluateSingleSplit(bool is_categorical) { std::vector{0, 1}; // Setup gradients so that second feature gets higher gain - thrust::device_vector feature_histogram = - std::vector{ + thrust::device_vector feature_histogram = + std::vector{ {-0.5, 0.5}, {0.5, 0.5}, {-1.0, 0.5}, {1.0, 0.5}}; thrust::device_vector monotonic_constraints(feature_set.size(), 0); @@ -50,21 +50,23 @@ void 
TestEvaluateSingleSplit(bool is_categorical) { d_feature_types = dh::ToSpan(feature_types); } - EvaluateSplitInputs input{1, + EvaluateSplitInputs input{1,0, parent_sum, - param, dh::ToSpan(feature_set), - d_feature_types, - cuts.cut_ptrs_.ConstDeviceSpan(), - cuts.cut_values_.ConstDeviceSpan(), - cuts.min_vals_.ConstDeviceSpan(), dh::ToSpan(feature_histogram)}; + EvaluateSplitSharedInputs shared_inputs{ + param, + d_feature_types, + cuts.cut_ptrs_.ConstDeviceSpan(), + cuts.cut_values_.ConstDeviceSpan(), + cuts.min_vals_.ConstDeviceSpan(), + }; - GPUHistEvaluator evaluator{ + GPUHistEvaluator evaluator{ tparam, static_cast(feature_set.size()), 0}; evaluator.Reset(cuts, dh::ToSpan(feature_types), feature_set.size(), tparam, 0); DeviceSplitCandidate result = - evaluator.EvaluateSingleSplit(input, 0).split; + evaluator.EvaluateSingleSplit(input, shared_inputs,0).split; EXPECT_EQ(result.findex, 1); EXPECT_EQ(result.fvalue, 11.0); @@ -93,21 +95,23 @@ TEST(GpuHist, EvaluateSingleSplitMissing) { std::vector{0, 2}; thrust::device_vector feature_values = std::vector{1.0, 2.0}; thrust::device_vector feature_min_values = std::vector{0.0}; - thrust::device_vector feature_histogram = - std::vector{{-0.5, 0.5}, {0.5, 0.5}}; + thrust::device_vector feature_histogram = + std::vector{{-0.5, 0.5}, {0.5, 0.5}}; thrust::device_vector monotonic_constraints(feature_set.size(), 0); - EvaluateSplitInputs input{1, + EvaluateSplitInputs input{1,0, parent_sum, - param, dh::ToSpan(feature_set), - {}, - dh::ToSpan(feature_segments), - dh::ToSpan(feature_values), - dh::ToSpan(feature_min_values), dh::ToSpan(feature_histogram)}; + EvaluateSplitSharedInputs shared_inputs{ + param, + {}, + dh::ToSpan(feature_segments), + dh::ToSpan(feature_values), + dh::ToSpan(feature_min_values), + }; - GPUHistEvaluator evaluator(tparam, feature_set.size(), 0); - DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, 0).split; + GPUHistEvaluator evaluator(tparam, feature_set.size(), 0); + DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs,0).split; EXPECT_EQ(result.findex, 0); EXPECT_EQ(result.fvalue, 1.0); @@ -118,9 +122,9 @@ TEST(GpuHist, EvaluateSingleSplitMissing) { TEST(GpuHist, EvaluateSingleSplitEmpty) { TrainParam tparam = ZeroParam(); - GPUHistEvaluator evaluator(tparam, 1, 0); + GPUHistEvaluator evaluator(tparam, 1, 0); DeviceSplitCandidate result = - evaluator.EvaluateSingleSplit(EvaluateSplitInputs{}, 0).split; + evaluator.EvaluateSingleSplit(EvaluateSplitInputs{}, EvaluateSplitSharedInputs{}, 0).split; EXPECT_EQ(result.findex, -1); EXPECT_LT(result.loss_chg, 0.0f); } @@ -140,22 +144,24 @@ TEST(GpuHist, EvaluateSingleSplitFeatureSampling) { std::vector{1.0, 2.0, 11.0, 12.0}; thrust::device_vector feature_min_values = std::vector{0.0, 10.0}; - thrust::device_vector feature_histogram = - std::vector{ + thrust::device_vector feature_histogram = + std::vector{ {-10.0, 0.5}, {10.0, 0.5}, {-0.5, 0.5}, {0.5, 0.5}}; thrust::device_vector monotonic_constraints(2, 0); - EvaluateSplitInputs input{1, + EvaluateSplitInputs input{1,0, parent_sum, - param, dh::ToSpan(feature_set), + dh::ToSpan(feature_histogram)}; + EvaluateSplitSharedInputs shared_inputs{ + param, {}, dh::ToSpan(feature_segments), dh::ToSpan(feature_values), dh::ToSpan(feature_min_values), - dh::ToSpan(feature_histogram)}; + }; - GPUHistEvaluator evaluator(tparam, feature_min_values.size(), 0); - DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, 0).split; + GPUHistEvaluator evaluator(tparam, 
feature_min_values.size(), 0); + DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input,shared_inputs, 0).split; EXPECT_EQ(result.findex, 1); EXPECT_EQ(result.fvalue, 11.0); @@ -178,22 +184,24 @@ TEST(GpuHist, EvaluateSingleSplitBreakTies) { std::vector{1.0, 2.0, 11.0, 12.0}; thrust::device_vector feature_min_values = std::vector{0.0, 10.0}; - thrust::device_vector feature_histogram = - std::vector{ + thrust::device_vector feature_histogram = + std::vector{ {-0.5, 0.5}, {0.5, 0.5}, {-0.5, 0.5}, {0.5, 0.5}}; thrust::device_vector monotonic_constraints(2, 0); - EvaluateSplitInputs input{1, + EvaluateSplitInputs input{1,0, parent_sum, - param, dh::ToSpan(feature_set), + dh::ToSpan(feature_histogram)}; + EvaluateSplitSharedInputs shared_inputs{ + param, {}, dh::ToSpan(feature_segments), dh::ToSpan(feature_values), dh::ToSpan(feature_min_values), - dh::ToSpan(feature_histogram)}; + }; - GPUHistEvaluator evaluator(tparam, feature_min_values.size(), 0); - DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, 0).split; + GPUHistEvaluator evaluator(tparam, feature_min_values.size(), 0); + DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input,shared_inputs, 0).split; EXPECT_EQ(result.findex, 0); EXPECT_EQ(result.fvalue, 1.0); @@ -214,37 +222,35 @@ TEST(GpuHist, EvaluateSplits) { std::vector{1.0, 2.0, 11.0, 12.0}; thrust::device_vector feature_min_values = std::vector{0.0, 0.0}; - thrust::device_vector feature_histogram_left = - std::vector{ + thrust::device_vector feature_histogram_left = + std::vector{ {-0.5, 0.5}, {0.5, 0.5}, {-1.0, 0.5}, {1.0, 0.5}}; - thrust::device_vector feature_histogram_right = - std::vector{ + thrust::device_vector feature_histogram_right = + std::vector{ {-1.0, 0.5}, {1.0, 0.5}, {-0.5, 0.5}, {0.5, 0.5}}; thrust::device_vector monotonic_constraints(feature_set.size(), 0); - EvaluateSplitInputs input_left{ - 1, + EvaluateSplitInputs input_left{ + 1,0, parent_sum, - param, dh::ToSpan(feature_set), - {}, - dh::ToSpan(feature_segments), - dh::ToSpan(feature_values), - dh::ToSpan(feature_min_values), dh::ToSpan(feature_histogram_left)}; - EvaluateSplitInputs input_right{ - 2, + EvaluateSplitInputs input_right{ + 2,0, parent_sum, - param, dh::ToSpan(feature_set), - {}, - dh::ToSpan(feature_segments), - dh::ToSpan(feature_values), - dh::ToSpan(feature_min_values), dh::ToSpan(feature_histogram_right)}; + EvaluateSplitSharedInputs shared_inputs{ + param, + {}, + dh::ToSpan(feature_segments), + dh::ToSpan(feature_values), + dh::ToSpan(feature_min_values), + }; - GPUHistEvaluator evaluator{ + GPUHistEvaluator evaluator{ tparam, static_cast(feature_min_values.size()), 0}; - evaluator.EvaluateSplits(input_left, input_right, evaluator.GetEvaluator(), + dh::device_vector inputs = std::vector{input_left,input_right}; + evaluator.LaunchEvaluateSplits(input_left.feature_set.size(),dh::ToSpan(inputs),shared_inputs, evaluator.GetEvaluator(), dh::ToSpan(out_splits)); DeviceSplitCandidate result_left = out_splits[0]; @@ -273,16 +279,18 @@ TEST_F(TestPartitionBasedSplit, GpuHist) { cudaMemcpyHostToDevice)); dh::device_vector feature_set{std::vector{0}}; - EvaluateSplitInputs input{0, + EvaluateSplitInputs input{0,0, total_gpair_, - GPUTrainingParam{param_}, dh::ToSpan(feature_set), + dh::ToSpan(d_hist)}; + EvaluateSplitSharedInputs shared_inputs{ + GPUTrainingParam{ param_}, dh::ToSpan(ft), cuts_.cut_ptrs_.ConstDeviceSpan(), cuts_.cut_values_.ConstDeviceSpan(), cuts_.min_vals_.ConstDeviceSpan(), - dh::ToSpan(d_hist)}; - auto split = 
evaluator.EvaluateSingleSplit(input, 0).split; + }; + auto split = evaluator.EvaluateSingleSplit(input, shared_inputs, 0).split; ASSERT_NEAR(split.loss_chg, best_score_, 1e-16); } } // namespace tree diff --git a/tests/python-gpu/test_gpu_with_dask.py b/tests/python-gpu/test_gpu_with_dask.py index 6edcbd2f5b47..8a947312eacc 100644 --- a/tests/python-gpu/test_gpu_with_dask.py +++ b/tests/python-gpu/test_gpu_with_dask.py @@ -285,13 +285,15 @@ def test_early_stopping(self, local_cuda_cluster: LocalCUDACluster) -> None: 'booster'] assert hasattr(booster, 'best_score') dump = booster.get_dump(dump_format='json') + print(booster.best_iteration) assert len(dump) - booster.best_iteration == early_stopping_rounds + 1 valid_X = X valid_y = y cls = dxgb.DaskXGBClassifier(objective='binary:logistic', - tree_method='gpu_hist', - n_estimators=100) + tree_method='gpu_hist', + eval_metric='error', + n_estimators=100) cls.client = client cls.fit(X, y, early_stopping_rounds=early_stopping_rounds, eval_set=[(valid_X, valid_y)])
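
The heart of the patch is the block-to-work mapping in EvaluateSplitsKernel: with N nodes and F active features per node (the updater CHECKs that F is identical across nodes after sampling), the fused launch uses N * F blocks, and block b serves node b / F and feature feature_set[b % F]. The sketches below are editor illustrations, not part of the patch, and every name in them is hypothetical. A minimal CUDA demo of the mapping:

#include <cstdio>
#include <cuda_runtime.h>

// One block per (node, feature) pair: the same arithmetic the fused kernel uses.
__global__ void FusedMappingDemo(int number_active_features) {
  int input_idx = blockIdx.x / number_active_features;    // which node's inputs
  int feature_pos = blockIdx.x % number_active_features;  // slot into that node's feature_set
  if (threadIdx.x == 0) {
    printf("block %d -> node %d, feature slot %d\n", blockIdx.x, input_idx, feature_pos);
  }
}

int main() {
  int num_nodes = 2, number_active_features = 3;
  FusedMappingDemo<<<num_nodes * number_active_features, 32>>>(number_active_features);
  cudaDeviceSynchronize();
  return 0;
}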
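
Each block then emits one per-feature DeviceSplitCandidate, and the per-node winner is picked with cub::DeviceSegmentedReduce::Sum: the patch defines operator+ on candidates to return whichever operand has the larger loss_chg, so a segmented "sum" over each node's F candidates is really a segmented argmax, with segment offsets generated by the idx * number_active_features transform iterator. A toy reproduction of that trick, using only the public CUB API:

#include <cstdio>
#include <cub/cub.cuh>

struct Cand { float loss_chg; };
// Mirrors the patch's operator+ for DeviceSplitCandidate: "addition" keeps the better split.
__host__ __device__ Cand operator+(const Cand &a, const Cand &b) {
  return b.loss_chg > a.loss_chg ? b : a;
}

int main() {
  const int num_nodes = 2, features = 3;
  Cand h_in[num_nodes * features] = {{0.1f}, {0.7f}, {0.3f}, {0.9f}, {0.2f}, {0.4f}};
  int h_off[num_nodes + 1] = {0, 3, 6};  // segment i covers node i's per-feature candidates

  Cand *d_in, *d_out;
  int *d_off;
  cudaMalloc(&d_in, sizeof(h_in));
  cudaMalloc(&d_out, num_nodes * sizeof(Cand));
  cudaMalloc(&d_off, sizeof(h_off));
  cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
  cudaMemcpy(d_off, h_off, sizeof(h_off), cudaMemcpyHostToDevice);

  void *d_temp = nullptr;
  size_t temp_bytes = 0;  // first call only queries the workspace size
  cub::DeviceSegmentedReduce::Sum(d_temp, temp_bytes, d_in, d_out, num_nodes, d_off, d_off + 1);
  cudaMalloc(&d_temp, temp_bytes);
  cub::DeviceSegmentedReduce::Sum(d_temp, temp_bytes, d_in, d_out, num_nodes, d_off, d_off + 1);

  Cand h_out[num_nodes];
  cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
  printf("best per node: %.1f %.1f\n", h_out[0].loss_chg, h_out[1].loss_chg);  // 0.7 0.9
  return 0;
}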
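
SortHistogram now orders one flat index space of num_nodes * total_bins entries with a three-level key: node (i / total_bins), then the feature owning bin i % total_bins, then the categorical leaf-weight proxy. That is what makes each node's categorical bins contiguous and weight-ordered for the kPart path. A compact Thrust sketch of the same lexicographic comparator (toy data; compile with nvcc --extended-lambda):

#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>

int main() {
  const int num_nodes = 2, total_bins = 4;
  // bin -> feature lookup (shared by all nodes) and a per-bin weight, flattened over both nodes
  int h_feat[total_bins] = {0, 0, 1, 1};
  float h_w[num_nodes * total_bins] = {0.9f, 0.1f, 0.3f, 0.2f, 0.5f, 0.6f, 0.0f, 0.4f};
  thrust::device_vector<int> feat(h_feat, h_feat + total_bins);
  thrust::device_vector<float> w(h_w, h_w + num_nodes * total_bins);
  thrust::device_vector<int> idx(num_nodes * total_bins);
  thrust::sequence(idx.begin(), idx.end());
  int const *d_feat = feat.data().get();
  float const *d_w = w.data().get();
  thrust::stable_sort(idx.begin(), idx.end(), [=] __device__(int l, int r) {
    int ln = l / total_bins, rn = r / total_bins;
    if (ln != rn) return ln < rn;                     // node first
    int lf = d_feat[l % total_bins], rf = d_feat[r % total_bins];
    if (lf != rf) return lf < rf;                     // then feature
    return d_w[l] < d_w[r];                           // then weight within the feature
  });
  for (int i : idx) printf("%d ", i);  // prints: 1 0 3 2 4 5 6 7
  printf("\n");
  return 0;
}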
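
The new CatAccessor replaces the old root/left/right special cases with one flat category buffer: node nidx owns the words [nidx * stride, (nidx + 1) * stride), with stride = node_categorical_storage_size, and resizing to (max_nidx + 2) * stride leaves room for both children of the deepest node touched. A host-only sketch of the same indexing:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Toy version of the patch's CatAccessor: one flat buffer, a fixed-size slice per node.
struct CatAccessorSketch {
  uint32_t *cat_storage;
  std::size_t node_categorical_storage_size;
  uint32_t *GetNodeCatStorage(int nidx) const {
    return cat_storage + nidx * node_categorical_storage_size;
  }
};

int main() {
  std::size_t per_node = 4;                        // bitfield words reserved per node
  std::vector<uint32_t> storage(6 * per_node, 0);  // room for nodes 0..5
  CatAccessorSketch acc{storage.data(), per_node};
  acc.GetNodeCatStorage(3)[0] |= 1u << 7;          // mark category 7 for node 3
  printf("%u\n", storage[3 * per_node]);           // prints 128
  return 0;
}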
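
On the updater side, EvaluateSplits batches every candidate's per-node inputs through pinned host memory (the new pinned2 member) and uploads them with a single cudaMemcpyAsync, instead of one evaluation round-trip per node. A sketch of that staging pattern, assuming plain cudaMallocHost is a fair stand-in for dh::PinnedMemory:

#include <cstdio>
#include <cuda_runtime.h>

// Stand-in for the patch's per-node inputs staged through pinned memory.
struct NodeInput {
  int nidx;
  int depth;
};

int main() {
  const int n = 8;
  NodeInput *h_inputs, *d_inputs;
  cudaMallocHost(&h_inputs, n * sizeof(NodeInput));  // pinned, so the H2D copy can be async
  cudaMalloc(&d_inputs, n * sizeof(NodeInput));
  for (int i = 0; i < n; ++i) {
    h_inputs[i] = {i + 1, i / 2};  // fill once on the host, upload in one shot
  }
  cudaMemcpyAsync(d_inputs, h_inputs, n * sizeof(NodeInput), cudaMemcpyDefault);
  cudaDeviceSynchronize();  // the patch likewise syncs the stream before reading results back
  printf("uploaded %d node inputs in one copy\n", n);
  return 0;
}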