diff --git a/.clang-tidy b/.clang-tidy
index cf3bcbffe7c5..3be1d9e0cdce 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -1,21 +1,21 @@
 Checks: 'modernize-*,-modernize-make-*,-modernize-use-auto,-modernize-raw-string-literal,-modernize-avoid-c-arrays,-modernize-use-trailing-return-type,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
 CheckOptions:
-  - { key: readability-identifier-naming.ClassCase, value: CamelCase }
-  - { key: readability-identifier-naming.StructCase, value: CamelCase }
-  - { key: readability-identifier-naming.TypeAliasCase, value: CamelCase }
-  - { key: readability-identifier-naming.TypedefCase, value: CamelCase }
-  - { key: readability-identifier-naming.TypeTemplateParameterCase, value: CamelCase }
-  - { key: readability-identifier-naming.MemberCase, value: lower_case }
-  - { key: readability-identifier-naming.PrivateMemberSuffix, value: '_' }
-  - { key: readability-identifier-naming.ProtectedMemberSuffix, value: '_' }
-  - { key: readability-identifier-naming.EnumCase, value: CamelCase }
-  - { key: readability-identifier-naming.EnumConstant, value: CamelCase }
-  - { key: readability-identifier-naming.EnumConstantPrefix, value: k }
-  - { key: readability-identifier-naming.GlobalConstantCase, value: CamelCase }
-  - { key: readability-identifier-naming.GlobalConstantPrefix, value: k }
-  - { key: readability-identifier-naming.StaticConstantCase, value: CamelCase }
-  - { key: readability-identifier-naming.StaticConstantPrefix, value: k }
-  - { key: readability-identifier-naming.ConstexprVariableCase, value: CamelCase }
-  - { key: readability-identifier-naming.ConstexprVariablePrefix, value: k }
-  - { key: readability-identifier-naming.FunctionCase, value: CamelCase }
-  - { key: readability-identifier-naming.NamespaceCase, value: lower_case }
+  - { key: readability-identifier-naming.ClassCase,                 value: CamelCase  }
+  - { key: readability-identifier-naming.StructCase,                value: CamelCase  }
+  - { key: readability-identifier-naming.TypeAliasCase,             value: CamelCase  }
+  - { key: readability-identifier-naming.TypedefCase,               value: CamelCase  }
+  - { key: readability-identifier-naming.TypeTemplateParameterCase, value: CamelCase  }
+  - { key: readability-identifier-naming.MemberCase,                value: lower_case }
+  - { key: readability-identifier-naming.PrivateMemberSuffix,       value: '_'        }
+  - { key: readability-identifier-naming.ProtectedMemberSuffix,     value: '_'        }
+  - { key: readability-identifier-naming.EnumCase,                  value: CamelCase  }
+  - { key: readability-identifier-naming.EnumConstant,              value: CamelCase  }
+  - { key: readability-identifier-naming.EnumConstantPrefix,        value: k          }
+  - { key: readability-identifier-naming.GlobalConstantCase,        value: CamelCase  }
+  - { key: readability-identifier-naming.GlobalConstantPrefix,      value: k          }
+  - { key: readability-identifier-naming.StaticConstantCase,        value: CamelCase  }
+  - { key: readability-identifier-naming.StaticConstantPrefix,      value: k          }
+  - { key: readability-identifier-naming.ConstexprVariableCase,     value: CamelCase  }
+  - { key: readability-identifier-naming.ConstexprVariablePrefix,   value: k          }
+  - { key: readability-identifier-naming.FunctionCase,              value: CamelCase  }
+  - { key: readability-identifier-naming.NamespaceCase,             value: lower_case }
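The realignment above is cosmetic; what matters is the naming scheme these `readability-identifier-naming` options encode, which the renames in the rest of this patch follow. A minimal sketch of code that satisfies the rules (all identifiers here are illustrative, not taken from the codebase):

```cpp
namespace tree_util {                         // NamespaceCase: lower_case

constexpr float kDefaultLearningRate = 0.3f;  // ConstexprVariable: CamelCase + 'k' prefix

enum class DeviceKind { kCpu, kCuda };        // enum and constants: CamelCase, 'k' prefix

class HistogramBuilder {                      // ClassCase: CamelCase
 public:
  void BuildHist(int nidx);                   // FunctionCase: CamelCase

 private:
  int n_bins_{0};                             // members: lower_case, private suffix '_'
};

}  // namespace tree_util
```

This is also why members such as `device_idx` and `ridx_segments` gain a trailing underscore later in the diff.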
diff --git a/include/xgboost/span.h b/include/xgboost/span.h
index 5e5ed40f9b27..4850d7543bf9 100644
--- a/include/xgboost/span.h
+++ b/include/xgboost/span.h
@@ -137,7 +137,7 @@ class SpanIterator {
                IsConst, const ElementType, ElementType>::type&;
   using pointer = typename std::add_pointer<reference>::type;  // NOLINT

-  XGBOOST_DEVICE constexpr SpanIterator() : span_{nullptr}, index_{0} {}  // NOLINT
+  constexpr SpanIterator() = default;

   XGBOOST_DEVICE constexpr SpanIterator(
       const SpanType* _span,
@@ -410,8 +410,7 @@ class Span {
   using const_reverse_iterator = const detail::SpanIterator<Span<T, Extent>, true>;  // NOLINT

   // constructors
-
-  XGBOOST_DEVICE constexpr Span() __span_noexcept : size_(0), data_(nullptr) {}  // NOLINT
+  constexpr Span() __span_noexcept = default;

   XGBOOST_DEVICE Span(pointer _ptr, index_type _count) :
       size_(_count), data_(_ptr) {
diff --git a/src/common/bitfield.h b/src/common/bitfield.h
index 9a919615dbbd..4353a5269473 100644
--- a/src/common/bitfield.h
+++ b/src/common/bitfield.h
@@ -86,8 +86,8 @@ struct BitFieldContainer {
   XGBOOST_DEVICE explicit BitFieldContainer(common::Span<value_type> bits) : bits_{bits} {}
   XGBOOST_DEVICE BitFieldContainer(BitFieldContainer const& other) : bits_{other.bits_} {}

-  common::Span<value_type> Bits() { return bits_; };
-  common::Span<value_type const> Bits() const { return bits_; };
+  common::Span<value_type> Bits() { return bits_; }
+  common::Span<value_type const> Bits() const { return bits_; }

 /*\brief Compute the size of needed memory allocation.  The returned value is in terms
  *       of number of elements with `BitFieldContainer::value_type'.
diff --git a/src/common/device_helpers.cuh b/src/common/device_helpers.cuh
index 5bf5c420b38f..6d51c2097da6 100644
--- a/src/common/device_helpers.cuh
+++ b/src/common/device_helpers.cuh
@@ -868,7 +868,7 @@ template <typename FunctionT, typename SegmentIterT, typename OffsetT>
 void SparseTransformLbs(int device_idx, dh::CubMemory *temp_memory,
                         OffsetT count, SegmentIterT segments,
                         OffsetT num_segments, FunctionT f) {
-  typedef typename cub::CubVector<OffsetT, 2>::Type CoordinateT;
+  using CoordinateT = typename cub::CubVector<OffsetT, 2>::Type;
   dh::safe_cuda(cudaSetDevice(device_idx));
   const int BLOCK_THREADS = 256;
   const int ITEMS_PER_THREAD = 1;
diff --git a/src/common/hist_util.h b/src/common/hist_util.h
index c6e6a04c6bef..d94067289f1b 100644
--- a/src/common/hist_util.h
+++ b/src/common/hist_util.h
@@ -293,14 +293,14 @@ struct Index {
     return reinterpret_cast<T*>(t)[i];
   }

-  using Func = uint32_t (*)(void* ,size_t);
+  using Func = uint32_t (*)(void*, size_t);

   std::vector<uint8_t> data_;
   std::vector<uint32_t> offset_;  // size of this field is equal to number of features
   void* data_ptr_;
-  BinTypeSize binTypeSize_;
-  size_t p_;
-  uint32_t* offset_ptr_;
+  BinTypeSize binTypeSize_ {kUint8BinsTypeSize};
+  size_t p_ {1};
+  uint32_t* offset_ptr_ {nullptr};
   Func func_;
 };
diff --git a/src/common/row_set.h b/src/common/row_set.h
index ad0149e8b1d3..25f7c739d8d7 100644
--- a/src/common/row_set.h
+++ b/src/common/row_set.h
@@ -90,7 +90,7 @@ class RowSetCollection {
     elem_of_each_node_.emplace_back(Elem(begin, end, 0));
   }

-  std::vector<size_t>* Data() { return &row_indices_; };
+  std::vector<size_t>* Data() { return &row_indices_; }

   // split rowset into two
   inline void AddSplit(unsigned node_id,
                        unsigned left_node_id,
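A recurring pattern in the headers above: hand-written constructors are swapped for `= default` plus in-class default member initializers (`binTypeSize_ {kUint8BinsTypeSize}` and friends), which is what clang-tidy's `modernize-use-equals-default` and `modernize-use-default-member-init` push for. A condensed before/after sketch, with made-up member names standing in for the real ones:

```cpp
#include <cstdint>

// Before: defaults hidden inside a constructor initializer list.
struct IndexBefore {
  IndexBefore() : bin_type_size(1), offset_ptr(nullptr) {}
  std::uint8_t bin_type_size;
  std::uint32_t* offset_ptr;
};

// After: defaults sit on the declarations themselves, and the
// constructor can be defaulted (or omitted entirely).
struct IndexAfter {
  IndexAfter() = default;
  std::uint8_t bin_type_size{1};
  std::uint32_t* offset_ptr{nullptr};
};
```

The upside is that every constructor, including ones added later, starts from the same well-defined member values.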
diff --git a/src/common/transform.h b/src/common/transform.h
index d204ebf8655d..fa2d0d3794a3 100644
--- a/src/common/transform.h
+++ b/src/common/transform.h
@@ -133,8 +133,9 @@ class Transform {
   template <typename std::enable_if<CompiledWithCuda>::type* = nullptr,
             typename... HDV>
   void LaunchCUDA(Functor _func, HDV*... _vectors) const {
-    if (shard_)
+    if (shard_) {
       UnpackShard(device_, _vectors...);
+    }

     size_t range_size = *range_.end() - *range_.begin();
diff --git a/src/data/ellpack_page.cuh b/src/data/ellpack_page.cuh
index 88faa995bad2..98ed3587a490 100644
--- a/src/data/ellpack_page.cuh
+++ b/src/data/ellpack_page.cuh
@@ -185,6 +185,9 @@ class EllpackPageImpl {
     base_rowid = row_id;
   }

+  common::HistogramCuts& Cuts() { return cuts_; }
+  common::HistogramCuts const& Cuts() const { return cuts_; }
+
   /*! \return Estimation of memory cost of this page. */
   static size_t MemCostBytes(size_t num_rows, size_t row_stride, const common::HistogramCuts& cuts);

@@ -220,8 +223,9 @@ public:
   size_t n_rows{};
   /*! \brief global index of histogram, which is stored in ELLPack format. */
   HostDeviceVector<common::CompressedByteT> gidx_buffer;
+
+ private:
   common::HistogramCuts cuts_;
-private:
   common::Monitor monitor_;
 };
diff --git a/src/data/ellpack_page_raw_format.cu b/src/data/ellpack_page_raw_format.cu
index 147d8fb4dfd9..d4caf37e2be3 100644
--- a/src/data/ellpack_page_raw_format.cu
+++ b/src/data/ellpack_page_raw_format.cu
@@ -17,9 +17,9 @@ class EllpackPageRawFormat : public SparsePageFormat<EllpackPage> {
  public:
   bool Read(EllpackPage* page, dmlc::SeekStream* fi) override {
     auto* impl = page->Impl();
-    fi->Read(&impl->cuts_.cut_values_.HostVector());
-    fi->Read(&impl->cuts_.cut_ptrs_.HostVector());
-    fi->Read(&impl->cuts_.min_vals_.HostVector());
+    fi->Read(&impl->Cuts().cut_values_.HostVector());
+    fi->Read(&impl->Cuts().cut_ptrs_.HostVector());
+    fi->Read(&impl->Cuts().min_vals_.HostVector());
     fi->Read(&impl->n_rows);
     fi->Read(&impl->is_dense);
     fi->Read(&impl->row_stride);
@@ -38,9 +38,9 @@ class EllpackPageRawFormat : public SparsePageFormat<EllpackPage> {
   void Write(const EllpackPage& page, dmlc::Stream* fo) override {
     auto* impl = page.Impl();
-    fo->Write(impl->cuts_.cut_values_.ConstHostVector());
-    fo->Write(impl->cuts_.cut_ptrs_.ConstHostVector());
-    fo->Write(impl->cuts_.min_vals_.ConstHostVector());
+    fo->Write(impl->Cuts().cut_values_.ConstHostVector());
+    fo->Write(impl->Cuts().cut_ptrs_.ConstHostVector());
+    fo->Write(impl->Cuts().min_vals_.ConstHostVector());
     fo->Write(impl->n_rows);
     fo->Write(impl->is_dense);
     fo->Write(impl->row_stride);
diff --git a/src/metric/elementwise_metric.cu b/src/metric/elementwise_metric.cu
index 58532dcf8c65..72381d5d6696 100644
--- a/src/metric/elementwise_metric.cu
+++ b/src/metric/elementwise_metric.cu
@@ -350,7 +350,7 @@ struct EvalEWiseBase : public Metric {
   }

 private:
-  Policy policy_ {};
+  Policy policy_;
   ElementWiseMetricsReduction<Policy> reducer_{policy_};
 };
diff --git a/src/metric/metric_common.h b/src/metric/metric_common.h
index de814970bc5f..d676797f4873 100644
--- a/src/metric/metric_common.h
+++ b/src/metric/metric_common.h
@@ -66,7 +66,7 @@ class PackedReduceResult {
   double weights_sum_ { 0 };

 public:
-  XGBOOST_DEVICE PackedReduceResult() : residue_sum_{0}, weights_sum_{0} {}  // NOLINT
+  XGBOOST_DEVICE PackedReduceResult() {}  // NOLINT
   XGBOOST_DEVICE PackedReduceResult(double residue, double weight)
       : residue_sum_{residue}, weights_sum_{weight} {}
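The `Cuts()` pair added to `EllpackPageImpl` above is a standard encapsulation move: `cuts_` becomes private (see the access-specifier shuffle in `ellpack_page.cuh`), and callers go through a mutable overload for writes and a const overload for reads. A self-contained sketch of the shape, with a stand-in type for `common::HistogramCuts`:

```cpp
#include <vector>

struct HistogramCuts {           // stand-in for common::HistogramCuts
  std::vector<float> values;
};

class PageLikeEllpack {          // stand-in for EllpackPageImpl
 public:
  HistogramCuts& Cuts() { return cuts_; }              // writers, e.g. raw-format Read()
  const HistogramCuts& Cuts() const { return cuts_; }  // readers, e.g. bin-count queries

 private:
  HistogramCuts cuts_;           // no longer reachable as page->cuts_
};
```

Every former `impl->cuts_` use in the rest of the patch becomes `impl->Cuts()`, so call sites stay almost unchanged.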
diff --git a/src/tree/gpu_hist/gradient_based_sampler.cu b/src/tree/gpu_hist/gradient_based_sampler.cu
index c89e719d9795..169eee0593eb 100644
--- a/src/tree/gpu_hist/gradient_based_sampler.cu
+++ b/src/tree/gpu_hist/gradient_based_sampler.cu
@@ -153,7 +153,7 @@ ExternalMemoryNoSampling::ExternalMemoryNoSampling(EllpackPageImpl* page,
                                                    size_t n_rows,
                                                    const BatchParam& batch_param)
     : batch_param_(batch_param),
-      page_(new EllpackPageImpl(batch_param.gpu_id, page->cuts_, page->is_dense,
+      page_(new EllpackPageImpl(batch_param.gpu_id, page->Cuts(), page->is_dense,
                                 page->row_stride, n_rows)) {}

 GradientBasedSample ExternalMemoryNoSampling::Sample(common::Span<GradientPair> gpair,
@@ -218,7 +218,7 @@ GradientBasedSample ExternalMemoryUniformSampling::Sample(common::Span<GradientPair> gpair,
-      batch_param_.gpu_id, original_page_->cuts_, original_page_->is_dense,
+      batch_param_.gpu_id, original_page_->Cuts(), original_page_->is_dense,
       original_page_->row_stride, sample_rows));

   // Compact the ELLPACK pages into the single sample page.
@@ -298,7 +298,7 @@ GradientBasedSample ExternalMemoryGradientBasedSampling::Sample(common::Span<GradientPair> gpair,
-  page_.reset(new EllpackPageImpl(batch_param_.gpu_id, original_page_->cuts_,
+  page_.reset(new EllpackPageImpl(batch_param_.gpu_id, original_page_->Cuts(),
                                   original_page_->is_dense, original_page_->row_stride,
                                   sample_rows));
diff --git a/src/tree/gpu_hist/row_partitioner.cu b/src/tree/gpu_hist/row_partitioner.cu
index ad21fcf63074..7427362e9609 100644
--- a/src/tree/gpu_hist/row_partitioner.cu
+++ b/src/tree/gpu_hist/row_partitioner.cu
@@ -64,54 +64,55 @@ void RowPartitioner::SortPosition(common::Span<bst_node_t> position,
   cub::DeviceScan::ExclusiveSum(temp_storage.data().get(), temp_storage_bytes,
                                 in_itr, out_itr, position.size(), stream);
 }
+
 RowPartitioner::RowPartitioner(int device_idx, size_t num_rows)
-    : device_idx(device_idx) {
-  dh::safe_cuda(cudaSetDevice(device_idx));
-  ridx_a.resize(num_rows);
-  ridx_b.resize(num_rows);
-  position_a.resize(num_rows);
-  position_b.resize(num_rows);
-  ridx = dh::DoubleBuffer{&ridx_a, &ridx_b};
-  position = dh::DoubleBuffer{&position_a, &position_b};
-  ridx_segments.emplace_back(Segment(0, num_rows));
+    : device_idx_(device_idx) {
+  dh::safe_cuda(cudaSetDevice(device_idx_));
+  ridx_a_.resize(num_rows);
+  ridx_b_.resize(num_rows);
+  position_a_.resize(num_rows);
+  position_b_.resize(num_rows);
+  ridx_ = dh::DoubleBuffer{&ridx_a_, &ridx_b_};
+  position_ = dh::DoubleBuffer{&position_a_, &position_b_};
+  ridx_segments_.emplace_back(Segment(0, num_rows));
   thrust::sequence(
-      thrust::device_pointer_cast(ridx.CurrentSpan().data()),
-      thrust::device_pointer_cast(ridx.CurrentSpan().data() + ridx.Size()));
+      thrust::device_pointer_cast(ridx_.CurrentSpan().data()),
+      thrust::device_pointer_cast(ridx_.CurrentSpan().data() + ridx_.Size()));
   thrust::fill(
-      thrust::device_pointer_cast(position.Current()),
-      thrust::device_pointer_cast(position.Current() + position.Size()), 0);
-  left_counts.resize(256);
-  thrust::fill(left_counts.begin(), left_counts.end(), 0);
-  streams.resize(2);
-  for (auto& stream : streams) {
+      thrust::device_pointer_cast(position_.Current()),
+      thrust::device_pointer_cast(position_.Current() + position_.Size()), 0);
+  left_counts_.resize(256);
+  thrust::fill(left_counts_.begin(), left_counts_.end(), 0);
+  streams_.resize(2);
+  for (auto& stream : streams_) {
     dh::safe_cuda(cudaStreamCreate(&stream));
   }
 }

 RowPartitioner::~RowPartitioner() {
-  dh::safe_cuda(cudaSetDevice(device_idx));
-  for (auto& stream : streams) {
+  dh::safe_cuda(cudaSetDevice(device_idx_));
+  for (auto& stream : streams_) {
     dh::safe_cuda(cudaStreamDestroy(stream));
   }
 }

 common::Span<const RowPartitioner::RowIndexT> RowPartitioner::GetRows(
     bst_node_t nidx) {
-  auto segment = ridx_segments.at(nidx);
+  auto segment = ridx_segments_.at(nidx);
   // Return empty span here as a valid result
   // Will error if we try to construct a span from a pointer with size 0
   if (segment.Size() == 0) {
     return common::Span<const RowPartitioner::RowIndexT>();
   }
-  return ridx.CurrentSpan().subspan(segment.begin, segment.Size());
+  return ridx_.CurrentSpan().subspan(segment.begin, segment.Size());
 }

 common::Span<const RowPartitioner::RowIndexT> RowPartitioner::GetRows() {
-  return ridx.CurrentSpan();
+  return ridx_.CurrentSpan();
 }

 common::Span<const bst_node_t> RowPartitioner::GetPosition() {
-  return position.CurrentSpan();
+  return position_.CurrentSpan();
 }

 std::vector<RowPartitioner::RowIndexT> RowPartitioner::GetRowsHost(
     bst_node_t nidx) {
@@ -135,22 +136,22 @@ void RowPartitioner::SortPositionAndCopy(const Segment& segment,
                                          cudaStream_t stream) {
   SortPosition(
       // position_in
-      common::Span<bst_node_t>(position.Current() + segment.begin,
+      common::Span<bst_node_t>(position_.Current() + segment.begin,
                                segment.Size()),
       // position_out
-      common::Span<bst_node_t>(position.Other() + segment.begin,
+      common::Span<bst_node_t>(position_.Other() + segment.begin,
                                segment.Size()),
       // row index in
-      common::Span<RowIndexT>(ridx.Current() + segment.begin, segment.Size()),
+      common::Span<RowIndexT>(ridx_.Current() + segment.begin, segment.Size()),
       // row index out
-      common::Span<RowIndexT>(ridx.Other() + segment.begin, segment.Size()),
+      common::Span<RowIndexT>(ridx_.Other() + segment.begin, segment.Size()),
       left_nidx, right_nidx, d_left_count, stream);
   // Copy back key/value
-  const auto d_position_current = position.Current() + segment.begin;
-  const auto d_position_other = position.Other() + segment.begin;
-  const auto d_ridx_current = ridx.Current() + segment.begin;
-  const auto d_ridx_other = ridx.Other() + segment.begin;
-  dh::LaunchN(device_idx, segment.Size(), stream, [=] __device__(size_t idx) {
+  const auto d_position_current = position_.Current() + segment.begin;
+  const auto d_position_other = position_.Other() + segment.begin;
+  const auto d_ridx_current = ridx_.Current() + segment.begin;
+  const auto d_ridx_other = ridx_.Other() + segment.begin;
+  dh::LaunchN(device_idx_, segment.Size(), stream, [=] __device__(size_t idx) {
     d_position_current[idx] = d_position_other[idx];
     d_ridx_current[idx] = d_ridx_other[idx];
   });
diff --git a/src/tree/gpu_hist/row_partitioner.cuh b/src/tree/gpu_hist/row_partitioner.cuh
index 4818d71abc9f..03334efe6ecd 100644
--- a/src/tree/gpu_hist/row_partitioner.cuh
+++ b/src/tree/gpu_hist/row_partitioner.cuh
@@ -36,7 +36,7 @@ class RowPartitioner {
   static constexpr bst_node_t kIgnoredTreePosition = -1;

  private:
-  int device_idx;
+  int device_idx_;
   /*! \brief In here if you want to find the rows belong to a node nid, first you need to
    * get the indices segment from ridx_segments[nid], then get the row index that
    * represents position of row in input data X.  `RowPartitioner::GetRows` would be a
@@ -45,22 +45,22 @@ class RowPartitioner {
    * node id -> segment -> indices of rows belonging to node
    */
   /*! \brief Range of row index for each node, pointers into ridx below. */
-  std::vector<Segment> ridx_segments;
-  dh::caching_device_vector<RowIndexT> ridx_a;
-  dh::caching_device_vector<RowIndexT> ridx_b;
-  dh::caching_device_vector<bst_node_t> position_a;
-  dh::caching_device_vector<bst_node_t> position_b;
+  std::vector<Segment> ridx_segments_;
+  dh::caching_device_vector<RowIndexT> ridx_a_;
+  dh::caching_device_vector<RowIndexT> ridx_b_;
+  dh::caching_device_vector<bst_node_t> position_a_;
+  dh::caching_device_vector<bst_node_t> position_b_;
   /*! \brief mapping for node id -> rows.
    * This looks like:
    * node id  |    1    |    2   |
    * rows idx | 3, 5, 1 | 13, 31 |
    */
-  dh::DoubleBuffer<RowIndexT> ridx;
+  dh::DoubleBuffer<RowIndexT> ridx_;
   /*! \brief mapping for row -> node id. */
-  dh::DoubleBuffer<bst_node_t> position;
+  dh::DoubleBuffer<bst_node_t> position_;
   dh::caching_device_vector<int64_t>
-      left_counts;  // Useful to keep a bunch of zeroed memory for sort position
-  std::vector<cudaStream_t> streams;
+      left_counts_;  // Useful to keep a bunch of zeroed memory for sort position
+  std::vector<cudaStream_t> streams_;

  public:
   RowPartitioner(int device_idx, size_t num_rows);
@@ -108,19 +108,19 @@ class RowPartitioner {
   template <typename UpdatePositionOpT>
   void UpdatePosition(bst_node_t nidx, bst_node_t left_nidx,
                       bst_node_t right_nidx, UpdatePositionOpT op) {
-    dh::safe_cuda(cudaSetDevice(device_idx));
-    Segment segment = ridx_segments.at(nidx);  // rows belongs to node nidx
-    auto d_ridx = ridx.CurrentSpan();
-    auto d_position = position.CurrentSpan();
-    if (left_counts.size() <= nidx) {
-      left_counts.resize((nidx * 2) + 1);
-      thrust::fill(left_counts.begin(), left_counts.end(), 0);
+    dh::safe_cuda(cudaSetDevice(device_idx_));
+    Segment segment = ridx_segments_.at(nidx);  // rows belongs to node nidx
+    auto d_ridx = ridx_.CurrentSpan();
+    auto d_position = position_.CurrentSpan();
+    if (left_counts_.size() <= nidx) {
+      left_counts_.resize((nidx * 2) + 1);
+      thrust::fill(left_counts_.begin(), left_counts_.end(), 0);
     }
     // Now we divide the row segment into left and right node.
-    int64_t* d_left_count = left_counts.data().get() + nidx;
+    int64_t* d_left_count = left_counts_.data().get() + nidx;
     // Launch 1 thread for each row
-    dh::LaunchN<1, 128>(device_idx, segment.Size(), [=] __device__(size_t idx) {
+    dh::LaunchN<1, 128>(device_idx_, segment.Size(), [=] __device__(size_t idx) {
       // LaunchN starts from zero, so we restore the row index by adding segment.begin
       idx += segment.begin;
       RowIndexT ridx = d_ridx[idx];
@@ -132,19 +132,19 @@ class RowPartitioner {
     // Overlap device to host memory copy (left_count) with sort
     int64_t left_count;
     dh::safe_cuda(cudaMemcpyAsync(&left_count, d_left_count, sizeof(int64_t),
-                                  cudaMemcpyDeviceToHost, streams[0]));
+                                  cudaMemcpyDeviceToHost, streams_[0]));

     SortPositionAndCopy(segment, left_nidx, right_nidx, d_left_count,
-                        streams[1]);
+                        streams_[1]);

-    dh::safe_cuda(cudaStreamSynchronize(streams[0]));
+    dh::safe_cuda(cudaStreamSynchronize(streams_[0]));
     CHECK_LE(left_count, segment.Size());
     CHECK_GE(left_count, 0);
-    ridx_segments.resize(std::max(int(ridx_segments.size()),
-                                  std::max(left_nidx, right_nidx) + 1));
-    ridx_segments[left_nidx] =
+    ridx_segments_.resize(std::max(static_cast<int>(ridx_segments_.size()),
+                                   std::max(left_nidx, right_nidx) + 1));
+    ridx_segments_[left_nidx] =
         Segment(segment.begin, segment.begin + left_count);
-    ridx_segments[right_nidx] =
+    ridx_segments_[right_nidx] =
         Segment(segment.begin + left_count, segment.end);
   }
@@ -159,9 +159,9 @@ class RowPartitioner {
   */
   template <typename FinalisePositionOpT>
   void FinalisePosition(FinalisePositionOpT op) {
-    auto d_position = position.Current();
-    const auto d_ridx = ridx.Current();
-    dh::LaunchN(device_idx, position.Size(), [=] __device__(size_t idx) {
+    auto d_position = position_.Current();
+    const auto d_ridx = ridx_.Current();
+    dh::LaunchN(device_idx_, position_.Size(), [=] __device__(size_t idx) {
       auto position = d_position[idx];
       RowIndexT ridx = d_ridx[idx];
       bst_node_t new_position = op(ridx, position);
@@ -189,10 +189,10 @@ class RowPartitioner {
   /** \brief Used to demarcate a contiguous set of row indices associated with
    * some tree node. */
   struct Segment {
-    size_t begin;
-    size_t end;
+    size_t begin { 0 };
+    size_t end { 0 };

-    Segment() : begin{0}, end{0} {}
+    Segment() = default;

     Segment(size_t begin, size_t end) : begin(begin), end(end) {
       CHECK_GE(end, begin);
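The comments kept in `row_partitioner.cuh` describe the data structure behind these members: one flat row-index buffer, plus per-node segments into it (`node id | 1 | 2 |`, `rows idx | 3, 5, 1 | 13, 31 |`). A host-side sketch of that layout (the real buffers live on the device inside `dh::DoubleBuffer`):

```cpp
#include <cstddef>
#include <vector>

struct Segment {               // mirrors RowPartitioner::Segment
  std::size_t begin{0};
  std::size_t end{0};
};

int main() {
  // All row indices, grouped so each node's rows are contiguous.
  std::vector<unsigned> ridx = {3, 5, 1, 13, 31};
  std::vector<Segment> ridx_segments(3);
  ridx_segments[1] = Segment{0, 3};  // node 1 -> rows 3, 5, 1
  ridx_segments[2] = Segment{3, 5};  // node 2 -> rows 13, 31
  // GetRows(nidx) is then just the subspan [begin, end) of ridx.
}
```

`UpdatePosition` maintains this invariant by partitioning a node's segment into a left and a right part after each split.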
diff --git a/src/tree/updater_gpu_common.cuh b/src/tree/updater_gpu_common.cuh
index 81ce1819e199..87008f8da984 100644
--- a/src/tree/updater_gpu_common.cuh
+++ b/src/tree/updater_gpu_common.cuh
@@ -53,16 +53,15 @@ enum DefaultDirection {
 };

 struct DeviceSplitCandidate {
-  float loss_chg;
-  DefaultDirection dir;
-  int findex;
-  float fvalue;
+  float loss_chg {-FLT_MAX};
+  DefaultDirection dir {kLeftDir};
+  int findex {-1};
+  float fvalue {0};
   GradientPair left_sum;
   GradientPair right_sum;

-  XGBOOST_DEVICE DeviceSplitCandidate()
-      : loss_chg(-FLT_MAX), dir(kLeftDir), fvalue(0), findex(-1) {}
+  XGBOOST_DEVICE DeviceSplitCandidate() {}  // NOLINT

   template <typename ParamT>
   XGBOOST_DEVICE void Update(const DeviceSplitCandidate& other,
@@ -105,7 +104,7 @@ struct DeviceSplitCandidate {
 struct DeviceSplitCandidateReduceOp {
   GPUTrainingParam param;
-  DeviceSplitCandidateReduceOp(GPUTrainingParam param) : param(param) {}
+  explicit DeviceSplitCandidateReduceOp(GPUTrainingParam param) : param(std::move(param)) {}
   XGBOOST_DEVICE DeviceSplitCandidate operator()(
       const DeviceSplitCandidate& a, const DeviceSplitCandidate& b) const {
     DeviceSplitCandidate best;
@@ -117,38 +116,26 @@ struct DeviceSplitCandidateReduceOp {

 struct DeviceNodeStats {
   GradientPair sum_gradients;
-  float root_gain;
-  float weight;
+  float root_gain {-FLT_MAX};
+  float weight {-FLT_MAX};

   /** default direction for missing values */
-  DefaultDirection dir;
+  DefaultDirection dir {kLeftDir};
   /** threshold value for comparison */
-  float fvalue;
+  float fvalue {0.0f};

   GradientPair left_sum;
   GradientPair right_sum;

   /** \brief The feature index. */
-  int fidx;
+  int fidx{kUnusedNode};
   /** node id (used as key for reduce/scan) */
-  NodeIdT idx;
-
-  HOST_DEV_INLINE DeviceNodeStats()
-      : sum_gradients(),
-        root_gain(-FLT_MAX),
-        weight(-FLT_MAX),
-        dir(kLeftDir),
-        fvalue(0.f),
-        left_sum(),
-        right_sum(),
-        fidx(kUnusedNode),
-        idx(kUnusedNode) {}
+  NodeIdT idx{kUnusedNode};
+
+  XGBOOST_DEVICE DeviceNodeStats() {}  // NOLINT

   template <typename ParamT>
   HOST_DEV_INLINE DeviceNodeStats(GradientPair sum_gradients, NodeIdT nidx,
                                   const ParamT& param)
       : sum_gradients(sum_gradients),
-        dir(kLeftDir),
-        fvalue(0.f),
-        fidx(kUnusedNode),
         idx(nidx) {
     this->root_gain =
         CalcGain(param, sum_gradients.GetGrad(), sum_gradients.GetHess());
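One detail worth noting above: `DeviceSplitCandidate` and `DeviceNodeStats` move their defaults onto the members, yet keep an empty-bodied constructor with a `NOLINT` rather than `= default`. Presumably this is because the constructors carry `__host__ __device__` annotations (via `XGBOOST_DEVICE`/`HOST_DEV_INLINE`), which nvcc has historically handled poorly on defaulted declarations; the `NOLINT` then silences `modernize-use-equals-default`. A sketch of the resulting style, under that assumption:

```cpp
#include <cfloat>

#if defined(__CUDACC__)
#define XGBOOST_DEVICE __host__ __device__
#else
#define XGBOOST_DEVICE
#endif

struct SplitCandidateSketch {     // illustrative stand-in, not the real struct
  float loss_chg{-FLT_MAX};       // defaults live on the members now...
  int findex{-1};
  float fvalue{0};

  XGBOOST_DEVICE SplitCandidateSketch() {}  // NOLINT  ...so the body stays empty
};
```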
diff --git a/src/tree/updater_gpu_hist.cu b/src/tree/updater_gpu_hist.cu
index fd0c6ea2d4ba..65085bb70622 100644
--- a/src/tree/updater_gpu_hist.cu
+++ b/src/tree/updater_gpu_hist.cu
@@ -628,7 +628,7 @@ struct GPUHistMakerDevice {
     auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
     auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);

-    dh::LaunchN(device_id, page->cuts_.TotalBins(), [=] __device__(size_t idx) {
+    dh::LaunchN(device_id, page->Cuts().TotalBins(), [=] __device__(size_t idx) {
       d_node_hist_subtraction[idx] =
           d_node_hist_parent[idx] - d_node_hist_histogram[idx];
     });
@@ -756,7 +756,7 @@ struct GPUHistMakerDevice {
     reducer->AllReduceSum(
         reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
         reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
-        page->cuts_.TotalBins() * (sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)));
+        page->Cuts().TotalBins() * (sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)));
     reducer->Synchronize();

     monitor.StopCuda("AllReduce");
@@ -945,14 +945,14 @@ inline void GPUHistMakerDevice<GradientSumT>::InitHistogram() {
   // check if we can use shared memory for building histograms
   // (assuming atleast we need 2 CTAs per SM to maintain decent latency
   // hiding)
-  auto histogram_size = sizeof(GradientSumT) * page->cuts_.TotalBins();
+  auto histogram_size = sizeof(GradientSumT) * page->Cuts().TotalBins();
   auto max_smem = dh::MaxSharedMemory(device_id);
   if (histogram_size <= max_smem) {
     use_shared_memory_histograms = true;
   }

   // Init histogram
-  hist.Init(device_id, page->cuts_.TotalBins());
+  hist.Init(device_id, page->Cuts().TotalBins());
 }

 template <typename GradientSumT>
diff --git a/tests/ci_build/tidy.py b/tests/ci_build/tidy.py
index 533a97a844ce..1ee86de5aafd 100755
--- a/tests/ci_build/tidy.py
+++ b/tests/ci_build/tidy.py
@@ -17,7 +17,10 @@ def call(args):
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
     error_msg = completed.stdout.decode('utf-8')
-    matched = re.search('(src|tests|include)/.*warning:', error_msg,
+    # `workspace` is a name used in Jenkins CI.  Normally we should keep the
+    # dir as `xgboost`.
+    matched = re.search('(workspace|xgboost)/.*(src|tests|include)/.*warning:',
+                        error_msg,
                         re.MULTILINE)
     if matched is None:
         return_code = 0
@@ -211,8 +214,6 @@ def run(self):
         for i, (process_status, tidy_status, msg) in enumerate(results):
             # Don't enforce clang-tidy to pass for now due to namespace
             # for cub in thrust is not correct.
-            if process_status != 0:
-                print('Command returned an error.  Ignoring.', all_files[i])
             if tidy_status == 1:
                 passed = False
                 print(BAR, '\n'
@@ -221,7 +222,8 @@ def run(self):
                       'Message:\n', msg, BAR, '\n')
         if not passed:
-            print('Please correct clang-tidy warnings.')
+            print('Errors in `thrust` namespace can be safely ignored.',
+                  'Please address rest of the clang-tidy warnings.')
         return passed
@@ -266,7 +268,8 @@ def test_tidy(args):
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='Run clang-tidy.')
     parser.add_argument('--cpp', type=int, default=1)
-    parser.add_argument('--tidy-version', type=int, default=None)
+    parser.add_argument('--tidy-version', type=int, default=None,
+                        help='Specify the version of preferred clang-tidy.')
     parser.add_argument('--cuda', type=int, default=1)
     parser.add_argument('--use-dmlc-gtest', type=int, default=1,
                         help='Whether to use gtest bundled in dmlc-core.')
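The widened regular expression in `tidy.py` is the mechanism behind the new "errors in `thrust` can be ignored" message: only warnings whose paths pass through a `workspace` (Jenkins) or `xgboost` checkout still fail the build. A small C++ mirror of the filter to make that concrete (the log lines are invented):

```cpp
#include <iostream>
#include <regex>
#include <string>

int main() {
  // Same pattern tidy.py now searches for.
  std::regex const filter{"(workspace|xgboost)/.*(src|tests|include)/.*warning:"};

  std::string const in_tree =
      "/home/ci/workspace/src/common/span.h:10: warning: ...";
  std::string const system_header =
      "/usr/local/cuda/include/thrust/sort.h:42: warning: ...";

  std::cout << std::boolalpha
            << std::regex_search(in_tree, filter) << '\n'        // true
            << std::regex_search(system_header, filter) << '\n'; // false
}
```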
diff --git a/tests/cpp/c_api/test_c_api.cc b/tests/cpp/c_api/test_c_api.cc
index 835d8fd46f3b..164acd8ddc07 100644
--- a/tests/cpp/c_api/test_c_api.cc
+++ b/tests/cpp/c_api/test_c_api.cc
@@ -11,7 +11,7 @@

 #include "../../../src/common/io.h"

-TEST(c_api, XGDMatrixCreateFromMatDT) {
+TEST(CAPI, XGDMatrixCreateFromMatDT) {
   std::vector<int> col0 = {0, -1, 3};
   std::vector<float> col1 = {-4.0f, 2.0f, 0.0f};
   const char *col0_type = "int32";
@@ -38,7 +38,7 @@ TEST(c_api, XGDMatrixCreateFromMatDT) {
   delete dmat;
 }

-TEST(c_api, XGDMatrixCreateFromMat_omp) {
+TEST(CAPI, XGDMatrixCreateFromMatOmp) {
   std::vector<int> num_rows = {100, 11374, 15000};
   for (auto row : num_rows) {
     int num_cols = 50;
@@ -74,13 +74,13 @@ TEST(c_api, XGDMatrixCreateFromMat_omp) {

 namespace xgboost {

-TEST(c_api, Version) {
+TEST(CAPI, Version) {
   int patch {0};
   XGBoostVersion(NULL, NULL, &patch);  // NOLINT
   ASSERT_EQ(patch, XGBOOST_VER_PATCH);
 }

-TEST(c_api, ConfigIO) {
+TEST(CAPI, ConfigIO) {
   size_t constexpr kRows = 10;
   auto p_dmat = RandomDataGenerator(kRows, 10, 0).GenerateDMatix();
   std::vector<std::shared_ptr<DMatrix>> mat {p_dmat};
@@ -111,7 +111,7 @@ TEST(c_api, ConfigIO) {
   ASSERT_EQ(config_0, config_1);
 }

-TEST(c_api, JsonModelIO) {
+TEST(CAPI, JsonModelIO) {
   size_t constexpr kRows = 10;
   dmlc::TemporaryDirectory tempdir;
diff --git a/tests/cpp/common/test_bitfield.cu b/tests/cpp/common/test_bitfield.cu
index d641debd8b7e..98fbd2ad10d2 100644
--- a/tests/cpp/common/test_bitfield.cu
+++ b/tests/cpp/common/test_bitfield.cu
@@ -27,7 +27,7 @@ TEST(BitField, StorageSize) {
   ASSERT_EQ(2, size);
 }

-TEST(BitField, GPU_Set) {
+TEST(BitField, GPUSet) {
   dh::device_vector<LBitField64::value_type> storage;
   uint32_t constexpr kBits = 128;
   storage.resize(128);
@@ -49,7 +49,7 @@ __global__ void TestOrKernel(LBitField64 lhs, LBitField64 rhs) {
   lhs |= rhs;
 }

-TEST(BitField, GPU_And) {
+TEST(BitField, GPUAnd) {
   uint32_t constexpr kBits = 128;
   dh::device_vector<LBitField64::value_type> lhs_storage(kBits);
   dh::device_vector<LBitField64::value_type> rhs_storage(kBits);
diff --git a/tests/cpp/common/test_device_helpers.cu b/tests/cpp/common/test_device_helpers.cu
index 09232a30898c..cb69c24f151f 100644
--- a/tests/cpp/common/test_device_helpers.cu
+++ b/tests/cpp/common/test_device_helpers.cu
@@ -55,11 +55,11 @@ void TestLbs() {
   }
 }

-TEST(cub_lbs, Test) {
+TEST(CubLBS, Test) {
   TestLbs();
 }

-TEST(sumReduce, Test) {
+TEST(SumReduce, Test) {
   thrust::device_vector<float> data(100, 1.0f);
   dh::CubMemory temp;
   auto sum = dh::SumReduction(&temp, dh::Raw(data), data.size());
@@ -81,7 +81,7 @@ void TestAllocator() {
 }

 // Define the test in a function so we can use device lambda
-TEST(bulkAllocator, Test) {
+TEST(BulkAllocator, Test) {
   TestAllocator();
 }
diff --git a/tests/cpp/common/test_group_data.cc b/tests/cpp/common/test_group_data.cc
index d71315999b81..94bb23e4a5dc 100644
--- a/tests/cpp/common/test_group_data.cc
+++ b/tests/cpp/common/test_group_data.cc
@@ -8,7 +8,7 @@
 namespace xgboost {
 namespace common {

-TEST(group_data, ParallelGroupBuilder) {
+TEST(GroupData, ParallelGroupBuilder) {
   std::vector<size_t> offsets;
   std::vector<int> data;
   ParallelGroupBuilder<int> builder(&offsets, &data);
diff --git a/tests/cpp/common/test_hist_util.cc b/tests/cpp/common/test_hist_util.cc
index d0e45e89df36..8b7b93be67d7 100644
--- a/tests/cpp/common/test_hist_util.cc
+++ b/tests/cpp/common/test_hist_util.cc
@@ -218,7 +218,7 @@ TEST(SparseCuts, MultiThreadedBuild) {
   omp_set_num_threads(ori_nthreads);
 }

-TEST(hist_util, DenseCutsCategorical) {
+TEST(HistUtil, DenseCutsCategorical) {
   int categorical_sizes[] = {2, 6, 8, 12};
   int num_bins = 256;
   int sizes[] = {25, 100, 1000};
@@ -240,7 +240,7 @@ TEST(hist_util, DenseCutsCategorical) {
   }
 }

-TEST(hist_util, DenseCutsAccuracyTest) {
+TEST(HistUtil, DenseCutsAccuracyTest) {
   int bin_sizes[] = {2, 16, 256, 512};
   int sizes[] = {100, 1000, 1500};
   int num_columns = 5;
@@ -256,7 +256,7 @@ TEST(hist_util, DenseCutsAccuracyTest) {
   }
 }

-TEST(hist_util, DenseCutsAccuracyTestWeights) {
+TEST(HistUtil, DenseCutsAccuracyTestWeights) {
   int bin_sizes[] = {2, 16, 256, 512};
   int sizes[] = {100, 1000, 1500};
   int num_columns = 5;
@@ -274,7 +274,7 @@ TEST(hist_util, DenseCutsAccuracyTestWeights) {
   }
 }

-TEST(hist_util, DenseCutsExternalMemory) {
+TEST(HistUtil, DenseCutsExternalMemory) {
   int bin_sizes[] = {2, 16, 256, 512};
   int sizes[] = {100, 1000, 1500};
   int num_columns = 5;
@@ -292,7 +292,7 @@ TEST(hist_util, DenseCutsExternalMemory) {
   }
 }

-TEST(hist_util, SparseCutsAccuracyTest) {
+TEST(HistUtil, SparseCutsAccuracyTest) {
   int bin_sizes[] = {2, 16, 256, 512};
   int sizes[] = {100, 1000, 1500};
   int num_columns = 5;
@@ -308,7 +308,7 @@ TEST(hist_util, SparseCutsAccuracyTest) {
   }
 }

-TEST(hist_util, SparseCutsCategorical) {
+TEST(HistUtil, SparseCutsCategorical) {
   int categorical_sizes[] = {2, 6, 8, 12};
   int num_bins = 256;
   int sizes[] = {25, 100, 1000};
@@ -330,7 +330,7 @@ TEST(hist_util, SparseCutsCategorical) {
   }
 }

-TEST(hist_util, SparseCutsExternalMemory) {
+TEST(HistUtil, SparseCutsExternalMemory) {
   int bin_sizes[] = {2, 16, 256, 512};
   int sizes[] = {100, 1000, 1500};
   int num_columns = 5;
@@ -348,7 +348,7 @@ TEST(hist_util, SparseCutsExternalMemory) {
   }
 }

-TEST(hist_util, IndexBinBound) {
+TEST(HistUtil, IndexBinBound) {
   uint64_t bin_sizes[] = { static_cast<uint64_t>(std::numeric_limits<uint8_t>::max()) + 1,
                            static_cast<uint64_t>(std::numeric_limits<uint16_t>::max()) + 1,
                            static_cast<uint64_t>(std::numeric_limits<uint32_t>::max()) + 2 };
@@ -369,7 +369,7 @@ TEST(hist_util, IndexBinBound) {
   }
 }

-TEST(hist_util, SparseIndexBinBound) {
+TEST(HistUtil, SparseIndexBinBound) {
   uint64_t bin_sizes[] = { static_cast<uint64_t>(std::numeric_limits<uint8_t>::max()) + 1,
                            static_cast<uint64_t>(std::numeric_limits<uint16_t>::max()) + 1,
                            static_cast<uint64_t>(std::numeric_limits<uint32_t>::max()) + 2 };
@@ -396,7 +396,7 @@ void CheckIndexData(T* data_ptr, uint32_t* offsets,
   }
 }

-TEST(hist_util, IndexBinData) {
+TEST(HistUtil, IndexBinData) {
   uint64_t constexpr kBinSizes[] = { static_cast<uint64_t>(std::numeric_limits<uint8_t>::max()) + 1,
                                      static_cast<uint64_t>(std::numeric_limits<uint16_t>::max()) + 1,
                                      static_cast<uint64_t>(std::numeric_limits<uint32_t>::max()) + 2 };
@@ -426,7 +426,7 @@ TEST(hist_util, IndexBinData) {
   }
 }

-TEST(hist_util, SparseIndexBinData) {
+TEST(HistUtil, SparseIndexBinData) {
   uint64_t bin_sizes[] = { static_cast<uint64_t>(std::numeric_limits<uint8_t>::max()) + 1,
                            static_cast<uint64_t>(std::numeric_limits<uint16_t>::max()) + 1,
                            static_cast<uint64_t>(std::numeric_limits<uint32_t>::max()) + 2 };
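The bulk renames in the test files (`hist_util` → `HistUtil`, `c_api` → `CAPI`, `GPU_Set` → `GPUSet`, and so on) satisfy both the new `readability-identifier-naming` rules and googletest's own advice against underscores: `TEST(Suite, Name)` pastes its arguments into one generated class name, so underscored names can collide or form reserved identifiers. Roughly (simplified, not the exact googletest expansion):

```cpp
// TEST(HistUtil, DenseCutsAccuracyTest) generates approximately:
class HistUtil_DenseCutsAccuracyTest_Test /* : public ::testing::Test */ {
  void TestBody();  // the body you write inside TEST(...)
};

// With underscores, distinct tests can collide after pasting:
//   TEST(Time, Flies_Like_An_Arrow) -> Time_Flies_Like_An_Arrow_Test
//   TEST(Time_Flies, Like_An_Arrow) -> Time_Flies_Like_An_Arrow_Test
```

The handful of names that keep an underscore (`MGPU_Basic`, `SLOW_CheckMultiBatch`, ...) get explicit `NOLINT` markers instead, presumably because the prefix is used for test filtering in CI.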
diff --git a/tests/cpp/common/test_hist_util.cu b/tests/cpp/common/test_hist_util.cu
index 014a00a048b7..b728acb684d9 100644
--- a/tests/cpp/common/test_hist_util.cu
+++ b/tests/cpp/common/test_hist_util.cu
@@ -32,7 +32,7 @@ HistogramCuts GetHostCuts(AdapterT *adapter, int num_bins, float missing) {
   builder.Build(&dmat, num_bins);
   return cuts;
 }
-TEST(hist_util, DeviceSketch) {
+TEST(HistUtil, DeviceSketch) {
   int num_rows = 5;
   int num_columns = 1;
   int num_bins = 4;
@@ -61,7 +61,7 @@ size_t RequiredSampleCutsTest(int max_bins, size_t num_rows) {
   return std::min(num_cuts, num_rows);
 }

-TEST(hist_util, DeviceSketchMemory) {
+TEST(HistUtil, DeviceSketchMemory) {
   int num_columns = 100;
   int num_rows = 1000;
   int num_bins = 256;
@@ -81,7 +81,7 @@ TEST(hist_util, DeviceSketchMemory) {
             bytes_num_elements + bytes_cuts + bytes_constant);
 }

-TEST(hist_util, DeviceSketchMemoryWeights) {
+TEST(HistUtil, DeviceSketchMemoryWeights) {
   int num_columns = 100;
   int num_rows = 1000;
   int num_bins = 256;
@@ -102,7 +102,7 @@ TEST(hist_util, DeviceSketchMemoryWeights) {
             size_t((bytes_num_elements + bytes_cuts) * 1.05));
 }

-TEST(hist_util, DeviceSketchDeterminism) {
+TEST(HistUtil, DeviceSketchDeterminism) {
   int num_rows = 500;
   int num_columns = 5;
   int num_bins = 256;
@@ -117,7 +117,7 @@ TEST(hist_util, DeviceSketchDeterminism) {
   }
 }

- TEST(hist_util, DeviceSketchCategorical) {
+ TEST(HistUtil, DeviceSketchCategorical) {
   int categorical_sizes[] = {2, 6, 8, 12};
   int num_bins = 256;
   int sizes[] = {25, 100, 1000};
@@ -131,7 +131,7 @@ TEST(hist_util, DeviceSketchDeterminism) {
   }
 }

-TEST(hist_util, DeviceSketchMultipleColumns) {
+TEST(HistUtil, DeviceSketchMultipleColumns) {
   int bin_sizes[] = {2, 16, 256, 512};
   int sizes[] = {100, 1000, 1500};
   int num_columns = 5;
@@ -146,7 +146,7 @@
 }

-TEST(hist_util, DeviceSketchMultipleColumnsWeights) {
+TEST(HistUtil, DeviceSketchMultipleColumnsWeights) {
   int bin_sizes[] = {2, 16, 256, 512};
   int sizes[] = {100, 1000, 1500};
   int num_columns = 5;
@@ -161,7 +161,7 @@ TEST(hist_util, DeviceSketchMultipleColumnsWeights) {
   }
 }

-TEST(hist_util, DeviceSketchBatches) {
+TEST(HistUtil, DeviceSketchBatches) {
   int num_bins = 256;
   int num_rows = 5000;
   int batch_sizes[] = {0, 100, 1500, 6000};
@@ -174,7 +174,7 @@ TEST(hist_util, DeviceSketchBatches) {
   }
 }

-TEST(hist_util, DeviceSketchMultipleColumnsExternal) {
+TEST(HistUtil, DeviceSketchMultipleColumnsExternal) {
   int bin_sizes[] = {2, 16, 256, 512};
   int sizes[] = {100, 1000, 1500};
   int num_columns =5;
@@ -190,7 +190,7 @@ TEST(hist_util, DeviceSketchMultipleColumnsExternal) {
   }
 }

-TEST(hist_util, AdapterDeviceSketch)
+TEST(HistUtil, AdapterDeviceSketch)
 {
   int rows = 5;
   int cols = 1;
@@ -212,7 +212,7 @@ TEST(hist_util, AdapterDeviceSketch)
   EXPECT_EQ(device_cuts.MinValues(), host_cuts.MinValues());
 }

-TEST(hist_util, AdapterDeviceSketchMemory) {
+TEST(HistUtil, AdapterDeviceSketchMemory) {
   int num_columns = 100;
   int num_rows = 1000;
   int num_bins = 256;
@@ -235,7 +235,7 @@ TEST(hist_util, AdapterDeviceSketchMemory) {
             bytes_num_elements + bytes_cuts + bytes_num_columns + bytes_constant);
 }

- TEST(hist_util, AdapterDeviceSketchCategorical) {
+ TEST(HistUtil, AdapterDeviceSketchCategorical) {
   int categorical_sizes[] = {2, 6, 8, 12};
   int num_bins = 256;
   int sizes[] = {25, 100, 1000};
@@ -252,7 +252,7 @@ TEST(hist_util, AdapterDeviceSketchMemory) {
   }
 }

-TEST(hist_util, AdapterDeviceSketchMultipleColumns) {
+TEST(HistUtil, AdapterDeviceSketchMultipleColumns) {
   int bin_sizes[] = {2, 16, 256, 512};
   int sizes[] = {100, 1000, 1500};
   int num_columns = 5;
@@ -268,7 +268,7 @@ TEST(hist_util, AdapterDeviceSketchMultipleColumns) {
     }
   }
 }
-TEST(hist_util, AdapterDeviceSketchBatches) {
+TEST(HistUtil, AdapterDeviceSketchBatches) {
   int num_bins = 256;
   int num_rows = 5000;
   int batch_sizes[] = {0, 100, 1500, 6000};
@@ -287,7 +287,7 @@ TEST(hist_util, AdapterDeviceSketchBatches) {

 // Check sketching from adapter or DMatrix results in the same answer
 // Consistency here is useful for testing and user experience
-TEST(hist_util, SketchingEquivalent) {
+TEST(HistUtil, SketchingEquivalent) {
   int bin_sizes[] = {2, 16, 256, 512};
   int sizes[] = {100, 1000, 1500};
   int num_columns = 5;
diff --git a/tests/cpp/common/test_host_device_vector.cu b/tests/cpp/common/test_host_device_vector.cu
index b483edf3906e..e5ac339e794f 100644
--- a/tests/cpp/common/test_host_device_vector.cu
+++ b/tests/cpp/common/test_host_device_vector.cu
@@ -176,7 +176,7 @@ TEST(HostDeviceVector, Span) {
   ASSERT_TRUE(vec.HostCanWrite());
 }

-TEST(HostDeviceVector, MGPU_Basic) {
+TEST(HostDeviceVector, MGPU_Basic) {  // NOLINT
   if (AllVisibleGPUs() < 2) {
     LOG(WARNING) << "Not testing in multi-gpu environment.";
     return;
diff --git a/tests/cpp/common/test_json.cc b/tests/cpp/common/test_json.cc
index 42eb8eb6a488..b636ef7e3db3 100644
--- a/tests/cpp/common/test_json.cc
+++ b/tests/cpp/common/test_json.cc
@@ -406,7 +406,7 @@ TEST(Json, WrongCasts) {
   }
 }

-TEST(Json, Int_vs_Float) {
+TEST(Json, IntVSFloat) {
   // If integer is parsed as float, calling `get<Integer>()' will throw.
   {
     std::string str = R"json(
diff --git a/tests/cpp/common/test_transform_range.cu b/tests/cpp/common/test_transform_range.cu
index 4e4c259e4391..a1958ded5111 100644
--- a/tests/cpp/common/test_transform_range.cu
+++ b/tests/cpp/common/test_transform_range.cu
@@ -5,7 +5,7 @@
 namespace xgboost {
 namespace common {

-TEST(Transform, MGPU_SpecifiedGpuId) {
+TEST(Transform, MGPU_SpecifiedGpuId) {  // NOLINT
   if (AllVisibleGPUs() < 2) {
     LOG(WARNING) << "Not testing in multi-gpu environment.";
     return;
diff --git a/tests/cpp/data/test_adapter.cc b/tests/cpp/data/test_adapter.cc
index 5ead2064b556..04e2fa074f99 100644
--- a/tests/cpp/data/test_adapter.cc
+++ b/tests/cpp/data/test_adapter.cc
@@ -67,7 +67,7 @@ TEST(Adapter, CSCAdapterColsMoreThanRows) {
   EXPECT_EQ(inst[3].index, 3);
 }

-TEST(c_api, DMatrixSliceAdapterFromSimpleDMatrix) {
+TEST(CAPI, DMatrixSliceAdapterFromSimpleDMatrix) {
   auto p_dmat = RandomDataGenerator(6, 2, 1.0).GenerateDMatix();

   std::vector<int> ridx_set = {1, 3, 5};
diff --git a/tests/cpp/data/test_device_adapter.cu b/tests/cpp/data/test_device_adapter.cu
index 09de9bb88998..e652db377c5c 100644
--- a/tests/cpp/data/test_device_adapter.cu
+++ b/tests/cpp/data/test_device_adapter.cu
@@ -50,6 +50,6 @@ void TestCudfAdapter()
   });
 }

-TEST(device_adapter, CudfAdapter) {
+TEST(DeviceAdapter, CudfAdapter) {
   TestCudfAdapter();
 }
diff --git a/tests/cpp/data/test_device_dmatrix.cu b/tests/cpp/data/test_device_dmatrix.cu
index 61ea4754255f..88077910714c 100644
--- a/tests/cpp/data/test_device_dmatrix.cu
+++ b/tests/cpp/data/test_device_dmatrix.cu
@@ -32,7 +32,7 @@ TEST(DeviceDMatrix, RowMajor) {
   for (auto i = 0ull; i < x.size(); i++) {
     int column_idx = i % num_columns;
-    EXPECT_EQ(impl->cuts_.SearchBin(x[i], column_idx), iterator[i]);
+    EXPECT_EQ(impl->Cuts().SearchBin(x[i], column_idx), iterator[i]);
   }
   EXPECT_EQ(dmat.Info().num_col_, num_columns);
   EXPECT_EQ(dmat.Info().num_row_, num_rows);
@@ -93,9 +93,9 @@ TEST(DeviceDMatrix, ColumnMajor) {
   for (auto i = 0ull; i < kRows; i++) {
     for (auto j = 0ull; j < columns.size(); j++) {
       if (j == 0) {
-        EXPECT_EQ(iterator[i * 2 + j], impl->cuts_.SearchBin(d_data_0[i], j));
+        EXPECT_EQ(iterator[i * 2 + j], impl->Cuts().SearchBin(d_data_0[i], j));
       } else {
-        EXPECT_EQ(iterator[i * 2 + j], impl->cuts_.SearchBin(d_data_1[i], j));
+        EXPECT_EQ(iterator[i * 2 + j], impl->Cuts().SearchBin(d_data_1[i], j));
       }
     }
   }
@@ -123,7 +123,7 @@ TEST(DeviceDMatrix, Equivalent) {
   const auto &device_dmat_batch =
       *device_dmat.GetBatches<EllpackPage>({0, num_bins}).begin();

-  ASSERT_EQ(batch.Impl()->cuts_.Values(), device_dmat_batch.Impl()->cuts_.Values());
+  ASSERT_EQ(batch.Impl()->Cuts().Values(), device_dmat_batch.Impl()->Cuts().Values());
   ASSERT_EQ(batch.Impl()->gidx_buffer.HostVector(),
             device_dmat_batch.Impl()->gidx_buffer.HostVector());
 }
diff --git a/tests/cpp/data/test_ellpack_page.cu b/tests/cpp/data/test_ellpack_page.cu
index a713979e3b87..14a05c09b2ad 100644
--- a/tests/cpp/data/test_ellpack_page.cu
+++ b/tests/cpp/data/test_ellpack_page.cu
@@ -21,7 +21,7 @@ TEST(EllpackPage, EmptyDMatrix) {
   auto& page = *dmat->GetBatches<EllpackPage>({0, kMaxBin}).begin();
   auto impl = page.Impl();
   ASSERT_EQ(impl->row_stride, 0);
-  ASSERT_EQ(impl->cuts_.TotalBins(), 0);
+  ASSERT_EQ(impl->Cuts().TotalBins(), 0);
   ASSERT_EQ(impl->gidx_buffer.Size(), 4);
 }
@@ -106,7 +106,7 @@ TEST(EllpackPage, Copy) {
   auto page = (*dmat->GetBatches<EllpackPage>(param).begin()).Impl();

   // Create an empty result page.
-  EllpackPageImpl result(0, page->cuts_, page->is_dense, page->row_stride,
+  EllpackPageImpl result(0, page->Cuts(), page->is_dense, page->row_stride,
                          kRows);

   // Copy batch pages into the result page.
@@ -152,7 +152,7 @@ TEST(EllpackPage, Compact) {
   auto page = (*dmat->GetBatches<EllpackPage>(param).begin()).Impl();

   // Create an empty result page.
-  EllpackPageImpl result(0, page->cuts_, page->is_dense, page->row_stride,
+  EllpackPageImpl result(0, page->Cuts(), page->is_dense, page->row_stride,
                          kCompactedRows);

   // Compact batch pages into the result page.
diff --git a/tests/cpp/data/test_sparse_page_dmatrix.cu b/tests/cpp/data/test_sparse_page_dmatrix.cu
index c89e6c15e561..cf4059d406d1 100644
--- a/tests/cpp/data/test_sparse_page_dmatrix.cu
+++ b/tests/cpp/data/test_sparse_page_dmatrix.cu
@@ -63,14 +63,14 @@ TEST(SparsePageDMatrix, EllpackPageContent) {
   EXPECT_EQ(impl->n_rows, kRows);
   EXPECT_FALSE(impl->is_dense);
   EXPECT_EQ(impl->row_stride, 2);
-  EXPECT_EQ(impl->cuts_.TotalBins(), 4);
+  EXPECT_EQ(impl->Cuts().TotalBins(), 4);

   auto impl_ext = (*dmat_ext->GetBatches<EllpackPage>(param).begin()).Impl();
   EXPECT_EQ(impl_ext->base_rowid, 0);
   EXPECT_EQ(impl_ext->n_rows, kRows);
   EXPECT_FALSE(impl_ext->is_dense);
   EXPECT_EQ(impl_ext->row_stride, 2);
-  EXPECT_EQ(impl_ext->cuts_.TotalBins(), 4);
+  EXPECT_EQ(impl_ext->Cuts().TotalBins(), 4);

   std::vector<common::CompressedByteT> buffer(impl->gidx_buffer.HostVector());
   std::vector<common::CompressedByteT> buffer_ext(impl_ext->gidx_buffer.HostVector());
diff --git a/tests/cpp/predictor/test_gpu_predictor.cu b/tests/cpp/predictor/test_gpu_predictor.cu
index 7c8dac3736fe..91f5b9ef6fe7 100644
--- a/tests/cpp/predictor/test_gpu_predictor.cu
+++ b/tests/cpp/predictor/test_gpu_predictor.cu
@@ -126,7 +126,7 @@ TEST(GPUPredictor, InplacePredictCuDF) {
   TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 0);
 }

-TEST(GPUPredictor, MGPU_InplacePredict) {
+TEST(GPUPredictor, MGPU_InplacePredict) {  // NOLINT
   int32_t n_gpus = xgboost::common::AllVisibleGPUs();
   if (n_gpus <= 1) {
     LOG(WARNING) << "GPUPredictor.MGPU_InplacePredict is skipped.";
diff --git a/tests/cpp/test_learner.cc b/tests/cpp/test_learner.cc
index 5308d9baaea4..986890639f98 100644
--- a/tests/cpp/test_learner.cc
+++ b/tests/cpp/test_learner.cc
@@ -86,7 +86,7 @@ TEST(Learner, CheckGroup) {
   EXPECT_ANY_THROW(learner->UpdateOneIter(0, p_mat));
 }

-TEST(Learner, SLOW_CheckMultiBatch) {
+TEST(Learner, SLOW_CheckMultiBatch) {  // NOLINT
   // Create sufficiently large data to make two row pages
   dmlc::TemporaryDirectory tempdir;
   const std::string tmp_file = tempdir.path + "/big.libsvm";
diff --git a/tests/cpp/test_serialization.cc b/tests/cpp/test_serialization.cc
index dc74cd890658..cd4cf13f3bdd 100644
--- a/tests/cpp/test_serialization.cc
+++ b/tests/cpp/test_serialization.cc
@@ -254,7 +254,7 @@ TEST_F(SerializationTest, Hist) {
                            fmap_, p_dmat_);
 }

-TEST_F(SerializationTest, CPU_CoordDescent) {
+TEST_F(SerializationTest, CPUCoordDescent) {
   TestLearnerSerialization({{"booster", "gblinear"},
                             {"seed", "0"},
                             {"nthread", "1"},
@@ -264,7 +264,7 @@ TEST_F(SerializationTest, CPU_CoordDescent) {
 }

 #if defined(XGBOOST_USE_CUDA)
-TEST_F(SerializationTest, GPU_Hist) {
+TEST_F(SerializationTest, GPUHist) {
   TestLearnerSerialization({{"booster", "gbtree"},
                             {"seed", "0"},
                             {"enable_experimental_json_serialization", "1"},
@@ -338,7 +338,7 @@ TEST_F(SerializationTest, ConfigurationCount) {
   xgboost::ConsoleLogger::Configure({{"verbosity", "2"}});
 }

-TEST_F(SerializationTest, GPU_CoordDescent) {
+TEST_F(SerializationTest, GPUCoordDescent) {
   TestLearnerSerialization({{"booster", "gblinear"},
                             {"seed", "0"},
                             {"nthread", "1"},
@@ -431,7 +431,7 @@ TEST_F(LogitSerializationTest, Hist) {
                            fmap_, p_dmat_);
 }

-TEST_F(LogitSerializationTest, CPU_CoordDescent) {
+TEST_F(LogitSerializationTest, CPUCoordDescent) {
   TestLearnerSerialization({{"booster", "gblinear"},
                             {"seed", "0"},
                             {"nthread", "1"},
@@ -441,7 +441,7 @@ TEST_F(LogitSerializationTest, CPU_CoordDescent) {
 }

 #if defined(XGBOOST_USE_CUDA)
-TEST_F(LogitSerializationTest, GPU_Hist) {
+TEST_F(LogitSerializationTest, GPUHist) {
   TestLearnerSerialization({{"booster", "gbtree"},
                             {"objective",
"binary:logistic"}, {"seed", "0"}, @@ -471,7 +471,7 @@ TEST_F(LogitSerializationTest, GPU_Hist) { fmap_, p_dmat_); } -TEST_F(LogitSerializationTest, GPU_CoordDescent) { +TEST_F(LogitSerializationTest, GPUCoordDescent) { TestLearnerSerialization({{"booster", "gblinear"}, {"objective", "binary:logistic"}, {"seed", "0"}, @@ -586,7 +586,7 @@ TEST_F(MultiClassesSerializationTest, Hist) { fmap_, p_dmat_); } -TEST_F(MultiClassesSerializationTest, CPU_CoordDescent) { +TEST_F(MultiClassesSerializationTest, CPUCoordDescent) { TestLearnerSerialization({{"booster", "gblinear"}, {"seed", "0"}, {"nthread", "1"}, @@ -596,7 +596,7 @@ TEST_F(MultiClassesSerializationTest, CPU_CoordDescent) { } #if defined(XGBOOST_USE_CUDA) -TEST_F(MultiClassesSerializationTest, GPU_Hist) { +TEST_F(MultiClassesSerializationTest, GPUHist) { TestLearnerSerialization({{"booster", "gbtree"}, {"num_class", std::to_string(kClasses)}, {"seed", "0"}, @@ -632,7 +632,7 @@ TEST_F(MultiClassesSerializationTest, GPU_Hist) { fmap_, p_dmat_); } -TEST_F(MultiClassesSerializationTest, GPU_CoordDescent) { +TEST_F(MultiClassesSerializationTest, GPUCoordDescent) { TestLearnerSerialization({{"booster", "gblinear"}, {"num_class", std::to_string(kClasses)}, {"seed", "0"}, diff --git a/tests/cpp/tree/gpu_hist/test_gradient_based_sampler.cu b/tests/cpp/tree/gpu_hist/test_gradient_based_sampler.cu index bde4276701de..183d5b3f1bdd 100644 --- a/tests/cpp/tree/gpu_hist/test_gradient_based_sampler.cu +++ b/tests/cpp/tree/gpu_hist/test_gradient_based_sampler.cu @@ -69,7 +69,7 @@ TEST(GradientBasedSampler, NoSampling) { } // In external mode, when not sampling, we concatenate the pages together. -TEST(GradientBasedSampler, NoSampling_ExternalMemory) { +TEST(GradientBasedSampler, NoSamplingExternalMemory) { constexpr size_t kRows = 2048; constexpr size_t kCols = 1; constexpr float kSubsample = 1.0f; @@ -121,7 +121,7 @@ TEST(GradientBasedSampler, UniformSampling) { VerifySampling(kPageSize, kSubsample, kSamplingMethod, kFixedSizeSampling, kCheckSum); } -TEST(GradientBasedSampler, UniformSampling_ExternalMemory) { +TEST(GradientBasedSampler, UniformSamplingExternalMemory) { constexpr size_t kPageSize = 1024; constexpr float kSubsample = 0.5; constexpr int kSamplingMethod = TrainParam::kUniform; @@ -137,7 +137,7 @@ TEST(GradientBasedSampler, GradientBasedSampling) { VerifySampling(kPageSize, kSubsample, kSamplingMethod); } -TEST(GradientBasedSampler, GradientBasedSampling_ExternalMemory) { +TEST(GradientBasedSampler, GradientBasedSamplingExternalMemory) { constexpr size_t kPageSize = 1024; constexpr float kSubsample = 0.8; constexpr int kSamplingMethod = TrainParam::kGradientBased; diff --git a/tests/cpp/tree/test_gpu_hist.cu b/tests/cpp/tree/test_gpu_hist.cu index 9eca3e78d6fd..930dc01c5bc4 100644 --- a/tests/cpp/tree/test_gpu_hist.cu +++ b/tests/cpp/tree/test_gpu_hist.cu @@ -193,7 +193,7 @@ TEST(GpuHist, EvaluateSplits) { auto cmat = GetHostCutMatrix(); // Copy cut matrix to device. 
diff --git a/tests/cpp/tree/test_gpu_hist.cu b/tests/cpp/tree/test_gpu_hist.cu
index 9eca3e78d6fd..930dc01c5bc4 100644
--- a/tests/cpp/tree/test_gpu_hist.cu
+++ b/tests/cpp/tree/test_gpu_hist.cu
@@ -193,7 +193,7 @@ TEST(GpuHist, EvaluateSplits) {
   auto cmat = GetHostCutMatrix();

   // Copy cut matrix to device.
-  page->cuts_ = cmat;
+  page->Cuts() = cmat;
   maker.ba.Allocate(0, &(maker.monotone_constraints), kNCols);
   dh::CopyVectorToDeviceSpan(maker.monotone_constraints, param.monotone_constraints);
@@ -271,7 +271,7 @@ void TestHistogramIndexImpl() {
   const auto &maker_ext = hist_maker_ext.maker;
   std::vector<common::CompressedByteT> h_gidx_buffer_ext(maker_ext->page->gidx_buffer.HostVector());

-  ASSERT_EQ(maker->page->cuts_.TotalBins(), maker_ext->page->cuts_.TotalBins());
+  ASSERT_EQ(maker->page->Cuts().TotalBins(), maker_ext->page->Cuts().TotalBins());
   ASSERT_EQ(maker->page->gidx_buffer.Size(), maker_ext->page->gidx_buffer.Size());
 }
@@ -498,7 +498,7 @@ TEST(GpuHist, ExternalMemoryWithSampling) {
   }
 }

-TEST(GpuHist, Config_IO) {
+TEST(GpuHist, ConfigIO) {
   GenericParameter generic_param(CreateEmptyGenericParam(0));
   std::unique_ptr<TreeUpdater> updater {TreeUpdater::Create("grow_gpu_hist", &generic_param)};
   updater->Configure(Args{});
diff --git a/tests/cpp/tree/test_quantile_hist.cc b/tests/cpp/tree/test_quantile_hist.cc
index ba325a82a2b2..a0541ae78eae 100644
--- a/tests/cpp/tree/test_quantile_hist.cc
+++ b/tests/cpp/tree/test_quantile_hist.cc
@@ -285,14 +285,14 @@ class QuantileHistMock : public QuantileHistMaker {
   }
 };

-TEST(Updater, QuantileHist_InitData) {
+TEST(QuantileHist, InitData) {
   std::vector<std::pair<std::string, std::string>> cfg
       {{"num_feature", std::to_string(QuantileHistMock::GetNumColumns())}};
   QuantileHistMock maker(cfg);
   maker.TestInitData();
 }

-TEST(Updater, QuantileHist_BuildHist) {
+TEST(QuantileHist, BuildHist) {
   // Don't enable feature grouping
   std::vector<std::pair<std::string, std::string>> cfg
       {{"num_feature", std::to_string(QuantileHistMock::GetNumColumns())},
        {"enable_feature_grouping", std::to_string(0)}};
   QuantileHistMock maker(cfg);
   maker.TestBuildHist();
 }

-TEST(Updater, QuantileHist_EvalSplits) {
+TEST(QuantileHist, EvalSplits) {
   std::vector<std::pair<std::string, std::string>> cfg
       {{"num_feature", std::to_string(QuantileHistMock::GetNumColumns())},
        {"split_evaluator", "elastic_net"},