Tidy's happy.
trivialfis committed Apr 3, 2020
1 parent 449d2e4 commit 5a3f4b5
Showing 37 changed files with 209 additions and 214 deletions.
38 changes: 19 additions & 19 deletions .clang-tidy
@@ -1,21 +1,21 @@
Checks: 'modernize-*,-modernize-make-*,-modernize-use-auto,-modernize-raw-string-literal,-modernize-avoid-c-arrays,-modernize-use-trailing-return-type,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
CheckOptions:
- - { key: readability-identifier-naming.ClassCase, value: CamelCase }
- - { key: readability-identifier-naming.StructCase, value: CamelCase }
- - { key: readability-identifier-naming.TypeAliasCase, value: CamelCase }
- - { key: readability-identifier-naming.TypedefCase, value: CamelCase }
- - { key: readability-identifier-naming.TypeTemplateParameterCase, value: CamelCase }
- - { key: readability-identifier-naming.MemberCase, value: lower_case }
- - { key: readability-identifier-naming.PrivateMemberSuffix, value: '_' }
- - { key: readability-identifier-naming.ProtectedMemberSuffix, value: '_' }
- - { key: readability-identifier-naming.EnumCase, value: CamelCase }
- - { key: readability-identifier-naming.EnumConstant, value: CamelCase }
- - { key: readability-identifier-naming.EnumConstantPrefix, value: k }
- - { key: readability-identifier-naming.GlobalConstantCase, value: CamelCase }
- - { key: readability-identifier-naming.GlobalConstantPrefix, value: k }
- - { key: readability-identifier-naming.StaticConstantCase, value: CamelCase }
- - { key: readability-identifier-naming.StaticConstantPrefix, value: k }
- - { key: readability-identifier-naming.ConstexprVariableCase, value: CamelCase }
- - { key: readability-identifier-naming.ConstexprVariablePrefix, value: k }
- - { key: readability-identifier-naming.FunctionCase, value: CamelCase }
- - { key: readability-identifier-naming.NamespaceCase, value: lower_case }
+ - { key: readability-identifier-naming.ClassCase, value: CamelCase }
+ - { key: readability-identifier-naming.StructCase, value: CamelCase }
+ - { key: readability-identifier-naming.TypeAliasCase, value: CamelCase }
+ - { key: readability-identifier-naming.TypedefCase, value: CamelCase }
+ - { key: readability-identifier-naming.TypeTemplateParameterCase, value: CamelCase }
+ - { key: readability-identifier-naming.MemberCase, value: lower_case }
+ - { key: readability-identifier-naming.PrivateMemberSuffix, value: '_' }
+ - { key: readability-identifier-naming.ProtectedMemberSuffix, value: '_' }
+ - { key: readability-identifier-naming.EnumCase, value: CamelCase }
+ - { key: readability-identifier-naming.EnumConstant, value: CamelCase }
+ - { key: readability-identifier-naming.EnumConstantPrefix, value: k }
+ - { key: readability-identifier-naming.GlobalConstantCase, value: CamelCase }
+ - { key: readability-identifier-naming.GlobalConstantPrefix, value: k }
+ - { key: readability-identifier-naming.StaticConstantCase, value: CamelCase }
+ - { key: readability-identifier-naming.StaticConstantPrefix, value: k }
+ - { key: readability-identifier-naming.ConstexprVariableCase, value: CamelCase }
+ - { key: readability-identifier-naming.ConstexprVariablePrefix, value: k }
+ - { key: readability-identifier-naming.FunctionCase, value: CamelCase }
+ - { key: readability-identifier-naming.NamespaceCase, value: lower_case }
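
For reference, here is a hypothetical snippet that satisfies the naming configuration above: CamelCase types and functions, lower_case namespaces and members, a trailing underscore on private members, and a k prefix on constants. All names below are illustrative, not taken from the XGBoost codebase.

namespace sketch {                 // NamespaceCase: lower_case

constexpr int kMaxDepth = 8;       // ConstexprVariable: CamelCase with k prefix

enum class DataSplitMode {         // EnumCase: CamelCase
  kAuto,                           // EnumConstant: CamelCase with k prefix
  kCol
};

class TreeUpdater {                // ClassCase: CamelCase
 public:
  int MaxDepth() const { return max_depth_; }  // FunctionCase: CamelCase

 private:
  int max_depth_ {kMaxDepth};      // MemberCase: lower_case, PrivateMemberSuffix '_'
};

}  // namespace sketch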
5 changes: 2 additions & 3 deletions include/xgboost/span.h
@@ -137,7 +137,7 @@ class SpanIterator {
IsConst, const ElementType, ElementType>::type&;
using pointer = typename std::add_pointer<reference>::type; // NOLINT

- XGBOOST_DEVICE constexpr SpanIterator() : span_{nullptr}, index_{0} {} // NOLINT
+ constexpr SpanIterator() = default;

XGBOOST_DEVICE constexpr SpanIterator(
const SpanType* _span,
@@ -410,8 +410,7 @@ class Span {
using const_reverse_iterator = const detail::SpanIterator<Span<T, Extent>, true>; // NOLINT

// constructors
-
- XGBOOST_DEVICE constexpr Span() __span_noexcept : size_(0), data_(nullptr) {} // NOLINT
+ constexpr Span() __span_noexcept = default;

XGBOOST_DEVICE Span(pointer _ptr, index_type _count) :
size_(_count), data_(_ptr) {
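
Both hunks above replace a hand-written empty constructor with = default, in the spirit of clang-tidy's modernize-use-equals-default. A minimal sketch of the pattern, assuming the members carry default member initializers so the defaulted constructor stays usable in constant expressions (the class below is a toy, not the real SpanIterator or Span):

#include <cstddef>

class Cursor {
 public:
  // Hand-written form that the check flags:
  //   constexpr Cursor() : data_{nullptr}, index_{0} {}
  constexpr Cursor() = default;

 private:
  const int* data_ {nullptr};  // default member initializers keep the
  std::size_t index_ {0};      // defaulted constructor well-defined
};

int main() {
  constexpr Cursor c;  // still works in a constant expression
  (void)c;
  return 0;
}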
4 changes: 2 additions & 2 deletions src/common/bitfield.h
@@ -86,8 +86,8 @@ struct BitFieldContainer {
XGBOOST_DEVICE explicit BitFieldContainer(common::Span<value_type> bits) : bits_{bits} {}
XGBOOST_DEVICE BitFieldContainer(BitFieldContainer const& other) : bits_{other.bits_} {}

- common::Span<value_type> Bits() { return bits_; };
- common::Span<value_type const> Bits() const { return bits_; };
+ common::Span<value_type> Bits() { return bits_; }
+ common::Span<value_type const> Bits() const { return bits_; }

/*\brief Compute the size of needed memory allocation. The returned value is in terms
* of number of elements with `BitFieldContainer::value_type'.
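
This hunk (and the matching one in src/common/row_set.h below) drops a stray semicolon after a function body, something compilers flag with warnings such as -Wextra-semi. A minimal before/after:

struct Counter {
  // int Value() const { return value_; };  // extra ';' after the body
  int Value() const { return value_; }      // tidy form
  int value_ {0};
};

int main() { return Counter{}.Value(); }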
2 changes: 1 addition & 1 deletion src/common/device_helpers.cuh
@@ -868,7 +868,7 @@ template <typename FunctionT, typename SegmentIterT, typename OffsetT>
void SparseTransformLbs(int device_idx, dh::CubMemory *temp_memory,
OffsetT count, SegmentIterT segments,
OffsetT num_segments, FunctionT f) {
- typedef typename cub::CubVector<OffsetT, 2>::Type CoordinateT;
+ using CoordinateT = typename cub::CubVector<OffsetT, 2>::Type;
dh::safe_cuda(cudaSetDevice(device_idx));
const int BLOCK_THREADS = 256;
const int ITEMS_PER_THREAD = 1;
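
Swapping typedef for a using alias is clang-tidy's modernize-use-using. Besides reading left-to-right, alias declarations can be templated where typedefs cannot; a small self-contained illustration with made-up names:

#include <utility>
#include <vector>

typedef std::vector<int> IntVectorOld;  // legacy spelling
using IntVector = std::vector<int>;     // equivalent modern spelling

// Only the alias-declaration form supports template parameters:
template <typename T>
using PairOf = std::pair<T, T>;

int main() {
  IntVector v {1, 2, 3};
  PairOf<int> ends {v.front(), v.back()};
  return ends.first == 1 && ends.second == 3 ? 0 : 1;
}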
8 changes: 4 additions & 4 deletions src/common/hist_util.h
@@ -293,14 +293,14 @@ struct Index {
return reinterpret_cast<uint32_t*>(t)[i];
}

- using Func = uint32_t (*)(void* ,size_t);
+ using Func = uint32_t (*)(void*, size_t);

std::vector<uint8_t> data_;
std::vector<uint32_t> offset_; // size of this field is equal to number of features
void* data_ptr_;
- BinTypeSize binTypeSize_;
- size_t p_;
- uint32_t* offset_ptr_;
+ BinTypeSize binTypeSize_ {kUint8BinsTypeSize};
+ size_t p_ {1};
+ uint32_t* offset_ptr_ {nullptr};
Func func_;
};

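
Giving binTypeSize_, p_, and offset_ptr_ in-class default values means every constructor path leaves them in a defined state, which reads like modernize-use-default-member-init. A sketch of the effect, with hypothetical names:

#include <cstddef>
#include <cstdint>

struct IndexSketch {
  // Without initializers these would be indeterminate after default
  // construction; with them, default construction is always well-defined.
  std::uint8_t bin_type_size_ {1};
  std::size_t p_ {1};
  std::uint32_t* offset_ptr_ {nullptr};
};

int main() {
  IndexSketch idx;
  return idx.offset_ptr_ == nullptr && idx.p_ == 1 ? 0 : 1;
}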
2 changes: 1 addition & 1 deletion src/common/row_set.h
@@ -90,7 +90,7 @@ class RowSetCollection {
elem_of_each_node_.emplace_back(Elem(begin, end, 0));
}

- std::vector<size_t>* Data() { return &row_indices_; };
+ std::vector<size_t>* Data() { return &row_indices_; }
// split rowset into two
inline void AddSplit(unsigned node_id,
unsigned left_node_id,
3 changes: 2 additions & 1 deletion src/common/transform.h
@@ -133,8 +133,9 @@ class Transform {
template <typename std::enable_if<CompiledWithCuda>::type* = nullptr,
typename... HDV>
void LaunchCUDA(Functor _func, HDV*... _vectors) const {
- if (shard_)
+ if (shard_) {
UnpackShard(device_, _vectors...);
+ }

size_t range_size = *range_.end() - *range_.begin();

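
Bracing the single-statement if matches the google-readability-braces-around-statements check enabled by the google-* group above. It is a small change, but it prevents a later-added second statement from silently escaping the condition:

void Report(bool ok, int* failures) {
  if (!ok) {
    ++*failures;  // with braces, any statement added here stays conditional
  }
}

int main() {
  int failures = 0;
  Report(true, &failures);
  return failures;
}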
6 changes: 5 additions & 1 deletion src/data/ellpack_page.cuh
@@ -185,6 +185,9 @@ class EllpackPageImpl {
base_rowid = row_id;
}

+ common::HistogramCuts& Cuts() { return cuts_; }
+ common::HistogramCuts const& Cuts() const { return cuts_; }
+
/*! \return Estimation of memory cost of this page. */
static size_t MemCostBytes(size_t num_rows, size_t row_stride, const common::HistogramCuts&cuts) ;

@@ -220,8 +223,9 @@ public:
size_t n_rows{};
/*! \brief global index of histogram, which is stored in ELLPack format. */
HostDeviceVector<common::CompressedByteT> gidx_buffer;
+
+ private:
common::HistogramCuts cuts_;
- private:
common::Monitor monitor_;
};

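
Moving cuts_ behind a const/non-const accessor pair is what lets the callers updated below (ellpack_page_raw_format.cu and gradient_based_sampler.cu) keep working once the member is private. A minimal sketch of the idiom, using a toy class rather than the real EllpackPageImpl:

#include <vector>

class PageSketch {
 public:
  std::vector<float>& Cuts() { return cuts_; }              // for mutation
  const std::vector<float>& Cuts() const { return cuts_; }  // read-only view

 private:
  std::vector<float> cuts_;  // no longer reachable directly from callers
};

int main() {
  PageSketch page;
  page.Cuts().push_back(0.5f);             // non-const overload
  const PageSketch& view = page;
  return view.Cuts().size() == 1 ? 0 : 1;  // const overload
}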
12 changes: 6 additions & 6 deletions src/data/ellpack_page_raw_format.cu
@@ -17,9 +17,9 @@ class EllpackPageRawFormat : public SparsePageFormat<EllpackPage> {
public:
bool Read(EllpackPage* page, dmlc::SeekStream* fi) override {
auto* impl = page->Impl();
- fi->Read(&impl->cuts_.cut_values_.HostVector());
- fi->Read(&impl->cuts_.cut_ptrs_.HostVector());
- fi->Read(&impl->cuts_.min_vals_.HostVector());
+ fi->Read(&impl->Cuts().cut_values_.HostVector());
+ fi->Read(&impl->Cuts().cut_ptrs_.HostVector());
+ fi->Read(&impl->Cuts().min_vals_.HostVector());
fi->Read(&impl->n_rows);
fi->Read(&impl->is_dense);
fi->Read(&impl->row_stride);
@@ -38,9 +38,9 @@ class EllpackPageRawFormat : public SparsePageFormat<EllpackPage> {

void Write(const EllpackPage& page, dmlc::Stream* fo) override {
auto* impl = page.Impl();
- fo->Write(impl->cuts_.cut_values_.ConstHostVector());
- fo->Write(impl->cuts_.cut_ptrs_.ConstHostVector());
- fo->Write(impl->cuts_.min_vals_.ConstHostVector());
+ fo->Write(impl->Cuts().cut_values_.ConstHostVector());
+ fo->Write(impl->Cuts().cut_ptrs_.ConstHostVector());
+ fo->Write(impl->Cuts().min_vals_.ConstHostVector());
fo->Write(impl->n_rows);
fo->Write(impl->is_dense);
fo->Write(impl->row_stride);
2 changes: 1 addition & 1 deletion src/metric/elementwise_metric.cu
@@ -350,7 +350,7 @@ struct EvalEWiseBase : public Metric {
}

private:
- Policy policy_ {};
+ Policy policy_;
ElementWiseMetricsReduction<Policy> reducer_{policy_};
};

2 changes: 1 addition & 1 deletion src/metric/metric_common.h
@@ -66,7 +66,7 @@ class PackedReduceResult {
double weights_sum_ { 0 };

public:
- XGBOOST_DEVICE PackedReduceResult() : residue_sum_{0}, weights_sum_{0} {} // NOLINT
+ XGBOOST_DEVICE PackedReduceResult() {} // NOLINT
XGBOOST_DEVICE PackedReduceResult(double residue, double weight)
: residue_sum_{residue}, weights_sum_{weight} {}
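
Both metric hunks drop initialization that is already guaranteed elsewhere: policy_ default-constructs without the explicit {}, and PackedReduceResult's members are zeroed by their in-class initializers (weights_sum_ { 0 } is visible at the top of the hunk). This is akin to clang-tidy's readability-redundant-member-init, though not necessarily the check that fired here; a sketch with hypothetical names:

class Accumulator {
 public:
  // Redundant form the check flags:
  //   Accumulator() : residue_sum_{0}, weights_sum_{0} {}
  Accumulator() {}  // the in-class initializers below already zero the members

  Accumulator(double residue, double weight)
      : residue_sum_{residue}, weights_sum_{weight} {}  // overriding is fine

 private:
  double residue_sum_ { 0 };
  double weights_sum_ { 0 };
};

int main() {
  Accumulator a;
  Accumulator b {1.0, 2.0};
  (void)a;
  (void)b;
  return 0;
}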

6 changes: 3 additions & 3 deletions src/tree/gpu_hist/gradient_based_sampler.cu
@@ -153,7 +153,7 @@ ExternalMemoryNoSampling::ExternalMemoryNoSampling(EllpackPageImpl* page,
size_t n_rows,
const BatchParam& batch_param)
: batch_param_(batch_param),
- page_(new EllpackPageImpl(batch_param.gpu_id, page->cuts_, page->is_dense,
+ page_(new EllpackPageImpl(batch_param.gpu_id, page->Cuts(), page->is_dense,
page->row_stride, n_rows)) {}

GradientBasedSample ExternalMemoryNoSampling::Sample(common::Span<GradientPair> gpair,
@@ -218,7 +218,7 @@ GradientBasedSample ExternalMemoryUniformSampling::Sample(common::Span<GradientP
// Create a new ELLPACK page with empty rows.
page_.reset(); // Release the device memory first before reallocating
page_.reset(new EllpackPageImpl(
- batch_param_.gpu_id, original_page_->cuts_, original_page_->is_dense,
+ batch_param_.gpu_id, original_page_->Cuts(), original_page_->is_dense,
original_page_->row_stride, sample_rows));

// Compact the ELLPACK pages into the single sample page.
@@ -298,7 +298,7 @@ GradientBasedSample ExternalMemoryGradientBasedSampling::Sample(common::Span<Gra

// Create a new ELLPACK page with empty rows.
page_.reset(); // Release the device memory first before reallocating
- page_.reset(new EllpackPageImpl(batch_param_.gpu_id, original_page_->cuts_,
+ page_.reset(new EllpackPageImpl(batch_param_.gpu_id, original_page_->Cuts(),
original_page_->is_dense,
original_page_->row_stride, sample_rows));

65 changes: 33 additions & 32 deletions src/tree/gpu_hist/row_partitioner.cu
@@ -64,54 +64,55 @@ void RowPartitioner::SortPosition(common::Span<bst_node_t> position,
cub::DeviceScan::ExclusiveSum(temp_storage.data().get(), temp_storage_bytes,
in_itr, out_itr, position.size(), stream);
}
+
RowPartitioner::RowPartitioner(int device_idx, size_t num_rows)
- : device_idx(device_idx) {
- dh::safe_cuda(cudaSetDevice(device_idx));
- ridx_a.resize(num_rows);
- ridx_b.resize(num_rows);
- position_a.resize(num_rows);
- position_b.resize(num_rows);
- ridx = dh::DoubleBuffer<RowIndexT>{&ridx_a, &ridx_b};
- position = dh::DoubleBuffer<bst_node_t>{&position_a, &position_b};
- ridx_segments.emplace_back(Segment(0, num_rows));
+ : device_idx_(device_idx) {
+ dh::safe_cuda(cudaSetDevice(device_idx_));
+ ridx_a_.resize(num_rows);
+ ridx_b_.resize(num_rows);
+ position_a_.resize(num_rows);
+ position_b_.resize(num_rows);
+ ridx_ = dh::DoubleBuffer<RowIndexT>{&ridx_a_, &ridx_b_};
+ position_ = dh::DoubleBuffer<bst_node_t>{&position_a_, &position_b_};
+ ridx_segments_.emplace_back(Segment(0, num_rows));

thrust::sequence(
- thrust::device_pointer_cast(ridx.CurrentSpan().data()),
- thrust::device_pointer_cast(ridx.CurrentSpan().data() + ridx.Size()));
+ thrust::device_pointer_cast(ridx_.CurrentSpan().data()),
+ thrust::device_pointer_cast(ridx_.CurrentSpan().data() + ridx_.Size()));
thrust::fill(
- thrust::device_pointer_cast(position.Current()),
- thrust::device_pointer_cast(position.Current() + position.Size()), 0);
- left_counts.resize(256);
- thrust::fill(left_counts.begin(), left_counts.end(), 0);
- streams.resize(2);
- for (auto& stream : streams) {
+ thrust::device_pointer_cast(position_.Current()),
+ thrust::device_pointer_cast(position_.Current() + position_.Size()), 0);
+ left_counts_.resize(256);
+ thrust::fill(left_counts_.begin(), left_counts_.end(), 0);
+ streams_.resize(2);
+ for (auto& stream : streams_) {
dh::safe_cuda(cudaStreamCreate(&stream));
}
}
RowPartitioner::~RowPartitioner() {
- dh::safe_cuda(cudaSetDevice(device_idx));
- for (auto& stream : streams) {
+ dh::safe_cuda(cudaSetDevice(device_idx_));
+ for (auto& stream : streams_) {
dh::safe_cuda(cudaStreamDestroy(stream));
}
}

common::Span<const RowPartitioner::RowIndexT> RowPartitioner::GetRows(
bst_node_t nidx) {
- auto segment = ridx_segments.at(nidx);
+ auto segment = ridx_segments_.at(nidx);
// Return empty span here as a valid result
// Will error if we try to construct a span from a pointer with size 0
if (segment.Size() == 0) {
return common::Span<const RowPartitioner::RowIndexT>();
}
- return ridx.CurrentSpan().subspan(segment.begin, segment.Size());
+ return ridx_.CurrentSpan().subspan(segment.begin, segment.Size());
}

common::Span<const RowPartitioner::RowIndexT> RowPartitioner::GetRows() {
- return ridx.CurrentSpan();
+ return ridx_.CurrentSpan();
}

common::Span<const bst_node_t> RowPartitioner::GetPosition() {
- return position.CurrentSpan();
+ return position_.CurrentSpan();
}
std::vector<RowPartitioner::RowIndexT> RowPartitioner::GetRowsHost(
bst_node_t nidx) {
@@ -135,22 +136,22 @@ void RowPartitioner::SortPositionAndCopy(const Segment& segment,
cudaStream_t stream) {
SortPosition(
// position_in
- common::Span<bst_node_t>(position.Current() + segment.begin,
+ common::Span<bst_node_t>(position_.Current() + segment.begin,
segment.Size()),
// position_out
- common::Span<bst_node_t>(position.Other() + segment.begin,
+ common::Span<bst_node_t>(position_.Other() + segment.begin,
segment.Size()),
// row index in
- common::Span<RowIndexT>(ridx.Current() + segment.begin, segment.Size()),
+ common::Span<RowIndexT>(ridx_.Current() + segment.begin, segment.Size()),
// row index out
- common::Span<RowIndexT>(ridx.Other() + segment.begin, segment.Size()),
+ common::Span<RowIndexT>(ridx_.Other() + segment.begin, segment.Size()),
left_nidx, right_nidx, d_left_count, stream);
// Copy back key/value
- const auto d_position_current = position.Current() + segment.begin;
- const auto d_position_other = position.Other() + segment.begin;
- const auto d_ridx_current = ridx.Current() + segment.begin;
- const auto d_ridx_other = ridx.Other() + segment.begin;
- dh::LaunchN(device_idx, segment.Size(), stream, [=] __device__(size_t idx) {
+ const auto d_position_current = position_.Current() + segment.begin;
+ const auto d_position_other = position_.Other() + segment.begin;
+ const auto d_ridx_current = ridx_.Current() + segment.begin;
+ const auto d_ridx_other = ridx_.Other() + segment.begin;
+ dh::LaunchN(device_idx_, segment.Size(), stream, [=] __device__(size_t idx) {
d_position_current[idx] = d_position_other[idx];
d_ridx_current[idx] = d_ridx_other[idx];
});
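
The bulk of this file is mechanical renaming so that private members carry the trailing underscore demanded by readability-identifier-naming.PrivateMemberSuffix in the .clang-tidy configuration above. A toy class showing the convention (not the real RowPartitioner):

class PartitionerSketch {
 public:
  explicit PartitionerSketch(int device_idx)
      : device_idx_(device_idx) {}  // parameter stays plain; the member gets '_'

  int DeviceIdx() const { return device_idx_; }

 private:
  int device_idx_;  // was device_idx before the rename
};

int main() { return PartitionerSketch{0}.DeviceIdx(); }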
(Diff truncated: the remaining changed files are not shown.)
