From d1e0e1017102fd3e45bb7851ef69028104d0b16c Mon Sep 17 00:00:00 2001
From: fis
Date: Thu, 7 Feb 2019 15:39:54 +0800
Subject: [PATCH] Fix clang-tidy.

Initialize the CUB temporary-storage size before the size query; make
shard and updater data members private, adding small accessors where
other code needs them; give private members trailing-underscore names;
rename get() to Get() and the camelCase local myValue to my_value; use
int64_t instead of long and range-based for loops in tests; rename test
constants to the kCamelCase convention; prefer emplace_back over
push_back; and make DeviceHistogram::kStopGrowingSize a static
constexpr.
---
 src/common/device_helpers.cuh             |  2 +-
 src/common/host_device_vector.cu          | 29 ++++++---
 src/linear/updater_gpu_coordinate.cu      | 33 +++++-----
 src/tree/updater_gpu.cu                   | 16 ++---
 src/tree/updater_gpu_hist.cu              |  3 +-
 tests/cpp/data/test_simple_dmatrix.cc     |  4 +-
 tests/cpp/predictor/test_cpu_predictor.cc |  4 +-
 tests/cpp/tree/test_gpu_exact.cu          | 10 +--
 tests/cpp/tree/test_gpu_hist.cu           | 77 ++++++++++++-----------
 tests/cpp/tree/test_prune.cc              |  6 +-
 tests/cpp/tree/test_refresh.cc            |  6 +-
 11 files changed, 101 insertions(+), 89 deletions(-)

diff --git a/src/common/device_helpers.cuh b/src/common/device_helpers.cuh
index 1fd8e24079c0..3c2eecef8e21 100644
--- a/src/common/device_helpers.cuh
+++ b/src/common/device_helpers.cuh
@@ -771,7 +771,7 @@ template <typename T>
 typename std::iterator_traits<T>::value_type SumReduction(
     dh::CubMemory &tmp_mem, T in, int nVals) {
   using ValueT = typename std::iterator_traits<T>::value_type;
-  size_t tmpSize;
+  size_t tmpSize {0};
   ValueT *dummy_out = nullptr;
   dh::safe_cuda(cub::DeviceReduce::Sum(nullptr, tmpSize, in, dummy_out, nVals));
   // Allocate small extra memory for the return value
diff --git a/src/common/host_device_vector.cu b/src/common/host_device_vector.cu
index 9cb39d5527e2..4850c4bb1700 100644
--- a/src/common/host_device_vector.cu
+++ b/src/common/host_device_vector.cu
@@ -153,6 +153,13 @@ struct HostDeviceVectorImpl {
       }
     }
 
+    T* Raw() { return data_.data().get(); }
+    size_t Start() const { return start_; }
+    size_t DataSize() const { return data_.size(); }
+    Permissions& Perm() { return perm_d_; }
+    Permissions const& Perm() const { return perm_d_; }
+
+   private:
     int device_;
     thrust::device_vector<T> data_;
     // cached vector size
@@ -215,41 +222,42 @@ struct HostDeviceVectorImpl {
   T* DevicePointer(int device) {
     CHECK(distribution_.devices_.Contains(device));
     LazySyncDevice(device, GPUAccess::kWrite);
-    return shards_.at(distribution_.devices_.Index(device)).data_.data().get();
+    return shards_.at(distribution_.devices_.Index(device)).Raw();
   }
 
   const T* ConstDevicePointer(int device) {
     CHECK(distribution_.devices_.Contains(device));
     LazySyncDevice(device, GPUAccess::kRead);
-    return shards_.at(distribution_.devices_.Index(device)).data_.data().get();
+    return shards_.at(distribution_.devices_.Index(device)).Raw();
   }
 
   common::Span<T> DeviceSpan(int device) {
     GPUSet devices = distribution_.devices_;
     CHECK(devices.Contains(device));
     LazySyncDevice(device, GPUAccess::kWrite);
-    return {shards_.at(devices.Index(device)).data_.data().get(),
-            static_cast<typename common::Span<T>::index_type>(DeviceSize(device))};
+    return {shards_.at(devices.Index(device)).Raw(),
+            static_cast<typename common::Span<T>::index_type>(DeviceSize(device))};
   }
 
   common::Span<const T> ConstDeviceSpan(int device) {
     GPUSet devices = distribution_.devices_;
     CHECK(devices.Contains(device));
     LazySyncDevice(device, GPUAccess::kRead);
-    return {shards_.at(devices.Index(device)).data_.data().get(),
-            static_cast<typename common::Span<const T>::index_type>(DeviceSize(device))};
+    using SpanInd = typename common::Span<const T>::index_type;
+    return {shards_.at(devices.Index(device)).Raw(),
+            static_cast<SpanInd>(DeviceSize(device))};
   }
 
   size_t DeviceSize(int device) {
     CHECK(distribution_.devices_.Contains(device));
     LazySyncDevice(device, GPUAccess::kRead);
-    return shards_.at(distribution_.devices_.Index(device)).data_.size();
+    return shards_.at(distribution_.devices_.Index(device)).DataSize();
  }
 
   size_t DeviceStart(int device) {
     CHECK(distribution_.devices_.Contains(device));
     LazySyncDevice(device, GPUAccess::kRead);
-    return shards_.at(distribution_.devices_.Index(device)).start_;
+    return shards_.at(distribution_.devices_.Index(device)).Start();
   }
 
   thrust::device_ptr<T> tbegin(int device) {  // NOLINT
@@ -388,7 +396,7 @@ struct HostDeviceVectorImpl {
     if (perm_h_.CanRead()) {
       // data is present, just need to deny access to the device
       dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
-          shard.perm_d_.DenyComplementary(access);
+          shard.Perm().DenyComplementary(access);
         });
       perm_h_.Grant(access);
       return;
@@ -411,9 +419,10 @@ struct HostDeviceVectorImpl {
   bool DeviceCanAccess(int device, GPUAccess access) {
     GPUSet devices = distribution_.Devices();
     if (!devices.Contains(device)) { return false; }
-    return shards_.at(devices.Index(device)).perm_d_.CanAccess(access);
+    return shards_.at(devices.Index(device)).Perm().CanAccess(access);
   }
 
+ private:
   std::vector<T> data_h_;
   Permissions perm_h_;
   // the total size of the data stored on the devices
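Note: the device_helpers.cuh change works around a checker limitation rather than a real bug. CUB's two-phase API writes the required temp-storage size into tmpSize on the first call and never reads it, but clang-tidy cannot prove that, so zero-initialising the variable is the cheapest fix. A minimal sketch of the pattern (not xgboost code; d_in, d_out and num_items stand in for real device data):

#include <cstddef>
#include <cub/cub.cuh>

void SumSketch(const float* d_in, float* d_out, int num_items) {
  // Phase 1: null buffer; CUB only writes the required size into bytes.
  size_t bytes {0};
  cub::DeviceReduce::Sum(nullptr, bytes, d_in, d_out, num_items);
  // Phase 2: run the reduction with the buffer actually allocated.
  void* d_temp = nullptr;
  cudaMalloc(&d_temp, bytes);
  cub::DeviceReduce::Sum(d_temp, bytes, d_in, d_out, num_items);
  cudaFree(d_temp);
}

The host_device_vector.cu hunks above follow the patch's other theme: DeviceShard's data members become private, and callers go through the new Raw()/Start()/DataSize()/Perm() accessors instead of reaching into the shard directly.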
diff --git a/src/linear/updater_gpu_coordinate.cu b/src/linear/updater_gpu_coordinate.cu
index 0d4cbe824dd3..89e99bf0f847 100644
--- a/src/linear/updater_gpu_coordinate.cu
+++ b/src/linear/updater_gpu_coordinate.cu
@@ -154,8 +154,8 @@ class GPUCoordinateUpdater : public LinearUpdater {
   void Init(
       const std::vector<std::pair<std::string, std::string>> &args) override {
     tparam_.InitAllowUnknown(args);
-    selector.reset(FeatureSelector::Create(tparam_.feature_selector));
-    monitor.Init("GPUCoordinateUpdater");
+    selector_.reset(FeatureSelector::Create(tparam_.feature_selector));
+    monitor_.Init("GPUCoordinateUpdater");
   }
 
   void LazyInitShards(DMatrix *p_fmat,
@@ -196,38 +196,38 @@ class GPUCoordinateUpdater : public LinearUpdater {
   void Update(HostDeviceVector<GradientPair> *in_gpair, DMatrix *p_fmat,
               gbm::GBLinearModel *model, double sum_instance_weight) override {
     tparam_.DenormalizePenalties(sum_instance_weight);
-    monitor.Start("LazyInitShards");
+    monitor_.Start("LazyInitShards");
     this->LazyInitShards(p_fmat, model->param);
-    monitor.Stop("LazyInitShards");
+    monitor_.Stop("LazyInitShards");
 
-    monitor.Start("UpdateGpair");
+    monitor_.Start("UpdateGpair");
     // Update gpair
     dh::ExecuteIndexShards(&shards, [&](int idx, std::unique_ptr<DeviceShard>& shard) {
       if (!shard->IsEmpty()) {
         shard->UpdateGpair(in_gpair->ConstHostVector(), model->param);
       }
     });
-    monitor.Stop("UpdateGpair");
+    monitor_.Stop("UpdateGpair");
 
-    monitor.Start("UpdateBias");
+    monitor_.Start("UpdateBias");
     this->UpdateBias(p_fmat, model);
-    monitor.Stop("UpdateBias");
+    monitor_.Stop("UpdateBias");
     // prepare for updating the weights
-    selector->Setup(*model, in_gpair->ConstHostVector(), p_fmat,
-                    tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm,
-                    coord_param_.top_k);
-    monitor.Start("UpdateFeature");
+    selector_->Setup(*model, in_gpair->ConstHostVector(), p_fmat,
+                     tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm,
+                     coord_param_.top_k);
+    monitor_.Start("UpdateFeature");
     for (auto group_idx = 0; group_idx < model->param.num_output_group;
          ++group_idx) {
       for (auto i = 0U; i < model->param.num_feature; i++) {
-        auto fidx = selector->NextFeature(
+        auto fidx = selector_->NextFeature(
             i, *model, group_idx, in_gpair->ConstHostVector(), p_fmat,
             tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm);
         if (fidx < 0) break;
         this->UpdateFeature(fidx, group_idx, &in_gpair->HostVector(), model);
       }
     }
-    monitor.Stop("UpdateFeature");
+    monitor_.Stop("UpdateFeature");
   }
 
   void UpdateBias(DMatrix *p_fmat, gbm::GBLinearModel *model) {
@@ -288,12 +288,13 @@ class GPUCoordinateUpdater : public LinearUpdater {
     });
   }
 
+ private:
   // training parameter
   LinearTrainParam tparam_;
   CoordinateParam coord_param_;
   GPUDistribution dist_;
-  std::unique_ptr<FeatureSelector> selector;
-  common::Monitor monitor;
+  std::unique_ptr<FeatureSelector> selector_;
+  common::Monitor monitor_;
 
   std::vector<std::unique_ptr<DeviceShard>> shards;
 };
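Note: the renames in this file apply the style rule that clang-tidy enforces here: private data members take a trailing underscore. A stand-in illustration (not xgboost code) of why the convention pays off inside member functions:

#include <string>

class UpdaterSketch {
 public:
  // 'name' (parameter) and 'monitor_' (member) cannot be confused,
  // which is what distinguishes selector_/monitor_ from the old names.
  void Init(const std::string& name) { monitor_ = name; }

 private:
  std::string monitor_;  // was a public member named 'monitor'
  int selector_ {0};     // was a public member named 'selector'
};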
diff --git a/src/tree/updater_gpu.cu b/src/tree/updater_gpu.cu
index cfbefa89e2be..f37746d00072 100644
--- a/src/tree/updater_gpu.cu
+++ b/src/tree/updater_gpu.cu
@@ -102,7 +102,7 @@ struct AddByKey {
  * @param instIds instance index buffer
  * @return the expected gradient value
  */
-HOST_DEV_INLINE GradientPair get(int id,
+HOST_DEV_INLINE GradientPair Get(int id,
                                  common::Span<const GradientPair> vals,
                                  common::Span<const int> instIds) {
   id = instIds[id];
@@ -129,7 +129,7 @@ __global__ void CubScanByKeyL1(
   int tid = blockIdx.x * BLKDIM_L1L3 + threadIdx.x;
   if (tid < size) {
     myKey = Abs2UniqueKey(tid, keys, colIds, nodeStart, nUniqKeys);
-    myValue = get(tid, vals, instIds);
+    myValue = Get(tid, vals, instIds);
   } else {
     myKey = kNoneKey;
     myValue = {};
@@ -201,19 +201,19 @@ __global__ void CubScanByKeyL3(common::Span<GradientPair> sums,
   int previousKey =
       tid == 0 ? kNoneKey
                : Abs2UniqueKey(tid - 1, keys, colIds, nodeStart, nUniqKeys);
-  GradientPair myValue = scans[tid];
+  GradientPair my_value = scans[tid];
   __syncthreads();
   if (blockIdx.x > 0 && s_mKeys == previousKey) {
-    myValue += s_mScans[0];
+    my_value += s_mScans[0];
   }
   if (tid == size - 1) {
-    sums[previousKey] = myValue + get(tid, vals, instIds);
+    sums[previousKey] = my_value + Get(tid, vals, instIds);
   }
   if ((previousKey != myKey) && (previousKey >= 0)) {
-    sums[previousKey] = myValue;
-    myValue = GradientPair(0.0f, 0.0f);
+    sums[previousKey] = my_value;
+    my_value = GradientPair(0.0f, 0.0f);
   }
-  scans[tid] = myValue;
+  scans[tid] = my_value;
 }
 
 /**
diff --git a/src/tree/updater_gpu_hist.cu b/src/tree/updater_gpu_hist.cu
index 95a5008ba5aa..cdcfaa38b047 100644
--- a/src/tree/updater_gpu_hist.cu
+++ b/src/tree/updater_gpu_hist.cu
@@ -278,7 +278,7 @@ struct DeviceHistogram {
   /*! \brief Map nidx to starting index of its histogram. */
   std::map<int, size_t> nidx_map;
   thrust::device_vector<typename GradientSumT::ValueT> data;
-  const size_t kStopGrowingSize = 1 << 26;  // Do not grow beyond this size
+  static constexpr size_t kStopGrowingSize = 1 << 26;  // Do not grow beyond this size
   int n_bins;
   int device_id_;
 
@@ -1373,6 +1373,7 @@ class GPUHistMakerSpecialised{
   /*! List storing device id. */
   std::vector<int> device_list_;
 
+ private:
   DMatrix* p_last_fmat_;
   GPUDistribution dist_;
 };
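Note: the kStopGrowingSize change is more than naming. A non-static const member occupies storage in every DeviceHistogram instance and is initialised at run time; static constexpr removes the per-instance copy and makes the limit usable in constant expressions. An illustrative comparison (not xgboost code):

#include <cstddef>

struct WithConstMember {
  const std::size_t kLimit = 1 << 26;  // one copy per instance, run-time init
};

struct WithStaticConstexpr {
  static constexpr std::size_t kLimit = 1 << 26;  // no per-instance storage
};

// The static constexpr form can be used wherever a compile-time
// constant is required; the per-instance const member cannot.
static_assert(WithStaticConstexpr::kLimit == (1u << 26), "compile-time use");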
diff --git a/tests/cpp/data/test_simple_dmatrix.cc b/tests/cpp/data/test_simple_dmatrix.cc
index 50b8652583c8..d9c648fa98f7 100644
--- a/tests/cpp/data/test_simple_dmatrix.cc
+++ b/tests/cpp/data/test_simple_dmatrix.cc
@@ -27,7 +27,7 @@ TEST(SimpleDMatrix, RowAccess) {
   xgboost::DMatrix * dmat = xgboost::DMatrix::Load(tmp_file, false, false);
 
   // Loop over the batches and count the records
-  long row_count = 0;
+  int64_t row_count = 0;
   for (auto &batch : dmat->GetRowBatches()) {
     row_count += batch.Size();
   }
@@ -54,7 +54,7 @@ TEST(SimpleDMatrix, ColAccessWithoutBatches) {
   ASSERT_TRUE(dmat->SingleColBlock());
 
   // Loop over the batches and assert the data is as expected
-  long num_col_batch = 0;
+  int64_t num_col_batch = 0;
   for (const auto &batch : dmat->GetSortedColumnBatches()) {
     num_col_batch += 1;
     EXPECT_EQ(batch.Size(), dmat->Info().num_col_)
diff --git a/tests/cpp/predictor/test_cpu_predictor.cc b/tests/cpp/predictor/test_cpu_predictor.cc
index 752fdee92e01..73a8a8bc3370 100644
--- a/tests/cpp/predictor/test_cpu_predictor.cc
+++ b/tests/cpp/predictor/test_cpu_predictor.cc
@@ -41,8 +41,8 @@ TEST(cpu_predictor, Test) {
   // Test predict leaf
   std::vector<float> leaf_out_predictions;
   cpu_predictor->PredictLeaf((*dmat).get(), &leaf_out_predictions, model);
-  for (int i = 0; i < leaf_out_predictions.size(); i++) {
-    ASSERT_EQ(leaf_out_predictions[i], 0);
+  for (auto v : leaf_out_predictions) {
+    ASSERT_EQ(v, 0);
   }
 
   // Test predict contribution
diff --git a/tests/cpp/tree/test_gpu_exact.cu b/tests/cpp/tree/test_gpu_exact.cu
index aabe46fc303a..23a6c8a88901 100644
--- a/tests/cpp/tree/test_gpu_exact.cu
+++ b/tests/cpp/tree/test_gpu_exact.cu
@@ -20,13 +20,13 @@ TEST(GPUExact, Update) {
   auto* p_gpuexact_maker = TreeUpdater::Create("grow_gpu");
   p_gpuexact_maker->Init(args);
 
-  size_t constexpr n_rows = 4;
-  size_t constexpr n_cols = 8;
+  size_t constexpr kNRows = 4;
+  size_t constexpr kNCols = 8;
   bst_float constexpr sparsity = 0.0f;
 
-  auto dmat = CreateDMatrix(n_rows, n_cols, sparsity, 3);
-  std::vector<GradientPair> h_gpair(n_rows);
-  for (size_t i = 0; i < n_rows; ++i) {
+  auto dmat = CreateDMatrix(kNRows, kNCols, sparsity, 3);
+  std::vector<GradientPair> h_gpair(kNRows);
+  for (size_t i = 0; i < kNRows; ++i) {
     h_gpair[i] = GradientPair(i % 2, 1);
   }
   HostDeviceVector<GradientPair> gpair (h_gpair);
diff --git a/tests/cpp/tree/test_gpu_hist.cu b/tests/cpp/tree/test_gpu_hist.cu
index 83896ebc8536..3731d78da06a 100644
--- a/tests/cpp/tree/test_gpu_hist.cu
+++ b/tests/cpp/tree/test_gpu_hist.cu
@@ -43,20 +43,20 @@ void BuildGidx(DeviceShard<GradientPairPrecise>* shard, int n_rows, int n_cols,
 }
 
 TEST(GpuHist, BuildGidxDense) {
-  int const n_rows = 16, n_cols = 8;
+  int constexpr kNRows = 16, kNCols = 8;
   TrainParam param;
   param.max_depth = 1;
   param.n_gpus = 1;
   param.max_leaves = 0;
 
-  DeviceShard<GradientPairPrecise> shard(0, 0, n_rows, param);
-  BuildGidx(&shard, n_rows, n_cols);
+  DeviceShard<GradientPairPrecise> shard(0, 0, kNRows, param);
+  BuildGidx(&shard, kNRows, kNCols);
 
   std::vector<common::CompressedByteT> h_gidx_buffer;
   h_gidx_buffer = shard.gidx_buffer.AsVector();
   common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25);
 
-  ASSERT_EQ(shard.row_stride, n_cols);
+  ASSERT_EQ(shard.row_stride, kNCols);
 
   std::vector<uint32_t> solution = {
     0, 3, 8, 9, 14, 17, 20, 21,
@@ -76,20 +76,20 @@ TEST(GpuHist, BuildGidxDense) {
     2, 4, 8, 10, 14, 15, 19, 22,
     1, 4, 7, 10, 14, 16, 19, 21,
   };
-  for (size_t i = 0; i < n_rows * n_cols; ++i) {
+  for (size_t i = 0; i < kNRows * kNCols; ++i) {
     ASSERT_EQ(solution[i], gidx[i]);
   }
 }
 
 TEST(GpuHist, BuildGidxSparse) {
-  int const n_rows = 16, n_cols = 8;
+  int constexpr kNRows = 16, kNCols = 8;
   TrainParam param;
   param.max_depth = 1;
   param.n_gpus = 1;
   param.max_leaves = 0;
 
-  DeviceShard<GradientPairPrecise> shard(0, 0, n_rows, param);
-  BuildGidx(&shard, n_rows, n_cols, 0.9f);
+  DeviceShard<GradientPairPrecise> shard(0, 0, kNRows, param);
+  BuildGidx(&shard, kNRows, kNCols, 0.9f);
 
   std::vector<common::CompressedByteT> h_gidx_buffer;
   h_gidx_buffer = shard.gidx_buffer.AsVector();
@@ -103,7 +103,7 @@
     24, 24, 24, 24, 24,  5, 24, 24,  0, 16, 24, 15, 24, 24, 24, 24,
     24,  7, 14, 16,  4, 24, 24, 24, 24, 24,  9, 24, 24,  1, 24, 24
   };
-  for (size_t i = 0; i < n_rows * shard.row_stride; ++i) {
+  for (size_t i = 0; i < kNRows * shard.row_stride; ++i) {
     ASSERT_EQ(solution[i], gidx[i]);
   }
 }
@@ -125,27 +125,27 @@ std::vector<GradientPairPrecise> GetHostHistGpair() {
 
 template <typename GradientSumT>
 void TestBuildHist(GPUHistBuilderBase<GradientSumT>& builder) {
-  int const n_rows = 16, n_cols = 8;
+  int const kNRows = 16, kNCols = 8;
 
   TrainParam param;
   param.max_depth = 6;
   param.n_gpus = 1;
   param.max_leaves = 0;
 
-  DeviceShard<GradientSumT> shard(0, 0, n_rows, param);
-  BuildGidx(&shard, n_rows, n_cols);
+  DeviceShard<GradientSumT> shard(0, 0, kNRows, param);
+  BuildGidx(&shard, kNRows, kNCols);
 
   xgboost::SimpleLCG gen;
   xgboost::SimpleRealUniformDistribution<bst_float> dist(0.0f, 1.0f);
-  std::vector<GradientPair> h_gpair(n_rows);
+  std::vector<GradientPair> h_gpair(kNRows);
   for (size_t i = 0; i < h_gpair.size(); ++i) {
     bst_float grad = dist(&gen);
     bst_float hess = dist(&gen);
     h_gpair[i] = GradientPair(grad, hess);
   }
 
-  thrust::device_vector<GradientPair> gpair (n_rows);
+  thrust::device_vector<GradientPair> gpair (kNRows);
   gpair = h_gpair;
 
   int num_symbols = shard.n_bins + 1;
@@ -161,7 +161,7 @@ void TestBuildHist(GPUHistBuilderBase<GradientSumT>& builder) {
                                          num_symbols);
 
   shard.ridx_segments.resize(1);
-  shard.ridx_segments[0] = Segment(0, n_rows);
+  shard.ridx_segments[0] = Segment(0, kNRows);
   shard.hist.AllocateHistogram(0);
   shard.gpair.copy(gpair.begin(), gpair.end());
   thrust::sequence(shard.ridx.CurrentDVec().tbegin(),
@@ -221,8 +221,8 @@ common::HistCutMatrix GetHostCutMatrix () {
 
 // TODO(trivialfis): This test is over simplified.
 TEST(GpuHist, EvaluateSplits) {
-  constexpr int n_rows = 16;
-  constexpr int n_cols = 8;
+  constexpr int kNRows = 16;
+  constexpr int kNCols = 8;
 
   TrainParam param;
   param.max_depth = 1;
@@ -237,14 +237,15 @@ TEST(GpuHist, EvaluateSplits) {
   param.reg_lambda = 0;
   param.max_delta_step = 0.0;
 
-  for (size_t i = 0; i < n_cols; ++i) {
+  for (size_t i = 0; i < kNCols; ++i) {
     param.monotone_constraints.emplace_back(0);
   }
 
   int max_bins = 4;
 
   // Initialize DeviceShard
-  std::unique_ptr<DeviceShard<GradientPairPrecise>> shard {new DeviceShard<GradientPairPrecise>(0, 0, n_rows, param)};
+  std::unique_ptr<DeviceShard<GradientPairPrecise>> shard {
+    new DeviceShard<GradientPairPrecise>(0, 0, kNRows, param)};
 
   // Initialize DeviceShard::node_sum_gradients
   shard->node_sum_gradients = {{6.4f, 12.8f}};
@@ -257,14 +258,14 @@
                       &(shard->cut_.feature_segments), cmat.row_ptr.size(),
                       &(shard->cut_.min_fvalue), cmat.min_val.size(),
                       &(shard->cut_.gidx_fvalue_map), 24,
-                      &(shard->monotone_constraints), n_cols);
+                      &(shard->monotone_constraints), kNCols);
   shard->cut_.feature_segments.copy(cmat.row_ptr.begin(), cmat.row_ptr.end());
   shard->cut_.gidx_fvalue_map.copy(cmat.cut.begin(), cmat.cut.end());
   shard->monotone_constraints.copy(param.monotone_constraints.begin(),
                                    param.monotone_constraints.end());
 
   // Initialize DeviceShard::hist
-  shard->hist.Init(0, (max_bins - 1) * n_cols);
+  shard->hist.Init(0, (max_bins - 1) * kNCols);
   shard->hist.AllocateHistogram(0);
   // Each row of hist_gpair represents gpairs for one feature.
   // Each entry represents a bin.
@@ -284,7 +285,7 @@ TEST(GpuHist, EvaluateSplits) {
       GPUHistMakerSpecialised<GradientPairPrecise>();
   hist_maker.param_ = param;
   hist_maker.shards_.push_back(std::move(shard));
-  hist_maker.column_sampler_.Init(n_cols,
+  hist_maker.column_sampler_.Init(kNCols,
                                   param.colsample_bynode,
                                   param.colsample_bylevel,
                                   param.colsample_bytree,
@@ -292,8 +293,8 @@ TEST(GpuHist, EvaluateSplits) {
 
   RegTree tree;
   MetaInfo info;
-  info.num_row_ = n_rows;
-  info.num_col_ = n_cols;
+  info.num_row_ = kNRows;
+  info.num_col_ = kNCols;
 
   hist_maker.info_ = &info;
   hist_maker.node_value_constraints_.resize(1);
@@ -311,27 +312,27 @@ TEST(GpuHist, ApplySplit) {
   GPUHistMakerSpecialised<GradientPairPrecise> hist_maker =
       GPUHistMakerSpecialised<GradientPairPrecise>();
   int constexpr nid = 0;
-  int constexpr n_rows = 16;
-  int constexpr n_cols = 8;
+  int constexpr kNRows = 16;
+  int constexpr kNCols = 8;
 
   TrainParam param;
 
   // Initialize shard
-  for (size_t i = 0; i < n_cols; ++i) {
+  for (size_t i = 0; i < kNCols; ++i) {
     param.monotone_constraints.emplace_back(0);
   }
 
   hist_maker.shards_.resize(1);
-  hist_maker.shards_[0].reset(new DeviceShard<GradientPairPrecise>(0, 0, n_rows, param));
+  hist_maker.shards_[0].reset(new DeviceShard<GradientPairPrecise>(0, 0, kNRows, param));
 
   auto& shard = hist_maker.shards_.at(0);
   shard->ridx_segments.resize(3);  // 3 nodes.
   shard->node_sum_gradients.resize(3);
 
-  shard->ridx_segments[0] = Segment(0, n_rows);
-  shard->ba.Allocate(0, &(shard->ridx), n_rows,
-                     &(shard->position), n_rows);
-  shard->row_stride = n_cols;
+  shard->ridx_segments[0] = Segment(0, kNRows);
+  shard->ba.Allocate(0, &(shard->ridx), kNRows,
+                     &(shard->position), kNRows);
+  shard->row_stride = kNCols;
   thrust::sequence(shard->ridx.CurrentDVec().tbegin(),
                    shard->ridx.CurrentDVec().tend());
   // Initialize GPUHistMaker
@@ -354,21 +355,21 @@ TEST(GpuHist, ApplySplit) {
   hist_maker.hmat_ = cmat;
 
   MetaInfo info;
-  info.num_row_ = n_rows;
-  info.num_col_ = n_cols;
-  info.num_nonzero_ = n_rows * n_cols;  // Dense
+  info.num_row_ = kNRows;
+  info.num_col_ = kNCols;
+  info.num_nonzero_ = kNRows * kNCols;  // Dense
 
   // Initialize gidx
   int n_bins = 24;
-  int row_stride = n_cols;
+  int row_stride = kNCols;
   int num_symbols = n_bins + 1;
   size_t compressed_size_bytes =
       common::CompressedBufferWriter::CalculateBufferSize(
-          row_stride * n_rows, num_symbols);
+          row_stride * kNRows, num_symbols);
 
   shard->ba.Allocate(0, &(shard->gidx_buffer), compressed_size_bytes);
   common::CompressedBufferWriter wr(num_symbols);
-  std::vector<int> h_gidx (n_rows * row_stride);
+  std::vector<int> h_gidx (kNRows * row_stride);
   std::iota(h_gidx.begin(), h_gidx.end(), 0);
   std::vector<common::CompressedByteT> h_gidx_compressed (compressed_size_bytes);
diff --git a/tests/cpp/tree/test_prune.cc b/tests/cpp/tree/test_prune.cc
index 8206a39be556..26171458477d 100644
--- a/tests/cpp/tree/test_prune.cc
+++ b/tests/cpp/tree/test_prune.cc
@@ -16,11 +16,11 @@ TEST(Updater, Prune) {
   int constexpr n_rows = 32, n_cols = 16;
 
   std::vector<std::pair<std::string, std::string>> cfg;
-  cfg.push_back(std::pair<std::string, std::string>(
+  cfg.emplace_back(std::pair<std::string, std::string>(
       "num_feature", std::to_string(n_cols)));
-  cfg.push_back(std::pair<std::string, std::string>(
+  cfg.emplace_back(std::pair<std::string, std::string>(
      "min_split_loss", "10"));
-  cfg.push_back(std::pair<std::string, std::string>(
+  cfg.emplace_back(std::pair<std::string, std::string>(
      "silent", "1"));
 
   // These data are just place holders.
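Note: the test_prune.cc change satisfies the warning, but emplace_back(std::pair<...>(...)) still constructs a temporary pair and moves it in. What modernize-use-emplace actually aims for is forwarding the constructor arguments so the pair is built in place; a possible follow-up, not part of this patch:

#include <string>
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<std::string, std::string>> cfg;
  // As in the patch: a temporary pair is created, then moved into the vector.
  cfg.emplace_back(std::pair<std::string, std::string>("min_split_loss", "10"));
  // In-place construction: the arguments are forwarded to the pair constructor.
  cfg.emplace_back("min_split_loss", "10");
  return cfg.size() == 2 ? 0 : 1;
}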
diff --git a/tests/cpp/tree/test_refresh.cc b/tests/cpp/tree/test_refresh.cc
index cbd06d609c45..029e4479a7c1 100644
--- a/tests/cpp/tree/test_refresh.cc
+++ b/tests/cpp/tree/test_refresh.cc
@@ -13,15 +13,15 @@ namespace xgboost {
 namespace tree {
 
 TEST(Updater, Refresh) {
-  int constexpr n_rows = 8, n_cols = 16;
+  int constexpr kNRows = 8, kNCols = 16;
 
   HostDeviceVector<GradientPair> gpair =
       { {0.23f, 0.24f}, {0.23f, 0.24f}, {0.23f, 0.24f}, {0.23f, 0.24f},
         {0.27f, 0.29f}, {0.27f, 0.29f}, {0.27f, 0.29f}, {0.27f, 0.29f} };
-  auto dmat = CreateDMatrix(n_rows, n_cols, 0.4, 3);
+  auto dmat = CreateDMatrix(kNRows, kNCols, 0.4, 3);
   std::vector<std::pair<std::string, std::string>> cfg {
     {"reg_alpha", "0.0"},
-    {"num_feature", std::to_string(n_cols)},
+    {"num_feature", std::to_string(kNCols)},
     {"reg_lambda", "1"}};
 
   RegTree tree = RegTree();
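Note: the long to int64_t and range-for changes in the tests are portability and readability fixes. long is 64-bit on LP64 Linux but 32-bit on LLP64 Windows, while std::int64_t is the same width everywhere, and a range-based loop removes the signed/unsigned comparison between an int index and size(). A compact demonstration (not xgboost code):

#include <cstdint>
#include <vector>

int main() {
  std::vector<float> leaf_out(4, 0.0f);
  std::int64_t row_count = 0;  // fixed width on every platform, unlike long
  for (auto v : leaf_out) {    // no int-vs-size_t comparison to warn about
    row_count += (v == 0.0f) ? 1 : 0;
  }
  return row_count == 4 ? 0 : 1;
}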