-
-
Notifications
You must be signed in to change notification settings - Fork 8.7k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Fix dart inplace prediction with GPU input. #6777
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -92,7 +92,10 @@ class HostDeviceVectorImpl { | |
} else { | ||
gpu_access_ = GPUAccess::kWrite; | ||
SetDevice(); | ||
thrust::fill(data_d_->begin(), data_d_->end(), v); | ||
auto s_data = dh::ToSpan(*data_d_); | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Reviewer note: Avoid synchronization. |
||
dh::LaunchN(device_, data_d_->size(), [=]XGBOOST_DEVICE(size_t i) { | ||
s_data[i] = v; | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Reviewer: Should we use the bound-checked interface here, given that the size of the span is known? There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Reply: I can use pointer. |
||
}); | ||
} | ||
} | ||
|
||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -407,7 +407,6 @@ void EllpackPageImpl::CreateHistIndices(int device, | |
size_t gpu_batch_nrows = | ||
std::min(dh::TotalMemory(device) / (16 * row_stride * sizeof(Entry)), | ||
static_cast<size_t>(row_batch.Size())); | ||
const std::vector<Entry>& data_vec = row_batch.data.ConstHostVector(); | ||
|
||
size_t gpu_nbatches = common::DivRoundUp(row_batch.Size(), gpu_batch_nrows); | ||
|
||
|
@@ -429,9 +428,18 @@ void EllpackPageImpl::CreateHistIndices(int device, | |
size_t n_entries = ent_cnt_end - ent_cnt_begin; | ||
dh::device_vector<Entry> entries_d(n_entries); | ||
// copy data entries to device. | ||
dh::safe_cuda(cudaMemcpyAsync(entries_d.data().get(), | ||
data_vec.data() + ent_cnt_begin, | ||
n_entries * sizeof(Entry), cudaMemcpyDefault)); | ||
if (row_batch.data.DeviceCanRead()) { | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Reviewer note: Avoid copying data when it's already on GPU. |
||
auto const& d_data = row_batch.data.ConstDeviceSpan(); | ||
dh::safe_cuda(cudaMemcpyAsync( | ||
entries_d.data().get(), d_data.data() + ent_cnt_begin, | ||
n_entries * sizeof(Entry), cudaMemcpyDefault)); | ||
} else { | ||
const std::vector<Entry>& data_vec = row_batch.data.ConstHostVector(); | ||
dh::safe_cuda(cudaMemcpyAsync( | ||
entries_d.data().get(), data_vec.data() + ent_cnt_begin, | ||
n_entries * sizeof(Entry), cudaMemcpyDefault)); | ||
} | ||
|
||
const dim3 block3(32, 8, 1); // 256 threads | ||
const dim3 grid3(common::DivRoundUp(batch_nrows, block3.x), | ||
common::DivRoundUp(row_stride, block3.y), 1); | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,18 @@ | ||
/*!
 * Copyright 2021 by Contributors
 */
#include "xgboost/span.h"
#include "../common/device_helpers.cuh"

namespace xgboost {
namespace gbm {
// Accumulate the weighted predictions of one DART tree into the running
// prediction buffer, entirely on the GPU.  This avoids a device->host copy
// when the input data is already resident on the device.
//
// out_predts: running predictions, row-major layout [n_rows x n_groups];
//             updated in place.
// predts:     predictions produced by the current tree (same layout).
// tree_w:     DART weight applied to this tree's contribution.
// n_rows:     number of rows being predicted.
// n_groups:   number of output groups (classes).
// group:      the output group this tree belongs to.
void GPUDartPredictInc(common::Span<float> out_predts,
                       common::Span<float> predts, float tree_w, size_t n_rows,
                       bst_group_t n_groups, bst_group_t group) {
  // One device thread per row; each thread updates the slot belonging to
  // its (row, group) pair.
  dh::LaunchN(dh::CurrentDevice(), n_rows, [=] XGBOOST_DEVICE(size_t ridx) {
    const size_t offset = ridx * n_groups + group;
    out_predts[offset] += (predts[offset] * tree_w);
  });
}
}  // namespace gbm
}  // namespace xgboost
There was a problem hiding this comment.
Choose a reason for hiding this comment.
The reason will be displayed to describe this comment to others. Learn more.
This is to avoid copying data when input is already on GPU.