Skip to content

Commit

Permalink
Delete cudnn6 code (#31835)
Browse files Browse the repository at this point in the history
  • Loading branch information
tianshuo78520a authored Mar 29, 2021
1 parent b48841b commit 8829a30
Show file tree
Hide file tree
Showing 4 changed files with 1 addition and 59 deletions.
5 changes: 0 additions & 5 deletions paddle/fluid/operators/conv_cudnn_op_cache.h
Original file line number Diff line number Diff line change
Expand Up @@ -40,11 +40,6 @@ static constexpr size_t kNUM_CUDNN_BWD_FILTER_ALGS =
CUDNN_CONVOLUTION_BWD_FILTER_ALGO_COUNT;
static constexpr size_t kNUM_CUDNN_BWD_DATA_ALGS =
CUDNN_CONVOLUTION_BWD_DATA_ALGO_COUNT;
#else
// cuDNN v5 has no CUDNN_CONVOLUTION_FWD_ALGO_COUNT etc.
static constexpr size_t kNUM_CUDNN_FWD_ALGS = 7;
static constexpr size_t kNUM_CUDNN_BWD_FILTER_ALGS = 4;
static constexpr size_t kNUM_CUDNN_BWD_DATA_ALGS = 5;
#endif

} // namespace operators
Expand Down
10 changes: 1 addition & 9 deletions paddle/fluid/operators/cudnn_lstm_cache.h
Original file line number Diff line number Diff line change
Expand Up @@ -85,20 +85,12 @@ class ScopedRNNBase {
dropout_desc_.descriptor(handle, place, initialized_, dropout_prob_,
dropout_state, seed_, state_size);

// ------------------- cudnn rnn descriptors ---------------------
#if CUDNN_VERSION >= 6000
// ------------------- cudnn rnn descriptors ---------------------
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetRNNDescriptor_v6(
handle, rnn_desc_.desc(), hidden_size_, num_layers_,
dropout_desc_.desc(), CUDNN_LINEAR_INPUT,
is_bidirec_ ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL, CUDNN_LSTM,
CUDNN_RNN_ALGO_STANDARD, cudnn_type));
#else
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetRNNDescriptor(
rnn_desc_.desc(), hidden_size_, num_layers_, dropout_desc_.desc(),
CUDNN_LINEAR_INPUT,
is_bidirec_ ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL, CUDNN_LSTM,
cudnn_type));
#endif

#if CUDNN_VERSION >= 7201
if (!sequence_length.empty()) {
Expand Down
7 changes: 0 additions & 7 deletions paddle/fluid/operators/cudnn_rnn_cache.h
Original file line number Diff line number Diff line change
Expand Up @@ -168,18 +168,11 @@ struct CudnnRNNCache {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateRNNDescriptor(&rnn_desc_));

#if CUDNN_VERSION >= 6000
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetRNNDescriptor_v6(
handle, rnn_desc_, hidden_size_, num_layers_, dropout_desc_,
CUDNN_LINEAR_INPUT,
is_bidirec_ ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL, CUDNN_LSTM,
CUDNN_RNN_ALGO_STANDARD, cudnn_type));
#else
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetRNNDescriptor(
rnn_desc_, hidden_size_, num_layers_, dropout_desc_, CUDNN_LINEAR_INPUT,
is_bidirec_ ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL, CUDNN_LSTM,
cudnn_type));
#endif

PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateFilterDescriptor(&w_desc_));
Expand Down
38 changes: 0 additions & 38 deletions paddle/fluid/platform/cudnn_helper.h
Original file line number Diff line number Diff line change
Expand Up @@ -91,30 +91,6 @@ enum class ActivationMode {
kBandPass,
};

#if CUDNN_VERSION < 6000
#pragma message "CUDNN version under 6.0 is supported at best effort."
#pragma message "We strongly encourage you to move to 6.0 and above."
#pragma message "This message is intended to annoy you enough to update."
#pragma message \
"please see https://docs.nvidia.com/deeplearning/sdk/cudnn-release-notes/"

// Map Paddle's PoolingMode onto the corresponding cudnnPoolingMode_t.
// cuDNN v5 has no deterministic max-pooling algorithm, so both maximum
// variants intentionally resolve to plain CUDNN_POOLING_MAX here.
inline cudnnPoolingMode_t GetPoolingMode(const PoolingMode& mode) {
  switch (mode) {
    case PoolingMode::kMaximum:
    case PoolingMode::kMaximumDeterministic:
      return CUDNN_POOLING_MAX;
    case PoolingMode::kAverageExclusive:
      return CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
    case PoolingMode::kAverageInclusive:
      return CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
    default:
      PADDLE_THROW(
          platform::errors::Unimplemented("Unexpected CUDNN pooling mode."));
  }
}
#else

inline cudnnPoolingMode_t GetPoolingMode(const PoolingMode& mode) {
switch (mode) {
case PoolingMode::kMaximumDeterministic:
Expand All @@ -130,7 +106,6 @@ inline cudnnPoolingMode_t GetPoolingMode(const PoolingMode& mode) {
platform::errors::Unimplemented("Unexpected CUDNN pooling mode."));
}
}
#endif // CUDNN_VERSION < 6000

inline ActivationMode StringToActivationMode(const std::string& str) {
if (str == "identity") {
Expand Down Expand Up @@ -471,19 +446,6 @@ class ScopedConvolutionDescriptor {
"of pads is %d, size of dilations is %d.",
pads.size(), dilations.size()));

#if !CUDNN_VERSION_MIN(6, 0, 0)
// cudnn v5 does not support dilation conv, the argument is called upscale
// instead of dilations and it is must be one.
for (size_t i = 0; i < dilations.size(); ++i) {
PADDLE_ENFORCE_EQ(dilations[i], 1,
platform::errors::InvalidArgument(
"Dilations conv is not supported in this cuDNN "
"version(%d.%d.%d).",
CUDNN_VERSION / 1000, CUDNN_VERSION % 1000 / 100,
CUDNN_VERSION % 100));
}
#endif

cudnnDataType_t compute_type =
(type == CUDNN_DATA_DOUBLE) ? CUDNN_DATA_DOUBLE : CUDNN_DATA_FLOAT;
PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnSetConvolutionNdDescriptor(
Expand Down

0 comments on commit 8829a30

Please sign in to comment.