Merge pull request PaddlePaddle#45 from mthreads/cpp_lint
[MTAI-484] fix(code-style): modify code format for cpplint check
caizhi-mt authored and mt-robot committed Aug 15, 2023
2 parents db90713 + 0b8290c commit 4924282
Showing 102 changed files with 578 additions and 341 deletions.
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -97,7 +97,7 @@ repos:
 files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx)$
 args:
 - --extensions=c,cc,cxx,cpp,cu,cuh,h,hpp,hxx,kps
-- --filter=-readability/fn_size,-build/include_what_you_use,-build/c++11,-whitespace/parens
+- --filter=-readability/fn_size,-build/include_what_you_use,-build/c++11,-whitespace/parens,-whitespace/braces
 - --quiet
 # Exclude third-party libraries
 exclude: |
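For context: cpplint's whitespace/braces category covers brace-placement diagnostics such as "{ should almost always be at the end of the previous line". A minimal C++ sketch (illustrative only, not part of the commit) of code that this check would flag and that the extended filter now ignores:

#include <cstdio>

// cpplint would normally report the opening brace below under
// whitespace/braces; with -whitespace/braces in --filter, the
// diagnostic is suppressed.
int main()
{
  std::printf("brace style now tolerated by the filter\n");
  return 0;
}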
8 changes: 6 additions & 2 deletions cmake/generic.cmake
@@ -1356,8 +1356,12 @@ function(math_library TARGET)
 elseif(WITH_MUSA)
 musa_library(
 ${TARGET}
-SRCS ${cc_srcs} ${cu_srcs}
-DEPS ${math_library_DEPS} ${math_common_deps})
+SRCS
+${cc_srcs}
+${cu_srcs}
+DEPS
+${math_library_DEPS}
+${math_common_deps})
 elseif(${cc_srcs_len} GREATER 0)
 cc_library(
 ${TARGET}
13 changes: 7 additions & 6 deletions cmake/mccl.cmake
@@ -23,15 +23,15 @@ if(WITH_MCCL)
 string(REGEX MATCH "define MCCL_MAJOR +([0-9]+)" MCCL_MAJOR_VERSION
 "${MCCL_VERSION_FILE_CONTENTS}")
 string(REGEX REPLACE "define MCCL_MAJOR +([0-9]+)" "\\1" MCCL_MAJOR_VERSION
-"${MCCL_MAJOR_VERSION}")
+"${MCCL_MAJOR_VERSION}")
 string(REGEX MATCH "define MCCL_MINOR +([0-9]+)" MCCL_MINOR_VERSION
 "${MCCL_VERSION_FILE_CONTENTS}")
 string(REGEX REPLACE "define MCCL_MINOR +([0-9]+)" "\\1" MCCL_MINOR_VERSION
-"${MCCL_MINOR_VERSION}")
+"${MCCL_MINOR_VERSION}")
 string(REGEX MATCH "define MCCL_PATCH +([0-9]+)" MCCL_PATCH_VERSION
 "${MCCL_VERSION_FILE_CONTENTS}")
 string(REGEX REPLACE "define MCCL_PATCH +([0-9]+)" "\\1" MCCL_PATCH_VERSION
-"${MCCL_PATCH_VERSION}")
+"${MCCL_PATCH_VERSION}")
 if(NOT MCCL_MAJOR_VERSION)
 set(MCCL_VERSION "???")
 else()
@@ -42,10 +42,11 @@ if(WITH_MCCL)
 include_directories(${MCCL_INCLUDE_DIR})

 message(STATUS "Current MCCL header is ${MCCL_INCLUDE_DIR}/mccl.h. ")
-message(STATUS "Current MCCL version is "
+message(
+STATUS
+"Current MCCL version is "
 "v${MCCL_MAJOR_VERSION}.${MCCL_MINOR_VERSION}.${MCCL_PATCH_VERSION} ")
 else()
-message(FATAL_ERROR "WITH_MCCL is enabled but mccl.h file is not found!")
+message(FATAL_ERROR "WITH_MCCL is enabled but mccl.h file is not found!")
 endif()
 endif()
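Aside (illustrative only, not part of the commit): the MATCH-then-REPLACE idiom above first captures the whole "define MCCL_MAJOR <n>" fragment and then reduces it to the captured digits. A rough C++ equivalent of that two-step extraction, using a hypothetical in-memory header string:

#include <iostream>
#include <regex>
#include <string>

// Two-step extraction mirroring the CMake code: find the
// "define MCCL_MAJOR <digits>" fragment, then keep only the digits.
std::string ExtractVersionField(const std::string& contents,
                                const std::string& field) {
  std::regex pattern("define " + field + " +([0-9]+)");
  std::smatch match;
  if (std::regex_search(contents, match, pattern)) {
    return match[1];  // the ([0-9]+) capture group
  }
  return "";  // analogous to the "version not found" branch
}

int main() {
  // Hypothetical mccl.h contents, for illustration only.
  const std::string header =
      "#define MCCL_MAJOR 2\n#define MCCL_MINOR 14\n#define MCCL_PATCH 3\n";
  std::cout << "v" << ExtractVersionField(header, "MCCL_MAJOR") << "."
            << ExtractVersionField(header, "MCCL_MINOR") << "."
            << ExtractVersionField(header, "MCCL_PATCH") << "\n";  // v2.14.3
  return 0;
}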

6 changes: 3 additions & 3 deletions cmake/mudnn.cmake
@@ -62,15 +62,15 @@ macro(find_mudnn_version mudnn_version_file)
 string(REGEX MATCH "define MUDNN_VERSION_MAJOR +([0-9]+)" MUDNN_MAJOR_VERSION
 "${MUDNN_VERSION_FILE_CONTENTS}")
 string(REGEX REPLACE "define MUDNN_VERSION_MAJOR +([0-9]+)" "\\1"
-MUDNN_MAJOR_VERSION "${MUDNN_MAJOR_VERSION}")
+MUDNN_MAJOR_VERSION "${MUDNN_MAJOR_VERSION}")
 string(REGEX MATCH "define MUDNN_VERSION_MINOR +([0-9]+)" MUDNN_MINOR_VERSION
 "${MUDNN_VERSION_FILE_CONTENTS}")
 string(REGEX REPLACE "define MUDNN_VERSION_MINOR +([0-9]+)" "\\1"
-MUDNN_MINOR_VERSION "${MUDNN_MINOR_VERSION}")
+MUDNN_MINOR_VERSION "${MUDNN_MINOR_VERSION}")
 string(REGEX MATCH "define MUDNN_VERSION_PATCH +([0-9]+)" MUDNN_PATCH_VERSION
 "${MUDNN_VERSION_FILE_CONTENTS}")
 string(REGEX REPLACE "define MUDNN_VERSION_PATCH +([0-9]+)" "\\1"
-MUDNN_PATCH_VERSION "${MUDNN_PATCH_VERSION}")
+MUDNN_PATCH_VERSION "${MUDNN_PATCH_VERSION}")

 if(NOT MUDNN_MAJOR_VERSION)
 set(MUDNN_VERSION "???")
27 changes: 14 additions & 13 deletions cmake/musa.cmake
@@ -24,14 +24,15 @@ endforeach()

 find_path(
 OPENMP_INCLUDE_DIR omp.h
-PATHS ${llvm_openmp_search_list}
-REQUIRED
+PATHS ${llvm_openmp_search_list} REQUIRED
 NO_DEFAULT_PATH)
 include_directories(${OPENMP_INCLUDE_DIR})

 macro(find_musa_version musa_version_file)
 set(python_file ${PROJECT_BINARY_DIR}/get_version.py)
-set(MUSA_VERSION "None" CACHE STRING "musa version" FORCE)
+set(MUSA_VERSION
+"None"
+CACHE STRING "musa version" FORCE)
 file(
 WRITE ${python_file}
 ""
@@ -52,27 +53,27 @@ macro(find_musa_version musa_version_file)
 if(python_res EQUAL 0)
 set(MUSA_VERSION ${python_out})
 endif()
-string(REGEX REPLACE "([0-9]+)\.([0-9]+)\.([0-9]+)" "\\1" MUSA_MAJOR_VERSION "${MUSA_VERSION}")
-string(REGEX REPLACE "([0-9]+)\.([0-9]+)\.([0-9]+)" "\\2" MUSA_MINOR_VERSION "${MUSA_VERSION}")
-string(REGEX REPLACE "([0-9]+)\.([0-9]+)\.([0-9]+)" "\\3" MUSA_PATCH_VERSION "${MUSA_VERSION}")
+string(REGEX REPLACE "([0-9]+)\.([0-9]+)\.([0-9]+)" "\\1" MUSA_MAJOR_VERSION
+"${MUSA_VERSION}")
+string(REGEX REPLACE "([0-9]+)\.([0-9]+)\.([0-9]+)" "\\2" MUSA_MINOR_VERSION
+"${MUSA_VERSION}")
+string(REGEX REPLACE "([0-9]+)\.([0-9]+)\.([0-9]+)" "\\3" MUSA_PATCH_VERSION
+"${MUSA_VERSION}")

 if(NOT MUSA_MAJOR_VERSION)
 set(MUSA_VERSION "???")
-message(
-WARNING "Cannot find MUSA version in ${MUSA_PATH}/version.json"
-)
+message(WARNING "Cannot find MUSA version in ${MUSA_PATH}/version.json")
 else()
 math(
 EXPR
 MUSA_VERSION
 "${MUSA_MAJOR_VERSION} * 10000 + ${MUSA_MINOR_VERSION} * 100 + ${MUSA_PATCH_VERSION}"
 )
-message(
-STATUS
-"Current MUSA version file is ${MUSA_PATH}/version.json.")
+message(STATUS "Current MUSA version file is ${MUSA_PATH}/version.json.")
 message(
 STATUS
-"Current MUSA version is v${MUSA_MAJOR_VERSION}.${MUSA_MINOR_VERSION}.${MUSA_PATCH_VERSION} ")
+"Current MUSA version is v${MUSA_MAJOR_VERSION}.${MUSA_MINOR_VERSION}.${MUSA_PATCH_VERSION} "
+)
 endif()
 endmacro()
 find_musa_version(${MUSA_PATH}/version.json)
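Aside (illustrative only, not part of the commit): the math(EXPR) call above packs the three version components into a single integer, so for example 4.2.0 becomes 40200. The same encode/decode arithmetic in C++:

#include <cstdio>

// Mirrors math(EXPR MUSA_VERSION
//              "major * 10000 + minor * 100 + patch").
constexpr int PackVersion(int major, int minor, int patch) {
  return major * 10000 + minor * 100 + patch;
}

int main() {
  const int packed = PackVersion(4, 2, 0);  // hypothetical MUSA 4.2.0
  std::printf("packed: %d\n", packed);  // packed: 40200
  // Decoding reverses the scheme.
  std::printf("v%d.%d.%d\n", packed / 10000, (packed / 100) % 100,
              packed % 100);  // v4.2.0
  return 0;
}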
7 changes: 4 additions & 3 deletions cmake/version.cmake
@@ -94,9 +94,10 @@ function(version version_file)
 "CUDNN version: v${CUDNN_MAJOR_VERSION}.${CUDNN_MINOR_VERSION}\n")
 endif()
 if(WITH_MUSA)
-file(APPEND ${version_file}
-"MUSA version: v${MUSA_MAJOR_VERSION}.${MUSA_MINOR_VERSION}.${MUSA_PATCH_VERSION}\n"
-"MUDNN version: v${MUDNN_MAJOR_VERSION}.${MUDNN_MINOR_VERSION}\n")
+file(
+APPEND ${version_file}
+"MUSA version: v${MUSA_MAJOR_VERSION}.${MUSA_MINOR_VERSION}.${MUSA_PATCH_VERSION}\n"
+"MUDNN version: v${MUDNN_MAJOR_VERSION}.${MUDNN_MINOR_VERSION}\n")
 endif()
 if(WITH_ROCM)
 file(APPEND ${version_file}
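Aside (illustrative only, not part of the commit): file(APPEND ...) concatenates its quoted string arguments and appends them to the version file. A C++ sketch of the equivalent effect, with hypothetical version values:

#include <fstream>

int main() {
  // Mirrors file(APPEND ${version_file} "MUSA version: ...\n"
  //                                     "MUDNN version: ...\n"):
  // adjacent string arguments are written out back to back.
  std::ofstream version_file("version.txt", std::ios::app);
  version_file << "MUSA version: v4.2.0\n"  // hypothetical values
               << "MUDNN version: v2.7\n";
  return 0;
}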
3 changes: 2 additions & 1 deletion paddle/fluid/distributed/fleet_executor/carrier.cc
@@ -272,7 +272,8 @@ static std::shared_ptr<framework::GarbageCollector> GetGC(
 int64_t max_memory_size = framework::GetEagerDeletionThreshold();
 std::shared_ptr<framework::GarbageCollector> gc;
 if (max_memory_size >= 0) {
-#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || defined(PADDLE_WITH_MUSA)
+#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
+    defined(PADDLE_WITH_MUSA)
 if (platform::is_gpu_place(place)) {
 if (framework::IsFastEagerDeletionModeEnabled()) {
 gc.reset(new framework::UnsafeFastGPUGarbageCollector(place,
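The C++ hunks in this commit all apply the same mechanical fix: the one-line preprocessor guard exceeds cpplint's 80-column limit, so it is wrapped with a backslash continuation (the same pattern recurs in the files below). A standalone sketch of the wrapped guard; the splice is purely lexical, so the wrapped form is token-identical to the original one-liner:

#include <iostream>

// The backslash splices the physical lines into one logical line before
// the preprocessor evaluates the condition, so wrapping changes nothing
// semantically.
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
    defined(PADDLE_WITH_MUSA)
constexpr bool kGpuBuild = true;
#else
constexpr bool kGpuBuild = false;
#endif

int main() {
  std::cout << (kGpuBuild ? "GPU build\n" : "CPU-only build\n");
  return 0;
}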
3 changes: 2 additions & 1 deletion paddle/fluid/distributed/fleet_executor/cond_interceptor.cc
@@ -71,7 +71,8 @@ bool CondInterceptor::GetCondResult() {
 const auto& cond_tensor = cond_var->Get<phi::DenseTensor>();
 bool res = false;
 if (platform::is_gpu_place(cond_tensor.place())) {
-#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || defined(PADDLE_WITH_MUSA)
+#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
+    defined(PADDLE_WITH_MUSA)
 phi::DenseTensor cpu_tensor;
 framework::TensorCopy(cond_tensor, platform::CPUPlace(), &cpu_tensor);
 platform::DeviceContextPool::Instance().Get(cond_tensor.place())->Wait();
3 changes: 2 additions & 1 deletion paddle/fluid/distributed/fleet_executor/dist_model.cc
@@ -76,7 +76,8 @@ bool LoadDataFromDistModelTensor(const DistModelTensor &input_data,
 input_data.data.length());
 } else if (platform::is_gpu_place(place)) {
 VLOG(3) << "Loading data for GPU.";
-#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || defined(PADDLE_WITH_MUSA)
+#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
+    defined(PADDLE_WITH_MUSA)
 platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
 auto *dev_ctx = dynamic_cast<const phi::GPUContext *>(pool.Get(place));
 auto gpu_place = place;
3 changes: 2 additions & 1 deletion paddle/fluid/eager/nan_inf_utils.cc
@@ -98,7 +98,8 @@ void CheckTensorHasNanOrInf(const std::string& api_name, const Tensor& tensor) {

 auto& place = dense_tensor->place();
 if (paddle::platform::is_gpu_place(place)) {
-#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || defined(PADDLE_WITH_MUSA)
+#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
+    defined(PADDLE_WITH_MUSA)
 paddle::framework::details::tensor_check<phi::GPUContext>(
 api_name, tensor_name, *dense_tensor, place);
 #else
3 changes: 2 additions & 1 deletion paddle/fluid/framework/copy_same_tensor_test.cc
@@ -32,7 +32,8 @@ namespace framework {
 static std::vector<platform::Place> CreatePlaceList() {
 std::vector<platform::Place> places;
 places.emplace_back(platform::CPUPlace());
-#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || defined(PADDLE_WITH_MUSA)
+#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
+    defined(PADDLE_WITH_MUSA)
 places.emplace_back(platform::CUDAPlace(0));
 #endif
 return places;
6 changes: 4 additions & 2 deletions paddle/fluid/framework/custom_operator.cc
@@ -123,7 +123,8 @@ static void RunKernelFunc(
 "Input tensor (%s) is not initialized.", in_name));
 paddle::Tensor custom_in;
 custom_in.set_impl(std::make_shared<phi::DenseTensor>(*x));
-#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || defined(PADDLE_WITH_MUSA)
+#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
+    defined(PADDLE_WITH_MUSA)
 if (custom_in.is_gpu_pinned()) {
 VLOG(3) << "Custom Operator: custom input is gpu pinned tensor";
 auto gpu_place = phi::GPUPlace(platform::GetCurrentDeviceId());
@@ -1174,7 +1175,8 @@ static void RegisterOperatorKernel(
 }
 RegisterOperatorKernelWithPlace(
 name, op_kernel_func, proto::VarType::RAW, platform::CPUPlace());
-#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || defined(PADDLE_WITH_MUSA)
+#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
+    defined(PADDLE_WITH_MUSA)
 RegisterOperatorKernelWithPlace(
 name, op_kernel_func, proto::VarType::RAW, platform::CUDAPlace());
 #endif
4 changes: 3 additions & 1 deletion paddle/fluid/framework/data_feed.cc
@@ -1526,7 +1526,9 @@ void MultiSlotInMemoryDataFeed::PutToFeedVec(
 #endif
 }

-#if (defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || defined(PADDLE_WITH_MUSA)) && !defined(_WIN32)
+#if (defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
+     defined(PADDLE_WITH_MUSA)) &&                            \
+    !defined(_WIN32)
 template <typename T>
 void PrivateInstantDataFeed<T>::PutToFeedVec() {
 for (size_t i = 0; i < use_slots_.size(); ++i) {
4 changes: 3 additions & 1 deletion paddle/fluid/framework/data_feed.h
@@ -1951,7 +1951,9 @@ class PaddleBoxDataFeed : public MultiSlotInMemoryDataFeed {
 int pv_batch_size_;
 };

-#if (defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || defined(PADDLE_WITH_MUSA)) && !defined(_WIN32)
+#if (defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
+     defined(PADDLE_WITH_MUSA)) &&                            \
+    !defined(_WIN32)
 template <typename T>
 class PrivateInstantDataFeed : public DataFeed {
 public:
4 changes: 3 additions & 1 deletion paddle/fluid/framework/data_feed_factory.cc
@@ -70,7 +70,9 @@ REGISTER_DATAFEED_CLASS(MultiSlotDataFeed);
 REGISTER_DATAFEED_CLASS(MultiSlotInMemoryDataFeed);
 REGISTER_DATAFEED_CLASS(PaddleBoxDataFeed);
 REGISTER_DATAFEED_CLASS(SlotRecordInMemoryDataFeed);
-#if (defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || defined(PADDLE_WITH_MUSA)) && !defined(_WIN32)
+#if (defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
+     defined(PADDLE_WITH_MUSA)) &&                            \
+    !defined(_WIN32)
 REGISTER_DATAFEED_CLASS(MultiSlotFileInstantDataFeed);
 #endif
 } // namespace framework
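Aside (illustrative only, not part of the commit): in the guards above, && binds tighter than ||, so the GPU-backend alternatives must be parenthesized as a group before being combined with !defined(_WIN32). A minimal sketch; the kInstantDataFeedEnabled name is hypothetical:

#include <iostream>

// Without the inner parentheses, "A || B || C && !defined(_WIN32)" would
// parse as "A || B || (C && !defined(_WIN32))", silently changing which
// configurations enable the code.
#if (defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
     defined(PADDLE_WITH_MUSA)) &&                            \
    !defined(_WIN32)
constexpr bool kInstantDataFeedEnabled = true;  // hypothetical flag name
#else
constexpr bool kInstantDataFeedEnabled = false;
#endif

int main() {
  std::cout << (kInstantDataFeedEnabled ? "instant data feed enabled\n"
                                        : "instant data feed disabled\n");
  return 0;
}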
114 changes: 72 additions & 42 deletions paddle/fluid/framework/details/CMakeLists.txt
@@ -205,63 +205,91 @@ elseif(WITH_ROCM)
 elseif(WITH_MUSA)
 musa_library(
 nan_inf_utils
-SRCS nan_inf_utils_detail.cc
-DEPS framework_proto scope place phi)
+SRCS
+nan_inf_utils_detail.cc
+DEPS
+framework_proto
+scope
+place
+phi)
 musa_library(
 all_reduce_op_handle
-SRCS all_reduce_op_handle.cc
-DEPS op_handle_base
-scope
-lod_tensor
-phi
-memory
-dynload_cuda
-variable_visitor)
+SRCS
+all_reduce_op_handle.cc
+DEPS
+op_handle_base
+scope
+lod_tensor
+phi
+memory
+dynload_cuda
+variable_visitor)
 musa_library(
 fused_all_reduce_op_handle
-SRCS fused_all_reduce_op_handle.cc
-DEPS all_reduce_op_handle
-op_handle_base
-variable_visitor
-scope
-lod_tensor
-phi
-memory
-dynload_cuda
-place)
+SRCS
+fused_all_reduce_op_handle.cc
+DEPS
+all_reduce_op_handle
+op_handle_base
+variable_visitor
+scope
+lod_tensor
+phi
+memory
+dynload_cuda
+place)
 musa_library(
 grad_merge_all_reduce_op_handle
-SRCS grad_merge_all_reduce_op_handle.cc
-DEPS fused_all_reduce_op_handle
-op_handle_base
-scope
-lod_tensor
-phi
-memory
-dynload_cuda
-variable_visitor
-place
-all_reduce_op_handle)
+SRCS
+grad_merge_all_reduce_op_handle.cc
+DEPS
+fused_all_reduce_op_handle
+op_handle_base
+scope
+lod_tensor
+phi
+memory
+dynload_cuda
+variable_visitor
+place
+all_reduce_op_handle)

 if(WITH_DISTRIBUTE)
 musa_library(
 reduce_op_handle
-SRCS reduce_op_handle.cc
-DEPS op_handle_base variable_visitor scope phi dynload_cuda)
+SRCS
+reduce_op_handle.cc
+DEPS
+op_handle_base
+variable_visitor
+scope
+phi
+dynload_cuda)
 else()
 musa_library(
 reduce_op_handle
-SRCS reduce_op_handle.cc
-DEPS op_handle_base variable_visitor scope phi dynload_cuda)
+SRCS
+reduce_op_handle.cc
+DEPS
+op_handle_base
+variable_visitor
+scope
+phi
+dynload_cuda)
 endif()
 musa_library(
 broadcast_op_handle
-SRCS broadcast_op_handle.cc
-DEPS op_handle_base scope phi memory variable_visitor dynload_cuda)
-musa_library(
-fused_broadcast_op_handle
-SRCS fused_broadcast_op_handle.cc
-DEPS broadcast_op_handle)
+SRCS
+broadcast_op_handle.cc
+DEPS
+op_handle_base
+scope
+phi
+memory
+variable_visitor
+dynload_cuda)
+musa_library(fused_broadcast_op_handle SRCS fused_broadcast_op_handle.cc DEPS
+broadcast_op_handle)
 else()
 cc_library(
 nan_inf_utils
@@ -446,7 +474,9 @@ endif()

 if(NOT APPLE
 AND NOT WIN32
-AND (WITH_GPU OR WITH_ROCM OR WITH_MUSA))
+AND (WITH_GPU
+OR WITH_ROCM
+OR WITH_MUSA))
 set(IR_PASS_DEPS ${IR_PASS_DEPS} fusion_group_pass)
 endif()
 cc_library(