Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

enable dynamic load mklml lib on fluid #11596

Merged
merged 6 commits into from
Jun 26, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 6 additions & 1 deletion cmake/external/openblas.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,12 @@ INCLUDE_DIRECTORIES(${CBLAS_INC_DIR})
SET(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/cblas_dummy.c)
FILE(WRITE ${dummyfile} "const char *dummy_cblas = \"${dummyfile}\";")
ADD_LIBRARY(cblas STATIC ${dummyfile})
TARGET_LINK_LIBRARIES(cblas ${CBLAS_LIBRARIES})

# When MKLML supplies CBLAS, link the dynload_mklml wrapper so MKL symbols
# are resolved at runtime via dlopen/dlsym; otherwise link the discovered
# CBLAS libraries directly.
if("${CBLAS_PROVIDER}" STREQUAL "MKLML")
  target_link_libraries(cblas dynload_mklml)
else()
  target_link_libraries(cblas ${CBLAS_LIBRARIES})
endif()

IF(NOT ${CBLAS_FOUND})
ADD_DEPENDENCIES(cblas extern_openblas)
Expand Down
9 changes: 9 additions & 0 deletions cmake/generic.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -195,6 +195,15 @@ function(cc_library TARGET_NAME)
list(REMOVE_ITEM cc_library_DEPS warpctc)
add_dependencies(${TARGET_NAME} warpctc)
endif()
# Depend on the mklml external project for build ordering only — do NOT link
# libmklml.so at link time.  MKL symbols are resolved at runtime through the
# dynload_mklml wrapper (dlopen/dlsym), so any target listing "mklml" in its
# DEPS gets dynload_mklml substituted instead.
if("${cc_library_DEPS};" MATCHES "mklml;")
list(REMOVE_ITEM cc_library_DEPS mklml)
# dynload_mklml itself must not depend on itself.
if(NOT "${TARGET_NAME}" MATCHES "dynload_mklml")
list(APPEND cc_library_DEPS dynload_mklml)
endif()
# Still require the mklml package to be downloaded/extracted first.
add_dependencies(${TARGET_NAME} mklml)
# iomp5 (the OpenMP runtime shipped in ${MKLML_LIB_DIR}) is linked directly;
# -Wl,--as-needed keeps unreferenced shared libraries out of DT_NEEDED.
target_link_libraries(${TARGET_NAME} "-L${MKLML_LIB_DIR} -liomp5 -Wl,--as-needed")
endif()
target_link_libraries(${TARGET_NAME} ${cc_library_DEPS})
add_dependencies(${TARGET_NAME} ${cc_library_DEPS})
endif()
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/inference/tests/book/test_inference_nlp.cc
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,8 @@ limitations under the License. */
#include "gflags/gflags.h"
#include "gtest/gtest.h"
#include "paddle/fluid/inference/tests/test_helper.h"
#include "paddle/fluid/operators/math/blas.h"
#ifdef PADDLE_WITH_MKLML
#include <mkl_service.h>
#include <omp.h>
#endif

Expand Down Expand Up @@ -164,7 +164,7 @@ TEST(inference, nlp) {
// only use 1 thread number per std::thread
omp_set_dynamic(0);
omp_set_num_threads(1);
mkl_set_num_threads(1);
paddle::operators::math::SetNumThreads(1);
#endif

double start_ms = 0, stop_ms = 0;
Expand Down
7 changes: 2 additions & 5 deletions paddle/fluid/operators/math/blas.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,10 +18,7 @@
#include "paddle/fluid/framework/tensor.h"

#ifdef PADDLE_WITH_MKLML
#include <mkl_cblas.h>
#include <mkl_lapacke.h>
#include <mkl_service.h>
#include <mkl_vml_functions.h>
#include "paddle/fluid/platform/dynload/mklml.h"
#endif

#ifdef PADDLE_USE_OPENBLAS
Expand Down Expand Up @@ -55,7 +52,7 @@ static void SetNumThreads(int num_threads) {
openblas_set_num_threads(real_num_threads);
#elif defined(PADDLE_WITH_MKLML)
int real_num_threads = num_threads > 1 ? num_threads : 1;
mkl_set_num_threads(real_num_threads);
platform::dynload::MKL_Set_Num_Threads(real_num_threads);
#else
PADDLE_ENFORCE(false, "To be implemented.");
#endif
Expand Down
91 changes: 66 additions & 25 deletions paddle/fluid/operators/math/blas_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -22,61 +22,109 @@ namespace math {
template <typename T>
struct CBlas;

#ifdef PADDLE_WITH_MKLML
template <>
struct CBlas<float> {
template <typename... ARGS>
static void GEMM(ARGS... args) {
cblas_sgemm(args...);
platform::dynload::cblas_sgemm(args...);
}

template <typename... ARGS>
static void AXPY(ARGS... args) {
cblas_saxpy(args...);
platform::dynload::cblas_saxpy(args...);
}

template <typename... ARGS>
static void VCOPY(ARGS... args) {
platform::dynload::cblas_scopy(args...);
}

template <typename... ARGS>
static void GEMV(ARGS... args) {
platform::dynload::cblas_sgemv(args...);
}

template <typename... ARGS>
static void GEMM_BATCH(ARGS... args) {
platform::dynload::cblas_sgemm_batch(args...);
}

#ifdef PADDLE_WITH_MKLML
template <typename... ARGS>
static void VADD(ARGS... args) {
vsAdd(args...);
platform::dynload::vsAdd(args...);
}
};

template <>
struct CBlas<double> {
template <typename... ARGS>
static void GEMM(ARGS... args) {
platform::dynload::cblas_dgemm(args...);
}

template <typename... ARGS>
static void AXPY(ARGS... args) {
platform::dynload::cblas_daxpy(args...);
}
#endif

template <typename... ARGS>
static void VCOPY(ARGS... args) {
cblas_scopy(args...);
platform::dynload::cblas_dcopy(args...);
}

template <typename... ARGS>
static void GEMV(ARGS... args) {
cblas_sgemv(args...);
platform::dynload::cblas_dgemv(args...);
}

#ifdef PADDLE_WITH_MKLML
template <typename... ARGS>
static void GEMM_BATCH(ARGS... args) {
cblas_sgemm_batch(args...);
platform::dynload::cblas_dgemm_batch(args...);
}

template <typename... ARGS>
static void VADD(ARGS... args) {
platform::dynload::vdAdd(args...);
}
#endif
};

#else

template <>
struct CBlas<double> {
struct CBlas<float> {
template <typename... ARGS>
static void GEMM(ARGS... args) {
cblas_dgemm(args...);
cblas_sgemm(args...);
}

template <typename... ARGS>
static void AXPY(ARGS... args) {
cblas_daxpy(args...);
cblas_saxpy(args...);
}

#ifdef PADDLE_WITH_MKLML
template <typename... ARGS>
static void VADD(ARGS... args) {
vdAdd(args...);
static void VCOPY(ARGS... args) {
cblas_scopy(args...);
}

template <typename... ARGS>
static void GEMV(ARGS... args) {
cblas_sgemv(args...);
}
};

template <>
struct CBlas<double> {
template <typename... ARGS>
static void GEMM(ARGS... args) {
cblas_dgemm(args...);
}

template <typename... ARGS>
static void AXPY(ARGS... args) {
cblas_daxpy(args...);
}
#endif

template <typename... ARGS>
static void VCOPY(ARGS... args) {
Expand All @@ -87,15 +135,8 @@ struct CBlas<double> {
static void GEMV(ARGS... args) {
cblas_dgemv(args...);
}

#ifdef PADDLE_WITH_MKLML
template <typename... ARGS>
static void GEMM_BATCH(ARGS... args) {
cblas_dgemm_batch(args...);
}
#endif
};

#endif
template <>
struct CBlas<platform::float16> {
static void GEMM(...) { PADDLE_THROW("float16 GEMM not supported on CPU"); }
Expand Down
4 changes: 1 addition & 3 deletions paddle/fluid/operators/math/math_function.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,9 +14,7 @@ limitations under the License. */

#pragma once
#ifdef PADDLE_WITH_MKLML
#include <mkl_cblas.h>
#include <mkl_lapacke.h>
#include <mkl_vml_functions.h>
#include "paddle/fluid/platform/dynload/mklml.h"
#endif

#ifdef PADDLE_USE_OPENBLAS
Expand Down
4 changes: 4 additions & 0 deletions paddle/fluid/platform/dynload/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -12,3 +12,7 @@ if (CUPTI_FOUND)
endif(CUPTI_FOUND)
nv_library(dynload_cuda SRCS ${CUDA_SRCS} DEPS dynamic_loader)
cc_library(dynload_warpctc SRCS warpctc.cc DEPS dynamic_loader warpctc)
# dynload_mklml wraps dlopen/dlsym access to the MKLML library (see
# mklml.cc/mklml.h); it depends on the mklml external-project target so the
# library is fetched before anything builds against the wrapper.
if (WITH_MKLML)
cc_library(dynload_mklml SRCS mklml.cc DEPS dynamic_loader mklml)
endif()
# TODO(TJ): add iomp, mkldnn?
15 changes: 15 additions & 0 deletions paddle/fluid/platform/dynload/dynamic_loader.cc
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,8 @@ DEFINE_string(
tensorrt_dir, "",
"Specify path for loading tensorrt library, such as libnvinfer.so.");

DEFINE_string(mklml_dir, "", "Specify path for loading libmklml_intel.so.");

namespace paddle {
namespace platform {
namespace dynload {
Expand Down Expand Up @@ -76,6 +78,7 @@ static inline void* GetDsoHandleFromDefaultPath(const std::string& dso_path,
VLOG(3) << "Try to find library: " << dso_path
<< " from default system path.";
// default search from LD_LIBRARY_PATH/DYLD_LIBRARY_PATH
// and /usr/local/lib path
void* dso_handle = dlopen(dso_path.c_str(), dynload_flags);

// DYLD_LIBRARY_PATH is disabled after Mac OS 10.11 to
Expand All @@ -97,6 +100,10 @@ static inline void* GetDsoHandleFromDefaultPath(const std::string& dso_path,
}
#endif

if (nullptr == dso_handle) {
LOG(WARNING) << "Can not find library: " << dso_path
<< ". Please try to add the lib path to LD_LIBRARY_PATH.";
}
return dso_handle;
}

Expand Down Expand Up @@ -206,6 +213,14 @@ void* GetTensorRtDsoHandle() {
#endif
}

// Resolves the platform-specific MKLML shared-library name and delegates to
// GetDsoHandleFromSearchPath, honoring the --mklml_dir flag for the search
// location.
void* GetMKLMLDsoHandle() {
#if defined(__APPLE__) || defined(__OSX__)
  const char* const lib_name = "libmklml_intel.dylib";
#else
  const char* const lib_name = "libmklml_intel.so";
#endif
  return GetDsoHandleFromSearchPath(FLAGS_mklml_dir, lib_name);
}

} // namespace dynload
} // namespace platform
} // namespace paddle
1 change: 1 addition & 0 deletions paddle/fluid/platform/dynload/dynamic_loader.h
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ void* GetWarpCTCDsoHandle();
void* GetLapackDsoHandle();
void* GetNCCLDsoHandle();
void* GetTensorRtDsoHandle();
void* GetMKLMLDsoHandle();

} // namespace dynload
} // namespace platform
Expand Down
30 changes: 30 additions & 0 deletions paddle/fluid/platform/dynload/mklml.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/platform/dynload/mklml.h"

namespace paddle {
namespace platform {
namespace dynload {

// Shared state for the lazy, one-time dlopen of the MKLML library; read by
// the DynLoad__* wrappers declared in mklml.h.
std::once_flag mklml_dso_flag;
void* mklml_dso_handle = nullptr;

// Emits the single global definition of each DynLoad__<fn> functor that
// mklml.h declares `extern`.
#define DEFINE_WRAP(__name) DynLoad__##__name __name

MKLML_ROUTINE_EACH(DEFINE_WRAP);

}  // namespace dynload
}  // namespace platform
}  // namespace paddle
71 changes: 71 additions & 0 deletions paddle/fluid/platform/dynload/mklml.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <dlfcn.h>
#include <mkl.h>
#include <mutex>  // NOLINT
#include "paddle/fluid/platform/dynload/dynamic_loader.h"

namespace paddle {
namespace platform {
namespace dynload {

// Guard and handle for the lazily dlopen-ed MKLML library; defined in
// mklml.cc and shared by all generated wrappers below.
extern std::once_flag mklml_dso_flag;
extern void* mklml_dso_handle;

/**
 * The following macro definition can generate structs
 * (for each function) to dynamic load mklml routine
 * via operator overloading.
 *
 * On first call the MKLML library is opened once (std::call_once) and the
 * routine's address is cached in a function-local static, so each wrapper
 * pays the dlsym cost only on its first invocation.
 *
 * NOTE: the symbol-pointer static pastes the macro parameter __name
 * (p_##__name).  The previous spelling p_##_name pasted the literal token
 * `_name`, giving every wrapper the identical identifier `p__name`; that
 * only worked by accident of each static living in its own struct.
 */
#define DYNAMIC_LOAD_MKLML_WRAP(__name)                                    \
  struct DynLoad__##__name {                                               \
    template <typename... Args>                                            \
    auto operator()(Args... args) -> decltype(__name(args...)) {           \
      using mklmlFunc = decltype(&::__name);                               \
      std::call_once(mklml_dso_flag, []() {                                \
        mklml_dso_handle = paddle::platform::dynload::GetMKLMLDsoHandle(); \
      });                                                                  \
      static void* p_##__name = dlsym(mklml_dso_handle, #__name);          \
      return reinterpret_cast<mklmlFunc>(p_##__name)(args...);             \
    }                                                                      \
  };                                                                       \
  extern DynLoad__##__name __name

#define DECLARE_DYNAMIC_LOAD_MKLML_WRAP(__name) DYNAMIC_LOAD_MKLML_WRAP(__name)

// MKLML entry points routed through the dynamic loader: single/double
// precision CBLAS GEMM/AXPY/COPY/GEMV and batched GEMM, the VML vector
// adds, and the thread-count setter used by SetNumThreads in blas.h.
#define MKLML_ROUTINE_EACH(__macro) \
  __macro(cblas_sgemm);             \
  __macro(cblas_saxpy);             \
  __macro(cblas_scopy);             \
  __macro(cblas_sgemv);             \
  __macro(cblas_sgemm_batch);       \
  __macro(cblas_dgemm);             \
  __macro(cblas_daxpy);             \
  __macro(cblas_dcopy);             \
  __macro(cblas_dgemv);             \
  __macro(cblas_dgemm_batch);       \
  __macro(vsAdd);                   \
  __macro(vdAdd);                   \
  __macro(MKL_Set_Num_Threads)

MKLML_ROUTINE_EACH(DECLARE_DYNAMIC_LOAD_MKLML_WRAP);

#undef DYNAMIC_LOAD_MKLML_WRAP

}  // namespace dynload
}  // namespace platform
}  // namespace paddle