From fbb1b0e4647dfd974fa5bef4670d2d4411f81c37 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 3 Jan 2017 12:49:05 +0800 Subject: [PATCH 01/43] Start Doing C-API for predict. --- paddle/CMakeLists.txt | 2 +- paddle/capi/Arguments.cpp | 49 +++++++++++++++ paddle/capi/CMakeLists.txt | 47 ++++++++++++++ paddle/capi/Matrix.cpp | 61 +++++++++++++++++++ paddle/capi/PaddleCAPI.h | 54 ++++++++++++++++ paddle/capi/PaddleCAPIPrivate.h | 27 ++++++++ paddle/capi/Vector.cpp | 26 ++++++++ paddle/capi/config.h.in | 6 ++ paddle/capi/tests/CMakeLists.txt | 15 +++++ paddle/capi/tests/test_Arguments.cpp | 54 ++++++++++++++++ paddle/capi/tests/test_Matrix.cpp | 33 ++++++++++ paddle/capi/tests/test_Vector.cpp | 11 ++++ paddle/utils/ForceLink.h | 46 ++++++++++++++ paddle/utils/tests/CMakeLists.txt | 8 +++ paddle/utils/tests/test_ClassRegistrar.cpp | 27 ++++++++ .../tests/test_ClassRegistrarGlobals.cpp | 16 +++++ paddle/utils/tests/test_ClassRegistrarLib.cpp | 31 ++++++++++ paddle/utils/tests/test_ClassRegistrarLib.h | 23 +++++++ 18 files changed, 535 insertions(+), 1 deletion(-) create mode 100644 paddle/capi/Arguments.cpp create mode 100644 paddle/capi/CMakeLists.txt create mode 100644 paddle/capi/Matrix.cpp create mode 100644 paddle/capi/PaddleCAPI.h create mode 100644 paddle/capi/PaddleCAPIPrivate.h create mode 100644 paddle/capi/Vector.cpp create mode 100644 paddle/capi/config.h.in create mode 100644 paddle/capi/tests/CMakeLists.txt create mode 100644 paddle/capi/tests/test_Arguments.cpp create mode 100644 paddle/capi/tests/test_Matrix.cpp create mode 100644 paddle/capi/tests/test_Vector.cpp create mode 100644 paddle/utils/ForceLink.h create mode 100644 paddle/utils/tests/test_ClassRegistrar.cpp create mode 100644 paddle/utils/tests/test_ClassRegistrarGlobals.cpp create mode 100644 paddle/utils/tests/test_ClassRegistrarLib.cpp create mode 100644 paddle/utils/tests/test_ClassRegistrarLib.h diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt index 503024cff338d..b3f3b2fbcedd7 
100644 --- a/paddle/CMakeLists.txt +++ b/paddle/CMakeLists.txt @@ -8,7 +8,7 @@ add_subdirectory(gserver) add_subdirectory(pserver) add_subdirectory(trainer) add_subdirectory(scripts) - +add_subdirectory(capi) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in ${CMAKE_CURRENT_SOURCE_DIR}/setup.py) diff --git a/paddle/capi/Arguments.cpp b/paddle/capi/Arguments.cpp new file mode 100644 index 0000000000000..cf773a65872fc --- /dev/null +++ b/paddle/capi/Arguments.cpp @@ -0,0 +1,49 @@ +#include "PaddleCAPI.h" +#include "PaddleCAPIPrivate.h" + +#define cast(v) paddle::capi::cast(v) + +extern "C" { +int PDArgsCreateNone(PD_Arguments* args) { + auto ptr = new paddle::capi::CArguments(); + *args = ptr; + return PD_NO_ERROR; +} + +int PDArgsDestroy(PD_Arguments args) { + if (args == nullptr) return PD_NULLPTR; + delete cast(args); + return PD_NO_ERROR; +} + +int PDArgsGetSize(PD_Arguments args, uint64_t* size) { + if (args == nullptr || size == nullptr) return PD_NULLPTR; + *size = cast(args)->args.size(); + return PD_NO_ERROR; +} + +int PDArgsResize(PD_Arguments args, uint64_t size) { + if (args == nullptr) return PD_NULLPTR; + cast(args)->args.resize(size); + return PD_NO_ERROR; +} + +int PDArgsSetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat) { + if (args == nullptr || mat == nullptr) return PD_NULLPTR; + auto m = paddle::capi::cast(mat); + if (m->mat == nullptr) return PD_NULLPTR; + auto a = cast(args); + if (ID >= a->args.size()) return PD_OUT_OF_RANGE; + a->args[ID].value = m->mat; + return PD_NO_ERROR; +} + +int PDArgsGetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat) { + if (args == nullptr || mat == nullptr) return PD_NULLPTR; + auto m = paddle::capi::cast(mat); + auto a = cast(args); + if (ID >= a->args.size()) return PD_OUT_OF_RANGE; + m->mat = a->args[ID].value; + return PD_NO_ERROR; +} +} diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt new file mode 100644 index 0000000000000..6162267dab7cd --- /dev/null +++ 
b/paddle/capi/CMakeLists.txt @@ -0,0 +1,47 @@ +if (WITH_DOUBLE) + set(PADDLE_FLOAT_TYPE double) +else () + set(PADDLE_FLOAT_TYPE float) +endif() + +configure_file(config.h.in config.h @ONLY) + +set(CAPI_HEADER + PaddleCAPI.h) +set(CAPI_PRIVATE_HEADER + PaddleCAPIPrivate.h) +file(GLOB CAPI_SOURCES *.cpp) + +add_library(paddle_capi SHARED ${CAPI_SOURCES}) + +target_include_directories(paddle_capi PUBLIC ${CMAKE_CURRENT_BINARY_DIR}) +add_dependencies(paddle_capi gen_proto_cpp) + +target_link_libraries(paddle_capi + paddle_gserver + paddle_function + paddle_pserver + paddle_trainer_lib + paddle_network + paddle_math + paddle_utils + paddle_parameter + paddle_proto + paddle_cuda + ${PROTOBUF_LIBRARY} + ${LIBGLOG_LIBRARY} + ${GFLAGS_LIBRARIES} + ${CMAKE_THREAD_LIBS_INIT} + ${CBLAS_LIBS} + ${ZLIB_LIBRARIES} + ${INTERAL_LIBS} + ${CMAKE_DL_LIBS}) + + +set(PADDLE_CAPI_INC_PATH + ${CMAKE_CURRENT_BINARY_DIR} + ${CMAKE_CURRENT_SOURCE_DIR}) + +if (WITH_TESTING) + add_subdirectory(tests) +endif() diff --git a/paddle/capi/Matrix.cpp b/paddle/capi/Matrix.cpp new file mode 100644 index 0000000000000..71598b1714d19 --- /dev/null +++ b/paddle/capi/Matrix.cpp @@ -0,0 +1,61 @@ +#include "PaddleCAPI.h" +#include "PaddleCAPIPrivate.h" +#include "hl_cuda.h" + +#define cast(v) paddle::capi::cast(v) +extern "C" { +int PDMatCreate(PD_Matrix* mat, uint64_t height, uint64_t width, bool useGpu) { + auto ptr = new paddle::capi::CMatrix(); + ptr->mat = paddle::Matrix::create(height, width, false, useGpu); + *mat = ptr; + return PD_NO_ERROR; +} + +int PDMatCreateNone(PD_Matrix* mat) { + auto ptr = new paddle::capi::CMatrix(); + *mat = ptr; + return PD_NO_ERROR; +} + +int PDMatDestroy(PD_Matrix mat) { + if (mat == nullptr) return PD_NULLPTR; + auto ptr = cast(mat); + delete ptr; + return PD_NO_ERROR; +} + +int PDMatCopyToRow(PD_Matrix mat, uint64_t rowID, pd_real* rowArray) { + if (mat == nullptr) return PD_NULLPTR; + auto ptr = cast(mat); + if (ptr->mat == nullptr) return PD_NULLPTR; + if (rowID >= 
ptr->mat->getHeight()) return PD_OUT_OF_RANGE; + paddle::real* buf = ptr->mat->getRowBuf(rowID); + size_t width = ptr->mat->getWidth(); +#ifndef PADDLE_ONLY_CPU + hl_memcpy(buf, rowArray, sizeof(paddle::real) * width); +#else + std::copy(rowArray, rowArray + width, buf); +#endif + return PD_NO_ERROR; +} + +int PDMatGetRow(PD_Matrix mat, uint64_t rowID, pd_real** rawRowBuffer) { + if (mat == nullptr) return PD_NULLPTR; + auto ptr = cast(mat); + if (ptr->mat == nullptr) return PD_NULLPTR; + if (rowID >= ptr->mat->getHeight()) return PD_OUT_OF_RANGE; + *rawRowBuffer = ptr->mat->getRowBuf(rowID); + return PD_NO_ERROR; +} + +int PDMatGetShape(PD_Matrix mat, uint64_t* height, uint64_t* width) { + if (mat == nullptr) return PD_NULLPTR; + if (height != nullptr) { + *height = cast(mat)->mat->getHeight(); + } + if (width != nullptr) { + *width = cast(mat)->mat->getWidth(); + } + return PD_NO_ERROR; +} +} diff --git a/paddle/capi/PaddleCAPI.h b/paddle/capi/PaddleCAPI.h new file mode 100644 index 0000000000000..2eff0bc7da25c --- /dev/null +++ b/paddle/capi/PaddleCAPI.h @@ -0,0 +1,54 @@ +#ifndef __PADDLE_PADDLE_CAPI_PADDLECAPI_H_INCLUDED__ +#define __PADDLE_PADDLE_CAPI_PADDLECAPI_H_INCLUDED__ +#include +#include +#include "config.h" +#ifdef __cplusplus +extern "C" { +#endif + +#define PD_NO_ERROR 0 +#define PD_NULLPTR 1 +#define PD_OUT_OF_RANGE 2 +#define PD_UNDEFINED_ERROR -1 + +typedef void* PD_Vector; + +int PDVecCreate(PD_Vector* vec, uint64_t size, bool useGpu); + +int PDVecDestroy(PD_Vector vec); + +int PDVecIsSparse(PD_Vector vec, bool* isSparse); + +typedef void* PD_Matrix; + +int PDMatCreate(PD_Matrix* mat, uint64_t height, uint64_t width, bool useGpu); + +int PDMatDestroy(PD_Matrix mat); + +int PDMatCopyToRow(PD_Matrix mat, uint64_t rowID, pd_real* rowArray); + +int PDMatGetRow(PD_Matrix mat, uint64_t rowID, pd_real** rawRowBuffer); + +int PDMatCreateNone(PD_Matrix* mat); + +int PDMatGetShape(PD_Matrix mat, uint64_t* height, uint64_t* width); + +typedef void* 
PD_Arguments; + +int PDArgsCreateNone(PD_Arguments* args); + +int PDArgsDestroy(PD_Arguments args); + +int PDArgsGetSize(PD_Arguments args, uint64_t* size); + +int PDArgsResize(PD_Arguments args, uint64_t size); + +int PDArgsSetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat); + +int PDArgsGetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/paddle/capi/PaddleCAPIPrivate.h b/paddle/capi/PaddleCAPIPrivate.h new file mode 100644 index 0000000000000..efec60fbb68ec --- /dev/null +++ b/paddle/capi/PaddleCAPIPrivate.h @@ -0,0 +1,27 @@ +#include "PaddleCAPI.h" +#include "paddle/math/Matrix.h" +#include "paddle/math/Vector.h" +#include "paddle/parameter/Argument.h" +#pragma once + +namespace paddle { +namespace capi { + +struct CVector { + VectorPtr vec; +}; + +struct CMatrix { + MatrixPtr mat; +}; + +struct CArguments { + std::vector args; +}; + +template +inline T* cast(void* ptr) { + return reinterpret_cast(ptr); +} +} +} diff --git a/paddle/capi/Vector.cpp b/paddle/capi/Vector.cpp new file mode 100644 index 0000000000000..10dee7816c376 --- /dev/null +++ b/paddle/capi/Vector.cpp @@ -0,0 +1,26 @@ +#include "PaddleCAPI.h" +#include "PaddleCAPIPrivate.h" + +#define cast(v) paddle::capi::cast(v) +extern "C" { +int PDVecCreate(PD_Vector* vec, uint64_t size, bool useGpu) { + auto ptr = new paddle::capi::CVector(); + ptr->vec = paddle::Vector::create(size, useGpu); + *vec = ptr; + return PD_NO_ERROR; +} +int PDVecDestroy(PD_Vector vec) { + auto v = cast(vec); + v->vec.reset(); + delete v; + return PD_NO_ERROR; +} + +int PDVecIsSparse(PD_Vector vec, bool* isSparse) { + if (isSparse == nullptr || vec == nullptr) { + return PD_NULLPTR; + } + *isSparse = cast(vec)->vec->isSparse(); + return PD_NO_ERROR; +} +} diff --git a/paddle/capi/config.h.in b/paddle/capi/config.h.in new file mode 100644 index 0000000000000..32d8a364e0eaa --- /dev/null +++ b/paddle/capi/config.h.in @@ -0,0 +1,6 @@ +#ifndef 
__PADDLE_PADDLE_CAPI_CONFIG_H_INCLUDED__ +#define __PADDLE_PADDLE_CAPI_CONFIG_H_INCLUDED__ + +typedef @PADDLE_FLOAT_TYPE@ pd_real; + +#endif diff --git a/paddle/capi/tests/CMakeLists.txt b/paddle/capi/tests/CMakeLists.txt new file mode 100644 index 0000000000000..cd6b1d7c6271b --- /dev/null +++ b/paddle/capi/tests/CMakeLists.txt @@ -0,0 +1,15 @@ +function(add_capi_unittest TARGET_NAME) + add_executable( + ${TARGET_NAME} + ${ARGN}) + target_link_libraries( + ${TARGET_NAME} + paddle_capi + paddle_test_main + ${GTEST_LIBRARIES}) + target_include_directories(${TARGET_NAME} PUBLIC ${PADDLE_CAPI_INC_PATH}) + add_test(NAME ${TARGET_NAME} COMMAND ${TARGET_NAME}) +endfunction() + +add_capi_unittest(capi_test_mats test_Vector.cpp + test_Matrix.cpp test_Arguments.cpp) diff --git a/paddle/capi/tests/test_Arguments.cpp b/paddle/capi/tests/test_Arguments.cpp new file mode 100644 index 0000000000000..c74abd60d1b10 --- /dev/null +++ b/paddle/capi/tests/test_Arguments.cpp @@ -0,0 +1,54 @@ +#include "PaddleCAPI.h" +#include "gtest/gtest.h" +#include "paddle/utils/ThreadLocal.h" + +static std::vector randomBuffer(size_t bufSize) { + auto& eng = paddle::ThreadLocalRandomEngine::get(); + std::uniform_real_distribution dist(-1.0, 1.0); + std::vector retv; + retv.reserve(bufSize); + for (size_t i = 0; i < bufSize; ++i) { + retv.push_back(dist(eng)); + } + return retv; +} + +TEST(CAPIArguments, create) { + PD_Arguments args; + ASSERT_EQ(PD_NO_ERROR, PDArgsCreateNone(&args)); + uint64_t size; + ASSERT_EQ(PD_NO_ERROR, PDArgsGetSize(args, &size)); + ASSERT_EQ(0UL, size); + ASSERT_EQ(PD_NO_ERROR, PDArgsDestroy(args)); +} + +TEST(CAPIArguments, value) { + PD_Arguments args; + ASSERT_EQ(PD_NO_ERROR, PDArgsCreateNone(&args)); + ASSERT_EQ(PD_NO_ERROR, PDArgsResize(args, 1)); + + PD_Matrix mat; + ASSERT_EQ(PD_NO_ERROR, PDMatCreate(&mat, 128, 64, false)); + for (size_t i = 0; i < 128; ++i) { + std::vector sampleBuf = randomBuffer(64); + PDMatCopyToRow(mat, i, sampleBuf.data()); + } + 
ASSERT_EQ(PD_NO_ERROR, PDArgsSetValue(args, 0, mat)); + + PD_Matrix val; + ASSERT_EQ(PD_NO_ERROR, PDMatCreateNone(&val)); + + ASSERT_EQ(PD_NO_ERROR, PDArgsGetValue(args, 0, val)); + + for (size_t i = 0; i < 128; ++i) { + pd_real* row1; + pd_real* row2; + + ASSERT_EQ(PD_NO_ERROR, PDMatGetRow(mat, i, &row1)); + ASSERT_EQ(PD_NO_ERROR, PDMatGetRow(val, i, &row2)); + ASSERT_EQ(row1, row2); + } + ASSERT_EQ(PD_NO_ERROR, PDMatDestroy(val)); + ASSERT_EQ(PD_NO_ERROR, PDMatDestroy(mat)); + ASSERT_EQ(PD_NO_ERROR, PDArgsDestroy(args)); +} diff --git a/paddle/capi/tests/test_Matrix.cpp b/paddle/capi/tests/test_Matrix.cpp new file mode 100644 index 0000000000000..0f04a4683049a --- /dev/null +++ b/paddle/capi/tests/test_Matrix.cpp @@ -0,0 +1,33 @@ +#include "PaddleCAPI.h" +#include "gtest/gtest.h" + +TEST(CAPIMatrix, create) { + PD_Matrix mat; + ASSERT_EQ(PD_NO_ERROR, PDMatCreate(&mat, 128, 32, false)); + std::vector sampleRow; + sampleRow.resize(32); + for (size_t i = 0; i < sampleRow.size(); ++i) { + sampleRow[i] = 1.0 / (i + 1.0); + } + ASSERT_EQ(PD_NO_ERROR, PDMatCopyToRow(mat, 0, sampleRow.data())); + ASSERT_EQ(PD_OUT_OF_RANGE, PDMatCopyToRow(mat, 128, sampleRow.data())); + + pd_real* arrayPtr; + + ASSERT_EQ(PD_NO_ERROR, PDMatGetRow(mat, 0, &arrayPtr)); + for (size_t i = 0; i < sampleRow.size(); ++i) { + ASSERT_NEAR(sampleRow[i], arrayPtr[i], 1e-5); + } + + uint64_t height, width; + ASSERT_EQ(PD_NO_ERROR, PDMatGetShape(mat, &height, &width)); + ASSERT_EQ(128, height); + ASSERT_EQ(32, width); + ASSERT_EQ(PD_NO_ERROR, PDMatDestroy(mat)); +} + +TEST(CAPIMatrix, createNone) { + PD_Matrix mat; + ASSERT_EQ(PD_NO_ERROR, PDMatCreateNone(&mat)); + ASSERT_EQ(PD_NO_ERROR, PDMatDestroy(mat)); +} diff --git a/paddle/capi/tests/test_Vector.cpp b/paddle/capi/tests/test_Vector.cpp new file mode 100644 index 0000000000000..dbb987d440a84 --- /dev/null +++ b/paddle/capi/tests/test_Vector.cpp @@ -0,0 +1,11 @@ +#include "PaddleCAPI.h" +#include "gtest/gtest.h" + +TEST(CAPIVector, create) { + 
PD_Vector tmp; + ASSERT_EQ(PD_NO_ERROR, PDVecCreate(&tmp, 128, false)); + bool isSparse; + ASSERT_EQ(PD_NO_ERROR, PDVecIsSparse(tmp, &isSparse)); + ASSERT_FALSE(isSparse); + ASSERT_EQ(PD_NO_ERROR, PDVecDestroy(tmp)); +} diff --git a/paddle/utils/ForceLink.h b/paddle/utils/ForceLink.h new file mode 100644 index 0000000000000..66005e2992e48 --- /dev/null +++ b/paddle/utils/ForceLink.h @@ -0,0 +1,46 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +/// Declare a force link file ID. It can be enabled by +/// `PADDLE_ENABLE_FORCE_LINK_FILE`. It is +/// +/// Example: +/// +/// In some_file.cpp +/// @code{cpp} +/// static paddle::InitFunction init([]{...}); +/// PADDLE_REGISTER_FORCE_LINK_FILE(some_file) +/// @endcode{cpp} +/// +/// In main.cpp +/// @code{cpp} +/// PADDLE_ENABLE_FORCE_LINK_FILE(some_file); +/// +/// int main() { +/// ... +/// } +/// @endcode{cpp} +/// +/// Then the InitFunction in some_file.cpp can be invoked. +#define PADDLE_REGISTER_FORCE_LINK_FILE(ID) \ + int __paddle_register_force_link_file_##ID##_method__() { return 0; } + +/// Enable a force link file. The file with ID's static variables could +/// be all initialized. 
+#define PADDLE_ENABLE_FORCE_LINK_FILE(ID) \ + extern int __paddle_register_force_link_file_##ID##_method__(); \ + static int __paddle_register_force_link_file_##ID##_handler__ = \ + __paddle_register_force_link_file_##ID##_method__(); diff --git a/paddle/utils/tests/CMakeLists.txt b/paddle/utils/tests/CMakeLists.txt index 26fafbd1ab3f2..d9b018ebbb27e 100644 --- a/paddle/utils/tests/CMakeLists.txt +++ b/paddle/utils/tests/CMakeLists.txt @@ -15,3 +15,11 @@ if(NOT APPLE) COMMAND ${PROJ_ROOT}/paddle/utils/tests/test_CustomStackTracePrint.sh WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) endif() + +add_library(test_class_registrar_lib STATIC + test_ClassRegistrarLib.cpp + test_ClassRegistrarGlobals.cpp) + +add_simple_unittest(test_ClassRegistrar) +target_link_libraries(test_ClassRegistrar + test_class_registrar_lib) diff --git a/paddle/utils/tests/test_ClassRegistrar.cpp b/paddle/utils/tests/test_ClassRegistrar.cpp new file mode 100644 index 0000000000000..c867045cb68b5 --- /dev/null +++ b/paddle/utils/tests/test_ClassRegistrar.cpp @@ -0,0 +1,27 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include +#include +#include "test_ClassRegistrarLib.h" +// Enable link test_ClassRegistrarLib.cpp +PADDLE_ENABLE_FORCE_LINK_FILE(test_registrar); + +TEST(ClassRegistrar, test) { + std::vector types; + gTestRegistrar_.forEachType( + [&types](const std::string& tp) { types.push_back(tp); }); + ASSERT_EQ(1, types.size()); + ASSERT_EQ("test", types[0]); +} diff --git a/paddle/utils/tests/test_ClassRegistrarGlobals.cpp b/paddle/utils/tests/test_ClassRegistrarGlobals.cpp new file mode 100644 index 0000000000000..0f36da137f9b9 --- /dev/null +++ b/paddle/utils/tests/test_ClassRegistrarGlobals.cpp @@ -0,0 +1,16 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "test_ClassRegistrarLib.h" +paddle::ClassRegistrar gTestRegistrar_; diff --git a/paddle/utils/tests/test_ClassRegistrarLib.cpp b/paddle/utils/tests/test_ClassRegistrarLib.cpp new file mode 100644 index 0000000000000..27071579f940a --- /dev/null +++ b/paddle/utils/tests/test_ClassRegistrarLib.cpp @@ -0,0 +1,31 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "test_ClassRegistrarLib.h" +#include +BaseClass::~BaseClass() {} + +class TestRegistrar : public BaseClass { +public: + TestRegistrar() {} + + virtual ~TestRegistrar() {} +}; + +static paddle::InitFunction init([] { + gTestRegistrar_.registerClass( + "test", []() -> BaseClass* { return new TestRegistrar(); }); +}); + +PADDLE_REGISTER_FORCE_LINK_FILE(test_registrar); diff --git a/paddle/utils/tests/test_ClassRegistrarLib.h b/paddle/utils/tests/test_ClassRegistrarLib.h new file mode 100644 index 0000000000000..de2d02e70cc7b --- /dev/null +++ b/paddle/utils/tests/test_ClassRegistrarLib.h @@ -0,0 +1,23 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once +#include "paddle/utils/ClassRegistrar.h" + +class BaseClass { +public: + virtual ~BaseClass(); +}; + +extern paddle::ClassRegistrar gTestRegistrar_; From aa6e25215e906d1ec0fc501849ca19e20abd0a0d Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 4 Jan 2017 18:02:45 +0800 Subject: [PATCH 02/43] Doing C-API --- paddle/capi/CMakeLists.txt | 3 +- paddle/capi/GradientMachine.cpp | 48 +++++++++++++++++++ paddle/capi/Main.cpp | 43 +++++++++++++++++ paddle/capi/PaddleCAPI.h | 25 ++++++++-- paddle/capi/PaddleCAPIPrivate.h | 5 ++ paddle/capi/tests/CMakeLists.txt | 29 +++++++++-- paddle/capi/tests/test_GradientMachine.cpp | 24 ++++++++++ paddle/capi/tests/test_Init.cpp | 0 paddle/capi/tests/vgg_16_cifar.py | 1 + paddle/utils/ForceLink.h | 46 ------------------ paddle/utils/tests/CMakeLists.txt | 8 ---- paddle/utils/tests/test_ClassRegistrar.cpp | 27 ----------- .../tests/test_ClassRegistrarGlobals.cpp | 16 ------- paddle/utils/tests/test_ClassRegistrarLib.cpp | 31 ------------ paddle/utils/tests/test_ClassRegistrarLib.h | 23 --------- 15 files changed, 169 insertions(+), 160 deletions(-) create mode 100644 paddle/capi/GradientMachine.cpp create mode 100644 paddle/capi/Main.cpp create mode 100644 paddle/capi/tests/test_GradientMachine.cpp create mode 100644 paddle/capi/tests/test_Init.cpp create mode 120000 paddle/capi/tests/vgg_16_cifar.py delete mode 100644 paddle/utils/ForceLink.h delete mode 100644 paddle/utils/tests/test_ClassRegistrar.cpp delete mode 100644 paddle/utils/tests/test_ClassRegistrarGlobals.cpp delete mode 100644 paddle/utils/tests/test_ClassRegistrarLib.cpp delete mode 100644 paddle/utils/tests/test_ClassRegistrarLib.h diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt index 6162267dab7cd..62e9e5ccef379 100644 --- a/paddle/capi/CMakeLists.txt +++ b/paddle/capi/CMakeLists.txt @@ -35,7 +35,8 @@ target_link_libraries(paddle_capi ${CBLAS_LIBS} ${ZLIB_LIBRARIES} ${INTERAL_LIBS} - ${CMAKE_DL_LIBS}) + ${CMAKE_DL_LIBS} + 
${PYTHON_LIBRARIES}) set(PADDLE_CAPI_INC_PATH diff --git a/paddle/capi/GradientMachine.cpp b/paddle/capi/GradientMachine.cpp new file mode 100644 index 0000000000000..2969b5f198f76 --- /dev/null +++ b/paddle/capi/GradientMachine.cpp @@ -0,0 +1,48 @@ +#include "PaddleCAPI.h" +#include "PaddleCAPIPrivate.h" +#include "paddle/gserver/gradientmachines/NeuralNetwork.h" + +#define cast(v) paddle::capi::cast(v) + +enum GradientMatchineCreateMode { + CREATE_MODE_NORMAL = 0, + CREATE_MODE_TESTING = 4 +}; + +namespace paddle { + +class MyNeuralNetwork : public NeuralNetwork { +public: + MyNeuralNetwork(const std::string& name, NeuralNetwork* network) + : NeuralNetwork(name, network) {} +}; + +NeuralNetwork* newCustomNerualNetwork(const std::string& name, + NeuralNetwork* network) { + return new MyNeuralNetwork(name, network); +} +} + +extern "C" { +int PDGradientMachineCreateForPredict(PD_GradiemtMachine* machine, + void* modelConfigProtobuf, + int size) { + if (modelConfigProtobuf == nullptr) return PD_NULLPTR; + paddle::ModelConfig config; + if (!config.ParseFromArray(modelConfigProtobuf, size) || + !config.IsInitialized()) { + return PD_PROTOBUF_ERROR; + } + + auto ptr = new paddle::capi::CGradientMachine(); + ptr->machine.reset(paddle::GradientMachine::create( + config, CREATE_MODE_TESTING, {paddle::PARAMETER_VALUE})); + *machine = ptr; + return PD_NO_ERROR; +} + +int PDGradientMachineDestroy(PD_GradiemtMachine machine) { + delete cast(machine); + return PD_NO_ERROR; +} +} diff --git a/paddle/capi/Main.cpp b/paddle/capi/Main.cpp new file mode 100644 index 0000000000000..49606e1f942b7 --- /dev/null +++ b/paddle/capi/Main.cpp @@ -0,0 +1,43 @@ +#include +#include +#include +#include +#include "PaddleCAPI.h" +#include "PaddleCAPIPrivate.h" +#include "paddle/trainer/TrainerConfigHelper.h" +#include "paddle/utils/Excepts.h" +#include "paddle/utils/PythonUtil.h" + +static void initPaddle(int argc, char** argv) { + paddle::initMain(argc, argv); + paddle::initPython(argc, argv); 
+ feenableexcept(FE_INVALID | FE_DIVBYZERO | FE_OVERFLOW); +} + +extern "C" { +int PDInit(int argc, char** argv) { + std::vector realArgv; + realArgv.reserve(argc + 1); + realArgv.push_back(strdup("")); + for (int i = 0; i < argc; ++i) { + realArgv.push_back(argv[i]); + } + initPaddle(argc + 1, realArgv.data()); + free(realArgv[0]); + return PD_NO_ERROR; +} + +int PDParseTrainerConfigFromFile(char* filename, + void** modelConfigProtobuf, + int* size) { + if (filename == nullptr || modelConfigProtobuf == nullptr || size == nullptr) + return PD_NULLPTR; + paddle::TrainerConfigHelper conf(filename); + if (!conf.getConfig().IsInitialized()) return PD_PROTOBUF_ERROR; + *size = conf.getConfig().ByteSize(); + *modelConfigProtobuf = malloc(*size); + if (!conf.getConfig().SerializeToArray(*modelConfigProtobuf, *size)) + return PD_PROTOBUF_ERROR; + return PD_NO_ERROR; +} +} diff --git a/paddle/capi/PaddleCAPI.h b/paddle/capi/PaddleCAPI.h index 2eff0bc7da25c..fa43b3b40bbe5 100644 --- a/paddle/capi/PaddleCAPI.h +++ b/paddle/capi/PaddleCAPI.h @@ -7,10 +7,13 @@ extern "C" { #endif -#define PD_NO_ERROR 0 -#define PD_NULLPTR 1 -#define PD_OUT_OF_RANGE 2 -#define PD_UNDEFINED_ERROR -1 +typedef enum { + PD_NO_ERROR = 0, + PD_NULLPTR = 1, + PD_OUT_OF_RANGE = 2, + PD_PROTOBUF_ERROR = 3, + PD_UNDEFINED_ERROR = -1, +} PD_Error; typedef void* PD_Vector; @@ -48,6 +51,20 @@ int PDArgsSetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat); int PDArgsGetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat); +typedef void* PD_GradiemtMachine; + +int PDGradientMachineCreateForPredict(PD_GradiemtMachine* machine, + void* modelConfigProtobuf, + int size); + +int PDGradientMachineDestroy(PD_GradiemtMachine machine); + +int PDInit(int argc, char** argv); + +int PDParseTrainerConfigFromFile(char* filename, + void** modelConfigProtobuf, + int* size); + #ifdef __cplusplus } #endif diff --git a/paddle/capi/PaddleCAPIPrivate.h b/paddle/capi/PaddleCAPIPrivate.h index efec60fbb68ec..07e731f6cd0c2 100644 
--- a/paddle/capi/PaddleCAPIPrivate.h +++ b/paddle/capi/PaddleCAPIPrivate.h @@ -1,4 +1,5 @@ #include "PaddleCAPI.h" +#include "paddle/gserver/gradientmachines/GradientMachine.h" #include "paddle/math/Matrix.h" #include "paddle/math/Vector.h" #include "paddle/parameter/Argument.h" @@ -19,6 +20,10 @@ struct CArguments { std::vector args; }; +struct CGradientMachine { + paddle::GradientMachinePtr machine; +}; + template inline T* cast(void* ptr) { return reinterpret_cast(ptr); diff --git a/paddle/capi/tests/CMakeLists.txt b/paddle/capi/tests/CMakeLists.txt index cd6b1d7c6271b..e1fa3d6b79139 100644 --- a/paddle/capi/tests/CMakeLists.txt +++ b/paddle/capi/tests/CMakeLists.txt @@ -1,15 +1,36 @@ -function(add_capi_unittest TARGET_NAME) +function(add_capi_unittest_without_exec TARGET_NAME) + set(with_test_main ON) + set(sources) + foreach(source_file ${ARGN}) + if (${source_file} STREQUAL "NO_MAIN") + set(with_test_main OFF) + else() + list(APPEND sources ${source_file}) + endif() + endforeach() + add_executable( ${TARGET_NAME} - ${ARGN}) + ${sources}) + + target_link_libraries( ${TARGET_NAME} paddle_capi - paddle_test_main ${GTEST_LIBRARIES}) + + if (with_test_main) + target_link_libraries( + ${TARGET_NAME} paddle_test_main) + endif() target_include_directories(${TARGET_NAME} PUBLIC ${PADDLE_CAPI_INC_PATH}) - add_test(NAME ${TARGET_NAME} COMMAND ${TARGET_NAME}) endfunction() +function(add_capi_unittest TARGET_NAME) + add_capi_unittest_without_exec(${TARGET_NAME} ${ARGN}) + add_test(NAME ${TARGET_NAME} COMMAND ${TARGET_NAME}) +endfunction() add_capi_unittest(capi_test_mats test_Vector.cpp test_Matrix.cpp test_Arguments.cpp) + +add_capi_unittest(capi_test_gradientMachine NO_MAIN test_GradientMachine.cpp) diff --git a/paddle/capi/tests/test_GradientMachine.cpp b/paddle/capi/tests/test_GradientMachine.cpp new file mode 100644 index 0000000000000..8c1ea73ae6cd6 --- /dev/null +++ b/paddle/capi/tests/test_GradientMachine.cpp @@ -0,0 +1,24 @@ +#include +#include +#include 
+#include "PaddleCAPI.h" + +TEST(GradientMachine, load) { + void* buf; + int size; + ASSERT_EQ( + PD_NO_ERROR, + PDParseTrainerConfigFromFile(strdup("./vgg_16_cifar.py"), &buf, &size)); + free(buf); +} + +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + std::vector argvs; + argvs.push_back(strdup("--use_gpu=false")); + PDInit((int)argvs.size(), argvs.data()); + for (auto each : argvs) { + free(each); + } + return RUN_ALL_TESTS(); +} diff --git a/paddle/capi/tests/test_Init.cpp b/paddle/capi/tests/test_Init.cpp new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/paddle/capi/tests/vgg_16_cifar.py b/paddle/capi/tests/vgg_16_cifar.py new file mode 120000 index 0000000000000..81250eefde639 --- /dev/null +++ b/paddle/capi/tests/vgg_16_cifar.py @@ -0,0 +1 @@ +../../../demo/image_classification/vgg_16_cifar.py \ No newline at end of file diff --git a/paddle/utils/ForceLink.h b/paddle/utils/ForceLink.h deleted file mode 100644 index 66005e2992e48..0000000000000 --- a/paddle/utils/ForceLink.h +++ /dev/null @@ -1,46 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -/// Declare a force link file ID. It can be enabled by -/// `PADDLE_ENABLE_FORCE_LINK_FILE`. 
It is -/// -/// Example: -/// -/// In some_file.cpp -/// @code{cpp} -/// static paddle::InitFunction init([]{...}); -/// PADDLE_REGISTER_FORCE_LINK_FILE(some_file) -/// @endcode{cpp} -/// -/// In main.cpp -/// @code{cpp} -/// PADDLE_ENABLE_FORCE_LINK_FILE(some_file); -/// -/// int main() { -/// ... -/// } -/// @endcode{cpp} -/// -/// Then the InitFunction in some_file.cpp can be invoked. -#define PADDLE_REGISTER_FORCE_LINK_FILE(ID) \ - int __paddle_register_force_link_file_##ID##_method__() { return 0; } - -/// Enable a force link file. The file with ID's static variables could -/// be all initialized. -#define PADDLE_ENABLE_FORCE_LINK_FILE(ID) \ - extern int __paddle_register_force_link_file_##ID##_method__(); \ - static int __paddle_register_force_link_file_##ID##_handler__ = \ - __paddle_register_force_link_file_##ID##_method__(); diff --git a/paddle/utils/tests/CMakeLists.txt b/paddle/utils/tests/CMakeLists.txt index d9b018ebbb27e..26fafbd1ab3f2 100644 --- a/paddle/utils/tests/CMakeLists.txt +++ b/paddle/utils/tests/CMakeLists.txt @@ -15,11 +15,3 @@ if(NOT APPLE) COMMAND ${PROJ_ROOT}/paddle/utils/tests/test_CustomStackTracePrint.sh WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) endif() - -add_library(test_class_registrar_lib STATIC - test_ClassRegistrarLib.cpp - test_ClassRegistrarGlobals.cpp) - -add_simple_unittest(test_ClassRegistrar) -target_link_libraries(test_ClassRegistrar - test_class_registrar_lib) diff --git a/paddle/utils/tests/test_ClassRegistrar.cpp b/paddle/utils/tests/test_ClassRegistrar.cpp deleted file mode 100644 index c867045cb68b5..0000000000000 --- a/paddle/utils/tests/test_ClassRegistrar.cpp +++ /dev/null @@ -1,27 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include -#include -#include "test_ClassRegistrarLib.h" -// Enable link test_ClassRegistrarLib.cpp -PADDLE_ENABLE_FORCE_LINK_FILE(test_registrar); - -TEST(ClassRegistrar, test) { - std::vector types; - gTestRegistrar_.forEachType( - [&types](const std::string& tp) { types.push_back(tp); }); - ASSERT_EQ(1, types.size()); - ASSERT_EQ("test", types[0]); -} diff --git a/paddle/utils/tests/test_ClassRegistrarGlobals.cpp b/paddle/utils/tests/test_ClassRegistrarGlobals.cpp deleted file mode 100644 index 0f36da137f9b9..0000000000000 --- a/paddle/utils/tests/test_ClassRegistrarGlobals.cpp +++ /dev/null @@ -1,16 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#include "test_ClassRegistrarLib.h" -paddle::ClassRegistrar gTestRegistrar_; diff --git a/paddle/utils/tests/test_ClassRegistrarLib.cpp b/paddle/utils/tests/test_ClassRegistrarLib.cpp deleted file mode 100644 index 27071579f940a..0000000000000 --- a/paddle/utils/tests/test_ClassRegistrarLib.cpp +++ /dev/null @@ -1,31 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "test_ClassRegistrarLib.h" -#include -BaseClass::~BaseClass() {} - -class TestRegistrar : public BaseClass { -public: - TestRegistrar() {} - - virtual ~TestRegistrar() {} -}; - -static paddle::InitFunction init([] { - gTestRegistrar_.registerClass( - "test", []() -> BaseClass* { return new TestRegistrar(); }); -}); - -PADDLE_REGISTER_FORCE_LINK_FILE(test_registrar); diff --git a/paddle/utils/tests/test_ClassRegistrarLib.h b/paddle/utils/tests/test_ClassRegistrarLib.h deleted file mode 100644 index de2d02e70cc7b..0000000000000 --- a/paddle/utils/tests/test_ClassRegistrarLib.h +++ /dev/null @@ -1,23 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once -#include "paddle/utils/ClassRegistrar.h" - -class BaseClass { -public: - virtual ~BaseClass(); -}; - -extern paddle::ClassRegistrar gTestRegistrar_; From 3fcd81fde757488d03c6d6fcc510c8d2deb9cd17 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 5 Jan 2017 16:38:31 +0800 Subject: [PATCH 03/43] Stash --- CMakeLists.txt | 2 +- cmake/util.cmake | 2 +- paddle/capi/CMakeLists.txt | 5 +++-- paddle/capi/Main.cpp | 14 -------------- paddle/capi/PaddleCAPI.h | 4 ---- paddle/capi/tests/CMakeLists.txt | 1 - paddle/capi/tests/test_GradientMachine.cpp | 15 +++++++++------ 7 files changed, 14 insertions(+), 29 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 65fbbb481c432..bbe47d4ff1dc5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -26,7 +26,7 @@ find_package(NumPy REQUIRED) find_package(Threads REQUIRED) find_package(AVX QUIET) find_package(Glog REQUIRED) -find_package(Gflags REQUIRED) +find_package(Gflags COMPONENTS nothreads_static REQUIRED) find_package(GTest) find_package(Sphinx) find_package(Doxygen) diff --git a/cmake/util.cmake b/cmake/util.cmake index 43a56378df009..3b4258654f05c 100644 --- a/cmake/util.cmake +++ b/cmake/util.cmake @@ -110,7 +110,7 @@ function(link_paddle_exe TARGET_NAME) ${METRIC_LIBS} ${PROTOBUF_LIBRARY} ${LIBGLOG_LIBRARY} - ${GFLAGS_LIBRARIES} + gflags ${CMAKE_THREAD_LIBS_INIT} ${CBLAS_LIBS} ${ZLIB_LIBRARIES} diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt index 62e9e5ccef379..80cf2c7fa913d 100644 --- a/paddle/capi/CMakeLists.txt +++ b/paddle/capi/CMakeLists.txt @@ -18,7 +18,9 @@ 
target_include_directories(paddle_capi PUBLIC ${CMAKE_CURRENT_BINARY_DIR}) add_dependencies(paddle_capi gen_proto_cpp) target_link_libraries(paddle_capi + "-Wl,-force_load" paddle_gserver + "-Wl,-force_load" paddle_function paddle_pserver paddle_trainer_lib @@ -30,7 +32,7 @@ target_link_libraries(paddle_capi paddle_cuda ${PROTOBUF_LIBRARY} ${LIBGLOG_LIBRARY} - ${GFLAGS_LIBRARIES} + gflags ${CMAKE_THREAD_LIBS_INIT} ${CBLAS_LIBS} ${ZLIB_LIBRARIES} @@ -38,7 +40,6 @@ target_link_libraries(paddle_capi ${CMAKE_DL_LIBS} ${PYTHON_LIBRARIES}) - set(PADDLE_CAPI_INC_PATH ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}) diff --git a/paddle/capi/Main.cpp b/paddle/capi/Main.cpp index 49606e1f942b7..cc07e2ba4ef81 100644 --- a/paddle/capi/Main.cpp +++ b/paddle/capi/Main.cpp @@ -26,18 +26,4 @@ int PDInit(int argc, char** argv) { free(realArgv[0]); return PD_NO_ERROR; } - -int PDParseTrainerConfigFromFile(char* filename, - void** modelConfigProtobuf, - int* size) { - if (filename == nullptr || modelConfigProtobuf == nullptr || size == nullptr) - return PD_NULLPTR; - paddle::TrainerConfigHelper conf(filename); - if (!conf.getConfig().IsInitialized()) return PD_PROTOBUF_ERROR; - *size = conf.getConfig().ByteSize(); - *modelConfigProtobuf = malloc(*size); - if (!conf.getConfig().SerializeToArray(*modelConfigProtobuf, *size)) - return PD_PROTOBUF_ERROR; - return PD_NO_ERROR; -} } diff --git a/paddle/capi/PaddleCAPI.h b/paddle/capi/PaddleCAPI.h index fa43b3b40bbe5..17a2498671859 100644 --- a/paddle/capi/PaddleCAPI.h +++ b/paddle/capi/PaddleCAPI.h @@ -61,10 +61,6 @@ int PDGradientMachineDestroy(PD_GradiemtMachine machine); int PDInit(int argc, char** argv); -int PDParseTrainerConfigFromFile(char* filename, - void** modelConfigProtobuf, - int* size); - #ifdef __cplusplus } #endif diff --git a/paddle/capi/tests/CMakeLists.txt b/paddle/capi/tests/CMakeLists.txt index e1fa3d6b79139..e54a53e2935b4 100644 --- a/paddle/capi/tests/CMakeLists.txt +++ b/paddle/capi/tests/CMakeLists.txt @@ 
-13,7 +13,6 @@ function(add_capi_unittest_without_exec TARGET_NAME) ${TARGET_NAME} ${sources}) - target_link_libraries( ${TARGET_NAME} paddle_capi diff --git a/paddle/capi/tests/test_GradientMachine.cpp b/paddle/capi/tests/test_GradientMachine.cpp index 8c1ea73ae6cd6..f07d1e4e7294f 100644 --- a/paddle/capi/tests/test_GradientMachine.cpp +++ b/paddle/capi/tests/test_GradientMachine.cpp @@ -1,15 +1,18 @@ #include +#include #include #include #include "PaddleCAPI.h" TEST(GradientMachine, load) { - void* buf; - int size; - ASSERT_EQ( - PD_NO_ERROR, - PDParseTrainerConfigFromFile(strdup("./vgg_16_cifar.py"), &buf, &size)); - free(buf); + paddle::TrainerConfigHelper config("./vgg_16_cifar.py"); + std::string buffer; + ASSERT_TRUE(config.getModelConfig().SerializeToString(&buffer)); + PD_GradiemtMachine machine; + + ASSERT_EQ(PD_NO_ERROR, + PDGradientMachineCreateForPredict( + &machine, &buffer[0], (int)buffer.size())); } int main(int argc, char** argv) { From a873a40e508a9a9de87c389a1418c5191f3d3b2e Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 5 Jan 2017 16:44:33 +0800 Subject: [PATCH 04/43] Try to use standard way to import gflags. * See [here](https://gflags.github.io/gflags/#cmake). 
--- CMakeLists.txt | 2 +- cmake/FindGflags.cmake | 582 ---------------------------------- cmake/util.cmake | 4 +- paddle/api/paddle_ld_flags.py | 8 +- 4 files changed, 5 insertions(+), 591 deletions(-) delete mode 100644 cmake/FindGflags.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index 65fbbb481c432..0b7870cde5252 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -26,7 +26,7 @@ find_package(NumPy REQUIRED) find_package(Threads REQUIRED) find_package(AVX QUIET) find_package(Glog REQUIRED) -find_package(Gflags REQUIRED) +find_package(gflags REQUIRED) find_package(GTest) find_package(Sphinx) find_package(Doxygen) diff --git a/cmake/FindGflags.cmake b/cmake/FindGflags.cmake deleted file mode 100644 index 6587089ba382d..0000000000000 --- a/cmake/FindGflags.cmake +++ /dev/null @@ -1,582 +0,0 @@ -# Ceres Solver - A fast non-linear least squares minimizer -# Copyright 2015 Google Inc. All rights reserved. -# http://ceres-solver.org/ -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# * Neither the name of Google Inc. nor the names of its contributors may be -# used to endorse or promote products derived from this software without -# specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. -# -# Author: alexs.mac@gmail.com (Alex Stewart) -# - -# FindGflags.cmake - Find Google gflags logging library. -# -# This module will attempt to find gflags, either via an exported CMake -# configuration (generated by gflags >= 2.1 which are built with CMake), or -# by performing a standard search for all gflags components. The order of -# precedence for these two methods of finding gflags is controlled by: -# GFLAGS_PREFER_EXPORTED_GFLAGS_CMAKE_CONFIGURATION. -# -# This module defines the following variables: -# -# GFLAGS_FOUND: TRUE iff gflags is found. -# GFLAGS_INCLUDE_DIRS: Include directories for gflags. -# GFLAGS_LIBRARIES: Libraries required to link gflags. -# GFLAGS_NAMESPACE: The namespace in which gflags is defined. In versions of -# gflags < 2.1, this was google, for versions >= 2.1 it is -# by default gflags, although can be configured when building -# gflags to be something else (i.e. google for legacy -# compatibility). -# -# The following variables control the behaviour of this module when an exported -# gflags CMake configuration is not found. -# -# GFLAGS_PREFER_EXPORTED_GFLAGS_CMAKE_CONFIGURATION: TRUE/FALSE, iff TRUE then -# then prefer using an exported CMake configuration -# generated by gflags >= 2.1 over searching for the -# gflags components manually. Otherwise (FALSE) -# ignore any exported gflags CMake configurations and -# always perform a manual search for the components. 
-# Default: TRUE iff user does not define this variable -# before we are called, and does NOT specify either -# GFLAGS_INCLUDE_DIR_HINTS or GFLAGS_LIBRARY_DIR_HINTS -# otherwise FALSE. -# GFLAGS_INCLUDE_DIR_HINTS: List of additional directories in which to -# search for gflags includes, e.g: /timbuktu/include. -# GFLAGS_LIBRARY_DIR_HINTS: List of additional directories in which to -# search for gflags libraries, e.g: /timbuktu/lib. -# -# The following variables are also defined by this module, but in line with -# CMake recommended FindPackage() module style should NOT be referenced directly -# by callers (use the plural variables detailed above instead). These variables -# do however affect the behaviour of the module via FIND_[PATH/LIBRARY]() which -# are NOT re-called (i.e. search for library is not repeated) if these variables -# are set with valid values _in the CMake cache_. This means that if these -# variables are set directly in the cache, either by the user in the CMake GUI, -# or by the user passing -DVAR=VALUE directives to CMake when called (which -# explicitly defines a cache variable), then they will be used verbatim, -# bypassing the HINTS variables and other hard-coded search locations. -# -# GFLAGS_INCLUDE_DIR: Include directory for gflags, not including the -# include directory of any dependencies. -# GFLAGS_LIBRARY: gflags library, not including the libraries of any -# dependencies. - -# Reset CALLERS_CMAKE_FIND_LIBRARY_PREFIXES to its value when FindGflags was -# invoked, necessary for MSVC. -macro(GFLAGS_RESET_FIND_LIBRARY_PREFIX) - if (MSVC) - set(CMAKE_FIND_LIBRARY_PREFIXES "${CALLERS_CMAKE_FIND_LIBRARY_PREFIXES}") - endif (MSVC) -endmacro(GFLAGS_RESET_FIND_LIBRARY_PREFIX) - -# Called if we failed to find gflags or any of it's required dependencies, -# unsets all public (designed to be used externally) variables and reports -# error message at priority depending upon [REQUIRED/QUIET/] argument. 
-macro(GFLAGS_REPORT_NOT_FOUND REASON_MSG) - unset(GFLAGS_FOUND) - unset(GFLAGS_INCLUDE_DIRS) - unset(GFLAGS_LIBRARIES) - # Do not use unset, as we want to keep GFLAGS_NAMESPACE in the cache, - # but simply clear its value. - set(GFLAGS_NAMESPACE "" CACHE STRING - "gflags namespace (google or gflags)" FORCE) - - # Make results of search visible in the CMake GUI if gflags has not - # been found so that user does not have to toggle to advanced view. - mark_as_advanced(CLEAR GFLAGS_INCLUDE_DIR - GFLAGS_LIBRARY - GFLAGS_NAMESPACE) - - gflags_reset_find_library_prefix() - - # Note _FIND_[REQUIRED/QUIETLY] variables defined by FindPackage() - # use the camelcase library name, not uppercase. - if (Gflags_FIND_QUIETLY) - message(STATUS "Failed to find gflags - " ${REASON_MSG} ${ARGN}) - elseif (Gflags_FIND_REQUIRED) - message(FATAL_ERROR "Failed to find gflags - " ${REASON_MSG} ${ARGN}) - else() - # Neither QUIETLY nor REQUIRED, use no priority which emits a message - # but continues configuration and allows generation. - message("-- Failed to find gflags - " ${REASON_MSG} ${ARGN}) - endif () - return() -endmacro(GFLAGS_REPORT_NOT_FOUND) - -# Verify that all variable names passed as arguments are defined (can be empty -# but must be defined) or raise a fatal error. -macro(GFLAGS_CHECK_VARS_DEFINED) - foreach(CHECK_VAR ${ARGN}) - if (NOT DEFINED ${CHECK_VAR}) - message(FATAL_ERROR "Ceres Bug: ${CHECK_VAR} is not defined.") - endif() - endforeach() -endmacro(GFLAGS_CHECK_VARS_DEFINED) - -# Use check_cxx_source_compiles() to compile trivial test programs to determine -# the gflags namespace. This works on all OSs except Windows. If using Visual -# Studio, it fails because msbuild forces check_cxx_source_compiles() to use -# CMAKE_BUILD_TYPE=Debug for the test project, which usually breaks detection -# because MSVC requires that the test project use the same build type as gflags, -# which would normally be built in Release. 
-# -# Defines: GFLAGS_NAMESPACE in the caller's scope with the detected namespace, -# which is blank (empty string, will test FALSE is CMake conditionals) -# if detection failed. -function(GFLAGS_CHECK_GFLAGS_NAMESPACE_USING_TRY_COMPILE) - # Verify that all required variables are defined. - gflags_check_vars_defined( - GFLAGS_INCLUDE_DIR GFLAGS_LIBRARY) - # Ensure that GFLAGS_NAMESPACE is always unset on completion unless - # we explicitly set if after having the correct namespace. - set(GFLAGS_NAMESPACE "" PARENT_SCOPE) - - include(CheckCXXSourceCompiles) - # Setup include path & link library for gflags for CHECK_CXX_SOURCE_COMPILES. - set(CMAKE_REQUIRED_INCLUDES ${GFLAGS_INCLUDE_DIR}) - set(CMAKE_REQUIRED_LIBRARIES ${GFLAGS_LIBRARY} ${GFLAGS_LINK_LIBRARIES}) - # First try the (older) google namespace. Note that the output variable - # MUST be unique to the build type as otherwise the test is not repeated as - # it is assumed to have already been performed. - check_cxx_source_compiles( - "#include - int main(int argc, char * argv[]) { - google::ParseCommandLineFlags(&argc, &argv, true); - return 0; - }" - GFLAGS_IN_GOOGLE_NAMESPACE) - if (GFLAGS_IN_GOOGLE_NAMESPACE) - set(GFLAGS_NAMESPACE google PARENT_SCOPE) - return() - endif() - - # Try (newer) gflags namespace instead. Note that the output variable - # MUST be unique to the build type as otherwise the test is not repeated as - # it is assumed to have already been performed. 
- set(CMAKE_REQUIRED_INCLUDES ${GFLAGS_INCLUDE_DIR}) - set(CMAKE_REQUIRED_LIBRARIES ${GFLAGS_LIBRARY} ${GFLAGS_LINK_LIBRARIES}) - check_cxx_source_compiles( - "#include - int main(int argc, char * argv[]) { - gflags::ParseCommandLineFlags(&argc, &argv, true); - return 0; - }" - GFLAGS_IN_GFLAGS_NAMESPACE) - if (GFLAGS_IN_GFLAGS_NAMESPACE) - set(GFLAGS_NAMESPACE gflags PARENT_SCOPE) - return() - endif (GFLAGS_IN_GFLAGS_NAMESPACE) -endfunction(GFLAGS_CHECK_GFLAGS_NAMESPACE_USING_TRY_COMPILE) - -# Use regex on the gflags headers to attempt to determine the gflags namespace. -# Checks both gflags.h (contained namespace on versions < 2.1.2) and -# gflags_declare.h, which contains the namespace on versions >= 2.1.2. -# In general, this method should only be used when -# GFLAGS_CHECK_GFLAGS_NAMESPACE_USING_TRY_COMPILE() cannot be used, or has -# failed. -# -# Defines: GFLAGS_NAMESPACE in the caller's scope with the detected namespace, -# which is blank (empty string, will test FALSE is CMake conditionals) -# if detection failed. -function(GFLAGS_CHECK_GFLAGS_NAMESPACE_USING_REGEX) - # Verify that all required variables are defined. - gflags_check_vars_defined(GFLAGS_INCLUDE_DIR) - # Ensure that GFLAGS_NAMESPACE is always undefined on completion unless - # we explicitly set if after having the correct namespace. - set(GFLAGS_NAMESPACE "" PARENT_SCOPE) - - # Scan gflags.h to identify what namespace gflags was built with. On - # versions of gflags < 2.1.2, gflags.h was configured with the namespace - # directly, on >= 2.1.2, gflags.h uses the GFLAGS_NAMESPACE #define which - # is defined in gflags_declare.h, we try each location in turn. 
- set(GFLAGS_HEADER_FILE ${GFLAGS_INCLUDE_DIR}/gflags/gflags.h) - if (NOT EXISTS ${GFLAGS_HEADER_FILE}) - gflags_report_not_found( - "Could not find file: ${GFLAGS_HEADER_FILE} " - "containing namespace information in gflags install located at: " - "${GFLAGS_INCLUDE_DIR}.") - endif() - file(READ ${GFLAGS_HEADER_FILE} GFLAGS_HEADER_FILE_CONTENTS) - - string(REGEX MATCH "namespace [A-Za-z]+" - GFLAGS_NAMESPACE "${GFLAGS_HEADER_FILE_CONTENTS}") - string(REGEX REPLACE "namespace ([A-Za-z]+)" "\\1" - GFLAGS_NAMESPACE "${GFLAGS_NAMESPACE}") - - if (NOT GFLAGS_NAMESPACE) - gflags_report_not_found( - "Failed to extract gflags namespace from header file: " - "${GFLAGS_HEADER_FILE}.") - endif (NOT GFLAGS_NAMESPACE) - - if (GFLAGS_NAMESPACE STREQUAL "google" OR - GFLAGS_NAMESPACE STREQUAL "gflags") - # Found valid gflags namespace from gflags.h. - set(GFLAGS_NAMESPACE "${GFLAGS_NAMESPACE}" PARENT_SCOPE) - return() - endif() - - # Failed to find gflags namespace from gflags.h, gflags is likely a new - # version, check gflags_declare.h, which in newer versions (>= 2.1.2) contains - # the GFLAGS_NAMESPACE #define, which is then referenced in gflags.h. 
- set(GFLAGS_DECLARE_FILE ${GFLAGS_INCLUDE_DIR}/gflags/gflags_declare.h) - if (NOT EXISTS ${GFLAGS_DECLARE_FILE}) - gflags_report_not_found( - "Could not find file: ${GFLAGS_DECLARE_FILE} " - "containing namespace information in gflags install located at: " - "${GFLAGS_INCLUDE_DIR}.") - endif() - file(READ ${GFLAGS_DECLARE_FILE} GFLAGS_DECLARE_FILE_CONTENTS) - - string(REGEX MATCH "#define GFLAGS_NAMESPACE [A-Za-z]+" - GFLAGS_NAMESPACE "${GFLAGS_DECLARE_FILE_CONTENTS}") - string(REGEX REPLACE "#define GFLAGS_NAMESPACE ([A-Za-z]+)" "\\1" - GFLAGS_NAMESPACE "${GFLAGS_NAMESPACE}") - - if (NOT GFLAGS_NAMESPACE) - gflags_report_not_found( - "Failed to extract gflags namespace from declare file: " - "${GFLAGS_DECLARE_FILE}.") - endif (NOT GFLAGS_NAMESPACE) - - if (GFLAGS_NAMESPACE STREQUAL "google" OR - GFLAGS_NAMESPACE STREQUAL "gflags") - # Found valid gflags namespace from gflags.h. - set(GFLAGS_NAMESPACE "${GFLAGS_NAMESPACE}" PARENT_SCOPE) - return() - endif() -endfunction(GFLAGS_CHECK_GFLAGS_NAMESPACE_USING_REGEX) - -# ----------------------------------------------------------------- -# By default, if the user has expressed no preference for using an exported -# gflags CMake configuration over performing a search for the installed -# components, and has not specified any hints for the search locations, then -# prefer a gflags exported configuration if available. -if (NOT DEFINED GFLAGS_PREFER_EXPORTED_GFLAGS_CMAKE_CONFIGURATION - AND NOT GFLAGS_INCLUDE_DIR_HINTS - AND NOT GFLAGS_LIBRARY_DIR_HINTS) - message(STATUS "No preference for use of exported gflags CMake configuration " - "set, and no hints for include/library directories provided. " - "Defaulting to preferring an installed/exported gflags CMake configuration " - "if available.") - set(GFLAGS_PREFER_EXPORTED_GFLAGS_CMAKE_CONFIGURATION TRUE) -endif() - -if (GFLAGS_PREFER_EXPORTED_GFLAGS_CMAKE_CONFIGURATION) - # Try to find an exported CMake configuration for gflags, as generated by - # gflags versions >= 2.1. 
- # - # We search twice, s/t we can invert the ordering of precedence used by - # find_package() for exported package build directories, and installed - # packages (found via CMAKE_SYSTEM_PREFIX_PATH), listed as items 6) and 7) - # respectively in [1]. - # - # By default, exported build directories are (in theory) detected first, and - # this is usually the case on Windows. However, on OS X & Linux, the install - # path (/usr/local) is typically present in the PATH environment variable - # which is checked in item 4) in [1] (i.e. before both of the above, unless - # NO_SYSTEM_ENVIRONMENT_PATH is passed). As such on those OSs installed - # packages are usually detected in preference to exported package build - # directories. - # - # To ensure a more consistent response across all OSs, and as users usually - # want to prefer an installed version of a package over a locally built one - # where both exist (esp. as the exported build directory might be removed - # after installation), we first search with NO_CMAKE_PACKAGE_REGISTRY which - # means any build directories exported by the user are ignored, and thus - # installed directories are preferred. If this fails to find the package - # we then research again, but without NO_CMAKE_PACKAGE_REGISTRY, so any - # exported build directories will now be detected. - # - # To prevent confusion on Windows, we also pass NO_CMAKE_BUILDS_PATH (which - # is item 5) in [1]), to not preferentially use projects that were built - # recently with the CMake GUI to ensure that we always prefer an installed - # version if available. - # - # [1] http://www.cmake.org/cmake/help/v2.8.11/cmake.html#command:find_package - find_package(gflags QUIET - NO_MODULE - NO_CMAKE_PACKAGE_REGISTRY - NO_CMAKE_BUILDS_PATH) - if (gflags_FOUND) - message(STATUS "Found installed version of gflags: ${gflags_DIR}") - else(gflags_FOUND) - # Failed to find an installed version of gflags, repeat search allowing - # exported build directories. 
- message(STATUS "Failed to find installed gflags CMake configuration, " - "searching for gflags build directories exported with CMake.") - # Again pass NO_CMAKE_BUILDS_PATH, as we know that gflags is exported and - # do not want to treat projects built with the CMake GUI preferentially. - find_package(gflags QUIET - NO_MODULE - NO_CMAKE_BUILDS_PATH) - if (gflags_FOUND) - message(STATUS "Found exported gflags build directory: ${gflags_DIR}") - endif(gflags_FOUND) - endif(gflags_FOUND) - - set(FOUND_INSTALLED_GFLAGS_CMAKE_CONFIGURATION ${gflags_FOUND}) - - # gflags v2.1 - 2.1.2 shipped with a bug in their gflags-config.cmake [1] - # whereby gflags_LIBRARIES = "gflags", but there was no imported target - # called "gflags", they were called: gflags[_nothreads]-[static/shared]. - # As this causes linker errors when gflags is not installed in a location - # on the current library paths, detect if this problem is present and - # fix it. - # - # [1] https://github.com/gflags/gflags/issues/110 - if (gflags_FOUND) - # NOTE: This is not written as additional conditions in the outer - # if (gflags_FOUND) as the NOT TARGET "${gflags_LIBRARIES}" - # condition causes problems if gflags is not found. - if (${gflags_VERSION} VERSION_LESS 2.1.3 AND - NOT TARGET "${gflags_LIBRARIES}") - message(STATUS "Detected broken gflags install in: ${gflags_DIR}, " - "version: ${gflags_VERSION} <= 2.1.2 which defines gflags_LIBRARIES = " - "${gflags_LIBRARIES} which is not an imported CMake target, see: " - "https://github.com/gflags/gflags/issues/110. Attempting to fix by " - "detecting correct gflags target.") - # Ordering here expresses preference for detection, specifically we do not - # want to use the _nothreads variants if the full library is available. 
- list(APPEND CHECK_GFLAGS_IMPORTED_TARGET_NAMES - gflags-shared gflags-static - gflags_nothreads-shared gflags_nothreads-static) - foreach(CHECK_GFLAGS_TARGET ${CHECK_GFLAGS_IMPORTED_TARGET_NAMES}) - if (TARGET ${CHECK_GFLAGS_TARGET}) - message(STATUS "Found valid gflags target: ${CHECK_GFLAGS_TARGET}, " - "updating gflags_LIBRARIES.") - set(gflags_LIBRARIES ${CHECK_GFLAGS_TARGET}) - break() - endif() - endforeach() - if (NOT TARGET ${gflags_LIBRARIES}) - message(STATUS "Failed to fix detected broken gflags install in: " - "${gflags_DIR}, version: ${gflags_VERSION} <= 2.1.2, none of the " - "imported targets for gflags: ${CHECK_GFLAGS_IMPORTED_TARGET_NAMES} " - "are defined. Will continue with a manual search for gflags " - "components. We recommend you build/install a version of gflags > " - "2.1.2 (or master).") - set(FOUND_INSTALLED_GFLAGS_CMAKE_CONFIGURATION FALSE) - endif() - endif() - endif() - - if (FOUND_INSTALLED_GFLAGS_CMAKE_CONFIGURATION) - message(STATUS "Detected gflags version: ${gflags_VERSION}") - set(GFLAGS_FOUND ${gflags_FOUND}) - set(GFLAGS_INCLUDE_DIR ${gflags_INCLUDE_DIR}) - set(GFLAGS_LIBRARY ${gflags_LIBRARIES}) - - # gflags does not export the namespace in their CMake configuration, so - # use our function to determine what it should be, as it can be either - # gflags or google dependent upon version & configuration. - # - # NOTE: We use the regex method to determine the namespace here, as - # check_cxx_source_compiles() will not use imported targets, which - # is what gflags will be in this case. 
- gflags_check_gflags_namespace_using_regex() - - if (NOT GFLAGS_NAMESPACE) - gflags_report_not_found( - "Failed to determine gflags namespace using regex for gflags " - "version: ${gflags_VERSION} exported here: ${gflags_DIR} using CMake.") - endif (NOT GFLAGS_NAMESPACE) - else (FOUND_INSTALLED_GFLAGS_CMAKE_CONFIGURATION) - message(STATUS "Failed to find an installed/exported CMake configuration " - "for gflags, will perform search for installed gflags components.") - endif (FOUND_INSTALLED_GFLAGS_CMAKE_CONFIGURATION) -endif(GFLAGS_PREFER_EXPORTED_GFLAGS_CMAKE_CONFIGURATION) - -if (NOT GFLAGS_FOUND) - # Either failed to find an exported gflags CMake configuration, or user - # told us not to use one. Perform a manual search for all gflags components. - - # Handle possible presence of lib prefix for libraries on MSVC, see - # also GFLAGS_RESET_FIND_LIBRARY_PREFIX(). - if (MSVC) - # Preserve the caller's original values for CMAKE_FIND_LIBRARY_PREFIXES - # s/t we can set it back before returning. - set(CALLERS_CMAKE_FIND_LIBRARY_PREFIXES "${CMAKE_FIND_LIBRARY_PREFIXES}") - # The empty string in this list is important, it represents the case when - # the libraries have no prefix (shared libraries / DLLs). - set(CMAKE_FIND_LIBRARY_PREFIXES "lib" "" "${CMAKE_FIND_LIBRARY_PREFIXES}") - endif (MSVC) - - # Search user-installed locations first, so that we prefer user installs - # to system installs where both exist. - list(APPEND GFLAGS_CHECK_INCLUDE_DIRS - /usr/local/include - /usr/local/homebrew/include # Mac OS X - /opt/local/var/macports/software # Mac OS X. - /opt/local/include - /usr/include) - list(APPEND GFLAGS_CHECK_PATH_SUFFIXES - gflags/include # Windows (for C:/Program Files prefix). - gflags/Include ) # Windows (for C:/Program Files prefix). - - list(APPEND GFLAGS_CHECK_LIBRARY_DIRS - /usr/local/lib - /usr/local/homebrew/lib # Mac OS X. - /opt/local/lib - /usr/lib) - list(APPEND GFLAGS_CHECK_LIBRARY_SUFFIXES - gflags/lib # Windows (for C:/Program Files prefix). 
- gflags/Lib ) # Windows (for C:/Program Files prefix). - - # Search supplied hint directories first if supplied. - find_path(GFLAGS_INCLUDE_DIR - NAMES gflags/gflags.h - PATHS ${GFLAGS_INCLUDE_DIR_HINTS} - ${GFLAGS_CHECK_INCLUDE_DIRS} - PATH_SUFFIXES ${GFLAGS_CHECK_PATH_SUFFIXES}) - if (NOT GFLAGS_INCLUDE_DIR OR - NOT EXISTS ${GFLAGS_INCLUDE_DIR}) - gflags_report_not_found( - "Could not find gflags include directory, set GFLAGS_INCLUDE_DIR " - "to directory containing gflags/gflags.h") - endif (NOT GFLAGS_INCLUDE_DIR OR - NOT EXISTS ${GFLAGS_INCLUDE_DIR}) - - find_library(GFLAGS_LIBRARY NAMES gflags - PATHS ${GFLAGS_LIBRARY_DIR_HINTS} - ${GFLAGS_CHECK_LIBRARY_DIRS} - PATH_SUFFIXES ${GFLAGS_CHECK_LIBRARY_SUFFIXES}) - if (NOT GFLAGS_LIBRARY OR - NOT EXISTS ${GFLAGS_LIBRARY}) - gflags_report_not_found( - "Could not find gflags library, set GFLAGS_LIBRARY " - "to full path to libgflags.") - endif (NOT GFLAGS_LIBRARY OR - NOT EXISTS ${GFLAGS_LIBRARY}) - - # gflags typically requires a threading library (which is OS dependent), note - # that this defines the CMAKE_THREAD_LIBS_INIT variable. If we are able to - # detect threads, we assume that gflags requires it. - find_package(Threads QUIET) - set(GFLAGS_LINK_LIBRARIES ${CMAKE_THREAD_LIBS_INIT}) - # On Windows (including MinGW), the Shlwapi library is used by gflags if - # available. - if (WIN32) - include(CheckIncludeFileCXX) - check_include_file_cxx("shlwapi.h" HAVE_SHLWAPI) - if (HAVE_SHLWAPI) - list(APPEND GFLAGS_LINK_LIBRARIES shlwapi.lib) - endif(HAVE_SHLWAPI) - endif (WIN32) - - # Mark internally as found, then verify. GFLAGS_REPORT_NOT_FOUND() unsets - # if called. - set(GFLAGS_FOUND TRUE) - - # Identify what namespace gflags was built with. 
- if (GFLAGS_INCLUDE_DIR AND NOT GFLAGS_NAMESPACE) - # To handle Windows peculiarities / CMake bugs on MSVC we try two approaches - # to detect the gflags namespace: - # - # 1) Try to use check_cxx_source_compiles() to compile a trivial program - # with the two choices for the gflags namespace. - # - # 2) [In the event 1) fails] Use regex on the gflags headers to try to - # determine the gflags namespace. Whilst this is less robust than 1), - # it does avoid any interaction with msbuild. - gflags_check_gflags_namespace_using_try_compile() - - if (NOT GFLAGS_NAMESPACE) - # Failed to determine gflags namespace using check_cxx_source_compiles() - # method, try and obtain it using regex on the gflags headers instead. - message(STATUS "Failed to find gflags namespace using using " - "check_cxx_source_compiles(), trying namespace regex instead, " - "this is expected on Windows.") - gflags_check_gflags_namespace_using_regex() - - if (NOT GFLAGS_NAMESPACE) - gflags_report_not_found( - "Failed to determine gflags namespace either by " - "check_cxx_source_compiles(), or namespace regex.") - endif (NOT GFLAGS_NAMESPACE) - endif (NOT GFLAGS_NAMESPACE) - endif (GFLAGS_INCLUDE_DIR AND NOT GFLAGS_NAMESPACE) - - # Make the GFLAGS_NAMESPACE a cache variable s/t the user can view it, and could - # overwrite it in the CMake GUI. - set(GFLAGS_NAMESPACE "${GFLAGS_NAMESPACE}" CACHE STRING - "gflags namespace (google or gflags)" FORCE) - - # gflags does not seem to provide any record of the version in its - # source tree, thus cannot extract version. - - # Catch case when caller has set GFLAGS_NAMESPACE in the cache / GUI - # with an invalid value. 
- if (GFLAGS_NAMESPACE AND - NOT GFLAGS_NAMESPACE STREQUAL "google" AND - NOT GFLAGS_NAMESPACE STREQUAL "gflags") - gflags_report_not_found( - "Caller defined GFLAGS_NAMESPACE:" - " ${GFLAGS_NAMESPACE} is not valid, not google or gflags.") - endif () - # Catch case when caller has set GFLAGS_INCLUDE_DIR in the cache / GUI and - # thus FIND_[PATH/LIBRARY] are not called, but specified locations are - # invalid, otherwise we would report the library as found. - if (GFLAGS_INCLUDE_DIR AND - NOT EXISTS ${GFLAGS_INCLUDE_DIR}/gflags/gflags.h) - gflags_report_not_found( - "Caller defined GFLAGS_INCLUDE_DIR:" - " ${GFLAGS_INCLUDE_DIR} does not contain gflags/gflags.h header.") - endif (GFLAGS_INCLUDE_DIR AND - NOT EXISTS ${GFLAGS_INCLUDE_DIR}/gflags/gflags.h) - # TODO: This regex for gflags library is pretty primitive, we use lowercase - # for comparison to handle Windows using CamelCase library names, could - # this check be better? - string(TOLOWER "${GFLAGS_LIBRARY}" LOWERCASE_GFLAGS_LIBRARY) - if (GFLAGS_LIBRARY AND - NOT "${LOWERCASE_GFLAGS_LIBRARY}" MATCHES ".*gflags[^/]*") - gflags_report_not_found( - "Caller defined GFLAGS_LIBRARY: " - "${GFLAGS_LIBRARY} does not match gflags.") - endif (GFLAGS_LIBRARY AND - NOT "${LOWERCASE_GFLAGS_LIBRARY}" MATCHES ".*gflags[^/]*") - - gflags_reset_find_library_prefix() - -endif(NOT GFLAGS_FOUND) - -# Set standard CMake FindPackage variables if found. -if (GFLAGS_FOUND) - set(GFLAGS_INCLUDE_DIRS ${GFLAGS_INCLUDE_DIR}) - set(GFLAGS_LIBRARIES ${GFLAGS_LIBRARY} ${GFLAGS_LINK_LIBRARIES}) -endif (GFLAGS_FOUND) - -# Handle REQUIRED / QUIET optional arguments. -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args(Gflags DEFAULT_MSG - GFLAGS_INCLUDE_DIRS GFLAGS_LIBRARIES GFLAGS_NAMESPACE) - -# Only mark internal variables as advanced if we found gflags, otherwise -# leave them visible in the standard GUI for the user to set manually. 
-if (GFLAGS_FOUND) - mark_as_advanced(FORCE GFLAGS_INCLUDE_DIR - GFLAGS_LIBRARY - GFLAGS_NAMESPACE - gflags_DIR) # Autogenerated by find_package(gflags) -endif (GFLAGS_FOUND) diff --git a/cmake/util.cmake b/cmake/util.cmake index 43a56378df009..ab77d9e5defb9 100644 --- a/cmake/util.cmake +++ b/cmake/util.cmake @@ -110,8 +110,8 @@ function(link_paddle_exe TARGET_NAME) ${METRIC_LIBS} ${PROTOBUF_LIBRARY} ${LIBGLOG_LIBRARY} - ${GFLAGS_LIBRARIES} - ${CMAKE_THREAD_LIBS_INIT} + gflags + ${CMAKE_THREAD_LIBS_INIT} ${CBLAS_LIBS} ${ZLIB_LIBRARIES} ${INTERAL_LIBS} diff --git a/paddle/api/paddle_ld_flags.py b/paddle/api/paddle_ld_flags.py index b4d27b1cc728f..152085db847b4 100644 --- a/paddle/api/paddle_ld_flags.py +++ b/paddle/api/paddle_ld_flags.py @@ -50,7 +50,6 @@ def __init__(self): self.glog_libs = LIBGLOG_LIBRARY self.with_coverage = PaddleLDFlag.cmake_bool(WITH_COVERALLS) - self.gflags_libs = GFLAGS_LIBRARIES self.gflags_location = GFLAGS_LOCATION self.cblas_libs = CBLAS_LIBRARIES self.curt = CUDA_LIBRARIES @@ -88,7 +87,7 @@ def libs_str(self): "-lpaddle_api", self.normalize_flag(self.protolib), self.normalize_flag(self.glog_libs), - self.normalize_flag(self.gflags_libs), + self.normalize_flag("gflags"), self.normalize_flag(self.zlib), self.normalize_flag(self.thread), self.normalize_flag(self.dl_libs), @@ -114,10 +113,7 @@ def normalize_flag(self, cmake_flag): return cmake_flag elif cmake_flag.startswith("-l"): # normal link command return cmake_flag - elif cmake_flag in [ - "gflags-shared", "gflags-static", "gflags_nothreads-shared", - "gflags_nothreads-static" - ]: # special for gflags + elif cmake_flag == "gflags": # special for gflags assert PaddleLDFlag.cmake_bool(self.gflags_location) return self.gflags_location elif len(cmake_flag) != 0: From fdb64acc50e88ae08d074550de97fa39d151b653 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 5 Jan 2017 20:54:37 +0800 Subject: [PATCH 05/43] add unittest for prediction --- paddle/capi/Arguments.cpp | 52 ++++++++------ 
paddle/capi/CMakeLists.txt | 25 +------ paddle/capi/GradientMachine.cpp | 62 ++++++++++++++-- paddle/capi/Main.cpp | 2 +- paddle/capi/Matrix.cpp | 28 ++++---- paddle/capi/PaddleCAPI.h | 33 ++++++--- paddle/capi/PaddleCAPIPrivate.h | 24 ++++++- paddle/capi/Vector.cpp | 36 +++++----- paddle/capi/tests/.gitignore | 2 + paddle/capi/tests/CMakeLists.txt | 41 +++-------- paddle/capi/tests/test_Arguments.cpp | 32 +++++---- paddle/capi/tests/test_GradientMachine.cpp | 84 +++++++++++++++++++++- paddle/capi/tests/test_Matrix.cpp | 16 ++--- paddle/capi/tests/test_Vector.cpp | 9 +-- paddle/capi/tests/test_predict_network.py | 13 ++++ paddle/capi/tests/vgg_16_cifar.py | 1 - 16 files changed, 304 insertions(+), 156 deletions(-) create mode 100644 paddle/capi/tests/.gitignore create mode 100644 paddle/capi/tests/test_predict_network.py delete mode 120000 paddle/capi/tests/vgg_16_cifar.py diff --git a/paddle/capi/Arguments.cpp b/paddle/capi/Arguments.cpp index cf773a65872fc..b983d72bb4271 100644 --- a/paddle/capi/Arguments.cpp +++ b/paddle/capi/Arguments.cpp @@ -1,49 +1,61 @@ #include "PaddleCAPI.h" #include "PaddleCAPIPrivate.h" -#define cast(v) paddle::capi::cast(v) +using paddle::capi::cast; + +#define castArg(v) cast(v) +#define castIVec(v) cast(v) extern "C" { int PDArgsCreateNone(PD_Arguments* args) { auto ptr = new paddle::capi::CArguments(); *args = ptr; - return PD_NO_ERROR; + return kPD_NO_ERROR; } int PDArgsDestroy(PD_Arguments args) { - if (args == nullptr) return PD_NULLPTR; - delete cast(args); - return PD_NO_ERROR; + if (args == nullptr) return kPD_NULLPTR; + delete castArg(args); + return kPD_NO_ERROR; } int PDArgsGetSize(PD_Arguments args, uint64_t* size) { - if (args == nullptr || size == nullptr) return PD_NULLPTR; - *size = cast(args)->args.size(); - return PD_NO_ERROR; + if (args == nullptr || size == nullptr) return kPD_NULLPTR; + *size = castArg(args)->args.size(); + return kPD_NO_ERROR; } int PDArgsResize(PD_Arguments args, uint64_t size) { - if (args == 
nullptr) return PD_NULLPTR; - cast(args)->args.resize(size); - return PD_NO_ERROR; + if (args == nullptr) return kPD_NULLPTR; + castArg(args)->args.resize(size); + return kPD_NO_ERROR; } int PDArgsSetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat) { - if (args == nullptr || mat == nullptr) return PD_NULLPTR; + if (args == nullptr || mat == nullptr) return kPD_NULLPTR; auto m = paddle::capi::cast(mat); - if (m->mat == nullptr) return PD_NULLPTR; - auto a = cast(args); - if (ID >= a->args.size()) return PD_OUT_OF_RANGE; + if (m->mat == nullptr) return kPD_NULLPTR; + auto a = castArg(args); + if (ID >= a->args.size()) return kPD_OUT_OF_RANGE; a->args[ID].value = m->mat; - return PD_NO_ERROR; + return kPD_NO_ERROR; } int PDArgsGetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat) { - if (args == nullptr || mat == nullptr) return PD_NULLPTR; + if (args == nullptr || mat == nullptr) return kPD_NULLPTR; auto m = paddle::capi::cast(mat); - auto a = cast(args); - if (ID >= a->args.size()) return PD_OUT_OF_RANGE; + auto a = castArg(args); + if (ID >= a->args.size()) return kPD_OUT_OF_RANGE; m->mat = a->args[ID].value; - return PD_NO_ERROR; + return kPD_NO_ERROR; +} + +int PDArgsGetIds(PD_Arguments args, uint64_t ID, PD_IVector ids) { + if (args == nullptr || ids == nullptr) return kPD_NULLPTR; + auto iv = castIVec(ids); + auto a = castArg(args); + if (ID >= a->args.size()) return kPD_OUT_OF_RANGE; + iv->vec = a->args[ID].ids; + return kPD_NO_ERROR; } } diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt index 80cf2c7fa913d..93b6b41254d2e 100644 --- a/paddle/capi/CMakeLists.txt +++ b/paddle/capi/CMakeLists.txt @@ -12,34 +12,11 @@ set(CAPI_PRIVATE_HEADER PaddleCAPIPrivate.h) file(GLOB CAPI_SOURCES *.cpp) -add_library(paddle_capi SHARED ${CAPI_SOURCES}) +add_library(paddle_capi STATIC ${CAPI_SOURCES}) target_include_directories(paddle_capi PUBLIC ${CMAKE_CURRENT_BINARY_DIR}) add_dependencies(paddle_capi gen_proto_cpp) -target_link_libraries(paddle_capi - 
"-Wl,-force_load" - paddle_gserver - "-Wl,-force_load" - paddle_function - paddle_pserver - paddle_trainer_lib - paddle_network - paddle_math - paddle_utils - paddle_parameter - paddle_proto - paddle_cuda - ${PROTOBUF_LIBRARY} - ${LIBGLOG_LIBRARY} - gflags - ${CMAKE_THREAD_LIBS_INIT} - ${CBLAS_LIBS} - ${ZLIB_LIBRARIES} - ${INTERAL_LIBS} - ${CMAKE_DL_LIBS} - ${PYTHON_LIBRARIES}) - set(PADDLE_CAPI_INC_PATH ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}) diff --git a/paddle/capi/GradientMachine.cpp b/paddle/capi/GradientMachine.cpp index 2969b5f198f76..ef584ed8d0d0d 100644 --- a/paddle/capi/GradientMachine.cpp +++ b/paddle/capi/GradientMachine.cpp @@ -27,22 +27,76 @@ extern "C" { int PDGradientMachineCreateForPredict(PD_GradiemtMachine* machine, void* modelConfigProtobuf, int size) { - if (modelConfigProtobuf == nullptr) return PD_NULLPTR; + if (modelConfigProtobuf == nullptr) return kPD_NULLPTR; paddle::ModelConfig config; if (!config.ParseFromArray(modelConfigProtobuf, size) || !config.IsInitialized()) { - return PD_PROTOBUF_ERROR; + return kPD_PROTOBUF_ERROR; } auto ptr = new paddle::capi::CGradientMachine(); ptr->machine.reset(paddle::GradientMachine::create( config, CREATE_MODE_TESTING, {paddle::PARAMETER_VALUE})); *machine = ptr; - return PD_NO_ERROR; + return kPD_NO_ERROR; } int PDGradientMachineDestroy(PD_GradiemtMachine machine) { delete cast(machine); - return PD_NO_ERROR; + return kPD_NO_ERROR; +} + +int PDGradientMachineLoadParameterFromDisk(PD_GradiemtMachine machine, + const char* path) { + auto m = cast(machine); + if (m == nullptr || path == nullptr || m->machine == nullptr) + return kPD_NULLPTR; + m->machine->loadParameters(path); + return kPD_NO_ERROR; +} + +int PDGradientMachineForward(PD_GradiemtMachine machine, + PD_Arguments inArgs, + PD_Arguments outArgs, + bool isTrain) { + auto m = cast(machine); + auto in = paddle::capi::cast(inArgs); + auto out = paddle::capi::cast(outArgs); + if (m == nullptr || in == nullptr || out == nullptr || 
m->machine == nullptr) + return kPD_NULLPTR; + m->machine->forward( + in->args, &out->args, isTrain ? paddle::PASS_TRAIN : paddle::PASS_TEST); + return kPD_NO_ERROR; +} + +int PDGradientMachineCreateSharedParam(PD_GradiemtMachine origin, + void* modelConfigProtobuf, + int size, + PD_GradiemtMachine* slave) { + auto o = cast(origin); + if (origin == nullptr || slave == nullptr || o->machine == nullptr) { + return kPD_NULLPTR; + } + paddle::ModelConfig config; + if (!config.ParseFromArray(modelConfigProtobuf, size) || + !config.IsInitialized()) { + return kPD_PROTOBUF_ERROR; + } + + std::unique_ptr ptr( + new paddle::capi::CGradientMachine()); + auto nn = paddle::NeuralNetwork::create(config); + nn->init(config, + [&o](int paramId, paddle::Parameter* param) { + auto p = o->machine->getParameters()[paramId]; + param->enableSharedType(paddle::PARAMETER_VALUE, + p->getBuf(paddle::PARAMETER_VALUE)); + + }, + {paddle::PARAMETER_VALUE}, + false); + ptr->machine.reset(nn); + *slave = ptr.release(); + return kPD_NO_ERROR; } } diff --git a/paddle/capi/Main.cpp b/paddle/capi/Main.cpp index cc07e2ba4ef81..8cd0104be2e07 100644 --- a/paddle/capi/Main.cpp +++ b/paddle/capi/Main.cpp @@ -24,6 +24,6 @@ int PDInit(int argc, char** argv) { } initPaddle(argc + 1, realArgv.data()); free(realArgv[0]); - return PD_NO_ERROR; + return kPD_NO_ERROR; } } diff --git a/paddle/capi/Matrix.cpp b/paddle/capi/Matrix.cpp index 71598b1714d19..dc1b4f3379d2b 100644 --- a/paddle/capi/Matrix.cpp +++ b/paddle/capi/Matrix.cpp @@ -8,27 +8,27 @@ int PDMatCreate(PD_Matrix* mat, uint64_t height, uint64_t width, bool useGpu) { auto ptr = new paddle::capi::CMatrix(); ptr->mat = paddle::Matrix::create(height, width, false, useGpu); *mat = ptr; - return PD_NO_ERROR; + return kPD_NO_ERROR; } int PDMatCreateNone(PD_Matrix* mat) { auto ptr = new paddle::capi::CMatrix(); *mat = ptr; - return PD_NO_ERROR; + return kPD_NO_ERROR; } int PDMatDestroy(PD_Matrix mat) { - if (mat == nullptr) return PD_NULLPTR; + if (mat == 
nullptr) return kPD_NULLPTR; auto ptr = cast(mat); delete ptr; - return PD_NO_ERROR; + return kPD_NO_ERROR; } int PDMatCopyToRow(PD_Matrix mat, uint64_t rowID, pd_real* rowArray) { - if (mat == nullptr) return PD_NULLPTR; + if (mat == nullptr) return kPD_NULLPTR; auto ptr = cast(mat); - if (ptr->mat == nullptr) return PD_NULLPTR; - if (rowID >= ptr->mat->getHeight()) return PD_OUT_OF_RANGE; + if (ptr->mat == nullptr) return kPD_NULLPTR; + if (rowID >= ptr->mat->getHeight()) return kPD_OUT_OF_RANGE; paddle::real* buf = ptr->mat->getRowBuf(rowID); size_t width = ptr->mat->getWidth(); #ifndef PADDLE_ONLY_CPU @@ -36,26 +36,26 @@ int PDMatCopyToRow(PD_Matrix mat, uint64_t rowID, pd_real* rowArray) { #else std::copy(rowArray, rowArray + width, buf); #endif - return PD_NO_ERROR; + return kPD_NO_ERROR; } int PDMatGetRow(PD_Matrix mat, uint64_t rowID, pd_real** rawRowBuffer) { - if (mat == nullptr) return PD_NULLPTR; + if (mat == nullptr) return kPD_NULLPTR; auto ptr = cast(mat); - if (ptr->mat == nullptr) return PD_NULLPTR; - if (rowID >= ptr->mat->getHeight()) return PD_OUT_OF_RANGE; + if (ptr->mat == nullptr) return kPD_NULLPTR; + if (rowID >= ptr->mat->getHeight()) return kPD_OUT_OF_RANGE; *rawRowBuffer = ptr->mat->getRowBuf(rowID); - return PD_NO_ERROR; + return kPD_NO_ERROR; } int PDMatGetShape(PD_Matrix mat, uint64_t* height, uint64_t* width) { - if (mat == nullptr) return PD_NULLPTR; + if (mat == nullptr) return kPD_NULLPTR; if (height != nullptr) { *height = cast(mat)->mat->getHeight(); } if (width != nullptr) { *width = cast(mat)->mat->getWidth(); } - return PD_NO_ERROR; + return kPD_NO_ERROR; } } diff --git a/paddle/capi/PaddleCAPI.h b/paddle/capi/PaddleCAPI.h index 17a2498671859..b848603e8a345 100644 --- a/paddle/capi/PaddleCAPI.h +++ b/paddle/capi/PaddleCAPI.h @@ -8,20 +8,20 @@ extern "C" { #endif typedef enum { - PD_NO_ERROR = 0, - PD_NULLPTR = 1, - PD_OUT_OF_RANGE = 2, - PD_PROTOBUF_ERROR = 3, - PD_UNDEFINED_ERROR = -1, + kPD_NO_ERROR = 0, + kPD_NULLPTR = 1, 
+ kPD_OUT_OF_RANGE = 2, + kPD_PROTOBUF_ERROR = 3, + kPD_UNDEFINED_ERROR = -1, } PD_Error; -typedef void* PD_Vector; +typedef void* PD_IVector; -int PDVecCreate(PD_Vector* vec, uint64_t size, bool useGpu); +int PDIVecCreateNone(PD_IVector* ivec); -int PDVecDestroy(PD_Vector vec); +int PDIVecDestroy(PD_IVector ivec); -int PDVecIsSparse(PD_Vector vec, bool* isSparse); +int PDIVectorGet(PD_IVector ivec, int** buffer); typedef void* PD_Matrix; @@ -51,12 +51,27 @@ int PDArgsSetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat); int PDArgsGetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat); +int PDArgsGetIds(PD_Arguments args, uint64_t ID, PD_IVector ids); + typedef void* PD_GradiemtMachine; int PDGradientMachineCreateForPredict(PD_GradiemtMachine* machine, void* modelConfigProtobuf, int size); +int PDGradientMachineLoadParameterFromDisk(PD_GradiemtMachine machine, + const char* path); + +int PDGradientMachineForward(PD_GradiemtMachine machine, + PD_Arguments inArgs, + PD_Arguments outArgs, + bool isTrain); + +int PDGradientMachineCreateSharedParam(PD_GradiemtMachine origin, + void* modelConfigProtobuf, + int size, + PD_GradiemtMachine* slave); + int PDGradientMachineDestroy(PD_GradiemtMachine machine); int PDInit(int argc, char** argv); diff --git a/paddle/capi/PaddleCAPIPrivate.h b/paddle/capi/PaddleCAPIPrivate.h index 07e731f6cd0c2..1aae3cedf384b 100644 --- a/paddle/capi/PaddleCAPIPrivate.h +++ b/paddle/capi/PaddleCAPIPrivate.h @@ -8,20 +8,40 @@ namespace paddle { namespace capi { -struct CVector { - VectorPtr vec; +enum CType { kIVECTOR = 0, kMATRIX, kARGUMENTS, kGRADIENT_MACHINE }; + +#define STRUCT_HEADER CType type; + +struct CHeader { + STRUCT_HEADER +}; + +struct CIVector { + STRUCT_HEADER + IVectorPtr vec; + + CIVector() : type(kIVECTOR) {} }; struct CMatrix { + STRUCT_HEADER MatrixPtr mat; + + CMatrix() : type(kMATRIX) {} }; struct CArguments { + STRUCT_HEADER std::vector args; + + CArguments() : type(kARGUMENTS) {} }; struct CGradientMachine { + 
STRUCT_HEADER paddle::GradientMachinePtr machine; + + CGradientMachine() : type(kGRADIENT_MACHINE) {} }; template diff --git a/paddle/capi/Vector.cpp b/paddle/capi/Vector.cpp index 10dee7816c376..2ac795668ffc9 100644 --- a/paddle/capi/Vector.cpp +++ b/paddle/capi/Vector.cpp @@ -1,26 +1,28 @@ #include "PaddleCAPI.h" #include "PaddleCAPIPrivate.h" -#define cast(v) paddle::capi::cast(v) +using paddle::capi::cast; + extern "C" { -int PDVecCreate(PD_Vector* vec, uint64_t size, bool useGpu) { - auto ptr = new paddle::capi::CVector(); - ptr->vec = paddle::Vector::create(size, useGpu); - *vec = ptr; - return PD_NO_ERROR; + +int PDIVecCreateNone(PD_IVector* ivec) { + if (ivec == nullptr) return kPD_NULLPTR; + auto ptr = new paddle::capi::CIVector(); + *ivec = ptr; + return kPD_NO_ERROR; } -int PDVecDestroy(PD_Vector vec) { - auto v = cast(vec); - v->vec.reset(); - delete v; - return PD_NO_ERROR; + +int PDIVecDestroy(PD_IVector ivec) { + if (ivec == nullptr) return kPD_NULLPTR; + delete cast(ivec); + return kPD_NO_ERROR; } -int PDVecIsSparse(PD_Vector vec, bool* isSparse) { - if (isSparse == nullptr || vec == nullptr) { - return PD_NULLPTR; - } - *isSparse = cast(vec)->vec->isSparse(); - return PD_NO_ERROR; +int PDIVectorGet(PD_IVector ivec, int** buffer) { + if (ivec == nullptr || buffer == nullptr) return kPD_NULLPTR; + auto v = cast(ivec); + if (v->vec == nullptr) return kPD_NULLPTR; + *buffer = v->vec->getData(); + return kPD_NO_ERROR; } } diff --git a/paddle/capi/tests/.gitignore b/paddle/capi/tests/.gitignore new file mode 100644 index 0000000000000..7ab6be95e397f --- /dev/null +++ b/paddle/capi/tests/.gitignore @@ -0,0 +1,2 @@ +w +b diff --git a/paddle/capi/tests/CMakeLists.txt b/paddle/capi/tests/CMakeLists.txt index e54a53e2935b4..d81453982bfcf 100644 --- a/paddle/capi/tests/CMakeLists.txt +++ b/paddle/capi/tests/CMakeLists.txt @@ -1,35 +1,10 @@ -function(add_capi_unittest_without_exec TARGET_NAME) - set(with_test_main ON) - set(sources) - foreach(source_file 
${ARGN}) - if (${source_file} STREQUAL "NO_MAIN") - set(with_test_main OFF) - else() - list(APPEND sources ${source_file}) - endif() - endforeach() - - add_executable( - ${TARGET_NAME} - ${sources}) - - target_link_libraries( - ${TARGET_NAME} - paddle_capi - ${GTEST_LIBRARIES}) - - if (with_test_main) - target_link_libraries( - ${TARGET_NAME} paddle_test_main) - endif() - target_include_directories(${TARGET_NAME} PUBLIC ${PADDLE_CAPI_INC_PATH}) -endfunction() - -function(add_capi_unittest TARGET_NAME) - add_capi_unittest_without_exec(${TARGET_NAME} ${ARGN}) - add_test(NAME ${TARGET_NAME} COMMAND ${TARGET_NAME}) -endfunction() -add_capi_unittest(capi_test_mats test_Vector.cpp +add_unittest(capi_test_mats test_Vector.cpp test_Matrix.cpp test_Arguments.cpp) -add_capi_unittest(capi_test_gradientMachine NO_MAIN test_GradientMachine.cpp) +target_include_directories(capi_test_mats PUBLIC ${PADDLE_CAPI_INC_PATH}) +target_link_libraries(capi_test_mats paddle_capi) +add_unittest(capi_test_gradientMachine test_GradientMachine.cpp) + +target_include_directories(capi_test_gradientMachine PUBLIC + ${PADDLE_CAPI_INC_PATH}) +target_link_libraries(capi_test_gradientMachine paddle_capi) diff --git a/paddle/capi/tests/test_Arguments.cpp b/paddle/capi/tests/test_Arguments.cpp index c74abd60d1b10..fe9762deed9b3 100644 --- a/paddle/capi/tests/test_Arguments.cpp +++ b/paddle/capi/tests/test_Arguments.cpp @@ -15,40 +15,44 @@ static std::vector randomBuffer(size_t bufSize) { TEST(CAPIArguments, create) { PD_Arguments args; - ASSERT_EQ(PD_NO_ERROR, PDArgsCreateNone(&args)); + ASSERT_EQ(kPD_NO_ERROR, PDArgsCreateNone(&args)); uint64_t size; - ASSERT_EQ(PD_NO_ERROR, PDArgsGetSize(args, &size)); + ASSERT_EQ(kPD_NO_ERROR, PDArgsGetSize(args, &size)); ASSERT_EQ(0UL, size); - ASSERT_EQ(PD_NO_ERROR, PDArgsDestroy(args)); + ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(args)); } TEST(CAPIArguments, value) { PD_Arguments args; - ASSERT_EQ(PD_NO_ERROR, PDArgsCreateNone(&args)); - ASSERT_EQ(PD_NO_ERROR, 
PDArgsResize(args, 1)); + ASSERT_EQ(kPD_NO_ERROR, PDArgsCreateNone(&args)); + ASSERT_EQ(kPD_NO_ERROR, PDArgsResize(args, 1)); PD_Matrix mat; - ASSERT_EQ(PD_NO_ERROR, PDMatCreate(&mat, 128, 64, false)); + ASSERT_EQ(kPD_NO_ERROR, PDMatCreate(&mat, 128, 64, false)); for (size_t i = 0; i < 128; ++i) { std::vector sampleBuf = randomBuffer(64); PDMatCopyToRow(mat, i, sampleBuf.data()); } - ASSERT_EQ(PD_NO_ERROR, PDArgsSetValue(args, 0, mat)); + ASSERT_EQ(kPD_NO_ERROR, PDArgsSetValue(args, 0, mat)); PD_Matrix val; - ASSERT_EQ(PD_NO_ERROR, PDMatCreateNone(&val)); + ASSERT_EQ(kPD_NO_ERROR, PDMatCreateNone(&val)); - ASSERT_EQ(PD_NO_ERROR, PDArgsGetValue(args, 0, val)); + ASSERT_EQ(kPD_NO_ERROR, PDArgsGetValue(args, 0, val)); for (size_t i = 0; i < 128; ++i) { pd_real* row1; pd_real* row2; - ASSERT_EQ(PD_NO_ERROR, PDMatGetRow(mat, i, &row1)); - ASSERT_EQ(PD_NO_ERROR, PDMatGetRow(val, i, &row2)); + ASSERT_EQ(kPD_NO_ERROR, PDMatGetRow(mat, i, &row1)); + ASSERT_EQ(kPD_NO_ERROR, PDMatGetRow(val, i, &row2)); ASSERT_EQ(row1, row2); } - ASSERT_EQ(PD_NO_ERROR, PDMatDestroy(val)); - ASSERT_EQ(PD_NO_ERROR, PDMatDestroy(mat)); - ASSERT_EQ(PD_NO_ERROR, PDArgsDestroy(args)); + + PD_IVector ivec; + ASSERT_EQ(kPD_NO_ERROR, PDIVecCreateNone(&ivec)); + ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(ivec)); + ASSERT_EQ(kPD_NO_ERROR, PDMatDestroy(val)); + ASSERT_EQ(kPD_NO_ERROR, PDMatDestroy(mat)); + ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(args)); } diff --git a/paddle/capi/tests/test_GradientMachine.cpp b/paddle/capi/tests/test_GradientMachine.cpp index f07d1e4e7294f..63fb47bd27be2 100644 --- a/paddle/capi/tests/test_GradientMachine.cpp +++ b/paddle/capi/tests/test_GradientMachine.cpp @@ -1,18 +1,96 @@ #include +#include #include #include #include +#include #include "PaddleCAPI.h" +#include "paddle/utils/ThreadLocal.h" -TEST(GradientMachine, load) { - paddle::TrainerConfigHelper config("./vgg_16_cifar.py"); +static std::vector randomBuffer(size_t bufSize) { + auto& eng = 
paddle::ThreadLocalRandomEngine::get(); + std::uniform_real_distribution dist(-1.0, 1.0); + std::vector retv; + retv.reserve(bufSize); + for (size_t i = 0; i < bufSize; ++i) { + retv.push_back(dist(eng)); + } + return retv; +} + +TEST(GradientMachine, testPredict) { + paddle::TrainerConfigHelper config("./test_predict_network.py"); std::string buffer; ASSERT_TRUE(config.getModelConfig().SerializeToString(&buffer)); PD_GradiemtMachine machine; - ASSERT_EQ(PD_NO_ERROR, + ASSERT_EQ(kPD_NO_ERROR, PDGradientMachineCreateForPredict( &machine, &buffer[0], (int)buffer.size())); + std::unique_ptr gm( + paddle::GradientMachine::create(config.getModelConfig())); + ASSERT_NE(nullptr, gm); + gm->randParameters(); + gm->saveParameters("./"); + + ASSERT_EQ(kPD_NO_ERROR, + PDGradientMachineLoadParameterFromDisk(machine, "./")); + + PD_GradiemtMachine machineSlave; + ASSERT_EQ(kPD_NO_ERROR, + PDGradientMachineCreateSharedParam( + machine, &buffer[0], (int)buffer.size(), &machineSlave)); + std::swap(machineSlave, machine); + PD_Arguments outArgs; + ASSERT_EQ(kPD_NO_ERROR, PDArgsCreateNone(&outArgs)); + + PD_Arguments inArgs; + ASSERT_EQ(kPD_NO_ERROR, PDArgsCreateNone(&inArgs)); + ASSERT_EQ(kPD_NO_ERROR, PDArgsResize(inArgs, 1)); + PD_Matrix mat; + ASSERT_EQ(kPD_NO_ERROR, PDMatCreate(&mat, 1, 100, false)); + static_assert(std::is_same::value, ""); + + auto data = randomBuffer(100); + pd_real* rowPtr; + ASSERT_EQ(kPD_NO_ERROR, PDMatGetRow(mat, 0, &rowPtr)); + memcpy(rowPtr, data.data(), data.size() * sizeof(pd_real)); + + ASSERT_EQ(kPD_NO_ERROR, PDArgsSetValue(inArgs, 0, mat)); + ASSERT_EQ(kPD_NO_ERROR, + PDGradientMachineForward(machine, inArgs, outArgs, false)); + + uint64_t sz; + ASSERT_EQ(kPD_NO_ERROR, PDArgsGetSize(outArgs, &sz)); + ASSERT_EQ(1UL, sz); + + ASSERT_EQ(kPD_NO_ERROR, PDArgsGetValue(outArgs, 0, mat)); + std::vector paddleInArgs; + std::vector paddleOutArgs; + paddleInArgs.resize(1); + paddleInArgs[0].value = + paddle::Matrix::create(data.data(), 1, 100, false, false); 
+ + gm->forward(paddleInArgs, &paddleOutArgs, paddle::PASS_TEST); + + auto matPaddle = paddleOutArgs[0].value; + + uint64_t height, width; + ASSERT_EQ(kPD_NO_ERROR, PDMatGetShape(mat, &height, &width)); + ASSERT_EQ(matPaddle->getHeight(), height); + ASSERT_EQ(matPaddle->getWidth(), width); + + ASSERT_EQ(kPD_NO_ERROR, PDMatGetRow(mat, 0, &rowPtr)); + for (size_t i = 0; i < width; ++i) { + ASSERT_NEAR(matPaddle->getData()[i], rowPtr[i], 1e-5); + } + + ASSERT_EQ(kPD_NO_ERROR, PDMatDestroy(mat)); + ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(inArgs)); + ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(outArgs)); + std::swap(machineSlave, machine); + ASSERT_EQ(kPD_NO_ERROR, PDGradientMachineDestroy(machineSlave)); + ASSERT_EQ(kPD_NO_ERROR, PDGradientMachineDestroy(machine)); } int main(int argc, char** argv) { diff --git a/paddle/capi/tests/test_Matrix.cpp b/paddle/capi/tests/test_Matrix.cpp index 0f04a4683049a..97913f7229f3e 100644 --- a/paddle/capi/tests/test_Matrix.cpp +++ b/paddle/capi/tests/test_Matrix.cpp @@ -3,31 +3,31 @@ TEST(CAPIMatrix, create) { PD_Matrix mat; - ASSERT_EQ(PD_NO_ERROR, PDMatCreate(&mat, 128, 32, false)); + ASSERT_EQ(kPD_NO_ERROR, PDMatCreate(&mat, 128, 32, false)); std::vector sampleRow; sampleRow.resize(32); for (size_t i = 0; i < sampleRow.size(); ++i) { sampleRow[i] = 1.0 / (i + 1.0); } - ASSERT_EQ(PD_NO_ERROR, PDMatCopyToRow(mat, 0, sampleRow.data())); - ASSERT_EQ(PD_OUT_OF_RANGE, PDMatCopyToRow(mat, 128, sampleRow.data())); + ASSERT_EQ(kPD_NO_ERROR, PDMatCopyToRow(mat, 0, sampleRow.data())); + ASSERT_EQ(kPD_OUT_OF_RANGE, PDMatCopyToRow(mat, 128, sampleRow.data())); pd_real* arrayPtr; - ASSERT_EQ(PD_NO_ERROR, PDMatGetRow(mat, 0, &arrayPtr)); + ASSERT_EQ(kPD_NO_ERROR, PDMatGetRow(mat, 0, &arrayPtr)); for (size_t i = 0; i < sampleRow.size(); ++i) { ASSERT_NEAR(sampleRow[i], arrayPtr[i], 1e-5); } uint64_t height, width; - ASSERT_EQ(PD_NO_ERROR, PDMatGetShape(mat, &height, &width)); + ASSERT_EQ(kPD_NO_ERROR, PDMatGetShape(mat, &height, &width)); ASSERT_EQ(128, 
height); ASSERT_EQ(32, width); - ASSERT_EQ(PD_NO_ERROR, PDMatDestroy(mat)); + ASSERT_EQ(kPD_NO_ERROR, PDMatDestroy(mat)); } TEST(CAPIMatrix, createNone) { PD_Matrix mat; - ASSERT_EQ(PD_NO_ERROR, PDMatCreateNone(&mat)); - ASSERT_EQ(PD_NO_ERROR, PDMatDestroy(mat)); + ASSERT_EQ(kPD_NO_ERROR, PDMatCreateNone(&mat)); + ASSERT_EQ(kPD_NO_ERROR, PDMatDestroy(mat)); } diff --git a/paddle/capi/tests/test_Vector.cpp b/paddle/capi/tests/test_Vector.cpp index dbb987d440a84..907a63bc9e03d 100644 --- a/paddle/capi/tests/test_Vector.cpp +++ b/paddle/capi/tests/test_Vector.cpp @@ -2,10 +2,7 @@ #include "gtest/gtest.h" TEST(CAPIVector, create) { - PD_Vector tmp; - ASSERT_EQ(PD_NO_ERROR, PDVecCreate(&tmp, 128, false)); - bool isSparse; - ASSERT_EQ(PD_NO_ERROR, PDVecIsSparse(tmp, &isSparse)); - ASSERT_FALSE(isSparse); - ASSERT_EQ(PD_NO_ERROR, PDVecDestroy(tmp)); + PD_IVector vec; + ASSERT_EQ(kPD_NO_ERROR, PDIVecCreateNone(&vec)); + ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(vec)); } diff --git a/paddle/capi/tests/test_predict_network.py b/paddle/capi/tests/test_predict_network.py new file mode 100644 index 0000000000000..82ef5cb1a7039 --- /dev/null +++ b/paddle/capi/tests/test_predict_network.py @@ -0,0 +1,13 @@ +from paddle.trainer_config_helpers import * + +settings(batch_size=100) + +x = data_layer(name='x', size=100) + +y = fc_layer( + input=x, + size=100, + bias_attr=ParamAttr(name='b'), + param_attr=ParamAttr(name='w')) + +outputs(y) diff --git a/paddle/capi/tests/vgg_16_cifar.py b/paddle/capi/tests/vgg_16_cifar.py deleted file mode 120000 index 81250eefde639..0000000000000 --- a/paddle/capi/tests/vgg_16_cifar.py +++ /dev/null @@ -1 +0,0 @@ -../../../demo/image_classification/vgg_16_cifar.py \ No newline at end of file From 873368f8422ea5815e6f0c02df84ec1960438b4a Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 10 Jan 2017 16:53:32 +0800 Subject: [PATCH 06/43] Add style check to target --- paddle/capi/Arguments.cpp | 14 ++++++++++++++ paddle/capi/CMakeLists.txt | 2 ++ 
paddle/capi/GradientMachine.cpp | 17 +++++++++++++++-- paddle/capi/Main.cpp | 14 ++++++++++++++ paddle/capi/Matrix.cpp | 14 ++++++++++++++ paddle/capi/PaddleCAPI.h | 21 ++++++++++++++++++--- paddle/capi/PaddleCAPIPrivate.h | 18 ++++++++++++++++-- paddle/capi/Vector.cpp | 14 ++++++++++++++ paddle/capi/tests/test_Arguments.cpp | 14 ++++++++++++++ paddle/capi/tests/test_GradientMachine.cpp | 14 ++++++++++++++ paddle/capi/tests/test_Matrix.cpp | 18 ++++++++++++++++-- paddle/capi/tests/test_Vector.cpp | 14 ++++++++++++++ 12 files changed, 165 insertions(+), 9 deletions(-) diff --git a/paddle/capi/Arguments.cpp b/paddle/capi/Arguments.cpp index b983d72bb4271..60bdea15ae76d 100644 --- a/paddle/capi/Arguments.cpp +++ b/paddle/capi/Arguments.cpp @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + #include "PaddleCAPI.h" #include "PaddleCAPIPrivate.h" diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt index 93b6b41254d2e..6eb1e9949a466 100644 --- a/paddle/capi/CMakeLists.txt +++ b/paddle/capi/CMakeLists.txt @@ -15,6 +15,8 @@ file(GLOB CAPI_SOURCES *.cpp) add_library(paddle_capi STATIC ${CAPI_SOURCES}) target_include_directories(paddle_capi PUBLIC ${CMAKE_CURRENT_BINARY_DIR}) +add_style_check_target(paddle_capi ${CAPI_SOURCES} ${CAPI_HEADER} + ${CAPI_PRIVATE_HEADER}) add_dependencies(paddle_capi gen_proto_cpp) set(PADDLE_CAPI_INC_PATH diff --git a/paddle/capi/GradientMachine.cpp b/paddle/capi/GradientMachine.cpp index ef584ed8d0d0d..8299e6442f667 100644 --- a/paddle/capi/GradientMachine.cpp +++ b/paddle/capi/GradientMachine.cpp @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + #include "PaddleCAPI.h" #include "PaddleCAPIPrivate.h" #include "paddle/gserver/gradientmachines/NeuralNetwork.h" @@ -21,7 +35,7 @@ NeuralNetwork* newCustomNerualNetwork(const std::string& name, NeuralNetwork* network) { return new MyNeuralNetwork(name, network); } -} +} // namespace paddle extern "C" { int PDGradientMachineCreateForPredict(PD_GradiemtMachine* machine, @@ -91,7 +105,6 @@ int PDGradientMachineCreateSharedParam(PD_GradiemtMachine origin, auto p = o->machine->getParameters()[paramId]; param->enableSharedType(paddle::PARAMETER_VALUE, p->getBuf(paddle::PARAMETER_VALUE)); - }, {paddle::PARAMETER_VALUE}, false); diff --git a/paddle/capi/Main.cpp b/paddle/capi/Main.cpp index 8cd0104be2e07..e310eb540475d 100644 --- a/paddle/capi/Main.cpp +++ b/paddle/capi/Main.cpp @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + #include #include #include diff --git a/paddle/capi/Matrix.cpp b/paddle/capi/Matrix.cpp index dc1b4f3379d2b..db32c945412ec 100644 --- a/paddle/capi/Matrix.cpp +++ b/paddle/capi/Matrix.cpp @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + #include "PaddleCAPI.h" #include "PaddleCAPIPrivate.h" #include "hl_cuda.h" diff --git a/paddle/capi/PaddleCAPI.h b/paddle/capi/PaddleCAPI.h index b848603e8a345..8cd78429f34cd 100644 --- a/paddle/capi/PaddleCAPI.h +++ b/paddle/capi/PaddleCAPI.h @@ -1,5 +1,19 @@ -#ifndef __PADDLE_PADDLE_CAPI_PADDLECAPI_H_INCLUDED__ -#define __PADDLE_PADDLE_CAPI_PADDLECAPI_H_INCLUDED__ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#ifndef PADDLECAPI_H_ +#define PADDLECAPI_H_ #include #include #include "config.h" @@ -79,4 +93,5 @@ int PDInit(int argc, char** argv); #ifdef __cplusplus } #endif -#endif + +#endif // PADDLECAPI_H_ diff --git a/paddle/capi/PaddleCAPIPrivate.h b/paddle/capi/PaddleCAPIPrivate.h index 1aae3cedf384b..bb8baea4e1cd7 100644 --- a/paddle/capi/PaddleCAPIPrivate.h +++ b/paddle/capi/PaddleCAPIPrivate.h @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + #include "PaddleCAPI.h" #include "paddle/gserver/gradientmachines/GradientMachine.h" #include "paddle/math/Matrix.h" @@ -48,5 +62,5 @@ template inline T* cast(void* ptr) { return reinterpret_cast(ptr); } -} -} +} // namespace capi +} // namespace paddle diff --git a/paddle/capi/Vector.cpp b/paddle/capi/Vector.cpp index 2ac795668ffc9..5b4fe0666cbba 100644 --- a/paddle/capi/Vector.cpp +++ b/paddle/capi/Vector.cpp @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + #include "PaddleCAPI.h" #include "PaddleCAPIPrivate.h" diff --git a/paddle/capi/tests/test_Arguments.cpp b/paddle/capi/tests/test_Arguments.cpp index fe9762deed9b3..4a18ffbf47413 100644 --- a/paddle/capi/tests/test_Arguments.cpp +++ b/paddle/capi/tests/test_Arguments.cpp @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + #include "PaddleCAPI.h" #include "gtest/gtest.h" #include "paddle/utils/ThreadLocal.h" diff --git a/paddle/capi/tests/test_GradientMachine.cpp b/paddle/capi/tests/test_GradientMachine.cpp index 63fb47bd27be2..fcade7fb5c910 100644 --- a/paddle/capi/tests/test_GradientMachine.cpp +++ b/paddle/capi/tests/test_GradientMachine.cpp @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + #include #include #include diff --git a/paddle/capi/tests/test_Matrix.cpp b/paddle/capi/tests/test_Matrix.cpp index 97913f7229f3e..4192dd6bfb533 100644 --- a/paddle/capi/tests/test_Matrix.cpp +++ b/paddle/capi/tests/test_Matrix.cpp @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + #include "PaddleCAPI.h" #include "gtest/gtest.h" @@ -21,8 +35,8 @@ TEST(CAPIMatrix, create) { uint64_t height, width; ASSERT_EQ(kPD_NO_ERROR, PDMatGetShape(mat, &height, &width)); - ASSERT_EQ(128, height); - ASSERT_EQ(32, width); + ASSERT_EQ(128UL, height); + ASSERT_EQ(32UL, width); ASSERT_EQ(kPD_NO_ERROR, PDMatDestroy(mat)); } diff --git a/paddle/capi/tests/test_Vector.cpp b/paddle/capi/tests/test_Vector.cpp index 907a63bc9e03d..122f7df176d4c 100644 --- a/paddle/capi/tests/test_Vector.cpp +++ b/paddle/capi/tests/test_Vector.cpp @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + #include "PaddleCAPI.h" #include "gtest/gtest.h" From fe8d5ff39f3c628c294fce0d7f88ee8d5626329e Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 10 Jan 2017 17:36:12 +0800 Subject: [PATCH 07/43] Add WITH_C_API option --- CMakeLists.txt | 2 ++ paddle/CMakeLists.txt | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8f53abacb4052..2048796ef3e06 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -43,6 +43,7 @@ option(WITH_DOC "Compile PaddlePaddle with documentation" OFF) option(ON_COVERALLS "Compile PaddlePaddle with code coverage" OFF) option(COVERALLS_UPLOAD "Package code coverage data to coveralls" OFF) option(ON_TRAVIS "Exclude special unit test on Travis CI" OFF) +option(WITH_C_API "Compile PaddlePaddle with C-API(Prediction)" ON) # CMAKE_BUILD_TYPE if(NOT CMAKE_BUILD_TYPE) @@ -53,6 +54,7 @@ endif() set(THIRD_PARTY_PATH "${PROJ_ROOT}/third_party" CACHE STRING "A path setting third party libraries download & build directories.") + ######################################################################################## include(external/zlib) # download, build, install zlib diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt index b3f3b2fbcedd7..48905f3237326 100644 --- a/paddle/CMakeLists.txt +++ b/paddle/CMakeLists.txt @@ -8,7 +8,9 @@ add_subdirectory(gserver) add_subdirectory(pserver) add_subdirectory(trainer) add_subdirectory(scripts) -add_subdirectory(capi) +if(WITH_C_API) + add_subdirectory(capi) +endif() configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in ${CMAKE_CURRENT_SOURCE_DIR}/setup.py) From 06b1a6a6805c43bd1415493c4d28c2f787fb7614 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 11 Jan 2017 11:39:07 +0800 Subject: [PATCH 08/43] Fix unittest --- paddle/capi/tests/CMakeLists.txt | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/paddle/capi/tests/CMakeLists.txt b/paddle/capi/tests/CMakeLists.txt index d81453982bfcf..d73f6b7733950 100644 --- 
a/paddle/capi/tests/CMakeLists.txt +++ b/paddle/capi/tests/CMakeLists.txt @@ -3,8 +3,12 @@ add_unittest(capi_test_mats test_Vector.cpp target_include_directories(capi_test_mats PUBLIC ${PADDLE_CAPI_INC_PATH}) target_link_libraries(capi_test_mats paddle_capi) -add_unittest(capi_test_gradientMachine test_GradientMachine.cpp) + +add_unittest_without_exec(capi_test_gradientMachine test_GradientMachine.cpp) target_include_directories(capi_test_gradientMachine PUBLIC ${PADDLE_CAPI_INC_PATH}) target_link_libraries(capi_test_gradientMachine paddle_capi) +add_test(NAME capi_test_gradientMachine + COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python ${CMAKE_CURRENT_BINARY_DIR}/capi_test_gradientMachine + WORKING_DIRECTORY ${PROJ_ROOT}/paddle/capi/tests) From 4fd6888eb1786acca863d8db5e0fdb8fbee15ada Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 11 Jan 2017 13:11:05 +0800 Subject: [PATCH 09/43] C-API for model inference. --- paddle/capi/CMakeLists.txt | 48 ++++++++++++++++++++++++++++++++++---- 1 file changed, 44 insertions(+), 4 deletions(-) diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt index 6eb1e9949a466..38277915716ed 100644 --- a/paddle/capi/CMakeLists.txt +++ b/paddle/capi/CMakeLists.txt @@ -4,21 +4,61 @@ else () set(PADDLE_FLOAT_TYPE float) endif() +# config.h used for C-API. It will store Paddle building configuration as a +# header. Make user just include PaddleCAPI.h then can get building +# configuration without explicitly set -DPADDLE_WITH_DOUBLE when building their +# libraries. configure_file(config.h.in config.h @ONLY) -set(CAPI_HEADER - PaddleCAPI.h) -set(CAPI_PRIVATE_HEADER - PaddleCAPIPrivate.h) +# PaddleCAPI.h is the only header we exposed. It currently only used for model +# inference. 
+set(CAPI_HEADER PaddleCAPI.h) + + +set(CAPI_PRIVATE_HEADER PaddleCAPIPrivate.h) file(GLOB CAPI_SOURCES *.cpp) +# building paddle_capi add_library(paddle_capi STATIC ${CAPI_SOURCES}) target_include_directories(paddle_capi PUBLIC ${CMAKE_CURRENT_BINARY_DIR}) + add_style_check_target(paddle_capi ${CAPI_SOURCES} ${CAPI_HEADER} ${CAPI_PRIVATE_HEADER}) + add_dependencies(paddle_capi gen_proto_cpp) + +# combine all paddle static libraries together, into libpaddle_capi_whole.a +# user should use PaddleCAPI as -lpaddle_capi_whole +set(capi_whole_library libpaddle_capi_whole.a) +add_custom_target(paddle_capi_whole + COMMAND mkdir -p o_files/capi && cd o_files/capi/ && ar -x $ + COMMAND mkdir -p o_files/utils && cd o_files/utils/ && ar -x $ + COMMAND mkdir -p o_files/parameter && cd o_files/parameter/ && ar -x $ + COMMAND mkdir -p o_files/math && cd o_files/math/ && ar -x $ + COMMAND mkdir -p o_files/cuda && cd o_files/cuda/ && ar -x $ + COMMAND mkdir -p o_files/function && cd o_files/function/ && ar -x $ + COMMAND mkdir -p o_files/pserver && cd o_files/pserver/ && ar -x $ + COMMAND mkdir -p o_files/gserver && cd o_files/gserver/ && ar -x $ + COMMAND mkdir -p o_files/proto && cd o_files/proto/ && ar -x $ + COMMAND ar crs ${capi_whole_library} `find ./o_files -name '*.o'` + COMMAND rm -rf o_files + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + DEPENDS paddle_capi paddle_utils paddle_parameter paddle_math + paddle_cuda paddle_function paddle_pserver paddle_gserver + paddle_proto + ) +set_target_properties(paddle_capi_whole + PROPERTIES IMPORTED_LOCATION ${CMAKE_CURRENT_BINARY_DIR}/${capi_whole_library}) + +# install library & headers. 
+install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${capi_whole_library} DESTINATION lib) +install(FILES ${CAPI_HEADER} DESTINATION include/paddle) +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/config.h DESTINATION include/paddle) + + +# this variable used for unittest set(PADDLE_CAPI_INC_PATH ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}) From 005ac1f8496f4c17287b32580778dd94a98324e5 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 11 Jan 2017 14:12:35 +0800 Subject: [PATCH 10/43] Add warning message --- CMakeLists.txt | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 2048796ef3e06..c80c5c03c618d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -43,7 +43,7 @@ option(WITH_DOC "Compile PaddlePaddle with documentation" OFF) option(ON_COVERALLS "Compile PaddlePaddle with code coverage" OFF) option(COVERALLS_UPLOAD "Package code coverage data to coveralls" OFF) option(ON_TRAVIS "Exclude special unit test on Travis CI" OFF) -option(WITH_C_API "Compile PaddlePaddle with C-API(Prediction)" ON) +option(WITH_C_API "Compile PaddlePaddle with C-API(Prediction)" OFF) # CMAKE_BUILD_TYPE if(NOT CMAKE_BUILD_TYPE) @@ -55,6 +55,12 @@ endif() set(THIRD_PARTY_PATH "${PROJ_ROOT}/third_party" CACHE STRING "A path setting third party libraries download & build directories.") +if (WITH_C_API AND WITH_PYTHON) + message(WARNING "It is suggest not embedded a python interpreter in Paddle " + "when using C-API. 
It will give an unpredictable behavior when using a " + "different Python interpreter from compiling.") +endif() + ######################################################################################## include(external/zlib) # download, build, install zlib From 3bc0d8b2c1892442be03ef4df490870fb9168898 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 11 Jan 2017 14:13:48 +0800 Subject: [PATCH 11/43] Revert unchanged files --- paddle/api/paddle_ld_flags.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/paddle/api/paddle_ld_flags.py b/paddle/api/paddle_ld_flags.py index 59b0657bd0e39..ad5dce209bf8e 100644 --- a/paddle/api/paddle_ld_flags.py +++ b/paddle/api/paddle_ld_flags.py @@ -50,6 +50,7 @@ def __init__(self): self.glog_libs = GLOG_LIBRARIES self.with_coverage = PaddleLDFlag.cmake_bool(WITH_COVERALLS) + self.gflags_libs = GFLAGS_LIBRARIES self.gflags_location = GFLAGS_LOCATION self.cblas_libs = CBLAS_LIBRARIES self.curt = CUDA_LIBRARIES @@ -87,7 +88,7 @@ def libs_str(self): "-lpaddle_api", self.normalize_flag(self.protolib), self.normalize_flag(self.glog_libs), - self.normalize_flag("gflags"), + self.normalize_flag(self.gflags_libs), self.normalize_flag(self.zlib), self.normalize_flag(self.thread), self.normalize_flag(self.dl_libs), @@ -113,7 +114,10 @@ def normalize_flag(self, cmake_flag): return cmake_flag elif cmake_flag.startswith("-l"): # normal link command return cmake_flag - elif cmake_flag == "gflags": # special for gflags + elif cmake_flag in [ + "gflags-shared", "gflags-static", "gflags_nothreads-shared", + "gflags_nothreads-static" + ]: # special for gflags assert PaddleLDFlag.cmake_bool(self.gflags_location) return self.gflags_location elif len(cmake_flag) != 0: From 987a908f294f9c96c8d7c1def850cd3945e39c65 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 12 Jan 2017 14:27:24 +0800 Subject: [PATCH 12/43] Fix a bug, should be ALL in custom_target --- paddle/capi/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt index 38277915716ed..172be7b1224a7 100644 --- a/paddle/capi/CMakeLists.txt +++ b/paddle/capi/CMakeLists.txt @@ -32,7 +32,7 @@ add_dependencies(paddle_capi gen_proto_cpp) # combine all paddle static libraries together, into libpaddle_capi_whole.a # user should use PaddleCAPI as -lpaddle_capi_whole set(capi_whole_library libpaddle_capi_whole.a) -add_custom_target(paddle_capi_whole +add_custom_target(paddle_capi_whole ALL COMMAND mkdir -p o_files/capi && cd o_files/capi/ && ar -x $ COMMAND mkdir -p o_files/utils && cd o_files/utils/ && ar -x $ COMMAND mkdir -p o_files/parameter && cd o_files/parameter/ && ar -x $ From 3b5bed68d0fec8e95eb5a706fd17e80662f27835 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 12 Jan 2017 15:20:57 +0800 Subject: [PATCH 13/43] Add dump binary config --- python/paddle/utils/dump_config.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/python/paddle/utils/dump_config.py b/python/paddle/utils/dump_config.py index 73bf349c46726..d1d54b6a2951a 100644 --- a/python/paddle/utils/dump_config.py +++ b/python/paddle/utils/dump_config.py @@ -20,6 +20,7 @@ if __name__ == '__main__': whole_conf = False + binary = False if len(sys.argv) == 2: conf = parse_config(sys.argv[1], '') elif len(sys.argv) == 3: @@ -28,6 +29,8 @@ conf = parse_config(sys.argv[1], sys.argv[2]) if sys.argv[3] == '--whole': whole_conf = True + elif sys.argv[3] == '--binary': + binary = True else: raise RuntimeError() @@ -36,4 +39,7 @@ if whole_conf: print conf else: - print conf.model_config + if binary: + sys.stdout.write(conf.SerializeToString()) + else: + print conf.model_config From 0874a7e866fa15676e60eb13305bffefbbae6fa7 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 12 Jan 2017 16:48:24 +0800 Subject: [PATCH 14/43] Fix typo in API.h --- paddle/capi/CMakeLists.txt | 9 ++++++--- paddle/capi/PaddleCAPI.h | 14 +++++++------- paddle/capi/tests/test_GradientMachine.cpp | 4 
++-- python/paddle/utils/dump_config.py | 3 ++- 4 files changed, 17 insertions(+), 13 deletions(-) diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt index 172be7b1224a7..b46fed3a3b47d 100644 --- a/paddle/capi/CMakeLists.txt +++ b/paddle/capi/CMakeLists.txt @@ -39,15 +39,18 @@ add_custom_target(paddle_capi_whole ALL COMMAND mkdir -p o_files/math && cd o_files/math/ && ar -x $ COMMAND mkdir -p o_files/cuda && cd o_files/cuda/ && ar -x $ COMMAND mkdir -p o_files/function && cd o_files/function/ && ar -x $ - COMMAND mkdir -p o_files/pserver && cd o_files/pserver/ && ar -x $ COMMAND mkdir -p o_files/gserver && cd o_files/gserver/ && ar -x $ COMMAND mkdir -p o_files/proto && cd o_files/proto/ && ar -x $ + COMMAND mkdir -p o_files/network && cd o_files/network/ && ar -x + +$ + + COMMAND mkdir -p o_files/pserver && cd o_files/pserver/ && ar -x + +$ Date: Thu, 12 Jan 2017 17:05:39 +0800 Subject: [PATCH 15/43] Add comments. --- paddle/capi/PaddleCAPI.h | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/paddle/capi/PaddleCAPI.h b/paddle/capi/PaddleCAPI.h index b88254c93030c..f2340b8a750af 100644 --- a/paddle/capi/PaddleCAPI.h +++ b/paddle/capi/PaddleCAPI.h @@ -21,6 +21,16 @@ limitations under the License. */ extern "C" { #endif +/** + * Paddle C API. It will replace SWIG as Multiple Language API for model + * training & inference. Currently it is only used in model infernece. + * + * NOTE: This is an experimental API, it could be changed. + */ + +/** + * Error Type for Paddle API. + */ typedef enum { kPD_NO_ERROR = 0, kPD_NULLPTR = 1, @@ -29,6 +39,9 @@ typedef enum { kPD_UNDEFINED_ERROR = -1, } PD_Error; +/** + * Int Vector Functions. Return will be a PD_Error type. + */ typedef void* PD_IVector; int PDIVecCreateNone(PD_IVector* ivec); @@ -37,6 +50,9 @@ int PDIVecDestroy(PD_IVector ivec); int PDIVectorGet(PD_IVector ivec, int** buffer); +/** + * Matrix functions. Return will be a PD_Error type. 
+ */ typedef void* PD_Matrix; int PDMatCreate(PD_Matrix* mat, uint64_t height, uint64_t width, bool useGpu); @@ -51,6 +67,10 @@ int PDMatCreateNone(PD_Matrix* mat); int PDMatGetShape(PD_Matrix mat, uint64_t* height, uint64_t* width); +/** + * Arguments functions. Each argument means layer output. Arguments means a + * array of arguemnt. + */ typedef void* PD_Arguments; int PDArgsCreateNone(PD_Arguments* args); @@ -67,6 +87,9 @@ int PDArgsGetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat); int PDArgsGetIds(PD_Arguments args, uint64_t ID, PD_IVector ids); +/** + * @brief GradientMachine means a neural network. + */ typedef void* PD_GradientMachine; int PDGradientMachineCreateForPredict(PD_GradientMachine* machine, @@ -88,6 +111,9 @@ int PDGradientMachineCreateSharedParam(PD_GradientMachine origin, int PDGradientMachineDestroy(PD_GradientMachine machine); +/** + * Initialize Paddle. + */ int PDInit(int argc, char** argv); #ifdef __cplusplus From 64022143d8855a5303133e478f21f9c1106291f0 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 13 Jan 2017 10:04:19 +0800 Subject: [PATCH 16/43] Fix unittest --- .../tests/CMakeLists.txt | 20 ++++++------------- .../tests/configs/run_tests.sh | 14 +++++++------ python/paddle/utils/dump_config.py | 1 - 3 files changed, 14 insertions(+), 21 deletions(-) diff --git a/python/paddle/trainer_config_helpers/tests/CMakeLists.txt b/python/paddle/trainer_config_helpers/tests/CMakeLists.txt index 403aafabe9143..6c860fd49702e 100644 --- a/python/paddle/trainer_config_helpers/tests/CMakeLists.txt +++ b/python/paddle/trainer_config_helpers/tests/CMakeLists.txt @@ -9,17 +9,9 @@ add_test(NAME test_reset_hook ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/python/paddle/trainer_config_helpers/tests/test_reset_hook.py WORKING_DIRECTORY ${PROJ_ROOT}/python/paddle) -if (PROTOBUF_3) - add_paddle_exe(protobuf_equal - ProtobufEqualMain.cpp) - add_test(NAME test_layerHelpers - COMMAND - ${PROJ_ROOT}/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh 
${PYTHON_EXECUTABLE} - ${CMAKE_CURRENT_BINARY_DIR}/protobuf_equal - ) -else() - add_test(NAME test_layerHelpers - COMMAND - ${PROJ_ROOT}/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh ${PYTHON_EXECUTABLE} - ) -endif() +add_paddle_exe(protobuf_equal ProtobufEqualMain.cpp) +add_test(NAME test_layerHelpers + COMMAND + ${PROJ_ROOT}/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh ${PYTHON_EXECUTABLE} + ${CMAKE_CURRENT_BINARY_DIR}/protobuf_equal +) diff --git a/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh b/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh index a37eb6439e6d2..c8a3b190b1914 100755 --- a/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh +++ b/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh @@ -2,16 +2,18 @@ cd `dirname $0` set -e +PYTHON_EXEC=$1 +COMPARE_PROTO_UTIL=$2 protostr=`dirname $0`/protostr files=`ls $protostr | grep -v "unittest"` -./generate_protostr.sh $1 +./generate_protostr.sh ${PYTHON_EXEC} . ./file_list.sh -if [ -z $1 ]; then +if [ -z ${COMPARE_PROTO_UTIL} ]; then for file in $files do base_protostr=$protostr/$file @@ -22,20 +24,20 @@ if [ -z $1 ]; then else for file in ${configs[*]} do - if ! $1 $protostr/$file.protostr $protostr/$file.protostr.unittest; then + if ! ${COMPARE_PROTO_UTIL} $protostr/$file.protostr $protostr/$file.protostr.unittest; then diff $protostr/$file.protostr $protostr/$file.protostr.unittest -u fi - if ! $1 $protostr/$file.protostr $protostr/$file.protostr.non_file_config.unittest; then + if ! ${COMPARE_PROTO_UTIL} $protostr/$file.protostr $protostr/$file.protostr.non_file_config.unittest; then diff $protostr/$file.protostr $protostr/$file.protostr.non_file_config.unittest -u fi done for file in ${whole_configs[*]} do - if ! $1 $protostr/$file.protostr $protostr/$file.protostr.unittest --whole; then + if ! 
${COMPARE_PROTO_UTIL} $protostr/$file.protostr $protostr/$file.protostr.unittest --whole; then diff $protostr/$file.protostr $protostr/$file.protostr.unittest -u fi - if ! $1 $protostr/$file.protostr $protostr/$file.protostr.non_file_config.unittest --whole; then + if ! ${COMPARE_PROTO_UTIL} $protostr/$file.protostr $protostr/$file.protostr.non_file_config.unittest --whole; then diff $protostr/$file.protostr $protostr/$file.protostr.non_file_config.unittest -u fi done diff --git a/python/paddle/utils/dump_config.py b/python/paddle/utils/dump_config.py index 2ce83bc8a8034..d27af7f76246a 100644 --- a/python/paddle/utils/dump_config.py +++ b/python/paddle/utils/dump_config.py @@ -21,7 +21,6 @@ if __name__ == '__main__': whole_conf = False binary = False - print sys.argv if len(sys.argv) == 2: conf = parse_config(sys.argv[1], '') elif len(sys.argv) == 3: From 30a6f9b39ae6c1c3238001b07faec2c3db6494ec Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 19 Jan 2017 10:50:25 +0800 Subject: [PATCH 17/43] Start doing shared c_api library --- paddle/capi/CMakeLists.txt | 4 ++++ paddle/capi/GradientMachine.cpp | 12 ++++++------ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt index b46fed3a3b47d..b5bf08dc24529 100644 --- a/paddle/capi/CMakeLists.txt +++ b/paddle/capi/CMakeLists.txt @@ -55,6 +55,10 @@ add_custom_target(paddle_capi_whole ALL set_target_properties(paddle_capi_whole PROPERTIES IMPORTED_LOCATION ${CMAKE_CURRENT_BINARY_DIR}/${capi_whole_library}) +add_library(paddle_capi_shared SHARED ${CAPI_SOURCES}) +target_include_directories(paddle_capi_shared PUBLIC ${CMAKE_CURRENT_BINARY_DIR}) +link_paddle_exe(paddle_capi_shared) + # install library & headers. 
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${capi_whole_library} DESTINATION lib) install(FILES ${CAPI_HEADER} DESTINATION include/paddle) diff --git a/paddle/capi/GradientMachine.cpp b/paddle/capi/GradientMachine.cpp index 8299e6442f667..de3a339fa62a8 100644 --- a/paddle/capi/GradientMachine.cpp +++ b/paddle/capi/GradientMachine.cpp @@ -38,7 +38,7 @@ NeuralNetwork* newCustomNerualNetwork(const std::string& name, } // namespace paddle extern "C" { -int PDGradientMachineCreateForPredict(PD_GradiemtMachine* machine, +int PDGradientMachineCreateForPredict(PD_GradientMachine* machine, void* modelConfigProtobuf, int size) { if (modelConfigProtobuf == nullptr) return kPD_NULLPTR; @@ -55,12 +55,12 @@ int PDGradientMachineCreateForPredict(PD_GradiemtMachine* machine, return kPD_NO_ERROR; } -int PDGradientMachineDestroy(PD_GradiemtMachine machine) { +int PDGradientMachineDestroy(PD_GradientMachine machine) { delete cast(machine); return kPD_NO_ERROR; } -int PDGradientMachineLoadParameterFromDisk(PD_GradiemtMachine machine, +int PDGradientMachineLoadParameterFromDisk(PD_GradientMachine machine, const char* path) { auto m = cast(machine); if (m == nullptr || path == nullptr || m->machine == nullptr) @@ -69,7 +69,7 @@ int PDGradientMachineLoadParameterFromDisk(PD_GradiemtMachine machine, return kPD_NO_ERROR; } -int PDGradientMachineForward(PD_GradiemtMachine machine, +int PDGradientMachineForward(PD_GradientMachine machine, PD_Arguments inArgs, PD_Arguments outArgs, bool isTrain) { @@ -83,10 +83,10 @@ int PDGradientMachineForward(PD_GradiemtMachine machine, return kPD_NO_ERROR; } -int PDGradientMachineCreateSharedParam(PD_GradiemtMachine origin, +int PDGradientMachineCreateSharedParam(PD_GradientMachine origin, void* modelConfigProtobuf, int size, - PD_GradiemtMachine* slave) { + PD_GradientMachine* slave) { auto o = cast(origin); if (origin == nullptr || slave == nullptr || o->machine == nullptr) { return kPD_NULLPTR; From 510ccfef1cdb0ad32e3f622dbeec280c4928aa26 Mon Sep 17 
00:00:00 2001 From: Yu Yang Date: Thu, 19 Jan 2017 11:12:00 +0800 Subject: [PATCH 18/43] Make Paddle exports the symbols --- cmake/flags.cmake | 1 + paddle/capi/PaddleCAPI.h | 70 ++++++++++++++++++++++------------------ 2 files changed, 40 insertions(+), 31 deletions(-) diff --git a/cmake/flags.cmake b/cmake/flags.cmake index b76852fc6c50e..137a27c8fe87a 100644 --- a/cmake/flags.cmake +++ b/cmake/flags.cmake @@ -185,3 +185,4 @@ if(CUDA_ARCH) endif() set(CUDA_NVCC_FLAGS ${__arch_flags} ${CUDA_NVCC_FLAGS}) + diff --git a/paddle/capi/PaddleCAPI.h b/paddle/capi/PaddleCAPI.h index f2340b8a750af..6ca413117886e 100644 --- a/paddle/capi/PaddleCAPI.h +++ b/paddle/capi/PaddleCAPI.h @@ -17,6 +17,11 @@ limitations under the License. */ #include #include #include "config.h" + +// Since we only support linux and macos in compile, always use clang or +// gcc 4.8+. DLL_IMPORT/DLL_EXPORT is as simple as below. +#define PD_API __attribute__((visibility("default"))) + #ifdef __cplusplus extern "C" { #endif @@ -44,28 +49,31 @@ typedef enum { */ typedef void* PD_IVector; -int PDIVecCreateNone(PD_IVector* ivec); +PD_API int PDIVecCreateNone(PD_IVector* ivec); -int PDIVecDestroy(PD_IVector ivec); +PD_API int PDIVecDestroy(PD_IVector ivec); -int PDIVectorGet(PD_IVector ivec, int** buffer); +PD_API int PDIVectorGet(PD_IVector ivec, int** buffer); /** * Matrix functions. Return will be a PD_Error type. 
*/ typedef void* PD_Matrix; -int PDMatCreate(PD_Matrix* mat, uint64_t height, uint64_t width, bool useGpu); +PD_API int PDMatCreate(PD_Matrix* mat, + uint64_t height, + uint64_t width, + bool useGpu); -int PDMatDestroy(PD_Matrix mat); +PD_API int PDMatDestroy(PD_Matrix mat); -int PDMatCopyToRow(PD_Matrix mat, uint64_t rowID, pd_real* rowArray); +PD_API int PDMatCopyToRow(PD_Matrix mat, uint64_t rowID, pd_real* rowArray); -int PDMatGetRow(PD_Matrix mat, uint64_t rowID, pd_real** rawRowBuffer); +PD_API int PDMatGetRow(PD_Matrix mat, uint64_t rowID, pd_real** rawRowBuffer); -int PDMatCreateNone(PD_Matrix* mat); +PD_API int PDMatCreateNone(PD_Matrix* mat); -int PDMatGetShape(PD_Matrix mat, uint64_t* height, uint64_t* width); +PD_API int PDMatGetShape(PD_Matrix mat, uint64_t* height, uint64_t* width); /** * Arguments functions. Each argument means layer output. Arguments means a @@ -73,48 +81,48 @@ int PDMatGetShape(PD_Matrix mat, uint64_t* height, uint64_t* width); */ typedef void* PD_Arguments; -int PDArgsCreateNone(PD_Arguments* args); +PD_API int PDArgsCreateNone(PD_Arguments* args); -int PDArgsDestroy(PD_Arguments args); +PD_API int PDArgsDestroy(PD_Arguments args); -int PDArgsGetSize(PD_Arguments args, uint64_t* size); +PD_API int PDArgsGetSize(PD_Arguments args, uint64_t* size); -int PDArgsResize(PD_Arguments args, uint64_t size); +PD_API int PDArgsResize(PD_Arguments args, uint64_t size); -int PDArgsSetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat); +PD_API int PDArgsSetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat); -int PDArgsGetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat); +PD_API int PDArgsGetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat); -int PDArgsGetIds(PD_Arguments args, uint64_t ID, PD_IVector ids); +PD_API int PDArgsGetIds(PD_Arguments args, uint64_t ID, PD_IVector ids); /** * @brief GradientMachine means a neural network. 
*/ typedef void* PD_GradientMachine; -int PDGradientMachineCreateForPredict(PD_GradientMachine* machine, - void* modelConfigProtobuf, - int size); +PD_API int PDGradientMachineCreateForPredict(PD_GradientMachine* machine, + void* modelConfigProtobuf, + int size); -int PDGradientMachineLoadParameterFromDisk(PD_GradientMachine machine, - const char* path); +PD_API int PDGradientMachineLoadParameterFromDisk(PD_GradientMachine machine, + const char* path); -int PDGradientMachineForward(PD_GradientMachine machine, - PD_Arguments inArgs, - PD_Arguments outArgs, - bool isTrain); +PD_API int PDGradientMachineForward(PD_GradientMachine machine, + PD_Arguments inArgs, + PD_Arguments outArgs, + bool isTrain); -int PDGradientMachineCreateSharedParam(PD_GradientMachine origin, - void* modelConfigProtobuf, - int size, - PD_GradientMachine* slave); +PD_API int PDGradientMachineCreateSharedParam(PD_GradientMachine origin, + void* modelConfigProtobuf, + int size, + PD_GradientMachine* slave); -int PDGradientMachineDestroy(PD_GradientMachine machine); +PD_API int PDGradientMachineDestroy(PD_GradientMachine machine); /** * Initialize Paddle. */ -int PDInit(int argc, char** argv); +PD_API int PDInit(int argc, char** argv); #ifdef __cplusplus } From 8a1e32d812b9cfd5a33756822c4d0f3051db8672 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 7 Mar 2017 15:42:54 +0800 Subject: [PATCH 19/43] Fix compile error. 
--- paddle/capi/CMakeLists.txt | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt index b5bf08dc24529..f5827317b0d6a 100644 --- a/paddle/capi/CMakeLists.txt +++ b/paddle/capi/CMakeLists.txt @@ -41,10 +41,8 @@ add_custom_target(paddle_capi_whole ALL COMMAND mkdir -p o_files/function && cd o_files/function/ && ar -x $ COMMAND mkdir -p o_files/gserver && cd o_files/gserver/ && ar -x $ COMMAND mkdir -p o_files/proto && cd o_files/proto/ && ar -x $ - COMMAND mkdir -p o_files/network && cd o_files/network/ && ar -x - +$ - + COMMAND mkdir -p o_files/pserver && cd o_files/pserver/ && ar -x - +$ + COMMAND mkdir -p o_files/pserver && cd o_files/pserver/ && ar -x $ COMMAND ar crs ${capi_whole_library} `find ./o_files -name '*.o'` COMMAND rm -rf o_files WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} From c32ade74c2bfd5afbece2453f3bf8d967b00a63b Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 7 Mar 2017 15:54:24 +0800 Subject: [PATCH 20/43] Add todo --- paddle/capi/PaddleCAPI.h | 4 ++++ paddle/capi/Vector.cpp | 10 ++++++++++ paddle/capi/tests/test_Vector.cpp | 4 ++++ 3 files changed, 18 insertions(+) diff --git a/paddle/capi/PaddleCAPI.h b/paddle/capi/PaddleCAPI.h index 6ca413117886e..4819be3447f41 100644 --- a/paddle/capi/PaddleCAPI.h +++ b/paddle/capi/PaddleCAPI.h @@ -55,6 +55,10 @@ PD_API int PDIVecDestroy(PD_IVector ivec); PD_API int PDIVectorGet(PD_IVector ivec, int** buffer); +PD_API int PDIVectorResize(PD_IVector ivec, uint64_t size); + +PD_API int PDIVectorGetSize(PD_IVector ivec, uint64_t* size); + /** * Matrix functions. Return will be a PD_Error type. 
*/ diff --git a/paddle/capi/Vector.cpp b/paddle/capi/Vector.cpp index 5b4fe0666cbba..eb3501fb5b8ff 100644 --- a/paddle/capi/Vector.cpp +++ b/paddle/capi/Vector.cpp @@ -39,4 +39,14 @@ int PDIVectorGet(PD_IVector ivec, int** buffer) { *buffer = v->vec->getData(); return kPD_NO_ERROR; } + +int PDIVectorResize(PD_IVector ivec, uint64_t size) { + // TODO(lizhao): Complete this method. + return 0; +} + +int PDIVectorGetSize(PD_IVector ivec, uint64_t* size) { + // TODO(lizhao): Complete this method. + return 0; +} } diff --git a/paddle/capi/tests/test_Vector.cpp b/paddle/capi/tests/test_Vector.cpp index 122f7df176d4c..2697e80b92c22 100644 --- a/paddle/capi/tests/test_Vector.cpp +++ b/paddle/capi/tests/test_Vector.cpp @@ -18,5 +18,9 @@ limitations under the License. */ TEST(CAPIVector, create) { PD_IVector vec; ASSERT_EQ(kPD_NO_ERROR, PDIVecCreateNone(&vec)); + ASSERT_EQ(kPD_NO_ERROR, PDIVectorResize(vec, 1000)); + uint64_t size; + ASSERT_EQ(kPD_NO_ERROR, PDIVectorGetSize(vec, &size)); + ASSERT_EQ(1000, size); ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(vec)); } From 97c64254568b4c68ceb778c1721e36b43e2ad5d1 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 7 Mar 2017 16:16:46 +0800 Subject: [PATCH 21/43] Add some more interfaces --- paddle/capi/Arguments.cpp | 33 +++++++++++++++++++++++++++++++++ paddle/capi/PaddleCAPI.h | 30 ++++++++++++++++++++++++++++++ paddle/capi/Vector.cpp | 13 +++++++++---- 3 files changed, 72 insertions(+), 4 deletions(-) diff --git a/paddle/capi/Arguments.cpp b/paddle/capi/Arguments.cpp index 60bdea15ae76d..678a80ac3f4d5 100644 --- a/paddle/capi/Arguments.cpp +++ b/paddle/capi/Arguments.cpp @@ -72,4 +72,37 @@ int PDArgsGetIds(PD_Arguments args, uint64_t ID, PD_IVector ids) { iv->vec = a->args[ID].ids; return kPD_NO_ERROR; } + +int PDArgsSetIds(PD_Arguments args, uint64_t ID, PD_IVector ids) { + //! TODO(lizhao): Complete this method. 
+ return kPD_UNDEFINED_ERROR; +} + +int PDArgsSetSequenceStartPos(PD_Arguments args, + uint64_t ID, + PD_IVector seqPos) { + //! TODO(lizhao): Complete this method. + return kPD_UNDEFINED_ERROR; +} + +int PDArgsSetSubSequenceStartPos(PD_Arguments args, + uint64_t ID, + PD_IVector subSeqPos) { + //! TODO(lizhao): Complete this method. + return kPD_UNDEFINED_ERROR; +} + +int PDArgsGetSequenceStartPos(PD_Arguments args, + uint64_t ID, + PD_IVector seqPos) { + //! TODO(lizhao): Complete this method. + return kPD_UNDEFINED_ERROR; +} + +int PDArgsGetSubSequenceStartPos(PD_Arguments args, + uint64_t ID, + PD_IVector subSeqPos) { + //! TODO(lizhao): Complete this method. + return kPD_UNDEFINED_ERROR; +} } diff --git a/paddle/capi/PaddleCAPI.h b/paddle/capi/PaddleCAPI.h index 4819be3447f41..6eea60ef74915 100644 --- a/paddle/capi/PaddleCAPI.h +++ b/paddle/capi/PaddleCAPI.h @@ -51,6 +51,19 @@ typedef void* PD_IVector; PD_API int PDIVecCreateNone(PD_IVector* ivec); +/** + * @brief PDIVectorCreate create a paddle int vector + * @param [out] ivec: output int vector. + * @param [in] array: input array. + * @param [in] size: input array size. + * @param [in] copy: memory copy or just use same memory. True if copy. 
+ * @return PD_Error + */ +PD_API int PDIVectorCreate(PD_IVector* ivec, + int* array, + uint64_t size, + bool copy); + PD_API int PDIVecDestroy(PD_IVector ivec); PD_API int PDIVectorGet(PD_IVector ivec, int** buffer); @@ -99,6 +112,23 @@ PD_API int PDArgsGetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat); PD_API int PDArgsGetIds(PD_Arguments args, uint64_t ID, PD_IVector ids); +PD_API int PDArgsSetIds(PD_Arguments args, uint64_t ID, PD_IVector ids); + +PD_API int PDArgsSetSequenceStartPos(PD_Arguments args, + uint64_t ID, + PD_IVector seqPos); + +PD_API int PDArgsGetSequenceStartPos(PD_Arguments args, + uint64_t ID, + PD_IVector seqPos); + +PD_API int PDArgsSetSubSequenceStartPos(PD_Arguments args, + uint64_t ID, + PD_IVector subSeqPos); + +PD_API int PDArgsGetSubSequenceStartPos(PD_Arguments args, + uint64_t ID, + PD_IVector subSeqPos); /** * @brief GradientMachine means a neural network. */ diff --git a/paddle/capi/Vector.cpp b/paddle/capi/Vector.cpp index eb3501fb5b8ff..a2e6f3507de3d 100644 --- a/paddle/capi/Vector.cpp +++ b/paddle/capi/Vector.cpp @@ -26,6 +26,11 @@ int PDIVecCreateNone(PD_IVector* ivec) { return kPD_NO_ERROR; } +int PDIVectorCreate(PD_IVector* ivec, int* array, uint64_t size, bool copy) { + //! TODO(lizhao): Complete this method. + return kPD_UNDEFINED_ERROR; +} + int PDIVecDestroy(PD_IVector ivec) { if (ivec == nullptr) return kPD_NULLPTR; delete cast(ivec); @@ -41,12 +46,12 @@ int PDIVectorGet(PD_IVector ivec, int** buffer) { } int PDIVectorResize(PD_IVector ivec, uint64_t size) { - // TODO(lizhao): Complete this method. - return 0; + //! TODO(lizhao): Complete this method. + return kPD_UNDEFINED_ERROR; } int PDIVectorGetSize(PD_IVector ivec, uint64_t* size) { - // TODO(lizhao): Complete this method. - return 0; + //! TODO(lizhao): Complete this method. 
+ return kPD_UNDEFINED_ERROR; } } From 3519c63034e4eb86c72de218ef67e921b6cc38eb Mon Sep 17 00:00:00 2001 From: livc Date: Thu, 9 Mar 2017 13:51:50 +0800 Subject: [PATCH 22/43] complete some functions of c-api. --- paddle/capi/Arguments.cpp | 42 ++++++++++++++++++++++++---- paddle/capi/Vector.cpp | 23 +++++++++++++-- paddle/capi/tests/test_Arguments.cpp | 36 ++++++++++++++++++++++++ paddle/capi/tests/test_Vector.cpp | 11 ++++++-- 4 files changed, 102 insertions(+), 10 deletions(-) diff --git a/paddle/capi/Arguments.cpp b/paddle/capi/Arguments.cpp index 678a80ac3f4d5..baabd44cc0c63 100644 --- a/paddle/capi/Arguments.cpp +++ b/paddle/capi/Arguments.cpp @@ -75,34 +75,66 @@ int PDArgsGetIds(PD_Arguments args, uint64_t ID, PD_IVector ids) { int PDArgsSetIds(PD_Arguments args, uint64_t ID, PD_IVector ids) { //! TODO(lizhao): Complete this method. - return kPD_UNDEFINED_ERROR; + if (args == nullptr || ids == nullptr) return kPD_NULLPTR; + auto iv = paddle::capi::cast(ids); + if (iv->vec == nullptr) return kPD_NULLPTR; + auto a = castArg(args); + if (ID >= a->args.size()) return kPD_OUT_OF_RANGE; + a->args[ID].ids = iv->vec; + return kPD_NO_ERROR; } int PDArgsSetSequenceStartPos(PD_Arguments args, uint64_t ID, PD_IVector seqPos) { //! TODO(lizhao): Complete this method. - return kPD_UNDEFINED_ERROR; + if (args == nullptr || seqPos == nullptr) return kPD_NULLPTR; + auto iv = paddle::capi::cast(seqPos); + if (iv->vec == nullptr) return kPD_NULLPTR; + auto a = castArg(args); + if (ID >= a->args.size()) return kPD_OUT_OF_RANGE; + a->args[ID].sequenceStartPositions = + std::make_shared(iv->vec); + return kPD_NO_ERROR; } int PDArgsSetSubSequenceStartPos(PD_Arguments args, uint64_t ID, PD_IVector subSeqPos) { //! TODO(lizhao): Complete this method. 
- return kPD_UNDEFINED_ERROR; + if (args == nullptr || subSeqPos == nullptr) return kPD_NULLPTR; + auto iv = paddle::capi::cast(subSeqPos); + if (iv->vec == nullptr) return kPD_NULLPTR; + auto a = castArg(args); + if (ID >= a->args.size()) return kPD_OUT_OF_RANGE; + a->args[ID].sequenceStartPositions = + std::make_shared(iv->vec); + return kPD_NO_ERROR; } int PDArgsGetSequenceStartPos(PD_Arguments args, uint64_t ID, PD_IVector seqPos) { //! TODO(lizhao): Complete this method. - return kPD_UNDEFINED_ERROR; + if (args == nullptr || seqPos == nullptr) return kPD_NULLPTR; + auto iv = castIVec(seqPos); + auto a = castArg(args); + if (ID >= a->args.size()) return kPD_OUT_OF_RANGE; + std::make_shared(iv->vec) = + a->args[ID].sequenceStartPositions; + return kPD_NO_ERROR; } int PDArgsGetSubSequenceStartPos(PD_Arguments args, uint64_t ID, PD_IVector subSeqPos) { //! TODO(lizhao): Complete this method. - return kPD_UNDEFINED_ERROR; + if (args == nullptr || subSeqPos == nullptr) return kPD_NULLPTR; + auto iv = castIVec(subSeqPos); + auto a = castArg(args); + if (ID >= a->args.size()) return kPD_OUT_OF_RANGE; + std::make_shared(iv->vec) = + a->args[ID].sequenceStartPositions; + return kPD_NO_ERROR; } } diff --git a/paddle/capi/Vector.cpp b/paddle/capi/Vector.cpp index a2e6f3507de3d..38a5fbc00a8be 100644 --- a/paddle/capi/Vector.cpp +++ b/paddle/capi/Vector.cpp @@ -28,7 +28,16 @@ int PDIVecCreateNone(PD_IVector* ivec) { int PDIVectorCreate(PD_IVector* ivec, int* array, uint64_t size, bool copy) { //! TODO(lizhao): Complete this method. 
- return kPD_UNDEFINED_ERROR; + if (ivec == nullptr) return kPD_NULLPTR; + auto ptr = new paddle::capi::CIVector(); + if (copy) { + ptr->vec = paddle::IVector::create(size, false); + ptr->vec->copyFrom(array, size); + } else { + ptr->vec = paddle::IVector::create(array, size, false); + } + *ivec = ptr; + return kPD_NO_ERROR; } int PDIVecDestroy(PD_IVector ivec) { @@ -47,11 +56,19 @@ int PDIVectorGet(PD_IVector ivec, int** buffer) { int PDIVectorResize(PD_IVector ivec, uint64_t size) { //! TODO(lizhao): Complete this method. - return kPD_UNDEFINED_ERROR; + if (ivec == nullptr) return kPD_NULLPTR; + auto v = cast(ivec); + if (v->vec == nullptr) return kPD_NULLPTR; + v->vec->resize(size); + return kPD_NO_ERROR; } int PDIVectorGetSize(PD_IVector ivec, uint64_t* size) { //! TODO(lizhao): Complete this method. - return kPD_UNDEFINED_ERROR; + if (ivec == nullptr) return kPD_NULLPTR; + auto v = cast(ivec); + if (v->vec == nullptr) return kPD_NULLPTR; + *size = v->vec->getSize(); + return kPD_NO_ERROR; } } diff --git a/paddle/capi/tests/test_Arguments.cpp b/paddle/capi/tests/test_Arguments.cpp index 4a18ffbf47413..1186d2921ba46 100644 --- a/paddle/capi/tests/test_Arguments.cpp +++ b/paddle/capi/tests/test_Arguments.cpp @@ -70,3 +70,39 @@ TEST(CAPIArguments, value) { ASSERT_EQ(kPD_NO_ERROR, PDMatDestroy(mat)); ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(args)); } + +TEST(CAPIArguments, ids) { + PD_Arguments args; + ASSERT_EQ(kPD_NO_ERROR, PDArgsCreateNone(&args)); + ASSERT_EQ(kPD_NO_ERROR, PDArgsResize(args, 1)); + + PD_IVector ivec; + int array[3] = {1, 2, 3}; + ASSERT_EQ(kPD_NO_ERROR, PDIVectorCreate(&ivec, array, 3, true)); + ASSERT_EQ(kPD_NO_ERROR, PDArgsSetIds(args, 0, ivec)); + + PD_IVector val; + ASSERT_EQ(kPD_NO_ERROR, PDIVecCreateNone(&val)); + ASSERT_EQ(kPD_NO_ERROR, PDArgsGetIds(args, 0, val)); + ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(ivec)); + ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(val)); + ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(args)); +} + +TEST(CAPIArguments, Sequence) 
{ + PD_Arguments args; + ASSERT_EQ(kPD_NO_ERROR, PDArgsCreateNone(&args)); + ASSERT_EQ(kPD_NO_ERROR, PDArgsResize(args, 1)); + + PD_IVector ivec; + int array[3] = {1, 2, 3}; + ASSERT_EQ(kPD_NO_ERROR, PDIVectorCreate(&ivec, array, 3, true)); + ASSERT_EQ(kPD_NO_ERROR, PDArgsSetSequenceStartPos(args, 0, ivec)); + + PD_IVector val; + ASSERT_EQ(kPD_NO_ERROR, PDIVecCreateNone(&val)); + ASSERT_EQ(kPD_NO_ERROR, PDArgsGetSequenceStartPos(args, 0, val)); + ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(ivec)); + ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(val)); + ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(args)); +} diff --git a/paddle/capi/tests/test_Vector.cpp b/paddle/capi/tests/test_Vector.cpp index 2697e80b92c22..547d0ef20d168 100644 --- a/paddle/capi/tests/test_Vector.cpp +++ b/paddle/capi/tests/test_Vector.cpp @@ -17,10 +17,17 @@ limitations under the License. */ TEST(CAPIVector, create) { PD_IVector vec; - ASSERT_EQ(kPD_NO_ERROR, PDIVecCreateNone(&vec)); + int array[3] = {1, 2, 3}; + ASSERT_EQ(kPD_NO_ERROR, PDIVectorCreate(&vec, array, 3, true)); + ASSERT_EQ(kPD_NO_ERROR, PDIVectorCreate(&vec, array, 3, false)); ASSERT_EQ(kPD_NO_ERROR, PDIVectorResize(vec, 1000)); uint64_t size; ASSERT_EQ(kPD_NO_ERROR, PDIVectorGetSize(vec, &size)); - ASSERT_EQ(1000, size); + ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(vec)); +} + +TEST(CAPIVector, createNone) { + PD_IVector vec; + ASSERT_EQ(kPD_NO_ERROR, PDIVecCreateNone(&vec)); ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(vec)); } From 5a9987a317eef6898f3e0386331bc34174cb0a53 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 9 Mar 2017 15:30:25 +0800 Subject: [PATCH 23/43] Fix bugs in lizhao's code --- paddle/capi/Arguments.cpp | 14 +++++--------- paddle/capi/Vector.cpp | 3 --- paddle/capi/tests/test_Arguments.cpp | 22 +++++++++++++++++++--- 3 files changed, 24 insertions(+), 15 deletions(-) diff --git a/paddle/capi/Arguments.cpp b/paddle/capi/Arguments.cpp index baabd44cc0c63..8d00bda3cb90c 100644 --- a/paddle/capi/Arguments.cpp +++ b/paddle/capi/Arguments.cpp 
@@ -87,7 +87,6 @@ int PDArgsSetIds(PD_Arguments args, uint64_t ID, PD_IVector ids) { int PDArgsSetSequenceStartPos(PD_Arguments args, uint64_t ID, PD_IVector seqPos) { - //! TODO(lizhao): Complete this method. if (args == nullptr || seqPos == nullptr) return kPD_NULLPTR; auto iv = paddle::capi::cast(seqPos); if (iv->vec == nullptr) return kPD_NULLPTR; @@ -101,13 +100,12 @@ int PDArgsSetSequenceStartPos(PD_Arguments args, int PDArgsSetSubSequenceStartPos(PD_Arguments args, uint64_t ID, PD_IVector subSeqPos) { - //! TODO(lizhao): Complete this method. if (args == nullptr || subSeqPos == nullptr) return kPD_NULLPTR; auto iv = paddle::capi::cast(subSeqPos); if (iv->vec == nullptr) return kPD_NULLPTR; auto a = castArg(args); if (ID >= a->args.size()) return kPD_OUT_OF_RANGE; - a->args[ID].sequenceStartPositions = + a->args[ID].subSequenceStartPositions = std::make_shared(iv->vec); return kPD_NO_ERROR; } @@ -115,26 +113,24 @@ int PDArgsSetSubSequenceStartPos(PD_Arguments args, int PDArgsGetSequenceStartPos(PD_Arguments args, uint64_t ID, PD_IVector seqPos) { - //! TODO(lizhao): Complete this method. if (args == nullptr || seqPos == nullptr) return kPD_NULLPTR; auto iv = castIVec(seqPos); auto a = castArg(args); if (ID >= a->args.size()) return kPD_OUT_OF_RANGE; - std::make_shared(iv->vec) = - a->args[ID].sequenceStartPositions; + paddle::Argument& arg = a->args[ID]; + iv->vec = arg.sequenceStartPositions->getMutableVector(false); return kPD_NO_ERROR; } int PDArgsGetSubSequenceStartPos(PD_Arguments args, uint64_t ID, PD_IVector subSeqPos) { - //! TODO(lizhao): Complete this method. 
if (args == nullptr || subSeqPos == nullptr) return kPD_NULLPTR; auto iv = castIVec(subSeqPos); auto a = castArg(args); if (ID >= a->args.size()) return kPD_OUT_OF_RANGE; - std::make_shared(iv->vec) = - a->args[ID].sequenceStartPositions; + paddle::Argument& arg = a->args[ID]; + iv->vec = arg.subSequenceStartPositions->getMutableVector(false); return kPD_NO_ERROR; } } diff --git a/paddle/capi/Vector.cpp b/paddle/capi/Vector.cpp index 38a5fbc00a8be..af2192551370f 100644 --- a/paddle/capi/Vector.cpp +++ b/paddle/capi/Vector.cpp @@ -27,7 +27,6 @@ int PDIVecCreateNone(PD_IVector* ivec) { } int PDIVectorCreate(PD_IVector* ivec, int* array, uint64_t size, bool copy) { - //! TODO(lizhao): Complete this method. if (ivec == nullptr) return kPD_NULLPTR; auto ptr = new paddle::capi::CIVector(); if (copy) { @@ -55,7 +54,6 @@ int PDIVectorGet(PD_IVector ivec, int** buffer) { } int PDIVectorResize(PD_IVector ivec, uint64_t size) { - //! TODO(lizhao): Complete this method. if (ivec == nullptr) return kPD_NULLPTR; auto v = cast(ivec); if (v->vec == nullptr) return kPD_NULLPTR; @@ -64,7 +62,6 @@ int PDIVectorResize(PD_IVector ivec, uint64_t size) { } int PDIVectorGetSize(PD_IVector ivec, uint64_t* size) { - //! TODO(lizhao): Complete this method. 
if (ivec == nullptr) return kPD_NULLPTR; auto v = cast(ivec); if (v->vec == nullptr) return kPD_NULLPTR; diff --git a/paddle/capi/tests/test_Arguments.cpp b/paddle/capi/tests/test_Arguments.cpp index 1186d2921ba46..9357f3a58468e 100644 --- a/paddle/capi/tests/test_Arguments.cpp +++ b/paddle/capi/tests/test_Arguments.cpp @@ -89,7 +89,8 @@ TEST(CAPIArguments, ids) { ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(args)); } -TEST(CAPIArguments, Sequence) { +template +void testSequenceHelper(T1 setter, T2 getter) { PD_Arguments args; ASSERT_EQ(kPD_NO_ERROR, PDArgsCreateNone(&args)); ASSERT_EQ(kPD_NO_ERROR, PDArgsResize(args, 1)); @@ -97,12 +98,27 @@ TEST(CAPIArguments, Sequence) { PD_IVector ivec; int array[3] = {1, 2, 3}; ASSERT_EQ(kPD_NO_ERROR, PDIVectorCreate(&ivec, array, 3, true)); - ASSERT_EQ(kPD_NO_ERROR, PDArgsSetSequenceStartPos(args, 0, ivec)); + ASSERT_EQ(kPD_NO_ERROR, setter(args, 0, ivec)); PD_IVector val; ASSERT_EQ(kPD_NO_ERROR, PDIVecCreateNone(&val)); - ASSERT_EQ(kPD_NO_ERROR, PDArgsGetSequenceStartPos(args, 0, val)); + ASSERT_EQ(kPD_NO_ERROR, getter(args, 0, val)); + uint64_t size; + ASSERT_EQ(kPD_NO_ERROR, PDIVectorGetSize(val, &size)); + + int* rawBuf; + ASSERT_EQ(kPD_NO_ERROR, PDIVectorGet(val, &rawBuf)); + for (size_t i = 0; i < size; ++i) { + ASSERT_EQ(array[i], rawBuf[i]); + } + ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(ivec)); ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(val)); ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(args)); } + +TEST(CAPIArguments, Sequence) { + testSequenceHelper(PDArgsSetSequenceStartPos, PDArgsGetSequenceStartPos); + testSequenceHelper(PDArgsSetSubSequenceStartPos, + PDArgsGetSubSequenceStartPos); +} From 7bb12fdb50e45b2a98bfd731b2dde105edb64ee0 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 10 Mar 2017 13:18:48 +0800 Subject: [PATCH 24/43] Refactor API follow comments. 
--- paddle/capi/Arguments.cpp | 44 ++- paddle/capi/GradientMachine.cpp | 28 +- paddle/capi/Main.cpp | 2 +- paddle/capi/Matrix.cpp | 19 +- paddle/capi/PaddleCAPI.h | 303 +++++++++++++++++---- paddle/capi/Vector.cpp | 25 +- paddle/capi/tests/test_Arguments.cpp | 31 +-- paddle/capi/tests/test_GradientMachine.cpp | 9 +- paddle/capi/tests/test_Matrix.cpp | 6 +- paddle/capi/tests/test_Vector.cpp | 6 +- 10 files changed, 314 insertions(+), 159 deletions(-) diff --git a/paddle/capi/Arguments.cpp b/paddle/capi/Arguments.cpp index 8d00bda3cb90c..3d60165962d5d 100644 --- a/paddle/capi/Arguments.cpp +++ b/paddle/capi/Arguments.cpp @@ -21,31 +21,27 @@ using paddle::capi::cast; #define castIVec(v) cast(v) extern "C" { -int PDArgsCreateNone(PD_Arguments* args) { - auto ptr = new paddle::capi::CArguments(); - *args = ptr; - return kPD_NO_ERROR; -} +PD_Arguments PDArgsCreateNone() { return new paddle::capi::CArguments(); } -int PDArgsDestroy(PD_Arguments args) { +PD_Error PDArgsDestroy(PD_Arguments args) { if (args == nullptr) return kPD_NULLPTR; delete castArg(args); return kPD_NO_ERROR; } -int PDArgsGetSize(PD_Arguments args, uint64_t* size) { +PD_Error PDArgsGetSize(PD_Arguments args, uint64_t* size) { if (args == nullptr || size == nullptr) return kPD_NULLPTR; *size = castArg(args)->args.size(); return kPD_NO_ERROR; } -int PDArgsResize(PD_Arguments args, uint64_t size) { +PD_Error PDArgsResize(PD_Arguments args, uint64_t size) { if (args == nullptr) return kPD_NULLPTR; castArg(args)->args.resize(size); return kPD_NO_ERROR; } -int PDArgsSetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat) { +PD_Error PDArgsSetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat) { if (args == nullptr || mat == nullptr) return kPD_NULLPTR; auto m = paddle::capi::cast(mat); if (m->mat == nullptr) return kPD_NULLPTR; @@ -55,7 +51,7 @@ int PDArgsSetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat) { return kPD_NO_ERROR; } -int PDArgsGetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat) { 
+PD_Error PDArgsGetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat) { if (args == nullptr || mat == nullptr) return kPD_NULLPTR; auto m = paddle::capi::cast(mat); auto a = castArg(args); @@ -64,7 +60,7 @@ int PDArgsGetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat) { return kPD_NO_ERROR; } -int PDArgsGetIds(PD_Arguments args, uint64_t ID, PD_IVector ids) { +PD_Error PDArgsGetIds(PD_Arguments args, uint64_t ID, PD_IVector ids) { if (args == nullptr || ids == nullptr) return kPD_NULLPTR; auto iv = castIVec(ids); auto a = castArg(args); @@ -73,7 +69,7 @@ int PDArgsGetIds(PD_Arguments args, uint64_t ID, PD_IVector ids) { return kPD_NO_ERROR; } -int PDArgsSetIds(PD_Arguments args, uint64_t ID, PD_IVector ids) { +PD_Error PDArgsSetIds(PD_Arguments args, uint64_t ID, PD_IVector ids) { //! TODO(lizhao): Complete this method. if (args == nullptr || ids == nullptr) return kPD_NULLPTR; auto iv = paddle::capi::cast(ids); @@ -84,9 +80,9 @@ int PDArgsSetIds(PD_Arguments args, uint64_t ID, PD_IVector ids) { return kPD_NO_ERROR; } -int PDArgsSetSequenceStartPos(PD_Arguments args, - uint64_t ID, - PD_IVector seqPos) { +PD_Error PDArgsSetSequenceStartPos(PD_Arguments args, + uint64_t ID, + PD_IVector seqPos) { if (args == nullptr || seqPos == nullptr) return kPD_NULLPTR; auto iv = paddle::capi::cast(seqPos); if (iv->vec == nullptr) return kPD_NULLPTR; @@ -97,9 +93,9 @@ int PDArgsSetSequenceStartPos(PD_Arguments args, return kPD_NO_ERROR; } -int PDArgsSetSubSequenceStartPos(PD_Arguments args, - uint64_t ID, - PD_IVector subSeqPos) { +PD_Error PDArgsSetSubSequenceStartPos(PD_Arguments args, + uint64_t ID, + PD_IVector subSeqPos) { if (args == nullptr || subSeqPos == nullptr) return kPD_NULLPTR; auto iv = paddle::capi::cast(subSeqPos); if (iv->vec == nullptr) return kPD_NULLPTR; @@ -110,9 +106,9 @@ int PDArgsSetSubSequenceStartPos(PD_Arguments args, return kPD_NO_ERROR; } -int PDArgsGetSequenceStartPos(PD_Arguments args, - uint64_t ID, - PD_IVector seqPos) { +PD_Error 
PDArgsGetSequenceStartPos(PD_Arguments args, + uint64_t ID, + PD_IVector seqPos) { if (args == nullptr || seqPos == nullptr) return kPD_NULLPTR; auto iv = castIVec(seqPos); auto a = castArg(args); @@ -122,9 +118,9 @@ int PDArgsGetSequenceStartPos(PD_Arguments args, return kPD_NO_ERROR; } -int PDArgsGetSubSequenceStartPos(PD_Arguments args, - uint64_t ID, - PD_IVector subSeqPos) { +PD_Error PDArgsGetSubSequenceStartPos(PD_Arguments args, + uint64_t ID, + PD_IVector subSeqPos) { if (args == nullptr || subSeqPos == nullptr) return kPD_NULLPTR; auto iv = castIVec(subSeqPos); auto a = castArg(args); diff --git a/paddle/capi/GradientMachine.cpp b/paddle/capi/GradientMachine.cpp index de3a339fa62a8..ed0cfd8840935 100644 --- a/paddle/capi/GradientMachine.cpp +++ b/paddle/capi/GradientMachine.cpp @@ -38,9 +38,9 @@ NeuralNetwork* newCustomNerualNetwork(const std::string& name, } // namespace paddle extern "C" { -int PDGradientMachineCreateForPredict(PD_GradientMachine* machine, - void* modelConfigProtobuf, - int size) { +PD_Error PDGradientMachineCreateForPredict(PD_GradientMachine* machine, + void* modelConfigProtobuf, + int size) { if (modelConfigProtobuf == nullptr) return kPD_NULLPTR; paddle::ModelConfig config; if (!config.ParseFromArray(modelConfigProtobuf, size) || @@ -55,13 +55,13 @@ int PDGradientMachineCreateForPredict(PD_GradientMachine* machine, return kPD_NO_ERROR; } -int PDGradientMachineDestroy(PD_GradientMachine machine) { +PD_Error PDGradientMachineDestroy(PD_GradientMachine machine) { delete cast(machine); return kPD_NO_ERROR; } -int PDGradientMachineLoadParameterFromDisk(PD_GradientMachine machine, - const char* path) { +PD_Error PDGradientMachineLoadParameterFromDisk(PD_GradientMachine machine, + const char* path) { auto m = cast(machine); if (m == nullptr || path == nullptr || m->machine == nullptr) return kPD_NULLPTR; @@ -69,10 +69,10 @@ int PDGradientMachineLoadParameterFromDisk(PD_GradientMachine machine, return kPD_NO_ERROR; } -int 
PDGradientMachineForward(PD_GradientMachine machine, - PD_Arguments inArgs, - PD_Arguments outArgs, - bool isTrain) { +PD_Error PDGradientMachineForward(PD_GradientMachine machine, + PD_Arguments inArgs, + PD_Arguments outArgs, + bool isTrain) { auto m = cast(machine); auto in = paddle::capi::cast(inArgs); auto out = paddle::capi::cast(outArgs); @@ -83,10 +83,10 @@ int PDGradientMachineForward(PD_GradientMachine machine, return kPD_NO_ERROR; } -int PDGradientMachineCreateSharedParam(PD_GradientMachine origin, - void* modelConfigProtobuf, - int size, - PD_GradientMachine* slave) { +PD_Error PDGradientMachineCreateSharedParam(PD_GradientMachine origin, + void* modelConfigProtobuf, + int size, + PD_GradientMachine* slave) { auto o = cast(origin); if (origin == nullptr || slave == nullptr || o->machine == nullptr) { return kPD_NULLPTR; diff --git a/paddle/capi/Main.cpp b/paddle/capi/Main.cpp index e310eb540475d..9314071b4bcf9 100644 --- a/paddle/capi/Main.cpp +++ b/paddle/capi/Main.cpp @@ -29,7 +29,7 @@ static void initPaddle(int argc, char** argv) { } extern "C" { -int PDInit(int argc, char** argv) { +PD_Error PDInit(int argc, char** argv) { std::vector realArgv; realArgv.reserve(argc + 1); realArgv.push_back(strdup("")); diff --git a/paddle/capi/Matrix.cpp b/paddle/capi/Matrix.cpp index db32c945412ec..bc25f84344c1b 100644 --- a/paddle/capi/Matrix.cpp +++ b/paddle/capi/Matrix.cpp @@ -18,27 +18,22 @@ limitations under the License. 
*/ #define cast(v) paddle::capi::cast(v) extern "C" { -int PDMatCreate(PD_Matrix* mat, uint64_t height, uint64_t width, bool useGpu) { +PD_Matrix PDMatCreate(uint64_t height, uint64_t width, bool useGpu) { auto ptr = new paddle::capi::CMatrix(); ptr->mat = paddle::Matrix::create(height, width, false, useGpu); - *mat = ptr; - return kPD_NO_ERROR; + return ptr; } -int PDMatCreateNone(PD_Matrix* mat) { - auto ptr = new paddle::capi::CMatrix(); - *mat = ptr; - return kPD_NO_ERROR; -} +PD_Matrix PDMatCreateNone() { return new paddle::capi::CMatrix(); } -int PDMatDestroy(PD_Matrix mat) { +PD_Error PDMatDestroy(PD_Matrix mat) { if (mat == nullptr) return kPD_NULLPTR; auto ptr = cast(mat); delete ptr; return kPD_NO_ERROR; } -int PDMatCopyToRow(PD_Matrix mat, uint64_t rowID, pd_real* rowArray) { +PD_Error PDMatCopyToRow(PD_Matrix mat, uint64_t rowID, pd_real* rowArray) { if (mat == nullptr) return kPD_NULLPTR; auto ptr = cast(mat); if (ptr->mat == nullptr) return kPD_NULLPTR; @@ -53,7 +48,7 @@ int PDMatCopyToRow(PD_Matrix mat, uint64_t rowID, pd_real* rowArray) { return kPD_NO_ERROR; } -int PDMatGetRow(PD_Matrix mat, uint64_t rowID, pd_real** rawRowBuffer) { +PD_Error PDMatGetRow(PD_Matrix mat, uint64_t rowID, pd_real** rawRowBuffer) { if (mat == nullptr) return kPD_NULLPTR; auto ptr = cast(mat); if (ptr->mat == nullptr) return kPD_NULLPTR; @@ -62,7 +57,7 @@ int PDMatGetRow(PD_Matrix mat, uint64_t rowID, pd_real** rawRowBuffer) { return kPD_NO_ERROR; } -int PDMatGetShape(PD_Matrix mat, uint64_t* height, uint64_t* width) { +PD_Error PDMatGetShape(PD_Matrix mat, uint64_t* height, uint64_t* width) { if (mat == nullptr) return kPD_NULLPTR; if (height != nullptr) { *height = cast(mat)->mat->getHeight(); diff --git a/paddle/capi/PaddleCAPI.h b/paddle/capi/PaddleCAPI.h index 6eea60ef74915..94a9fc497f93a 100644 --- a/paddle/capi/PaddleCAPI.h +++ b/paddle/capi/PaddleCAPI.h @@ -49,48 +49,115 @@ typedef enum { */ typedef void* PD_IVector; -PD_API int PDIVecCreateNone(PD_IVector* 
ivec); +/** + * @brief Create an none int vector. It just a handler and store nothing. Used + * to get output from other api. + * @return None int vector. + */ +PD_API PD_IVector PDIVecCreateNone(); /** * @brief PDIVectorCreate create a paddle int vector - * @param [out] ivec: output int vector. - * @param [in] array: input array. - * @param [in] size: input array size. - * @param [in] copy: memory copy or just use same memory. True if copy. + * @param array: input array. + * @param size: input array size. + * @param copy: memory copy or just use same memory. True if copy. + * @param useGPU: True if use GPU * @return PD_Error */ -PD_API int PDIVectorCreate(PD_IVector* ivec, - int* array, - uint64_t size, - bool copy); +PD_API PD_IVector PDIVectorCreate(int* array, + uint64_t size, + bool copy, + bool useGPU); -PD_API int PDIVecDestroy(PD_IVector ivec); +/** + * @brief PDIVecDestroy destory an int vector. + * @param ivec vector to be destoried. + * @return PD_Error + */ +PD_API PD_Error PDIVecDestroy(PD_IVector ivec); -PD_API int PDIVectorGet(PD_IVector ivec, int** buffer); +/** + * @brief PDIVectorGet get raw buffer stored inside this int vector. It could be + * GPU memory if this int vector is stored in GPU. + * @param [in] ivec int vector + * @param [out] buffer the return buffer pointer. + * @return PD_Error + */ +PD_API PD_Error PDIVectorGet(PD_IVector ivec, int** buffer); -PD_API int PDIVectorResize(PD_IVector ivec, uint64_t size); +/** + * @brief PDIVectorResize resize the int vector. + * @param [in] ivec: int vector + * @param [in] size: size to change + * @return PD_Error + */ +PD_API PD_Error PDIVectorResize(PD_IVector ivec, uint64_t size); -PD_API int PDIVectorGetSize(PD_IVector ivec, uint64_t* size); +/** + * @brief PDIVectorGetSize get the size of int vector. + * @param [in] ivec: int vector + * @param [out] size: return size of this int vector. 
+ * @return PD_Error + */ +PD_API PD_Error PDIVectorGetSize(PD_IVector ivec, uint64_t* size); /** * Matrix functions. Return will be a PD_Error type. */ typedef void* PD_Matrix; -PD_API int PDMatCreate(PD_Matrix* mat, - uint64_t height, - uint64_t width, - bool useGpu); +/** + * @brief PDMatCreate Create a dense matrix + * @param height matrix height. + * @param width matrix width + * @param useGpu use GPU of not + * @return Matrix handler + */ +PD_API PD_Matrix PDMatCreate(uint64_t height, uint64_t width, bool useGpu); -PD_API int PDMatDestroy(PD_Matrix mat); +/** + * @brief PDMatDestroy Destroy a matrix. + * @param mat + * @return PD_Error + */ +PD_API PD_Error PDMatDestroy(PD_Matrix mat); -PD_API int PDMatCopyToRow(PD_Matrix mat, uint64_t rowID, pd_real* rowArray); +/** + * @brief PDMatCopyToRow Copy a row to matrix. + * @param mat Target Matrix + * @param rowID Index of row + * @param rowArray Row data. + * @return PD_Error + */ +PD_API PD_Error PDMatCopyToRow(PD_Matrix mat, + uint64_t rowID, + pd_real* rowArray); -PD_API int PDMatGetRow(PD_Matrix mat, uint64_t rowID, pd_real** rawRowBuffer); +/** + * @brief PDMatGetRow Get raw row buffer from matrix + * @param [in] mat Target matrix + * @param [in] rowID Index of row. + * @param [out] rawRowBuffer Row Buffer + * @return PD_Error + */ +PD_API PD_Error PDMatGetRow(PD_Matrix mat, + uint64_t rowID, + pd_real** rawRowBuffer); -PD_API int PDMatCreateNone(PD_Matrix* mat); +/** + * @brief PDMatCreateNone Create None Matrix + * @return + */ +PD_API PD_Matrix PDMatCreateNone(); -PD_API int PDMatGetShape(PD_Matrix mat, uint64_t* height, uint64_t* width); +/** + * @brief PDMatGetShape get the shape of matrix + * @param mat target matrix + * @param height The height of matrix + * @param width The width of matrix + * @return PD_Error + */ +PD_API PD_Error PDMatGetShape(PD_Matrix mat, uint64_t* height, uint64_t* width); /** * Arguments functions. Each argument means layer output. 
Arguments means a @@ -98,65 +165,185 @@ PD_API int PDMatGetShape(PD_Matrix mat, uint64_t* height, uint64_t* width); */ typedef void* PD_Arguments; -PD_API int PDArgsCreateNone(PD_Arguments* args); - -PD_API int PDArgsDestroy(PD_Arguments args); +/** + * @brief PDArgsCreateNone Create a array of arguments, which size is zero. + * @return Arguemnts + */ +PD_API PD_Arguments PDArgsCreateNone(); -PD_API int PDArgsGetSize(PD_Arguments args, uint64_t* size); +/** + * @brief PDArgsDestroy Destroy the arguments + * @param args arguments to destroy + * @return PD_Error + */ +PD_API PD_Error PDArgsDestroy(PD_Arguments args); -PD_API int PDArgsResize(PD_Arguments args, uint64_t size); +/** + * @brief PDArgsGetSize Get size of arguments array + * @param [in] args arguments array + * @param [out] size array size + * @return PD_Error + */ +PD_API PD_Error PDArgsGetSize(PD_Arguments args, uint64_t* size); -PD_API int PDArgsSetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat); +/** + * @brief PDArgsResize Resize a arguments array. + * @param args arguments array. + * @param size target size of array + * @return PD_Error + */ +PD_API PD_Error PDArgsResize(PD_Arguments args, uint64_t size); -PD_API int PDArgsGetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat); +/** + * @brief PDArgsSetValue Set value matrix of one argument in array, which index + * is `ID`. + * @param args arguments array + * @param ID array index + * @param mat matrix pointer + * @return PD_Error + */ +PD_API PD_Error PDArgsSetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat); -PD_API int PDArgsGetIds(PD_Arguments args, uint64_t ID, PD_IVector ids); +/** + * @brief PDArgsGetValue Get value matrix of one argument in array, which index + * is `ID`. 
+ * @param [in] args arguments array + * @param [in] ID array index + * @param [out] mat matrix pointer + * @return PD_Error + */ +PD_API PD_Error PDArgsGetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat); -PD_API int PDArgsSetIds(PD_Arguments args, uint64_t ID, PD_IVector ids); +/** + * @brief PDArgsGetIds Get the integer vector of one argument in array, which + * index is `ID`. + * @param args arguments array + * @param ID array index + * @param ids integer vector pointer + * @return PD_Error + */ +PD_API PD_Error PDArgsGetIds(PD_Arguments args, uint64_t ID, PD_IVector ids); -PD_API int PDArgsSetSequenceStartPos(PD_Arguments args, - uint64_t ID, - PD_IVector seqPos); +/** + * @brief PDArgsSetIds Set the integer vector of one argument in array, which + * index is `ID`. + * @param [in] args arguments array + * @param [in] ID array index + * @param [out] ids integer vector pointer + * @return PD_Error + */ +PD_API PD_Error PDArgsSetIds(PD_Arguments args, uint64_t ID, PD_IVector ids); -PD_API int PDArgsGetSequenceStartPos(PD_Arguments args, - uint64_t ID, - PD_IVector seqPos); +/** + * @brief PDArgsSetSequenceStartPos Set sequence start position vector of one + * argument in array, which index is `ID`. + * @param args arguments array + * @param ID array index + * @param seqPos sequence position array. + * @return PD_Error + */ +PD_API PD_Error PDArgsSetSequenceStartPos(PD_Arguments args, + uint64_t ID, + PD_IVector seqPos); +/** + * @brief PDArgsGetSequenceStartPos Get sequence start position vector of one + * argument in array, which index is `ID`. 
+ * @param [in] args arguments array + * @param [in] ID array index + * @param [out] seqPos sequence position array + * @return PD_Error + */ +PD_API PD_Error PDArgsGetSequenceStartPos(PD_Arguments args, + uint64_t ID, + PD_IVector seqPos); -PD_API int PDArgsSetSubSequenceStartPos(PD_Arguments args, - uint64_t ID, - PD_IVector subSeqPos); +/** + * @brief PDArgsSetSubSequenceStartPos Set sub-sequence start position vector of + * one argument in array, which index is `ID`. + * @param args arguments array + * @param ID array index + * @param subSeqPos sub-sequence start position array. + * @return PD_Error + */ +PD_API PD_Error PDArgsSetSubSequenceStartPos(PD_Arguments args, + uint64_t ID, + PD_IVector subSeqPos); -PD_API int PDArgsGetSubSequenceStartPos(PD_Arguments args, - uint64_t ID, - PD_IVector subSeqPos); +/** + * @brief PDArgsGetSubSequenceStartPos Get sub-sequence start position vector of + * one argument in array, which index is `ID`. + * @param args arguments array + * @param ID array index + * @param subSeqPos sub-sequence start position array + * @return PD_Error + */ +PD_API PD_Error PDArgsGetSubSequenceStartPos(PD_Arguments args, + uint64_t ID, + PD_IVector subSeqPos); /** * @brief GradientMachine means a neural network. */ typedef void* PD_GradientMachine; -PD_API int PDGradientMachineCreateForPredict(PD_GradientMachine* machine, - void* modelConfigProtobuf, - int size); +/** + * @brief PDGradientMachineCreateForPredict Create a gradient machine used for + * model inference. + * @param [out] machine that used for model inference. + * @param [in] modelConfigProtobuf + * @param [in] size + * @return PD_Error + */ +PD_API PD_Error PDGradientMachineCreateForPredict(PD_GradientMachine* machine, + void* modelConfigProtobuf, + int size); -PD_API int PDGradientMachineLoadParameterFromDisk(PD_GradientMachine machine, - const char* path); +/** + * @brief PDGradientMachineLoadParameterFromDisk Load parameter from disk. + * @param machine Gradient Machine. 
+ * @param path local directory path. + * @return PD_Error + */ +PD_API PD_Error PDGradientMachineLoadParameterFromDisk( + PD_GradientMachine machine, const char* path); -PD_API int PDGradientMachineForward(PD_GradientMachine machine, - PD_Arguments inArgs, - PD_Arguments outArgs, - bool isTrain); +/** + * @brief PDGradientMachineForward Forward a gradient machine + * @param machine Gradient machine + * @param inArgs input arguments + * @param outArgs output arguments + * @param isTrain is train or not + * @return PD_Error + */ +PD_API PD_Error PDGradientMachineForward(PD_GradientMachine machine, + PD_Arguments inArgs, + PD_Arguments outArgs, + bool isTrain); -PD_API int PDGradientMachineCreateSharedParam(PD_GradientMachine origin, - void* modelConfigProtobuf, - int size, - PD_GradientMachine* slave); +/** + * @brief PDGradientMachineCreateSharedParam Create a gradient machine, which + * parameters are shared from another gradient machine. + * @param [in] origin gradient machine + * @param [in] modelConfigProtobuf model config protobuf + * @param [in] size of model config buffer. + * @param [out] slave gradient machine, the output value. + * @return PD_Error + */ +PD_API PD_Error PDGradientMachineCreateSharedParam(PD_GradientMachine origin, + void* modelConfigProtobuf, + int size, + PD_GradientMachine* slave); -PD_API int PDGradientMachineDestroy(PD_GradientMachine machine); +/** + * @brief PDGradientMachineDestroy Destroy a gradient machine + * @param machine that need to destroy + * @return PD_Error + */ +PD_API PD_Error PDGradientMachineDestroy(PD_GradientMachine machine); /** * Initialize Paddle. 
*/ -PD_API int PDInit(int argc, char** argv); +PD_API PD_Error PDInit(int argc, char** argv); #ifdef __cplusplus } diff --git a/paddle/capi/Vector.cpp b/paddle/capi/Vector.cpp index af2192551370f..514d65fec8f6a 100644 --- a/paddle/capi/Vector.cpp +++ b/paddle/capi/Vector.cpp @@ -19,33 +19,26 @@ using paddle::capi::cast; extern "C" { -int PDIVecCreateNone(PD_IVector* ivec) { - if (ivec == nullptr) return kPD_NULLPTR; - auto ptr = new paddle::capi::CIVector(); - *ivec = ptr; - return kPD_NO_ERROR; -} +PD_IVector PDIVecCreateNone() { return new paddle::capi::CIVector(); } -int PDIVectorCreate(PD_IVector* ivec, int* array, uint64_t size, bool copy) { - if (ivec == nullptr) return kPD_NULLPTR; +PD_IVector PDIVectorCreate(int* array, uint64_t size, bool copy, bool useGPU) { auto ptr = new paddle::capi::CIVector(); if (copy) { - ptr->vec = paddle::IVector::create(size, false); + ptr->vec = paddle::IVector::create(size, useGPU); ptr->vec->copyFrom(array, size); } else { - ptr->vec = paddle::IVector::create(array, size, false); + ptr->vec = paddle::IVector::create(array, size, useGPU); } - *ivec = ptr; - return kPD_NO_ERROR; + return ptr; } -int PDIVecDestroy(PD_IVector ivec) { +PD_Error PDIVecDestroy(PD_IVector ivec) { if (ivec == nullptr) return kPD_NULLPTR; delete cast(ivec); return kPD_NO_ERROR; } -int PDIVectorGet(PD_IVector ivec, int** buffer) { +PD_Error PDIVectorGet(PD_IVector ivec, int** buffer) { if (ivec == nullptr || buffer == nullptr) return kPD_NULLPTR; auto v = cast(ivec); if (v->vec == nullptr) return kPD_NULLPTR; @@ -53,7 +46,7 @@ int PDIVectorGet(PD_IVector ivec, int** buffer) { return kPD_NO_ERROR; } -int PDIVectorResize(PD_IVector ivec, uint64_t size) { +PD_Error PDIVectorResize(PD_IVector ivec, uint64_t size) { if (ivec == nullptr) return kPD_NULLPTR; auto v = cast(ivec); if (v->vec == nullptr) return kPD_NULLPTR; @@ -61,7 +54,7 @@ int PDIVectorResize(PD_IVector ivec, uint64_t size) { return kPD_NO_ERROR; } -int PDIVectorGetSize(PD_IVector ivec, 
uint64_t* size) { +PD_Error PDIVectorGetSize(PD_IVector ivec, uint64_t* size) { if (ivec == nullptr) return kPD_NULLPTR; auto v = cast(ivec); if (v->vec == nullptr) return kPD_NULLPTR; diff --git a/paddle/capi/tests/test_Arguments.cpp b/paddle/capi/tests/test_Arguments.cpp index 9357f3a58468e..e015b94e12cd9 100644 --- a/paddle/capi/tests/test_Arguments.cpp +++ b/paddle/capi/tests/test_Arguments.cpp @@ -28,8 +28,7 @@ static std::vector randomBuffer(size_t bufSize) { } TEST(CAPIArguments, create) { - PD_Arguments args; - ASSERT_EQ(kPD_NO_ERROR, PDArgsCreateNone(&args)); + PD_Arguments args = PDArgsCreateNone(); uint64_t size; ASSERT_EQ(kPD_NO_ERROR, PDArgsGetSize(args, &size)); ASSERT_EQ(0UL, size); @@ -37,20 +36,17 @@ TEST(CAPIArguments, create) { } TEST(CAPIArguments, value) { - PD_Arguments args; - ASSERT_EQ(kPD_NO_ERROR, PDArgsCreateNone(&args)); + PD_Arguments args = PDArgsCreateNone(); ASSERT_EQ(kPD_NO_ERROR, PDArgsResize(args, 1)); - PD_Matrix mat; - ASSERT_EQ(kPD_NO_ERROR, PDMatCreate(&mat, 128, 64, false)); + PD_Matrix mat = PDMatCreate(128, 64, false); for (size_t i = 0; i < 128; ++i) { std::vector sampleBuf = randomBuffer(64); PDMatCopyToRow(mat, i, sampleBuf.data()); } ASSERT_EQ(kPD_NO_ERROR, PDArgsSetValue(args, 0, mat)); - PD_Matrix val; - ASSERT_EQ(kPD_NO_ERROR, PDMatCreateNone(&val)); + PD_Matrix val = PDMatCreateNone(); ASSERT_EQ(kPD_NO_ERROR, PDArgsGetValue(args, 0, val)); @@ -63,8 +59,7 @@ TEST(CAPIArguments, value) { ASSERT_EQ(row1, row2); } - PD_IVector ivec; - ASSERT_EQ(kPD_NO_ERROR, PDIVecCreateNone(&ivec)); + PD_IVector ivec = PDIVecCreateNone(); ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(ivec)); ASSERT_EQ(kPD_NO_ERROR, PDMatDestroy(val)); ASSERT_EQ(kPD_NO_ERROR, PDMatDestroy(mat)); @@ -72,17 +67,15 @@ TEST(CAPIArguments, value) { } TEST(CAPIArguments, ids) { - PD_Arguments args; - ASSERT_EQ(kPD_NO_ERROR, PDArgsCreateNone(&args)); + PD_Arguments args = PDArgsCreateNone(); ASSERT_EQ(kPD_NO_ERROR, PDArgsResize(args, 1)); PD_IVector ivec; int array[3] 
= {1, 2, 3}; - ASSERT_EQ(kPD_NO_ERROR, PDIVectorCreate(&ivec, array, 3, true)); + ivec = PDIVectorCreate(array, 3, true, false); ASSERT_EQ(kPD_NO_ERROR, PDArgsSetIds(args, 0, ivec)); - PD_IVector val; - ASSERT_EQ(kPD_NO_ERROR, PDIVecCreateNone(&val)); + PD_IVector val = PDIVecCreateNone(); ASSERT_EQ(kPD_NO_ERROR, PDArgsGetIds(args, 0, val)); ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(ivec)); ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(val)); @@ -91,17 +84,15 @@ TEST(CAPIArguments, ids) { template void testSequenceHelper(T1 setter, T2 getter) { - PD_Arguments args; - ASSERT_EQ(kPD_NO_ERROR, PDArgsCreateNone(&args)); + PD_Arguments args = PDArgsCreateNone(); ASSERT_EQ(kPD_NO_ERROR, PDArgsResize(args, 1)); PD_IVector ivec; int array[3] = {1, 2, 3}; - ASSERT_EQ(kPD_NO_ERROR, PDIVectorCreate(&ivec, array, 3, true)); + ivec = PDIVectorCreate(array, 3, true, false); ASSERT_EQ(kPD_NO_ERROR, setter(args, 0, ivec)); - PD_IVector val; - ASSERT_EQ(kPD_NO_ERROR, PDIVecCreateNone(&val)); + PD_IVector val = PDIVecCreateNone(); ASSERT_EQ(kPD_NO_ERROR, getter(args, 0, val)); uint64_t size; ASSERT_EQ(kPD_NO_ERROR, PDIVectorGetSize(val, &size)); diff --git a/paddle/capi/tests/test_GradientMachine.cpp b/paddle/capi/tests/test_GradientMachine.cpp index 427b37b085a0a..acee99bcc4b23 100644 --- a/paddle/capi/tests/test_GradientMachine.cpp +++ b/paddle/capi/tests/test_GradientMachine.cpp @@ -55,14 +55,11 @@ TEST(GradientMachine, testPredict) { PDGradientMachineCreateSharedParam( machine, &buffer[0], (int)buffer.size(), &machineSlave)); std::swap(machineSlave, machine); - PD_Arguments outArgs; - ASSERT_EQ(kPD_NO_ERROR, PDArgsCreateNone(&outArgs)); + PD_Arguments outArgs = PDArgsCreateNone(); - PD_Arguments inArgs; - ASSERT_EQ(kPD_NO_ERROR, PDArgsCreateNone(&inArgs)); + PD_Arguments inArgs = PDArgsCreateNone(); ASSERT_EQ(kPD_NO_ERROR, PDArgsResize(inArgs, 1)); - PD_Matrix mat; - ASSERT_EQ(kPD_NO_ERROR, PDMatCreate(&mat, 1, 100, false)); + PD_Matrix mat = PDMatCreate(1, 100, false); 
static_assert(std::is_same::value, ""); auto data = randomBuffer(100); diff --git a/paddle/capi/tests/test_Matrix.cpp b/paddle/capi/tests/test_Matrix.cpp index 4192dd6bfb533..1d38162add21a 100644 --- a/paddle/capi/tests/test_Matrix.cpp +++ b/paddle/capi/tests/test_Matrix.cpp @@ -16,8 +16,7 @@ limitations under the License. */ #include "gtest/gtest.h" TEST(CAPIMatrix, create) { - PD_Matrix mat; - ASSERT_EQ(kPD_NO_ERROR, PDMatCreate(&mat, 128, 32, false)); + PD_Matrix mat = PDMatCreate(128, 32, false); std::vector sampleRow; sampleRow.resize(32); for (size_t i = 0; i < sampleRow.size(); ++i) { @@ -41,7 +40,6 @@ TEST(CAPIMatrix, create) { } TEST(CAPIMatrix, createNone) { - PD_Matrix mat; - ASSERT_EQ(kPD_NO_ERROR, PDMatCreateNone(&mat)); + PD_Matrix mat = PDMatCreateNone(); ASSERT_EQ(kPD_NO_ERROR, PDMatDestroy(mat)); } diff --git a/paddle/capi/tests/test_Vector.cpp b/paddle/capi/tests/test_Vector.cpp index 547d0ef20d168..b3558fe0fdd17 100644 --- a/paddle/capi/tests/test_Vector.cpp +++ b/paddle/capi/tests/test_Vector.cpp @@ -18,8 +18,7 @@ limitations under the License. 
*/ TEST(CAPIVector, create) { PD_IVector vec; int array[3] = {1, 2, 3}; - ASSERT_EQ(kPD_NO_ERROR, PDIVectorCreate(&vec, array, 3, true)); - ASSERT_EQ(kPD_NO_ERROR, PDIVectorCreate(&vec, array, 3, false)); + vec = PDIVectorCreate(array, 3, true, false); ASSERT_EQ(kPD_NO_ERROR, PDIVectorResize(vec, 1000)); uint64_t size; ASSERT_EQ(kPD_NO_ERROR, PDIVectorGetSize(vec, &size)); @@ -27,7 +26,6 @@ TEST(CAPIVector, create) { } TEST(CAPIVector, createNone) { - PD_IVector vec; - ASSERT_EQ(kPD_NO_ERROR, PDIVecCreateNone(&vec)); + PD_IVector vec = PDIVecCreateNone(); ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(vec)); } From 5ac9c22633d3dacf3b37e56e8680a8255ebe252f Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 10 Mar 2017 17:10:14 +0800 Subject: [PATCH 25/43] Install shared lib --- paddle/capi/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt index f5827317b0d6a..7fd15e10377f8 100644 --- a/paddle/capi/CMakeLists.txt +++ b/paddle/capi/CMakeLists.txt @@ -61,7 +61,7 @@ link_paddle_exe(paddle_capi_shared) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${capi_whole_library} DESTINATION lib) install(FILES ${CAPI_HEADER} DESTINATION include/paddle) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/config.h DESTINATION include/paddle) - +install(TARGETS paddle_capi_shared DESTINATION lib) # this variable used for unittest set(PADDLE_CAPI_INC_PATH From b5288289e1b0029aae88bb4534ca1d1e6a22dac8 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 21 Mar 2017 11:15:17 +0800 Subject: [PATCH 26/43] Rename some API to C-Style --- paddle/capi/Arguments.cpp | 38 ++-- paddle/capi/CMakeLists.txt | 8 +- paddle/capi/GradientMachine.cpp | 28 +-- paddle/capi/Main.cpp | 2 +- paddle/capi/Matrix.cpp | 22 +- paddle/capi/PaddleCAPI.h | 239 ++++++--------------- paddle/capi/Vector.cpp | 17 +- paddle/capi/config.h.in | 4 + paddle/capi/error.h | 15 ++ paddle/capi/matrix.h | 91 ++++++++ paddle/capi/tests/test_Arguments.cpp | 42 ++-- 
paddle/capi/tests/test_GradientMachine.cpp | 10 +- paddle/capi/tests/test_Matrix.cpp | 17 +- paddle/capi/tests/test_Vector.cpp | 14 +- paddle/capi/vector.h | 89 ++++++++ 15 files changed, 366 insertions(+), 270 deletions(-) create mode 100644 paddle/capi/error.h create mode 100644 paddle/capi/matrix.h create mode 100644 paddle/capi/vector.h diff --git a/paddle/capi/Arguments.cpp b/paddle/capi/Arguments.cpp index 3d60165962d5d..29aa0858dd833 100644 --- a/paddle/capi/Arguments.cpp +++ b/paddle/capi/Arguments.cpp @@ -23,25 +23,25 @@ using paddle::capi::cast; extern "C" { PD_Arguments PDArgsCreateNone() { return new paddle::capi::CArguments(); } -PD_Error PDArgsDestroy(PD_Arguments args) { +paddle_error PDArgsDestroy(PD_Arguments args) { if (args == nullptr) return kPD_NULLPTR; delete castArg(args); return kPD_NO_ERROR; } -PD_Error PDArgsGetSize(PD_Arguments args, uint64_t* size) { +paddle_error PDArgsGetSize(PD_Arguments args, uint64_t* size) { if (args == nullptr || size == nullptr) return kPD_NULLPTR; *size = castArg(args)->args.size(); return kPD_NO_ERROR; } -PD_Error PDArgsResize(PD_Arguments args, uint64_t size) { +paddle_error PDArgsResize(PD_Arguments args, uint64_t size) { if (args == nullptr) return kPD_NULLPTR; castArg(args)->args.resize(size); return kPD_NO_ERROR; } -PD_Error PDArgsSetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat) { +paddle_error PDArgsSetValue(PD_Arguments args, uint64_t ID, paddle_matrix mat) { if (args == nullptr || mat == nullptr) return kPD_NULLPTR; auto m = paddle::capi::cast(mat); if (m->mat == nullptr) return kPD_NULLPTR; @@ -51,7 +51,7 @@ PD_Error PDArgsSetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat) { return kPD_NO_ERROR; } -PD_Error PDArgsGetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat) { +paddle_error PDArgsGetValue(PD_Arguments args, uint64_t ID, paddle_matrix mat) { if (args == nullptr || mat == nullptr) return kPD_NULLPTR; auto m = paddle::capi::cast(mat); auto a = castArg(args); @@ -60,7 +60,7 @@ 
PD_Error PDArgsGetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat) { return kPD_NO_ERROR; } -PD_Error PDArgsGetIds(PD_Arguments args, uint64_t ID, PD_IVector ids) { +paddle_error PDArgsGetIds(PD_Arguments args, uint64_t ID, paddle_ivector ids) { if (args == nullptr || ids == nullptr) return kPD_NULLPTR; auto iv = castIVec(ids); auto a = castArg(args); @@ -69,7 +69,7 @@ PD_Error PDArgsGetIds(PD_Arguments args, uint64_t ID, PD_IVector ids) { return kPD_NO_ERROR; } -PD_Error PDArgsSetIds(PD_Arguments args, uint64_t ID, PD_IVector ids) { +paddle_error PDArgsSetIds(PD_Arguments args, uint64_t ID, paddle_ivector ids) { //! TODO(lizhao): Complete this method. if (args == nullptr || ids == nullptr) return kPD_NULLPTR; auto iv = paddle::capi::cast(ids); @@ -80,9 +80,9 @@ PD_Error PDArgsSetIds(PD_Arguments args, uint64_t ID, PD_IVector ids) { return kPD_NO_ERROR; } -PD_Error PDArgsSetSequenceStartPos(PD_Arguments args, - uint64_t ID, - PD_IVector seqPos) { +paddle_error PDArgsSetSequenceStartPos(PD_Arguments args, + uint64_t ID, + paddle_ivector seqPos) { if (args == nullptr || seqPos == nullptr) return kPD_NULLPTR; auto iv = paddle::capi::cast(seqPos); if (iv->vec == nullptr) return kPD_NULLPTR; @@ -93,9 +93,9 @@ PD_Error PDArgsSetSequenceStartPos(PD_Arguments args, return kPD_NO_ERROR; } -PD_Error PDArgsSetSubSequenceStartPos(PD_Arguments args, - uint64_t ID, - PD_IVector subSeqPos) { +paddle_error PDArgsSetSubSequenceStartPos(PD_Arguments args, + uint64_t ID, + paddle_ivector subSeqPos) { if (args == nullptr || subSeqPos == nullptr) return kPD_NULLPTR; auto iv = paddle::capi::cast(subSeqPos); if (iv->vec == nullptr) return kPD_NULLPTR; @@ -106,9 +106,9 @@ PD_Error PDArgsSetSubSequenceStartPos(PD_Arguments args, return kPD_NO_ERROR; } -PD_Error PDArgsGetSequenceStartPos(PD_Arguments args, - uint64_t ID, - PD_IVector seqPos) { +paddle_error PDArgsGetSequenceStartPos(PD_Arguments args, + uint64_t ID, + paddle_ivector seqPos) { if (args == nullptr || seqPos == nullptr) 
return kPD_NULLPTR; auto iv = castIVec(seqPos); auto a = castArg(args); @@ -118,9 +118,9 @@ PD_Error PDArgsGetSequenceStartPos(PD_Arguments args, return kPD_NO_ERROR; } -PD_Error PDArgsGetSubSequenceStartPos(PD_Arguments args, - uint64_t ID, - PD_IVector subSeqPos) { +paddle_error PDArgsGetSubSequenceStartPos(PD_Arguments args, + uint64_t ID, + paddle_ivector subSeqPos) { if (args == nullptr || subSeqPos == nullptr) return kPD_NULLPTR; auto iv = castIVec(subSeqPos); auto a = castArg(args); diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt index 7fd15e10377f8..a2b1929e4b747 100644 --- a/paddle/capi/CMakeLists.txt +++ b/paddle/capi/CMakeLists.txt @@ -12,14 +12,14 @@ configure_file(config.h.in config.h @ONLY) # PaddleCAPI.h is the only header we exposed. It currently only used for model # inference. -set(CAPI_HEADER PaddleCAPI.h) - - +file(GLOB CAPI_HEADERS *.h) set(CAPI_PRIVATE_HEADER PaddleCAPIPrivate.h) +list(REMOVE_ITEM CAPI_HEADERS ${CAPI_PRIVATE_HEADER}) file(GLOB CAPI_SOURCES *.cpp) # building paddle_capi -add_library(paddle_capi STATIC ${CAPI_SOURCES}) +add_library(paddle_capi STATIC ${CAPI_HEADERS} ${CAPI_PRIVATE_HEADER} + ${CAPI_SOURCES}) target_include_directories(paddle_capi PUBLIC ${CMAKE_CURRENT_BINARY_DIR}) diff --git a/paddle/capi/GradientMachine.cpp b/paddle/capi/GradientMachine.cpp index ed0cfd8840935..6ca3cf7e4d8dc 100644 --- a/paddle/capi/GradientMachine.cpp +++ b/paddle/capi/GradientMachine.cpp @@ -38,9 +38,9 @@ NeuralNetwork* newCustomNerualNetwork(const std::string& name, } // namespace paddle extern "C" { -PD_Error PDGradientMachineCreateForPredict(PD_GradientMachine* machine, - void* modelConfigProtobuf, - int size) { +paddle_error PDGradientMachineCreateForPredict(PD_GradientMachine* machine, + void* modelConfigProtobuf, + int size) { if (modelConfigProtobuf == nullptr) return kPD_NULLPTR; paddle::ModelConfig config; if (!config.ParseFromArray(modelConfigProtobuf, size) || @@ -55,13 +55,13 @@ PD_Error 
PDGradientMachineCreateForPredict(PD_GradientMachine* machine, return kPD_NO_ERROR; } -PD_Error PDGradientMachineDestroy(PD_GradientMachine machine) { +paddle_error PDGradientMachineDestroy(PD_GradientMachine machine) { delete cast(machine); return kPD_NO_ERROR; } -PD_Error PDGradientMachineLoadParameterFromDisk(PD_GradientMachine machine, - const char* path) { +paddle_error PDGradientMachineLoadParameterFromDisk(PD_GradientMachine machine, + const char* path) { auto m = cast(machine); if (m == nullptr || path == nullptr || m->machine == nullptr) return kPD_NULLPTR; @@ -69,10 +69,10 @@ PD_Error PDGradientMachineLoadParameterFromDisk(PD_GradientMachine machine, return kPD_NO_ERROR; } -PD_Error PDGradientMachineForward(PD_GradientMachine machine, - PD_Arguments inArgs, - PD_Arguments outArgs, - bool isTrain) { +paddle_error PDGradientMachineForward(PD_GradientMachine machine, + PD_Arguments inArgs, + PD_Arguments outArgs, + bool isTrain) { auto m = cast(machine); auto in = paddle::capi::cast(inArgs); auto out = paddle::capi::cast(outArgs); @@ -83,10 +83,10 @@ PD_Error PDGradientMachineForward(PD_GradientMachine machine, return kPD_NO_ERROR; } -PD_Error PDGradientMachineCreateSharedParam(PD_GradientMachine origin, - void* modelConfigProtobuf, - int size, - PD_GradientMachine* slave) { +paddle_error PDGradientMachineCreateSharedParam(PD_GradientMachine origin, + void* modelConfigProtobuf, + int size, + PD_GradientMachine* slave) { auto o = cast(origin); if (origin == nullptr || slave == nullptr || o->machine == nullptr) { return kPD_NULLPTR; diff --git a/paddle/capi/Main.cpp b/paddle/capi/Main.cpp index 9314071b4bcf9..5051dff845ab5 100644 --- a/paddle/capi/Main.cpp +++ b/paddle/capi/Main.cpp @@ -29,7 +29,7 @@ static void initPaddle(int argc, char** argv) { } extern "C" { -PD_Error PDInit(int argc, char** argv) { +paddle_error PDInit(int argc, char** argv) { std::vector realArgv; realArgv.reserve(argc + 1); realArgv.push_back(strdup("")); diff --git 
a/paddle/capi/Matrix.cpp b/paddle/capi/Matrix.cpp index bc25f84344c1b..fe60832d70a48 100644 --- a/paddle/capi/Matrix.cpp +++ b/paddle/capi/Matrix.cpp @@ -18,22 +18,28 @@ limitations under the License. */ #define cast(v) paddle::capi::cast(v) extern "C" { -PD_Matrix PDMatCreate(uint64_t height, uint64_t width, bool useGpu) { +paddle_matrix paddle_matrix_create(uint64_t height, + uint64_t width, + bool useGpu) { auto ptr = new paddle::capi::CMatrix(); ptr->mat = paddle::Matrix::create(height, width, false, useGpu); return ptr; } -PD_Matrix PDMatCreateNone() { return new paddle::capi::CMatrix(); } +paddle_matrix paddle_matrix_create_none() { + return new paddle::capi::CMatrix(); +} -PD_Error PDMatDestroy(PD_Matrix mat) { +paddle_error paddle_matrix_destroy(paddle_matrix mat) { if (mat == nullptr) return kPD_NULLPTR; auto ptr = cast(mat); delete ptr; return kPD_NO_ERROR; } -PD_Error PDMatCopyToRow(PD_Matrix mat, uint64_t rowID, pd_real* rowArray) { +paddle_error paddle_matrix_set_row(paddle_matrix mat, + uint64_t rowID, + pd_real* rowArray) { if (mat == nullptr) return kPD_NULLPTR; auto ptr = cast(mat); if (ptr->mat == nullptr) return kPD_NULLPTR; @@ -48,7 +54,9 @@ PD_Error PDMatCopyToRow(PD_Matrix mat, uint64_t rowID, pd_real* rowArray) { return kPD_NO_ERROR; } -PD_Error PDMatGetRow(PD_Matrix mat, uint64_t rowID, pd_real** rawRowBuffer) { +paddle_error paddle_matrix_get_row(paddle_matrix mat, + uint64_t rowID, + pd_real** rawRowBuffer) { if (mat == nullptr) return kPD_NULLPTR; auto ptr = cast(mat); if (ptr->mat == nullptr) return kPD_NULLPTR; @@ -57,7 +65,9 @@ PD_Error PDMatGetRow(PD_Matrix mat, uint64_t rowID, pd_real** rawRowBuffer) { return kPD_NO_ERROR; } -PD_Error PDMatGetShape(PD_Matrix mat, uint64_t* height, uint64_t* width) { +paddle_error paddle_matrix_get_shape(paddle_matrix mat, + uint64_t* height, + uint64_t* width) { if (mat == nullptr) return kPD_NULLPTR; if (height != nullptr) { *height = cast(mat)->mat->getHeight(); diff --git 
a/paddle/capi/PaddleCAPI.h b/paddle/capi/PaddleCAPI.h index 94a9fc497f93a..37dfb13814b05 100644 --- a/paddle/capi/PaddleCAPI.h +++ b/paddle/capi/PaddleCAPI.h @@ -17,10 +17,9 @@ limitations under the License. */ #include #include #include "config.h" - -// Since we only support linux and macos in compile, always use clang or -// gcc 4.8+. DLL_IMPORT/DLL_EXPORT is as simple as below. -#define PD_API __attribute__((visibility("default"))) +#include "error.h" +#include "matrix.h" +#include "vector.h" #ifdef __cplusplus extern "C" { @@ -33,132 +32,6 @@ extern "C" { * NOTE: This is an experimental API, it could be changed. */ -/** - * Error Type for Paddle API. - */ -typedef enum { - kPD_NO_ERROR = 0, - kPD_NULLPTR = 1, - kPD_OUT_OF_RANGE = 2, - kPD_PROTOBUF_ERROR = 3, - kPD_UNDEFINED_ERROR = -1, -} PD_Error; - -/** - * Int Vector Functions. Return will be a PD_Error type. - */ -typedef void* PD_IVector; - -/** - * @brief Create an none int vector. It just a handler and store nothing. Used - * to get output from other api. - * @return None int vector. - */ -PD_API PD_IVector PDIVecCreateNone(); - -/** - * @brief PDIVectorCreate create a paddle int vector - * @param array: input array. - * @param size: input array size. - * @param copy: memory copy or just use same memory. True if copy. - * @param useGPU: True if use GPU - * @return PD_Error - */ -PD_API PD_IVector PDIVectorCreate(int* array, - uint64_t size, - bool copy, - bool useGPU); - -/** - * @brief PDIVecDestroy destory an int vector. - * @param ivec vector to be destoried. - * @return PD_Error - */ -PD_API PD_Error PDIVecDestroy(PD_IVector ivec); - -/** - * @brief PDIVectorGet get raw buffer stored inside this int vector. It could be - * GPU memory if this int vector is stored in GPU. - * @param [in] ivec int vector - * @param [out] buffer the return buffer pointer. - * @return PD_Error - */ -PD_API PD_Error PDIVectorGet(PD_IVector ivec, int** buffer); - -/** - * @brief PDIVectorResize resize the int vector. 
- * @param [in] ivec: int vector - * @param [in] size: size to change - * @return PD_Error - */ -PD_API PD_Error PDIVectorResize(PD_IVector ivec, uint64_t size); - -/** - * @brief PDIVectorGetSize get the size of int vector. - * @param [in] ivec: int vector - * @param [out] size: return size of this int vector. - * @return PD_Error - */ -PD_API PD_Error PDIVectorGetSize(PD_IVector ivec, uint64_t* size); - -/** - * Matrix functions. Return will be a PD_Error type. - */ -typedef void* PD_Matrix; - -/** - * @brief PDMatCreate Create a dense matrix - * @param height matrix height. - * @param width matrix width - * @param useGpu use GPU of not - * @return Matrix handler - */ -PD_API PD_Matrix PDMatCreate(uint64_t height, uint64_t width, bool useGpu); - -/** - * @brief PDMatDestroy Destroy a matrix. - * @param mat - * @return PD_Error - */ -PD_API PD_Error PDMatDestroy(PD_Matrix mat); - -/** - * @brief PDMatCopyToRow Copy a row to matrix. - * @param mat Target Matrix - * @param rowID Index of row - * @param rowArray Row data. - * @return PD_Error - */ -PD_API PD_Error PDMatCopyToRow(PD_Matrix mat, - uint64_t rowID, - pd_real* rowArray); - -/** - * @brief PDMatGetRow Get raw row buffer from matrix - * @param [in] mat Target matrix - * @param [in] rowID Index of row. - * @param [out] rawRowBuffer Row Buffer - * @return PD_Error - */ -PD_API PD_Error PDMatGetRow(PD_Matrix mat, - uint64_t rowID, - pd_real** rawRowBuffer); - -/** - * @brief PDMatCreateNone Create None Matrix - * @return - */ -PD_API PD_Matrix PDMatCreateNone(); - -/** - * @brief PDMatGetShape get the shape of matrix - * @param mat target matrix - * @param height The height of matrix - * @param width The width of matrix - * @return PD_Error - */ -PD_API PD_Error PDMatGetShape(PD_Matrix mat, uint64_t* height, uint64_t* width); - /** * Arguments functions. Each argument means layer output. Arguments means a * array of arguemnt. 
@@ -174,25 +47,25 @@ PD_API PD_Arguments PDArgsCreateNone(); /** * @brief PDArgsDestroy Destroy the arguments * @param args arguments to destroy - * @return PD_Error + * @return paddle_error */ -PD_API PD_Error PDArgsDestroy(PD_Arguments args); +PD_API paddle_error PDArgsDestroy(PD_Arguments args); /** * @brief PDArgsGetSize Get size of arguments array * @param [in] args arguments array * @param [out] size array size - * @return PD_Error + * @return paddle_error */ -PD_API PD_Error PDArgsGetSize(PD_Arguments args, uint64_t* size); +PD_API paddle_error PDArgsGetSize(PD_Arguments args, uint64_t* size); /** * @brief PDArgsResize Resize a arguments array. * @param args arguments array. * @param size target size of array - * @return PD_Error + * @return paddle_error */ -PD_API PD_Error PDArgsResize(PD_Arguments args, uint64_t size); +PD_API paddle_error PDArgsResize(PD_Arguments args, uint64_t size); /** * @brief PDArgsSetValue Set value matrix of one argument in array, which index @@ -200,9 +73,11 @@ PD_API PD_Error PDArgsResize(PD_Arguments args, uint64_t size); * @param args arguments array * @param ID array index * @param mat matrix pointer - * @return PD_Error + * @return paddle_error */ -PD_API PD_Error PDArgsSetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat); +PD_API paddle_error PDArgsSetValue(PD_Arguments args, + uint64_t ID, + paddle_matrix mat); /** * @brief PDArgsGetValue Get value matrix of one argument in array, which index @@ -210,9 +85,11 @@ PD_API PD_Error PDArgsSetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat); * @param [in] args arguments array * @param [in] ID array index * @param [out] mat matrix pointer - * @return PD_Error + * @return paddle_error */ -PD_API PD_Error PDArgsGetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat); +PD_API paddle_error PDArgsGetValue(PD_Arguments args, + uint64_t ID, + paddle_matrix mat); /** * @brief PDArgsGetIds Get the integer vector of one argument in array, which @@ -220,9 +97,11 @@ PD_API PD_Error 
PDArgsGetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat); * @param args arguments array * @param ID array index * @param ids integer vector pointer - * @return PD_Error + * @return paddle_error */ -PD_API PD_Error PDArgsGetIds(PD_Arguments args, uint64_t ID, PD_IVector ids); +PD_API paddle_error PDArgsGetIds(PD_Arguments args, + uint64_t ID, + paddle_ivector ids); /** * @brief PDArgsSetIds Set the integer vector of one argument in array, which @@ -230,9 +109,11 @@ PD_API PD_Error PDArgsGetIds(PD_Arguments args, uint64_t ID, PD_IVector ids); * @param [in] args arguments array * @param [in] ID array index * @param [out] ids integer vector pointer - * @return PD_Error + * @return paddle_error */ -PD_API PD_Error PDArgsSetIds(PD_Arguments args, uint64_t ID, PD_IVector ids); +PD_API paddle_error PDArgsSetIds(PD_Arguments args, + uint64_t ID, + paddle_ivector ids); /** * @brief PDArgsSetSequenceStartPos Set sequence start position vector of one @@ -240,22 +121,22 @@ PD_API PD_Error PDArgsSetIds(PD_Arguments args, uint64_t ID, PD_IVector ids); * @param args arguments array * @param ID array index * @param seqPos sequence position array. - * @return PD_Error + * @return paddle_error */ -PD_API PD_Error PDArgsSetSequenceStartPos(PD_Arguments args, - uint64_t ID, - PD_IVector seqPos); +PD_API paddle_error PDArgsSetSequenceStartPos(PD_Arguments args, + uint64_t ID, + paddle_ivector seqPos); /** * @brief PDArgsGetSequenceStartPos Get sequence start position vector of one * argument in array, which index is `ID`. 
* @param [in] args arguments array * @param [in] ID array index * @param [out] seqPos sequence position array - * @return PD_Error + * @return paddle_error */ -PD_API PD_Error PDArgsGetSequenceStartPos(PD_Arguments args, - uint64_t ID, - PD_IVector seqPos); +PD_API paddle_error PDArgsGetSequenceStartPos(PD_Arguments args, + uint64_t ID, + paddle_ivector seqPos); /** * @brief PDArgsSetSubSequenceStartPos Set sub-sequence start position vector of @@ -263,11 +144,11 @@ PD_API PD_Error PDArgsGetSequenceStartPos(PD_Arguments args, * @param args arguments array * @param ID array index * @param subSeqPos sub-sequence start position array. - * @return PD_Error + * @return paddle_error */ -PD_API PD_Error PDArgsSetSubSequenceStartPos(PD_Arguments args, - uint64_t ID, - PD_IVector subSeqPos); +PD_API paddle_error PDArgsSetSubSequenceStartPos(PD_Arguments args, + uint64_t ID, + paddle_ivector subSeqPos); /** * @brief PDArgsGetSubSequenceStartPos Get sub-sequence start position vector of @@ -275,11 +156,11 @@ PD_API PD_Error PDArgsSetSubSequenceStartPos(PD_Arguments args, * @param args arguments array * @param ID array index * @param subSeqPos sub-sequence start position array - * @return PD_Error + * @return paddle_error */ -PD_API PD_Error PDArgsGetSubSequenceStartPos(PD_Arguments args, - uint64_t ID, - PD_IVector subSeqPos); +PD_API paddle_error PDArgsGetSubSequenceStartPos(PD_Arguments args, + uint64_t ID, + paddle_ivector subSeqPos); /** * @brief GradientMachine means a neural network. */ @@ -291,19 +172,18 @@ typedef void* PD_GradientMachine; * @param [out] machine that used for model inference. 
* @param [in] modelConfigProtobuf * @param [in] size - * @return PD_Error + * @return paddle_error */ -PD_API PD_Error PDGradientMachineCreateForPredict(PD_GradientMachine* machine, - void* modelConfigProtobuf, - int size); +PD_API paddle_error PDGradientMachineCreateForPredict( + PD_GradientMachine* machine, void* modelConfigProtobuf, int size); /** * @brief PDGradientMachineLoadParameterFromDisk Load parameter from disk. * @param machine Gradient Machine. * @param path local directory path. - * @return PD_Error + * @return paddle_error */ -PD_API PD_Error PDGradientMachineLoadParameterFromDisk( +PD_API paddle_error PDGradientMachineLoadParameterFromDisk( PD_GradientMachine machine, const char* path); /** @@ -312,12 +192,12 @@ PD_API PD_Error PDGradientMachineLoadParameterFromDisk( * @param inArgs input arguments * @param outArgs output arguments * @param isTrain is train or not - * @return PD_Error + * @return paddle_error */ -PD_API PD_Error PDGradientMachineForward(PD_GradientMachine machine, - PD_Arguments inArgs, - PD_Arguments outArgs, - bool isTrain); +PD_API paddle_error PDGradientMachineForward(PD_GradientMachine machine, + PD_Arguments inArgs, + PD_Arguments outArgs, + bool isTrain); /** * @brief PDGradientMachineCreateSharedParam Create a gradient machine, which @@ -326,24 +206,25 @@ PD_API PD_Error PDGradientMachineForward(PD_GradientMachine machine, * @param [in] modelConfigProtobuf model config protobuf * @param [in] size of model config buffer. * @param [out] slave gradient machine, the output value. 
- * @return PD_Error + * @return paddle_error */ -PD_API PD_Error PDGradientMachineCreateSharedParam(PD_GradientMachine origin, - void* modelConfigProtobuf, - int size, - PD_GradientMachine* slave); +PD_API paddle_error +PDGradientMachineCreateSharedParam(PD_GradientMachine origin, + void* modelConfigProtobuf, + int size, + PD_GradientMachine* slave); /** * @brief PDGradientMachineDestroy Destroy a gradient machine * @param machine that need to destroy - * @return PD_Error + * @return paddle_error */ -PD_API PD_Error PDGradientMachineDestroy(PD_GradientMachine machine); +PD_API paddle_error PDGradientMachineDestroy(PD_GradientMachine machine); /** * Initialize Paddle. */ -PD_API PD_Error PDInit(int argc, char** argv); +PD_API paddle_error PDInit(int argc, char** argv); #ifdef __cplusplus } diff --git a/paddle/capi/Vector.cpp b/paddle/capi/Vector.cpp index 514d65fec8f6a..4ccb167fec21b 100644 --- a/paddle/capi/Vector.cpp +++ b/paddle/capi/Vector.cpp @@ -19,9 +19,14 @@ using paddle::capi::cast; extern "C" { -PD_IVector PDIVecCreateNone() { return new paddle::capi::CIVector(); } +paddle_ivector paddle_ivector_create_none() { + return new paddle::capi::CIVector(); +} -PD_IVector PDIVectorCreate(int* array, uint64_t size, bool copy, bool useGPU) { +paddle_ivector paddle_ivector_create(int* array, + uint64_t size, + bool copy, + bool useGPU) { auto ptr = new paddle::capi::CIVector(); if (copy) { ptr->vec = paddle::IVector::create(size, useGPU); @@ -32,13 +37,13 @@ PD_IVector PDIVectorCreate(int* array, uint64_t size, bool copy, bool useGPU) { return ptr; } -PD_Error PDIVecDestroy(PD_IVector ivec) { +paddle_error paddle_ivector_destroy(paddle_ivector ivec) { if (ivec == nullptr) return kPD_NULLPTR; delete cast(ivec); return kPD_NO_ERROR; } -PD_Error PDIVectorGet(PD_IVector ivec, int** buffer) { +paddle_error paddle_ivector_get(paddle_ivector ivec, int** buffer) { if (ivec == nullptr || buffer == nullptr) return kPD_NULLPTR; auto v = cast(ivec); if (v->vec == nullptr) 
return kPD_NULLPTR; @@ -46,7 +51,7 @@ PD_Error PDIVectorGet(PD_IVector ivec, int** buffer) { return kPD_NO_ERROR; } -PD_Error PDIVectorResize(PD_IVector ivec, uint64_t size) { +paddle_error paddle_ivector_resize(paddle_ivector ivec, uint64_t size) { if (ivec == nullptr) return kPD_NULLPTR; auto v = cast(ivec); if (v->vec == nullptr) return kPD_NULLPTR; @@ -54,7 +59,7 @@ PD_Error PDIVectorResize(PD_IVector ivec, uint64_t size) { return kPD_NO_ERROR; } -PD_Error PDIVectorGetSize(PD_IVector ivec, uint64_t* size) { +paddle_error paddle_ivector_get_size(paddle_ivector ivec, uint64_t* size) { if (ivec == nullptr) return kPD_NULLPTR; auto v = cast(ivec); if (v->vec == nullptr) return kPD_NULLPTR; diff --git a/paddle/capi/config.h.in b/paddle/capi/config.h.in index 32d8a364e0eaa..af4e80dea144f 100644 --- a/paddle/capi/config.h.in +++ b/paddle/capi/config.h.in @@ -3,4 +3,8 @@ typedef @PADDLE_FLOAT_TYPE@ pd_real; +// Since we only support linux and macos in compile, always use clang or +// gcc 4.8+. DLL_IMPORT/DLL_EXPORT is as simple as below. +#define PD_API __attribute__((visibility("default"))) + #endif diff --git a/paddle/capi/error.h b/paddle/capi/error.h new file mode 100644 index 0000000000000..8dbb6d9548721 --- /dev/null +++ b/paddle/capi/error.h @@ -0,0 +1,15 @@ +#ifndef __PADDLE_CAPI_ERROR_H__ +#define __PADDLE_CAPI_ERROR_H__ + +/** + * Error Type for Paddle API. + */ +typedef enum { + kPD_NO_ERROR = 0, + kPD_NULLPTR = 1, + kPD_OUT_OF_RANGE = 2, + kPD_PROTOBUF_ERROR = 3, + kPD_UNDEFINED_ERROR = -1, +} paddle_error; + +#endif diff --git a/paddle/capi/matrix.h b/paddle/capi/matrix.h new file mode 100644 index 0000000000000..2f6488f38386a --- /dev/null +++ b/paddle/capi/matrix.h @@ -0,0 +1,91 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#ifndef __PADDLE_CAPI_MATRIX_H__ +#define __PADDLE_CAPI_MATRIX_H__ + +#include +#include "config.h" +#include "error.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Matrix functions. Return will be a paddle_error type. + */ +typedef void* paddle_matrix; + +/** + * @brief paddle_matrix_create Create a dense matrix + * @param height matrix height. + * @param width matrix width + * @param useGpu use GPU of not + * @return Matrix handler + */ +PD_API paddle_matrix paddle_matrix_create(uint64_t height, + uint64_t width, + bool useGpu); + +/** + * @brief paddle_matrix_destroy Destroy a matrix. + * @param mat + * @return paddle_error + */ +PD_API paddle_error paddle_matrix_destroy(paddle_matrix mat); + +/** + * @brief paddle_matrix_set_row Set a row to matrix. + * @param mat Target Matrix + * @param rowID Index of row + * @param rowArray Row data. + * @return paddle_error + */ +PD_API paddle_error paddle_matrix_set_row(paddle_matrix mat, + uint64_t rowID, + pd_real* rowArray); + +/** + * @brief PDMatGetRow Get raw row buffer from matrix + * @param [in] mat Target matrix + * @param [in] rowID Index of row. 
+ * @param [out] rawRowBuffer Row Buffer + * @return paddle_error + */ +PD_API paddle_error paddle_matrix_get_row(paddle_matrix mat, + uint64_t rowID, + pd_real** rawRowBuffer); + +/** + * @brief PDMatCreateNone Create None Matrix + * @return + */ +PD_API paddle_matrix paddle_matrix_create_none(); + +/** + * @brief PDMatGetShape get the shape of matrix + * @param mat target matrix + * @param height The height of matrix + * @param width The width of matrix + * @return paddle_error + */ +PD_API paddle_error paddle_matrix_get_shape(paddle_matrix mat, + uint64_t* height, + uint64_t* width); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/paddle/capi/tests/test_Arguments.cpp b/paddle/capi/tests/test_Arguments.cpp index e015b94e12cd9..92dcf6bf9c580 100644 --- a/paddle/capi/tests/test_Arguments.cpp +++ b/paddle/capi/tests/test_Arguments.cpp @@ -39,14 +39,14 @@ TEST(CAPIArguments, value) { PD_Arguments args = PDArgsCreateNone(); ASSERT_EQ(kPD_NO_ERROR, PDArgsResize(args, 1)); - PD_Matrix mat = PDMatCreate(128, 64, false); + paddle_matrix mat = paddle_matrix_create(128, 64, false); for (size_t i = 0; i < 128; ++i) { std::vector sampleBuf = randomBuffer(64); - PDMatCopyToRow(mat, i, sampleBuf.data()); + paddle_matrix_set_row(mat, i, sampleBuf.data()); } ASSERT_EQ(kPD_NO_ERROR, PDArgsSetValue(args, 0, mat)); - PD_Matrix val = PDMatCreateNone(); + paddle_matrix val = paddle_matrix_create_none(); ASSERT_EQ(kPD_NO_ERROR, PDArgsGetValue(args, 0, val)); @@ -54,15 +54,15 @@ TEST(CAPIArguments, value) { pd_real* row1; pd_real* row2; - ASSERT_EQ(kPD_NO_ERROR, PDMatGetRow(mat, i, &row1)); - ASSERT_EQ(kPD_NO_ERROR, PDMatGetRow(val, i, &row2)); + ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_row(mat, i, &row1)); + ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_row(val, i, &row2)); ASSERT_EQ(row1, row2); } - PD_IVector ivec = PDIVecCreateNone(); - ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(ivec)); - ASSERT_EQ(kPD_NO_ERROR, PDMatDestroy(val)); - ASSERT_EQ(kPD_NO_ERROR, PDMatDestroy(mat)); + 
paddle_ivector ivec = paddle_ivector_create_none(); + ASSERT_EQ(kPD_NO_ERROR, paddle_ivector_destroy(ivec)); + ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_destroy(val)); + ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_destroy(mat)); ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(args)); } @@ -70,15 +70,15 @@ TEST(CAPIArguments, ids) { PD_Arguments args = PDArgsCreateNone(); ASSERT_EQ(kPD_NO_ERROR, PDArgsResize(args, 1)); - PD_IVector ivec; + paddle_ivector ivec; int array[3] = {1, 2, 3}; - ivec = PDIVectorCreate(array, 3, true, false); + ivec = paddle_ivector_create(array, 3, true, false); ASSERT_EQ(kPD_NO_ERROR, PDArgsSetIds(args, 0, ivec)); - PD_IVector val = PDIVecCreateNone(); + paddle_ivector val = paddle_ivector_create_none(); ASSERT_EQ(kPD_NO_ERROR, PDArgsGetIds(args, 0, val)); - ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(ivec)); - ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(val)); + ASSERT_EQ(kPD_NO_ERROR, paddle_ivector_destroy(ivec)); + ASSERT_EQ(kPD_NO_ERROR, paddle_ivector_destroy(val)); ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(args)); } @@ -87,24 +87,24 @@ void testSequenceHelper(T1 setter, T2 getter) { PD_Arguments args = PDArgsCreateNone(); ASSERT_EQ(kPD_NO_ERROR, PDArgsResize(args, 1)); - PD_IVector ivec; + paddle_ivector ivec; int array[3] = {1, 2, 3}; - ivec = PDIVectorCreate(array, 3, true, false); + ivec = paddle_ivector_create(array, 3, true, false); ASSERT_EQ(kPD_NO_ERROR, setter(args, 0, ivec)); - PD_IVector val = PDIVecCreateNone(); + paddle_ivector val = paddle_ivector_create_none(); ASSERT_EQ(kPD_NO_ERROR, getter(args, 0, val)); uint64_t size; - ASSERT_EQ(kPD_NO_ERROR, PDIVectorGetSize(val, &size)); + ASSERT_EQ(kPD_NO_ERROR, paddle_ivector_get_size(val, &size)); int* rawBuf; - ASSERT_EQ(kPD_NO_ERROR, PDIVectorGet(val, &rawBuf)); + ASSERT_EQ(kPD_NO_ERROR, paddle_ivector_get(val, &rawBuf)); for (size_t i = 0; i < size; ++i) { ASSERT_EQ(array[i], rawBuf[i]); } - ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(ivec)); - ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(val)); + ASSERT_EQ(kPD_NO_ERROR, 
paddle_ivector_destroy(ivec)); + ASSERT_EQ(kPD_NO_ERROR, paddle_ivector_destroy(val)); ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(args)); } diff --git a/paddle/capi/tests/test_GradientMachine.cpp b/paddle/capi/tests/test_GradientMachine.cpp index acee99bcc4b23..05a06445c2727 100644 --- a/paddle/capi/tests/test_GradientMachine.cpp +++ b/paddle/capi/tests/test_GradientMachine.cpp @@ -59,12 +59,12 @@ TEST(GradientMachine, testPredict) { PD_Arguments inArgs = PDArgsCreateNone(); ASSERT_EQ(kPD_NO_ERROR, PDArgsResize(inArgs, 1)); - PD_Matrix mat = PDMatCreate(1, 100, false); + paddle_matrix mat = paddle_matrix_create(1, 100, false); static_assert(std::is_same::value, ""); auto data = randomBuffer(100); pd_real* rowPtr; - ASSERT_EQ(kPD_NO_ERROR, PDMatGetRow(mat, 0, &rowPtr)); + ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_row(mat, 0, &rowPtr)); memcpy(rowPtr, data.data(), data.size() * sizeof(pd_real)); ASSERT_EQ(kPD_NO_ERROR, PDArgsSetValue(inArgs, 0, mat)); @@ -87,16 +87,16 @@ TEST(GradientMachine, testPredict) { auto matPaddle = paddleOutArgs[0].value; uint64_t height, width; - ASSERT_EQ(kPD_NO_ERROR, PDMatGetShape(mat, &height, &width)); + ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_shape(mat, &height, &width)); ASSERT_EQ(matPaddle->getHeight(), height); ASSERT_EQ(matPaddle->getWidth(), width); - ASSERT_EQ(kPD_NO_ERROR, PDMatGetRow(mat, 0, &rowPtr)); + ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_row(mat, 0, &rowPtr)); for (size_t i = 0; i < width; ++i) { ASSERT_NEAR(matPaddle->getData()[i], rowPtr[i], 1e-5); } - ASSERT_EQ(kPD_NO_ERROR, PDMatDestroy(mat)); + ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_destroy(mat)); ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(inArgs)); ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(outArgs)); std::swap(machineSlave, machine); diff --git a/paddle/capi/tests/test_Matrix.cpp b/paddle/capi/tests/test_Matrix.cpp index 1d38162add21a..1b3b881caee55 100644 --- a/paddle/capi/tests/test_Matrix.cpp +++ b/paddle/capi/tests/test_Matrix.cpp @@ -16,30 +16,31 @@ limitations under the 
License. */ #include "gtest/gtest.h" TEST(CAPIMatrix, create) { - PD_Matrix mat = PDMatCreate(128, 32, false); + paddle_matrix mat = paddle_matrix_create(128, 32, false); std::vector sampleRow; sampleRow.resize(32); for (size_t i = 0; i < sampleRow.size(); ++i) { sampleRow[i] = 1.0 / (i + 1.0); } - ASSERT_EQ(kPD_NO_ERROR, PDMatCopyToRow(mat, 0, sampleRow.data())); - ASSERT_EQ(kPD_OUT_OF_RANGE, PDMatCopyToRow(mat, 128, sampleRow.data())); + ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_set_row(mat, 0, sampleRow.data())); + ASSERT_EQ(kPD_OUT_OF_RANGE, + paddle_matrix_set_row(mat, 128, sampleRow.data())); pd_real* arrayPtr; - ASSERT_EQ(kPD_NO_ERROR, PDMatGetRow(mat, 0, &arrayPtr)); + ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_row(mat, 0, &arrayPtr)); for (size_t i = 0; i < sampleRow.size(); ++i) { ASSERT_NEAR(sampleRow[i], arrayPtr[i], 1e-5); } uint64_t height, width; - ASSERT_EQ(kPD_NO_ERROR, PDMatGetShape(mat, &height, &width)); + ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_shape(mat, &height, &width)); ASSERT_EQ(128UL, height); ASSERT_EQ(32UL, width); - ASSERT_EQ(kPD_NO_ERROR, PDMatDestroy(mat)); + ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_destroy(mat)); } TEST(CAPIMatrix, createNone) { - PD_Matrix mat = PDMatCreateNone(); - ASSERT_EQ(kPD_NO_ERROR, PDMatDestroy(mat)); + paddle_matrix mat = paddle_matrix_create_none(); + ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_destroy(mat)); } diff --git a/paddle/capi/tests/test_Vector.cpp b/paddle/capi/tests/test_Vector.cpp index b3558fe0fdd17..64c19265e3d05 100644 --- a/paddle/capi/tests/test_Vector.cpp +++ b/paddle/capi/tests/test_Vector.cpp @@ -16,16 +16,16 @@ limitations under the License. 
*/ #include "gtest/gtest.h" TEST(CAPIVector, create) { - PD_IVector vec; + paddle_ivector vec; int array[3] = {1, 2, 3}; - vec = PDIVectorCreate(array, 3, true, false); - ASSERT_EQ(kPD_NO_ERROR, PDIVectorResize(vec, 1000)); + vec = paddle_ivector_create(array, 3, true, false); + ASSERT_EQ(kPD_NO_ERROR, paddle_ivector_resize(vec, 1000)); uint64_t size; - ASSERT_EQ(kPD_NO_ERROR, PDIVectorGetSize(vec, &size)); - ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(vec)); + ASSERT_EQ(kPD_NO_ERROR, paddle_ivector_get_size(vec, &size)); + ASSERT_EQ(kPD_NO_ERROR, paddle_ivector_destroy(vec)); } TEST(CAPIVector, createNone) { - PD_IVector vec = PDIVecCreateNone(); - ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(vec)); + paddle_ivector vec = paddle_ivector_create_none(); + ASSERT_EQ(kPD_NO_ERROR, paddle_ivector_destroy(vec)); } diff --git a/paddle/capi/vector.h b/paddle/capi/vector.h new file mode 100644 index 0000000000000..a92aeff164257 --- /dev/null +++ b/paddle/capi/vector.h @@ -0,0 +1,89 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#ifndef __PADDLE_CAPI_VECTOR_H__ +#define __PADDLE_CAPI_VECTOR_H__ + +#include +#include +#include "config.h" +#include "error.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Int Vector Functions. Return will be a paddle_error type. + */ +typedef void* paddle_ivector; + +/** + * @brief Create an none int vector. It just a handler and store nothing. Used + * to get output from other api. 
+ * @return None int vector. + */ +PD_API paddle_ivector paddle_ivector_create_none(); + +/** + * @brief paddle_ivector_create create a paddle int vector + * @param array: input array. + * @param size: input array size. + * @param copy: memory copy or just use same memory. True if copy. + * @param useGPU: True if use GPU + * @return paddle_error + */ +PD_API paddle_ivector paddle_ivector_create(int* array, + uint64_t size, + bool copy, + bool useGPU); + +/** + * @brief paddle_ivector_destroy destory an int vector. + * @param ivec vector to be destoried. + * @return paddle_error + */ +PD_API paddle_error paddle_ivector_destroy(paddle_ivector ivec); + +/** + * @brief paddle_ivector_get get raw buffer stored inside this int vector. It + * could be GPU memory if this int vector is stored in GPU. + * @param [in] ivec int vector + * @param [out] buffer the return buffer pointer. + * @return paddle_error + */ +PD_API paddle_error paddle_ivector_get(paddle_ivector ivec, int** buffer); + +/** + * @brief paddle_ivector_resize resize the int vector. + * @param [in] ivec: int vector + * @param [in] size: size to change + * @return paddle_error + */ +PD_API paddle_error paddle_ivector_resize(paddle_ivector ivec, uint64_t size); + +/** + * @brief paddle_ivector_get_size get the size of int vector. + * @param [in] ivec: int vector + * @param [out] size: return size of this int vector. 
+ * @return paddle_error + */ +PD_API paddle_error paddle_ivector_get_size(paddle_ivector ivec, + uint64_t* size); + +#ifdef __cplusplus +} +#endif + +#endif From 0afd5c30a85340fa639d4f2f4805c2c8548dce58 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 21 Mar 2017 13:27:39 +0800 Subject: [PATCH 27/43] Stash --- paddle/capi/Arguments.cpp | 49 +++++++++------- paddle/capi/GradientMachine.cpp | 4 +- paddle/capi/PaddleCAPI.h | 66 +++++++++++----------- paddle/capi/tests/test_Arguments.cpp | 39 ++++++------- paddle/capi/tests/test_GradientMachine.cpp | 16 +++--- 5 files changed, 92 insertions(+), 82 deletions(-) diff --git a/paddle/capi/Arguments.cpp b/paddle/capi/Arguments.cpp index 29aa0858dd833..792b8c58a9a08 100644 --- a/paddle/capi/Arguments.cpp +++ b/paddle/capi/Arguments.cpp @@ -21,27 +21,31 @@ using paddle::capi::cast; #define castIVec(v) cast(v) extern "C" { -PD_Arguments PDArgsCreateNone() { return new paddle::capi::CArguments(); } +paddle_arguments paddle_arguments_create_none() { + return new paddle::capi::CArguments(); +} -paddle_error PDArgsDestroy(PD_Arguments args) { +paddle_error paddle_arguments_destroy(paddle_arguments args) { if (args == nullptr) return kPD_NULLPTR; delete castArg(args); return kPD_NO_ERROR; } -paddle_error PDArgsGetSize(PD_Arguments args, uint64_t* size) { +paddle_error paddle_arguments_size(paddle_arguments args, uint64_t* size) { if (args == nullptr || size == nullptr) return kPD_NULLPTR; *size = castArg(args)->args.size(); return kPD_NO_ERROR; } -paddle_error PDArgsResize(PD_Arguments args, uint64_t size) { +paddle_error paddle_arguments_resize(paddle_arguments args, uint64_t size) { if (args == nullptr) return kPD_NULLPTR; castArg(args)->args.resize(size); return kPD_NO_ERROR; } -paddle_error PDArgsSetValue(PD_Arguments args, uint64_t ID, paddle_matrix mat) { +paddle_error paddle_arguments_set_value(paddle_arguments args, + uint64_t ID, + paddle_matrix mat) { if (args == nullptr || mat == nullptr) return kPD_NULLPTR; auto m = 
paddle::capi::cast(mat); if (m->mat == nullptr) return kPD_NULLPTR; @@ -51,7 +55,9 @@ paddle_error PDArgsSetValue(PD_Arguments args, uint64_t ID, paddle_matrix mat) { return kPD_NO_ERROR; } -paddle_error PDArgsGetValue(PD_Arguments args, uint64_t ID, paddle_matrix mat) { +paddle_error paddle_arguments_value(paddle_arguments args, + uint64_t ID, + paddle_matrix mat) { if (args == nullptr || mat == nullptr) return kPD_NULLPTR; auto m = paddle::capi::cast(mat); auto a = castArg(args); @@ -60,7 +66,9 @@ paddle_error PDArgsGetValue(PD_Arguments args, uint64_t ID, paddle_matrix mat) { return kPD_NO_ERROR; } -paddle_error PDArgsGetIds(PD_Arguments args, uint64_t ID, paddle_ivector ids) { +paddle_error paddle_arguments_ids(paddle_arguments args, + uint64_t ID, + paddle_ivector ids) { if (args == nullptr || ids == nullptr) return kPD_NULLPTR; auto iv = castIVec(ids); auto a = castArg(args); @@ -69,7 +77,9 @@ paddle_error PDArgsGetIds(PD_Arguments args, uint64_t ID, paddle_ivector ids) { return kPD_NO_ERROR; } -paddle_error PDArgsSetIds(PD_Arguments args, uint64_t ID, paddle_ivector ids) { +paddle_error paddle_arguments_set_ids(paddle_arguments args, + uint64_t ID, + paddle_ivector ids) { //! TODO(lizhao): Complete this method. 
if (args == nullptr || ids == nullptr) return kPD_NULLPTR; auto iv = paddle::capi::cast(ids); @@ -80,9 +90,9 @@ paddle_error PDArgsSetIds(PD_Arguments args, uint64_t ID, paddle_ivector ids) { return kPD_NO_ERROR; } -paddle_error PDArgsSetSequenceStartPos(PD_Arguments args, - uint64_t ID, - paddle_ivector seqPos) { +paddle_error paddle_arguments_set_sequence_start_pos(paddle_arguments args, + uint64_t ID, + paddle_ivector seqPos) { if (args == nullptr || seqPos == nullptr) return kPD_NULLPTR; auto iv = paddle::capi::cast(seqPos); if (iv->vec == nullptr) return kPD_NULLPTR; @@ -93,9 +103,8 @@ paddle_error PDArgsSetSequenceStartPos(PD_Arguments args, return kPD_NO_ERROR; } -paddle_error PDArgsSetSubSequenceStartPos(PD_Arguments args, - uint64_t ID, - paddle_ivector subSeqPos) { +paddle_error paddle_arguments_set_sub_sequence_start_pos( + paddle_arguments args, uint64_t ID, paddle_ivector subSeqPos) { if (args == nullptr || subSeqPos == nullptr) return kPD_NULLPTR; auto iv = paddle::capi::cast(subSeqPos); if (iv->vec == nullptr) return kPD_NULLPTR; @@ -106,9 +115,9 @@ paddle_error PDArgsSetSubSequenceStartPos(PD_Arguments args, return kPD_NO_ERROR; } -paddle_error PDArgsGetSequenceStartPos(PD_Arguments args, - uint64_t ID, - paddle_ivector seqPos) { +paddle_error paddle_arguments_sequence_start_pos(paddle_arguments args, + uint64_t ID, + paddle_ivector seqPos) { if (args == nullptr || seqPos == nullptr) return kPD_NULLPTR; auto iv = castIVec(seqPos); auto a = castArg(args); @@ -118,9 +127,9 @@ paddle_error PDArgsGetSequenceStartPos(PD_Arguments args, return kPD_NO_ERROR; } -paddle_error PDArgsGetSubSequenceStartPos(PD_Arguments args, - uint64_t ID, - paddle_ivector subSeqPos) { +paddle_error paddle_arguments_sub_sequence_start_pos(paddle_arguments args, + uint64_t ID, + paddle_ivector subSeqPos) { if (args == nullptr || subSeqPos == nullptr) return kPD_NULLPTR; auto iv = castIVec(subSeqPos); auto a = castArg(args); diff --git a/paddle/capi/GradientMachine.cpp 
b/paddle/capi/GradientMachine.cpp index 6ca3cf7e4d8dc..9f0ffd6599fc4 100644 --- a/paddle/capi/GradientMachine.cpp +++ b/paddle/capi/GradientMachine.cpp @@ -70,8 +70,8 @@ paddle_error PDGradientMachineLoadParameterFromDisk(PD_GradientMachine machine, } paddle_error PDGradientMachineForward(PD_GradientMachine machine, - PD_Arguments inArgs, - PD_Arguments outArgs, + paddle_arguments inArgs, + paddle_arguments outArgs, bool isTrain) { auto m = cast(machine); auto in = paddle::capi::cast(inArgs); diff --git a/paddle/capi/PaddleCAPI.h b/paddle/capi/PaddleCAPI.h index 37dfb13814b05..eea7c3bd05f74 100644 --- a/paddle/capi/PaddleCAPI.h +++ b/paddle/capi/PaddleCAPI.h @@ -36,20 +36,21 @@ extern "C" { * Arguments functions. Each argument means layer output. Arguments means a * array of arguemnt. */ -typedef void* PD_Arguments; +typedef void* paddle_arguments; /** - * @brief PDArgsCreateNone Create a array of arguments, which size is zero. + * @brief paddle_arguments_create_none Create a array of arguments, which size + * is zero. * @return Arguemnts */ -PD_API PD_Arguments PDArgsCreateNone(); +PD_API paddle_arguments paddle_arguments_create_none(); /** - * @brief PDArgsDestroy Destroy the arguments + * @brief paddle_arguments_destroy Destroy the arguments * @param args arguments to destroy * @return paddle_error */ -PD_API paddle_error PDArgsDestroy(PD_Arguments args); +PD_API paddle_error paddle_arguments_destroy(paddle_arguments args); /** * @brief PDArgsGetSize Get size of arguments array @@ -57,7 +58,8 @@ PD_API paddle_error PDArgsDestroy(PD_Arguments args); * @param [out] size array size * @return paddle_error */ -PD_API paddle_error PDArgsGetSize(PD_Arguments args, uint64_t* size); +PD_API paddle_error paddle_arguments_size(paddle_arguments args, + uint64_t* size); /** * @brief PDArgsResize Resize a arguments array. 
@@ -65,7 +67,8 @@ PD_API paddle_error PDArgsGetSize(PD_Arguments args, uint64_t* size); * @param size target size of array * @return paddle_error */ -PD_API paddle_error PDArgsResize(PD_Arguments args, uint64_t size); +PD_API paddle_error paddle_arguments_resize(paddle_arguments args, + uint64_t size); /** * @brief PDArgsSetValue Set value matrix of one argument in array, which index @@ -75,9 +78,9 @@ PD_API paddle_error PDArgsResize(PD_Arguments args, uint64_t size); * @param mat matrix pointer * @return paddle_error */ -PD_API paddle_error PDArgsSetValue(PD_Arguments args, - uint64_t ID, - paddle_matrix mat); +PD_API paddle_error paddle_arguments_set_value(paddle_arguments args, + uint64_t ID, + paddle_matrix mat); /** * @brief PDArgsGetValue Get value matrix of one argument in array, which index @@ -87,9 +90,9 @@ PD_API paddle_error PDArgsSetValue(PD_Arguments args, * @param [out] mat matrix pointer * @return paddle_error */ -PD_API paddle_error PDArgsGetValue(PD_Arguments args, - uint64_t ID, - paddle_matrix mat); +PD_API paddle_error paddle_arguments_value(paddle_arguments args, + uint64_t ID, + paddle_matrix mat); /** * @brief PDArgsGetIds Get the integer vector of one argument in array, which @@ -99,9 +102,9 @@ PD_API paddle_error PDArgsGetValue(PD_Arguments args, * @param ids integer vector pointer * @return paddle_error */ -PD_API paddle_error PDArgsGetIds(PD_Arguments args, - uint64_t ID, - paddle_ivector ids); +PD_API paddle_error paddle_arguments_ids(paddle_arguments args, + uint64_t ID, + paddle_ivector ids); /** * @brief PDArgsSetIds Set the integer vector of one argument in array, which @@ -111,9 +114,9 @@ PD_API paddle_error PDArgsGetIds(PD_Arguments args, * @param [out] ids integer vector pointer * @return paddle_error */ -PD_API paddle_error PDArgsSetIds(PD_Arguments args, - uint64_t ID, - paddle_ivector ids); +PD_API paddle_error paddle_arguments_set_ids(paddle_arguments args, + uint64_t ID, + paddle_ivector ids); /** * @brief 
PDArgsSetSequenceStartPos Set sequence start position vector of one @@ -123,9 +126,8 @@ PD_API paddle_error PDArgsSetIds(PD_Arguments args, * @param seqPos sequence position array. * @return paddle_error */ -PD_API paddle_error PDArgsSetSequenceStartPos(PD_Arguments args, - uint64_t ID, - paddle_ivector seqPos); +PD_API paddle_error paddle_arguments_set_sequence_start_pos( + paddle_arguments args, uint64_t ID, paddle_ivector seqPos); /** * @brief PDArgsGetSequenceStartPos Get sequence start position vector of one * argument in array, which index is `ID`. @@ -134,9 +136,9 @@ PD_API paddle_error PDArgsSetSequenceStartPos(PD_Arguments args, * @param [out] seqPos sequence position array * @return paddle_error */ -PD_API paddle_error PDArgsGetSequenceStartPos(PD_Arguments args, - uint64_t ID, - paddle_ivector seqPos); +PD_API paddle_error paddle_arguments_sequence_start_pos(paddle_arguments args, + uint64_t ID, + paddle_ivector seqPos); /** * @brief PDArgsSetSubSequenceStartPos Set sub-sequence start position vector of @@ -146,9 +148,8 @@ PD_API paddle_error PDArgsGetSequenceStartPos(PD_Arguments args, * @param subSeqPos sub-sequence start position array. * @return paddle_error */ -PD_API paddle_error PDArgsSetSubSequenceStartPos(PD_Arguments args, - uint64_t ID, - paddle_ivector subSeqPos); +PD_API paddle_error paddle_arguments_set_sub_sequence_start_pos( + paddle_arguments args, uint64_t ID, paddle_ivector subSeqPos); /** * @brief PDArgsGetSubSequenceStartPos Get sub-sequence start position vector of @@ -158,9 +159,8 @@ PD_API paddle_error PDArgsSetSubSequenceStartPos(PD_Arguments args, * @param subSeqPos sub-sequence start position array * @return paddle_error */ -PD_API paddle_error PDArgsGetSubSequenceStartPos(PD_Arguments args, - uint64_t ID, - paddle_ivector subSeqPos); +PD_API paddle_error paddle_arguments_sub_sequence_start_pos( + paddle_arguments args, uint64_t ID, paddle_ivector subSeqPos); /** * @brief GradientMachine means a neural network. 
*/ @@ -195,8 +195,8 @@ PD_API paddle_error PDGradientMachineLoadParameterFromDisk( * @return paddle_error */ PD_API paddle_error PDGradientMachineForward(PD_GradientMachine machine, - PD_Arguments inArgs, - PD_Arguments outArgs, + paddle_arguments inArgs, + paddle_arguments outArgs, bool isTrain); /** diff --git a/paddle/capi/tests/test_Arguments.cpp b/paddle/capi/tests/test_Arguments.cpp index 92dcf6bf9c580..b445b396f3c44 100644 --- a/paddle/capi/tests/test_Arguments.cpp +++ b/paddle/capi/tests/test_Arguments.cpp @@ -28,27 +28,27 @@ static std::vector randomBuffer(size_t bufSize) { } TEST(CAPIArguments, create) { - PD_Arguments args = PDArgsCreateNone(); + paddle_arguments args = paddle_arguments_create_none(); uint64_t size; - ASSERT_EQ(kPD_NO_ERROR, PDArgsGetSize(args, &size)); + ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_size(args, &size)); ASSERT_EQ(0UL, size); - ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(args)); + ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_destroy(args)); } TEST(CAPIArguments, value) { - PD_Arguments args = PDArgsCreateNone(); - ASSERT_EQ(kPD_NO_ERROR, PDArgsResize(args, 1)); + paddle_arguments args = paddle_arguments_create_none(); + ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_resize(args, 1)); paddle_matrix mat = paddle_matrix_create(128, 64, false); for (size_t i = 0; i < 128; ++i) { std::vector sampleBuf = randomBuffer(64); paddle_matrix_set_row(mat, i, sampleBuf.data()); } - ASSERT_EQ(kPD_NO_ERROR, PDArgsSetValue(args, 0, mat)); + ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_set_value(args, 0, mat)); paddle_matrix val = paddle_matrix_create_none(); - ASSERT_EQ(kPD_NO_ERROR, PDArgsGetValue(args, 0, val)); + ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_value(args, 0, val)); for (size_t i = 0; i < 128; ++i) { pd_real* row1; @@ -63,29 +63,29 @@ TEST(CAPIArguments, value) { ASSERT_EQ(kPD_NO_ERROR, paddle_ivector_destroy(ivec)); ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_destroy(val)); ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_destroy(mat)); - ASSERT_EQ(kPD_NO_ERROR, 
PDArgsDestroy(args)); + ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_destroy(args)); } TEST(CAPIArguments, ids) { - PD_Arguments args = PDArgsCreateNone(); - ASSERT_EQ(kPD_NO_ERROR, PDArgsResize(args, 1)); + paddle_arguments args = paddle_arguments_create_none(); + ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_resize(args, 1)); paddle_ivector ivec; int array[3] = {1, 2, 3}; ivec = paddle_ivector_create(array, 3, true, false); - ASSERT_EQ(kPD_NO_ERROR, PDArgsSetIds(args, 0, ivec)); + ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_set_ids(args, 0, ivec)); paddle_ivector val = paddle_ivector_create_none(); - ASSERT_EQ(kPD_NO_ERROR, PDArgsGetIds(args, 0, val)); + ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_ids(args, 0, val)); ASSERT_EQ(kPD_NO_ERROR, paddle_ivector_destroy(ivec)); ASSERT_EQ(kPD_NO_ERROR, paddle_ivector_destroy(val)); - ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(args)); + ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_destroy(args)); } template void testSequenceHelper(T1 setter, T2 getter) { - PD_Arguments args = PDArgsCreateNone(); - ASSERT_EQ(kPD_NO_ERROR, PDArgsResize(args, 1)); + paddle_arguments args = paddle_arguments_create_none(); + ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_resize(args, 1)); paddle_ivector ivec; int array[3] = {1, 2, 3}; @@ -105,11 +105,12 @@ void testSequenceHelper(T1 setter, T2 getter) { ASSERT_EQ(kPD_NO_ERROR, paddle_ivector_destroy(ivec)); ASSERT_EQ(kPD_NO_ERROR, paddle_ivector_destroy(val)); - ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(args)); + ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_destroy(args)); } TEST(CAPIArguments, Sequence) { - testSequenceHelper(PDArgsSetSequenceStartPos, PDArgsGetSequenceStartPos); - testSequenceHelper(PDArgsSetSubSequenceStartPos, - PDArgsGetSubSequenceStartPos); + testSequenceHelper(paddle_arguments_set_sequence_start_pos, + paddle_arguments_sequence_start_pos); + testSequenceHelper(paddle_arguments_set_sub_sequence_start_pos, + paddle_arguments_sub_sequence_start_pos); } diff --git a/paddle/capi/tests/test_GradientMachine.cpp 
b/paddle/capi/tests/test_GradientMachine.cpp index 05a06445c2727..c35432288b425 100644 --- a/paddle/capi/tests/test_GradientMachine.cpp +++ b/paddle/capi/tests/test_GradientMachine.cpp @@ -55,10 +55,10 @@ TEST(GradientMachine, testPredict) { PDGradientMachineCreateSharedParam( machine, &buffer[0], (int)buffer.size(), &machineSlave)); std::swap(machineSlave, machine); - PD_Arguments outArgs = PDArgsCreateNone(); + paddle_arguments outArgs = paddle_arguments_create_none(); - PD_Arguments inArgs = PDArgsCreateNone(); - ASSERT_EQ(kPD_NO_ERROR, PDArgsResize(inArgs, 1)); + paddle_arguments inArgs = paddle_arguments_create_none(); + ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_resize(inArgs, 1)); paddle_matrix mat = paddle_matrix_create(1, 100, false); static_assert(std::is_same::value, ""); @@ -67,15 +67,15 @@ TEST(GradientMachine, testPredict) { ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_row(mat, 0, &rowPtr)); memcpy(rowPtr, data.data(), data.size() * sizeof(pd_real)); - ASSERT_EQ(kPD_NO_ERROR, PDArgsSetValue(inArgs, 0, mat)); + ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_set_value(inArgs, 0, mat)); ASSERT_EQ(kPD_NO_ERROR, PDGradientMachineForward(machine, inArgs, outArgs, false)); uint64_t sz; - ASSERT_EQ(kPD_NO_ERROR, PDArgsGetSize(outArgs, &sz)); + ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_size(outArgs, &sz)); ASSERT_EQ(1UL, sz); - ASSERT_EQ(kPD_NO_ERROR, PDArgsGetValue(outArgs, 0, mat)); + ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_value(outArgs, 0, mat)); std::vector paddleInArgs; std::vector paddleOutArgs; paddleInArgs.resize(1); @@ -97,8 +97,8 @@ TEST(GradientMachine, testPredict) { } ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_destroy(mat)); - ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(inArgs)); - ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(outArgs)); + ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_destroy(inArgs)); + ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_destroy(outArgs)); std::swap(machineSlave, machine); ASSERT_EQ(kPD_NO_ERROR, PDGradientMachineDestroy(machineSlave)); 
ASSERT_EQ(kPD_NO_ERROR, PDGradientMachineDestroy(machine)); From c5eac0ab2d54cded942f555f352e236fba0acfb9 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 24 Mar 2017 15:38:12 +0800 Subject: [PATCH 28/43] Rename API --- paddle/capi/Arguments.cpp | 44 +--- paddle/capi/Main.cpp | 2 +- paddle/capi/PaddleCAPI.h | 215 +----------------- paddle/capi/PaddleCAPIPrivate.h | 16 ++ paddle/capi/arguments.h | 130 +++++++++++ paddle/capi/error.h | 14 ++ ...adientMachine.cpp => gradient_machine.cpp} | 28 +-- paddle/capi/gradient_machine.h | 74 ++++++ paddle/capi/main.h | 19 ++ paddle/capi/tests/test_Arguments.cpp | 20 +- paddle/capi/tests/test_GradientMachine.cpp | 18 +- 11 files changed, 310 insertions(+), 270 deletions(-) create mode 100644 paddle/capi/arguments.h rename paddle/capi/{GradientMachine.cpp => gradient_machine.cpp} (76%) create mode 100644 paddle/capi/gradient_machine.h create mode 100644 paddle/capi/main.h diff --git a/paddle/capi/Arguments.cpp b/paddle/capi/Arguments.cpp index 792b8c58a9a08..d9b207af705fa 100644 --- a/paddle/capi/Arguments.cpp +++ b/paddle/capi/Arguments.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "PaddleCAPI.h" #include "PaddleCAPIPrivate.h" +#include "arguments.h" using paddle::capi::cast; @@ -92,50 +92,26 @@ paddle_error paddle_arguments_set_ids(paddle_arguments args, paddle_error paddle_arguments_set_sequence_start_pos(paddle_arguments args, uint64_t ID, + uint32_t nestedLevel, paddle_ivector seqPos) { if (args == nullptr || seqPos == nullptr) return kPD_NULLPTR; auto iv = paddle::capi::cast(seqPos); if (iv->vec == nullptr) return kPD_NULLPTR; auto a = castArg(args); - if (ID >= a->args.size()) return kPD_OUT_OF_RANGE; - a->args[ID].sequenceStartPositions = - std::make_shared(iv->vec); - return kPD_NO_ERROR; -} - -paddle_error paddle_arguments_set_sub_sequence_start_pos( - paddle_arguments args, uint64_t ID, paddle_ivector subSeqPos) { - if (args == nullptr || subSeqPos == nullptr) return kPD_NULLPTR; - auto iv = paddle::capi::cast(subSeqPos); - if (iv->vec == nullptr) return kPD_NULLPTR; - auto a = castArg(args); - if (ID >= a->args.size()) return kPD_OUT_OF_RANGE; - a->args[ID].subSequenceStartPositions = - std::make_shared(iv->vec); - return kPD_NO_ERROR; + return a->accessSeqPos(ID, nestedLevel, [&iv](paddle::ICpuGpuVectorPtr& ptr) { + ptr = std::make_shared(iv->vec); + }); } paddle_error paddle_arguments_sequence_start_pos(paddle_arguments args, uint64_t ID, + uint32_t nestedLevel, paddle_ivector seqPos) { if (args == nullptr || seqPos == nullptr) return kPD_NULLPTR; - auto iv = castIVec(seqPos); - auto a = castArg(args); - if (ID >= a->args.size()) return kPD_OUT_OF_RANGE; - paddle::Argument& arg = a->args[ID]; - iv->vec = arg.sequenceStartPositions->getMutableVector(false); - return kPD_NO_ERROR; -} - -paddle_error paddle_arguments_sub_sequence_start_pos(paddle_arguments args, - uint64_t ID, - paddle_ivector subSeqPos) { - if (args == nullptr || subSeqPos == nullptr) return kPD_NULLPTR; - auto iv = castIVec(subSeqPos); + auto iv = paddle::capi::cast(seqPos); auto a = castArg(args); - if (ID >= a->args.size()) return kPD_OUT_OF_RANGE; 
- paddle::Argument& arg = a->args[ID]; - iv->vec = arg.subSequenceStartPositions->getMutableVector(false); - return kPD_NO_ERROR; + return a->accessSeqPos(ID, nestedLevel, [&iv](paddle::ICpuGpuVectorPtr& ptr) { + iv->vec = ptr->getMutableVector(false); + }); } } diff --git a/paddle/capi/Main.cpp b/paddle/capi/Main.cpp index 5051dff845ab5..7604945de7df6 100644 --- a/paddle/capi/Main.cpp +++ b/paddle/capi/Main.cpp @@ -29,7 +29,7 @@ static void initPaddle(int argc, char** argv) { } extern "C" { -paddle_error PDInit(int argc, char** argv) { +paddle_error paddle_init(int argc, char** argv) { std::vector realArgv; realArgv.reserve(argc + 1); realArgv.push_back(strdup("")); diff --git a/paddle/capi/PaddleCAPI.h b/paddle/capi/PaddleCAPI.h index eea7c3bd05f74..1e275c5c1fab5 100644 --- a/paddle/capi/PaddleCAPI.h +++ b/paddle/capi/PaddleCAPI.h @@ -14,16 +14,6 @@ limitations under the License. */ #ifndef PADDLECAPI_H_ #define PADDLECAPI_H_ -#include -#include -#include "config.h" -#include "error.h" -#include "matrix.h" -#include "vector.h" - -#ifdef __cplusplus -extern "C" { -#endif /** * Paddle C API. It will replace SWIG as Multiple Language API for model @@ -31,203 +21,12 @@ extern "C" { * * NOTE: This is an experimental API, it could be changed. */ - -/** - * Arguments functions. Each argument means layer output. Arguments means a - * array of arguemnt. - */ -typedef void* paddle_arguments; - -/** - * @brief paddle_arguments_create_none Create a array of arguments, which size - * is zero. 
- * @return Arguemnts - */ -PD_API paddle_arguments paddle_arguments_create_none(); - -/** - * @brief paddle_arguments_destroy Destroy the arguments - * @param args arguments to destroy - * @return paddle_error - */ -PD_API paddle_error paddle_arguments_destroy(paddle_arguments args); - -/** - * @brief PDArgsGetSize Get size of arguments array - * @param [in] args arguments array - * @param [out] size array size - * @return paddle_error - */ -PD_API paddle_error paddle_arguments_size(paddle_arguments args, - uint64_t* size); - -/** - * @brief PDArgsResize Resize a arguments array. - * @param args arguments array. - * @param size target size of array - * @return paddle_error - */ -PD_API paddle_error paddle_arguments_resize(paddle_arguments args, - uint64_t size); - -/** - * @brief PDArgsSetValue Set value matrix of one argument in array, which index - * is `ID`. - * @param args arguments array - * @param ID array index - * @param mat matrix pointer - * @return paddle_error - */ -PD_API paddle_error paddle_arguments_set_value(paddle_arguments args, - uint64_t ID, - paddle_matrix mat); - -/** - * @brief PDArgsGetValue Get value matrix of one argument in array, which index - * is `ID`. - * @param [in] args arguments array - * @param [in] ID array index - * @param [out] mat matrix pointer - * @return paddle_error - */ -PD_API paddle_error paddle_arguments_value(paddle_arguments args, - uint64_t ID, - paddle_matrix mat); - -/** - * @brief PDArgsGetIds Get the integer vector of one argument in array, which - * index is `ID`. - * @param args arguments array - * @param ID array index - * @param ids integer vector pointer - * @return paddle_error - */ -PD_API paddle_error paddle_arguments_ids(paddle_arguments args, - uint64_t ID, - paddle_ivector ids); - -/** - * @brief PDArgsSetIds Set the integer vector of one argument in array, which - * index is `ID`. 
- * @param [in] args arguments array - * @param [in] ID array index - * @param [out] ids integer vector pointer - * @return paddle_error - */ -PD_API paddle_error paddle_arguments_set_ids(paddle_arguments args, - uint64_t ID, - paddle_ivector ids); - -/** - * @brief PDArgsSetSequenceStartPos Set sequence start position vector of one - * argument in array, which index is `ID`. - * @param args arguments array - * @param ID array index - * @param seqPos sequence position array. - * @return paddle_error - */ -PD_API paddle_error paddle_arguments_set_sequence_start_pos( - paddle_arguments args, uint64_t ID, paddle_ivector seqPos); -/** - * @brief PDArgsGetSequenceStartPos Get sequence start position vector of one - * argument in array, which index is `ID`. - * @param [in] args arguments array - * @param [in] ID array index - * @param [out] seqPos sequence position array - * @return paddle_error - */ -PD_API paddle_error paddle_arguments_sequence_start_pos(paddle_arguments args, - uint64_t ID, - paddle_ivector seqPos); - -/** - * @brief PDArgsSetSubSequenceStartPos Set sub-sequence start position vector of - * one argument in array, which index is `ID`. - * @param args arguments array - * @param ID array index - * @param subSeqPos sub-sequence start position array. - * @return paddle_error - */ -PD_API paddle_error paddle_arguments_set_sub_sequence_start_pos( - paddle_arguments args, uint64_t ID, paddle_ivector subSeqPos); - -/** - * @brief PDArgsGetSubSequenceStartPos Get sub-sequence start position vector of - * one argument in array, which index is `ID`. - * @param args arguments array - * @param ID array index - * @param subSeqPos sub-sequence start position array - * @return paddle_error - */ -PD_API paddle_error paddle_arguments_sub_sequence_start_pos( - paddle_arguments args, uint64_t ID, paddle_ivector subSeqPos); -/** - * @brief GradientMachine means a neural network. 
- */ -typedef void* PD_GradientMachine; - -/** - * @brief PDGradientMachineCreateForPredict Create a gradient machine used for - * model inference. - * @param [out] machine that used for model inference. - * @param [in] modelConfigProtobuf - * @param [in] size - * @return paddle_error - */ -PD_API paddle_error PDGradientMachineCreateForPredict( - PD_GradientMachine* machine, void* modelConfigProtobuf, int size); - -/** - * @brief PDGradientMachineLoadParameterFromDisk Load parameter from disk. - * @param machine Gradient Machine. - * @param path local directory path. - * @return paddle_error - */ -PD_API paddle_error PDGradientMachineLoadParameterFromDisk( - PD_GradientMachine machine, const char* path); - -/** - * @brief PDGradientMachineForward Forward a gradient machine - * @param machine Gradient machine - * @param inArgs input arguments - * @param outArgs output arguments - * @param isTrain is train or not - * @return paddle_error - */ -PD_API paddle_error PDGradientMachineForward(PD_GradientMachine machine, - paddle_arguments inArgs, - paddle_arguments outArgs, - bool isTrain); - -/** - * @brief PDGradientMachineCreateSharedParam Create a gradient machine, which - * parameters are shared from another gradient machine. - * @param [in] origin gradient machine - * @param [in] modelConfigProtobuf model config protobuf - * @param [in] size of model config buffer. - * @param [out] slave gradient machine, the output value. - * @return paddle_error - */ -PD_API paddle_error -PDGradientMachineCreateSharedParam(PD_GradientMachine origin, - void* modelConfigProtobuf, - int size, - PD_GradientMachine* slave); - -/** - * @brief PDGradientMachineDestroy Destroy a gradient machine - * @param machine that need to destroy - * @return paddle_error - */ -PD_API paddle_error PDGradientMachineDestroy(PD_GradientMachine machine); - -/** - * Initialize Paddle. 
- */ -PD_API paddle_error PDInit(int argc, char** argv); - -#ifdef __cplusplus -} -#endif +#include "arguments.h" +#include "config.h" +#include "error.h" +#include "gradient_machine.h" +#include "main.h" +#include "matrix.h" +#include "vector.h" #endif // PADDLECAPI_H_ diff --git a/paddle/capi/PaddleCAPIPrivate.h b/paddle/capi/PaddleCAPIPrivate.h index bb8baea4e1cd7..072e9a37a6310 100644 --- a/paddle/capi/PaddleCAPIPrivate.h +++ b/paddle/capi/PaddleCAPIPrivate.h @@ -49,6 +49,22 @@ struct CArguments { std::vector args; CArguments() : type(kARGUMENTS) {} + + template + paddle_error accessSeqPos(uint64_t ID, uint32_t nestedLevel, T callback) { + if (ID >= args.size()) return kPD_OUT_OF_RANGE; + switch (nestedLevel) { + case 0: + callback(args[ID].sequenceStartPositions); + break; + case 1: + callback(args[ID].subSequenceStartPositions); + break; + default: + return kPD_OUT_OF_RANGE; + } + return kPD_NO_ERROR; + } }; struct CGradientMachine { diff --git a/paddle/capi/arguments.h b/paddle/capi/arguments.h new file mode 100644 index 0000000000000..3f3594b282b6b --- /dev/null +++ b/paddle/capi/arguments.h @@ -0,0 +1,130 @@ +#ifndef __PADDLE_CAPI_ARGUMENTS_H__ +#define __PADDLE_CAPI_ARGUMENTS_H__ + +#include +#include "config.h" +#include "error.h" +#include "matrix.h" +#include "vector.h" + +/** + * Arguments functions. Each argument means layer output. Arguments means a + * array of arguemnt. + */ +typedef void* paddle_arguments; + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief paddle_arguments_create_none Create a array of arguments, which size + * is zero. 
+ * @return Arguemnts + */ +PD_API paddle_arguments paddle_arguments_create_none(); + +/** + * @brief paddle_arguments_destroy Destroy the arguments + * @param args arguments to destroy + * @return paddle_error + */ +PD_API paddle_error paddle_arguments_destroy(paddle_arguments args); + +/** + * @brief PDArgsGetSize Get size of arguments array + * @param [in] args arguments array + * @param [out] size array size + * @return paddle_error + */ +PD_API paddle_error paddle_arguments_size(paddle_arguments args, + uint64_t* size); + +/** + * @brief PDArgsResize Resize a arguments array. + * @param args arguments array. + * @param size target size of array + * @return paddle_error + */ +PD_API paddle_error paddle_arguments_resize(paddle_arguments args, + uint64_t size); + +/** + * @brief PDArgsSetValue Set value matrix of one argument in array, which index + * is `ID`. + * @param args arguments array + * @param ID array index + * @param mat matrix pointer + * @return paddle_error + */ +PD_API paddle_error paddle_arguments_set_value(paddle_arguments args, + uint64_t ID, + paddle_matrix mat); + +/** + * @brief PDArgsGetValue Get value matrix of one argument in array, which index + * is `ID`. + * @param [in] args arguments array + * @param [in] ID array index + * @param [out] mat matrix pointer + * @return paddle_error + */ +PD_API paddle_error paddle_arguments_value(paddle_arguments args, + uint64_t ID, + paddle_matrix mat); + +/** + * @brief PDArgsGetIds Get the integer vector of one argument in array, which + * index is `ID`. + * @param args arguments array + * @param ID array index + * @param ids integer vector pointer + * @return paddle_error + */ +PD_API paddle_error paddle_arguments_ids(paddle_arguments args, + uint64_t ID, + paddle_ivector ids); + +/** + * @brief PDArgsSetIds Set the integer vector of one argument in array, which + * index is `ID`. 
+ * @param [in] args arguments array + * @param [in] ID array index + * @param [out] ids integer vector pointer + * @return paddle_error + */ +PD_API paddle_error paddle_arguments_set_ids(paddle_arguments args, + uint64_t ID, + paddle_ivector ids); + +/** + * @brief PDArgsSetSequenceStartPos Set sequence start position vector of one + * argument in array, which index is `ID`. + * @param args arguments array + * @param ID array index + * @param seqPos sequence position array. + * @return paddle_error + */ +PD_API paddle_error +paddle_arguments_set_sequence_start_pos(paddle_arguments args, + uint64_t ID, + uint32_t nestedLevel, + paddle_ivector seqPos); +/** + * @brief PDArgsGetSequenceStartPos Get sequence start position vector of one + * argument in array, which index is `ID`. + * @param [in] args arguments array + * @param [in] ID array index + * @param [out] seqPos sequence position array + * @return paddle_error + */ +PD_API paddle_error paddle_arguments_sequence_start_pos(paddle_arguments args, + uint64_t ID, + uint32_t nestedLevel, + paddle_ivector seqPos); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/paddle/capi/error.h b/paddle/capi/error.h index 8dbb6d9548721..6a5907b869f60 100644 --- a/paddle/capi/error.h +++ b/paddle/capi/error.h @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + #ifndef __PADDLE_CAPI_ERROR_H__ #define __PADDLE_CAPI_ERROR_H__ diff --git a/paddle/capi/GradientMachine.cpp b/paddle/capi/gradient_machine.cpp similarity index 76% rename from paddle/capi/GradientMachine.cpp rename to paddle/capi/gradient_machine.cpp index 9f0ffd6599fc4..ab8e747ae9544 100644 --- a/paddle/capi/GradientMachine.cpp +++ b/paddle/capi/gradient_machine.cpp @@ -38,9 +38,8 @@ NeuralNetwork* newCustomNerualNetwork(const std::string& name, } // namespace paddle extern "C" { -paddle_error PDGradientMachineCreateForPredict(PD_GradientMachine* machine, - void* modelConfigProtobuf, - int size) { +paddle_error paddle_gradient_machine_create_for_inference( + paddle_gradient_machine* machine, void* modelConfigProtobuf, int size) { if (modelConfigProtobuf == nullptr) return kPD_NULLPTR; paddle::ModelConfig config; if (!config.ParseFromArray(modelConfigProtobuf, size) || @@ -55,13 +54,13 @@ paddle_error PDGradientMachineCreateForPredict(PD_GradientMachine* machine, return kPD_NO_ERROR; } -paddle_error PDGradientMachineDestroy(PD_GradientMachine machine) { +paddle_error paddle_gradient_machine_destroy(paddle_gradient_machine machine) { delete cast(machine); return kPD_NO_ERROR; } -paddle_error PDGradientMachineLoadParameterFromDisk(PD_GradientMachine machine, - const char* path) { +paddle_error paddle_gradient_machine_load_parameter_from_disk( + paddle_gradient_machine machine, const char* path) { auto m = cast(machine); if (m == nullptr || path == nullptr || m->machine == nullptr) return kPD_NULLPTR; @@ -69,10 +68,10 @@ paddle_error PDGradientMachineLoadParameterFromDisk(PD_GradientMachine machine, return kPD_NO_ERROR; } -paddle_error PDGradientMachineForward(PD_GradientMachine machine, - paddle_arguments inArgs, - paddle_arguments outArgs, - bool isTrain) { +paddle_error paddle_gradient_machine_forward(paddle_gradient_machine machine, + paddle_arguments inArgs, + paddle_arguments outArgs, + bool isTrain) { auto m = cast(machine); auto in = 
paddle::capi::cast(inArgs); auto out = paddle::capi::cast(outArgs); @@ -83,10 +82,11 @@ paddle_error PDGradientMachineForward(PD_GradientMachine machine, return kPD_NO_ERROR; } -paddle_error PDGradientMachineCreateSharedParam(PD_GradientMachine origin, - void* modelConfigProtobuf, - int size, - PD_GradientMachine* slave) { +paddle_error paddle_gradient_machine_create_shared_param( + paddle_gradient_machine origin, + void* modelConfigProtobuf, + int size, + paddle_gradient_machine* slave) { auto o = cast(origin); if (origin == nullptr || slave == nullptr || o->machine == nullptr) { return kPD_NULLPTR; diff --git a/paddle/capi/gradient_machine.h b/paddle/capi/gradient_machine.h new file mode 100644 index 0000000000000..f3cfd67c220cd --- /dev/null +++ b/paddle/capi/gradient_machine.h @@ -0,0 +1,74 @@ +#ifndef __PADDLE_CAPI_GRADIENT_MACHINE_H__ +#define __PADDLE_CAPI_GRADIENT_MACHINE_H__ +#include "arguments.h" +#include "config.h" +#include "error.h" + +#ifdef __cplusplus +extern "C" { +#endif +/** + * @brief GradientMachine means a neural network. + */ +typedef void* paddle_gradient_machine; + +/** + * @brief Create a gradient machine used for model inference. + * @param [out] machine that used for model inference. + * @param [in] modelConfigProtobuf + * @param [in] size + * @return paddle_error + */ +PD_API paddle_error paddle_gradient_machine_create_for_inference( + paddle_gradient_machine* machine, void* modelConfigProtobuf, int size); + +/** + * @brief Load parameter from disk. + * @param machine Gradient Machine. + * @param path local directory path. 
+ * @return paddle_error + */ +PD_API paddle_error paddle_gradient_machine_load_parameter_from_disk( + paddle_gradient_machine machine, const char* path); + +/** + * @brief Forward a gradient machine + * @param machine Gradient machine + * @param inArgs input arguments + * @param outArgs output arguments + * @param isTrain is train or not + * @return paddle_error + */ +PD_API paddle_error +paddle_gradient_machine_forward(paddle_gradient_machine machine, + paddle_arguments inArgs, + paddle_arguments outArgs, + bool isTrain); + +/** + * @brief Create a gradient machine, which parameters are shared from another + * gradient machine. + * @param [in] origin gradient machine + * @param [in] modelConfigProtobuf model config protobuf + * @param [in] size of model config buffer. + * @param [out] slave gradient machine, the output value. + * @return paddle_error + */ +PD_API paddle_error +paddle_gradient_machine_create_shared_param(paddle_gradient_machine origin, + void* modelConfigProtobuf, + int size, + paddle_gradient_machine* slave); + +/** + * @brief Destroy a gradient machine + * @param machine that need to destroy + * @return paddle_error + */ +PD_API paddle_error +paddle_gradient_machine_destroy(paddle_gradient_machine machine); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/paddle/capi/main.h b/paddle/capi/main.h new file mode 100644 index 0000000000000..814c8cce24444 --- /dev/null +++ b/paddle/capi/main.h @@ -0,0 +1,19 @@ +#ifndef __PADDLE_CAPI_MAIN_H__ +#define __PADDLE_CAPI_MAIN_H__ +#include "config.h" +#include "error.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Initialize Paddle. 
+ */ +PD_API paddle_error paddle_init(int argc, char** argv); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/paddle/capi/tests/test_Arguments.cpp b/paddle/capi/tests/test_Arguments.cpp index b445b396f3c44..60fa57517fbb7 100644 --- a/paddle/capi/tests/test_Arguments.cpp +++ b/paddle/capi/tests/test_Arguments.cpp @@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include #include "PaddleCAPI.h" #include "gtest/gtest.h" #include "paddle/utils/ThreadLocal.h" @@ -109,8 +110,19 @@ void testSequenceHelper(T1 setter, T2 getter) { } TEST(CAPIArguments, Sequence) { - testSequenceHelper(paddle_arguments_set_sequence_start_pos, - paddle_arguments_sequence_start_pos); - testSequenceHelper(paddle_arguments_set_sub_sequence_start_pos, - paddle_arguments_sub_sequence_start_pos); + auto testSequence = [](uint32_t nestedLevel) { + testSequenceHelper(std::bind(paddle_arguments_set_sequence_start_pos, + std::placeholders::_1, + std::placeholders::_2, + nestedLevel, + std::placeholders::_3), + std::bind(paddle_arguments_sequence_start_pos, + std::placeholders::_1, + std::placeholders::_2, + nestedLevel, + std::placeholders::_3)); + }; + for (uint32_t i = 0; i < 2; ++i) { // test seq and sub-seq. 
+ testSequence(i); + } } diff --git a/paddle/capi/tests/test_GradientMachine.cpp b/paddle/capi/tests/test_GradientMachine.cpp index c35432288b425..3e8ba8e9d8f54 100644 --- a/paddle/capi/tests/test_GradientMachine.cpp +++ b/paddle/capi/tests/test_GradientMachine.cpp @@ -36,10 +36,10 @@ TEST(GradientMachine, testPredict) { paddle::TrainerConfigHelper config("./test_predict_network.py"); std::string buffer; ASSERT_TRUE(config.getModelConfig().SerializeToString(&buffer)); - PD_GradientMachine machine; + paddle_gradient_machine machine; ASSERT_EQ(kPD_NO_ERROR, - PDGradientMachineCreateForPredict( + paddle_gradient_machine_create_for_inference( &machine, &buffer[0], (int)buffer.size())); std::unique_ptr gm( paddle::GradientMachine::create(config.getModelConfig())); @@ -48,11 +48,11 @@ TEST(GradientMachine, testPredict) { gm->saveParameters("./"); ASSERT_EQ(kPD_NO_ERROR, - PDGradientMachineLoadParameterFromDisk(machine, "./")); + paddle_gradient_machine_load_parameter_from_disk(machine, "./")); - PD_GradientMachine machineSlave; + paddle_gradient_machine machineSlave; ASSERT_EQ(kPD_NO_ERROR, - PDGradientMachineCreateSharedParam( + paddle_gradient_machine_create_shared_param( machine, &buffer[0], (int)buffer.size(), &machineSlave)); std::swap(machineSlave, machine); paddle_arguments outArgs = paddle_arguments_create_none(); @@ -69,7 +69,7 @@ TEST(GradientMachine, testPredict) { ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_set_value(inArgs, 0, mat)); ASSERT_EQ(kPD_NO_ERROR, - PDGradientMachineForward(machine, inArgs, outArgs, false)); + paddle_gradient_machine_forward(machine, inArgs, outArgs, false)); uint64_t sz; ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_size(outArgs, &sz)); @@ -100,15 +100,15 @@ TEST(GradientMachine, testPredict) { ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_destroy(inArgs)); ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_destroy(outArgs)); std::swap(machineSlave, machine); - ASSERT_EQ(kPD_NO_ERROR, PDGradientMachineDestroy(machineSlave)); - ASSERT_EQ(kPD_NO_ERROR, 
PDGradientMachineDestroy(machine)); + ASSERT_EQ(kPD_NO_ERROR, paddle_gradient_machine_destroy(machineSlave)); + ASSERT_EQ(kPD_NO_ERROR, paddle_gradient_machine_destroy(machine)); } int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); std::vector argvs; argvs.push_back(strdup("--use_gpu=false")); - PDInit((int)argvs.size(), argvs.data()); + paddle_init((int)argvs.size(), argvs.data()); for (auto each : argvs) { free(each); } From 58e5b87831a194bb0a1652314b0d2a81a7040bee Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 24 Mar 2017 15:40:43 +0800 Subject: [PATCH 29/43] Add license --- paddle/capi/arguments.h | 14 ++++++++++++++ paddle/capi/gradient_machine.h | 14 ++++++++++++++ paddle/capi/main.h | 14 ++++++++++++++ 3 files changed, 42 insertions(+) diff --git a/paddle/capi/arguments.h b/paddle/capi/arguments.h index 3f3594b282b6b..1bb6516ea0235 100644 --- a/paddle/capi/arguments.h +++ b/paddle/capi/arguments.h @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + #ifndef __PADDLE_CAPI_ARGUMENTS_H__ #define __PADDLE_CAPI_ARGUMENTS_H__ diff --git a/paddle/capi/gradient_machine.h b/paddle/capi/gradient_machine.h index f3cfd67c220cd..36c1a2b1b4b91 100644 --- a/paddle/capi/gradient_machine.h +++ b/paddle/capi/gradient_machine.h @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + #ifndef __PADDLE_CAPI_GRADIENT_MACHINE_H__ #define __PADDLE_CAPI_GRADIENT_MACHINE_H__ #include "arguments.h" diff --git a/paddle/capi/main.h b/paddle/capi/main.h index 814c8cce24444..893ebcbd58dd2 100644 --- a/paddle/capi/main.h +++ b/paddle/capi/main.h @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + #ifndef __PADDLE_CAPI_MAIN_H__ #define __PADDLE_CAPI_MAIN_H__ #include "config.h" From d49c6274ca2f7dafeaa116bc422f7d0c0c67f96b Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 24 Mar 2017 16:07:05 +0800 Subject: [PATCH 30/43] GNU Style API --- paddle/capi/Arguments.cpp | 2 +- paddle/capi/CMakeLists.txt | 4 ++-- paddle/capi/Main.cpp | 4 ++-- paddle/capi/Matrix.cpp | 4 ++-- paddle/capi/Vector.cpp | 4 ++-- paddle/capi/{PaddleCAPI.h => capi.h} | 4 ++-- paddle/capi/{PaddleCAPIPrivate.h => capi_private.h} | 2 +- paddle/capi/gradient_machine.cpp | 4 ++-- paddle/capi/tests/test_Arguments.cpp | 2 +- paddle/capi/tests/test_GradientMachine.cpp | 2 +- paddle/capi/tests/test_Matrix.cpp | 2 +- paddle/capi/tests/test_Vector.cpp | 2 +- 12 files changed, 18 insertions(+), 18 deletions(-) rename paddle/capi/{PaddleCAPI.h => capi.h} (94%) rename paddle/capi/{PaddleCAPIPrivate.h => capi_private.h} (98%) diff --git a/paddle/capi/Arguments.cpp b/paddle/capi/Arguments.cpp index d9b207af705fa..2954f522c95fa 100644 --- a/paddle/capi/Arguments.cpp +++ b/paddle/capi/Arguments.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "PaddleCAPIPrivate.h" #include "arguments.h" +#include "capi_private.h" using paddle::capi::cast; diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt index a2b1929e4b747..1b52a79cebb12 100644 --- a/paddle/capi/CMakeLists.txt +++ b/paddle/capi/CMakeLists.txt @@ -13,7 +13,7 @@ configure_file(config.h.in config.h @ONLY) # PaddleCAPI.h is the only header we exposed. It currently only used for model # inference. file(GLOB CAPI_HEADERS *.h) -set(CAPI_PRIVATE_HEADER PaddleCAPIPrivate.h) +set(CAPI_PRIVATE_HEADER capi_private.h) list(REMOVE_ITEM CAPI_HEADERS ${CAPI_PRIVATE_HEADER}) file(GLOB CAPI_SOURCES *.cpp) @@ -59,7 +59,7 @@ link_paddle_exe(paddle_capi_shared) # install library & headers. 
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${capi_whole_library} DESTINATION lib) -install(FILES ${CAPI_HEADER} DESTINATION include/paddle) +install(FILES ${CAPI_HEADERS} DESTINATION include/paddle) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/config.h DESTINATION include/paddle) install(TARGETS paddle_capi_shared DESTINATION lib) diff --git a/paddle/capi/Main.cpp b/paddle/capi/Main.cpp index 7604945de7df6..7f24561e9aafc 100644 --- a/paddle/capi/Main.cpp +++ b/paddle/capi/Main.cpp @@ -16,8 +16,8 @@ limitations under the License. */ #include #include #include -#include "PaddleCAPI.h" -#include "PaddleCAPIPrivate.h" +#include "capi_private.h" +#include "main.h" #include "paddle/trainer/TrainerConfigHelper.h" #include "paddle/utils/Excepts.h" #include "paddle/utils/PythonUtil.h" diff --git a/paddle/capi/Matrix.cpp b/paddle/capi/Matrix.cpp index fe60832d70a48..85269e1885467 100644 --- a/paddle/capi/Matrix.cpp +++ b/paddle/capi/Matrix.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "PaddleCAPI.h" -#include "PaddleCAPIPrivate.h" +#include "capi_private.h" #include "hl_cuda.h" +#include "matrix.h" #define cast(v) paddle::capi::cast(v) extern "C" { diff --git a/paddle/capi/Vector.cpp b/paddle/capi/Vector.cpp index 4ccb167fec21b..564708e963b40 100644 --- a/paddle/capi/Vector.cpp +++ b/paddle/capi/Vector.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "PaddleCAPI.h" -#include "PaddleCAPIPrivate.h" +#include "capi_private.h" +#include "vector.h" using paddle::capi::cast; diff --git a/paddle/capi/PaddleCAPI.h b/paddle/capi/capi.h similarity index 94% rename from paddle/capi/PaddleCAPI.h rename to paddle/capi/capi.h index 1e275c5c1fab5..4097a1a35a643 100644 --- a/paddle/capi/PaddleCAPI.h +++ b/paddle/capi/capi.h @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#ifndef PADDLECAPI_H_ -#define PADDLECAPI_H_ +#ifndef __PADDLE_CAPI_H__ +#define __PADDLE_CAPI_H__ /** * Paddle C API. It will replace SWIG as Multiple Language API for model diff --git a/paddle/capi/PaddleCAPIPrivate.h b/paddle/capi/capi_private.h similarity index 98% rename from paddle/capi/PaddleCAPIPrivate.h rename to paddle/capi/capi_private.h index 072e9a37a6310..c7cdbd5f6f347 100644 --- a/paddle/capi/PaddleCAPIPrivate.h +++ b/paddle/capi/capi_private.h @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "PaddleCAPI.h" +#include "capi.h" #include "paddle/gserver/gradientmachines/GradientMachine.h" #include "paddle/math/Matrix.h" #include "paddle/math/Vector.h" diff --git a/paddle/capi/gradient_machine.cpp b/paddle/capi/gradient_machine.cpp index ab8e747ae9544..6e7740a455b5d 100644 --- a/paddle/capi/gradient_machine.cpp +++ b/paddle/capi/gradient_machine.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "PaddleCAPI.h" -#include "PaddleCAPIPrivate.h" +#include "gradient_machine.h" +#include "capi_private.h" #include "paddle/gserver/gradientmachines/NeuralNetwork.h" #define cast(v) paddle::capi::cast(v) diff --git a/paddle/capi/tests/test_Arguments.cpp b/paddle/capi/tests/test_Arguments.cpp index 60fa57517fbb7..e6e4ac9937e5e 100644 --- a/paddle/capi/tests/test_Arguments.cpp +++ b/paddle/capi/tests/test_Arguments.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include "PaddleCAPI.h" +#include "capi.h" #include "gtest/gtest.h" #include "paddle/utils/ThreadLocal.h" diff --git a/paddle/capi/tests/test_GradientMachine.cpp b/paddle/capi/tests/test_GradientMachine.cpp index 3e8ba8e9d8f54..b37fe83a30bd1 100644 --- a/paddle/capi/tests/test_GradientMachine.cpp +++ b/paddle/capi/tests/test_GradientMachine.cpp @@ -18,7 +18,7 @@ limitations under the License. */ #include #include #include -#include "PaddleCAPI.h" +#include "capi.h" #include "paddle/utils/ThreadLocal.h" static std::vector randomBuffer(size_t bufSize) { diff --git a/paddle/capi/tests/test_Matrix.cpp b/paddle/capi/tests/test_Matrix.cpp index 1b3b881caee55..162df448d2b27 100644 --- a/paddle/capi/tests/test_Matrix.cpp +++ b/paddle/capi/tests/test_Matrix.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "PaddleCAPI.h" +#include "capi.h" #include "gtest/gtest.h" TEST(CAPIMatrix, create) { diff --git a/paddle/capi/tests/test_Vector.cpp b/paddle/capi/tests/test_Vector.cpp index 64c19265e3d05..c5c57b7288d24 100644 --- a/paddle/capi/tests/test_Vector.cpp +++ b/paddle/capi/tests/test_Vector.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ -#include "PaddleCAPI.h" +#include "capi.h" #include "gtest/gtest.h" TEST(CAPIVector, create) { From 470bbcf9e5bf05a3dc089da07009175f206ab9b8 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 24 Mar 2017 17:23:53 +0800 Subject: [PATCH 31/43] Add example --- paddle/capi/Matrix.cpp | 47 ++++++++- paddle/capi/config.h.in | 2 +- paddle/capi/error.h | 1 + paddle/capi/examples/.gitignore | 2 + paddle/capi/examples/common/common.h | 26 +++++ paddle/capi/examples/dense/CMakeLists.txt | 6 ++ .../capi/examples/dense/convert_protobin.sh | 2 + paddle/capi/examples/dense/main.c | 63 ++++++++++++ paddle/capi/examples/dense/trainer_config.py | 18 ++++ paddle/capi/examples/multi_thread/.gitignore | 73 ++++++++++++++ .../capi/examples/multi_thread/CMakeLists.txt | 8 ++ .../examples/multi_thread/convert_protobin.sh | 1 + paddle/capi/examples/multi_thread/main.c | 96 +++++++++++++++++++ .../examples/multi_thread/trainer_config.py | 1 + paddle/capi/examples/sparse_binary/.gitignore | 73 ++++++++++++++ .../examples/sparse_binary/CMakeLists.txt | 7 ++ .../sparse_binary/convert_protobin.sh | 1 + paddle/capi/examples/sparse_binary/main.c | 64 +++++++++++++ .../examples/sparse_binary/trainer_config.py | 1 + paddle/capi/gradient_machine.cpp | 8 ++ paddle/capi/gradient_machine.h | 3 + paddle/capi/matrix.h | 38 +++++++- paddle/capi/tests/test_Arguments.cpp | 12 +-- paddle/capi/tests/test_GradientMachine.cpp | 12 +-- paddle/capi/tests/test_Matrix.cpp | 4 +- 25 files changed, 550 insertions(+), 19 deletions(-) create mode 100644 paddle/capi/examples/.gitignore create mode 100644 paddle/capi/examples/common/common.h create mode 100644 paddle/capi/examples/dense/CMakeLists.txt create mode 100755 paddle/capi/examples/dense/convert_protobin.sh create mode 100644 paddle/capi/examples/dense/main.c create mode 100644 paddle/capi/examples/dense/trainer_config.py create mode 100644 
paddle/capi/examples/multi_thread/.gitignore create mode 100644 paddle/capi/examples/multi_thread/CMakeLists.txt create mode 120000 paddle/capi/examples/multi_thread/convert_protobin.sh create mode 100644 paddle/capi/examples/multi_thread/main.c create mode 120000 paddle/capi/examples/multi_thread/trainer_config.py create mode 100644 paddle/capi/examples/sparse_binary/.gitignore create mode 100644 paddle/capi/examples/sparse_binary/CMakeLists.txt create mode 120000 paddle/capi/examples/sparse_binary/convert_protobin.sh create mode 100644 paddle/capi/examples/sparse_binary/main.c create mode 120000 paddle/capi/examples/sparse_binary/trainer_config.py diff --git a/paddle/capi/Matrix.cpp b/paddle/capi/Matrix.cpp index 85269e1885467..d898ebe2612d7 100644 --- a/paddle/capi/Matrix.cpp +++ b/paddle/capi/Matrix.cpp @@ -39,7 +39,7 @@ paddle_error paddle_matrix_destroy(paddle_matrix mat) { paddle_error paddle_matrix_set_row(paddle_matrix mat, uint64_t rowID, - pd_real* rowArray) { + paddle_real* rowArray) { if (mat == nullptr) return kPD_NULLPTR; auto ptr = cast(mat); if (ptr->mat == nullptr) return kPD_NULLPTR; @@ -56,7 +56,7 @@ paddle_error paddle_matrix_set_row(paddle_matrix mat, paddle_error paddle_matrix_get_row(paddle_matrix mat, uint64_t rowID, - pd_real** rawRowBuffer) { + paddle_real** rawRowBuffer) { if (mat == nullptr) return kPD_NULLPTR; auto ptr = cast(mat); if (ptr->mat == nullptr) return kPD_NULLPTR; @@ -78,3 +78,46 @@ paddle_error paddle_matrix_get_shape(paddle_matrix mat, return kPD_NO_ERROR; } } + +paddle_matrix paddle_matrix_create_sparse( + uint64_t height, uint64_t width, uint64_t nnz, bool isBinary, bool useGpu) { + auto ptr = new paddle::capi::CMatrix(); + ptr->mat = paddle::Matrix::createSparseMatrix( + height, + width, + nnz, + isBinary ? 
paddle::NO_VALUE : paddle::FLOAT_VALUE, + paddle::SPARSE_CSR, + false, + useGpu); + return ptr; +} + +paddle_error paddle_matrix_sparse_copy_from(paddle_matrix mat, + int* rowArray, + uint64_t rowSize, + int* colArray, + uint64_t colSize, + float* valueArray, + uint64_t valueSize) { + if (mat == nullptr) return kPD_NULLPTR; + auto ptr = cast(mat); + if (rowArray == nullptr || colArray == nullptr || + (valueSize != 0 && valueArray == nullptr) || ptr->mat == nullptr) { + return kPD_NULLPTR; + } + if (auto sparseMat = dynamic_cast(ptr->mat.get())) { + std::vector row(rowSize); + row.assign(rowArray, rowArray + rowSize); + std::vector col(colSize); + col.assign(colArray, colArray + colSize); + std::vector val(valueSize); + if (valueSize) { + val.assign(valueArray, valueArray + valueSize); + } + sparseMat->copyFrom(row, col, val); + return kPD_NO_ERROR; + } else { + return kPD_NOT_SUPPORTED; + } +} diff --git a/paddle/capi/config.h.in b/paddle/capi/config.h.in index af4e80dea144f..d205307588eb6 100644 --- a/paddle/capi/config.h.in +++ b/paddle/capi/config.h.in @@ -1,7 +1,7 @@ #ifndef __PADDLE_PADDLE_CAPI_CONFIG_H_INCLUDED__ #define __PADDLE_PADDLE_CAPI_CONFIG_H_INCLUDED__ -typedef @PADDLE_FLOAT_TYPE@ pd_real; +typedef @PADDLE_FLOAT_TYPE@ paddle_real; // Since we only support linux and macos in compile, always use clang or // gcc 4.8+. DLL_IMPORT/DLL_EXPORT is as simple as below. 
diff --git a/paddle/capi/error.h b/paddle/capi/error.h index 6a5907b869f60..44d8c2040d1aa 100644 --- a/paddle/capi/error.h +++ b/paddle/capi/error.h @@ -23,6 +23,7 @@ typedef enum { kPD_NULLPTR = 1, kPD_OUT_OF_RANGE = 2, kPD_PROTOBUF_ERROR = 3, + kPD_NOT_SUPPORTED = 4, kPD_UNDEFINED_ERROR = -1, } paddle_error; diff --git a/paddle/capi/examples/.gitignore b/paddle/capi/examples/.gitignore new file mode 100644 index 0000000000000..2caa0a5a298d8 --- /dev/null +++ b/paddle/capi/examples/.gitignore @@ -0,0 +1,2 @@ +*.bin +build-* diff --git a/paddle/capi/examples/common/common.h b/paddle/capi/examples/common/common.h new file mode 100644 index 0000000000000..a78522e4a7c3c --- /dev/null +++ b/paddle/capi/examples/common/common.h @@ -0,0 +1,26 @@ +#ifndef __CAPI_EXAMPLE_COMMON_H__ +#define __CAPI_EXAMPLE_COMMON_H__ +#include +#include + +#define CHECK(stmt) \ + do { \ + paddle_error __err__ = stmt; \ + if (__err__ != kPD_NO_ERROR) { \ + fprintf(stderr, "Invoke paddle error %d \n" #stmt, __err__); \ + exit(__err__); \ + } \ + } while (0) + +void* read_config(const char* filename, long* size) { + FILE* file = fopen(filename, "r"); + if (file == NULL) return NULL; + fseek(file, 0L, SEEK_END); + *size = ftell(file); + fseek(file, 0L, SEEK_SET); + void* buf = malloc(*size); + fread(buf, 1, *size, file); + fclose(file); + return buf; +} +#endif diff --git a/paddle/capi/examples/dense/CMakeLists.txt b/paddle/capi/examples/dense/CMakeLists.txt new file mode 100644 index 0000000000000..008a488fd9e6f --- /dev/null +++ b/paddle/capi/examples/dense/CMakeLists.txt @@ -0,0 +1,6 @@ +project(dense) +cmake_minimum_required(VERSION 2.8) +aux_source_directory(. 
SRC_LIST) +add_executable(${PROJECT_NAME} ${SRC_LIST}) +set_property(TARGET ${PROJECT_NAME} PROPERTY C_STANDARD 99) +target_link_libraries(${PROJECT_NAME} -lpaddle_capi_shared) diff --git a/paddle/capi/examples/dense/convert_protobin.sh b/paddle/capi/examples/dense/convert_protobin.sh new file mode 100755 index 0000000000000..30ffc316ecb76 --- /dev/null +++ b/paddle/capi/examples/dense/convert_protobin.sh @@ -0,0 +1,2 @@ +#!/bin/bash +python -m paddle.utils.dump_config trainer_config.py '' --binary > trainer_config.bin diff --git a/paddle/capi/examples/dense/main.c b/paddle/capi/examples/dense/main.c new file mode 100644 index 0000000000000..4dddd65bbfb3c --- /dev/null +++ b/paddle/capi/examples/dense/main.c @@ -0,0 +1,63 @@ +#include +#include +#include "../common/common.h" + +#define CONFIG_BIN "./trainer_config.bin" + +int main() { + // Initalize Paddle + char* argv[] = {"--use_gpu=False"}; + CHECK(paddle_init(1, (char**)argv)); + + // Reading config binary file. It is generated by `convert_protobin.sh` + long size; + void* buf = read_config(CONFIG_BIN, &size); + + // Create a gradient machine for inference. + paddle_gradient_machine machine; + CHECK(paddle_gradient_machine_create_for_inference(&machine, buf, (int)size)); + CHECK(paddle_gradient_machine_randomize_param(machine)); + + // Loading parameter. Uncomment the following line and change the directory. + // CHECK(paddle_gradient_machine_load_parameter_from_disk(machine, + // "./some_where_to_params")); + paddle_arguments in_args = paddle_arguments_create_none(); + + // There is only one input of this network. + CHECK(paddle_arguments_resize(in_args, 1)); + + // Create input matrix. + paddle_matrix mat = paddle_matrix_create(/* sample_num */ 1, + /* size */ 784, + /* useGPU */ false); + srand(time(0)); + paddle_real* array; + + // Get First row. 
+ CHECK(paddle_matrix_get_row(mat, 0, &array)); + + for (int i = 0; i < 784; ++i) { + array[i] = rand() / ((float)RAND_MAX); + } + + CHECK(paddle_arguments_set_value(in_args, 0, mat)); + + paddle_arguments out_args = paddle_arguments_create_none(); + CHECK(paddle_gradient_machine_forward(machine, + in_args, + out_args, + /* isTrain */ false)); + paddle_matrix prob = paddle_matrix_create_none(); + + CHECK(paddle_arguments_value(out_args, 0, prob)); + + CHECK(paddle_matrix_get_row(prob, 0, &array)); + + printf("Prob: "); + for (int i = 0; i < 10; ++i) { + printf("%.2f ", array[i]); + } + printf("\n"); + + return 0; +} diff --git a/paddle/capi/examples/dense/trainer_config.py b/paddle/capi/examples/dense/trainer_config.py new file mode 100644 index 0000000000000..873ec119e7a3d --- /dev/null +++ b/paddle/capi/examples/dense/trainer_config.py @@ -0,0 +1,18 @@ +from paddle.trainer_config_helpers import * + +img = data_layer(name='pixel', size=784) + +hidden = fc_layer( + input=img, + size=200, + param_attr=ParamAttr(name='hidden.w'), + bias_attr=ParamAttr(name='hidden.b')) + +prob = fc_layer( + input=hidden, + size=10, + act=SoftmaxActivation(), + param_attr=ParamAttr(name='prob.w'), + bias_attr=ParamAttr(name='prob.b')) + +outputs(prob) diff --git a/paddle/capi/examples/multi_thread/.gitignore b/paddle/capi/examples/multi_thread/.gitignore new file mode 100644 index 0000000000000..fab7372d796ea --- /dev/null +++ b/paddle/capi/examples/multi_thread/.gitignore @@ -0,0 +1,73 @@ +# This file is used to ignore files which are generated +# ---------------------------------------------------------------------------- + +*~ +*.autosave +*.a +*.core +*.moc +*.o +*.obj +*.orig +*.rej +*.so +*.so.* +*_pch.h.cpp +*_resource.rc +*.qm +.#* +*.*# +core +!core/ +tags +.DS_Store +.directory +*.debug +Makefile* +*.prl +*.app +moc_*.cpp +ui_*.h +qrc_*.cpp +Thumbs.db +*.res +*.rc +/.qmake.cache +/.qmake.stash + +# qtcreator generated files +*.pro.user* + +# xemacs temporary files +*.flc + 
+# Vim temporary files +.*.swp + +# Visual Studio generated files +*.ib_pdb_index +*.idb +*.ilk +*.pdb +*.sln +*.suo +*.vcproj +*vcproj.*.*.user +*.ncb +*.sdf +*.opensdf +*.vcxproj +*vcxproj.* + +# MinGW generated files +*.Debug +*.Release + +# Python byte code +*.pyc + +# Binaries +# -------- +*.dll +*.exe + diff --git a/paddle/capi/examples/multi_thread/CMakeLists.txt b/paddle/capi/examples/multi_thread/CMakeLists.txt new file mode 100644 index 0000000000000..98e411ddc02a4 --- /dev/null +++ b/paddle/capi/examples/multi_thread/CMakeLists.txt @@ -0,0 +1,8 @@ +project(multi_thread) +cmake_minimum_required(VERSION 2.8) +aux_source_directory(. SRC_LIST) +add_executable(${PROJECT_NAME} ${SRC_LIST}) +find_package (Threads) +set_property(TARGET ${PROJECT_NAME} PROPERTY C_STANDARD 99) +target_link_libraries(${PROJECT_NAME} -lpaddle_capi_shared + ${CMAKE_THREAD_LIBS_INIT}) diff --git a/paddle/capi/examples/multi_thread/convert_protobin.sh b/paddle/capi/examples/multi_thread/convert_protobin.sh new file mode 120000 index 0000000000000..3c1b3533523cf --- /dev/null +++ b/paddle/capi/examples/multi_thread/convert_protobin.sh @@ -0,0 +1 @@ +../dense/convert_protobin.sh \ No newline at end of file diff --git a/paddle/capi/examples/multi_thread/main.c b/paddle/capi/examples/multi_thread/main.c new file mode 100644 index 0000000000000..23f8629765d8a --- /dev/null +++ b/paddle/capi/examples/multi_thread/main.c @@ -0,0 +1,96 @@ +#include +#include +#include +#include "../common/common.h" + +#define CONFIG_BIN "./trainer_config.bin" +#define NUM_THREAD 1000 +#define NUM_ITER 1000 + +pthread_mutex_t mutex; + +void* thread_main(void* gm_ptr) { + paddle_gradient_machine machine = (paddle_gradient_machine)(gm_ptr); + + for (int iter = 0; iter < NUM_ITER; ++iter) { + paddle_arguments in_args = paddle_arguments_create_none(); + // There is only one input of this network. + CHECK(paddle_arguments_resize(in_args, 1)); + + // Create input matrix. 
+ paddle_matrix mat = paddle_matrix_create(/* sample_num */ 1, + /* size */ 784, + /* useGPU */ false); + + paddle_real* array; + + // Get First row. + CHECK(paddle_matrix_get_row(mat, 0, &array)); + + for (int i = 0; i < 784; ++i) { + array[i] = rand() / ((float)RAND_MAX); + } + + CHECK(paddle_arguments_set_value(in_args, 0, mat)); + + paddle_arguments out_args = paddle_arguments_create_none(); + CHECK(paddle_gradient_machine_forward(machine, + in_args, + out_args, + /* isTrain */ false)); + paddle_matrix prob = paddle_matrix_create_none(); + + CHECK(paddle_arguments_value(out_args, 0, prob)); + + CHECK(paddle_matrix_get_row(prob, 0, &array)); + + pthread_mutex_lock(&mutex); + printf("Prob: "); + for (int i = 0; i < 10; ++i) { + printf("%.2f ", array[i]); + } + printf("\n"); + pthread_mutex_unlock(&mutex); + } + + CHECK(paddle_gradient_machine_destroy(machine)); + return NULL; +} + +int main() { + // Initalize Paddle + char* argv[] = {"--use_gpu=False"}; + CHECK(paddle_init(1, (char**)argv)); + + // Reading config binary file. It is generated by `convert_protobin.sh` + long size; + void* buf = read_config(CONFIG_BIN, &size); + + // Create a gradient machine for inference. + paddle_gradient_machine machine; + CHECK(paddle_gradient_machine_create_for_inference(&machine, buf, (int)size)); + CHECK(paddle_gradient_machine_randomize_param(machine)); + + // Loading parameter. Uncomment the following line and change the directory. 
+ // CHECK(paddle_gradient_machine_load_parameter_from_disk(machine, + // "./some_where_to_params")); + srand(time(0)); + pthread_mutex_init(&mutex, NULL); + + pthread_t threads[NUM_THREAD]; + + for (int i = 0; i < NUM_THREAD; ++i) { + paddle_gradient_machine thread_local_machine; + CHECK(paddle_gradient_machine_create_shared_param( + machine, buf, size, &thread_local_machine)); + pthread_create(&threads[i], NULL, thread_main, thread_local_machine); + } + + for (int i = 0; i < NUM_THREAD; ++i) { + pthread_join(threads[i], NULL); + } + + pthread_mutex_destroy(&mutex); + + return 0; +} diff --git a/paddle/capi/examples/multi_thread/trainer_config.py b/paddle/capi/examples/multi_thread/trainer_config.py new file mode 120000 index 0000000000000..70cfb1f7f4cfe --- /dev/null +++ b/paddle/capi/examples/multi_thread/trainer_config.py @@ -0,0 +1 @@ +../dense/trainer_config.py \ No newline at end of file diff --git a/paddle/capi/examples/sparse_binary/.gitignore b/paddle/capi/examples/sparse_binary/.gitignore new file mode 100644 index 0000000000000..fab7372d796ea --- /dev/null +++ b/paddle/capi/examples/sparse_binary/.gitignore @@ -0,0 +1,73 @@ +# This file is used to ignore files which are generated +# ---------------------------------------------------------------------------- + +*~ +*.autosave +*.a +*.core +*.moc +*.o +*.obj +*.orig +*.rej +*.so +*.so.* +*_pch.h.cpp +*_resource.rc +*.qm +.#* +*.*# +core +!core/ +tags +.DS_Store +.directory +*.debug +Makefile* +*.prl +*.app +moc_*.cpp +ui_*.h +qrc_*.cpp +Thumbs.db +*.res +*.rc +/.qmake.cache +/.qmake.stash + +# qtcreator generated files +*.pro.user* + +# xemacs temporary files +*.flc + +# Vim temporary files +.*.swp + +# Visual Studio generated files +*.ib_pdb_index +*.idb +*.ilk +*.pdb +*.sln +*.suo +*.vcproj +*vcproj.*.*.user +*.ncb +*.sdf +*.opensdf +*.vcxproj +*vcxproj.* + +# MinGW generated files +*.Debug +*.Release + +# Python byte code +*.pyc + +# Binaries +# -------- +*.dll +*.exe + diff --git 
a/paddle/capi/examples/sparse_binary/CMakeLists.txt b/paddle/capi/examples/sparse_binary/CMakeLists.txt new file mode 100644 index 0000000000000..c82195688902a --- /dev/null +++ b/paddle/capi/examples/sparse_binary/CMakeLists.txt @@ -0,0 +1,7 @@ +project(sparse_binary) +cmake_minimum_required(VERSION 2.8) +aux_source_directory(. SRC_LIST) +add_executable(${PROJECT_NAME} ${SRC_LIST}) +find_package (Threads) +set_property(TARGET ${PROJECT_NAME} PROPERTY C_STANDARD 99) +target_link_libraries(${PROJECT_NAME} -lpaddle_capi_shared) diff --git a/paddle/capi/examples/sparse_binary/convert_protobin.sh b/paddle/capi/examples/sparse_binary/convert_protobin.sh new file mode 120000 index 0000000000000..3c1b3533523cf --- /dev/null +++ b/paddle/capi/examples/sparse_binary/convert_protobin.sh @@ -0,0 +1 @@ +../dense/convert_protobin.sh \ No newline at end of file diff --git a/paddle/capi/examples/sparse_binary/main.c b/paddle/capi/examples/sparse_binary/main.c new file mode 100644 index 0000000000000..c5e653dbc2876 --- /dev/null +++ b/paddle/capi/examples/sparse_binary/main.c @@ -0,0 +1,64 @@ +#include +#include +#include "../common/common.h" + +#define CONFIG_BIN "./trainer_config.bin" + +int main() { + // Initalize Paddle + char* argv[] = {"--use_gpu=False"}; + CHECK(paddle_init(1, (char**)argv)); + + // Reading config binary file. It is generated by `convert_protobin.sh` + long size; + void* buf = read_config(CONFIG_BIN, &size); + + // Create a gradient machine for inference. + paddle_gradient_machine machine; + CHECK(paddle_gradient_machine_create_for_inference(&machine, buf, (int)size)); + CHECK(paddle_gradient_machine_randomize_param(machine)); + + // Loading parameter. Uncomment the following line and change the directory. + // CHECK(paddle_gradient_machine_load_parameter_from_disk(machine, + // "./some_where_to_params")); + paddle_arguments in_args = paddle_arguments_create_none(); + + // There is only one input of this network. 
+ CHECK(paddle_arguments_resize(in_args, 1)); + + // Create input matrix. + paddle_matrix mat = paddle_matrix_create_sparse(1, 784, 3, true, false); + srand(time(0)); + paddle_real* array; + int colBuf[] = {9, 93, 109}; + int rowBuf[] = {0, sizeof(colBuf) / sizeof(int)}; + + CHECK(paddle_matrix_sparse_copy_from(mat, + rowBuf, + sizeof(rowBuf) / sizeof(int), + colBuf, + sizeof(colBuf) / sizeof(int), + NULL, + 0)); + + CHECK(paddle_arguments_set_value(in_args, 0, mat)); + + paddle_arguments out_args = paddle_arguments_create_none(); + CHECK(paddle_gradient_machine_forward(machine, + in_args, + out_args, + /* isTrain */ false)); + paddle_matrix prob = paddle_matrix_create_none(); + + CHECK(paddle_arguments_value(out_args, 0, prob)); + + CHECK(paddle_matrix_get_row(prob, 0, &array)); + + printf("Prob: "); + for (int i = 0; i < 10; ++i) { + printf("%.2f ", array[i]); + } + printf("\n"); + + return 0; +} diff --git a/paddle/capi/examples/sparse_binary/trainer_config.py b/paddle/capi/examples/sparse_binary/trainer_config.py new file mode 120000 index 0000000000000..70cfb1f7f4cfe --- /dev/null +++ b/paddle/capi/examples/sparse_binary/trainer_config.py @@ -0,0 +1 @@ +../dense/trainer_config.py \ No newline at end of file diff --git a/paddle/capi/gradient_machine.cpp b/paddle/capi/gradient_machine.cpp index 6e7740a455b5d..00f76e0152366 100644 --- a/paddle/capi/gradient_machine.cpp +++ b/paddle/capi/gradient_machine.cpp @@ -113,3 +113,11 @@ paddle_error paddle_gradient_machine_create_shared_param( return kPD_NO_ERROR; } } + +paddle_error paddle_gradient_machine_randomize_param( + paddle_gradient_machine machine) { + auto m = cast(machine); + if (m == nullptr || m->machine == nullptr) return kPD_NULLPTR; + m->machine->randParameters(); + return kPD_NO_ERROR; +} diff --git a/paddle/capi/gradient_machine.h b/paddle/capi/gradient_machine.h index 36c1a2b1b4b91..d7e2dd9bf8037 100644 --- a/paddle/capi/gradient_machine.h +++ b/paddle/capi/gradient_machine.h @@ -74,6 +74,9 @@ 
paddle_gradient_machine_create_shared_param(paddle_gradient_machine origin, int size, paddle_gradient_machine* slave); +PD_API paddle_error +paddle_gradient_machine_randomize_param(paddle_gradient_machine machine); + /** * @brief Destroy a gradient machine * @param machine that need to destroy diff --git a/paddle/capi/matrix.h b/paddle/capi/matrix.h index 2f6488f38386a..f15f7f3bbbd14 100644 --- a/paddle/capi/matrix.h +++ b/paddle/capi/matrix.h @@ -15,6 +15,7 @@ limitations under the License. */ #ifndef __PADDLE_CAPI_MATRIX_H__ #define __PADDLE_CAPI_MATRIX_H__ +#include #include #include "config.h" #include "error.h" @@ -39,6 +40,18 @@ PD_API paddle_matrix paddle_matrix_create(uint64_t height, uint64_t width, bool useGpu); +/** + * @brief paddle_matrix_create_sparse Create a sparse matrix. + * @param height the matrix height. + * @param width the matrix width. + * @param nnz the number of non-zero elements. + * @param isBinary is binary (either 1 or 0 in matrix) or not. + * @param useGpu is using GPU or not. + * @return paddle_matrix. + */ +PD_API paddle_matrix paddle_matrix_create_sparse( + uint64_t height, uint64_t width, uint64_t nnz, bool isBinary, bool useGpu); + /** * @brief paddle_matrix_destroy Destroy a matrix. 
* @param mat @@ -55,7 +68,7 @@ PD_API paddle_error paddle_matrix_destroy(paddle_matrix mat); */ PD_API paddle_error paddle_matrix_set_row(paddle_matrix mat, uint64_t rowID, - pd_real* rowArray); + paddle_real* rowArray); /** * @brief PDMatGetRow Get raw row buffer from matrix @@ -66,7 +79,7 @@ PD_API paddle_error paddle_matrix_set_row(paddle_matrix mat, */ PD_API paddle_error paddle_matrix_get_row(paddle_matrix mat, uint64_t rowID, - pd_real** rawRowBuffer); + paddle_real** rawRowBuffer); /** * @brief PDMatCreateNone Create None Matrix @@ -85,6 +98,27 @@ PD_API paddle_error paddle_matrix_get_shape(paddle_matrix mat, uint64_t* height, uint64_t* width); +/** + * @brief paddle_matrix_sparse_copy_from Copy from a CSR format matrix + * @param [out] mat output matrix + * @param [in] rowArray row array. The array slices in column array. + * @param [in] rowSize length of row array. + * @param [in] colArray the column array. It means the non-zero element indices + * in each row. + * @param [in] colSize length of column array. + * @param [in] valueArray the value array. It means the non-zero elemnt values. + * NULL if the matrix is binary. + * @param [in] valueSize length of value array. Zero if the matrix is binary. + * @return paddle_error + */ +PD_API paddle_error paddle_matrix_sparse_copy_from(paddle_matrix mat, + int* rowArray, + uint64_t rowSize, + int* colArray, + uint64_t colSize, + float* valueArray, + uint64_t valueSize); + #ifdef __cplusplus } #endif diff --git a/paddle/capi/tests/test_Arguments.cpp b/paddle/capi/tests/test_Arguments.cpp index e6e4ac9937e5e..f56391d51e32a 100644 --- a/paddle/capi/tests/test_Arguments.cpp +++ b/paddle/capi/tests/test_Arguments.cpp @@ -17,10 +17,10 @@ limitations under the License. 
*/ #include "gtest/gtest.h" #include "paddle/utils/ThreadLocal.h" -static std::vector randomBuffer(size_t bufSize) { +static std::vector randomBuffer(size_t bufSize) { auto& eng = paddle::ThreadLocalRandomEngine::get(); - std::uniform_real_distribution dist(-1.0, 1.0); - std::vector retv; + std::uniform_real_distribution dist(-1.0, 1.0); + std::vector retv; retv.reserve(bufSize); for (size_t i = 0; i < bufSize; ++i) { retv.push_back(dist(eng)); @@ -42,7 +42,7 @@ TEST(CAPIArguments, value) { paddle_matrix mat = paddle_matrix_create(128, 64, false); for (size_t i = 0; i < 128; ++i) { - std::vector sampleBuf = randomBuffer(64); + std::vector sampleBuf = randomBuffer(64); paddle_matrix_set_row(mat, i, sampleBuf.data()); } ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_set_value(args, 0, mat)); @@ -52,8 +52,8 @@ TEST(CAPIArguments, value) { ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_value(args, 0, val)); for (size_t i = 0; i < 128; ++i) { - pd_real* row1; - pd_real* row2; + paddle_real* row1; + paddle_real* row2; ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_row(mat, i, &row1)); ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_row(val, i, &row2)); diff --git a/paddle/capi/tests/test_GradientMachine.cpp b/paddle/capi/tests/test_GradientMachine.cpp index b37fe83a30bd1..be7dfadddc35e 100644 --- a/paddle/capi/tests/test_GradientMachine.cpp +++ b/paddle/capi/tests/test_GradientMachine.cpp @@ -21,10 +21,10 @@ limitations under the License. 
*/ #include "capi.h" #include "paddle/utils/ThreadLocal.h" -static std::vector randomBuffer(size_t bufSize) { +static std::vector randomBuffer(size_t bufSize) { auto& eng = paddle::ThreadLocalRandomEngine::get(); - std::uniform_real_distribution dist(-1.0, 1.0); - std::vector retv; + std::uniform_real_distribution dist(-1.0, 1.0); + std::vector retv; retv.reserve(bufSize); for (size_t i = 0; i < bufSize; ++i) { retv.push_back(dist(eng)); @@ -60,12 +60,12 @@ TEST(GradientMachine, testPredict) { paddle_arguments inArgs = paddle_arguments_create_none(); ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_resize(inArgs, 1)); paddle_matrix mat = paddle_matrix_create(1, 100, false); - static_assert(std::is_same::value, ""); + static_assert(std::is_same::value, ""); auto data = randomBuffer(100); - pd_real* rowPtr; + paddle_real* rowPtr; ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_row(mat, 0, &rowPtr)); - memcpy(rowPtr, data.data(), data.size() * sizeof(pd_real)); + memcpy(rowPtr, data.data(), data.size() * sizeof(paddle_real)); ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_set_value(inArgs, 0, mat)); ASSERT_EQ(kPD_NO_ERROR, diff --git a/paddle/capi/tests/test_Matrix.cpp b/paddle/capi/tests/test_Matrix.cpp index 162df448d2b27..71dc2064dd02d 100644 --- a/paddle/capi/tests/test_Matrix.cpp +++ b/paddle/capi/tests/test_Matrix.cpp @@ -17,7 +17,7 @@ limitations under the License. 
*/ TEST(CAPIMatrix, create) { paddle_matrix mat = paddle_matrix_create(128, 32, false); - std::vector sampleRow; + std::vector sampleRow; sampleRow.resize(32); for (size_t i = 0; i < sampleRow.size(); ++i) { sampleRow[i] = 1.0 / (i + 1.0); @@ -26,7 +26,7 @@ TEST(CAPIMatrix, create) { ASSERT_EQ(kPD_OUT_OF_RANGE, paddle_matrix_set_row(mat, 128, sampleRow.data())); - pd_real* arrayPtr; + paddle_real* arrayPtr; ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_row(mat, 0, &arrayPtr)); for (size_t i = 0; i < sampleRow.size(); ++i) { From 34b3ee31fafae7d0b0ad2977b1ab14094018e2b1 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sun, 26 Mar 2017 14:06:38 +0800 Subject: [PATCH 32/43] Add sequence exampleAdd sequence exampleAdd sequence exampleAdd sequence exampleAdd sequence exampleAdd sequence exampleAdd sequence exampleAdd sequence exampleAdd sequence example --- paddle/capi/examples/sequence/.gitignore | 73 +++++++++++++++++++ paddle/capi/examples/sequence/CMakeLists.txt | 6 ++ .../examples/sequence/convert_protobin.sh | 1 + paddle/capi/examples/sequence/main.c | 63 ++++++++++++++++ .../capi/examples/sequence/trainer_config.py | 13 ++++ 5 files changed, 156 insertions(+) create mode 100644 paddle/capi/examples/sequence/.gitignore create mode 100644 paddle/capi/examples/sequence/CMakeLists.txt create mode 120000 paddle/capi/examples/sequence/convert_protobin.sh create mode 100644 paddle/capi/examples/sequence/main.c create mode 100644 paddle/capi/examples/sequence/trainer_config.py diff --git a/paddle/capi/examples/sequence/.gitignore b/paddle/capi/examples/sequence/.gitignore new file mode 100644 index 0000000000000..fab7372d796ea --- /dev/null +++ b/paddle/capi/examples/sequence/.gitignore @@ -0,0 +1,73 @@ +# This file is used to ignore files which are generated +# ---------------------------------------------------------------------------- + +*~ +*.autosave +*.a +*.core +*.moc +*.o +*.obj +*.orig +*.rej +*.so +*.so.* +*_pch.h.cpp +*_resource.rc +*.qm +.#* +*.*# +core +!core/ +tags 
+.DS_Store +.directory +*.debug +Makefile* +*.prl +*.app +moc_*.cpp +ui_*.h +qrc_*.cpp +Thumbs.db +*.res +*.rc +/.qmake.cache +/.qmake.stash + +# qtcreator generated files +*.pro.user* + +# xemacs temporary files +*.flc + +# Vim temporary files +.*.swp + +# Visual Studio generated files +*.ib_pdb_index +*.idb +*.ilk +*.pdb +*.sln +*.suo +*.vcproj +*vcproj.*.*.user +*.ncb +*.sdf +*.opensdf +*.vcxproj +*vcxproj.* + +# MinGW generated files +*.Debug +*.Release + +# Python byte code +*.pyc + +# Binaries +# -------- +*.dll +*.exe + diff --git a/paddle/capi/examples/sequence/CMakeLists.txt b/paddle/capi/examples/sequence/CMakeLists.txt new file mode 100644 index 0000000000000..71b73acba7cde --- /dev/null +++ b/paddle/capi/examples/sequence/CMakeLists.txt @@ -0,0 +1,6 @@ +project(sequence) +cmake_minimum_required(VERSION 2.8) +aux_source_directory(. SRC_LIST) +add_executable(${PROJECT_NAME} ${SRC_LIST}) +set_property(TARGET ${PROJECT_NAME} PROPERTY C_STANDARD 99) +target_link_libraries(${PROJECT_NAME} -lpaddle_capi_shared) diff --git a/paddle/capi/examples/sequence/convert_protobin.sh b/paddle/capi/examples/sequence/convert_protobin.sh new file mode 120000 index 0000000000000..3c1b3533523cf --- /dev/null +++ b/paddle/capi/examples/sequence/convert_protobin.sh @@ -0,0 +1 @@ +../dense/convert_protobin.sh \ No newline at end of file diff --git a/paddle/capi/examples/sequence/main.c b/paddle/capi/examples/sequence/main.c new file mode 100644 index 0000000000000..7e71bb8b8aff4 --- /dev/null +++ b/paddle/capi/examples/sequence/main.c @@ -0,0 +1,63 @@ +#include +#include +#include "../common/common.h" + +#define CONFIG_BIN "./trainer_config.bin" + +int main() { + // Initalize Paddle + char* argv[] = {"--use_gpu=False"}; + CHECK(paddle_init(1, (char**)argv)); + + // Reading config binary file. It is generated by `convert_protobin.sh` + long size; + void* buf = read_config(CONFIG_BIN, &size); + + // Create a gradient machine for inference. 
+ paddle_gradient_machine machine; + CHECK(paddle_gradient_machine_create_for_inference(&machine, buf, (int)size)); + CHECK(paddle_gradient_machine_randomize_param(machine)); + + // Loading parameter. Uncomment the following line and change the directory. + // CHECK(paddle_gradient_machine_load_parameter_from_disk(machine, + // "./some_where_to_params")); + paddle_arguments in_args = paddle_arguments_create_none(); + + // There is only one input of this network. + CHECK(paddle_arguments_resize(in_args, 1)); + + // Create input ids. + int sentence_ids[] = {83, 48, 20, 84, 394, 853, 64, 53, 64}; + + paddle_ivector sentence = paddle_ivector_create( + sentence_ids, sizeof(sentence_ids) / sizeof(int), false, false); + CHECK(paddle_arguments_set_ids(in_args, 0, sentence)); + + int seq_pos_array[] = {0, sizeof(sentence_ids) / sizeof(int)}; + + paddle_ivector seq_pos = paddle_ivector_create( + seq_pos_array, sizeof(seq_pos_array) / sizeof(int), false, false); + + CHECK(paddle_arguments_set_sequence_start_pos(in_args, 0, 0, seq_pos)); + + paddle_arguments out_args = paddle_arguments_create_none(); + CHECK(paddle_gradient_machine_forward(machine, + in_args, + out_args, + /* isTrain */ false)); + paddle_matrix prob = paddle_matrix_create_none(); + + CHECK(paddle_arguments_value(out_args, 0, prob)); + + paddle_real* array; + + CHECK(paddle_matrix_get_row(prob, 0, &array)); + + printf("Prob: "); + for (int i = 0; i < 2; ++i) { + printf("%.2f ", array[i]); + } + printf("\n"); + + return 0; +} diff --git a/paddle/capi/examples/sequence/trainer_config.py b/paddle/capi/examples/sequence/trainer_config.py new file mode 100644 index 0000000000000..6bbc7a909aa03 --- /dev/null +++ b/paddle/capi/examples/sequence/trainer_config.py @@ -0,0 +1,13 @@ +from paddle.trainer_config_helpers import * + +WORD_DIM = 3000 + +sentence = data_layer(name='sentence', size=WORD_DIM) +sentence_embedding = embedding_layer( + input=sentence, + size=64, + param_attr=ParameterAttribute( + initial_max=1.0, 
initial_min=0.5)) +lstm = simple_lstm(input=sentence_embedding, size=64) +lstm_last = last_seq(input=lstm) +outputs(fc_layer(input=lstm_last, size=2, act=SoftmaxActivation())) From 852a94f8042d08b817eb38abac7c530bb2833749 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sun, 26 Mar 2017 14:07:21 +0800 Subject: [PATCH 33/43] Add model_inference directory --- paddle/capi/examples/{ => model_inference}/common/common.h | 0 paddle/capi/examples/{ => model_inference}/dense/CMakeLists.txt | 0 .../capi/examples/{ => model_inference}/dense/convert_protobin.sh | 0 paddle/capi/examples/{ => model_inference}/dense/main.c | 0 .../capi/examples/{ => model_inference}/dense/trainer_config.py | 0 .../capi/examples/{ => model_inference}/multi_thread/.gitignore | 0 .../examples/{ => model_inference}/multi_thread/CMakeLists.txt | 0 .../{ => model_inference}/multi_thread/convert_protobin.sh | 0 paddle/capi/examples/{ => model_inference}/multi_thread/main.c | 0 .../examples/{ => model_inference}/multi_thread/trainer_config.py | 0 paddle/capi/examples/{ => model_inference}/sequence/.gitignore | 0 .../capi/examples/{ => model_inference}/sequence/CMakeLists.txt | 0 .../examples/{ => model_inference}/sequence/convert_protobin.sh | 0 paddle/capi/examples/{ => model_inference}/sequence/main.c | 0 .../examples/{ => model_inference}/sequence/trainer_config.py | 0 .../capi/examples/{ => model_inference}/sparse_binary/.gitignore | 0 .../examples/{ => model_inference}/sparse_binary/CMakeLists.txt | 0 .../{ => model_inference}/sparse_binary/convert_protobin.sh | 0 paddle/capi/examples/{ => model_inference}/sparse_binary/main.c | 0 .../{ => model_inference}/sparse_binary/trainer_config.py | 0 20 files changed, 0 insertions(+), 0 deletions(-) rename paddle/capi/examples/{ => model_inference}/common/common.h (100%) rename paddle/capi/examples/{ => model_inference}/dense/CMakeLists.txt (100%) rename paddle/capi/examples/{ => model_inference}/dense/convert_protobin.sh (100%) rename paddle/capi/examples/{ => 
model_inference}/dense/main.c (100%) rename paddle/capi/examples/{ => model_inference}/dense/trainer_config.py (100%) rename paddle/capi/examples/{ => model_inference}/multi_thread/.gitignore (100%) rename paddle/capi/examples/{ => model_inference}/multi_thread/CMakeLists.txt (100%) rename paddle/capi/examples/{ => model_inference}/multi_thread/convert_protobin.sh (100%) rename paddle/capi/examples/{ => model_inference}/multi_thread/main.c (100%) rename paddle/capi/examples/{ => model_inference}/multi_thread/trainer_config.py (100%) rename paddle/capi/examples/{ => model_inference}/sequence/.gitignore (100%) rename paddle/capi/examples/{ => model_inference}/sequence/CMakeLists.txt (100%) rename paddle/capi/examples/{ => model_inference}/sequence/convert_protobin.sh (100%) rename paddle/capi/examples/{ => model_inference}/sequence/main.c (100%) rename paddle/capi/examples/{ => model_inference}/sequence/trainer_config.py (100%) rename paddle/capi/examples/{ => model_inference}/sparse_binary/.gitignore (100%) rename paddle/capi/examples/{ => model_inference}/sparse_binary/CMakeLists.txt (100%) rename paddle/capi/examples/{ => model_inference}/sparse_binary/convert_protobin.sh (100%) rename paddle/capi/examples/{ => model_inference}/sparse_binary/main.c (100%) rename paddle/capi/examples/{ => model_inference}/sparse_binary/trainer_config.py (100%) diff --git a/paddle/capi/examples/common/common.h b/paddle/capi/examples/model_inference/common/common.h similarity index 100% rename from paddle/capi/examples/common/common.h rename to paddle/capi/examples/model_inference/common/common.h diff --git a/paddle/capi/examples/dense/CMakeLists.txt b/paddle/capi/examples/model_inference/dense/CMakeLists.txt similarity index 100% rename from paddle/capi/examples/dense/CMakeLists.txt rename to paddle/capi/examples/model_inference/dense/CMakeLists.txt diff --git a/paddle/capi/examples/dense/convert_protobin.sh b/paddle/capi/examples/model_inference/dense/convert_protobin.sh similarity 
index 100% rename from paddle/capi/examples/dense/convert_protobin.sh rename to paddle/capi/examples/model_inference/dense/convert_protobin.sh diff --git a/paddle/capi/examples/dense/main.c b/paddle/capi/examples/model_inference/dense/main.c similarity index 100% rename from paddle/capi/examples/dense/main.c rename to paddle/capi/examples/model_inference/dense/main.c diff --git a/paddle/capi/examples/dense/trainer_config.py b/paddle/capi/examples/model_inference/dense/trainer_config.py similarity index 100% rename from paddle/capi/examples/dense/trainer_config.py rename to paddle/capi/examples/model_inference/dense/trainer_config.py diff --git a/paddle/capi/examples/multi_thread/.gitignore b/paddle/capi/examples/model_inference/multi_thread/.gitignore similarity index 100% rename from paddle/capi/examples/multi_thread/.gitignore rename to paddle/capi/examples/model_inference/multi_thread/.gitignore diff --git a/paddle/capi/examples/multi_thread/CMakeLists.txt b/paddle/capi/examples/model_inference/multi_thread/CMakeLists.txt similarity index 100% rename from paddle/capi/examples/multi_thread/CMakeLists.txt rename to paddle/capi/examples/model_inference/multi_thread/CMakeLists.txt diff --git a/paddle/capi/examples/multi_thread/convert_protobin.sh b/paddle/capi/examples/model_inference/multi_thread/convert_protobin.sh similarity index 100% rename from paddle/capi/examples/multi_thread/convert_protobin.sh rename to paddle/capi/examples/model_inference/multi_thread/convert_protobin.sh diff --git a/paddle/capi/examples/multi_thread/main.c b/paddle/capi/examples/model_inference/multi_thread/main.c similarity index 100% rename from paddle/capi/examples/multi_thread/main.c rename to paddle/capi/examples/model_inference/multi_thread/main.c diff --git a/paddle/capi/examples/multi_thread/trainer_config.py b/paddle/capi/examples/model_inference/multi_thread/trainer_config.py similarity index 100% rename from paddle/capi/examples/multi_thread/trainer_config.py rename to 
paddle/capi/examples/model_inference/multi_thread/trainer_config.py diff --git a/paddle/capi/examples/sequence/.gitignore b/paddle/capi/examples/model_inference/sequence/.gitignore similarity index 100% rename from paddle/capi/examples/sequence/.gitignore rename to paddle/capi/examples/model_inference/sequence/.gitignore diff --git a/paddle/capi/examples/sequence/CMakeLists.txt b/paddle/capi/examples/model_inference/sequence/CMakeLists.txt similarity index 100% rename from paddle/capi/examples/sequence/CMakeLists.txt rename to paddle/capi/examples/model_inference/sequence/CMakeLists.txt diff --git a/paddle/capi/examples/sequence/convert_protobin.sh b/paddle/capi/examples/model_inference/sequence/convert_protobin.sh similarity index 100% rename from paddle/capi/examples/sequence/convert_protobin.sh rename to paddle/capi/examples/model_inference/sequence/convert_protobin.sh diff --git a/paddle/capi/examples/sequence/main.c b/paddle/capi/examples/model_inference/sequence/main.c similarity index 100% rename from paddle/capi/examples/sequence/main.c rename to paddle/capi/examples/model_inference/sequence/main.c diff --git a/paddle/capi/examples/sequence/trainer_config.py b/paddle/capi/examples/model_inference/sequence/trainer_config.py similarity index 100% rename from paddle/capi/examples/sequence/trainer_config.py rename to paddle/capi/examples/model_inference/sequence/trainer_config.py diff --git a/paddle/capi/examples/sparse_binary/.gitignore b/paddle/capi/examples/model_inference/sparse_binary/.gitignore similarity index 100% rename from paddle/capi/examples/sparse_binary/.gitignore rename to paddle/capi/examples/model_inference/sparse_binary/.gitignore diff --git a/paddle/capi/examples/sparse_binary/CMakeLists.txt b/paddle/capi/examples/model_inference/sparse_binary/CMakeLists.txt similarity index 100% rename from paddle/capi/examples/sparse_binary/CMakeLists.txt rename to paddle/capi/examples/model_inference/sparse_binary/CMakeLists.txt diff --git 
a/paddle/capi/examples/sparse_binary/convert_protobin.sh b/paddle/capi/examples/model_inference/sparse_binary/convert_protobin.sh similarity index 100% rename from paddle/capi/examples/sparse_binary/convert_protobin.sh rename to paddle/capi/examples/model_inference/sparse_binary/convert_protobin.sh diff --git a/paddle/capi/examples/sparse_binary/main.c b/paddle/capi/examples/model_inference/sparse_binary/main.c similarity index 100% rename from paddle/capi/examples/sparse_binary/main.c rename to paddle/capi/examples/model_inference/sparse_binary/main.c diff --git a/paddle/capi/examples/sparse_binary/trainer_config.py b/paddle/capi/examples/model_inference/sparse_binary/trainer_config.py similarity index 100% rename from paddle/capi/examples/sparse_binary/trainer_config.py rename to paddle/capi/examples/model_inference/sparse_binary/trainer_config.py From 0d73f4c2655bcef0d2c037b9b67f9279ea4fab23 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sun, 26 Mar 2017 15:41:20 +0800 Subject: [PATCH 34/43] Add usage documentation of C-API. --- paddle/capi/examples/README.md | 3 ++ .../capi/examples/model_inference/README.md | 42 +++++++++++++++++++ 2 files changed, 45 insertions(+) create mode 100644 paddle/capi/examples/README.md create mode 100644 paddle/capi/examples/model_inference/README.md diff --git a/paddle/capi/examples/README.md b/paddle/capi/examples/README.md new file mode 100644 index 0000000000000..14013e281ff50 --- /dev/null +++ b/paddle/capi/examples/README.md @@ -0,0 +1,3 @@ +# C-API Example Usage + +* [Model Inference](./model_inference/README.md) diff --git a/paddle/capi/examples/model_inference/README.md b/paddle/capi/examples/model_inference/README.md new file mode 100644 index 0000000000000..58e6c83140b5f --- /dev/null +++ b/paddle/capi/examples/model_inference/README.md @@ -0,0 +1,42 @@ +# Use C-API for Model Inference + +There are several examples in this directory about how to use Paddle C-API for model inference. 
+ +## Convert configuration file to protobuf binary. + +Firstly, the user should convert Paddle's model configuration file into a protobuf binary file. In each example directory, there is a file named `convert_protobin.sh`. It will convert `trainer_config.conf` into `trainer_config.bin`. + +The `convert_protobin.sh` is very simple; it just invokes the `dump_config` Python module to dump the binary file. The command line usage is: + +```bash +python -m paddle.utils.dump_config YOUR_CONFIG_FILE 'CONFIG_EXTRA_ARGS' --binary > YOUR_CONFIG_FILE.bin +``` + +## Initialize Paddle + +```c++ +char* argv[] = {"--use_gpu=False"}; +paddle_init(1, (char**)argv); +``` + +We must initialize the global context before we invoke other interfaces in Paddle. The initialization arguments are just like the `paddle_trainer` command line arguments. `paddle train --help` will show the list of arguments. The most important argument is `use_gpu`. + +## Load network and parameters + +```c +paddle_gradient_machine machine; +paddle_gradient_machine_create_for_inference(&machine, config_file_content, content_size); +paddle_gradient_machine_load_parameter_from_disk(machine, "./some_where_to_params"); +``` + +The gradient machine is a Paddle concept, which represents a neural network that supports forward and backward computation. We can create a gradient machine for model inference, and load the parameter files from disk. + +Moreover, if we want to run inference in multiple threads, we could create a thread-local gradient machine which shares the same parameters by using the `paddle_gradient_machine_create_shared_param` API. Please refer to `multi_thread` as an example. + +## Create input + +The input of a neural network is an `arguments`. The examples in this directory will show how to construct different types of inputs for prediction. Please look at `dense`, `sparse_binary`, `sequence` for details. + +## Get inference + +After invoking `paddle_gradient_machine_forward`, we could get the output of the neural network.
The `value` matrix of output arguments will store the neural network output values. If the output is a `SoftmaxActivation`, the `value` matrix are the probabilities of each input samples. The height of output matrix is number of sample. The width is the number of categories. From 66230967502f26fb7c026901ea75abb9d3c3bce7 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sun, 26 Mar 2017 16:56:57 +0800 Subject: [PATCH 35/43] Add Implementation documentation. Also change previous design file, make concept consistant. --- .../{why_plain_c.md => 00.why_plain_c.md} | 12 +- .../01.inference_implementation.md | 106 ++++++++++++++++++ 2 files changed, 112 insertions(+), 6 deletions(-) rename doc/design/multi_language_interface/{why_plain_c.md => 00.why_plain_c.md} (97%) create mode 100644 doc/design/multi_language_interface/01.inference_implementation.md diff --git a/doc/design/multi_language_interface/why_plain_c.md b/doc/design/multi_language_interface/00.why_plain_c.md similarity index 97% rename from doc/design/multi_language_interface/why_plain_c.md rename to doc/design/multi_language_interface/00.why_plain_c.md index a3f41ca7b93de..7cb05f3ec0134 100644 --- a/doc/design/multi_language_interface/why_plain_c.md +++ b/doc/design/multi_language_interface/00.why_plain_c.md @@ -70,20 +70,20 @@ extern "C" paddle_error paddle_matrix_shape(paddle_matrix matrix, uint64_t *width, uint64_t *height) { - auto m = (paddle::math::matrix*)(matrix); + auto m = (paddle::capi::CMatrix*)(matrix); *width = m->width(); *height = m->height(); } ``` -其中`paddle/math/matrix.hpp`文件内容为: +其中`paddle/capi/CMatrix.hpp`文件内容为: ```cpp namespace paddle { namespace math { -class Matrix { - //... 
+class CMatrix { + std::shared_ptr mat; }; } // namespace math @@ -113,6 +113,6 @@ class Matrix { | 手写多语言绑定 | 不使用SWIG | 使用SWIG需要多语言绑定的开发人员熟练掌握SWIG配置,社区参与困难。SWIG生成的代码不能保证多语言代码风格的一致性 | -## 简单实现 +## 实现 -TBD +参考[预测接口实现](./01.inference_implementation.md) diff --git a/doc/design/multi_language_interface/01.inference_implementation.md b/doc/design/multi_language_interface/01.inference_implementation.md new file mode 100644 index 0000000000000..81dcdd437b0c6 --- /dev/null +++ b/doc/design/multi_language_interface/01.inference_implementation.md @@ -0,0 +1,106 @@ +# C-API 模型推断实现文档 + +本文档描述Paddle C-API的实现细节。Paddle C-API是多语言API的基础部分。Paddle需要暴露的API很多。先实现模型推断的API,通过模型推断API的实现作为一个样例,来进行讨论。至于为什么需要C-API,请参考[这里](./00.why_plain_c.md)。 + +## 暴露接口原则 + +1. 所有的接口均为C接口。即使用`extern "C"` +2. 除构造某种类型的函数(`paddle_matrix_create`等),其他函数均返回`paddle_error`。且调用时不能抛出异常或出现运行时错误。 +3. 所有类型名为`paddle_类型名`,所有与类型相关的函数,函数名为`paddle_类型名_函数名` +4. 如果某一个Paddle Core概念(GradientMachine/Matrix)需要被暴露到其他语言,那么 + * 为了暴露的接口尽量简单。只暴露概念的接口,而不暴露概念的实现。即暴露`GradientMachine`或者`Matrix`但不暴露`RecurrentGradientMachine`和`CpuSparseMatrix`。 + * 暴露这个概念必要函数。`必要`是指,即完成某一个任务的最少函数。 +5. 不在`capi`接口层做过多封装。 + * 如果某一个Paddle概念必须要暴露,但是又过于琐碎。不在`capi`这一层进行封装,而是直接修改Paddle Core。让Paddle核心中,这一概念不再琐碎。 + + +## 目录结构 + +```text +Paddle + `-- paddle + `-- capi + `-- examples # The example project for C-API. + `-- tests # unittests for C-API + `-- capi.h # C-API header file. + `-- capi_private.h # The shared header file between implementation sources. + `-- matrix.{h, cpp} + `-- gradient_machine.{h, cpp} + `-- ... +``` + + +Paddle的C-API目录结构如上图表所示。这个目录中除了`capi_private.h`之外的所有头文件,均会被安装到include/paddle路径下。C-API生成的二进制文件会被安装到`lib`目录下。即,安装后的目录结构为 + +```text +`-- include + `-- paddle + `-- capi.h + `-- matrix.h + `-- gradient_machine.h + `-- ... +`-- lib + `-- libpaddle_capi_shared.{so, dylib} # In mac, dynamic libary's file name extention is `dylib` + `-- libpaddle_capi_whole.a # static library for all symbols of Paddle. 
+``` + +## 实现方式 + +下面分别介绍某一类文件的实现方式。 + +### capi.h + +`capi.h`是用户使用C-API时所唯一需要引入的头文件。在`capi.h`中,引入了类型的头文件,`matrix.h`, `gradient_machine.h`。在引入其他类型的头文件时,使用相对路径的引用方式。即`#include "matrix.h"` + +### 具体某种类型的头文件 + +具体某种类型的头文件,即例如`matrix.h`,`gradient_machine.h`等。在这些头文件中,包含了某种类型的类型定义和暴露的全部函数。 + +这个头文件不假设其他文件的引用顺序,即使用户直接引用某种类型的头文件,也不应该报错(虽然不鼓励这样)。如果某一个类型需要引用另一个类型,例如`gradient_machine`需要引用`matrix`,则直接引入另一种类型的头文件,即`#include "matrix.h"`。 + +### capi_private.h + +`capi_private.h`是各个实现中共享的头文件,它主要包含了实际暴露的类型结构。在用户使用C-API时,Paddle的类型全部退化成`void *`,即`typedef paddle_matrix void*`。但,对于每种C-API暴露的类型,均是在`capi_private.h`中实现的结构体。 + +```cpp +struct CMatrix { + int type = MatrixType; + std::shared_ptr mat; +}; +``` + +通常,这个结构体包含两个项目。 + +* `type`是一个类型的标志。对于每种类型,type字段均不尽相同。这样,即使C-API接受的类型全是`void *`,我们也可以确定每一个参数的类型。 + + ```cpp + void some_c_api_function(void* some_instance) { + int* type = (int *) some_instance; + switch (*type) { + case MatrixType: + CMatrix* mat = (CMatrix *) some_instance; + ... + ... + } + } + ``` +* 这个结构体中的另一个项目是,Paddle Core中这一类型接口的智能指针(shared_ptr)。 + * 使用智能指针的原因是: 用户可以安全的释放某个C-API的实例,而不必在意Paddle Core是否还在使用这个实例。 + * 例如,用户通过C-API获得了神经网络的参数实例。当用户使用完这个参数后,直接删除这个参数即可。即便Paddle Core中的模型还在使用这个参数,这个参数也不会一并删除。 + +### 具体某种类型的实现文件 + +具体某种类型的实现文件,即`matrix.cpp`, `gradient_machine.cpp`等文件。在这些文件中,使用C++ 11实现了C-API的接口,并且使用`extern "C"`导出这些接口。在实现过程中,对输入参数的安全性进行了必要的判断,并将C-API接口的参数转发给`Paddle Core`。 + +### libpaddle\_capi_shared.{so, dylib} + +`libpaddle_capi_shared`是C-API导出的动态库。这个动态库的连接参数与Paddle的其他二进制(例如`paddle_trainer`)类似。用户可以直接使用这个动态库来引入Paddle C-API。具体使用方法为`-lpaddle_capi_shared`。 + +### libpaddle\_capi_whole.a + +`libpaddle_capi_whole`是C-API导出的静态库。这个静态库包含了Paddle的全部符号。它是将`libpaddle_gserver.a`, `libpaddle_math.a`, `libpaddle_capi.a`等全部静态库中的目标文件全部打包后产生的文件。具体使用方法为`--whole-archive -lpaddle_capi_whole --no-whole-archive`。 + + +### examples + +在样例中,使用`C99`开发了模型预测的样例代码。具体请参考[example/README.md](../../../paddle/capi/examples/README.md)。 From 505d20761f86f75dbe978e607bd9e4f2c37a3b06 Mon Sep 17 00:00:00
2001 From: Yu Yang Date: Sun, 26 Mar 2017 17:07:47 +0800 Subject: [PATCH 36/43] Add toc --- .../01.inference_implementation.md | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/doc/design/multi_language_interface/01.inference_implementation.md b/doc/design/multi_language_interface/01.inference_implementation.md index 81dcdd437b0c6..1324bfcaa3112 100644 --- a/doc/design/multi_language_interface/01.inference_implementation.md +++ b/doc/design/multi_language_interface/01.inference_implementation.md @@ -2,6 +2,21 @@ 本文档描述Paddle C-API的实现细节。Paddle C-API是多语言API的基础部分。Paddle需要暴露的API很多。先实现模型推断的API,通过模型推断API的实现作为一个样例,来进行讨论。至于为什么需要C-API,请参考[这里](./00.why_plain_c.md)。 +## Table of Contents + * [C-API 模型推断实现文档](#c-api-模型推断实现文档) + * [暴露接口原则](#暴露接口原则) + * [目录结构](#目录结构) + * [实现方式](#实现方式) + * [capi.h](#capih) + * [具体某种类型的头文件](#具体某种类型的头文件) + * [capi_private.h](#capi_privateh) + * [具体某种类型的实现文件](#具体某种类型的实现文件) + * [libpaddle_capi_shared.{so, dylib}](#libpaddle_capi_sharedso-dylib) + * [libpaddle_capi_whole.a](#libpaddle_capi_wholea) + * [examples](#examples) + * [编译选项](#编译选项) + + ## 暴露接口原则 1. 所有的接口均为C接口。即使用`extern "C"` @@ -104,3 +119,13 @@ struct CMatrix { ### examples 在样例中,使用`C99`开发了模型预测的样例代码。具体请参考[example/README.md](../../../paddle/capi/examples/README.md)。 + +## 编译选项 + +C-API的编译选项默认关闭,打开这个编译选项,需要在cmake的时候,设置 + +```bash +cmake ${YOUR_SOURCE_ROOT} -DWITH_C_API=ON -DWITH_PYTHON=OFF -DWITH_SWIG_PY=OFF +``` + +编译C-API的时候推荐Paddle不嵌入Python解释器,也不生成`SWIG`接口,具体原因参考[这里](./00.why_plain_c.md)。 From e7bc8802d99439038079ca5d3835532e3af8f5f9 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sun, 26 Mar 2017 17:13:06 +0800 Subject: [PATCH 37/43] Revert unchanged file. 
--- cmake/FindGflags.cmake | 582 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 582 insertions(+) create mode 100644 cmake/FindGflags.cmake diff --git a/cmake/FindGflags.cmake b/cmake/FindGflags.cmake new file mode 100644 index 0000000000000..6587089ba382d --- /dev/null +++ b/cmake/FindGflags.cmake @@ -0,0 +1,582 @@ +# Ceres Solver - A fast non-linear least squares minimizer +# Copyright 2015 Google Inc. All rights reserved. +# http://ceres-solver.org/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of Google Inc. nor the names of its contributors may be +# used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+# +# Author: alexs.mac@gmail.com (Alex Stewart) +# + +# FindGflags.cmake - Find Google gflags logging library. +# +# This module will attempt to find gflags, either via an exported CMake +# configuration (generated by gflags >= 2.1 which are built with CMake), or +# by performing a standard search for all gflags components. The order of +# precedence for these two methods of finding gflags is controlled by: +# GFLAGS_PREFER_EXPORTED_GFLAGS_CMAKE_CONFIGURATION. +# +# This module defines the following variables: +# +# GFLAGS_FOUND: TRUE iff gflags is found. +# GFLAGS_INCLUDE_DIRS: Include directories for gflags. +# GFLAGS_LIBRARIES: Libraries required to link gflags. +# GFLAGS_NAMESPACE: The namespace in which gflags is defined. In versions of +# gflags < 2.1, this was google, for versions >= 2.1 it is +# by default gflags, although can be configured when building +# gflags to be something else (i.e. google for legacy +# compatibility). +# +# The following variables control the behaviour of this module when an exported +# gflags CMake configuration is not found. +# +# GFLAGS_PREFER_EXPORTED_GFLAGS_CMAKE_CONFIGURATION: TRUE/FALSE, iff TRUE then +# then prefer using an exported CMake configuration +# generated by gflags >= 2.1 over searching for the +# gflags components manually. Otherwise (FALSE) +# ignore any exported gflags CMake configurations and +# always perform a manual search for the components. +# Default: TRUE iff user does not define this variable +# before we are called, and does NOT specify either +# GFLAGS_INCLUDE_DIR_HINTS or GFLAGS_LIBRARY_DIR_HINTS +# otherwise FALSE. +# GFLAGS_INCLUDE_DIR_HINTS: List of additional directories in which to +# search for gflags includes, e.g: /timbuktu/include. +# GFLAGS_LIBRARY_DIR_HINTS: List of additional directories in which to +# search for gflags libraries, e.g: /timbuktu/lib. 
+# +# The following variables are also defined by this module, but in line with +# CMake recommended FindPackage() module style should NOT be referenced directly +# by callers (use the plural variables detailed above instead). These variables +# do however affect the behaviour of the module via FIND_[PATH/LIBRARY]() which +# are NOT re-called (i.e. search for library is not repeated) if these variables +# are set with valid values _in the CMake cache_. This means that if these +# variables are set directly in the cache, either by the user in the CMake GUI, +# or by the user passing -DVAR=VALUE directives to CMake when called (which +# explicitly defines a cache variable), then they will be used verbatim, +# bypassing the HINTS variables and other hard-coded search locations. +# +# GFLAGS_INCLUDE_DIR: Include directory for gflags, not including the +# include directory of any dependencies. +# GFLAGS_LIBRARY: gflags library, not including the libraries of any +# dependencies. + +# Reset CALLERS_CMAKE_FIND_LIBRARY_PREFIXES to its value when FindGflags was +# invoked, necessary for MSVC. +macro(GFLAGS_RESET_FIND_LIBRARY_PREFIX) + if (MSVC) + set(CMAKE_FIND_LIBRARY_PREFIXES "${CALLERS_CMAKE_FIND_LIBRARY_PREFIXES}") + endif (MSVC) +endmacro(GFLAGS_RESET_FIND_LIBRARY_PREFIX) + +# Called if we failed to find gflags or any of it's required dependencies, +# unsets all public (designed to be used externally) variables and reports +# error message at priority depending upon [REQUIRED/QUIET/] argument. +macro(GFLAGS_REPORT_NOT_FOUND REASON_MSG) + unset(GFLAGS_FOUND) + unset(GFLAGS_INCLUDE_DIRS) + unset(GFLAGS_LIBRARIES) + # Do not use unset, as we want to keep GFLAGS_NAMESPACE in the cache, + # but simply clear its value. + set(GFLAGS_NAMESPACE "" CACHE STRING + "gflags namespace (google or gflags)" FORCE) + + # Make results of search visible in the CMake GUI if gflags has not + # been found so that user does not have to toggle to advanced view. 
+ mark_as_advanced(CLEAR GFLAGS_INCLUDE_DIR + GFLAGS_LIBRARY + GFLAGS_NAMESPACE) + + gflags_reset_find_library_prefix() + + # Note _FIND_[REQUIRED/QUIETLY] variables defined by FindPackage() + # use the camelcase library name, not uppercase. + if (Gflags_FIND_QUIETLY) + message(STATUS "Failed to find gflags - " ${REASON_MSG} ${ARGN}) + elseif (Gflags_FIND_REQUIRED) + message(FATAL_ERROR "Failed to find gflags - " ${REASON_MSG} ${ARGN}) + else() + # Neither QUIETLY nor REQUIRED, use no priority which emits a message + # but continues configuration and allows generation. + message("-- Failed to find gflags - " ${REASON_MSG} ${ARGN}) + endif () + return() +endmacro(GFLAGS_REPORT_NOT_FOUND) + +# Verify that all variable names passed as arguments are defined (can be empty +# but must be defined) or raise a fatal error. +macro(GFLAGS_CHECK_VARS_DEFINED) + foreach(CHECK_VAR ${ARGN}) + if (NOT DEFINED ${CHECK_VAR}) + message(FATAL_ERROR "Ceres Bug: ${CHECK_VAR} is not defined.") + endif() + endforeach() +endmacro(GFLAGS_CHECK_VARS_DEFINED) + +# Use check_cxx_source_compiles() to compile trivial test programs to determine +# the gflags namespace. This works on all OSs except Windows. If using Visual +# Studio, it fails because msbuild forces check_cxx_source_compiles() to use +# CMAKE_BUILD_TYPE=Debug for the test project, which usually breaks detection +# because MSVC requires that the test project use the same build type as gflags, +# which would normally be built in Release. +# +# Defines: GFLAGS_NAMESPACE in the caller's scope with the detected namespace, +# which is blank (empty string, will test FALSE is CMake conditionals) +# if detection failed. +function(GFLAGS_CHECK_GFLAGS_NAMESPACE_USING_TRY_COMPILE) + # Verify that all required variables are defined. + gflags_check_vars_defined( + GFLAGS_INCLUDE_DIR GFLAGS_LIBRARY) + # Ensure that GFLAGS_NAMESPACE is always unset on completion unless + # we explicitly set if after having the correct namespace. 
+ set(GFLAGS_NAMESPACE "" PARENT_SCOPE) + + include(CheckCXXSourceCompiles) + # Setup include path & link library for gflags for CHECK_CXX_SOURCE_COMPILES. + set(CMAKE_REQUIRED_INCLUDES ${GFLAGS_INCLUDE_DIR}) + set(CMAKE_REQUIRED_LIBRARIES ${GFLAGS_LIBRARY} ${GFLAGS_LINK_LIBRARIES}) + # First try the (older) google namespace. Note that the output variable + # MUST be unique to the build type as otherwise the test is not repeated as + # it is assumed to have already been performed. + check_cxx_source_compiles( + "#include + int main(int argc, char * argv[]) { + google::ParseCommandLineFlags(&argc, &argv, true); + return 0; + }" + GFLAGS_IN_GOOGLE_NAMESPACE) + if (GFLAGS_IN_GOOGLE_NAMESPACE) + set(GFLAGS_NAMESPACE google PARENT_SCOPE) + return() + endif() + + # Try (newer) gflags namespace instead. Note that the output variable + # MUST be unique to the build type as otherwise the test is not repeated as + # it is assumed to have already been performed. + set(CMAKE_REQUIRED_INCLUDES ${GFLAGS_INCLUDE_DIR}) + set(CMAKE_REQUIRED_LIBRARIES ${GFLAGS_LIBRARY} ${GFLAGS_LINK_LIBRARIES}) + check_cxx_source_compiles( + "#include + int main(int argc, char * argv[]) { + gflags::ParseCommandLineFlags(&argc, &argv, true); + return 0; + }" + GFLAGS_IN_GFLAGS_NAMESPACE) + if (GFLAGS_IN_GFLAGS_NAMESPACE) + set(GFLAGS_NAMESPACE gflags PARENT_SCOPE) + return() + endif (GFLAGS_IN_GFLAGS_NAMESPACE) +endfunction(GFLAGS_CHECK_GFLAGS_NAMESPACE_USING_TRY_COMPILE) + +# Use regex on the gflags headers to attempt to determine the gflags namespace. +# Checks both gflags.h (contained namespace on versions < 2.1.2) and +# gflags_declare.h, which contains the namespace on versions >= 2.1.2. +# In general, this method should only be used when +# GFLAGS_CHECK_GFLAGS_NAMESPACE_USING_TRY_COMPILE() cannot be used, or has +# failed. 
+# +# Defines: GFLAGS_NAMESPACE in the caller's scope with the detected namespace, +# which is blank (empty string, will test FALSE is CMake conditionals) +# if detection failed. +function(GFLAGS_CHECK_GFLAGS_NAMESPACE_USING_REGEX) + # Verify that all required variables are defined. + gflags_check_vars_defined(GFLAGS_INCLUDE_DIR) + # Ensure that GFLAGS_NAMESPACE is always undefined on completion unless + # we explicitly set if after having the correct namespace. + set(GFLAGS_NAMESPACE "" PARENT_SCOPE) + + # Scan gflags.h to identify what namespace gflags was built with. On + # versions of gflags < 2.1.2, gflags.h was configured with the namespace + # directly, on >= 2.1.2, gflags.h uses the GFLAGS_NAMESPACE #define which + # is defined in gflags_declare.h, we try each location in turn. + set(GFLAGS_HEADER_FILE ${GFLAGS_INCLUDE_DIR}/gflags/gflags.h) + if (NOT EXISTS ${GFLAGS_HEADER_FILE}) + gflags_report_not_found( + "Could not find file: ${GFLAGS_HEADER_FILE} " + "containing namespace information in gflags install located at: " + "${GFLAGS_INCLUDE_DIR}.") + endif() + file(READ ${GFLAGS_HEADER_FILE} GFLAGS_HEADER_FILE_CONTENTS) + + string(REGEX MATCH "namespace [A-Za-z]+" + GFLAGS_NAMESPACE "${GFLAGS_HEADER_FILE_CONTENTS}") + string(REGEX REPLACE "namespace ([A-Za-z]+)" "\\1" + GFLAGS_NAMESPACE "${GFLAGS_NAMESPACE}") + + if (NOT GFLAGS_NAMESPACE) + gflags_report_not_found( + "Failed to extract gflags namespace from header file: " + "${GFLAGS_HEADER_FILE}.") + endif (NOT GFLAGS_NAMESPACE) + + if (GFLAGS_NAMESPACE STREQUAL "google" OR + GFLAGS_NAMESPACE STREQUAL "gflags") + # Found valid gflags namespace from gflags.h. + set(GFLAGS_NAMESPACE "${GFLAGS_NAMESPACE}" PARENT_SCOPE) + return() + endif() + + # Failed to find gflags namespace from gflags.h, gflags is likely a new + # version, check gflags_declare.h, which in newer versions (>= 2.1.2) contains + # the GFLAGS_NAMESPACE #define, which is then referenced in gflags.h. 
+ set(GFLAGS_DECLARE_FILE ${GFLAGS_INCLUDE_DIR}/gflags/gflags_declare.h) + if (NOT EXISTS ${GFLAGS_DECLARE_FILE}) + gflags_report_not_found( + "Could not find file: ${GFLAGS_DECLARE_FILE} " + "containing namespace information in gflags install located at: " + "${GFLAGS_INCLUDE_DIR}.") + endif() + file(READ ${GFLAGS_DECLARE_FILE} GFLAGS_DECLARE_FILE_CONTENTS) + + string(REGEX MATCH "#define GFLAGS_NAMESPACE [A-Za-z]+" + GFLAGS_NAMESPACE "${GFLAGS_DECLARE_FILE_CONTENTS}") + string(REGEX REPLACE "#define GFLAGS_NAMESPACE ([A-Za-z]+)" "\\1" + GFLAGS_NAMESPACE "${GFLAGS_NAMESPACE}") + + if (NOT GFLAGS_NAMESPACE) + gflags_report_not_found( + "Failed to extract gflags namespace from declare file: " + "${GFLAGS_DECLARE_FILE}.") + endif (NOT GFLAGS_NAMESPACE) + + if (GFLAGS_NAMESPACE STREQUAL "google" OR + GFLAGS_NAMESPACE STREQUAL "gflags") + # Found valid gflags namespace from gflags.h. + set(GFLAGS_NAMESPACE "${GFLAGS_NAMESPACE}" PARENT_SCOPE) + return() + endif() +endfunction(GFLAGS_CHECK_GFLAGS_NAMESPACE_USING_REGEX) + +# ----------------------------------------------------------------- +# By default, if the user has expressed no preference for using an exported +# gflags CMake configuration over performing a search for the installed +# components, and has not specified any hints for the search locations, then +# prefer a gflags exported configuration if available. +if (NOT DEFINED GFLAGS_PREFER_EXPORTED_GFLAGS_CMAKE_CONFIGURATION + AND NOT GFLAGS_INCLUDE_DIR_HINTS + AND NOT GFLAGS_LIBRARY_DIR_HINTS) + message(STATUS "No preference for use of exported gflags CMake configuration " + "set, and no hints for include/library directories provided. " + "Defaulting to preferring an installed/exported gflags CMake configuration " + "if available.") + set(GFLAGS_PREFER_EXPORTED_GFLAGS_CMAKE_CONFIGURATION TRUE) +endif() + +if (GFLAGS_PREFER_EXPORTED_GFLAGS_CMAKE_CONFIGURATION) + # Try to find an exported CMake configuration for gflags, as generated by + # gflags versions >= 2.1. 
+ # + # We search twice, s/t we can invert the ordering of precedence used by + # find_package() for exported package build directories, and installed + # packages (found via CMAKE_SYSTEM_PREFIX_PATH), listed as items 6) and 7) + # respectively in [1]. + # + # By default, exported build directories are (in theory) detected first, and + # this is usually the case on Windows. However, on OS X & Linux, the install + # path (/usr/local) is typically present in the PATH environment variable + # which is checked in item 4) in [1] (i.e. before both of the above, unless + # NO_SYSTEM_ENVIRONMENT_PATH is passed). As such on those OSs installed + # packages are usually detected in preference to exported package build + # directories. + # + # To ensure a more consistent response across all OSs, and as users usually + # want to prefer an installed version of a package over a locally built one + # where both exist (esp. as the exported build directory might be removed + # after installation), we first search with NO_CMAKE_PACKAGE_REGISTRY which + # means any build directories exported by the user are ignored, and thus + # installed directories are preferred. If this fails to find the package + # we then research again, but without NO_CMAKE_PACKAGE_REGISTRY, so any + # exported build directories will now be detected. + # + # To prevent confusion on Windows, we also pass NO_CMAKE_BUILDS_PATH (which + # is item 5) in [1]), to not preferentially use projects that were built + # recently with the CMake GUI to ensure that we always prefer an installed + # version if available. + # + # [1] http://www.cmake.org/cmake/help/v2.8.11/cmake.html#command:find_package + find_package(gflags QUIET + NO_MODULE + NO_CMAKE_PACKAGE_REGISTRY + NO_CMAKE_BUILDS_PATH) + if (gflags_FOUND) + message(STATUS "Found installed version of gflags: ${gflags_DIR}") + else(gflags_FOUND) + # Failed to find an installed version of gflags, repeat search allowing + # exported build directories. 
+ message(STATUS "Failed to find installed gflags CMake configuration, " + "searching for gflags build directories exported with CMake.") + # Again pass NO_CMAKE_BUILDS_PATH, as we know that gflags is exported and + # do not want to treat projects built with the CMake GUI preferentially. + find_package(gflags QUIET + NO_MODULE + NO_CMAKE_BUILDS_PATH) + if (gflags_FOUND) + message(STATUS "Found exported gflags build directory: ${gflags_DIR}") + endif(gflags_FOUND) + endif(gflags_FOUND) + + set(FOUND_INSTALLED_GFLAGS_CMAKE_CONFIGURATION ${gflags_FOUND}) + + # gflags v2.1 - 2.1.2 shipped with a bug in their gflags-config.cmake [1] + # whereby gflags_LIBRARIES = "gflags", but there was no imported target + # called "gflags", they were called: gflags[_nothreads]-[static/shared]. + # As this causes linker errors when gflags is not installed in a location + # on the current library paths, detect if this problem is present and + # fix it. + # + # [1] https://github.com/gflags/gflags/issues/110 + if (gflags_FOUND) + # NOTE: This is not written as additional conditions in the outer + # if (gflags_FOUND) as the NOT TARGET "${gflags_LIBRARIES}" + # condition causes problems if gflags is not found. + if (${gflags_VERSION} VERSION_LESS 2.1.3 AND + NOT TARGET "${gflags_LIBRARIES}") + message(STATUS "Detected broken gflags install in: ${gflags_DIR}, " + "version: ${gflags_VERSION} <= 2.1.2 which defines gflags_LIBRARIES = " + "${gflags_LIBRARIES} which is not an imported CMake target, see: " + "https://github.com/gflags/gflags/issues/110. Attempting to fix by " + "detecting correct gflags target.") + # Ordering here expresses preference for detection, specifically we do not + # want to use the _nothreads variants if the full library is available. 
+ list(APPEND CHECK_GFLAGS_IMPORTED_TARGET_NAMES + gflags-shared gflags-static + gflags_nothreads-shared gflags_nothreads-static) + foreach(CHECK_GFLAGS_TARGET ${CHECK_GFLAGS_IMPORTED_TARGET_NAMES}) + if (TARGET ${CHECK_GFLAGS_TARGET}) + message(STATUS "Found valid gflags target: ${CHECK_GFLAGS_TARGET}, " + "updating gflags_LIBRARIES.") + set(gflags_LIBRARIES ${CHECK_GFLAGS_TARGET}) + break() + endif() + endforeach() + if (NOT TARGET ${gflags_LIBRARIES}) + message(STATUS "Failed to fix detected broken gflags install in: " + "${gflags_DIR}, version: ${gflags_VERSION} <= 2.1.2, none of the " + "imported targets for gflags: ${CHECK_GFLAGS_IMPORTED_TARGET_NAMES} " + "are defined. Will continue with a manual search for gflags " + "components. We recommend you build/install a version of gflags > " + "2.1.2 (or master).") + set(FOUND_INSTALLED_GFLAGS_CMAKE_CONFIGURATION FALSE) + endif() + endif() + endif() + + if (FOUND_INSTALLED_GFLAGS_CMAKE_CONFIGURATION) + message(STATUS "Detected gflags version: ${gflags_VERSION}") + set(GFLAGS_FOUND ${gflags_FOUND}) + set(GFLAGS_INCLUDE_DIR ${gflags_INCLUDE_DIR}) + set(GFLAGS_LIBRARY ${gflags_LIBRARIES}) + + # gflags does not export the namespace in their CMake configuration, so + # use our function to determine what it should be, as it can be either + # gflags or google dependent upon version & configuration. + # + # NOTE: We use the regex method to determine the namespace here, as + # check_cxx_source_compiles() will not use imported targets, which + # is what gflags will be in this case. 
+ gflags_check_gflags_namespace_using_regex() + + if (NOT GFLAGS_NAMESPACE) + gflags_report_not_found( + "Failed to determine gflags namespace using regex for gflags " + "version: ${gflags_VERSION} exported here: ${gflags_DIR} using CMake.") + endif (NOT GFLAGS_NAMESPACE) + else (FOUND_INSTALLED_GFLAGS_CMAKE_CONFIGURATION) + message(STATUS "Failed to find an installed/exported CMake configuration " + "for gflags, will perform search for installed gflags components.") + endif (FOUND_INSTALLED_GFLAGS_CMAKE_CONFIGURATION) +endif(GFLAGS_PREFER_EXPORTED_GFLAGS_CMAKE_CONFIGURATION) + +if (NOT GFLAGS_FOUND) + # Either failed to find an exported gflags CMake configuration, or user + # told us not to use one. Perform a manual search for all gflags components. + + # Handle possible presence of lib prefix for libraries on MSVC, see + # also GFLAGS_RESET_FIND_LIBRARY_PREFIX(). + if (MSVC) + # Preserve the caller's original values for CMAKE_FIND_LIBRARY_PREFIXES + # s/t we can set it back before returning. + set(CALLERS_CMAKE_FIND_LIBRARY_PREFIXES "${CMAKE_FIND_LIBRARY_PREFIXES}") + # The empty string in this list is important, it represents the case when + # the libraries have no prefix (shared libraries / DLLs). + set(CMAKE_FIND_LIBRARY_PREFIXES "lib" "" "${CMAKE_FIND_LIBRARY_PREFIXES}") + endif (MSVC) + + # Search user-installed locations first, so that we prefer user installs + # to system installs where both exist. + list(APPEND GFLAGS_CHECK_INCLUDE_DIRS + /usr/local/include + /usr/local/homebrew/include # Mac OS X + /opt/local/var/macports/software # Mac OS X. + /opt/local/include + /usr/include) + list(APPEND GFLAGS_CHECK_PATH_SUFFIXES + gflags/include # Windows (for C:/Program Files prefix). + gflags/Include ) # Windows (for C:/Program Files prefix). + + list(APPEND GFLAGS_CHECK_LIBRARY_DIRS + /usr/local/lib + /usr/local/homebrew/lib # Mac OS X. + /opt/local/lib + /usr/lib) + list(APPEND GFLAGS_CHECK_LIBRARY_SUFFIXES + gflags/lib # Windows (for C:/Program Files prefix). 
+ gflags/Lib ) # Windows (for C:/Program Files prefix). + + # Search supplied hint directories first if supplied. + find_path(GFLAGS_INCLUDE_DIR + NAMES gflags/gflags.h + PATHS ${GFLAGS_INCLUDE_DIR_HINTS} + ${GFLAGS_CHECK_INCLUDE_DIRS} + PATH_SUFFIXES ${GFLAGS_CHECK_PATH_SUFFIXES}) + if (NOT GFLAGS_INCLUDE_DIR OR + NOT EXISTS ${GFLAGS_INCLUDE_DIR}) + gflags_report_not_found( + "Could not find gflags include directory, set GFLAGS_INCLUDE_DIR " + "to directory containing gflags/gflags.h") + endif (NOT GFLAGS_INCLUDE_DIR OR + NOT EXISTS ${GFLAGS_INCLUDE_DIR}) + + find_library(GFLAGS_LIBRARY NAMES gflags + PATHS ${GFLAGS_LIBRARY_DIR_HINTS} + ${GFLAGS_CHECK_LIBRARY_DIRS} + PATH_SUFFIXES ${GFLAGS_CHECK_LIBRARY_SUFFIXES}) + if (NOT GFLAGS_LIBRARY OR + NOT EXISTS ${GFLAGS_LIBRARY}) + gflags_report_not_found( + "Could not find gflags library, set GFLAGS_LIBRARY " + "to full path to libgflags.") + endif (NOT GFLAGS_LIBRARY OR + NOT EXISTS ${GFLAGS_LIBRARY}) + + # gflags typically requires a threading library (which is OS dependent), note + # that this defines the CMAKE_THREAD_LIBS_INIT variable. If we are able to + # detect threads, we assume that gflags requires it. + find_package(Threads QUIET) + set(GFLAGS_LINK_LIBRARIES ${CMAKE_THREAD_LIBS_INIT}) + # On Windows (including MinGW), the Shlwapi library is used by gflags if + # available. + if (WIN32) + include(CheckIncludeFileCXX) + check_include_file_cxx("shlwapi.h" HAVE_SHLWAPI) + if (HAVE_SHLWAPI) + list(APPEND GFLAGS_LINK_LIBRARIES shlwapi.lib) + endif(HAVE_SHLWAPI) + endif (WIN32) + + # Mark internally as found, then verify. GFLAGS_REPORT_NOT_FOUND() unsets + # if called. + set(GFLAGS_FOUND TRUE) + + # Identify what namespace gflags was built with. 
+ if (GFLAGS_INCLUDE_DIR AND NOT GFLAGS_NAMESPACE) + # To handle Windows peculiarities / CMake bugs on MSVC we try two approaches + # to detect the gflags namespace: + # + # 1) Try to use check_cxx_source_compiles() to compile a trivial program + # with the two choices for the gflags namespace. + # + # 2) [In the event 1) fails] Use regex on the gflags headers to try to + # determine the gflags namespace. Whilst this is less robust than 1), + # it does avoid any interaction with msbuild. + gflags_check_gflags_namespace_using_try_compile() + + if (NOT GFLAGS_NAMESPACE) + # Failed to determine gflags namespace using check_cxx_source_compiles() + # method, try and obtain it using regex on the gflags headers instead. + message(STATUS "Failed to find gflags namespace using using " + "check_cxx_source_compiles(), trying namespace regex instead, " + "this is expected on Windows.") + gflags_check_gflags_namespace_using_regex() + + if (NOT GFLAGS_NAMESPACE) + gflags_report_not_found( + "Failed to determine gflags namespace either by " + "check_cxx_source_compiles(), or namespace regex.") + endif (NOT GFLAGS_NAMESPACE) + endif (NOT GFLAGS_NAMESPACE) + endif (GFLAGS_INCLUDE_DIR AND NOT GFLAGS_NAMESPACE) + + # Make the GFLAGS_NAMESPACE a cache variable s/t the user can view it, and could + # overwrite it in the CMake GUI. + set(GFLAGS_NAMESPACE "${GFLAGS_NAMESPACE}" CACHE STRING + "gflags namespace (google or gflags)" FORCE) + + # gflags does not seem to provide any record of the version in its + # source tree, thus cannot extract version. + + # Catch case when caller has set GFLAGS_NAMESPACE in the cache / GUI + # with an invalid value. 
+ if (GFLAGS_NAMESPACE AND + NOT GFLAGS_NAMESPACE STREQUAL "google" AND + NOT GFLAGS_NAMESPACE STREQUAL "gflags") + gflags_report_not_found( + "Caller defined GFLAGS_NAMESPACE:" + " ${GFLAGS_NAMESPACE} is not valid, not google or gflags.") + endif () + # Catch case when caller has set GFLAGS_INCLUDE_DIR in the cache / GUI and + # thus FIND_[PATH/LIBRARY] are not called, but specified locations are + # invalid, otherwise we would report the library as found. + if (GFLAGS_INCLUDE_DIR AND + NOT EXISTS ${GFLAGS_INCLUDE_DIR}/gflags/gflags.h) + gflags_report_not_found( + "Caller defined GFLAGS_INCLUDE_DIR:" + " ${GFLAGS_INCLUDE_DIR} does not contain gflags/gflags.h header.") + endif (GFLAGS_INCLUDE_DIR AND + NOT EXISTS ${GFLAGS_INCLUDE_DIR}/gflags/gflags.h) + # TODO: This regex for gflags library is pretty primitive, we use lowercase + # for comparison to handle Windows using CamelCase library names, could + # this check be better? + string(TOLOWER "${GFLAGS_LIBRARY}" LOWERCASE_GFLAGS_LIBRARY) + if (GFLAGS_LIBRARY AND + NOT "${LOWERCASE_GFLAGS_LIBRARY}" MATCHES ".*gflags[^/]*") + gflags_report_not_found( + "Caller defined GFLAGS_LIBRARY: " + "${GFLAGS_LIBRARY} does not match gflags.") + endif (GFLAGS_LIBRARY AND + NOT "${LOWERCASE_GFLAGS_LIBRARY}" MATCHES ".*gflags[^/]*") + + gflags_reset_find_library_prefix() + +endif(NOT GFLAGS_FOUND) + +# Set standard CMake FindPackage variables if found. +if (GFLAGS_FOUND) + set(GFLAGS_INCLUDE_DIRS ${GFLAGS_INCLUDE_DIR}) + set(GFLAGS_LIBRARIES ${GFLAGS_LIBRARY} ${GFLAGS_LINK_LIBRARIES}) +endif (GFLAGS_FOUND) + +# Handle REQUIRED / QUIET optional arguments. +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(Gflags DEFAULT_MSG + GFLAGS_INCLUDE_DIRS GFLAGS_LIBRARIES GFLAGS_NAMESPACE) + +# Only mark internal variables as advanced if we found gflags, otherwise +# leave them visible in the standard GUI for the user to set manually. 
+if (GFLAGS_FOUND) + mark_as_advanced(FORCE GFLAGS_INCLUDE_DIR + GFLAGS_LIBRARY + GFLAGS_NAMESPACE + gflags_DIR) # Autogenerated by find_package(gflags) +endif (GFLAGS_FOUND) From ddbb610fd328046ab27e906fe661391485597593 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 27 Mar 2017 11:02:41 +0800 Subject: [PATCH 38/43] Find a bug about recommark. --- doc/design/multi_language_interface/00.why_plain_c.md | 2 +- .../multi_language_interface/01.inference_implementation.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/design/multi_language_interface/00.why_plain_c.md b/doc/design/multi_language_interface/00.why_plain_c.md index 7cb05f3ec0134..4004f16daf218 100644 --- a/doc/design/multi_language_interface/00.why_plain_c.md +++ b/doc/design/multi_language_interface/00.why_plain_c.md @@ -115,4 +115,4 @@ class CMatrix { ## 实现 -参考[预测接口实现](./01.inference_implementation.md) +参考[Inference implementation](01.inference_implementation.md) diff --git a/doc/design/multi_language_interface/01.inference_implementation.md b/doc/design/multi_language_interface/01.inference_implementation.md index 1324bfcaa3112..9db9ce1834d16 100644 --- a/doc/design/multi_language_interface/01.inference_implementation.md +++ b/doc/design/multi_language_interface/01.inference_implementation.md @@ -1,6 +1,6 @@ # C-API 模型推断实现文档 -本文档描述Paddle C-API的实现细节。Paddle C-API是多语言API的基础部分。Paddle需要暴露的API很多。先实现模型推断的API,通过模型推断API的实现作为一个样例,来进行讨论。至于为什么需要C-API,请参考[这里](./00.why_plain_c.md)。 +本文档描述Paddle C-API的实现细节。Paddle C-API是多语言API的基础部分。Paddle需要暴露的API很多。先实现模型推断的API,通过模型推断API的实现作为一个样例,来进行讨论。至于为什么需要C-API,请参考[Why Plain C](./00.why_plain_c.md)。 ## Table of Contents * [C-API 模型推断实现文档](#c-api-模型推断实现文档) @@ -128,4 +128,4 @@ C-API的编译选项默认关闭,打开这个编译选项,需要在cmake的 cmake ${YOUR_SOURCE_ROOT} -DWITH_C_API=ON -DWITH_PYTHON=OFF -DWITH_SWIG_PY=OFF ``` -编译C-API的时候推荐Paddle不嵌入Python解释器,也不生成`SWIG`接口,具体原因参考[这里](./00.why_plain_c.md)。 +编译C-API的时候推荐Paddle不嵌入Python解释器,也不生成`SWIG`接口,具体原因参考[Why Plain C](./00.why_plain_c.md)。 
From 87dfc12a295c17ea02188b0fe0e6b55943191582 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 14 Apr 2017 14:40:01 +0800 Subject: [PATCH 39/43] Follow comments --- .../examples/model_inference/dense/main.c | 6 +++++ .../model_inference/multi_thread/main.c | 22 ++++++++++--------- .../examples/model_inference/sequence/main.c | 7 ++++++ .../model_inference/sparse_binary/main.c | 6 +++++ paddle/capi/tests/test_Init.cpp | 0 5 files changed, 31 insertions(+), 10 deletions(-) delete mode 100644 paddle/capi/tests/test_Init.cpp diff --git a/paddle/capi/examples/model_inference/dense/main.c b/paddle/capi/examples/model_inference/dense/main.c index 4dddd65bbfb3c..e03fe748f615f 100644 --- a/paddle/capi/examples/model_inference/dense/main.c +++ b/paddle/capi/examples/model_inference/dense/main.c @@ -59,5 +59,11 @@ int main() { } printf("\n"); + CHECK(paddle_matrix_destroy(prob)); + CHECK(paddle_arguments_destroy(out_args)); + CHECK(paddle_matrix_destroy(mat)); + CHECK(paddle_arguments_destroy(in_args)); + CHECK(paddle_gradient_machine_destroy(machine)); + return 0; } diff --git a/paddle/capi/examples/model_inference/multi_thread/main.c b/paddle/capi/examples/model_inference/multi_thread/main.c index 23f8629765d8a..ab0eb32c5821a 100644 --- a/paddle/capi/examples/model_inference/multi_thread/main.c +++ b/paddle/capi/examples/model_inference/multi_thread/main.c @@ -4,24 +4,24 @@ #include "../common/common.h" #define CONFIG_BIN "./trainer_config.bin" -#define NUM_THREAD 1000 +#define NUM_THREAD 4 #define NUM_ITER 1000 pthread_mutex_t mutex; void* thread_main(void* gm_ptr) { paddle_gradient_machine machine = (paddle_gradient_machine)(gm_ptr); - + paddle_arguments in_args = paddle_arguments_create_none(); + // Create input matrix. 
+ paddle_matrix mat = paddle_matrix_create(/* sample_num */ 1, + /* size */ 784, + /* useGPU */ false); + paddle_arguments out_args = paddle_arguments_create_none(); + paddle_matrix prob = paddle_matrix_create_none(); for (int iter = 0; iter < NUM_ITER; ++iter) { - paddle_arguments in_args = paddle_arguments_create_none(); // There is only one input of this network. CHECK(paddle_arguments_resize(in_args, 1)); - // Create input matrix. - paddle_matrix mat = paddle_matrix_create(/* sample_num */ 1, - /* size */ 784, - /* useGPU */ false); - paddle_real* array; // Get First row. @@ -33,12 +33,10 @@ void* thread_main(void* gm_ptr) { CHECK(paddle_arguments_set_value(in_args, 0, mat)); - paddle_arguments out_args = paddle_arguments_create_none(); CHECK(paddle_gradient_machine_forward(machine, in_args, out_args, /* isTrain */ false)); - paddle_matrix prob = paddle_matrix_create_none(); CHECK(paddle_arguments_value(out_args, 0, prob)); @@ -53,6 +51,10 @@ void* thread_main(void* gm_ptr) { pthread_mutex_unlock(&mutex); } + CHECK(paddle_matrix_destroy(prob)); + CHECK(paddle_arguments_destroy(out_args)); + CHECK(paddle_matrix_destroy(mat)); + CHECK(paddle_arguments_destroy(in_args)); CHECK(paddle_gradient_machine_destroy(machine)); return NULL; } diff --git a/paddle/capi/examples/model_inference/sequence/main.c b/paddle/capi/examples/model_inference/sequence/main.c index 7e71bb8b8aff4..142793cdb3e75 100644 --- a/paddle/capi/examples/model_inference/sequence/main.c +++ b/paddle/capi/examples/model_inference/sequence/main.c @@ -59,5 +59,12 @@ int main() { } printf("\n"); + CHECK(paddle_matrix_destroy(prob)); + CHECK(paddle_arguments_destroy(out_args)); + CHECK(paddle_ivector_destroy(seq_pos)); + CHECK(paddle_ivector_destroy(sentence)); + CHECK(paddle_arguments_destroy(in_args)); + CHECK(paddle_gradient_machine_destroy(machine)); + return 0; } diff --git a/paddle/capi/examples/model_inference/sparse_binary/main.c b/paddle/capi/examples/model_inference/sparse_binary/main.c index 
c5e653dbc2876..776ad878911b6 100644 --- a/paddle/capi/examples/model_inference/sparse_binary/main.c +++ b/paddle/capi/examples/model_inference/sparse_binary/main.c @@ -60,5 +60,11 @@ int main() { } printf("\n"); + CHECK(paddle_matrix_destroy(prob)); + CHECK(paddle_arguments_destroy(out_args)); + CHECK(paddle_matrix_destroy(mat)); + CHECK(paddle_arguments_destroy(in_args)); + CHECK(paddle_gradient_machine_destroy(machine)); + return 0; } diff --git a/paddle/capi/tests/test_Init.cpp b/paddle/capi/tests/test_Init.cpp deleted file mode 100644 index e69de29bb2d1d..0000000000000 From bda2008630ad22b0be6da96c9fcb53e520c0e70e Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 14 Apr 2017 14:41:46 +0800 Subject: [PATCH 40/43] Add TODO for GPU unittest --- paddle/capi/tests/test_Arguments.cpp | 1 + paddle/capi/tests/test_GradientMachine.cpp | 1 + paddle/capi/tests/test_Matrix.cpp | 1 + paddle/capi/tests/test_Vector.cpp | 1 + 4 files changed, 4 insertions(+) diff --git a/paddle/capi/tests/test_Arguments.cpp b/paddle/capi/tests/test_Arguments.cpp index f56391d51e32a..012901a49168e 100644 --- a/paddle/capi/tests/test_Arguments.cpp +++ b/paddle/capi/tests/test_Arguments.cpp @@ -29,6 +29,7 @@ static std::vector randomBuffer(size_t bufSize) { } TEST(CAPIArguments, create) { + //! TODO(yuyang18): Test GPU Code. paddle_arguments args = paddle_arguments_create_none(); uint64_t size; ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_size(args, &size)); diff --git a/paddle/capi/tests/test_GradientMachine.cpp b/paddle/capi/tests/test_GradientMachine.cpp index be7dfadddc35e..6c8d74c90b25d 100644 --- a/paddle/capi/tests/test_GradientMachine.cpp +++ b/paddle/capi/tests/test_GradientMachine.cpp @@ -33,6 +33,7 @@ static std::vector randomBuffer(size_t bufSize) { } TEST(GradientMachine, testPredict) { + //! TODO(yuyang18): Test GPU Code. 
paddle::TrainerConfigHelper config("./test_predict_network.py"); std::string buffer; ASSERT_TRUE(config.getModelConfig().SerializeToString(&buffer)); diff --git a/paddle/capi/tests/test_Matrix.cpp b/paddle/capi/tests/test_Matrix.cpp index 71dc2064dd02d..4bf9a9d6a9f91 100644 --- a/paddle/capi/tests/test_Matrix.cpp +++ b/paddle/capi/tests/test_Matrix.cpp @@ -16,6 +16,7 @@ limitations under the License. */ #include "gtest/gtest.h" TEST(CAPIMatrix, create) { + //! TODO(yuyang18): Test GPU Code. paddle_matrix mat = paddle_matrix_create(128, 32, false); std::vector sampleRow; sampleRow.resize(32); diff --git a/paddle/capi/tests/test_Vector.cpp b/paddle/capi/tests/test_Vector.cpp index c5c57b7288d24..365160dc9a08e 100644 --- a/paddle/capi/tests/test_Vector.cpp +++ b/paddle/capi/tests/test_Vector.cpp @@ -16,6 +16,7 @@ limitations under the License. */ #include "gtest/gtest.h" TEST(CAPIVector, create) { + //! TODO(yuyang18): Test GPU Code. paddle_ivector vec; int array[3] = {1, 2, 3}; vec = paddle_ivector_create(array, 3, true, false); From 91927cc3a20ef805a1fb7dafc7d71a014daade27 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 20 Apr 2017 21:28:05 +0800 Subject: [PATCH 41/43] Change name conventions. 
--- paddle/capi/Arguments.cpp | 22 +++++++-------- paddle/capi/arguments.h | 27 ++++++++++--------- .../examples/model_inference/dense/main.c | 2 +- .../model_inference/multi_thread/main.c | 2 +- .../examples/model_inference/sequence/main.c | 2 +- .../model_inference/sparse_binary/main.c | 2 +- paddle/capi/tests/test_Arguments.cpp | 8 +++--- paddle/capi/tests/test_GradientMachine.cpp | 4 +-- 8 files changed, 35 insertions(+), 34 deletions(-) diff --git a/paddle/capi/Arguments.cpp b/paddle/capi/Arguments.cpp index 2954f522c95fa..8b81ec69e6039 100644 --- a/paddle/capi/Arguments.cpp +++ b/paddle/capi/Arguments.cpp @@ -31,7 +31,7 @@ paddle_error paddle_arguments_destroy(paddle_arguments args) { return kPD_NO_ERROR; } -paddle_error paddle_arguments_size(paddle_arguments args, uint64_t* size) { +paddle_error paddle_arguments_get_size(paddle_arguments args, uint64_t* size) { if (args == nullptr || size == nullptr) return kPD_NULLPTR; *size = castArg(args)->args.size(); return kPD_NO_ERROR; @@ -55,9 +55,9 @@ paddle_error paddle_arguments_set_value(paddle_arguments args, return kPD_NO_ERROR; } -paddle_error paddle_arguments_value(paddle_arguments args, - uint64_t ID, - paddle_matrix mat) { +paddle_error paddle_arguments_get_value(paddle_arguments args, + uint64_t ID, + paddle_matrix mat) { if (args == nullptr || mat == nullptr) return kPD_NULLPTR; auto m = paddle::capi::cast(mat); auto a = castArg(args); @@ -66,9 +66,9 @@ paddle_error paddle_arguments_value(paddle_arguments args, return kPD_NO_ERROR; } -paddle_error paddle_arguments_ids(paddle_arguments args, - uint64_t ID, - paddle_ivector ids) { +paddle_error paddle_arguments_get_ids(paddle_arguments args, + uint64_t ID, + paddle_ivector ids) { if (args == nullptr || ids == nullptr) return kPD_NULLPTR; auto iv = castIVec(ids); auto a = castArg(args); @@ -103,10 +103,10 @@ paddle_error paddle_arguments_set_sequence_start_pos(paddle_arguments args, }); } -paddle_error paddle_arguments_sequence_start_pos(paddle_arguments 
args, - uint64_t ID, - uint32_t nestedLevel, - paddle_ivector seqPos) { +paddle_error paddle_arguments_get_sequence_start_pos(paddle_arguments args, + uint64_t ID, + uint32_t nestedLevel, + paddle_ivector seqPos) { if (args == nullptr || seqPos == nullptr) return kPD_NULLPTR; auto iv = paddle::capi::cast(seqPos); auto a = castArg(args); diff --git a/paddle/capi/arguments.h b/paddle/capi/arguments.h index 1bb6516ea0235..d71ea26a5d1af 100644 --- a/paddle/capi/arguments.h +++ b/paddle/capi/arguments.h @@ -46,13 +46,13 @@ PD_API paddle_arguments paddle_arguments_create_none(); PD_API paddle_error paddle_arguments_destroy(paddle_arguments args); /** - * @brief PDArgsGetSize Get size of arguments array + * @brief paddle_arguments_get_size Get size of arguments array * @param [in] args arguments array * @param [out] size array size * @return paddle_error */ -PD_API paddle_error paddle_arguments_size(paddle_arguments args, - uint64_t* size); +PD_API paddle_error paddle_arguments_get_size(paddle_arguments args, + uint64_t* size); /** * @brief PDArgsResize Resize a arguments array. 
@@ -83,9 +83,9 @@ PD_API paddle_error paddle_arguments_set_value(paddle_arguments args, * @param [out] mat matrix pointer * @return paddle_error */ -PD_API paddle_error paddle_arguments_value(paddle_arguments args, - uint64_t ID, - paddle_matrix mat); +PD_API paddle_error paddle_arguments_get_value(paddle_arguments args, + uint64_t ID, + paddle_matrix mat); /** * @brief PDArgsGetIds Get the integer vector of one argument in array, which @@ -95,9 +95,9 @@ PD_API paddle_error paddle_arguments_value(paddle_arguments args, * @param ids integer vector pointer * @return paddle_error */ -PD_API paddle_error paddle_arguments_ids(paddle_arguments args, - uint64_t ID, - paddle_ivector ids); +PD_API paddle_error paddle_arguments_get_ids(paddle_arguments args, + uint64_t ID, + paddle_ivector ids); /** * @brief PDArgsSetIds Set the integer vector of one argument in array, which @@ -132,10 +132,11 @@ paddle_arguments_set_sequence_start_pos(paddle_arguments args, * @param [out] seqPos sequence position array * @return paddle_error */ -PD_API paddle_error paddle_arguments_sequence_start_pos(paddle_arguments args, - uint64_t ID, - uint32_t nestedLevel, - paddle_ivector seqPos); +PD_API paddle_error +paddle_arguments_get_sequence_start_pos(paddle_arguments args, + uint64_t ID, + uint32_t nestedLevel, + paddle_ivector seqPos); #ifdef __cplusplus } diff --git a/paddle/capi/examples/model_inference/dense/main.c b/paddle/capi/examples/model_inference/dense/main.c index e03fe748f615f..3e6bd5285058a 100644 --- a/paddle/capi/examples/model_inference/dense/main.c +++ b/paddle/capi/examples/model_inference/dense/main.c @@ -49,7 +49,7 @@ int main() { /* isTrain */ false)); paddle_matrix prob = paddle_matrix_create_none(); - CHECK(paddle_arguments_value(out_args, 0, prob)); + CHECK(paddle_arguments_get_value(out_args, 0, prob)); CHECK(paddle_matrix_get_row(prob, 0, &array)); diff --git a/paddle/capi/examples/model_inference/multi_thread/main.c 
b/paddle/capi/examples/model_inference/multi_thread/main.c index ab0eb32c5821a..d7675cd80a52f 100644 --- a/paddle/capi/examples/model_inference/multi_thread/main.c +++ b/paddle/capi/examples/model_inference/multi_thread/main.c @@ -38,7 +38,7 @@ void* thread_main(void* gm_ptr) { out_args, /* isTrain */ false)); - CHECK(paddle_arguments_value(out_args, 0, prob)); + CHECK(paddle_arguments_get_value(out_args, 0, prob)); CHECK(paddle_matrix_get_row(prob, 0, &array)); diff --git a/paddle/capi/examples/model_inference/sequence/main.c b/paddle/capi/examples/model_inference/sequence/main.c index 142793cdb3e75..50bc0c9201f20 100644 --- a/paddle/capi/examples/model_inference/sequence/main.c +++ b/paddle/capi/examples/model_inference/sequence/main.c @@ -47,7 +47,7 @@ int main() { /* isTrain */ false)); paddle_matrix prob = paddle_matrix_create_none(); - CHECK(paddle_arguments_value(out_args, 0, prob)); + CHECK(paddle_arguments_get_value(out_args, 0, prob)); paddle_real* array; diff --git a/paddle/capi/examples/model_inference/sparse_binary/main.c b/paddle/capi/examples/model_inference/sparse_binary/main.c index 776ad878911b6..8ba67aee56023 100644 --- a/paddle/capi/examples/model_inference/sparse_binary/main.c +++ b/paddle/capi/examples/model_inference/sparse_binary/main.c @@ -50,7 +50,7 @@ int main() { /* isTrain */ false)); paddle_matrix prob = paddle_matrix_create_none(); - CHECK(paddle_arguments_value(out_args, 0, prob)); + CHECK(paddle_arguments_get_value(out_args, 0, prob)); CHECK(paddle_matrix_get_row(prob, 0, &array)); diff --git a/paddle/capi/tests/test_Arguments.cpp b/paddle/capi/tests/test_Arguments.cpp index 012901a49168e..4792ceb49a781 100644 --- a/paddle/capi/tests/test_Arguments.cpp +++ b/paddle/capi/tests/test_Arguments.cpp @@ -32,7 +32,7 @@ TEST(CAPIArguments, create) { //! TODO(yuyang18): Test GPU Code. 
paddle_arguments args = paddle_arguments_create_none(); uint64_t size; - ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_size(args, &size)); + ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_get_size(args, &size)); ASSERT_EQ(0UL, size); ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_destroy(args)); } @@ -50,7 +50,7 @@ TEST(CAPIArguments, value) { paddle_matrix val = paddle_matrix_create_none(); - ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_value(args, 0, val)); + ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_get_value(args, 0, val)); for (size_t i = 0; i < 128; ++i) { paddle_real* row1; @@ -78,7 +78,7 @@ TEST(CAPIArguments, ids) { ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_set_ids(args, 0, ivec)); paddle_ivector val = paddle_ivector_create_none(); - ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_ids(args, 0, val)); + ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_get_ids(args, 0, val)); ASSERT_EQ(kPD_NO_ERROR, paddle_ivector_destroy(ivec)); ASSERT_EQ(kPD_NO_ERROR, paddle_ivector_destroy(val)); ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_destroy(args)); @@ -117,7 +117,7 @@ TEST(CAPIArguments, Sequence) { std::placeholders::_2, nestedLevel, std::placeholders::_3), - std::bind(paddle_arguments_sequence_start_pos, + std::bind(paddle_arguments_get_sequence_start_pos, std::placeholders::_1, std::placeholders::_2, nestedLevel, diff --git a/paddle/capi/tests/test_GradientMachine.cpp b/paddle/capi/tests/test_GradientMachine.cpp index 6c8d74c90b25d..89aa64608dd79 100644 --- a/paddle/capi/tests/test_GradientMachine.cpp +++ b/paddle/capi/tests/test_GradientMachine.cpp @@ -73,10 +73,10 @@ TEST(GradientMachine, testPredict) { paddle_gradient_machine_forward(machine, inArgs, outArgs, false)); uint64_t sz; - ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_size(outArgs, &sz)); + ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_get_size(outArgs, &sz)); ASSERT_EQ(1UL, sz); - ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_value(outArgs, 0, mat)); + ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_get_value(outArgs, 0, mat)); std::vector paddleInArgs; 
std::vector paddleOutArgs; paddleInArgs.resize(1); From dfd79c8817b74a9599417f9bb48574696b5c2b75 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 20 Apr 2017 22:34:51 +0800 Subject: [PATCH 42/43] Follow comments. --- doc/design/multi_language_interface/00.why_plain_c.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/design/multi_language_interface/00.why_plain_c.md b/doc/design/multi_language_interface/00.why_plain_c.md index 4004f16daf218..a1443093342c5 100644 --- a/doc/design/multi_language_interface/00.why_plain_c.md +++ b/doc/design/multi_language_interface/00.why_plain_c.md @@ -58,14 +58,14 @@ typedef void* paddle_matrix; typedef int paddle_error; extern "C" -paddle_error paddle_matrix_shape(paddle_matrix matrix, - uint64_t* width, - uint64_t* height); +paddle_error paddle_matrix_get_shape(paddle_matrix matrix, + uint64_t* width, + uint64_t* height); ``` 而在CPP里面实现这个C的接口,文件 `paddle_matrix.cpp` ```cpp -#include "paddle/math/matrix.hpp" +#include "paddle/math/matrix.h" extern "C" paddle_error paddle_matrix_shape(paddle_matrix matrix, uint64_t *width, From 4e0f72e69ae33e23f98dc76af2b4477b9e44b036 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 21 Apr 2017 14:08:41 +0800 Subject: [PATCH 43/43] Typo --- .../multi_language_interface/01.inference_implementation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/design/multi_language_interface/01.inference_implementation.md b/doc/design/multi_language_interface/01.inference_implementation.md index 9db9ce1834d16..9820284523246 100644 --- a/doc/design/multi_language_interface/01.inference_implementation.md +++ b/doc/design/multi_language_interface/01.inference_implementation.md @@ -109,7 +109,7 @@ struct CMatrix { ### libpaddle\_capi_shared.{so, dylib} -`libpaddle_capi_shared`是C-API导出的动态库。这个动态库的连接参数与Paddle的其他二进制(例如`paddle_traienr`)类似。用户可以直接使用这个动态库来引入Paddle C-API。具体使用方法为`-lpaddle_capi_shared`。 
+`libpaddle_capi_shared`是C-API导出的动态库。这个动态库的连接参数与Paddle的其他二进制(例如`paddle_trainer`)类似。用户可以直接使用这个动态库来引入Paddle C-API。具体使用方法为`-lpaddle_capi_shared`。 ### libpaddle\_capi_whole.a