Commit: Add example

reyoung committed Mar 26, 2017
1 parent 9c1c19b commit 470bbcf
Showing 25 changed files with 550 additions and 19 deletions.
47 changes: 45 additions & 2 deletions paddle/capi/Matrix.cpp
@@ -39,7 +39,7 @@ paddle_error paddle_matrix_destroy(paddle_matrix mat) {

paddle_error paddle_matrix_set_row(paddle_matrix mat,
uint64_t rowID,
- pd_real* rowArray) {
+ paddle_real* rowArray) {
if (mat == nullptr) return kPD_NULLPTR;
auto ptr = cast(mat);
if (ptr->mat == nullptr) return kPD_NULLPTR;
@@ -56,7 +56,7 @@ paddle_error paddle_matrix_set_row(paddle_matrix mat,

paddle_error paddle_matrix_get_row(paddle_matrix mat,
uint64_t rowID,
- pd_real** rawRowBuffer) {
+ paddle_real** rawRowBuffer) {
if (mat == nullptr) return kPD_NULLPTR;
auto ptr = cast(mat);
if (ptr->mat == nullptr) return kPD_NULLPTR;
@@ -78,3 +78,46 @@ paddle_error paddle_matrix_get_shape(paddle_matrix mat,
return kPD_NO_ERROR;
}
}

paddle_matrix paddle_matrix_create_sparse(
uint64_t height, uint64_t width, uint64_t nnz, bool isBinary, bool useGpu) {
auto ptr = new paddle::capi::CMatrix();
ptr->mat = paddle::Matrix::createSparseMatrix(
height,
width,
nnz,
isBinary ? paddle::NO_VALUE : paddle::FLOAT_VALUE,
paddle::SPARSE_CSR,
false,
useGpu);
return ptr;
}

paddle_error paddle_matrix_sparse_copy_from(paddle_matrix mat,
int* rowArray,
uint64_t rowSize,
int* colArray,
uint64_t colSize,
float* valueArray,
uint64_t valueSize) {
if (mat == nullptr) return kPD_NULLPTR;
auto ptr = cast(mat);
if (rowArray == nullptr || colArray == nullptr ||
(valueSize != 0 && valueArray == nullptr) || ptr->mat == nullptr) {
return kPD_NULLPTR;
}
if (auto sparseMat = dynamic_cast<paddle::CpuSparseMatrix*>(ptr->mat.get())) {
std::vector<int> row(rowSize);
row.assign(rowArray, rowArray + rowSize);
std::vector<int> col(colSize);
col.assign(colArray, colArray + colSize);
std::vector<paddle_real> val(valueSize);
if (valueSize) {
val.assign(valueArray, valueArray + valueSize);
}
sparseMat->copyFrom(row, col, val);
return kPD_NO_ERROR;
} else {
return kPD_NOT_SUPPORTED;
}
}
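
For reference, a minimal caller-side sketch of the new sparse API. It assumes `paddle_matrix_create_sparse` and `paddle_matrix_sparse_copy_from` are exposed through `paddle/capi.h` with the signatures defined above, and that the row array holds CSR row offsets (height + 1 entries) while the column and value arrays have nnz entries each; verify that layout against `CpuSparseMatrix::copyFrom` before relying on it.

#include <paddle/capi.h>
#include <stdio.h>

int main() {
  /* A 2 x 4 sparse matrix with 3 non-zero values.
   * rows is assumed to hold CSR row offsets (height + 1 entries);
   * check CpuSparseMatrix::copyFrom before relying on this layout. */
  int rows[] = {0, 2, 3};
  int cols[] = {0, 3, 1};
  float vals[] = {1.0f, 2.0f, 3.0f};

  paddle_matrix mat = paddle_matrix_create_sparse(/* height */ 2,
                                                  /* width */ 4,
                                                  /* nnz */ 3,
                                                  /* isBinary */ false,
                                                  /* useGpu */ false);

  paddle_error err =
      paddle_matrix_sparse_copy_from(mat, rows, 3, cols, 3, vals, 3);
  if (err == kPD_NOT_SUPPORTED) {
    /* Returned when the underlying matrix is not a CpuSparseMatrix,
     * e.g. when the matrix was created with useGpu = true. */
    fprintf(stderr, "sparse copy not supported for this matrix\n");
  }

  paddle_matrix_destroy(mat);
  return err == kPD_NO_ERROR ? 0 : 1;
}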
2 changes: 1 addition & 1 deletion paddle/capi/config.h.in
@@ -1,7 +1,7 @@
#ifndef __PADDLE_PADDLE_CAPI_CONFIG_H_INCLUDED__
#define __PADDLE_PADDLE_CAPI_CONFIG_H_INCLUDED__

-typedef @PADDLE_FLOAT_TYPE@ pd_real;
+typedef @PADDLE_FLOAT_TYPE@ paddle_real;

// Since we only support linux and macos in compile, always use clang or
// gcc 4.8+. DLL_IMPORT/DLL_EXPORT is as simple as below.
1 change: 1 addition & 0 deletions paddle/capi/error.h
@@ -23,6 +23,7 @@ typedef enum {
kPD_NULLPTR = 1,
kPD_OUT_OF_RANGE = 2,
kPD_PROTOBUF_ERROR = 3,
kPD_NOT_SUPPORTED = 4,
kPD_UNDEFINED_ERROR = -1,
} paddle_error;
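
A caller-side sketch (not part of this commit) of how the enum, including the new kPD_NOT_SUPPORTED code, could be reported as text; the helper name is hypothetical:

#include <paddle/capi.h>

/* Hypothetical helper, not part of the C API: map paddle_error to text. */
const char* paddle_error_string(paddle_error err) {
  switch (err) {
    case kPD_NO_ERROR:        return "no error";
    case kPD_NULLPTR:         return "null pointer";
    case kPD_OUT_OF_RANGE:    return "out of range";
    case kPD_PROTOBUF_ERROR:  return "protobuf error";
    case kPD_NOT_SUPPORTED:   return "not supported";
    case kPD_UNDEFINED_ERROR: return "undefined error";
    default:                  return "unknown error";
  }
}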

2 changes: 2 additions & 0 deletions paddle/capi/examples/.gitignore
@@ -0,0 +1,2 @@
*.bin
build-*
26 changes: 26 additions & 0 deletions paddle/capi/examples/common/common.h
@@ -0,0 +1,26 @@
#ifndef __CAPI_EXAMPLE_COMMON_H__
#define __CAPI_EXAMPLE_COMMON_H__
#include <stdio.h>
#include <stdlib.h>

#define CHECK(stmt) \
do { \
paddle_error __err__ = stmt; \
if (__err__ != kPD_NO_ERROR) { \
fprintf(stderr, "Invoke paddle error %d in " #stmt "\n", __err__); \
exit(__err__); \
} \
} while (0)

void* read_config(const char* filename, long* size) {
FILE* file = fopen(filename, "r");
if (file == NULL) return NULL;
fseek(file, 0L, SEEK_END);
*size = ftell(file);
fseek(file, 0L, SEEK_SET);
void* buf = malloc(*size);
fread(buf, 1, *size, file);
fclose(file);
return buf;
}
#endif
6 changes: 6 additions & 0 deletions paddle/capi/examples/dense/CMakeLists.txt
@@ -0,0 +1,6 @@
project(dense)
cmake_minimum_required(VERSION 2.8)
aux_source_directory(. SRC_LIST)
add_executable(${PROJECT_NAME} ${SRC_LIST})
set_property(TARGET ${PROJECT_NAME} PROPERTY C_STANDARD 99)
target_link_libraries(${PROJECT_NAME} -lpaddle_capi_shared)
2 changes: 2 additions & 0 deletions paddle/capi/examples/dense/convert_protobin.sh
@@ -0,0 +1,2 @@
#!/bin/bash
python -m paddle.utils.dump_config trainer_config.py '' --binary > trainer_config.bin
63 changes: 63 additions & 0 deletions paddle/capi/examples/dense/main.c
@@ -0,0 +1,63 @@
#include <paddle/capi.h>
#include <time.h>
#include "../common/common.h"

#define CONFIG_BIN "./trainer_config.bin"

int main() {
// Initialize Paddle
char* argv[] = {"--use_gpu=False"};
CHECK(paddle_init(1, (char**)argv));

// Read the config binary file. It is generated by `convert_protobin.sh`.
long size;
void* buf = read_config(CONFIG_BIN, &size);

// Create a gradient machine for inference.
paddle_gradient_machine machine;
CHECK(paddle_gradient_machine_create_for_inference(&machine, buf, (int)size));
CHECK(paddle_gradient_machine_randomize_param(machine));

// Load parameters. Uncomment the following lines and change the directory.
// CHECK(paddle_gradient_machine_load_parameter_from_disk(machine,
// "./some_where_to_params"));
paddle_arguments in_args = paddle_arguments_create_none();

// There is only one input of this network.
CHECK(paddle_arguments_resize(in_args, 1));

// Create input matrix.
paddle_matrix mat = paddle_matrix_create(/* sample_num */ 1,
/* size */ 784,
/* useGPU */ false);
srand(time(0));
paddle_real* array;

// Get the first row.
CHECK(paddle_matrix_get_row(mat, 0, &array));

for (int i = 0; i < 784; ++i) {
array[i] = rand() / ((float)RAND_MAX);
}

CHECK(paddle_arguments_set_value(in_args, 0, mat));

paddle_arguments out_args = paddle_arguments_create_none();
CHECK(paddle_gradient_machine_forward(machine,
in_args,
out_args,
/* isTrain */ false));
paddle_matrix prob = paddle_matrix_create_none();

CHECK(paddle_arguments_value(out_args, 0, prob));

CHECK(paddle_matrix_get_row(prob, 0, &array));

printf("Prob: ");
for (int i = 0; i < 10; ++i) {
printf("%.2f ", array[i]);
}
printf("\n");

return 0;
}
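
The example above exits without releasing what it created. A possible cleanup block for the end of main is sketched below; paddle_matrix_destroy and paddle_gradient_machine_destroy appear elsewhere in this commit, while paddle_arguments_destroy is an assumption about the arguments API and should be checked against paddle/capi.h.

  // Sketch: possible cleanup before `return 0;` in the example above.
  CHECK(paddle_matrix_destroy(prob));
  CHECK(paddle_matrix_destroy(mat));
  CHECK(paddle_arguments_destroy(in_args));   // assumed API, verify in capi.h
  CHECK(paddle_arguments_destroy(out_args));  // assumed API, verify in capi.h
  CHECK(paddle_gradient_machine_destroy(machine));
  free(buf);  // buffer allocated by read_config()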
18 changes: 18 additions & 0 deletions paddle/capi/examples/dense/trainer_config.py
@@ -0,0 +1,18 @@
from paddle.trainer_config_helpers import *

img = data_layer(name='pixel', size=784)

hidden = fc_layer(
input=img,
size=200,
param_attr=ParamAttr(name='hidden.w'),
bias_attr=ParamAttr(name='hidden.b'))

prob = fc_layer(
input=hidden,
size=10,
act=SoftmaxActivation(),
param_attr=ParamAttr(name='prob.w'),
bias_attr=ParamAttr(name='prob.b'))

outputs(prob)
73 changes: 73 additions & 0 deletions paddle/capi/examples/multi_thread/.gitignore
@@ -0,0 +1,73 @@
# This file is used to ignore files which are generated
# ----------------------------------------------------------------------------

*~
*.autosave
*.a
*.core
*.moc
*.o
*.obj
*.orig
*.rej
*.so
*.so.*
*_pch.h.cpp
*_resource.rc
*.qm
.#*
*.*#
core
!core/
tags
.DS_Store
.directory
*.debug
Makefile*
*.prl
*.app
moc_*.cpp
ui_*.h
qrc_*.cpp
Thumbs.db
*.res
*.rc
/.qmake.cache
/.qmake.stash

# qtcreator generated files
*.pro.user*

# xemacs temporary files
*.flc

# Vim temporary files
.*.swp

# Visual Studio generated files
*.ib_pdb_index
*.idb
*.ilk
*.pdb
*.sln
*.suo
*.vcproj
*vcproj.*.*.user
*.ncb
*.sdf
*.opensdf
*.vcxproj
*vcxproj.*

# MinGW generated files
*.Debug
*.Release

# Python byte code
*.pyc

# Binaries
# --------
*.dll
*.exe

8 changes: 8 additions & 0 deletions paddle/capi/examples/multi_thread/CMakeLists.txt
@@ -0,0 +1,8 @@
project(multi_thread)
cmake_minimum_required(VERSION 2.8)
aux_source_directory(. SRC_LIST)
add_executable(${PROJECT_NAME} ${SRC_LIST})
find_package (Threads)
set_property(TARGET ${PROJECT_NAME} PROPERTY C_STANDARD 99)
target_link_libraries(${PROJECT_NAME} -lpaddle_capi_shared
${CMAKE_THREAD_LIBS_INIT})
1 change: 1 addition & 0 deletions paddle/capi/examples/multi_thread/convert_protobin.sh
96 changes: 96 additions & 0 deletions paddle/capi/examples/multi_thread/main.c
@@ -0,0 +1,96 @@
#include <paddle/capi.h>
#include <pthread.h>
#include <time.h>
#include "../common/common.h"

#define CONFIG_BIN "./trainer_config.bin"
#define NUM_THREAD 1000
#define NUM_ITER 1000

pthread_mutex_t mutex;

void* thread_main(void* gm_ptr) {
paddle_gradient_machine machine = (paddle_gradient_machine)(gm_ptr);

for (int iter = 0; iter < NUM_ITER; ++iter) {
paddle_arguments in_args = paddle_arguments_create_none();
// There is only one input of this network.
CHECK(paddle_arguments_resize(in_args, 1));

// Create input matrix.
paddle_matrix mat = paddle_matrix_create(/* sample_num */ 1,
/* size */ 784,
/* useGPU */ false);

paddle_real* array;

// Get the first row.
CHECK(paddle_matrix_get_row(mat, 0, &array));

for (int i = 0; i < 784; ++i) {
array[i] = rand() / ((float)RAND_MAX);
}

CHECK(paddle_arguments_set_value(in_args, 0, mat));

paddle_arguments out_args = paddle_arguments_create_none();
CHECK(paddle_gradient_machine_forward(machine,
in_args,
out_args,
/* isTrain */ false));
paddle_matrix prob = paddle_matrix_create_none();

CHECK(paddle_arguments_value(out_args, 0, prob));

CHECK(paddle_matrix_get_row(prob, 0, &array));

pthread_mutex_lock(&mutex);
printf("Prob: ");
for (int i = 0; i < 10; ++i) {
printf("%.2f ", array[i]);
}
printf("\n");
pthread_mutex_unlock(&mutex);
}

CHECK(paddle_gradient_machine_destroy(machine));
return NULL;
}

int main() {
// Initialize Paddle
char* argv[] = {"--use_gpu=False"};
CHECK(paddle_init(1, (char**)argv));

// Read the config binary file. It is generated by `convert_protobin.sh`.
long size;
void* buf = read_config(CONFIG_BIN, &size);

// Create a gradient machine for inference.
paddle_gradient_machine machine;
CHECK(paddle_gradient_machine_create_for_inference(&machine, buf, (int)size));
CHECK(paddle_gradient_machine_randomize_param(machine));

// Load parameters. Uncomment the following lines and change the directory.
// CHECK(paddle_gradient_machine_load_parameter_from_disk(machine,
// "./some_where_to_params"));
srand(time(0));
pthread_mutex_init(&mutex, NULL);

pthread_t threads[NUM_THREAD];

for (int i = 0; i < NUM_THREAD; ++i) {
paddle_gradient_machine thread_local_machine;
CHECK(paddle_gradient_machine_create_shared_param(
machine, buf, size, &thread_local_machine));
pthread_create(&threads[i], NULL, thread_main, thread_local_machine);
}

for (int i = 0; i < NUM_THREAD; ++i) {
pthread_join(threads[i], NULL);
}

pthread_mutex_destroy(&mutex);

return 0;
}
1 change: 1 addition & 0 deletions paddle/capi/examples/multi_thread/trainer_config.py
