Skip to content

Commit

Permalink
add paddleIArray (PaddlePaddle#70)
Browse files Browse the repository at this point in the history
* add paddleIArray

* use final inherit, rm data_
  • Loading branch information
gglin001 authored Aug 17, 2021
1 parent 27a5f7d commit 1ae448d
Show file tree
Hide file tree
Showing 6 changed files with 85 additions and 23 deletions.
23 changes: 7 additions & 16 deletions paddle/fluid/framework/ipu/ipu_backend.cc
Original file line number Diff line number Diff line change
Expand Up @@ -182,29 +182,20 @@ void IpuBackend::Run(const std::vector<const Tensor*>& inputs,
}

std::map<popart::TensorId, popart::IArray&> popart_inputs;
std::map<popart::TensorId, popart::NDArrayWrapper<float>> input_wrappers;
// Prepare input tensor
std::map<popart::TensorId, PaddleIArray> input_wrappers;
for (size_t i = 0; i < inputs.size(); i++) {
auto tensor_id = inputs_[i];
const Tensor* tensor = inputs[i];
std::vector<int64_t> tensor_shape = builder_->getTensorShape(tensor_id);
popart::NDArrayWrapper<float> data(
const_cast<float*>(tensor->data<float>()), tensor_shape);
VLOG(1) << "Preparing Input data for tensor " << tensor_id;
input_wrappers.emplace(tensor_id, std::move(data));
auto tensor = const_cast<Tensor*>(inputs[i]);
input_wrappers.emplace(tensor_id, PaddleIArray(tensor));
popart_inputs.emplace(tensor_id, input_wrappers.at(tensor_id));
}
// Prepare output tensor

std::map<popart::TensorId, popart::IArray&> popart_anchors;
std::map<popart::TensorId, popart::NDArrayWrapper<float>> anchor_wrappers;
std::map<popart::TensorId, PaddleIArray> anchor_wrappers;
for (size_t i = 0; i < outputs.size(); i++) {
auto tensor_id = outputs_[i];
Tensor* tensor = outputs[i];
std::vector<int64_t> tensor_shape = builder_->getTensorShape(tensor_id);
popart::NDArrayWrapper<float> data(
const_cast<float*>(tensor->data<float>()), tensor_shape);
VLOG(1) << "Preparing Output data for tensor " << tensor_id;
anchor_wrappers.emplace(tensor_id, std::move(data));
auto tensor = const_cast<Tensor*>(outputs[i]);
anchor_wrappers.emplace(tensor_id, PaddleIArray(tensor));
popart_anchors.emplace(tensor_id, anchor_wrappers.at(tensor_id));
}

Expand Down
22 changes: 21 additions & 1 deletion paddle/fluid/framework/ipu/ipu_utils.cc
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,25 @@ namespace paddle {
namespace framework {
namespace ipu {

// Expose the wrapped Tensor's raw buffer to popart. The member is stored as a
// const Tensor*, so const_cast is required to satisfy popart::IArray's
// non-const data() contract; presumably popart writes through this pointer
// when the array is used as an anchor (output) — verify against IpuBackend::Run.
void* PaddleIArray::data() { return const_cast<void*>(tensor_->data<void>()); }

// Map the wrapped Tensor's Paddle dtype to the equivalent popart::DataType.
popart::DataType PaddleIArray::dataType() const {
return VarType2PopartType(tensor_->type());
}

// Number of dimensions of the wrapped Tensor.
std::size_t PaddleIArray::rank() const { return tensor_->dims().size(); }

// Extent of dimension `index` of the wrapped Tensor. Uses at(), so an
// out-of-range index is range-checked rather than silently reading garbage.
int64_t PaddleIArray::dim(size_t index) const {
return tensor_->dims().at(index);
}

// Total element count: the product of all cached dimension extents.
// An empty (rank-0) shape yields 1, matching the scalar convention.
std::size_t PaddleIArray::nelms() const {
  int64_t count = 1;
  for (const auto extent : shape_) {
    count *= extent;
  }
  return count;
}

// Return a copy of the shape cached at construction time (see the
// PaddleIArray constructor); signature matches popart::IArray::shape().
const popart::Shape PaddleIArray::shape() const { return shape_; }

popart::DataType VarType2PopartType(proto::VarType::Type type) {
switch (type) {
case proto::VarType::UINT8:
Expand Down Expand Up @@ -85,7 +104,7 @@ popart::DataType OnnxDtype2PopartType(int type) {

// count num should > 0
bool GetBoolEnv(std::string str) {
char *str_val = getenv(str.c_str());
char* str_val = getenv(str.c_str());
if (str_val == NULL) {
return false;
} else {
Expand All @@ -96,6 +115,7 @@ bool GetBoolEnv(std::string str) {
return val;
}
}

} // namespace ipu
} // namespace framework
} // namespace paddle
21 changes: 21 additions & 0 deletions paddle/fluid/framework/ipu/ipu_utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,27 @@ enum ONNXDataType : int {
BFLOAT16 = 16
};

class PaddleIArray final : public popart::IArray {
public:
explicit PaddleIArray(Tensor *tensor) : tensor_(tensor) {
for (int i = 0; i < tensor->dims().size(); ++i) {
shape_.push_back(tensor->dims().at(i));
}
}

public:
void *data();
popart::DataType dataType() const;
std::size_t rank() const;
int64_t dim(size_t index) const;
std::size_t nelms() const;
const popart::Shape shape() const;

private:
const Tensor *tensor_;
std::vector<int64_t> shape_;
};

popart::DataType VarType2PopartType(proto::VarType::Type type);
popart::DataType OnnxDtype2PopartType(int type);
bool GetBoolEnv(std::string str);
Expand Down
14 changes: 14 additions & 0 deletions paddle/fluid/framework/ir/ipu/ipu_runtime_replacer_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,20 @@ void IpuRuntimeReplacerPass::ApplyImpl(ir::Graph* graph) const {
}
}

// set ipu_runtime_op dtype attr
// TODO(alleng) support more than one output type
if (fetch_list.size() == 1) {
for (auto* node : graph->Nodes()) {
if (node->IsVar()) {
for (auto fetch : fetch_list) {
if (node->Name() == fetch) {
ipu_rt_node->Op()->SetAttr("dtype", node->Var()->GetDataType());
}
}
}
}
}

// Remove unneeded nodes.
std::unordered_set<const Node*> marked_nodes;
for (auto* node : graph->Nodes()) {
Expand Down
24 changes: 19 additions & 5 deletions paddle/fluid/operators/ipu_runtime_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,9 @@ class IpuRuntimeOp : public framework::OperatorWithKernel {
protected:
// Select the kernel data type from the op's "dtype" attribute (set by
// ipu_runtime_replacer_pass from the fetched variable) instead of
// hard-coding FP32, so non-float fetch outputs pick the right kernel.
framework::OpKernelType GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const override {
  return framework::OpKernelType(
      framework::proto::VarType::Type(ctx.Attr<int>("dtype")),
      ctx.device_context());
}
};

Expand All @@ -35,8 +36,12 @@ class IpuRuntimeOpMaker : public framework::OpProtoAndCheckerMaker {
// Declares the ipu_runtime op signature: variadic feed/fetch variable lists
// plus a "dtype" attribute carrying the output data type (proto::VarType
// enum value; 5 == FP32 is the default).
void Make() override {
  AddInput("FeedList", "FeedList of Graph").AsDuplicable();
  AddOutput("FetchList", "FetchList of Graph").AsDuplicable();
  AddAttr<int>("dtype",
               "(int, default 5 (FP32)) "
               "Output data type")
      .SetDefault(framework::proto::VarType::FP32);
  AddComment(R"DOC(
Run graph by PopART runtime.
)DOC");
}
Expand All @@ -47,5 +52,14 @@ Run graph by PopART runtime.

namespace ops = paddle::operators;
REGISTER_OPERATOR(ipu_runtime, ops::IpuRuntimeOp, ops::IpuRuntimeOpMaker);

// Register the kernel for every dtype the IPU runtime may fetch; the concrete
// instantiation is selected at run time via the op's "dtype" attribute
// (see IpuRuntimeOp::GetExpectedKernelType).
REGISTER_OP_CPU_KERNEL(ipu_runtime, ops::IpuRuntimeKernel<float>,
                       ops::IpuRuntimeKernel<double>,
                       ops::IpuRuntimeKernel<int>,
                       ops::IpuRuntimeKernel<int64_t>,
                       ops::IpuRuntimeKernel<bool>);

REGISTER_OP_IPU_KERNEL(ipu_runtime, ops::IpuRuntimeKernel<float>,
                       ops::IpuRuntimeKernel<double>,
                       ops::IpuRuntimeKernel<int>,
                       ops::IpuRuntimeKernel<int64_t>,
                       ops::IpuRuntimeKernel<bool>);
4 changes: 3 additions & 1 deletion paddle/fluid/operators/ipu_runtime_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,9 @@ class IpuRuntimeKernel : public framework::OpKernel<T> {
auto* out = outputs[i];
auto oshape = ipu_backend->GetTensorShape(output_names[i]);
out->Resize(framework::make_ddim(oshape));
out->mutable_data<float>(ctx.GetPlace());
// TODO(alleng) support multi-output dtypes
// maybe get dtype from ipu_backend
out->mutable_data<T>(ctx.GetPlace());
}
ipu_backend->Run(inputs, outputs);
#else
Expand Down

0 comments on commit 1ae448d

Please sign in to comment.