Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Permalink
Fix memory leak reported by ASAN in NNVM to ONNX conversion (#15516)
Browse files Browse the repository at this point in the history
* Fix memory leak reported by ASAN in NNVM to ONNX conversion

* Update template type

* ci

* CI
Loading branch information
kevinzh92 authored and wkcn committed Nov 26, 2019
1 parent 6b00b2c commit d2d4876
Showing 1 changed file with 2 additions and 2 deletions.
4 changes: 2 additions & 2 deletions src/operator/subgraph/tensorrt/nnvm_to_onnx.cc
Original file line number · Diff line number · Diff line change
Expand Up @@ -579,15 +579,15 @@ void ConvertConstant(
auto size = shape.Size();

if (dtype == TensorProto_DataType_FLOAT) {
std::shared_ptr<float> shared_data_ptr(new float[size]);
std::shared_ptr<float[]> shared_data_ptr(new float[size]);
float* const data_ptr = shared_data_ptr.get();
nd.SyncCopyToCPU(static_cast<void*>(data_ptr), size);

for (size_t blob_idx = 0; blob_idx < size; ++blob_idx) {
initializer_proto->add_float_data(data_ptr[blob_idx]);
}
} else if (dtype == TensorProto_DataType_FLOAT16) {
std::shared_ptr<uint16_t> shared_data_ptr(new uint16_t[size]);
std::shared_ptr<uint16_t[]> shared_data_ptr(new uint16_t[size]);
uint16_t* const data_ptr = shared_data_ptr.get();
nd.SyncCopyToCPU(static_cast<void*>(data_ptr), size);
for (size_t blob_idx = 0; blob_idx < size; ++blob_idx) {
Expand Down

0 comments on commit d2d4876

Please sign in to comment.