diff --git a/cookbook/04-BuildEngineByONNXParser/pyTorch-ONNX-TensorRT/C++/calibrator.cpp b/cookbook/04-BuildEngineByONNXParser/pyTorch-ONNX-TensorRT/C++/calibrator.cpp
index 81958085..00fe6baf 100755
--- a/cookbook/04-BuildEngineByONNXParser/pyTorch-ONNX-TensorRT/C++/calibrator.cpp
+++ b/cookbook/04-BuildEngineByONNXParser/pyTorch-ONNX-TensorRT/C++/calibrator.cpp
@@ -2,46 +2,64 @@
 
 using namespace nvinfer1;
 
-MyCalibrator::MyCalibrator(const std::string &calibrationDataFile, const int nCalibration, const Dims32 dim, const std::string &cacheFile):
+MyCalibrator::MyCalibrator(const std::string& calibrationDataFile, const int nCalibration, const Dims32 dim, const std::string& cacheFile) :
     nCalibration(nCalibration), dim(dim), cacheFile(cacheFile), iBatch(0)
 {
+#ifdef DEBUG
+    std::cout << "[MyCalibrator::MyCalibrator]" << std::endl;
+#endif
     cnpy::npz_t npzFile = cnpy::npz_load(calibrationDataFile);
-    cnpy::NpyArray array = npzFile[std::string("calibrationData")];
-    pData = array.data();
+    cnpy::NpyArray array = npzFile[std::string("calibrationData")];
+    auto pDataTemp = array.data();
+    pData = (float*)malloc(array.num_bytes());
+    memcpy(pData, pDataTemp, array.num_bytes());
+
     if (pData == nullptr)
     {
         std::cout << "Failed getting calibration data!" << std::endl;
         return;
     }
 
-    nBatch = array.num_bytes() / bufferSize;
     nElement = 1;
     for (int i = 0; i < dim.nbDims; ++i)
     {
         nElement *= dim.d[i];
     }
     bufferSize = sizeof(float) * nElement;
-    cudaMalloc((void **)&bufferD, bufferSize);
+    nBatch = array.num_bytes() / bufferSize;
+    cudaMalloc((void**)&bufferD, bufferSize);
 
     return;
 }
 
 MyCalibrator::~MyCalibrator() noexcept
 {
+#ifdef DEBUG
+    std::cout << "[MyCalibrator::~MyCalibrator]" << std::endl;
+#endif
     if (bufferD != nullptr)
     {
         cudaFree(bufferD);
     }
+    if (pData != nullptr)
+    {
+        free(pData);
+    }
     return;
 }
 
 int32_t MyCalibrator::getBatchSize() const noexcept
 {
+#ifdef DEBUG
+    std::cout << "[MyCalibrator::getBatchSize]" << std::endl;
+#endif
     return dim.d[0];
 }
 
-bool MyCalibrator::getBatch(void *bindings[], char const *names[], int32_t nbBindings) noexcept
+bool MyCalibrator::getBatch(void* bindings[], char const* names[], int32_t nbBindings) noexcept
 {
+#ifdef DEBUG
+    std::cout << "[MyCalibrator::getBatch]" << std::endl;
+#endif
     if (iBatch < nBatch)
     {
         cudaMemcpy(bufferD, &pData[iBatch * nElement], bufferSize, cudaMemcpyHostToDevice);
@@ -55,8 +73,11 @@ bool MyCalibrator::getBatch(void *bindings[], char const *names[], int32_t nbBin
     }
 }
 
-void const *MyCalibrator::readCalibrationCache(std::size_t &length) noexcept
+void const* MyCalibrator::readCalibrationCache(std::size_t& length) noexcept
 {
+#ifdef DEBUG
+    std::cout << "[MyCalibrator::readCalibrationCache]" << std::endl;
+#endif
     std::fstream f;
     f.open(cacheFile, std::fstream::in);
     if (f.fail())
@@ -64,7 +85,7 @@ void const *MyCalibrator::readCalibrationCache(std::size_t &length) noexcept
         std::cout << "Failed finding cache file!" << std::endl;
         return nullptr;
     }
-    char *ptr = new char[length];
+    char* ptr = new char[length];
     if (f.is_open())
     {
         f >> ptr;
@@ -72,19 +93,22 @@ void const *MyCalibrator::readCalibrationCache(std::size_t &length) noexcept
     return ptr;
 }
 
-void MyCalibrator::writeCalibrationCache(void const *ptr, std::size_t length) noexcept
+void MyCalibrator::writeCalibrationCache(void const* ptr, std::size_t length) noexcept
 {
+#ifdef DEBUG
+    std::cout << "[MyCalibrator::writeCalibrationCache]" << std::endl;
+#endif
     std::ofstream f(cacheFile, std::ios::binary);
     if (f.fail())
     {
         std::cout << "Failed opening cache file to write!" << std::endl;
         return;
     }
-    f.write(static_cast<char const *>(ptr), length);
+    f.write(static_cast<char const*>(ptr), length);
     if (f.fail())
     {
         std::cout << "Failed saving cache file!" << std::endl;
         return;
     }
     f.close();
-}
\ No newline at end of file
+}
diff --git a/cookbook/04-BuildEngineByONNXParser/pyTorch-ONNX-TensorRT/C++/createCalibrationAndInferenceData.py b/cookbook/04-BuildEngineByONNXParser/pyTorch-ONNX-TensorRT/C++/createCalibrationAndInferenceData.py
index 95522bc0..0b5e14a5 100755
--- a/cookbook/04-BuildEngineByONNXParser/pyTorch-ONNX-TensorRT/C++/createCalibrationAndInferenceData.py
+++ b/cookbook/04-BuildEngineByONNXParser/pyTorch-ONNX-TensorRT/C++/createCalibrationAndInferenceData.py
@@ -28,7 +28,7 @@
 inferenceData = cv2.imread(inferenceDataFile, cv2.IMREAD_GRAYSCALE).astype(np.float32)
 
 calibrationDataFileList = sorted(glob(calibrationDataPath + "*.jpg"))[:nCalibrationData]
-calibrationData = np.empty([nCalibrationData, 1, nHeight, nWidth])
+calibrationData = np.empty([nCalibrationData, 1, nHeight, nWidth], dtype=np.float32)
 for i in range(nCalibrationData):
     calibrationData[i, 0] = cv2.imread(calibrationDataFileList[i], cv2.IMREAD_GRAYSCALE).astype(np.float32)
 
@@ -36,4 +36,4 @@
 dataDictionary["inferenceData"] = inferenceData
 dataDictionary["calibrationData"] = calibrationData
 np.savez("data.npz", **dataDictionary)
-print("Succeeded creating data for calibration and inference!")
\ No newline at end of file
+print("Succeeded creating data for calibration and inference!")
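
Note for reviewers: the constructor change deep-copies the "calibrationData" array out of the cnpy npz handle so the buffer stays valid for the whole calibration run, and nBatch is now computed only after bufferSize is known. For context, the sketch below shows how such a calibrator is typically plugged into an INT8 build. It is illustrative only and not part of this patch: the SimpleLogger class, the 1x1x28x28 input shape, the batch count of 100, and the "calibration.cache" file name are assumptions, and MyCalibrator is assumed to derive from IInt8EntropyCalibrator2 as declared in calibrator.h.

// Hypothetical usage sketch, not part of this patch.
#include <iostream>
#include <NvInfer.h>
#include "calibrator.h"

using namespace nvinfer1;

// Minimal logger; TensorRT requires an ILogger to create a builder.
class SimpleLogger : public ILogger
{
    void log(Severity severity, char const *msg) noexcept override
    {
        if (severity <= Severity::kWARNING)
        {
            std::cout << msg << std::endl;
        }
    }
};

int main()
{
    SimpleLogger    logger;
    IBuilder       *builder = createInferBuilder(logger);
    IBuilderConfig *config  = builder->createBuilderConfig();

    // Reads "calibrationData" from data.npz; one batch per getBatch() call.
    // The 1x1x28x28 shape and 100 batches are assumptions for illustration.
    MyCalibrator calibrator("data.npz", 100, Dims32 {4, {1, 1, 28, 28}}, "calibration.cache");

    config->setFlag(BuilderFlag::kINT8);    // enable INT8 precision
    config->setInt8Calibrator(&calibrator); // builder calls getBatch() during calibration

    // ... parse the ONNX model into a network and build the serialized engine here ...
    return 0;
}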