From 1651573c08cac3fc01004d68bc197f751d4a2235 Mon Sep 17 00:00:00 2001
From: Heethesh Vhavle
Date: Mon, 11 Dec 2023 09:11:14 +0400
Subject: [PATCH] Add Doppler ICP in tensor registration pipeline (#5237)

We would like to contribute the implementation of our Doppler ICP algorithm
for point clouds captured by FMCW LiDARs. This PR adds support for Doppler ICP
in the tensor registration pipeline within Open3D.

This is the implementation of the following paper:

@INPROCEEDINGS{Hexsel-RSS-22,
    AUTHOR    = {Bruno Hexsel AND Heethesh Vhavle AND Yi Chen},
    TITLE     = {{DICP: Doppler Iterative Closest Point Algorithm}},
    BOOKTITLE = {Proceedings of Robotics: Science and Systems},
    YEAR      = {2022},
    ADDRESS   = {New York City, NY, USA},
    MONTH     = {June},
    DOI       = {10.15607/RSS.2022.XVIII.015}
}

Additions

* Added CPU and CUDA kernels for computing the DICP residuals and Jacobians.
* Added a macro for dual robust function dispatch. I separated the ICP loop
  for the Doppler variant, since it needs additional arguments such as the
  time period, calibration, and other DICP-related parameters.
* Added a TransformationToPose converter along with its kernels (based on the
  existing Eigen implementation of utility::TransformMatrix4dToVector6d).
* Added Python bindings for all the new Doppler ICP methods.

Updates to existing API

* Made the matmul3x3_3x1 kernel available for __host__ in addition to
  __device__.
* Added num_iterations and converged fields to RegistrationResult.
* Renamed correspondences_ to correspondence_set to match the legacy Python
  parameters.

---------

Co-authored-by: Heethesh Vhavle
---
 CHANGELOG.md                                       |   1 +
 .../t/pipelines/registration/Registration.cpp      | 154 ++++++++++--
 cpp/open3d/core/linalg/kernel/Matrix.h             |   6 +-
 cpp/open3d/data/CMakeLists.txt                     |   1 +
 cpp/open3d/data/Dataset.h                          |  35 +++
 .../data/dataset/DemoDopplerICPSequence.cpp        | 121 +++++++++
 cpp/open3d/t/io/PointCloudIO.cpp                   |   2 +
 cpp/open3d/t/io/file_format/FileTXT.cpp            |  22 ++
 .../t/pipelines/kernel/Registration.cpp            | 108 ++++++++
 cpp/open3d/t/pipelines/kernel/Registration.h       |  56 +++++
 .../t/pipelines/kernel/RegistrationCPU.cpp         | 168 +++++++++++++
 .../t/pipelines/kernel/RegistrationCUDA.cu         | 158 ++++++++++++
 .../t/pipelines/kernel/RegistrationImpl.h          | 214 ++++++++++++++++
 .../kernel/TransformationConverter.cpp             |  47 ++++
 .../kernel/TransformationConverter.cu              |  26 ++
 .../kernel/TransformationConverter.h               |   8 +
 .../kernel/TransformationConverterImpl.h           |  26 ++
 .../t/pipelines/registration/Registration.cpp      |  35 ++-
 .../t/pipelines/registration/Registration.h        |   6 +-
 .../pipelines/registration/RobustKernelImpl.h      |  64 +++++
 .../registration/TransformationEstimation.cpp      |  99 +++++++-
 .../registration/TransformationEstimation.h        | 187 +++++++++++++-
 cpp/pybind/data/dataset.cpp                        |  34 +++
 .../t/pipelines/registration/registration.cpp      | 146 ++++++++++-
 cpp/tests/t/io/PointCloudIO.cpp                    |   6 +
 .../t/pipelines/registration/Registration.cpp      | 113 +++++++++
 examples/cpp/CMakeLists.txt                        |   1 +
 examples/cpp/RegistrationDopplerICP.cpp            | 190 ++++++++++++++
 .../pipelines/doppler_icp_registration.py          | 235 ++++++++++++++++++
 29 files changed, 2227 insertions(+), 42 deletions(-)
 create mode 100644 cpp/open3d/data/dataset/DemoDopplerICPSequence.cpp
 create mode 100644 examples/cpp/RegistrationDopplerICP.cpp
 create mode 100644 examples/python/pipelines/doppler_icp_registration.py

diff --git a/CHANGELOG.md b/CHANGELOG.md
index ac4526be541..0ad0b678ef9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,6 +24,7 @@
 * Fix `toString`, `CreateFromPoints` methods and improve docs in `AxisAlignedBoundingBox`.
🐛📝 * Migrate Open3d documentation to furo theme ✨ (#6470) * Expose Near Clip + Far Clip parameters to setup_camera in OffscreenRenderer (#6520) +* Add Doppler ICP in tensor registration pipeline (PR #5237) ## 0.13 diff --git a/cpp/benchmarks/t/pipelines/registration/Registration.cpp b/cpp/benchmarks/t/pipelines/registration/Registration.cpp index 524cabaf91a..71edb2cab1e 100644 --- a/cpp/benchmarks/t/pipelines/registration/Registration.cpp +++ b/cpp/benchmarks/t/pipelines/registration/Registration.cpp @@ -10,6 +10,7 @@ #include #include "open3d/core/CUDAUtils.h" +#include "open3d/core/EigenConverter.h" #include "open3d/core/nns/NearestNeighborSearch.h" #include "open3d/data/Dataset.h" #include "open3d/t/io/PointCloudIO.h" @@ -31,7 +32,11 @@ static const double voxel_downsampling_factor = 0.02; // NNS parameter. static const double max_correspondence_distance = 0.05; -// Initial transformation guess for registation. +// Normal estimation parameters. +static const double normals_search_radius = 10.0; +static const int normals_max_neighbors = 30; + +// Initial transformation guess for registration. static const std::vector initial_transform_flat{ 0.862, 0.011, -0.507, 0.5, -0.139, 0.967, -0.215, 0.7, 0.487, 0.255, 0.835, -1.4, 0.0, 0.0, 0.0, 1.0}; @@ -118,36 +123,155 @@ static void BenchmarkICP(benchmark::State& state, } } -#define ENUM_ICP_METHOD_DEVICE(METHOD_NAME, TRANSFORMATION_TYPE, DEVICE) \ - BENCHMARK_CAPTURE(BenchmarkICP, DEVICE METHOD_NAME##_Float32, \ - core::Device(DEVICE), core::Float32, \ - TRANSFORMATION_TYPE) \ - ->Unit(benchmark::kMillisecond); \ - BENCHMARK_CAPTURE(BenchmarkICP, DEVICE METHOD_NAME##_Float64, \ - core::Device(DEVICE), core::Float64, \ - TRANSFORMATION_TYPE) \ +core::Tensor ComputeDirectionVectors(const core::Tensor& positions) { + core::Tensor directions = core::Tensor::Empty( + positions.GetShape(), positions.GetDtype(), positions.GetDevice()); + for (int64_t i = 0; i < positions.GetLength(); ++i) { + // Compute the norm of the position vector. + core::Tensor norm = (positions[i][0] * positions[i][0] + + positions[i][1] * positions[i][1] + + positions[i][2] * positions[i][2]) + .Sqrt(); + + // If the norm is zero, set the direction vector to zero. + if (norm.Item() == 0.0) { + directions[i].Fill(0.0); + } else { + // Otherwise, compute the direction vector by dividing the position + // vector by its norm. + directions[i] = positions[i] / norm; + } + } + return directions; +} + +static std::tuple +LoadTensorDopplerPointCloudFromFile( + const std::string& source_pointcloud_filename, + const std::string& target_pointcloud_filename, + const double voxel_downsample_factor, + const core::Dtype& dtype, + const core::Device& device) { + geometry::PointCloud source, target; + + io::ReadPointCloud(source_pointcloud_filename, source, + {"auto", false, false, true}); + io::ReadPointCloud(target_pointcloud_filename, target, + {"auto", false, false, true}); + + source.SetPointAttr("directions", + ComputeDirectionVectors(source.GetPointPositions())); + + source = source.To(device); + target = target.To(device); + + // Eliminates the case of impractical values (including negative). 
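+    // A voxel size below 1 mm (or a non-positive value) is treated as "no
+    // downsampling requested": both clouds are kept at full resolution and a
+    // warning is logged instead.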
+ if (voxel_downsample_factor > 0.001) { + source = source.VoxelDownSample(voxel_downsample_factor); + target = target.VoxelDownSample(voxel_downsample_factor); + } else { + utility::LogWarning( + "VoxelDownsample: Impractical voxel size [< 0.001], skipping " + "downsampling."); + } + + source.SetPointPositions(source.GetPointPositions().To(dtype)); + source.SetPointAttr("dopplers", source.GetPointAttr("dopplers").To(dtype)); + source.SetPointAttr("directions", + source.GetPointAttr("directions").To(dtype)); + + target.SetPointPositions(target.GetPointPositions().To(dtype)); + target.EstimateNormals(normals_search_radius, normals_max_neighbors); + + return std::make_tuple(source, target); +} + +static void BenchmarkDopplerICP(benchmark::State& state, + const core::Device& device, + const core::Dtype& dtype, + const TransformationEstimationType& type) { + utility::SetVerbosityLevel(utility::VerbosityLevel::Error); + data::DemoDopplerICPSequence demo_sequence; + geometry::PointCloud source, target; + std::tie(source, target) = LoadTensorDopplerPointCloudFromFile( + demo_sequence.GetPath(0), demo_sequence.GetPath(1), + voxel_downsampling_factor, dtype, device); + + Eigen::Matrix4d calibration{Eigen::Matrix4d::Identity()}; + double period{0.0}; + demo_sequence.GetCalibration(calibration, period); + + const core::Tensor calibration_t = + core::eigen_converter::EigenMatrixToTensor(calibration) + .To(device, dtype); + + TransformationEstimationForDopplerICP estimation_dicp; + estimation_dicp.period_ = period; + estimation_dicp.transform_vehicle_to_sensor_ = calibration_t; + + core::Tensor init_trans = core::Tensor::Eye(4, dtype, device); + RegistrationResult reg_result(init_trans); + + // Warm up. + reg_result = ICP(source, target, max_correspondence_distance, init_trans, + estimation_dicp, + ICPConvergenceCriteria(relative_fitness, relative_rmse, + max_iterations)); + + for (auto _ : state) { + reg_result = ICP(source, target, max_correspondence_distance, + init_trans, estimation_dicp, + ICPConvergenceCriteria(relative_fitness, relative_rmse, + max_iterations)); + core::cuda::Synchronize(device); + } +} + +#define ENUM_ICP_METHOD_DEVICE(BENCHMARK_FUNCTION, METHOD_NAME, \ + TRANSFORMATION_TYPE, DEVICE) \ + BENCHMARK_CAPTURE(BENCHMARK_FUNCTION, DEVICE METHOD_NAME##_Float32, \ + core::Device(DEVICE), core::Float32, \ + TRANSFORMATION_TYPE) \ + ->Unit(benchmark::kMillisecond); \ + BENCHMARK_CAPTURE(BENCHMARK_FUNCTION, DEVICE METHOD_NAME##_Float64, \ + core::Device(DEVICE), core::Float64, \ + TRANSFORMATION_TYPE) \ ->Unit(benchmark::kMillisecond); -ENUM_ICP_METHOD_DEVICE(PointToPoint, +ENUM_ICP_METHOD_DEVICE(BenchmarkICP, + PointToPoint, TransformationEstimationType::PointToPoint, "CPU:0") -ENUM_ICP_METHOD_DEVICE(PointToPlane, +ENUM_ICP_METHOD_DEVICE(BenchmarkICP, + PointToPlane, TransformationEstimationType::PointToPlane, "CPU:0") -ENUM_ICP_METHOD_DEVICE(ColoredICP, +ENUM_ICP_METHOD_DEVICE(BenchmarkICP, + ColoredICP, TransformationEstimationType::ColoredICP, "CPU:0") +ENUM_ICP_METHOD_DEVICE(BenchmarkDopplerICP, + DopplerICP, + TransformationEstimationType::DopplerICP, + "CPU:0") #ifdef BUILD_CUDA_MODULE -ENUM_ICP_METHOD_DEVICE(PointToPoint, +ENUM_ICP_METHOD_DEVICE(BenchmarkICP, + PointToPoint, TransformationEstimationType::PointToPoint, "CUDA:0") -ENUM_ICP_METHOD_DEVICE(PointToPlane, +ENUM_ICP_METHOD_DEVICE(BenchmarkICP, + PointToPlane, TransformationEstimationType::PointToPlane, "CUDA:0") -ENUM_ICP_METHOD_DEVICE(ColoredICP, +ENUM_ICP_METHOD_DEVICE(BenchmarkICP, + ColoredICP, 
TransformationEstimationType::ColoredICP, "CUDA:0") +ENUM_ICP_METHOD_DEVICE(BenchmarkDopplerICP, + DopplerICP, + TransformationEstimationType::DopplerICP, + "CUDA:0") #endif } // namespace registration diff --git a/cpp/open3d/core/linalg/kernel/Matrix.h b/cpp/open3d/core/linalg/kernel/Matrix.h index 7d123d146bf..c0ae13c367b 100644 --- a/cpp/open3d/core/linalg/kernel/Matrix.h +++ b/cpp/open3d/core/linalg/kernel/Matrix.h @@ -36,9 +36,9 @@ static OPEN3D_DEVICE OPEN3D_FORCE_INLINE void matmul3x3_3x1(const scalar_t& m00, } template -OPEN3D_DEVICE OPEN3D_FORCE_INLINE void matmul3x3_3x1(const scalar_t* A_3x3, - const scalar_t* B_3x1, - scalar_t* C_3x1) { +OPEN3D_HOST_DEVICE OPEN3D_FORCE_INLINE void matmul3x3_3x1(const scalar_t* A_3x3, + const scalar_t* B_3x1, + scalar_t* C_3x1) { C_3x1[0] = A_3x3[0] * B_3x1[0] + A_3x3[1] * B_3x1[1] + A_3x3[2] * B_3x1[2]; C_3x1[1] = A_3x3[3] * B_3x1[0] + A_3x3[4] * B_3x1[1] + A_3x3[5] * B_3x1[2]; C_3x1[2] = A_3x3[6] * B_3x1[0] + A_3x3[7] * B_3x1[1] + A_3x3[8] * B_3x1[2]; diff --git a/cpp/open3d/data/CMakeLists.txt b/cpp/open3d/data/CMakeLists.txt index 660db6034e1..84bd2dab44e 100644 --- a/cpp/open3d/data/CMakeLists.txt +++ b/cpp/open3d/data/CMakeLists.txt @@ -14,6 +14,7 @@ target_sources(data PRIVATE dataset/DemoColoredICPPointClouds.cpp dataset/DemoCropPointCloud.cpp dataset/DemoCustomVisualization.cpp + dataset/DemoDopplerICPSequence.cpp dataset/DemoFeatureMatchingPointClouds.cpp dataset/DemoICPPointClouds.cpp dataset/DemoPoseGraphOptimization.cpp diff --git a/cpp/open3d/data/Dataset.h b/cpp/open3d/data/Dataset.h index 417e3515dce..35c448d11e0 100644 --- a/cpp/open3d/data/Dataset.h +++ b/cpp/open3d/data/Dataset.h @@ -12,6 +12,8 @@ #include #include +#include "open3d/utility/Eigen.h" + namespace open3d { namespace data { @@ -343,6 +345,39 @@ class DemoCustomVisualization : public DownloadDataset { std::string render_option_path_; }; +/// \class DemoDopplerICPSequence +/// \brief Data class for `DemoDopplerICPSequence` contains an example sequence +/// of 100 point clouds with Doppler velocity channel and corresponding ground +/// truth poses. The sequence was generated using the CARLA simulator. +class DemoDopplerICPSequence : public DownloadDataset { +public: + DemoDopplerICPSequence(const std::string& data_root = ""); + + /// \brief Returns the list of the point cloud paths in the sequence. + std::vector GetPaths() const { return paths_; } + /// \brief Path to the point cloud at index. + std::string GetPath(std::size_t index) const; + /// \brief Path to the calibration metadata file, containing transformation + /// between the vehicle and sensor frames and the time period. + std::string GetCalibrationPath() const { return calibration_path_; } + /// \brief Path to the ground truth poses for the entire sequence. + std::string GetTrajectoryPath() const { return trajectory_path_; } + /// \brief Returns the vehicle to sensor calibration transformation and the + /// time period (in secs) between sequential point cloud scans. + bool GetCalibration(Eigen::Matrix4d& calibration, double& period) const; + /// \brief Returns a list of (timestamp, pose) representing the ground truth + /// trajectory of the sequence. + std::vector> GetTrajectory() const; + +private: + /// List of paths to the point clouds. + std::vector paths_; + /// Path to the calibration JSON file. + std::string calibration_path_; + /// Path to the TUM ground truth trajectory text file. 
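+    /// Each line is `timestamp tx ty tz qx qy qz qw`, as parsed by
+    /// LoadTUMTrajectory() in DemoDopplerICPSequence.cpp.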
+ std::string trajectory_path_; +}; + /// \class DemoFeatureMatchingPointClouds /// \brief Data class for `DemoFeatureMatchingPointClouds` contains 2 /// point cloud fragments and their respective FPFH features and L32D features. diff --git a/cpp/open3d/data/dataset/DemoDopplerICPSequence.cpp b/cpp/open3d/data/dataset/DemoDopplerICPSequence.cpp new file mode 100644 index 00000000000..f4f29a17645 --- /dev/null +++ b/cpp/open3d/data/dataset/DemoDopplerICPSequence.cpp @@ -0,0 +1,121 @@ +// ---------------------------------------------------------------------------- +// - Open3D: www.open3d.org - +// ---------------------------------------------------------------------------- +// Copyright (c) 2018-2023 www.open3d.org +// SPDX-License-Identifier: MIT +// ---------------------------------------------------------------------------- + +#include + +#include +#include +#include +#include +#include +#include + +#include "open3d/data/Dataset.h" +#include "open3d/utility/IJsonConvertible.h" +#include "open3d/utility/Logging.h" + +namespace open3d { +namespace data { + +namespace { + +bool ReadJSONFromFile(const std::string& path, Json::Value& json) { + std::ifstream file(path); + if (!file.is_open()) { + utility::LogWarning("Failed to open: {}", path); + return false; + } + + Json::CharReaderBuilder builder; + builder["collectComments"] = false; + Json::String errs; + bool is_parse_successful = parseFromStream(builder, file, &json, &errs); + if (!is_parse_successful) { + utility::LogWarning("Read JSON failed: {}.", errs); + return false; + } + return true; +} + +std::vector> LoadTUMTrajectory( + const std::string& filename) { + std::vector> trajectory; + + std::ifstream file(filename); + if (!file.is_open()) { + utility::LogError("Failed to open: {}", filename); + } + + std::string line; + while (std::getline(file, line)) { + std::istringstream iss(line); + double timestamp, x, y, z, qx, qy, qz, qw; + if (!(iss >> timestamp >> x >> y >> z >> qx >> qy >> qz >> qw)) { + utility::LogError("Error parsing line: {}", line); + } + + Eigen::Affine3d transform = Eigen::Affine3d::Identity(); + transform.translate(Eigen::Vector3d(x, y, z)); + transform.rotate(Eigen::Quaterniond(qw, qx, qy, qz)); + + trajectory.emplace_back(timestamp, transform.matrix()); + } + + return trajectory; +} + +} // namespace + +const static DataDescriptor data_descriptor = { + Open3DDownloadsPrefix() + + "doppler-icp-data/carla-town05-curved-walls.zip", + "73a9828fb7790481168124c02398ee01"}; + +DemoDopplerICPSequence::DemoDopplerICPSequence(const std::string& data_root) + : DownloadDataset("DemoDopplerICPSequence", data_descriptor, data_root) { + for (int i = 1; i <= 100; ++i) { + std::stringstream ss; + ss << std::setw(5) << std::setfill('0') << i; + paths_.push_back(GetExtractDir() + "/xyzd_sequence/" + ss.str() + + ".xyzd"); + } + + calibration_path_ = GetExtractDir() + "/calibration.json"; + trajectory_path_ = GetExtractDir() + "/ground_truth_poses.txt"; +} + +std::string DemoDopplerICPSequence::GetPath(std::size_t index) const { + if (index > 99) { + utility::LogError( + "Invalid index. 
Expected index between 0 to 99 but got {}.", + index); + } + return paths_[index]; +} + +bool DemoDopplerICPSequence::GetCalibration(Eigen::Matrix4d& calibration, + double& period) const { + Json::Value calibration_data; + Eigen::Matrix4d calibration_temp; + if (ReadJSONFromFile(calibration_path_, calibration_data) && + utility::IJsonConvertible::EigenMatrix4dFromJsonArray( + calibration_temp, + calibration_data["transform_vehicle_to_sensor"])) { + calibration = calibration_temp.transpose(); + period = calibration_data["period"].asDouble(); + return true; + } + return false; +} + +std::vector> +DemoDopplerICPSequence::GetTrajectory() const { + return LoadTUMTrajectory(trajectory_path_); +} + +} // namespace data +} // namespace open3d diff --git a/cpp/open3d/t/io/PointCloudIO.cpp b/cpp/open3d/t/io/PointCloudIO.cpp index 57055d5a8b6..fa66d0e2869 100644 --- a/cpp/open3d/t/io/PointCloudIO.cpp +++ b/cpp/open3d/t/io/PointCloudIO.cpp @@ -29,6 +29,7 @@ static const std::unordered_map< file_extension_to_pointcloud_read_function{ {"npz", ReadPointCloudFromNPZ}, {"xyz", ReadPointCloudFromTXT}, + {"xyzd", ReadPointCloudFromTXT}, {"xyzi", ReadPointCloudFromTXT}, {"xyzn", ReadPointCloudFromTXT}, {"xyzrgb", ReadPointCloudFromTXT}, @@ -45,6 +46,7 @@ static const std::unordered_map< file_extension_to_pointcloud_write_function{ {"npz", WritePointCloudToNPZ}, {"xyz", WritePointCloudToTXT}, + {"xyzd", WritePointCloudToTXT}, {"xyzi", WritePointCloudToTXT}, {"xyzn", WritePointCloudToTXT}, {"xyzrgb", WritePointCloudToTXT}, diff --git a/cpp/open3d/t/io/file_format/FileTXT.cpp b/cpp/open3d/t/io/file_format/FileTXT.cpp index b5707a9d375..e0bee8e7027 100644 --- a/cpp/open3d/t/io/file_format/FileTXT.cpp +++ b/cpp/open3d/t/io/file_format/FileTXT.cpp @@ -46,6 +46,8 @@ bool ReadPointCloudFromTXT(const std::string &filename, if (format_txt == "xyz") { num_attrs = 3; + } else if (format_txt == "xyzd") { + num_attrs = 4; } else if (format_txt == "xyzi") { num_attrs = 4; } else if (format_txt == "xyzn") { @@ -99,6 +101,9 @@ bool ReadPointCloudFromTXT(const std::string &filename, pointcloud.SetPointPositions(pcd_buffer.Slice(1, 0, 3, 1).Contiguous()); if (format_txt == "xyz") { // No additional attributes. + } else if (format_txt == "xyzd") { + pointcloud.SetPointAttr("dopplers", + pcd_buffer.Slice(1, 3, 4, 1).Contiguous()); } else if (format_txt == "xyzi") { pointcloud.SetPointAttr("intensities", pcd_buffer.Slice(1, 3, 4, 1).Contiguous()); @@ -153,6 +158,8 @@ bool WritePointCloudToTXT(const std::string &filename, if (format_txt == "xyz") { // No additional attributes. len_attr = num_points; + } else if (format_txt == "xyzd") { + len_attr = pointcloud.GetPointAttr("dopplers").GetLength(); } else if (format_txt == "xyzi") { len_attr = pointcloud.GetPointAttr("intensities").GetLength(); } else if (format_txt == "xyzn") { @@ -190,6 +197,21 @@ bool WritePointCloudToTXT(const std::string &filename, reporter.Update(i); } } + } else if (format_txt == "xyzd") { + attr_ptr = pointcloud.GetPointAttr("dopplers").GetDataPtr(); + for (int i = 0; i < num_points; i++) { + if (fprintf(file.GetFILE(), "%.10f %.10f %.10f %.10f\n", + points_ptr[3 * i + 0], points_ptr[3 * i + 1], + points_ptr[3 * i + 2], attr_ptr[i]) < 0) { + utility::LogWarning( + "Write TXT failed: unable to write file: {}", + filename); + return false; // error happened during writing. 
+ } + if (i % 1000 == 0) { + reporter.Update(i); + } + } } else if (format_txt == "xyzi") { attr_ptr = pointcloud.GetPointAttr("intensities").GetDataPtr(); diff --git a/cpp/open3d/t/pipelines/kernel/Registration.cpp b/cpp/open3d/t/pipelines/kernel/Registration.cpp index e6a528e5677..b99a5065359 100644 --- a/cpp/open3d/t/pipelines/kernel/Registration.cpp +++ b/cpp/open3d/t/pipelines/kernel/Registration.cpp @@ -7,6 +7,7 @@ #include "open3d/t/pipelines/kernel/Registration.h" +#include "open3d/core/Dispatch.h" #include "open3d/core/TensorCheck.h" #include "open3d/t/pipelines/kernel/RegistrationImpl.h" @@ -94,6 +95,113 @@ core::Tensor ComputePoseColoredICP(const core::Tensor &source_points, return pose; } +core::Tensor ComputePoseDopplerICP( + const core::Tensor &source_points, + const core::Tensor &source_dopplers, + const core::Tensor &source_directions, + const core::Tensor &target_points, + const core::Tensor &target_normals, + const core::Tensor &correspondence_indices, + const core::Tensor ¤t_transform, + const core::Tensor &transform_vehicle_to_sensor, + const std::size_t iteration, + const double period, + const double lambda_doppler, + const bool reject_dynamic_outliers, + const double doppler_outlier_threshold, + const std::size_t outlier_rejection_min_iteration, + const std::size_t geometric_robust_loss_min_iteration, + const std::size_t doppler_robust_loss_min_iteration, + const registration::RobustKernel &geometric_kernel, + const registration::RobustKernel &doppler_kernel) { + const core::Device device = source_points.GetDevice(); + const core::Dtype dtype = source_points.GetDtype(); + + // Pose {6,} tensor [ouput]. + core::Tensor output_pose = + core::Tensor::Empty({6}, core::Dtype::Float64, device); + + float residual = 0; + int inlier_count = 0; + + // Use robust kernels only after a specified minimum number of iterations. + const auto kernel_default = registration::RobustKernel( + registration::RobustKernelMethod::L2Loss, 1.0, 1.0); + const auto kernel_geometric = + (iteration >= geometric_robust_loss_min_iteration) + ? geometric_kernel + : kernel_default; + const auto kernel_doppler = (iteration >= doppler_robust_loss_min_iteration) + ? doppler_kernel + : kernel_default; + + // Enable outlier rejection based on the current iteration count. + const bool reject_outliers = reject_dynamic_outliers && + (iteration >= outlier_rejection_min_iteration); + + // Extract the rotation and translation parts from the matrix. + const core::Tensor R_S_to_V = + transform_vehicle_to_sensor + .GetItem({core::TensorKey::Slice(0, 3, 1), + core::TensorKey::Slice(0, 3, 1)}) + .Inverse() + .Flatten() + .To(device, dtype); + const core::Tensor r_v_to_s_in_V = + transform_vehicle_to_sensor + .GetItem({core::TensorKey::Slice(0, 3, 1), + core::TensorKey::Slice(3, 4, 1)}) + .Flatten() + .To(device, dtype); + + // Compute the pose (rotation + translation) vector. + const core::Tensor state_vector = + pipelines::kernel::TransformationToPose(current_transform) + .To(device, dtype); + + // Compute the linear and angular velocity from the pose vector. 
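+    // The pose vector is [alpha, beta, gamma, tx, ty, tz]; negating it and
+    // dividing by the scan period gives a constant-velocity estimate of the
+    // vehicle's angular (w) and linear (v) velocity in the vehicle frame.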
+ const core::Tensor w_v_in_V = + (state_vector.GetItem(core::TensorKey::Slice(0, 3, 1)).Neg() / + period) + .To(device, dtype); + const core::Tensor v_v_in_V = + (state_vector.GetItem(core::TensorKey::Slice(3, 6, 1)).Neg() / + period) + .To(device, dtype); + + core::Device::DeviceType device_type = device.GetType(); + if (device_type == core::Device::DeviceType::CPU) { + ComputePoseDopplerICPCPU( + source_points.Contiguous(), source_dopplers.Contiguous(), + source_directions.Contiguous(), target_points.Contiguous(), + target_normals.Contiguous(), + correspondence_indices.Contiguous(), output_pose, residual, + inlier_count, dtype, device, R_S_to_V.Contiguous(), + r_v_to_s_in_V.Contiguous(), w_v_in_V.Contiguous(), + v_v_in_V.Contiguous(), period, reject_outliers, + doppler_outlier_threshold, kernel_geometric, kernel_doppler, + lambda_doppler); + } else if (device_type == core::Device::DeviceType::CUDA) { + CUDA_CALL(ComputePoseDopplerICPCUDA, source_points.Contiguous(), + source_dopplers.Contiguous(), source_directions.Contiguous(), + target_points.Contiguous(), target_normals.Contiguous(), + correspondence_indices.Contiguous(), output_pose, residual, + inlier_count, dtype, device, R_S_to_V.Contiguous(), + r_v_to_s_in_V.Contiguous(), w_v_in_V.Contiguous(), + v_v_in_V.Contiguous(), period, reject_outliers, + doppler_outlier_threshold, kernel_geometric, kernel_doppler, + lambda_doppler); + } else { + utility::LogError("Unimplemented device."); + } + + utility::LogDebug( + "DopplerPointToPlane Transform: residual {}, inlier_count {}", + residual, inlier_count); + + return output_pose; +} + std::tuple ComputeRtPointToPoint( const core::Tensor &source_points, const core::Tensor &target_points, diff --git a/cpp/open3d/t/pipelines/kernel/Registration.h b/cpp/open3d/t/pipelines/kernel/Registration.h index 60736c0473a..3a375b85c3c 100644 --- a/cpp/open3d/t/pipelines/kernel/Registration.h +++ b/cpp/open3d/t/pipelines/kernel/Registration.h @@ -68,6 +68,62 @@ core::Tensor ComputePoseColoredICP(const core::Tensor &source_positions, const registration::RobustKernel &kernel, const double &lambda_geometric); +/// \brief Computes pose for DopplerICP registration method. +/// +/// \param source_points source point positions of Float32 or Float64 dtype. +/// \param source_dopplers source point Dopplers of same dtype as source point +/// positions. +/// \param source_directions source point direction of same dtype as source +/// point positions. +/// \param target_points target point positions of same dtype as source point +/// positions. +/// \param target_normals target point normals of same dtype as source point +/// positions. +/// \param correspondence_indices Tensor of type Int64 containing indices of +/// corresponding target positions, where the value is the target index and the +/// index of the value itself is the source index. It contains -1 as value at +/// index with no correspondence. +/// \param current_transform The current pose estimate of ICP. +/// \param transform_vehicle_to_sensor The 4x4 extrinsic transformation matrix +/// between the vehicle and the sensor frames. +/// \param iteration current iteration number of the ICP algorithm. +/// \param period Time period (in seconds) between the source and the target +/// point clouds. +/// \param lambda_doppler weight for the Doppler objective. +/// \param reject_dynamic_outliers Whether or not to prune dynamic point +/// outlier correspondences. 
+/// \param doppler_outlier_threshold Correspondences with Doppler error +/// greater than this threshold are rejected from optimization. +/// \param outlier_rejection_min_iteration Number of iterations of ICP after +/// which outlier rejection is enabled. +/// \param geometric_robust_loss_min_iteration Number of iterations of ICP +/// after which robust loss for geometric term kicks in. +/// \param doppler_robust_loss_min_iteration Number of iterations of ICP +/// after which robust loss for Doppler term kicks in. +/// \param geometric_kernel statistical robust kernel for outlier rejection. +/// \param doppler_kernel statistical robust kernel for outlier rejection. +/// \return Pose [alpha beta gamma, tx, ty, tz], a shape {6} tensor of dtype +/// Float64, where alpha, beta, gamma are the Euler angles in the ZYX order. +core::Tensor ComputePoseDopplerICP( + const core::Tensor &source_points, + const core::Tensor &source_dopplers, + const core::Tensor &source_directions, + const core::Tensor &target_points, + const core::Tensor &target_normals, + const core::Tensor &correspondence_indices, + const core::Tensor ¤t_transform, + const core::Tensor &transform_vehicle_to_sensor, + const std::size_t iteration, + const double period, + const double lambda_doppler, + const bool reject_dynamic_outliers, + const double doppler_outlier_threshold, + const std::size_t outlier_rejection_min_iteration, + const std::size_t geometric_robust_loss_min_iteration, + const std::size_t doppler_robust_loss_min_iteration, + const registration::RobustKernel &geometric_kernel, + const registration::RobustKernel &doppler_kernel); + /// \brief Computes (R) Rotation {3,3} and (t) translation {3,} /// for point to point registration method. /// diff --git a/cpp/open3d/t/pipelines/kernel/RegistrationCPU.cpp b/cpp/open3d/t/pipelines/kernel/RegistrationCPU.cpp index b23280ee92e..d2ea8b584ab 100644 --- a/cpp/open3d/t/pipelines/kernel/RegistrationCPU.cpp +++ b/cpp/open3d/t/pipelines/kernel/RegistrationCPU.cpp @@ -249,6 +249,174 @@ void ComputePoseColoredICPCPU(const core::Tensor &source_points, DecodeAndSolve6x6(global_sum, pose, residual, inlier_count); } +template +static void ComputePoseDopplerICPKernelCPU( + const scalar_t *source_points_ptr, + const scalar_t *source_dopplers_ptr, + const scalar_t *source_directions_ptr, + const scalar_t *target_points_ptr, + const scalar_t *target_normals_ptr, + const int64_t *correspondence_indices, + const scalar_t *R_S_to_V, + const scalar_t *r_v_to_s_in_V, + const scalar_t *v_s_in_S, + const bool reject_dynamic_outliers, + const scalar_t doppler_outlier_threshold, + const scalar_t sqrt_lambda_geometric, + const scalar_t sqrt_lambda_doppler, + const scalar_t sqrt_lambda_doppler_by_dt, + const int n, + scalar_t *global_sum, + funct1_t GetWeightFromRobustKernelFirst, // Geometric kernel + funct2_t GetWeightFromRobustKernelSecond // Doppler kernel +) { + // As, AtA is a symmetric matrix, we only need 21 elements instead of 36. + // Atb is of shape {6,1}. Combining both, A_1x29 is a temp. storage + // with [0:21] elements as AtA, [21:27] elements as Atb, 27th as residual + // and 28th as inlier_count. 
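+    // This 29-element encoding is the format expected by DecodeAndSolve6x6.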
+ std::vector A_1x29(29, 0.0); + +#ifdef _WIN32 + std::vector zeros_29(29, 0.0); + A_1x29 = tbb::parallel_reduce( + tbb::blocked_range(0, n), zeros_29, + [&](tbb::blocked_range r, std::vector A_reduction) { + for (int workload_idx = r.begin(); workload_idx < r.end(); + ++workload_idx) { +#else + scalar_t *A_reduction = A_1x29.data(); +#pragma omp parallel for reduction(+ : A_reduction[:29]) schedule(static) num_threads(utility::EstimateMaxThreads()) + for (int workload_idx = 0; workload_idx < n; ++workload_idx) { +#endif + scalar_t J_G[6] = {0}, J_D[6] = {0}; + scalar_t r_G = 0, r_D = 0; + + bool valid = GetJacobianDopplerICP( + workload_idx, source_points_ptr, + source_dopplers_ptr, source_directions_ptr, + target_points_ptr, target_normals_ptr, + correspondence_indices, R_S_to_V, r_v_to_s_in_V, + v_s_in_S, reject_dynamic_outliers, + doppler_outlier_threshold, sqrt_lambda_geometric, + sqrt_lambda_doppler, sqrt_lambda_doppler_by_dt, J_G, + J_D, r_G, r_D); + + scalar_t w_G = GetWeightFromRobustKernelFirst(r_G); + scalar_t w_D = GetWeightFromRobustKernelSecond(r_D); + + if (valid) { + // Dump J, r into JtJ and Jtr + int i = 0; + for (int j = 0; j < 6; ++j) { + for (int k = 0; k <= j; ++k) { + A_reduction[i] += J_G[j] * w_G * J_G[k] + + J_D[j] * w_D * J_D[k]; + ++i; + } + A_reduction[21 + j] += + J_G[j] * w_G * r_G + J_D[j] * w_D * r_D; + } + A_reduction[27] += r_G * r_G + r_D * r_D; + A_reduction[28] += 1; + } + } +#ifdef _WIN32 + return A_reduction; + }, + // TBB: Defining reduction operation. + [&](std::vector a, std::vector b) { + std::vector result(29); + for (int j = 0; j < 29; ++j) { + result[j] = a[j] + b[j]; + } + return result; + }); +#endif + + for (int i = 0; i < 29; ++i) { + global_sum[i] = A_1x29[i]; + } +} + +template +void PreComputeForDopplerICPKernelCPU(const scalar_t *R_S_to_V, + const scalar_t *r_v_to_s_in_V, + const scalar_t *w_v_in_V, + const scalar_t *v_v_in_V, + scalar_t *v_s_in_S) { + PreComputeForDopplerICP(R_S_to_V, r_v_to_s_in_V, w_v_in_V, v_v_in_V, + v_s_in_S); +} + +void ComputePoseDopplerICPCPU( + const core::Tensor &source_points, + const core::Tensor &source_dopplers, + const core::Tensor &source_directions, + const core::Tensor &target_points, + const core::Tensor &target_normals, + const core::Tensor &correspondence_indices, + core::Tensor &output_pose, + float &residual, + int &inlier_count, + const core::Dtype &dtype, + const core::Device &device, + const core::Tensor &R_S_to_V, + const core::Tensor &r_v_to_s_in_V, + const core::Tensor &w_v_in_V, + const core::Tensor &v_v_in_V, + const double period, + const bool reject_dynamic_outliers, + const double doppler_outlier_threshold, + const registration::RobustKernel &kernel_geometric, + const registration::RobustKernel &kernel_doppler, + const double lambda_doppler) { + const int n = source_points.GetLength(); + + core::Tensor global_sum = core::Tensor::Zeros({29}, dtype, device); + core::Tensor v_s_in_S = core::Tensor::Zeros({3}, dtype, device); + + DISPATCH_FLOAT_DTYPE_TO_TEMPLATE(dtype, [&]() { + const scalar_t sqrt_lambda_geometric = + sqrt(1.0 - static_cast(lambda_doppler)); + const scalar_t sqrt_lambda_doppler = sqrt(lambda_doppler); + const scalar_t sqrt_lambda_doppler_by_dt = + sqrt_lambda_doppler / static_cast(period); + + PreComputeForDopplerICPKernelCPU( + R_S_to_V.GetDataPtr(), + r_v_to_s_in_V.GetDataPtr(), + w_v_in_V.GetDataPtr(), + v_v_in_V.GetDataPtr(), + v_s_in_S.GetDataPtr()); + + DISPATCH_DUAL_ROBUST_KERNEL_FUNCTION( + scalar_t, kernel_geometric.type_, + kernel_geometric.scaling_parameter_, 
kernel_doppler.type_, + kernel_doppler.scaling_parameter_, [&]() { + kernel::ComputePoseDopplerICPKernelCPU( + source_points.GetDataPtr(), + source_dopplers.GetDataPtr(), + source_directions.GetDataPtr(), + target_points.GetDataPtr(), + target_normals.GetDataPtr(), + correspondence_indices.GetDataPtr(), + R_S_to_V.GetDataPtr(), + r_v_to_s_in_V.GetDataPtr(), + v_s_in_S.GetDataPtr(), + reject_dynamic_outliers, + static_cast(doppler_outlier_threshold), + sqrt_lambda_geometric, sqrt_lambda_doppler, + sqrt_lambda_doppler_by_dt, n, + global_sum.GetDataPtr(), + GetWeightFromRobustKernelFirst, // Geometric kernel + GetWeightFromRobustKernelSecond // Doppler kernel + ); + }); + }); + + DecodeAndSolve6x6(global_sum, output_pose, residual, inlier_count); +} + template static void Get3x3SxyLinearSystem(const scalar_t *source_points_ptr, const scalar_t *target_points_ptr, diff --git a/cpp/open3d/t/pipelines/kernel/RegistrationCUDA.cu b/cpp/open3d/t/pipelines/kernel/RegistrationCUDA.cu index f0b8c2c8bb1..20532dde857 100644 --- a/cpp/open3d/t/pipelines/kernel/RegistrationCUDA.cu +++ b/cpp/open3d/t/pipelines/kernel/RegistrationCUDA.cu @@ -229,6 +229,164 @@ void ComputePoseColoredICPCUDA(const core::Tensor &source_points, DecodeAndSolve6x6(global_sum, pose, residual, inlier_count); } +template +__global__ void ComputePoseDopplerICPKernelCUDA( + const scalar_t *source_points_ptr, + const scalar_t *source_dopplers_ptr, + const scalar_t *source_directions_ptr, + const scalar_t *target_points_ptr, + const scalar_t *target_normals_ptr, + const int64_t *correspondence_indices, + const scalar_t *R_S_to_V, + const scalar_t *r_v_to_s_in_V, + const scalar_t *v_s_in_S, + const bool reject_dynamic_outliers, + const scalar_t doppler_outlier_threshold, + const scalar_t sqrt_lambda_geometric, + const scalar_t sqrt_lambda_doppler, + const scalar_t sqrt_lambda_doppler_by_dt, + const int n, + scalar_t *global_sum, + funct1_t GetWeightFromRobustKernelFirst, // Geometric kernel + funct2_t GetWeightFromRobustKernelSecond // Doppler kernel +) { + typedef utility::MiniVec ReduceVec; + // Create shared memory. + typedef cub::BlockReduce BlockReduce; + __shared__ typename BlockReduce::TempStorage temp_storage; + ReduceVec local_sum(static_cast(0)); + + const int workload_idx = threadIdx.x + blockIdx.x * blockDim.x; + if (workload_idx < n) { + scalar_t J_G[6] = {0}, J_D[6] = {0}; + scalar_t r_G = 0, r_D = 0; + + const bool valid = GetJacobianDopplerICP( + workload_idx, source_points_ptr, source_dopplers_ptr, + source_directions_ptr, target_points_ptr, target_normals_ptr, + correspondence_indices, R_S_to_V, r_v_to_s_in_V, v_s_in_S, + reject_dynamic_outliers, doppler_outlier_threshold, + sqrt_lambda_geometric, sqrt_lambda_doppler, + sqrt_lambda_doppler_by_dt, J_G, J_D, r_G, r_D); + + if (valid) { + const scalar_t w_G = GetWeightFromRobustKernelFirst(r_G); + const scalar_t w_D = GetWeightFromRobustKernelSecond(r_D); + + // Dump J, r into JtJ and Jtr + int i = 0; + for (int j = 0; j < 6; ++j) { + for (int k = 0; k <= j; ++k) { + local_sum[i] += + J_G[j] * w_G * J_G[k] + J_D[j] * w_D * J_D[k]; + ++i; + } + local_sum[21 + j] += J_G[j] * w_G * r_G + J_D[j] * w_D * r_D; + } + local_sum[27] += r_G * r_G + r_D * r_D; + local_sum[28] += 1; + } + } + + // Reduction. + auto result = BlockReduce(temp_storage).Sum(local_sum); + + // Add result to global_sum. 
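+    // After the cub::BlockReduce, only thread 0 of each block holds the
+    // block-wide sum, so it alone accumulates into the global buffer.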
+ if (threadIdx.x == 0) { +#pragma unroll + for (int i = 0; i < kReduceDim; ++i) { + atomicAdd(&global_sum[i], result[i]); + } + } +} + +template +__global__ void PreComputeForDopplerICPKernelCUDA(const scalar_t *R_S_to_V, + const scalar_t *r_v_to_s_in_V, + const scalar_t *w_v_in_V, + const scalar_t *v_v_in_V, + scalar_t *v_s_in_S) { + PreComputeForDopplerICP(R_S_to_V, r_v_to_s_in_V, w_v_in_V, v_v_in_V, + v_s_in_S); +} + +void ComputePoseDopplerICPCUDA( + const core::Tensor &source_points, + const core::Tensor &source_dopplers, + const core::Tensor &source_directions, + const core::Tensor &target_points, + const core::Tensor &target_normals, + const core::Tensor &correspondence_indices, + core::Tensor &output_pose, + float &residual, + int &inlier_count, + const core::Dtype &dtype, + const core::Device &device, + const core::Tensor &R_S_to_V, + const core::Tensor &r_v_to_s_in_V, + const core::Tensor &w_v_in_V, + const core::Tensor &v_v_in_V, + const double period, + const bool reject_dynamic_outliers, + const double doppler_outlier_threshold, + const registration::RobustKernel &kernel_geometric, + const registration::RobustKernel &kernel_doppler, + const double lambda_doppler) { + core::CUDAScopedDevice scoped_device(source_points.GetDevice()); + int n = source_points.GetLength(); + + core::Tensor global_sum = core::Tensor::Zeros({29}, dtype, device); + const dim3 blocks((n + kThread1DUnit - 1) / kThread1DUnit); + const dim3 threads(kThread1DUnit); + + core::Tensor v_s_in_S = core::Tensor::Zeros({3}, dtype, device); + + DISPATCH_FLOAT_DTYPE_TO_TEMPLATE(dtype, [&]() { + const scalar_t sqrt_lambda_geometric = + sqrt(1.0 - static_cast(lambda_doppler)); + const scalar_t sqrt_lambda_doppler = sqrt(lambda_doppler); + const scalar_t sqrt_lambda_doppler_by_dt = + sqrt_lambda_doppler / static_cast(period); + + PreComputeForDopplerICPKernelCUDA + <<<1, 1, 0, core::cuda::GetStream()>>>( + R_S_to_V.GetDataPtr(), + r_v_to_s_in_V.GetDataPtr(), + w_v_in_V.GetDataPtr(), + v_v_in_V.GetDataPtr(), + v_s_in_S.GetDataPtr()); + + DISPATCH_DUAL_ROBUST_KERNEL_FUNCTION( + scalar_t, kernel_geometric.type_, + kernel_geometric.scaling_parameter_, kernel_doppler.type_, + kernel_doppler.scaling_parameter_, [&]() { + ComputePoseDopplerICPKernelCUDA<<< + blocks, threads, 0, core::cuda::GetStream()>>>( + source_points.GetDataPtr(), + source_dopplers.GetDataPtr(), + source_directions.GetDataPtr(), + target_points.GetDataPtr(), + target_normals.GetDataPtr(), + correspondence_indices.GetDataPtr(), + R_S_to_V.GetDataPtr(), + r_v_to_s_in_V.GetDataPtr(), + v_s_in_S.GetDataPtr(), + reject_dynamic_outliers, + static_cast(doppler_outlier_threshold), + sqrt_lambda_geometric, sqrt_lambda_doppler, + sqrt_lambda_doppler_by_dt, n, + global_sum.GetDataPtr(), + GetWeightFromRobustKernelFirst, // Geometric kernel + GetWeightFromRobustKernelSecond // Doppler kernel + ); + }); + }); + + core::cuda::Synchronize(); + + DecodeAndSolve6x6(global_sum, output_pose, residual, inlier_count); +} + template __global__ void ComputeInformationMatrixKernelCUDA( const scalar_t *target_points_ptr, diff --git a/cpp/open3d/t/pipelines/kernel/RegistrationImpl.h b/cpp/open3d/t/pipelines/kernel/RegistrationImpl.h index effab93558c..9d0309b16c7 100644 --- a/cpp/open3d/t/pipelines/kernel/RegistrationImpl.h +++ b/cpp/open3d/t/pipelines/kernel/RegistrationImpl.h @@ -9,10 +9,18 @@ #pragma once +#include + #include "open3d/core/CUDAUtils.h" #include "open3d/core/Tensor.h" +#include "open3d/core/linalg/kernel/Matrix.h" +#include 
"open3d/t/pipelines/kernel/TransformationConverter.h" #include "open3d/t/pipelines/registration/RobustKernel.h" +#ifndef __CUDACC__ +using std::abs; +#endif + namespace open3d { namespace t { namespace pipelines { @@ -44,6 +52,29 @@ void ComputePoseColoredICPCPU(const core::Tensor &source_points, const registration::RobustKernel &kernel, const double &lambda_geometric); +void ComputePoseDopplerICPCPU( + const core::Tensor &source_points, + const core::Tensor &source_dopplers, + const core::Tensor &source_directions, + const core::Tensor &target_points, + const core::Tensor &target_normals, + const core::Tensor &correspondence_indices, + core::Tensor &output_pose, + float &residual, + int &inlier_count, + const core::Dtype &dtype, + const core::Device &device, + const core::Tensor &R_S_to_V, + const core::Tensor &r_v_to_s_in_V, + const core::Tensor &w_v_in_V, + const core::Tensor &v_v_in_V, + const double period, + const bool reject_dynamic_outliers, + const double doppler_outlier_threshold, + const registration::RobustKernel &kernel_geometric, + const registration::RobustKernel &kernel_doppler, + const double lambda_doppler); + #ifdef BUILD_CUDA_MODULE void ComputePosePointToPlaneCUDA(const core::Tensor &source_points, const core::Tensor &target_points, @@ -70,6 +101,29 @@ void ComputePoseColoredICPCUDA(const core::Tensor &source_points, const core::Device &device, const registration::RobustKernel &kernel, const double &lambda_geometric); + +void ComputePoseDopplerICPCUDA( + const core::Tensor &source_points, + const core::Tensor &source_dopplers, + const core::Tensor &source_directions, + const core::Tensor &target_points, + const core::Tensor &target_normals, + const core::Tensor &correspondence_indices, + core::Tensor &output_pose, + float &residual, + int &inlier_count, + const core::Dtype &dtype, + const core::Device &device, + const core::Tensor &R_S_to_V, + const core::Tensor &r_v_to_s_in_V, + const core::Tensor &w_v_in_V, + const core::Tensor &v_v_in_V, + const double period, + const bool reject_dynamic_outliers, + const double doppler_outlier_threshold, + const registration::RobustKernel &kernel_geometric, + const registration::RobustKernel &kernel_doppler, + const double lambda_doppler); #endif void ComputeRtPointToPointCPU(const core::Tensor &source_points, @@ -261,6 +315,166 @@ template bool GetJacobianColoredICP(const int64_t workload_idx, double &r_G, double &r_I); +template +OPEN3D_HOST_DEVICE inline void PreComputeForDopplerICP( + const scalar_t *R_S_to_V, + const scalar_t *r_v_to_s_in_V, + const scalar_t *w_v_in_V, + const scalar_t *v_v_in_V, + scalar_t *v_s_in_S) { + // Compute v_s_in_V = v_v_in_V + w_v_in_V.cross(r_v_to_s_in_V). + scalar_t v_s_in_V[3] = {0}; + core::linalg::kernel::cross_3x1(w_v_in_V, r_v_to_s_in_V, v_s_in_V); + v_s_in_V[0] += v_v_in_V[0]; + v_s_in_V[1] += v_v_in_V[1]; + v_s_in_V[2] += v_v_in_V[2]; + + // Compute v_s_in_S = R_S_to_V * v_s_in_V. 
+ core::linalg::kernel::matmul3x3_3x1(R_S_to_V, v_s_in_V, v_s_in_S); +} + +template void PreComputeForDopplerICP(const float *R_S_to_V, + const float *r_v_to_s_in_V, + const float *w_v_in_V, + const float *v_v_in_V, + float *v_s_in_S); + +template void PreComputeForDopplerICP(const double *R_S_to_V, + const double *r_v_to_s_in_V, + const double *w_v_in_V, + const double *v_v_in_V, + double *v_s_in_S); + +template +OPEN3D_HOST_DEVICE inline bool GetJacobianDopplerICP( + const int64_t workload_idx, + const scalar_t *source_points_ptr, + const scalar_t *source_dopplers_ptr, + const scalar_t *source_directions_ptr, + const scalar_t *target_points_ptr, + const scalar_t *target_normals_ptr, + const int64_t *correspondence_indices, + const scalar_t *R_S_to_V, + const scalar_t *r_v_to_s_in_V, + const scalar_t *v_s_in_S, + const bool reject_dynamic_outliers, + const scalar_t doppler_outlier_threshold, + const scalar_t &sqrt_lambda_geometric, + const scalar_t &sqrt_lambda_doppler, + const scalar_t &sqrt_lambda_doppler_by_dt, + scalar_t *J_G, + scalar_t *J_D, + scalar_t &r_G, + scalar_t &r_D) { + if (correspondence_indices[workload_idx] == -1) { + return false; + } + + const int64_t target_idx = 3 * correspondence_indices[workload_idx]; + const int64_t source_idx = 3 * workload_idx; + + const scalar_t &doppler_in_S = source_dopplers_ptr[workload_idx]; + + const scalar_t ds_in_V[3] = {source_directions_ptr[source_idx], + source_directions_ptr[source_idx + 1], + source_directions_ptr[source_idx + 2]}; + + // Compute predicted Doppler velocity (in sensor frame). + scalar_t ds_in_S[3] = {0}; + core::linalg::kernel::matmul3x3_3x1(R_S_to_V, ds_in_V, ds_in_S); + const scalar_t doppler_pred_in_S = + -core::linalg::kernel::dot_3x1(ds_in_S, v_s_in_S); + + // Compute Doppler error. + const double doppler_error = doppler_in_S - doppler_pred_in_S; + + // Dynamic point outlier rejection. + if (reject_dynamic_outliers && + abs(doppler_error) > doppler_outlier_threshold) { + // Jacobian and residual are set to 0 by default. + return true; + } + + // Compute Doppler residual and Jacobian. + scalar_t J_D_w[3] = {0}; + core::linalg::kernel::cross_3x1(ds_in_V, r_v_to_s_in_V, J_D_w); + J_D[0] = sqrt_lambda_doppler_by_dt * J_D_w[0]; + J_D[1] = sqrt_lambda_doppler_by_dt * J_D_w[1]; + J_D[2] = sqrt_lambda_doppler_by_dt * J_D_w[2]; + J_D[3] = sqrt_lambda_doppler_by_dt * -ds_in_V[0]; + J_D[4] = sqrt_lambda_doppler_by_dt * -ds_in_V[1]; + J_D[5] = sqrt_lambda_doppler_by_dt * -ds_in_V[2]; + r_D = sqrt_lambda_doppler * doppler_error; + + const scalar_t ps[3] = {source_points_ptr[source_idx], + source_points_ptr[source_idx + 1], + source_points_ptr[source_idx + 2]}; + + const scalar_t pt[3] = {target_points_ptr[target_idx], + target_points_ptr[target_idx + 1], + target_points_ptr[target_idx + 2]}; + + const scalar_t nt[3] = {target_normals_ptr[target_idx], + target_normals_ptr[target_idx + 1], + target_normals_ptr[target_idx + 2]}; + + // Compute geometric point-to-plane error. + const scalar_t p2p_error = (ps[0] - pt[0]) * nt[0] + + (ps[1] - pt[1]) * nt[1] + + (ps[2] - pt[2]) * nt[2]; + + // Compute geometric point-to-plane residual and Jacobian. 
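+    // J_G = sqrt_lambda_geometric * [p_s x n_t, n_t], the standard
+    // small-angle point-to-plane Jacobian; r_G is the scaled residual.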
+ J_G[0] = sqrt_lambda_geometric * (-ps[2] * nt[1] + ps[1] * nt[2]); + J_G[1] = sqrt_lambda_geometric * (ps[2] * nt[0] - ps[0] * nt[2]); + J_G[2] = sqrt_lambda_geometric * (-ps[1] * nt[0] + ps[0] * nt[1]); + J_G[3] = sqrt_lambda_geometric * nt[0]; + J_G[4] = sqrt_lambda_geometric * nt[1]; + J_G[5] = sqrt_lambda_geometric * nt[2]; + r_G = sqrt_lambda_geometric * p2p_error; + + return true; +} + +template bool GetJacobianDopplerICP(const int64_t workload_idx, + const float *source_points_ptr, + const float *source_dopplers_ptr, + const float *source_directions_ptr, + const float *target_points_ptr, + const float *target_normals_ptr, + const int64_t *correspondence_indices, + const float *R_S_to_V, + const float *r_v_to_s_in_V, + const float *v_s_in_S, + const bool reject_dynamic_outliers, + const float doppler_outlier_threshold, + const float &sqrt_lambda_geometric, + const float &sqrt_lambda_doppler, + const float &sqrt_lambda_doppler_by_dt, + float *J_G, + float *J_D, + float &r_G, + float &r_D); + +template bool GetJacobianDopplerICP(const int64_t workload_idx, + const double *source_points_ptr, + const double *source_dopplers_ptr, + const double *source_directions_ptr, + const double *target_points_ptr, + const double *target_normals_ptr, + const int64_t *correspondence_indices, + const double *R_S_to_V, + const double *r_v_to_s_in_V, + const double *v_s_in_S, + const bool reject_dynamic_outliers, + const double doppler_outlier_threshold, + const double &sqrt_lambda_geometric, + const double &sqrt_lambda_doppler, + const double &sqrt_lambda_doppler_by_dt, + double *J_G, + double *J_D, + double &r_G, + double &r_D); + template OPEN3D_HOST_DEVICE inline bool GetInformationJacobians( int64_t workload_idx, diff --git a/cpp/open3d/t/pipelines/kernel/TransformationConverter.cpp b/cpp/open3d/t/pipelines/kernel/TransformationConverter.cpp index 94c381ff694..ec5bac0c9de 100644 --- a/cpp/open3d/t/pipelines/kernel/TransformationConverter.cpp +++ b/cpp/open3d/t/pipelines/kernel/TransformationConverter.cpp @@ -94,6 +94,53 @@ core::Tensor PoseToTransformation(const core::Tensor &pose) { return transformation; } +template +static void TransformationToPoseDevice( + core::Tensor &pose, + const core::Tensor &transformation, + const core::Device::DeviceType &device_type) { + scalar_t *pose_ptr = pose.GetDataPtr(); + const scalar_t *transformation_ptr = transformation.GetDataPtr(); + + if (device_type == core::Device::DeviceType::CPU) { + TransformationToPoseImpl(pose_ptr, transformation_ptr); + } else if (device_type == core::Device::DeviceType::CUDA) { +#ifdef BUILD_CUDA_MODULE + TransformationToPoseCUDA(pose_ptr, transformation_ptr); +#else + utility::LogError("Not compiled with CUDA, but CUDA device is used."); +#endif + } else { + utility::LogError("Unimplemented device."); + } +} + +core::Tensor TransformationToPose(const core::Tensor &transformation) { + core::AssertTensorShape(transformation, {4, 4}); + core::AssertTensorDtypes(transformation, {core::Float32, core::Float64}); + + const core::Device device = transformation.GetDevice(); + const core::Dtype dtype = transformation.GetDtype(); + core::Tensor pose = core::Tensor::Zeros({6}, dtype, device); + pose = pose.Contiguous(); + core::Tensor transformation_ = transformation.Contiguous(); + + DISPATCH_FLOAT_DTYPE_TO_TEMPLATE(dtype, [&]() { + core::Device::DeviceType device_type = device.GetType(); + TransformationToPoseDevice(pose, transformation_, + device_type); + }); + + // Set translation parameters in pose vector. 
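+    // The device kernel above fills only the rotation part (ZYX Euler
+    // angles); the translation is copied directly from the last column of
+    // the 4x4 transformation matrix.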
+ pose.SetItem(core::TensorKey::Slice(3, 6, 1), + transformation_ + .GetItem({core::TensorKey::Slice(0, 3, 1), + core::TensorKey::Slice(3, 4, 1)}) + .Flatten()); + + return pose; +} + void DecodeAndSolve6x6(const core::Tensor &A_reduction, core::Tensor &delta, float &inlier_residual, diff --git a/cpp/open3d/t/pipelines/kernel/TransformationConverter.cu b/cpp/open3d/t/pipelines/kernel/TransformationConverter.cu index 645e492356a..7eb97abb1b3 100644 --- a/cpp/open3d/t/pipelines/kernel/TransformationConverter.cu +++ b/cpp/open3d/t/pipelines/kernel/TransformationConverter.cu @@ -41,6 +41,32 @@ void PoseToTransformationCUDA(double *transformation_ptr, <<<1, 1, 0, core::cuda::GetStream()>>>(transformation_ptr, X_ptr); } +template +__global__ void TransformationToPoseKernel(scalar_t *X_ptr, + const scalar_t *transformation_ptr) { + TransformationToPoseImpl(X_ptr, transformation_ptr); +} + +template +void TransformationToPoseCUDA(scalar_t *X_ptr, + const scalar_t *transformation_ptr) { + utility::LogError("Unsupported data type."); +} + +template <> +void TransformationToPoseCUDA(float *X_ptr, + const float *transformation_ptr) { + TransformationToPoseKernel + <<<1, 1, 0, core::cuda::GetStream()>>>(X_ptr, transformation_ptr); +} + +template <> +void TransformationToPoseCUDA(double *X_ptr, + const double *transformation_ptr) { + TransformationToPoseKernel + <<<1, 1, 0, core::cuda::GetStream()>>>(X_ptr, transformation_ptr); +} + } // namespace kernel } // namespace pipelines } // namespace t diff --git a/cpp/open3d/t/pipelines/kernel/TransformationConverter.h b/cpp/open3d/t/pipelines/kernel/TransformationConverter.h index 6bc0276ea83..1155f20b4d5 100644 --- a/cpp/open3d/t/pipelines/kernel/TransformationConverter.h +++ b/cpp/open3d/t/pipelines/kernel/TransformationConverter.h @@ -30,6 +30,14 @@ core::Tensor RtToTransformation(const core::Tensor &R, const core::Tensor &t); /// as pose. core::Tensor PoseToTransformation(const core::Tensor &pose); +/// \brief Convert transformation matrix to pose. +/// +/// \param transformation, a tensor of shape {4, 4}, of dtype Float32 +/// \return pose [alpha beta gamma, tx, ty, tz], a shape {6} tensor of dtype and +/// device same as transformation, where alpha, beta, gamma are the Euler angles +/// in the ZYX order. +core::Tensor TransformationToPose(const core::Tensor &transformation); + /// \brief Decodes a 6x6 linear system from a compressed 29x1 tensor. /// \param A_reduction 1x29 tensor storing a linear system, /// (21 for \f$J^T J\f$ matrix, 6 for \f$J^T r\f$, 1 for residual, diff --git a/cpp/open3d/t/pipelines/kernel/TransformationConverterImpl.h b/cpp/open3d/t/pipelines/kernel/TransformationConverterImpl.h index 13c387dae98..86879dbf863 100644 --- a/cpp/open3d/t/pipelines/kernel/TransformationConverterImpl.h +++ b/cpp/open3d/t/pipelines/kernel/TransformationConverterImpl.h @@ -41,6 +41,24 @@ OPEN3D_HOST_DEVICE inline void PoseToTransformationImpl( transformation_ptr[10] = cos(pose_ptr[1]) * cos(pose_ptr[0]); } +/// Shared implementation for TransformationToPose function. +/// Reference method: utility::TransformMatrix4dToVector6d. 
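+/// Recovers the ZYX Euler angles from the rotation block of the 4x4 matrix,
+/// falling back to a gimbal-lock-safe branch when sy = sqrt(r00^2 + r10^2)
+/// is close to zero.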
+template +OPEN3D_HOST_DEVICE inline void TransformationToPoseImpl( + scalar_t *pose_ptr, const scalar_t *transformation_ptr) { + const scalar_t sy = sqrt(transformation_ptr[0] * transformation_ptr[0] + + transformation_ptr[4] * transformation_ptr[4]); + if (!(sy < 1e-6)) { + pose_ptr[0] = atan2(transformation_ptr[9], transformation_ptr[10]); + pose_ptr[1] = atan2(-transformation_ptr[8], sy); + pose_ptr[2] = atan2(transformation_ptr[4], transformation_ptr[0]); + } else { + pose_ptr[0] = atan2(-transformation_ptr[6], transformation_ptr[5]); + pose_ptr[1] = atan2(-transformation_ptr[8], sy); + pose_ptr[2] = 0; + } +} + #ifdef BUILD_CUDA_MODULE /// \brief Helper function for PoseToTransformationCUDA. /// Do not call this independently, as it only sets the transformation part @@ -49,6 +67,14 @@ OPEN3D_HOST_DEVICE inline void PoseToTransformationImpl( template void PoseToTransformationCUDA(scalar_t *transformation_ptr, const scalar_t *pose_ptr); + +/// \brief Helper function for TransformationToPoseCUDA. +/// Do not call this independently, as it only sets the rotation part in the +/// pose, using the Transformation, the rest is set in the parent function +/// TransformationToPose. +template +void TransformationToPoseCUDA(scalar_t *pose_ptr, + const scalar_t *transformation_ptr); #endif } // namespace kernel diff --git a/cpp/open3d/t/pipelines/registration/Registration.cpp b/cpp/open3d/t/pipelines/registration/Registration.cpp index b097b2bf44c..2b7cbb19976 100644 --- a/cpp/open3d/t/pipelines/registration/Registration.cpp +++ b/cpp/open3d/t/pipelines/registration/Registration.cpp @@ -159,6 +159,26 @@ static void AssertInputMultiScaleICP( } } + // Doppler ICP requires Doppler velocities and pre-computed directions for + // source point cloud. + if (estimation.GetTransformationEstimationType() == + TransformationEstimationType::DopplerICP) { + if (!target.HasPointNormals()) { + utility::LogError( + "DopplerICP requires target pointcloud to have normals."); + } + if (!source.HasPointAttr("dopplers")) { + utility::LogError( + "DopplerICP requires source pointcloud to have Doppler " + "velocities."); + } + if (!source.HasPointAttr("directions")) { + utility::LogError( + "DopplerICP requires source pointcloud to have " + "pre-computed direction vectors."); + } + } + if (max_correspondence_distances[0] <= 0.0) { utility::LogError( " Max correspondence distance must be greater than 0, but" @@ -254,11 +274,15 @@ static std::tuple DoSingleScaleICPIterations( int iteration_count = 0; for (iteration_count = 0; iteration_count < criteria.max_iteration_; ++iteration_count) { + // Update the results and find correspondences. result = ComputeRegistrationResult( source.GetPointPositions(), target_nns, max_correspondence_distance, result.transformation_); + // No correspondences. if (result.fitness_ <= std::numeric_limits::min()) { + result.converged_ = false; + result.num_iterations_ = iteration_count; return std::make_tuple(result, prev_iteration_count + iteration_count); } @@ -266,10 +290,11 @@ static std::tuple DoSingleScaleICPIterations( // Computing Transform between source and target, given // correspondences. ComputeTransformation returns {4,4} shaped // Float64 transformation tensor on CPU device. 
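+        // The current cumulative transform and the iteration index are now
+        // passed through as well, since DopplerICP needs them to estimate
+        // the velocity and to schedule outlier rejection and robust kernels.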
- core::Tensor update = + const core::Tensor update = estimation - .ComputeTransformation(source, target, - result.correspondences_) + .ComputeTransformation( + source, target, result.correspondences_, + result.transformation_, iteration_count) .To(core::Float64); // Multiply the transform to the cumulative transformation (update). @@ -307,6 +332,7 @@ static std::tuple DoSingleScaleICPIterations( criteria.relative_fitness_ && std::abs(prev_inlier_rmse - result.inlier_rmse_) < criteria.relative_rmse_) { + result.converged_ = true; break; } prev_fitness = result.fitness_; @@ -386,11 +412,14 @@ RegistrationResult MultiScaleICP( // No correspondences. if (result.fitness_ <= std::numeric_limits::min()) { + result.converged_ = false; break; } } // ---- Iterating over different resolution scale END -------------------- + result.num_iterations_ = iteration_count; + return result; } diff --git a/cpp/open3d/t/pipelines/registration/Registration.h b/cpp/open3d/t/pipelines/registration/Registration.h index 7164ebe7917..036c126ca43 100644 --- a/cpp/open3d/t/pipelines/registration/Registration.h +++ b/cpp/open3d/t/pipelines/registration/Registration.h @@ -91,6 +91,10 @@ class RegistrationResult { /// For ICP: the overlapping area (# of inlier correspondences / # of points /// in target). Higher is better. double fitness_; + /// Specifies whether the algorithm converged or not. + bool converged_{false}; + /// Number of iterations the algorithm took to converge. + size_t num_iterations_{0}; }; /// \brief Function for evaluating registration between point clouds. @@ -178,7 +182,7 @@ RegistrationResult MultiScaleICP( void(const std::unordered_map &)> &callback_after_iteration = nullptr); -/// \brief Computes `Information Matrix`, from the transformation between source +/// \brief Computes `Information Matrix`, from the transfromation between source /// and target pointcloud. It returns the `Information Matrix` of shape {6, 6}, /// of dtype `Float64` on device `CPU:0`. /// diff --git a/cpp/open3d/t/pipelines/registration/RobustKernelImpl.h b/cpp/open3d/t/pipelines/registration/RobustKernelImpl.h index 134565923f6..95e551a05f7 100644 --- a/cpp/open3d/t/pipelines/registration/RobustKernelImpl.h +++ b/cpp/open3d/t/pipelines/registration/RobustKernelImpl.h @@ -113,3 +113,67 @@ using open3d::t::pipelines::registration::RobustKernelMethod; utility::LogError("Unsupported method."); \ } \ }() + +/// \param scalar_t type: float / double. +/// \param METHOD_1 registration::RobustKernelMethod Loss type. +/// \param scaling_parameter_1 Scaling parameter for loss fine-tuning. +/// \param METHOD_2 registration::RobustKernelMethod Loss type. +/// \param scaling_parameter_2 Scaling parameter for loss fine-tuning. +#define DISPATCH_DUAL_ROBUST_KERNEL_FUNCTION(scalar_t, METHOD_1, \ + scaling_parameter_1, METHOD_2, \ + scaling_parameter_2, ...) 
+    [&] {                                                                    \
+        scalar_t scale_1 = static_cast<scalar_t>(scaling_parameter_1);       \
+        scalar_t scale_2 = static_cast<scalar_t>(scaling_parameter_2);       \
+        if (METHOD_1 == RobustKernelMethod::L2Loss &&                        \
+            METHOD_2 == RobustKernelMethod::L2Loss) {                        \
+            auto GetWeightFromRobustKernelFirst =                            \
+                    [=] OPEN3D_HOST_DEVICE(scalar_t residual) -> scalar_t {  \
+                return 1.0;                                                  \
+            };                                                               \
+            auto GetWeightFromRobustKernelSecond =                           \
+                    [=] OPEN3D_HOST_DEVICE(scalar_t residual) -> scalar_t {  \
+                return 1.0;                                                  \
+            };                                                               \
+            return __VA_ARGS__();                                            \
+        } else if (METHOD_1 == RobustKernelMethod::L2Loss &&                 \
+                   METHOD_2 == RobustKernelMethod::TukeyLoss) {              \
+            auto GetWeightFromRobustKernelFirst =                            \
+                    [=] OPEN3D_HOST_DEVICE(scalar_t residual) -> scalar_t {  \
+                return 1.0;                                                  \
+            };                                                               \
+            auto GetWeightFromRobustKernelSecond =                           \
+                    [=] OPEN3D_HOST_DEVICE(scalar_t residual) -> scalar_t {  \
+                return Square(1.0 - Square(min((scalar_t)1.0,                \
+                                               abs(residual) / scale_2)));   \
+            };                                                               \
+            return __VA_ARGS__();                                            \
+        } else if (METHOD_1 == RobustKernelMethod::TukeyLoss &&              \
+                   METHOD_2 == RobustKernelMethod::L2Loss) {                 \
+            auto GetWeightFromRobustKernelFirst =                            \
+                    [=] OPEN3D_HOST_DEVICE(scalar_t residual) -> scalar_t {  \
+                return Square(1.0 - Square(min((scalar_t)1.0,                \
+                                               abs(residual) / scale_1)));   \
+            };                                                               \
+            auto GetWeightFromRobustKernelSecond =                           \
+                    [=] OPEN3D_HOST_DEVICE(scalar_t residual) -> scalar_t {  \
+                return 1.0;                                                  \
+            };                                                               \
+            return __VA_ARGS__();                                            \
+        } else if (METHOD_1 == RobustKernelMethod::TukeyLoss &&              \
+                   METHOD_2 == RobustKernelMethod::TukeyLoss) {              \
+            auto GetWeightFromRobustKernelFirst =                            \
+                    [=] OPEN3D_HOST_DEVICE(scalar_t residual) -> scalar_t {  \
+                return Square(1.0 - Square(min((scalar_t)1.0,                \
+                                               abs(residual) / scale_1)));   \
+            };                                                               \
+            auto GetWeightFromRobustKernelSecond =                           \
+                    [=] OPEN3D_HOST_DEVICE(scalar_t residual) -> scalar_t {  \
+                return Square(1.0 - Square(min((scalar_t)1.0,                \
+                                               abs(residual) / scale_2)));   \
+            };                                                               \
+            return __VA_ARGS__();                                            \
+        } else {                                                             \
+            utility::LogError("Unsupported method.");                        \
+        }                                                                    \
+    }()
diff --git a/cpp/open3d/t/pipelines/registration/TransformationEstimation.cpp b/cpp/open3d/t/pipelines/registration/TransformationEstimation.cpp
index c17a3104669..5c19333c00a 100644
--- a/cpp/open3d/t/pipelines/registration/TransformationEstimation.cpp
+++ b/cpp/open3d/t/pipelines/registration/TransformationEstimation.cpp
@@ -70,7 +70,9 @@ double TransformationEstimationPointToPoint::ComputeRMSE(
 core::Tensor TransformationEstimationPointToPoint::ComputeTransformation(
         const geometry::PointCloud &source,
         const geometry::PointCloud &target,
-        const core::Tensor &correspondences) const {
+        const core::Tensor &correspondences,
+        const core::Tensor &current_transform,
+        const std::size_t iteration) const {
     if (!target.HasPointPositions() || !source.HasPointPositions()) {
         utility::LogError("Source and/or Target pointcloud is empty.");
     }
@@ -132,7 +134,9 @@ double TransformationEstimationPointToPlane::ComputeRMSE(
 core::Tensor TransformationEstimationPointToPlane::ComputeTransformation(
         const geometry::PointCloud &source,
         const geometry::PointCloud &target,
-        const core::Tensor &correspondences) const {
+        const core::Tensor &correspondences,
+        const core::Tensor &current_transform,
+        const std::size_t iteration) const {
     if (!target.HasPointPositions() || !source.HasPointPositions()) {
         utility::LogError("Source and/or Target pointcloud is empty.");
     }
@@ -251,7 +255,9 @@ double TransformationEstimationForColoredICP::ComputeRMSE(
 core::Tensor TransformationEstimationForColoredICP::ComputeTransformation(
         const geometry::PointCloud &source,
        const geometry::PointCloud &target,
-        const core::Tensor &correspondences) const {
+        const core::Tensor &correspondences,
+        const core::Tensor &current_transform,
+        const std::size_t iteration) const {
     if (!target.HasPointPositions() || !source.HasPointPositions()) {
         utility::LogError("Source and/or Target pointcloud is empty.");
     }
@@ -298,6 +304,93 @@ core::Tensor TransformationEstimationForColoredICP::ComputeTransformation(
     return transform;
 }

+double TransformationEstimationForDopplerICP::ComputeRMSE(
+        const geometry::PointCloud &source,
+        const geometry::PointCloud &target,
+        const core::Tensor &correspondences) const {
+    if (!target.HasPointPositions() || !source.HasPointPositions()) {
+        utility::LogError("Source and/or Target pointcloud is empty.");
+    }
+    if (!target.HasPointNormals()) {
+        utility::LogError("Target pointcloud missing normals attribute.");
+    }
+
+    core::AssertTensorDtype(target.GetPointPositions(),
+                            source.GetPointPositions().GetDtype());
+    core::AssertTensorDevice(target.GetPointPositions(), source.GetDevice());
+
+    AssertValidCorrespondences(correspondences, source.GetPointPositions());
+
+    core::Tensor valid = correspondences.Ne(-1).Reshape({-1});
+    core::Tensor neighbour_indices =
+            correspondences.IndexGet({valid}).Reshape({-1});
+    core::Tensor source_points_indexed =
+            source.GetPointPositions().IndexGet({valid});
+    core::Tensor target_points_indexed =
+            target.GetPointPositions().IndexGet({neighbour_indices});
+    core::Tensor target_normals_indexed =
+            target.GetPointNormals().IndexGet({neighbour_indices});
+
+    core::Tensor error_t = (source_points_indexed - target_points_indexed)
+                                   .Mul_(target_normals_indexed);
+    error_t.Mul_(error_t);
+    double error = error_t.Sum({0, 1}).To(core::Float64).Item<double>();
+    return std::sqrt(error /
+                     static_cast<double>(neighbour_indices.GetLength()));
+}
+
+core::Tensor TransformationEstimationForDopplerICP::ComputeTransformation(
+        const geometry::PointCloud &source,
+        const geometry::PointCloud &target,
+        const core::Tensor &correspondences,
+        const core::Tensor &current_transform,
+        const std::size_t iteration) const {
+    if (!source.HasPointPositions() || !target.HasPointPositions()) {
+        utility::LogError("Source and/or Target pointcloud is empty.");
+    }
+    if (!target.HasPointNormals()) {
+        utility::LogError("Target pointcloud missing normals attribute.");
+    }
+    if (!source.HasPointAttr("dopplers")) {
+        utility::LogError("Source pointcloud missing dopplers attribute.");
+    }
+    if (!source.HasPointAttr("directions")) {
+        utility::LogError("Source pointcloud missing directions attribute.");
+    }
+
+    core::AssertTensorDtypes(source.GetPointPositions(),
+                             {core::Float64, core::Float32});
+    const core::Dtype dtype = source.GetPointPositions().GetDtype();
+
+    core::AssertTensorDtype(target.GetPointPositions(), dtype);
+    core::AssertTensorDtype(target.GetPointNormals(), dtype);
+    core::AssertTensorDtype(source.GetPointAttr("dopplers"), dtype);
+    core::AssertTensorDtype(source.GetPointAttr("directions"), dtype);
+
+    core::AssertTensorDevice(target.GetPointPositions(), source.GetDevice());
+
+    AssertValidCorrespondences(correspondences, source.GetPointPositions());
+
+    // Get pose {6} of type Float64 from correspondences indexed source
+    // and target point cloud.
+ core::Tensor pose = pipelines::kernel::ComputePoseDopplerICP( + source.GetPointPositions(), source.GetPointAttr("dopplers"), + source.GetPointAttr("directions"), target.GetPointPositions(), + target.GetPointNormals(), correspondences, current_transform, + this->transform_vehicle_to_sensor_, iteration, this->period_, + this->lambda_doppler_, this->reject_dynamic_outliers_, + this->doppler_outlier_threshold_, + this->outlier_rejection_min_iteration_, + this->geometric_robust_loss_min_iteration_, + this->doppler_robust_loss_min_iteration_, this->geometric_kernel_, + this->doppler_kernel_); + + // Get transformation {4,4} of type Float64 from pose {6}. + core::Tensor transform = pipelines::kernel::PoseToTransformation(pose); + + return transform; +} + } // namespace registration } // namespace pipelines } // namespace t diff --git a/cpp/open3d/t/pipelines/registration/TransformationEstimation.h b/cpp/open3d/t/pipelines/registration/TransformationEstimation.h index 77f00691d32..acc13f7d726 100644 --- a/cpp/open3d/t/pipelines/registration/TransformationEstimation.h +++ b/cpp/open3d/t/pipelines/registration/TransformationEstimation.h @@ -7,6 +7,7 @@ #pragma once +#include #include #include #include @@ -16,6 +17,7 @@ #include "open3d/t/geometry/PointCloud.h" #include "open3d/t/pipelines/kernel/TransformationConverter.h" #include "open3d/t/pipelines/registration/RobustKernel.h" +#include "open3d/utility/Logging.h" namespace open3d { @@ -27,11 +29,19 @@ class PointCloud; namespace pipelines { namespace registration { +namespace { + +// Minimum time period (sec) between two sequential scans for Doppler ICP. +constexpr double kMinTimePeriod{1e-3}; + +} // namespace + enum class TransformationEstimationType { Unspecified = 0, PointToPoint = 1, PointToPlane = 2, ColoredICP = 3, + DopplerICP = 4, }; /// \class TransformationEstimation @@ -70,12 +80,17 @@ class TransformationEstimation { /// corresponding target points, where the value is the target index and the /// index of the value itself is the source index. It contains -1 as value /// at index with no correspondence. + /// \param current_transform The current pose estimate of ICP. + /// \param iteration The current iteration number of the ICP algorithm. /// \return transformation between source to target, a tensor of shape {4, /// 4}, type Float64 on CPU device. virtual core::Tensor ComputeTransformation( const geometry::PointCloud &source, const geometry::PointCloud &target, - const core::Tensor &correspondences) const = 0; + const core::Tensor &correspondences, + const core::Tensor ¤t_transform = + core::Tensor::Eye(4, core::Float64, core::Device("CPU:0")), + const std::size_t iteration = 0) const = 0; }; /// \class TransformationEstimationPointToPoint @@ -115,12 +130,17 @@ class TransformationEstimationPointToPoint : public TransformationEstimation { /// corresponding target points, where the value is the target index and the /// index of the value itself is the source index. It contains -1 as value /// at index with no correspondence. + /// \param current_transform The current pose estimate of ICP. + /// \param iteration The current iteration number of the ICP algorithm. /// \return transformation between source to target, a tensor of shape {4, /// 4}, type Float64 on CPU device. 
    core::Tensor ComputeTransformation(
             const geometry::PointCloud &source,
             const geometry::PointCloud &target,
-            const core::Tensor &correspondences) const override;
+            const core::Tensor &correspondences,
+            const core::Tensor &current_transform =
+                    core::Tensor::Eye(4, core::Float64, core::Device("CPU:0")),
+            const std::size_t iteration = 0) const override;

 private:
     const TransformationEstimationType type_ =
@@ -174,12 +194,17 @@ class TransformationEstimationPointToPlane : public TransformationEstimation {
     /// corresponding target points, where the value is the target index and the
     /// index of the value itself is the source index. It contains -1 as value
     /// at index with no correspondence.
-    /// \return transformation between source to target, a tensor
-    /// of shape {4, 4}, type Float64 on CPU device.
+    /// \param current_transform The current pose estimate of ICP.
+    /// \param iteration The current iteration number of the ICP algorithm.
+    /// \return transformation between source to target, a tensor of shape {4,
+    /// 4}, type Float64 on CPU device.
     core::Tensor ComputeTransformation(
             const geometry::PointCloud &source,
             const geometry::PointCloud &target,
-            const core::Tensor &correspondences) const override;
+            const core::Tensor &correspondences,
+            const core::Tensor &current_transform =
+                    core::Tensor::Eye(4, core::Float64, core::Device("CPU:0")),
+            const std::size_t iteration = 0) const override;

 public:
     /// RobustKernel for outlier rejection.
@@ -250,12 +275,17 @@ class TransformationEstimationForColoredICP : public TransformationEstimation {
     /// corresponding target points, where the value is the target index and the
     /// index of the value itself is the source index. It contains -1 as value
     /// at index with no correspondence.
+    /// \param current_transform The current pose estimate of ICP.
+    /// \param iteration The current iteration number of the ICP algorithm.
     /// \return transformation between source to target, a tensor of shape {4,
     /// 4}, type Float64 on CPU device.
     core::Tensor ComputeTransformation(
             const geometry::PointCloud &source,
             const geometry::PointCloud &target,
-            const core::Tensor &correspondences) const override;
+            const core::Tensor &correspondences,
+            const core::Tensor &current_transform =
+                    core::Tensor::Eye(4, core::Float64, core::Device("CPU:0")),
+            const std::size_t iteration = 0) const override;

 public:
     double lambda_geometric_ = 0.968;
@@ -267,6 +297,151 @@ class TransformationEstimationForColoredICP : public TransformationEstimation {
             TransformationEstimationType::ColoredICP;
 };

+/// \class TransformationEstimationForDopplerICP
+///
+/// This is the implementation of the following paper:
+/// B. Hexsel, H. Vhavle, Y. Chen,
+/// DICP: Doppler Iterative Closest Point Algorithm, RSS 2022.
+///
+/// Class to estimate a transformation matrix tensor of shape {4, 4}, dtype
+/// Float64, on CPU device for DopplerICP method.
+class TransformationEstimationForDopplerICP : public TransformationEstimation {
+public:
+    ~TransformationEstimationForDopplerICP() override{};
+
+    /// \brief Constructor.
+    ///
+    /// \param period Time period (in seconds) between the source and the
+    /// target point clouds. Default value: 0.1.
+    /// \param lambda_doppler `λ ∈ [0, 1]` in the overall energy `(1−λ)EG +
+    /// λED`. Refer to the documentation of DopplerICP for more information.
+    /// \param reject_dynamic_outliers Whether or not to reject dynamic point
+    /// outlier correspondences.
+    /// \param doppler_outlier_threshold Correspondences with Doppler error
+    /// greater than this threshold are rejected from optimization.
+    /// \param outlier_rejection_min_iteration Number of iterations of ICP
+    /// after which outlier rejection is enabled.
+    /// \param geometric_robust_loss_min_iteration Number of iterations of ICP
+    /// after which robust loss for geometric term kicks in.
+    /// \param doppler_robust_loss_min_iteration Number of iterations of ICP
+    /// after which robust loss for Doppler term kicks in.
+    /// \param geometric_kernel (optional) Any of the implemented statistical
+    /// robust kernels for outlier rejection for the geometric term.
+    /// \param doppler_kernel (optional) Any of the implemented statistical
+    /// robust kernels for outlier rejection for the Doppler term.
+    /// \param transform_vehicle_to_sensor The 4x4 extrinsic transformation
+    /// matrix between the vehicle and the sensor frames. Defaults to identity
+    /// transform.
+    explicit TransformationEstimationForDopplerICP(
+            const double period = 0.1,
+            const double lambda_doppler = 0.01,
+            const bool reject_dynamic_outliers = false,
+            const double doppler_outlier_threshold = 2.0,
+            const std::size_t outlier_rejection_min_iteration = 2,
+            const std::size_t geometric_robust_loss_min_iteration = 0,
+            const std::size_t doppler_robust_loss_min_iteration = 2,
+            const RobustKernel &geometric_kernel =
+                    RobustKernel(RobustKernelMethod::L2Loss, 1.0, 1.0),
+            const RobustKernel &doppler_kernel =
+                    RobustKernel(RobustKernelMethod::L2Loss, 1.0, 1.0),
+            const core::Tensor &transform_vehicle_to_sensor =
+                    core::Tensor::Eye(4, core::Float64, core::Device("CPU:0")))
+        : period_(period),
+          lambda_doppler_(lambda_doppler),
+          reject_dynamic_outliers_(reject_dynamic_outliers),
+          doppler_outlier_threshold_(doppler_outlier_threshold),
+          outlier_rejection_min_iteration_(outlier_rejection_min_iteration),
+          geometric_robust_loss_min_iteration_(
+                  geometric_robust_loss_min_iteration),
+          doppler_robust_loss_min_iteration_(doppler_robust_loss_min_iteration),
+          geometric_kernel_(geometric_kernel),
+          doppler_kernel_(doppler_kernel),
+          transform_vehicle_to_sensor_(transform_vehicle_to_sensor) {
+        core::AssertTensorShape(transform_vehicle_to_sensor, {4, 4});
+
+        if (std::abs(period) < kMinTimePeriod) {
+            utility::LogError("Time period too small.");
+        }
+
+        if (lambda_doppler_ < 0 || lambda_doppler_ > 1.0) {
+            lambda_doppler_ = 0.01;
+        }
+    }
+
+    TransformationEstimationType GetTransformationEstimationType()
+            const override {
+        return type_;
+    };
+
+public:
+    /// \brief Computes RMSE (double) for DopplerICP method, between two
+    /// pointclouds, given correspondences.
+    ///
+    /// \param source Source pointcloud. (Float32 or Float64 type).
+    /// \param target Target pointcloud. (Float32 or Float64 type). It must
+    /// contain normals of the same shape and dtype as the positions.
+    /// \param correspondences Tensor of type Int64 containing indices of
+    /// corresponding target points, where the value is the target index and the
+    /// index of the value itself is the source index. It contains -1 as value
+    /// at index with no correspondence.
+    double ComputeRMSE(const geometry::PointCloud &source,
+                       const geometry::PointCloud &target,
+                       const core::Tensor &correspondences) const override;
+
+    /// \brief Estimates the transformation matrix for DopplerICP method,
+    /// a tensor of shape {4, 4}, and dtype Float64 on CPU device.
+    ///
+    /// \param source Source pointcloud. (Float32 or Float64 type). It must
+    /// contain Doppler velocities ("dopplers") and pre-computed direction
+    /// vectors ("directions") as point attributes.
+    /// \param target Target pointcloud. (Float32 or Float64 type). It must
+    /// contain normals of the same shape and dtype as the positions.
+    /// \param correspondences Tensor of type Int64 containing indices of
+    /// corresponding target points, where the value is the target index and the
+    /// index of the value itself is the source index. It contains -1 as value
+    /// at index with no correspondence.
+    /// \param current_transform The current pose estimate of ICP.
+    /// \param iteration The current iteration number of the ICP algorithm.
+    /// \return transformation between source to target, a tensor of shape {4,
+    /// 4}, type Float64 on CPU device.
+    core::Tensor ComputeTransformation(
+            const geometry::PointCloud &source,
+            const geometry::PointCloud &target,
+            const core::Tensor &correspondences,
+            const core::Tensor &current_transform =
+                    core::Tensor::Eye(4, core::Float64, core::Device("CPU:0")),
+            const std::size_t iteration = 0) const override;
+
+public:
+    /// Time period (in seconds) between the source and the target point
+    /// clouds.
+    double period_{0.1};
+    /// Factor that weighs the Doppler residual term in the DICP objective.
+    double lambda_doppler_{0.01};
+    /// Whether or not to prune dynamic point outlier correspondences.
+    bool reject_dynamic_outliers_{false};
+    /// Correspondences with Doppler error greater than this threshold are
+    /// rejected from optimization.
+    double doppler_outlier_threshold_{2.0};
+    /// Number of iterations of ICP after which outlier rejection is enabled.
+    std::size_t outlier_rejection_min_iteration_{2};
+    /// Number of iterations of ICP after which robust loss for the geometric
+    /// term kicks in.
+    std::size_t geometric_robust_loss_min_iteration_{0};
+    /// Number of iterations of ICP after which robust loss for the Doppler
+    /// term kicks in.
+    std::size_t doppler_robust_loss_min_iteration_{2};
+    /// RobustKernel for outlier rejection of the geometric term.
+    RobustKernel geometric_kernel_ =
+            RobustKernel(RobustKernelMethod::L2Loss, 1.0, 1.0);
+    /// RobustKernel for outlier rejection of the Doppler term.
+    RobustKernel doppler_kernel_ =
+            RobustKernel(RobustKernelMethod::L2Loss, 1.0, 1.0);
+    /// The 4x4 extrinsic transformation matrix between the vehicle and the
+    /// sensor frames.
+    core::Tensor transform_vehicle_to_sensor_ =
+            core::Tensor::Eye(4, core::Float64, core::Device("CPU:0"));
+
+private:
+    const TransformationEstimationType type_ =
+            TransformationEstimationType::DopplerICP;
+};
+
 }  // namespace registration
 }  // namespace pipelines
 }  // namespace t
diff --git a/cpp/pybind/data/dataset.cpp b/cpp/pybind/data/dataset.cpp
index b86b40c5464..6526088fc5c 100644
--- a/cpp/pybind/data/dataset.cpp
+++ b/cpp/pybind/data/dataset.cpp
@@ -167,6 +167,39 @@ void pybind_demo_crop_pointcloud(py::module& m) {
                                     "cropped_json_path");
 }

+void pybind_demo_doppler_icp_sequence(py::module& m) {
+    // open3d.data.DemoDopplerICPSequence
+    py::class_<DemoDopplerICPSequence,
+               std::shared_ptr<DemoDopplerICPSequence>, DownloadDataset>
+            demo_doppler_icp_sequence(
+                    m, "DemoDopplerICPSequence",
+                    "Data class for `DemoDopplerICPSequence` contains an "
+                    "example sequence of 100 point clouds with Doppler "
+                    "velocity channel and corresponding ground truth poses. "
" + "The sequence was generated using the CARLA simulator."); + demo_doppler_icp_sequence + .def(py::init(), "data_root"_a = "") + .def_property_readonly( + "paths", &DemoDopplerICPSequence::GetPaths, + "Returns list of the point cloud paths in the sequence.") + .def_property_readonly( + "calibration_path", + &DemoDopplerICPSequence::GetCalibrationPath, + "Path to the calibration metadata file, containing " + "transformation between the vehicle and sensor frames and " + "the time period.") + .def_property_readonly( + "trajectory_path", + &DemoDopplerICPSequence::GetTrajectoryPath, + "Path to the ground truth poses for the entire sequence."); + docstring::ClassMethodDocInject(m, "DemoDopplerICPSequence", "paths"); + docstring::ClassMethodDocInject(m, "DemoDopplerICPSequence", + "calibration_path"); + docstring::ClassMethodDocInject(m, "DemoDopplerICPSequence", + "trajectory_path"); +} + void pybind_demo_feature_matching_point_clouds(py::module& m) { // open3d.data.DemoFeatureMatchingPointClouds py::class_, + TransformationEstimation> + te_dop(m, "TransformationEstimationForDopplerICP", + "Class to estimate a transformation between two point " + "clouds using color information"); + py::detail::bind_default_constructor( + te_dop); + py::detail::bind_copy_functions( + te_dop); + te_dop.def(py::init([](double period, double lambda_doppler, + bool reject_dynamic_outliers, + double doppler_outlier_threshold, + std::size_t outlier_rejection_min_iteration, + std::size_t geometric_robust_loss_min_iteration, + std::size_t doppler_robust_loss_min_iteration, + RobustKernel &geometric_kernel, + RobustKernel &doppler_kernel, + core::Tensor &transform_vehicle_to_sensor) { + return new TransformationEstimationForDopplerICP( + period, lambda_doppler, reject_dynamic_outliers, + doppler_outlier_threshold, + outlier_rejection_min_iteration, + geometric_robust_loss_min_iteration, + doppler_robust_loss_min_iteration, geometric_kernel, + doppler_kernel, transform_vehicle_to_sensor); + }), + "period"_a, "lambda_doppler"_a, "reject_dynamic_outliers"_a, + "doppler_outlier_threshold"_a, + "outlier_rejection_min_iteration"_a, + "geometric_robust_loss_min_iteration"_a, + "doppler_robust_loss_min_iteration"_a, "goemetric_kernel"_a, + "doppler_kernel"_a, "transform_vehicle_to_sensor"_a) + .def(py::init([](const double lambda_doppler) { + return new TransformationEstimationForDopplerICP( + lambda_doppler); + }), + "lambda_doppler"_a) + .def("compute_transformation", + py::overload_cast( + &TransformationEstimationForDopplerICP:: + ComputeTransformation, + py::const_), + "Compute transformation from source to target point cloud " + "given correspondences") + .def("__repr__", + [](const TransformationEstimationForDopplerICP &te) { + return std::string( + "TransformationEstimationForDopplerICP " + "with lambda_doppler: ") + + std::to_string(te.lambda_doppler_); + }) + .def_readwrite("period", + &TransformationEstimationForDopplerICP::period_, + "Time period (in seconds) between the source and " + "the target point clouds.") + .def_readwrite( + "lambda_doppler", + &TransformationEstimationForDopplerICP::lambda_doppler_, + "`λ ∈ [0, 1]` in the overall energy `(1−λ)EG + λED`. 
+                    "Refer to the documentation of DopplerICP for more "
+                    "information.")
+            .def_readwrite("reject_dynamic_outliers",
+                           &TransformationEstimationForDopplerICP::
+                                   reject_dynamic_outliers_,
+                           "Whether or not to reject dynamic point outlier "
+                           "correspondences.")
+            .def_readwrite("doppler_outlier_threshold",
+                           &TransformationEstimationForDopplerICP::
+                                   doppler_outlier_threshold_,
+                           "Correspondences with Doppler error greater than "
+                           "this threshold are rejected from optimization.")
+            .def_readwrite("outlier_rejection_min_iteration",
+                           &TransformationEstimationForDopplerICP::
+                                   outlier_rejection_min_iteration_,
+                           "Number of iterations of ICP after which outlier "
+                           "rejection is enabled.")
+            .def_readwrite("geometric_robust_loss_min_iteration",
+                           &TransformationEstimationForDopplerICP::
+                                   geometric_robust_loss_min_iteration_,
+                           "Minimum iterations after which Robust Kernel is "
+                           "used for the Geometric error")
+            .def_readwrite("doppler_robust_loss_min_iteration",
+                           &TransformationEstimationForDopplerICP::
+                                   doppler_robust_loss_min_iteration_,
+                           "Minimum iterations after which Robust Kernel is "
+                           "used for the Doppler error")
+            .def_readwrite(
+                    "geometric_kernel",
+                    &TransformationEstimationForDopplerICP::geometric_kernel_,
+                    "Robust Kernel used in the Geometric Error Optimization")
+            .def_readwrite(
+                    "doppler_kernel",
+                    &TransformationEstimationForDopplerICP::doppler_kernel_,
+                    "Robust Kernel used in the Doppler Error Optimization")
+            .def_readwrite("transform_vehicle_to_sensor",
+                           &TransformationEstimationForDopplerICP::
+                                   transform_vehicle_to_sensor_,
+                           "The 4x4 extrinsic transformation matrix between "
+                           "the vehicle and the sensor frames.");
 }

 // Registration functions have similar arguments, sharing arg docstrings.
diff --git a/cpp/tests/t/io/PointCloudIO.cpp b/cpp/tests/t/io/PointCloudIO.cpp
index bcb1ea3a659..46888f05416 100644
--- a/cpp/tests/t/io/PointCloudIO.cpp
+++ b/cpp/tests/t/io/PointCloudIO.cpp
@@ -54,12 +54,14 @@ const std::unordered_map pc_data_1{
         {"colors",
          {{0.5, 0.3, 0.2, 0, 1, 0.5, 0.6, 0, 1, 1, 0.5, 0.7, 0.3, 0, 0.5},
           {5, 3}}},
+        {"dopplers", {{1, 0.9, 0.8, 0.7, 0.6}, {5, 1}}},
 };

 // Bad data.
 const std::unordered_map pc_data_bad{
         {"positions", {{0, 0, 0, 1, 0, 0}, {2, 3}}},
         {"intensities", {{0}, {1, 1}}},
+        {"dopplers", {{0}, {1, 1}}},
 };

 const std::vector pcArgs({
@@ -75,6 +77,10 @@ const std::vector pcArgs({
          IsAscii::ASCII,
          Compressed::UNCOMPRESSED,
          {{"positions", 1e-5}, {"intensities", 1e-5}, {"colors", 1e-5}}},  // 2
+        {"test.xyzd",
+         IsAscii::ASCII,
+         Compressed::UNCOMPRESSED,
+         {{"positions", 1e-5}, {"dopplers", 1e-5}}},  // 3
 });

 class ReadWriteTPC : public testing::TestWithParam {};
diff --git a/cpp/tests/t/pipelines/registration/Registration.cpp b/cpp/tests/t/pipelines/registration/Registration.cpp
index d0f94966bc1..75fd20a530d 100644
--- a/cpp/tests/t/pipelines/registration/Registration.cpp
+++ b/cpp/tests/t/pipelines/registration/Registration.cpp
@@ -283,6 +283,119 @@ TEST_P(RegistrationPermuteDevices, ICPColored) {
     }
 }

+core::Tensor ComputeDirectionVectors(const core::Tensor& positions) {
+    core::Tensor directions = core::Tensor::Empty(
+            positions.GetShape(), positions.GetDtype(), positions.GetDevice());
+    for (int64_t i = 0; i < positions.GetLength(); ++i) {
+        // Compute the norm of the position vector.
+        core::Tensor norm = (positions[i][0] * positions[i][0] +
+                             positions[i][1] * positions[i][1] +
+                             positions[i][2] * positions[i][2])
+                                    .Sqrt();
+
+        // If the norm is zero, set the direction vector to zero.
+        if (norm.Item<double>() == 0.0) {
+            directions[i].Fill(0.0);
+        } else {
+            // Otherwise, compute the direction vector by dividing the position
+            // vector by its norm.
+            directions[i] = positions[i] / norm;
+        }
+    }
+    return directions;
+}
+
+static std::tuple<t::geometry::PointCloud,
+                  t::geometry::PointCloud,
+                  core::Tensor,
+                  core::Tensor,
+                  double,
+                  double>
+GetDopplerICPRegistrationTestData(core::Dtype& dtype, core::Device& device) {
+    t::geometry::PointCloud source_tpcd, target_tpcd;
+    data::DemoDopplerICPSequence demo_sequence;
+    t::io::ReadPointCloud(demo_sequence.GetPath(0), source_tpcd);
+    t::io::ReadPointCloud(demo_sequence.GetPath(1), target_tpcd);
+
+    source_tpcd.SetPointAttr(
+            "directions",
+            ComputeDirectionVectors(source_tpcd.GetPointPositions()));
+
+    source_tpcd = source_tpcd.To(device).UniformDownSample(5);
+    target_tpcd = target_tpcd.To(device).UniformDownSample(5);
+
+    Eigen::Matrix4d calibration{Eigen::Matrix4d::Identity()};
+    double period{0.0};
+    demo_sequence.GetCalibration(calibration, period);
+
+    // Calibration transformation input.
+    const core::Tensor calibration_t =
+            core::eigen_converter::EigenMatrixToTensor(calibration)
+                    .To(device, dtype);
+
+    // Get the ground truth pose for the pair <0, 1> (on CPU:0).
+    auto trajectory = demo_sequence.GetTrajectory();
+    const core::Tensor pose_t =
+            core::eigen_converter::EigenMatrixToTensor(trajectory[1].second);
+
+    const double max_correspondence_dist = 0.3;
+    const double normals_search_radius = 10.0;
+    const int normals_max_neighbors = 30;
+
+    target_tpcd.EstimateNormals(normals_search_radius, normals_max_neighbors);
+
+    return std::make_tuple(source_tpcd, target_tpcd, calibration_t, pose_t,
+                           period, max_correspondence_dist);
+}
+
+TEST_P(RegistrationPermuteDevices, ICPDoppler) {
+    core::Device device = GetParam();
+
+    for (auto dtype : {core::Float32, core::Float64}) {
+        // Get data and parameters.
+        t::geometry::PointCloud source_tpcd, target_tpcd;
+        // Calibration transformation input.
+        core::Tensor calibration_t;
+        // Ground truth pose.
+        core::Tensor pose_t;
+        // Time period between each point cloud scan.
+        double period{0.0};
+        // Search radius.
+        double max_correspondence_dist{0.0};
+        std::tie(source_tpcd, target_tpcd, calibration_t, pose_t, period,
+                 max_correspondence_dist) =
+                GetDopplerICPRegistrationTestData(dtype, device);
+
+        const double relative_fitness = 1e-6;
+        const double relative_rmse = 1e-6;
+        const int max_iterations = 20;
+
+        t_reg::TransformationEstimationForDopplerICP estimation_dicp;
+        estimation_dicp.period_ = period;
+        estimation_dicp.transform_vehicle_to_sensor_ = calibration_t;
+
+        // DopplerICP - Tensor.
+        t_reg::RegistrationResult reg_doppler_t = t_reg::ICP(
+                source_tpcd, target_tpcd, max_correspondence_dist,
+                core::Tensor::Eye(4, dtype, device), estimation_dicp,
+                t_reg::ICPConvergenceCriteria(relative_fitness, relative_rmse,
+                                              max_iterations),
+                -1.0);
+
+        core::Tensor estimated_pose =
+                t::pipelines::kernel::TransformationToPose(
+                        reg_doppler_t.transformation_.Inverse());
+        core::Tensor expected_pose =
+                t::pipelines::kernel::TransformationToPose(pose_t);
+
+        const double pose_diff =
+                (expected_pose - estimated_pose).Abs().Sum({0}).Item<double>();
+
+        EXPECT_NEAR(reg_doppler_t.fitness_ - 0.9, 0.0, 0.05);
+        EXPECT_NEAR(pose_diff - 0.017, 0.0, 0.005);
+    }
+}
+
 TEST_P(RegistrationPermuteDevices, RobustKernel) {
     double scaling_parameter = 1.0;
     double shape_parameter = 1.0;
diff --git a/examples/cpp/CMakeLists.txt b/examples/cpp/CMakeLists.txt
index e09c0edaf78..f98562acdd5 100644
--- a/examples/cpp/CMakeLists.txt
+++ b/examples/cpp/CMakeLists.txt
@@ -53,6 +53,7 @@ open3d_add_example(PoseGraph)
 open3d_add_example(ProgramOptions)
 open3d_add_example(GeneralizedICP)
 open3d_add_example(RegistrationColoredICP)
+open3d_add_example(RegistrationDopplerICP)
 open3d_add_example(RegistrationRANSAC)
 open3d_add_example(RegistrationFGR)
 open3d_add_example(RGBDOdometry)
diff --git a/examples/cpp/RegistrationDopplerICP.cpp b/examples/cpp/RegistrationDopplerICP.cpp
new file mode 100644
index 00000000000..331bd8dfd56
--- /dev/null
+++ b/examples/cpp/RegistrationDopplerICP.cpp
@@ -0,0 +1,190 @@
+// ----------------------------------------------------------------------------
+// -                        Open3D: www.open3d.org                            -
+// ----------------------------------------------------------------------------
+// Copyright (c) 2018-2023 www.open3d.org
+// SPDX-License-Identifier: MIT
+// ----------------------------------------------------------------------------
+
+// Program that runs the tensor version of Doppler ICP registration.
+//
+// A sample sequence of Doppler ICP point clouds (in XYZD format) is provided
+// in open3d.data.DemoDopplerICPSequence.
+//
+// This is the implementation of the following paper:
+// B. Hexsel, H. Vhavle, Y. Chen,
+// DICP: Doppler Iterative Closest Point Algorithm, RSS 2022.
+
+#include <cstdlib>
+#include <memory>
+#include <string>
+
+#include "open3d/Open3D.h"
+
+using namespace open3d;
+
+namespace {
+
+// Parameters for Doppler ICP registration.
+constexpr double kLambdaDoppler{0.01};
+constexpr bool kRejectDynamicOutliers{false};
+constexpr double kDopplerOutlierThreshold{2.0};
+constexpr std::size_t kOutlierRejectionMinIteration{2};
+constexpr std::size_t kGeometricRobustLossMinIteration{0};
+constexpr std::size_t kDopplerRobustLossMinIteration{2};
+constexpr double kTukeyLossScale{0.5};
+constexpr double kNormalsSearchRadius{10.0};
+constexpr int kNormalsMaxNeighbors{30};
+constexpr double kMaxCorrespondenceDist{0.3};
+constexpr std::size_t kUniformDownsampleFactor{2};
+constexpr double kFitnessEpsilon{1e-6};
+constexpr std::size_t kMaxIters{200};
+
+}  // namespace
+
+void VisualizeRegistration(const geometry::PointCloud &source,
+                           const geometry::PointCloud &target,
+                           const Eigen::Matrix4d &Transformation) {
+    std::shared_ptr<geometry::PointCloud> source_transformed_ptr(
+            new geometry::PointCloud);
+    std::shared_ptr<geometry::PointCloud> target_ptr(new geometry::PointCloud);
+    *source_transformed_ptr = source;
+    *target_ptr = target;
+    source_transformed_ptr->Transform(Transformation);
+    visualization::DrawGeometries({source_transformed_ptr, target_ptr},
+                                  "Registration result");
+}
+
+core::Tensor ComputeDirectionVectors(const core::Tensor &positions) {
+    utility::LogDebug("ComputeDirectionVectors");
+
+    core::Tensor directions = core::Tensor::Empty(
+            positions.GetShape(), positions.GetDtype(), positions.GetDevice());
+
+    for (int64_t i = 0; i < positions.GetLength(); ++i) {
+        // Compute the norm of the position vector.
+        core::Tensor norm = (positions[i][0] * positions[i][0] +
+                             positions[i][1] * positions[i][1] +
+                             positions[i][2] * positions[i][2])
+                                    .Sqrt();
+
+        // If the norm is zero, set the direction vector to zero.
+        if (norm.Item<double>() == 0.0) {
+            directions[i].Fill(0.0);
+        } else {
+            // Otherwise, compute the direction vector by dividing the position
+            // vector by its norm.
+            directions[i] = positions[i] / norm;
+        }
+    }
+
+    return directions;
+}
+
+void PrintHelp() {
+    PrintOpen3DVersion();
+    // clang-format off
+    utility::LogInfo("Usage:");
+    utility::LogInfo("    > RegistrationDopplerICP source_idx target_idx [--visualize --cuda]");
+    // clang-format on
+    utility::LogInfo("");
+}
+
+int main(int argc, char *argv[]) {
+    utility::SetVerbosityLevel(utility::VerbosityLevel::Debug);
+
+    if (argc < 3 ||
+        utility::ProgramOptionExistsAny(argc, argv, {"-h", "--help"})) {
+        PrintHelp();
+        return 1;
+    }
+
+    bool visualize = false;
+    if (utility::ProgramOptionExists(argc, argv, "--visualize")) {
+        visualize = true;
+    }
+
+    // Device.
+    core::Device device("CPU:0");
+    if (utility::ProgramOptionExistsAny(argc, argv, {"--cuda"})) {
+        device = core::Device("CUDA:0");
+    }
+    utility::LogInfo("Selected device: {}", device.ToString());
+
+    data::DemoDopplerICPSequence demo_sequence;
+
+    t::geometry::PointCloud source;
+    t::geometry::PointCloud target;
+
+    // Read point clouds, t::io::ReadPointCloud copies the pointcloud to CPU.
+    t::io::ReadPointCloud(demo_sequence.GetPath(std::stoi(argv[1])), source,
+                          {"auto", false, false, true});
+    t::io::ReadPointCloud(demo_sequence.GetPath(std::stoi(argv[2])), target,
+                          {"auto", false, false, true});
+
+    // Set direction vectors.
+ source.SetPointAttr("directions", + ComputeDirectionVectors(source.GetPointPositions())); + + source = source.To(device).UniformDownSample(kUniformDownsampleFactor); + target = target.To(device).UniformDownSample(kUniformDownsampleFactor); + + target.EstimateNormals(kNormalsSearchRadius, kNormalsMaxNeighbors); + + Eigen::Matrix4d calibration{Eigen::Matrix4d::Identity()}; + double period{0.0}; + demo_sequence.GetCalibration(calibration, period); + + // Vehicle to sensor frame calibration (T_V_S). + const core::Tensor calibration_vehicle_to_sensor = + core::eigen_converter::EigenMatrixToTensor(calibration).To(device); + + // Initial transform from source to target, to initialize ICP. + core::Tensor initial_transform = + core::Tensor::Eye(4, core::Dtype::Float64, device); + + auto kernel = t::pipelines::registration::RobustKernel( + t::pipelines::registration::RobustKernelMethod::TukeyLoss, + kTukeyLossScale); + + t::pipelines::registration::RegistrationResult result = + t::pipelines::registration::ICP( + source, target, kMaxCorrespondenceDist, initial_transform, + t::pipelines::registration:: + TransformationEstimationForDopplerICP( + period, kLambdaDoppler, + kRejectDynamicOutliers, + kDopplerOutlierThreshold, + kOutlierRejectionMinIteration, + kGeometricRobustLossMinIteration, + kDopplerRobustLossMinIteration, kernel, + kernel, calibration_vehicle_to_sensor), + t::pipelines::registration::ICPConvergenceCriteria( + kFitnessEpsilon, kFitnessEpsilon, kMaxIters)); + + if (visualize) { + VisualizeRegistration(source.ToLegacy(), target.ToLegacy(), + core::eigen_converter::TensorToEigenMatrixXd( + result.transformation_)); + } + + // Get the ground truth pose. + auto trajectory = demo_sequence.GetTrajectory(); + const core::Tensor pose_source = core::eigen_converter::EigenMatrixToTensor( + trajectory[std::stoi(argv[1])].second); + const core::Tensor pose_target = core::eigen_converter::EigenMatrixToTensor( + trajectory[std::stoi(argv[2])].second); + const core::Tensor target_to_source = + pose_target.Inverse().Matmul(pose_source); + + utility::LogInfo( + "Estimated pose [rx ry rz tx ty tz]: \n{}", + t::pipelines::kernel::TransformationToPose(result.transformation_) + .ToString(false)); + + utility::LogInfo( + "Ground truth pose [rx ry rz tx ty tz]: \n{}", + t::pipelines::kernel::TransformationToPose(target_to_source) + .ToString(false)); + + return EXIT_SUCCESS; +} diff --git a/examples/python/pipelines/doppler_icp_registration.py b/examples/python/pipelines/doppler_icp_registration.py new file mode 100644 index 00000000000..5fface38e6e --- /dev/null +++ b/examples/python/pipelines/doppler_icp_registration.py @@ -0,0 +1,235 @@ +# ---------------------------------------------------------------------------- +# - Open3D: www.open3d.org - +# ---------------------------------------------------------------------------- +# Copyright (c) 2018-2023 www.open3d.org +# SPDX-License-Identifier: MIT +# ---------------------------------------------------------------------------- +"""Example script to run Doppler ICP point cloud registration. + +This script runs Doppler ICP and point-to-plane ICP on DemoDopplerICPSequence. + +This is the implementation of the following paper: +B. Hexsel, H. Vhavle, Y. Chen, +DICP: Doppler Iterative Closest Point Algorithm, RSS 2022. 
+ +Usage: +python doppler_icp_registration.py [-h] \ + --source SOURCE --target TARGET [--device {cpu,cuda}] +""" + +import argparse +import json +import os + +import numpy as np +import open3d as o3d +import open3d.t.pipelines.registration as o3d_reg +from pyquaternion import Quaternion + + +def translation_quaternion_to_transform(translation, + quaternion, + inverse=False, + quat_xyzw=False): + """Converts translation and WXYZ quaternion to a transformation matrix. + + Args: + translation: (3,) ndarray representing the translation vector. + quaternion: (4,) ndarray representing the quaternion. + inverse: If True, returns the inverse transformation. + quat_xyzw: If True, this indicates that quaternion is in XYZW format. + + Returns: + (4, 4) ndarray representing the transformation matrix. + """ + if quat_xyzw: + quaternion = np.roll(quaternion, 1) + transform = Quaternion(quaternion).transformation_matrix # [w, x, y, z] + transform[:3, -1] = translation # [x, y, z] + return np.linalg.inv(transform) if inverse else transform + + +def load_tum_file(filename): + """Loads poses in TUM RGBD format: [timestamp, x, y, z, qx, qy, qz, qw]. + + Args: + filename (string): Path to the TUM poses file. + + Returns: + A tuple containing an array of 4x4 poses and timestamps. + """ + # Load the TUM text file. + data = np.loadtxt(filename, delimiter=' ') + print('Loaded %d poses from %s (%.2f secs)' % + (len(data), os.path.basename(filename), data[-1][0] - data[0][0])) + + # Parse timestamps and poses. + timestamps = data[:, 0] + poses = np.array([ + translation_quaternion_to_transform(tq[:3], tq[3:], quat_xyzw=True) + for tq in data[:, 1:] + ]) + return poses, timestamps + + +def get_calibration(demo_sequence): + """Returns the vehicle to sensor calibration transformation and the time + period (in secs) between sequential point cloud scans. + + Args: + demo_sequence (DemoDopplerICPSequence): Doppler ICP dataset. + + Returns: + A tuple of 4x4 array representing the transform, and the period. + """ + with open(demo_sequence.calibration_path) as f: + data = json.load(f) + + transform_vehicle_to_sensor = np.array( + data['transform_vehicle_to_sensor']).reshape(4, 4) + period = data['period'] + + return transform_vehicle_to_sensor, period + + +def get_trajectory(demo_sequence): + """Returns the ground truth trajectory of the dataset. + + Args: + demo_sequence (DemoDopplerICPSequence): Doppler ICP dataset. + + Returns: + An array of 4x4 poses for this sequence. + """ + return load_tum_file(demo_sequence.trajectory_path)[0] + + +def get_ground_truth_pose(demo_sequence, source_idx, target_idx): + """Returns the ground truth poses from the dataset. + + Args: + demo_sequence (DemoDopplerICPSequence): Doppler ICP dataset. + source_idx (int): Index of the source point cloud pose. + target_idx (int): Index of the target point cloud pose. + + Returns: + 4x4 array representing the transformation between target and source. + """ + poses = get_trajectory(demo_sequence) + return np.linalg.inv(poses[target_idx]) @ poses[source_idx] + + +def run_doppler_icp(args): + """Runs Doppler ICP on a given pair of point clouds. + + Args: + args: Command line arguments. + """ + # Setup data type and device. + dtype = o3d.core.float32 + device = o3d.core.Device('CUDA:0' if args.device == 'cuda' else 'CPU:0') + + # Load the point clouds. 
+    demo_sequence = o3d.data.DemoDopplerICPSequence()
+    source = o3d.t.io.read_point_cloud(demo_sequence.paths[args.source])
+    target = o3d.t.io.read_point_cloud(demo_sequence.paths[args.target])
+
+    # Load the calibration parameters.
+    transform_vehicle_to_sensor, period = get_calibration(demo_sequence)
+
+    # Downsample the pointcloud.
+    source_in_S = source.uniform_down_sample(5)
+    target_in_S = target.uniform_down_sample(5)
+
+    # Transform the Open3D point cloud from sensor to vehicle frame.
+    source_in_V = source_in_S.to(device).transform(transform_vehicle_to_sensor)
+    target_in_V = target_in_S.to(device).transform(transform_vehicle_to_sensor)
+
+    # Move tensor to device.
+    init_transform = o3d.core.Tensor(np.eye(4), device=device)
+    transform_vehicle_to_sensor = o3d.core.Tensor(transform_vehicle_to_sensor,
+                                                  device=device)
+
+    # Compute normals for target.
+    target_in_V.estimate_normals(radius=10.0, max_nn=30)
+
+    # Compute direction vectors on the source point cloud in the sensor frame.
+    directions = source_in_S.point.positions.numpy()
+    norms = np.tile(np.linalg.norm(directions, axis=1), (3, 1)).T
+    directions = directions / norms
+    source_in_V.point['directions'] = o3d.core.Tensor(directions, dtype,
+                                                      device)
+
+    # Setup robust kernels.
+    kernel = o3d_reg.robust_kernel.RobustKernel(
+        o3d_reg.robust_kernel.RobustKernelMethod.TukeyLoss,
+        scaling_parameter=0.5)
+
+    # Setup convergence criteria.
+    criteria = o3d_reg.ICPConvergenceCriteria(relative_fitness=1e-6,
+                                              relative_rmse=1e-6,
+                                              max_iteration=200)
+
+    # Setup transformation estimator.
+    estimator_p2l = o3d_reg.TransformationEstimationPointToPlane(kernel)
+    estimator_dicp = o3d_reg.TransformationEstimationForDopplerICP(
+        period=period * (args.target - args.source),
+        lambda_doppler=0.01,
+        reject_dynamic_outliers=False,
+        doppler_outlier_threshold=2.0,
+        outlier_rejection_min_iteration=2,
+        geometric_robust_loss_min_iteration=0,
+        doppler_robust_loss_min_iteration=2,
+        geometric_kernel=kernel,
+        doppler_kernel=kernel,
+        transform_vehicle_to_sensor=transform_vehicle_to_sensor)
+
+    # Run Doppler ICP and point-to-plane ICP registration for comparison.
+    max_neighbor_distance = 0.3
+    results = [
+        o3d_reg.icp(source_in_V, target_in_V, max_neighbor_distance,
+                    init_transform, estimator, criteria)
+        for estimator in [estimator_p2l, estimator_dicp]
+    ]
+
+    # Display the poses.
+    np.set_printoptions(suppress=True, precision=4)
+    print('Estimated pose from Point-to-Plane ICP [%s iterations]:' %
+          results[0].num_iterations)
+    print(results[0].transformation.numpy())
+
+    print('\nEstimated pose from Doppler ICP [%s iterations]:' %
+          results[1].num_iterations)
+    print(results[1].transformation.numpy())
+
+    print('\nGround truth pose:')
+    print(get_ground_truth_pose(demo_sequence, args.source, args.target))
+
+
+def parse_args():
+    """Parses the command line arguments.
+
+    Returns:
+        The parsed command line arguments.
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--source',
+                        '-s',
+                        type=int,
+                        required=True,
+                        help='Source point cloud index')
+    parser.add_argument('--target',
+                        '-t',
+                        type=int,
+                        required=True,
+                        help='Target point cloud index')
+    parser.add_argument('--device',
+                        '-d',
+                        default='cpu',
+                        help='Device backend for the tensor',
+                        choices=['cpu', 'cuda'])
+
+    return parser.parse_args()
+
+
+if __name__ == '__main__':
+    args = parse_args()
+    run_doppler_icp(args)
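A note on the rotation-to-Euler conversion in TransformationToPoseImpl above: the kernel recovers [rx, ry, rz] from the row-major 4x4 transform via the standard ZYX decomposition, falling back to a gimbal-lock branch when sqrt(T00^2 + T10^2) vanishes; the translation entries of the pose are filled in by the parent TransformationToPose. A minimal NumPy sketch of the same arithmetic (the transformation_to_pose helper name is hypothetical, for illustration only):

    import numpy as np

    def transformation_to_pose(T):
        """4x4 row-major transform -> [rx, ry, rz, tx, ty, tz]."""
        sy = np.hypot(T[0, 0], T[1, 0])
        if sy >= 1e-6:
            rx = np.arctan2(T[2, 1], T[2, 2])
            ry = np.arctan2(-T[2, 0], sy)
            rz = np.arctan2(T[1, 0], T[0, 0])
        else:
            # Near gimbal lock: pitch is ~ +/-90 deg, yaw folds into roll.
            rx = np.arctan2(-T[1, 2], T[1, 1])
            ry = np.arctan2(-T[2, 0], sy)
            rz = 0.0
        return np.array([rx, ry, rz, T[0, 3], T[1, 3], T[2, 3]])

The array indices mirror the flat transformation_ptr offsets in the CPU/CUDA kernel (0 -> T[0,0], 4 -> T[1,0], 8 -> T[2,0], 9 -> T[2,1], 10 -> T[2,2]).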
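The DISPATCH_DUAL_ROBUST_KERNEL_FUNCTION macro instantiates one IRLS weight function per residual type: for TukeyLoss the weight is w(r) = (1 - min(1, |r|/k)^2)^2 and for L2Loss it is identically 1, which is exactly what the four branches above expand to. A NumPy sketch of the two weights (helper names are hypothetical):

    import numpy as np

    def tukey_weight(residual, scale):
        # w(r) = (1 - min(1, |r|/k)^2)^2; zero beyond the cutoff k.
        r = np.minimum(1.0, np.abs(residual) / scale)
        return (1.0 - r ** 2) ** 2

    def l2_weight(residual):
        # Plain least squares: every residual keeps unit weight.
        return np.ones_like(residual)

    # The dual dispatch pairs one weight per term; both example programs use
    # Tukey for the geometric and the Doppler residuals (scale 0.5).
    geometric_w = tukey_weight(np.array([0.1, 0.4, 2.0]), scale=0.5)
    doppler_w = tukey_weight(np.array([0.3, -0.2, 0.05]), scale=0.5)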
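The correspondence convention used throughout the tensor pipeline (correspondences[i] holds the target index matched to source point i, with -1 for unmatched points) makes TransformationEstimationForDopplerICP::ComputeRMSE a straight masked reduction. A NumPy re-statement of the same tensor ops, assuming (N, 3) float arrays and an (N,) int array of correspondences:

    import numpy as np

    def doppler_icp_rmse(source_pts, target_pts, target_normals, corres):
        valid = corres != -1            # mask of matched source points
        nbr = corres[valid]             # target indices of those matches
        delta = source_pts[valid] - target_pts[nbr]
        # Element-wise product with the normals, squared and summed over all
        # entries, matching the Mul_/Mul_/Sum chain in the C++ kernel.
        err = (delta * target_normals[nbr]) ** 2
        return np.sqrt(err.sum() / nbr.shape[0])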
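For reference, the objective optimized by TransformationEstimationForDopplerICP blends the point-to-plane term with the Doppler term through lambda_doppler; in the notation of the docstrings above (EG, ED), and writing the geometric residual explicitly:

    E(\mathbf{T}) = (1-\lambda)\, E_G + \lambda\, E_D,
    \qquad
    E_G = \sum_i \rho_G\!\left(\mathbf{n}_i^{\top}(\mathbf{T}\,\mathbf{p}_i - \mathbf{q}_i)\right)

Here rho_G and rho_D are the robust losses selected by the dual-kernel dispatch. The Doppler residual inside ED compares each measured radial velocity ("dopplers") against the value predicted from the current motion estimate along the precomputed direction vector; see the DICP paper (RSS 2022) for its exact form, which this note does not restate.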
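ComputeDirectionVectors appears three times above (benchmark, test, and C++ example) as a per-point loop, and the Python example inlines the same math with np.linalg.norm. The operation is simply a row-wise normalization with zero-norm points mapped to zero vectors; a vectorized sketch, assuming a float (N, 3) positions array:

    import numpy as np

    def compute_direction_vectors(positions):
        # Unit vectors from the sensor origin to each point; rows with zero
        # norm stay zero instead of triggering a division by zero.
        norms = np.linalg.norm(positions, axis=1, keepdims=True)
        return np.divide(positions, norms,
                         out=np.zeros_like(positions),
                         where=(norms != 0.0))

The vectors must be computed in the sensor frame: the Python example derives them from source_in_S before assigning them to the vehicle-frame cloud source_in_V.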
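Two small conventions in the example scripts are worth making explicit: ground-truth relative poses come from composing the world-frame TUM poses, and the scan period is scaled by the index gap when registering non-adjacent clouds. In NumPy terms, mirroring get_ground_truth_pose and the estimator setup above (helper names hypothetical):

    import numpy as np

    def relative_pose(poses, source_idx, target_idx):
        # T_target<-source from world-frame 4x4 poses W_i.
        return np.linalg.inv(poses[target_idx]) @ poses[source_idx]

    def effective_period(period, source_idx, target_idx):
        # DemoDopplerICPSequence scans are evenly spaced in time, so the
        # period between scans s and t is the base period times (t - s).
        return period * (target_idx - source_idx)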