From 7fc0f740f8b459627561ce91c9bff40af47a9285 Mon Sep 17 00:00:00 2001 From: Ariel Lowenstein Date: Wed, 23 Oct 2019 13:44:19 +0300 Subject: [PATCH 01/31] DSO-13626 Sensor refactoring - Add Synthetic Sensor class. - Replace unpackers with processing blocks. - Add composite processing blocks. --- include/librealsense2/h/rs_frame.h | 15 + include/librealsense2/h/rs_sensor.h | 5 + include/librealsense2/hpp/rs_processing.hpp | 25 +- src/algo.cpp | 10 +- src/algo.h | 11 +- src/context.cpp | 67 +- src/core/advanced_mode.h | 18 +- src/core/processing.h | 6 + src/core/streaming.h | 9 +- src/device.cpp | 4 +- src/device.h | 2 +- src/ds5/advanced_mode/advanced_mode.cpp | 47 +- src/ds5/ds5-active.cpp | 2 +- src/ds5/ds5-color.cpp | 83 +- src/ds5/ds5-color.h | 15 +- src/ds5/ds5-device.cpp | 224 ++-- src/ds5/ds5-device.h | 14 +- src/ds5/ds5-motion.cpp | 231 ++-- src/ds5/ds5-motion.h | 9 +- src/ds5/ds5-nonmonochrome.cpp | 22 +- src/ds5/ds5-options.cpp | 42 +- src/ds5/ds5-options.h | 12 +- src/ds5/ds5-timestamp.cpp | 83 +- src/ds5/ds5-timestamp.h | 22 +- src/frame-archive.h | 3 +- src/gl/rs-gl.cpp | 4 +- src/global_timestamp_reader.cpp | 14 +- src/global_timestamp_reader.h | 6 +- src/image.cpp | 1070 +--------------- src/image.h | 62 +- src/ivcam/sr300.cpp | 196 ++- src/ivcam/sr300.h | 188 ++- src/l500/l500-color.cpp | 49 +- src/l500/l500-color.h | 19 +- src/l500/l500-depth.cpp | 63 +- src/l500/l500-depth.h | 12 +- src/l500/l500-device.cpp | 111 +- src/l500/l500-device.h | 10 +- src/l500/l500-motion.cpp | 75 +- src/l500/l500-motion.h | 2 +- src/l500/l500-private.h | 26 +- src/media/record/record_device.cpp | 4 +- src/pipeline/config.cpp | 2 +- src/pipeline/resolver.h | 2 +- src/proc/CMakeLists.txt | 18 +- src/proc/auto-exposure-processor.cpp | 31 + src/proc/auto-exposure-processor.h | 27 + src/proc/color-formats-converter.cpp | 697 +++++++++++ src/proc/color-formats-converter.h | 64 + src/proc/colorizer.cpp | 2 +- src/proc/depth-formats-converter.cpp | 139 +++ src/proc/depth-formats-converter.h | 45 + src/proc/identity-processing-block.cpp | 17 + src/proc/identity-processing-block.h | 16 + src/proc/motion-transform.cpp | 119 ++ src/proc/motion-transform.h | 47 + src/proc/processing-blocks-factory.cpp | 72 ++ src/proc/processing-blocks-factory.h | 28 + src/proc/rotation-transform.cpp | 162 +++ src/proc/rotation-transform.h | 50 + src/proc/synthetic-stream.cpp | 231 +++- src/proc/synthetic-stream.h | 111 ++ src/proc/y12i-to-y16y16.cpp | 35 + src/proc/y12i-to-y16y16.h | 21 + src/proc/y8i-to-y8y8.cpp | 37 + src/proc/y8i-to-y8y8.h | 19 + src/proc/yuy2rgb.cpp | 49 - src/proc/yuy2rgb.h | 29 - src/rs.cpp | 19 +- src/sensor.cpp | 1240 ++++++++++++------- src/sensor.h | 174 ++- src/stream.h | 35 +- src/types.cpp | 8 +- src/types.h | 92 +- unit-tests/unit-tests-live.cpp | 80 ++ 75 files changed, 4119 insertions(+), 2491 deletions(-) create mode 100644 src/proc/auto-exposure-processor.cpp create mode 100644 src/proc/auto-exposure-processor.h create mode 100644 src/proc/color-formats-converter.cpp create mode 100644 src/proc/color-formats-converter.h create mode 100644 src/proc/depth-formats-converter.cpp create mode 100644 src/proc/depth-formats-converter.h create mode 100644 src/proc/identity-processing-block.cpp create mode 100644 src/proc/identity-processing-block.h create mode 100644 src/proc/motion-transform.cpp create mode 100644 src/proc/motion-transform.h create mode 100644 src/proc/rotation-transform.cpp create mode 100644 src/proc/rotation-transform.h create mode 100644 src/proc/y12i-to-y16y16.cpp create 
mode 100644 src/proc/y12i-to-y16y16.h create mode 100644 src/proc/y8i-to-y8y8.cpp create mode 100644 src/proc/y8i-to-y8y8.h delete mode 100644 src/proc/yuy2rgb.cpp delete mode 100644 src/proc/yuy2rgb.h diff --git a/include/librealsense2/h/rs_frame.h b/include/librealsense2/h/rs_frame.h index 9a2f95eb64..4a5afb39d7 100644 --- a/include/librealsense2/h/rs_frame.h +++ b/include/librealsense2/h/rs_frame.h @@ -255,6 +255,21 @@ int rs2_is_frame_extendable_to(const rs2_frame* frame, rs2_extension extension_t rs2_frame* rs2_allocate_synthetic_video_frame(rs2_source* source, const rs2_stream_profile* new_stream, rs2_frame* original, int new_bpp, int new_width, int new_height, int new_stride, rs2_extension frame_type, rs2_error** error); +/** +* Allocate new motion frame using a frame-source provided from a processing block +* \param[in] source Frame pool to allocate the frame from +* \param[in] new_stream New stream profile to assign to newly created frame +* \param[in] original A reference frame that can be used to fill in auxiliary information like format, width, height, bpp, stride (if applicable) +* \param[in] new_width New value for width for the allocated frame +* \param[in] new_height New value for height for the allocated frame +* \param[in] frame_type New value for frame type for the allocated frame +* \param[out] error If non-null, receives any error that occurs during this call, otherwise, errors are ignored +* \return reference to a newly allocated frame, must be released with release_frame +* memory for the frame is likely to be re-used from a previous frame, but if no frames are available in the pool it will be allocated from the free store +*/ +rs2_frame* rs2_allocate_synthetic_motion_frame(rs2_source* source, const rs2_stream_profile* new_stream, rs2_frame* original, + int new_width, int new_height, rs2_extension frame_type, rs2_error** error); + /** * Allocate new points frame using a frame-source provided from a processing block * \param[in] source Frame pool to allocate the frame from diff --git a/include/librealsense2/h/rs_sensor.h b/include/librealsense2/h/rs_sensor.h index 4d229457f7..8d89925046 100644 --- a/include/librealsense2/h/rs_sensor.h +++ b/include/librealsense2/h/rs_sensor.h @@ -80,6 +80,11 @@ typedef enum rs2_format RS2_FORMAT_Y10BPACK , /**< 16-bit per-pixel grayscale image unpacked from 10 bits per pixel packed ([8:8:8:8:2222]) grey-scale image. The data is unpacked to LSB and padded with 6 zero bits */ RS2_FORMAT_DISTANCE , /**< 32-bit float-point depth distance value. */ RS2_FORMAT_MJPEG , /**< Bitstream encoding for video in which an image of each frame is encoded as JPEG-DIB */ + RS2_FORMAT_Y8I , /**< 8-bit per pixel interleaved. 8-bit left, 8-bit right. */ + RS2_FORMAT_Y12I , /**< 12-bit per pixel interleaved. 12-bit left, 12-bit right. Each pixel is stored in a 24-bit word in little-endian order. */ + RS2_FORMAT_INZI , /**< multi-planar Depth 16bit + IR 10bit. */ + RS2_FORMAT_INVI , /**< 8-bit IR stream. */ + RS2_FORMAT_W10 , /**< Grey-scale image as a bit-packed array. 4 pixel data stream taking 5 bytes */ RS2_FORMAT_COUNT /**< Number of enumeration values. Not a valid input: intended to be used in for-loops.
*/ } rs2_format; const char* rs2_format_to_string(rs2_format format); diff --git a/include/librealsense2/hpp/rs_processing.hpp b/include/librealsense2/hpp/rs_processing.hpp index e2831c2518..54dad371b7 100644 --- a/include/librealsense2/hpp/rs_processing.hpp +++ b/include/librealsense2/hpp/rs_processing.hpp @@ -26,7 +26,7 @@ namespace rs2 * \param[in] new_bpp Frame bit per pixel to create. * \param[in] new_width Frame width to create. * \param[in] new_height Frame height to create. - * \param[in] new_stride Frame stride to crate. + * \param[in] new_stride Frame stride to create. * \param[in] frame_type Which frame type are going to create. * \return The allocated frame */ @@ -45,6 +45,29 @@ namespace rs2 return result; } + /** + * Allocate a new motion frame with given params + * + * \param[in] profile Stream profile going to allocate. + * \param[in] original Original frame. + * \param[in] new_width Frame width to create. + * \param[in] new_height Frame height to create. + * \param[in] frame_type Which frame type are going to create. + * \return The allocated frame + */ + frame allocate_motion_frame(const stream_profile& profile, + const frame& original, + int new_width = 0, + int new_height = 0, + rs2_extension frame_type = RS2_EXTENSION_MOTION_FRAME) const + { + rs2_error* e = nullptr; + auto result = rs2_allocate_synthetic_motion_frame(_source, profile.get(), + original.get(), new_width, new_height, frame_type, &e); + error::handle(e); + return result; + } + frame allocate_points(const stream_profile& profile, const frame& original) const { diff --git a/src/algo.cpp b/src/algo.cpp index b588d91439..73f1cd920c 100644 --- a/src/algo.cpp +++ b/src/algo.cpp @@ -63,8 +63,8 @@ auto_exposure_mechanism::auto_exposure_mechanism(option& gain_option, option& ex if (!_keep_alive) return; - frame_and_callback frame_callback; - auto frame_sts = _data_queue.dequeue(&frame_callback, RS2_DEFAULT_TIMEOUT); + frame_holder f_holder; + auto frame_sts = _data_queue.dequeue(&f_holder, RS2_DEFAULT_TIMEOUT); lk.unlock(); @@ -75,7 +75,7 @@ auto_exposure_mechanism::auto_exposure_mechanism(option& gain_option, option& ex } try { - auto frame = std::move(frame_callback.f_holder); + auto frame = std::move(f_holder); double values[2] = {}; @@ -147,7 +147,7 @@ void auto_exposure_mechanism::update_auto_exposure_roi(const region_of_interest& _auto_exposure_algo.update_roi(roi); } -void auto_exposure_mechanism::add_frame(frame_holder frame, callback_invocation_holder callback) +void auto_exposure_mechanism::add_frame(frame_holder frame) { if (!_keep_alive || (_skip_frames && (_frames_counter++) != _skip_frames)) @@ -159,7 +159,7 @@ void auto_exposure_mechanism::add_frame(frame_holder frame, callback_invocation_ { std::lock_guard lk(_queue_mtx); - _data_queue.enqueue({std::move(frame), std::move(callback)}); + _data_queue.enqueue(std::move(frame)); } _cv.notify_one(); } diff --git a/src/algo.h b/src/algo.h index c7fcb0f471..e23af666fa 100644 --- a/src/algo.h +++ b/src/algo.h @@ -102,16 +102,11 @@ namespace librealsense std::recursive_mutex state_mutex; }; - struct frame_and_callback{ - frame_holder f_holder; - callback_invocation_holder callback; - }; - class auto_exposure_mechanism { public: auto_exposure_mechanism(option& gain_option, option& exposure_option, const auto_exposure_state& auto_exposure_state); ~auto_exposure_mechanism(); - void add_frame(frame_holder frame, callback_invocation_holder callback); + void add_frame(frame_holder frame); void update_auto_exposure_state(const auto_exposure_state& 
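[Editor's note] The rs2_allocate_synthetic_motion_frame C entry point and the rs2::frame_source::allocate_motion_frame wrapper added above give a user-defined processing block a way to emit motion frames, mirroring the existing synthetic video-frame path. A minimal sketch of how an application-level block might call it follows; the pass-through copy, the const_cast on get_data(), and the assumption that the payload is a single rs2_vector are illustrative and not part of this patch.

#include <librealsense2/rs.hpp>
#include <cstring>

// Hypothetical pass-through block: re-allocates every incoming motion frame through
// the new allocate_motion_frame() API and forwards the copy downstream.
rs2::processing_block make_motion_passthrough()
{
    return rs2::processing_block([](rs2::frame f, const rs2::frame_source& src)
    {
        // Reuse the original motion stream profile; the new API is the one added by this patch.
        rs2::frame out = src.allocate_motion_frame(f.get_profile(), f);
        // Assumed payload: one rs2_vector (x, y, z) per gyro/accel sample.
        std::memcpy(const_cast<void*>(out.get_data()), f.get_data(), sizeof(rs2_vector));
        src.frame_ready(out);
    });
}

A block like this is driven the same way as any other rs2::processing_block: start() it with a callback or frame queue and feed it frames with invoke().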
auto_exposure_state); void update_auto_exposure_roi(const region_of_interest& roi); @@ -129,8 +124,6 @@ namespace librealsense }; private: - bool try_pop_front_data(frame_and_callback* data); - static const int queue_size = 2; option& _gain_option; option& _exposure_option; @@ -138,7 +131,7 @@ namespace librealsense std::shared_ptr _exposure_thread; std::condition_variable _cv; std::atomic _keep_alive; - single_consumer_queue _data_queue; + single_consumer_queue _data_queue; std::mutex _queue_mtx; std::atomic _frames_counter; std::atomic _skip_frames; diff --git a/src/context.cpp b/src/context.cpp index 31f21ef06e..6ff5a2137d 100644 --- a/src/context.cpp +++ b/src/context.cpp @@ -93,6 +93,15 @@ bool contains(const std::shared_ptr& first, namespace librealsense { + std::map platform_color_fourcc_to_rs2_format = { + {rs_fourcc('Y','U','Y','2'), RS2_FORMAT_YUYV}, + {rs_fourcc('U','Y','V','Y'), RS2_FORMAT_UYVY} + }; + std::map platform_color_fourcc_to_rs2_stream = { + {rs_fourcc('Y','U','Y','2'), RS2_STREAM_COLOR}, + {rs_fourcc('U','Y','V','Y'), RS2_STREAM_COLOR} + }; + context::context(backend_type type, const char* filename, const char* section, @@ -210,14 +219,12 @@ namespace librealsense std::vector _uvcs; }; - class platform_camera_sensor : public uvc_sensor + class platform_camera_sensor : public synthetic_sensor { public: - platform_camera_sensor(const std::shared_ptr&, - device* owner, - std::shared_ptr uvc_device, - std::unique_ptr timestamp_reader) - : uvc_sensor("RGB Camera", uvc_device, move(timestamp_reader), owner), + platform_camera_sensor(device* owner, + std::shared_ptr uvc_sensor) + : synthetic_sensor("RGB Camera", uvc_sensor, owner), _default_stream(new stream(RS2_STREAM_COLOR)) { } @@ -226,7 +233,7 @@ namespace librealsense { auto lock = environment::get_instance().get_extrinsics_graph().lock(); - auto results = uvc_sensor::init_stream_profiles(); + auto results = synthetic_sensor::init_stream_profiles(); for (auto p : results) { @@ -255,9 +262,11 @@ namespace librealsense std::vector> devs; for (auto&& info : uvc_infos) devs.push_back(ctx->get_backend().create_uvc_device(info)); - auto color_ep = std::make_shared(ctx, this, - std::make_shared(devs), - std::unique_ptr(new ds5_timestamp_reader(environment::get_instance().get_time_service()))); + auto raw_color_ep = std::make_shared("Raw RGB Camera", + std::make_shared(devs), + std::unique_ptr(new ds5_timestamp_reader(environment::get_instance().get_time_service())), + this); + auto color_ep = std::make_shared(this, raw_color_ep); add_sensor(color_ep); register_info(RS2_CAMERA_INFO_NAME, "Platform Camera"); @@ -268,20 +277,30 @@ namespace librealsense register_info(RS2_CAMERA_INFO_PHYSICAL_PORT, uvc_infos.front().device_path); register_info(RS2_CAMERA_INFO_PRODUCT_ID, pid_str); - color_ep->register_pixel_format(pf_yuy2); - color_ep->register_pixel_format(pf_yuyv); - - color_ep->try_register_pu(RS2_OPTION_BACKLIGHT_COMPENSATION); - color_ep->try_register_pu(RS2_OPTION_BRIGHTNESS); - color_ep->try_register_pu(RS2_OPTION_CONTRAST); - color_ep->try_register_pu(RS2_OPTION_EXPOSURE); - color_ep->try_register_pu(RS2_OPTION_GAMMA); - color_ep->try_register_pu(RS2_OPTION_HUE); - color_ep->try_register_pu(RS2_OPTION_SATURATION); - color_ep->try_register_pu(RS2_OPTION_SHARPNESS); - color_ep->try_register_pu(RS2_OPTION_WHITE_BALANCE); - color_ep->try_register_pu(RS2_OPTION_ENABLE_AUTO_EXPOSURE); - color_ep->try_register_pu(RS2_OPTION_ENABLE_AUTO_WHITE_BALANCE); + color_ep->register_processing_block({ {RS2_FORMAT_YUYV} }, { 
{RS2_FORMAT_RGB8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_RGB8); }); + color_ep->register_processing_block({ {RS2_FORMAT_YUYV} }, { {RS2_FORMAT_RGBA8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_RGBA8); }); + color_ep->register_processing_block({ {RS2_FORMAT_YUYV} }, { {RS2_FORMAT_BGR8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_BGR8); }); + color_ep->register_processing_block({ {RS2_FORMAT_YUYV} }, { {RS2_FORMAT_BGRA8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_BGRA8); }); + color_ep->register_processing_block({ {RS2_FORMAT_YUYV} }, { {RS2_FORMAT_Y16, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_Y16); }); + color_ep->register_processing_block(processing_block_factory::create_id_pbf(RS2_FORMAT_YUYV, RS2_STREAM_COLOR)); + + color_ep->register_processing_block({ {RS2_FORMAT_UYVY} }, { {RS2_FORMAT_RGB8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_RGB8); }); + color_ep->register_processing_block({ {RS2_FORMAT_UYVY} }, { {RS2_FORMAT_RGBA8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_RGBA8); }); + color_ep->register_processing_block({ {RS2_FORMAT_UYVY} }, { {RS2_FORMAT_BGR8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_BGR8); }); + color_ep->register_processing_block({ {RS2_FORMAT_UYVY} }, { {RS2_FORMAT_BGRA8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_BGRA8); }); + color_ep->register_processing_block(processing_block_factory::create_id_pbf(RS2_FORMAT_UYVY, RS2_STREAM_COLOR)); + + raw_color_ep->try_register_pu(RS2_OPTION_BACKLIGHT_COMPENSATION); + raw_color_ep->try_register_pu(RS2_OPTION_BRIGHTNESS); + raw_color_ep->try_register_pu(RS2_OPTION_CONTRAST); + raw_color_ep->try_register_pu(RS2_OPTION_EXPOSURE); + raw_color_ep->try_register_pu(RS2_OPTION_GAMMA); + raw_color_ep->try_register_pu(RS2_OPTION_HUE); + raw_color_ep->try_register_pu(RS2_OPTION_SATURATION); + raw_color_ep->try_register_pu(RS2_OPTION_SHARPNESS); + raw_color_ep->try_register_pu(RS2_OPTION_WHITE_BALANCE); + raw_color_ep->try_register_pu(RS2_OPTION_ENABLE_AUTO_EXPOSURE); + raw_color_ep->try_register_pu(RS2_OPTION_ENABLE_AUTO_WHITE_BALANCE); } virtual rs2_intrinsics get_intrinsics(unsigned int, const stream_profile&) const diff --git a/src/core/advanced_mode.h b/src/core/advanced_mode.h index 06eec05154..3b272d19c7 100644 --- a/src/core/advanced_mode.h +++ b/src/core/advanced_mode.h @@ -108,7 +108,7 @@ namespace librealsense { public: explicit ds5_advanced_mode_base(std::shared_ptr hwm, - uvc_sensor& depth_sensor); + synthetic_sensor& depth_sensor); void create_snapshot(std::shared_ptr& snapshot) const override {}; void enable_recording(std::function recording_function) override {}; @@ -156,10 +156,10 @@ namespace librealsense static const uint16_t HW_MONITOR_BUFFER_SIZE = 1024; private: - void set_exposure(uvc_sensor& sensor, const exposure_control& val); - void set_auto_exposure(uvc_sensor& sensor, const auto_exposure_control& val); - void get_exposure(uvc_sensor& sensor, exposure_control* ptr) const; - void get_auto_exposure(uvc_sensor& sensor, auto_exposure_control* ptr) const; + void set_exposure(synthetic_sensor& sensor, const exposure_control& val); + void set_auto_exposure(synthetic_sensor& sensor, const auto_exposure_control& val); + void get_exposure(synthetic_sensor& sensor, exposure_control* ptr) const; + void get_auto_exposure(synthetic_sensor& sensor, auto_exposure_control* ptr) const; void get_laser_power(laser_power_control* ptr) const; void 
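[Editor's note] Each register_processing_block() call above pairs a raw backend format with the user-facing format it can produce plus a factory for the converter block; the template argument of make_shared was lost in this rendering of the diff, and for the YUYV entries it is presumably the yuy2 converter declared in the new proc/color-formats-converter.h. For orientation only, here is a scalar sketch of the kind of YUY2-to-RGB8 conversion such a block performs; the library ships its own optimized implementation, so treat this purely as an illustration of the format.

#include <algorithm>
#include <cstdint>
#include <vector>

// Scalar YUY2 (YUYV) -> RGB8 reference conversion (BT.601 integer approximation).
// Each 4-byte macropixel Y0 U Y1 V yields two RGB pixels.
std::vector<uint8_t> yuy2_to_rgb8(const uint8_t* src, int width, int height)
{
    auto clamp8 = [](int v) { return static_cast<uint8_t>(std::min(255, std::max(0, v))); };
    std::vector<uint8_t> dst(static_cast<size_t>(width) * height * 3);
    size_t o = 0;
    for (int i = 0; i < width * height / 2; ++i, src += 4)
    {
        const int u = src[1] - 128, v = src[3] - 128;
        const int ys[2] = { src[0], src[2] };
        for (int y : ys)
        {
            const int c = 298 * (y - 16);
            dst[o++] = clamp8((c + 409 * v + 128) >> 8);            // R
            dst[o++] = clamp8((c - 100 * u - 208 * v + 128) >> 8);  // G
            dst[o++] = clamp8((c + 516 * u + 128) >> 8);            // B
        }
    }
    return dst;
}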
get_laser_state(laser_state_control* ptr) const; @@ -201,10 +201,10 @@ namespace librealsense void set_color_auto_white_balance(const auto_white_balance_control& val); void set_color_power_line_frequency(const power_line_frequency_control& val); - bool supports_option(const uvc_sensor& sensor, rs2_option opt) const; + bool supports_option(const synthetic_sensor& sensor, rs2_option opt) const; std::shared_ptr _hw_monitor; - uvc_sensor& _depth_sensor; + synthetic_sensor& _depth_sensor; lazy _color_sensor; lazy _enabled; std::shared_ptr _preset_opt; @@ -258,7 +258,7 @@ namespace librealsense class advanced_mode_preset_option : public option_base { public: - advanced_mode_preset_option(ds5_advanced_mode_base& advanced, uvc_sensor& ep, + advanced_mode_preset_option(ds5_advanced_mode_base& advanced, synthetic_sensor& ep, const option_range& opt_range); static rs2_rs400_visual_preset to_preset(float x); @@ -273,7 +273,7 @@ namespace librealsense firmware_version get_firmware_version(const uvc_sensor& sensor) const; std::mutex _mtx; - uvc_sensor& _ep; + synthetic_sensor& _ep; ds5_advanced_mode_base& _advanced; rs2_rs400_visual_preset _last_preset; }; diff --git a/src/core/processing.h b/src/core/processing.h index f7973f56f9..916dd49ba3 100644 --- a/src/core/processing.h +++ b/src/core/processing.h @@ -29,6 +29,12 @@ namespace librealsense int new_stride = 0, rs2_extension frame_type = RS2_EXTENSION_VIDEO_FRAME) = 0; + virtual frame_interface* allocate_motion_frame(std::shared_ptr stream, + frame_interface* original, + int width = 0, + int height = 0, + rs2_extension frame_type = RS2_EXTENSION_MOTION_FRAME) = 0; + virtual frame_interface* allocate_composite_frame(std::vector frames) = 0; virtual frame_interface* allocate_points(std::shared_ptr stream, diff --git a/src/core/streaming.h b/src/core/streaming.h index 6f30ec74b6..7522bc3466 100644 --- a/src/core/streaming.h +++ b/src/core/streaming.h @@ -85,7 +85,6 @@ namespace librealsense virtual bool supports_frame_metadata(const rs2_frame_metadata_value& frame_metadata) const = 0; virtual int get_frame_data_size() const = 0; virtual const byte* get_frame_data() const = 0; - //TODO: add virtual uint64_t get_frame_data_size() const = 0; virtual rs2_time_t get_frame_timestamp() const = 0; virtual rs2_timestamp_domain get_frame_timestamp_domain() const = 0; virtual void set_timestamp(double new_ts) = 0; @@ -165,6 +164,14 @@ namespace librealsense using stream_profiles = std::vector>; using processing_blocks = std::vector>; + inline std::ostream& operator << (std::ostream& os, const stream_profiles& profiles) + { + for (auto&& p : profiles) + { + os << rs2_format_to_string(p->get_format()) << " " << rs2_stream_to_string(p->get_stream_type()) << ", "; + } + return os; + } class recommended_proccesing_blocks_interface { diff --git a/src/device.cpp b/src/device.cpp index 431f88993c..872404cbd6 100644 --- a/src/device.cpp +++ b/src/device.cpp @@ -179,9 +179,9 @@ int device::assign_sensor(std::shared_ptr sensor_base, uint8_t } } -uvc_sensor& device::get_uvc_sensor(int sub) +synthetic_sensor& device::get_uvc_sensor(int sub) { - return dynamic_cast(*_sensors[sub]); + return dynamic_cast(*_sensors[sub]); } size_t device::get_sensors_count() const diff --git a/src/device.h b/src/device.h index 752f9487a9..66be329cb8 100644 --- a/src/device.h +++ b/src/device.h @@ -82,7 +82,7 @@ namespace librealsense int add_sensor(std::shared_ptr sensor_base); int assign_sensor(std::shared_ptr sensor_base, uint8_t idx); void register_stream_to_extrinsic_group(const 
stream_interface& stream, uint32_t groupd_index); - uvc_sensor& get_uvc_sensor(int subdevice); + synthetic_sensor& get_uvc_sensor(int subdevice); explicit device(std::shared_ptr ctx, const platform::backend_device_group group, diff --git a/src/ds5/advanced_mode/advanced_mode.cpp b/src/ds5/advanced_mode/advanced_mode.cpp index 7fddbb5e80..03223a2cac 100644 --- a/src/ds5/advanced_mode/advanced_mode.cpp +++ b/src/ds5/advanced_mode/advanced_mode.cpp @@ -10,7 +10,7 @@ namespace librealsense { ds5_advanced_mode_base::ds5_advanced_mode_base(std::shared_ptr hwm, - uvc_sensor& depth_sensor) + synthetic_sensor& depth_sensor) : _hw_monitor(hwm), _depth_sensor(depth_sensor), _color_sensor(nullptr) @@ -239,7 +239,7 @@ namespace librealsense []() { STAFactor af; af.amplitude = 0.f; return af; }(); } - bool ds5_advanced_mode_base::supports_option(const uvc_sensor& sensor, rs2_option opt) const + bool ds5_advanced_mode_base::supports_option(const synthetic_sensor& sensor, rs2_option opt) const { return sensor.supports_option(opt); } @@ -262,7 +262,7 @@ namespace librealsense } } - void ds5_advanced_mode_base::get_exposure(uvc_sensor& sensor, exposure_control* ptr) const + void ds5_advanced_mode_base::get_exposure(synthetic_sensor& sensor, exposure_control* ptr) const { if (supports_option(sensor, RS2_OPTION_EXPOSURE)) { @@ -271,7 +271,7 @@ namespace librealsense } } - void ds5_advanced_mode_base::get_auto_exposure(uvc_sensor& sensor, auto_exposure_control* ptr) const + void ds5_advanced_mode_base::get_auto_exposure(synthetic_sensor& sensor, auto_exposure_control* ptr) const { if (supports_option(sensor, RS2_OPTION_ENABLE_AUTO_EXPOSURE)) { @@ -312,7 +312,7 @@ namespace librealsense { if (*_color_sensor) { - get_exposure(*(*_color_sensor), ptr); + get_exposure(**_color_sensor, ptr); } } @@ -326,7 +326,7 @@ namespace librealsense void ds5_advanced_mode_base::get_color_backlight_compensation(backlight_compensation_control* ptr) const { - if (*_color_sensor && supports_option(*(*_color_sensor), RS2_OPTION_BACKLIGHT_COMPENSATION)) + if (*_color_sensor && supports_option(**_color_sensor, RS2_OPTION_BACKLIGHT_COMPENSATION)) { ptr->backlight_compensation = static_cast((*_color_sensor)->get_option(RS2_OPTION_BACKLIGHT_COMPENSATION).query()); ptr->was_set = true; @@ -335,7 +335,7 @@ namespace librealsense void ds5_advanced_mode_base::get_color_brightness(brightness_control* ptr) const { - if (*_color_sensor && supports_option(*(*_color_sensor), RS2_OPTION_BRIGHTNESS)) + if (*_color_sensor && supports_option(**_color_sensor, RS2_OPTION_BRIGHTNESS)) { ptr->brightness = (*_color_sensor)->get_option(RS2_OPTION_BRIGHTNESS).query(); ptr->was_set = true; @@ -344,7 +344,7 @@ namespace librealsense void ds5_advanced_mode_base::get_color_contrast(contrast_control* ptr) const { - if (*_color_sensor && supports_option(*(*_color_sensor), RS2_OPTION_CONTRAST)) + if (*_color_sensor && supports_option(**_color_sensor, RS2_OPTION_CONTRAST)) { ptr->contrast = (*_color_sensor)->get_option(RS2_OPTION_CONTRAST).query(); ptr->was_set = true; @@ -353,7 +353,7 @@ namespace librealsense void ds5_advanced_mode_base::get_color_gain(gain_control* ptr) const { - if (*_color_sensor && supports_option(*(*_color_sensor), RS2_OPTION_GAIN)) + if (*_color_sensor && supports_option(**_color_sensor, RS2_OPTION_GAIN)) { ptr->gain = (*_color_sensor)->get_option(RS2_OPTION_GAIN).query(); ptr->was_set = true; @@ -362,7 +362,7 @@ namespace librealsense void ds5_advanced_mode_base::get_color_gamma(gamma_control* ptr) const { - if (*_color_sensor && 
supports_option(*(*_color_sensor), RS2_OPTION_GAMMA)) + if (*_color_sensor && supports_option(**_color_sensor, RS2_OPTION_GAMMA)) { ptr->gamma = (*_color_sensor)->get_option(RS2_OPTION_GAMMA).query(); ptr->was_set = true; @@ -371,7 +371,7 @@ namespace librealsense void ds5_advanced_mode_base::get_color_hue(hue_control* ptr) const { - if (*_color_sensor && supports_option(*(*_color_sensor), RS2_OPTION_HUE)) + if (*_color_sensor && supports_option(**_color_sensor, RS2_OPTION_HUE)) { ptr->hue = (*_color_sensor)->get_option(RS2_OPTION_HUE).query(); ptr->was_set = true; @@ -380,7 +380,7 @@ namespace librealsense void ds5_advanced_mode_base::get_color_saturation(saturation_control* ptr) const { - if (*_color_sensor && supports_option(*(*_color_sensor), RS2_OPTION_SATURATION)) + if (*_color_sensor && supports_option(**_color_sensor, RS2_OPTION_SATURATION)) { ptr->saturation = (*_color_sensor)->get_option(RS2_OPTION_SATURATION).query(); ptr->was_set = true; @@ -389,7 +389,7 @@ namespace librealsense void ds5_advanced_mode_base::get_color_sharpness(sharpness_control* ptr) const { - if (*_color_sensor && supports_option(*(*_color_sensor), RS2_OPTION_SHARPNESS)) + if (*_color_sensor && supports_option(**_color_sensor, RS2_OPTION_SHARPNESS)) { ptr->sharpness = (*_color_sensor)->get_option(RS2_OPTION_SHARPNESS).query(); ptr->was_set = true; @@ -398,7 +398,7 @@ namespace librealsense void ds5_advanced_mode_base::get_color_white_balance(white_balance_control* ptr) const { - if (*_color_sensor && supports_option(*(*_color_sensor), RS2_OPTION_WHITE_BALANCE)) + if (*_color_sensor && supports_option(**_color_sensor, RS2_OPTION_WHITE_BALANCE)) { ptr->white_balance = (*_color_sensor)->get_option(RS2_OPTION_WHITE_BALANCE).query(); ptr->was_set = true; @@ -407,7 +407,7 @@ namespace librealsense void ds5_advanced_mode_base::get_color_auto_white_balance(auto_white_balance_control* ptr) const { - if (*_color_sensor && supports_option(*(*_color_sensor), RS2_OPTION_ENABLE_AUTO_WHITE_BALANCE)) + if (*_color_sensor && supports_option(**_color_sensor, RS2_OPTION_ENABLE_AUTO_WHITE_BALANCE)) { ptr->auto_white_balance = static_cast((*_color_sensor)->get_option(RS2_OPTION_ENABLE_AUTO_WHITE_BALANCE).query()); ptr->was_set = true; @@ -416,7 +416,7 @@ namespace librealsense void ds5_advanced_mode_base::get_color_power_line_frequency(power_line_frequency_control* ptr) const { - if (*_color_sensor && supports_option(*(*_color_sensor), RS2_OPTION_POWER_LINE_FREQUENCY)) + if (*_color_sensor && supports_option(**_color_sensor, RS2_OPTION_POWER_LINE_FREQUENCY)) { ptr->power_line_frequency = static_cast((*_color_sensor)->get_option(RS2_OPTION_POWER_LINE_FREQUENCY).query()); ptr->was_set = true; @@ -516,12 +516,12 @@ namespace librealsense _depth_sensor.get_option(RS2_OPTION_EMITTER_ENABLED).set((float)val.laser_state); } - void ds5_advanced_mode_base::set_exposure(uvc_sensor& sensor, const exposure_control& val) + void ds5_advanced_mode_base::set_exposure(synthetic_sensor& sensor, const exposure_control& val) { sensor.get_option(RS2_OPTION_EXPOSURE).set(val.exposure); } - void ds5_advanced_mode_base::set_auto_exposure(uvc_sensor& sensor, const auto_exposure_control& val) + void ds5_advanced_mode_base::set_auto_exposure(synthetic_sensor& sensor, const auto_exposure_control& val) { sensor.get_option(RS2_OPTION_ENABLE_AUTO_EXPOSURE).set(float(val.auto_exposure)); } @@ -853,7 +853,7 @@ namespace librealsense } advanced_mode_preset_option::advanced_mode_preset_option(ds5_advanced_mode_base& advanced, - uvc_sensor& ep, const option_range& 
opt_range) + synthetic_sensor& ep, const option_range& opt_range) : option_base(opt_range), _ep(ep), _advanced(advanced), @@ -861,8 +861,9 @@ namespace librealsense { _ep.register_on_open([this](std::vector configurations) { std::lock_guard lock(_mtx); + auto uvc_sen = As(_ep.get_raw_sensor()); if (_last_preset != RS2_RS400_VISUAL_PRESET_CUSTOM) - _advanced.apply_preset(configurations, _last_preset, get_device_pid(_ep), get_firmware_version(_ep)); + _advanced.apply_preset(configurations, _last_preset, get_device_pid(*uvc_sen), get_firmware_version(*uvc_sen)); }); } @@ -887,9 +888,9 @@ namespace librealsense return; } - auto uvc_sensor = dynamic_cast(&_ep); - auto configurations = uvc_sensor->get_configuration(); - _advanced.apply_preset(configurations, preset, get_device_pid(_ep), get_firmware_version(_ep)); + auto uvc_sen = As(_ep.get_raw_sensor()); + auto configurations = uvc_sen->get_configuration(); + _advanced.apply_preset(configurations, preset, get_device_pid(*uvc_sen), get_firmware_version(*uvc_sen)); _last_preset = preset; _recording_function(*this); } diff --git a/src/ds5/ds5-active.cpp b/src/ds5/ds5-active.cpp index eae0dda717..02e86bd2d9 100644 --- a/src/ds5/ds5-active.cpp +++ b/src/ds5/ds5-active.cpp @@ -28,7 +28,7 @@ namespace librealsense auto pid = group.uvc_devices.front().pid; if (pid != RS_USB2_PID) { - auto& depth_ep = get_depth_sensor(); + auto& depth_ep = get_raw_depth_sensor(); auto emitter_enabled = std::make_shared(depth_ep); depth_ep.register_option(RS2_OPTION_EMITTER_ENABLED, emitter_enabled); diff --git a/src/ds5/ds5-color.cpp b/src/ds5/ds5-color.cpp index b7ad1cc7fd..567f5d33be 100644 --- a/src/ds5/ds5-color.cpp +++ b/src/ds5/ds5-color.cpp @@ -6,21 +6,38 @@ #include #include #include +#include #include "device.h" #include "context.h" #include "image.h" #include "metadata-parser.h" +#include "global_timestamp_reader.h" +#include "environment.h" #include "ds5-color.h" #include "ds5-private.h" #include "ds5-options.h" #include "ds5-timestamp.h" -#include "global_timestamp_reader.h" -#include "environment.h" + +#include "proc/color-formats-converter.h" +#include "proc/identity-processing-block.h" namespace librealsense { + std::map ds5_color_fourcc_to_rs2_format = { + {rs_fourcc('Y','U','Y','2'), RS2_FORMAT_YUYV}, + {rs_fourcc('U','Y','V','Y'), RS2_FORMAT_UYVY}, + {rs_fourcc('M','J','P','G'), RS2_FORMAT_MJPEG}, + {rs_fourcc('B','Y','R','2'), RS2_FORMAT_RAW16} + }; + std::map ds5_color_fourcc_to_rs2_stream = { + {rs_fourcc('Y','U','Y','2'), RS2_STREAM_COLOR}, + {rs_fourcc('U','Y','V','Y'), RS2_STREAM_COLOR}, + {rs_fourcc('B','Y','R','2'), RS2_STREAM_COLOR}, + {rs_fourcc('M','J','P','G'), RS2_STREAM_COLOR}, + }; + ds5_color::ds5_color(std::shared_ptr ctx, const platform::backend_device_group& group) : ds5_device(ctx, group), device(ctx, group), @@ -41,7 +58,7 @@ namespace librealsense auto color_ep = create_color_device(ctx, color_devs_info); } - std::shared_ptr ds5_color::create_color_device(std::shared_ptr ctx, + std::shared_ptr ds5_color::create_color_device(std::shared_ptr ctx, const std::vector& color_devices_info) { auto&& backend = ctx->get_backend(); @@ -49,16 +66,16 @@ namespace librealsense std::unique_ptr ds5_timestamp_reader_metadata(new ds5_timestamp_reader_from_metadata(std::move(ds5_timestamp_reader_backup))); auto enable_global_time_option = std::shared_ptr(new global_time_option()); - auto color_ep = std::make_shared(this, backend.create_uvc_device(color_devices_info.front()), - std::unique_ptr(new 
global_timestamp_reader(std::move(ds5_timestamp_reader_metadata), _tf_keeper, enable_global_time_option))); - - _color_device_idx = add_sensor(color_ep); - + auto raw_color_ep = std::make_shared("Raw RGB Sensor", + backend.create_uvc_device(color_devices_info.front()), + std::unique_ptr(new global_timestamp_reader(std::move(ds5_timestamp_reader_metadata), _tf_keeper, enable_global_time_option)), + this); + + auto color_ep = std::make_shared(this, + raw_color_ep, + ds5_color_fourcc_to_rs2_format, + ds5_color_fourcc_to_rs2_stream); color_ep->register_option(RS2_OPTION_GLOBAL_TIME_ENABLED, enable_global_time_option); - color_ep->register_pixel_format(pf_yuyv); - color_ep->register_pixel_format(pf_yuy2); - color_ep->register_pixel_format(pf_bayer16); - color_ep->register_pixel_format(pf_uyvyc); color_ep->register_pu(RS2_OPTION_BRIGHTNESS); color_ep->register_pu(RS2_OPTION_CONTRAST); @@ -77,14 +94,11 @@ namespace librealsense // From 5.11.15 auto-exposure priority is supported on the D465 else if (_fw_version >= firmware_version("5.11.15.0")) { - color_ep->register_pu(RS2_OPTION_AUTO_EXPOSURE_PRIORITY); + raw_color_ep->register_pu(RS2_OPTION_AUTO_EXPOSURE_PRIORITY); } - if (color_devices_info.front().pid == ds::RS465_PID) - color_ep->register_pixel_format(pf_mjpg); - - auto white_balance_option = std::make_shared(*color_ep, RS2_OPTION_WHITE_BALANCE); - auto auto_white_balance_option = std::make_shared(*color_ep, RS2_OPTION_ENABLE_AUTO_WHITE_BALANCE); + auto white_balance_option = std::make_shared(*raw_color_ep, RS2_OPTION_WHITE_BALANCE); + auto auto_white_balance_option = std::make_shared(*raw_color_ep, RS2_OPTION_ENABLE_AUTO_WHITE_BALANCE); color_ep->register_option(RS2_OPTION_WHITE_BALANCE, white_balance_option); color_ep->register_option(RS2_OPTION_ENABLE_AUTO_WHITE_BALANCE, auto_white_balance_option); color_ep->register_option(RS2_OPTION_WHITE_BALANCE, @@ -92,8 +106,8 @@ namespace librealsense white_balance_option, auto_white_balance_option)); - auto exposure_option = std::make_shared(*color_ep, RS2_OPTION_EXPOSURE); - auto auto_exposure_option = std::make_shared(*color_ep, RS2_OPTION_ENABLE_AUTO_EXPOSURE); + auto exposure_option = std::make_shared(*raw_color_ep, RS2_OPTION_EXPOSURE); + auto auto_exposure_option = std::make_shared(*raw_color_ep, RS2_OPTION_ENABLE_AUTO_EXPOSURE); color_ep->register_option(RS2_OPTION_EXPOSURE, exposure_option); color_ep->register_option(RS2_OPTION_ENABLE_AUTO_EXPOSURE, auto_exposure_option); color_ep->register_option(RS2_OPTION_EXPOSURE, @@ -102,7 +116,7 @@ namespace librealsense auto_exposure_option)); color_ep->register_option(RS2_OPTION_POWER_LINE_FREQUENCY, - std::make_shared(*color_ep, RS2_OPTION_POWER_LINE_FREQUENCY, + std::make_shared(*raw_color_ep, RS2_OPTION_POWER_LINE_FREQUENCY, std::map{ { 0.f, "Disabled"}, { 1.f, "50Hz" }, { 2.f, "60Hz" }, @@ -154,10 +168,33 @@ namespace librealsense if (_fw_version >= firmware_version("5.10.9.0")) { roi_sensor_interface* roi_sensor; - if ((roi_sensor = dynamic_cast(color_ep.get()))) + if ((roi_sensor = dynamic_cast(color_ep.get()))) // TODO - Ariel - check if broken roi_sensor->set_roi_method(std::make_shared(*_hw_monitor, ds::fw_cmd::SETRGBAEROI)); } + color_ep->register_processing_block({ {RS2_FORMAT_YUYV} }, { {RS2_FORMAT_RGB8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_RGB8); }); + color_ep->register_processing_block({ {RS2_FORMAT_YUYV} }, { {RS2_FORMAT_RGBA8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_RGBA8); }); + color_ep->register_processing_block({ 
{RS2_FORMAT_YUYV} }, { {RS2_FORMAT_BGR8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_BGR8); }); + color_ep->register_processing_block({ {RS2_FORMAT_YUYV} }, { {RS2_FORMAT_BGRA8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_BGRA8); }); + color_ep->register_processing_block({ {RS2_FORMAT_YUYV} }, { {RS2_FORMAT_Y16, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_Y16); }); + color_ep->register_processing_block(processing_block_factory::create_id_pbf(RS2_FORMAT_YUYV, RS2_STREAM_COLOR)); + + color_ep->register_processing_block({ {RS2_FORMAT_UYVY} }, { {RS2_FORMAT_RGB8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_RGB8); }); + color_ep->register_processing_block({ {RS2_FORMAT_UYVY} }, { {RS2_FORMAT_RGBA8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_RGBA8); }); + color_ep->register_processing_block({ {RS2_FORMAT_UYVY} }, { {RS2_FORMAT_BGR8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_BGR8); }); + color_ep->register_processing_block({ {RS2_FORMAT_UYVY} }, { {RS2_FORMAT_BGRA8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_BGRA8); }); + color_ep->register_processing_block(processing_block_factory::create_id_pbf(RS2_FORMAT_UYVY, RS2_STREAM_COLOR)); + + color_ep->register_processing_block(processing_block_factory::create_id_pbf(RS2_FORMAT_RAW16, RS2_STREAM_COLOR)); + + if (color_devices_info.front().pid == ds::RS465_PID) + { + color_ep->register_processing_block({ {RS2_FORMAT_MJPEG} }, { {RS2_FORMAT_RGB8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_RGB8); }); + color_ep->register_processing_block(processing_block_factory::create_id_pbf(RS2_FORMAT_MJPEG, RS2_STREAM_COLOR)); + } + + _color_device_idx = add_sensor(color_ep); + return color_ep; } @@ -172,7 +209,7 @@ namespace librealsense stream_profiles ds5_color_sensor::init_stream_profiles() { auto lock = environment::get_instance().get_extrinsics_graph().lock(); - auto results = uvc_sensor::init_stream_profiles(); + auto results = synthetic_sensor::init_stream_profiles(); for (auto p : results) { diff --git a/src/ds5/ds5-color.h b/src/ds5/ds5-color.h index 2506c4d7b6..991e2229aa 100644 --- a/src/ds5/ds5-color.h +++ b/src/ds5/ds5-color.h @@ -4,6 +4,9 @@ #pragma once #include "ds5-device.h" + +#include + #include "stream.h" namespace librealsense @@ -11,7 +14,7 @@ namespace librealsense class ds5_color : public virtual ds5_device { public: - std::shared_ptr create_color_device(std::shared_ptr ctx, + std::shared_ptr create_color_device(std::shared_ptr ctx, const std::vector& all_device_infos); ds5_color(std::shared_ptr ctx, @@ -31,15 +34,17 @@ namespace librealsense std::shared_ptr> _color_extrinsic; }; - class ds5_color_sensor : public uvc_sensor, + class ds5_color_sensor : public synthetic_sensor, public video_sensor_interface, public roi_sensor_base { public: explicit ds5_color_sensor(ds5_color* owner, - std::shared_ptr uvc_device, - std::unique_ptr timestamp_reader) - : uvc_sensor("RGB Camera", uvc_device, move(timestamp_reader), owner), _owner(owner) + std::shared_ptr uvc_sensor, + std::map ds5_color_fourcc_to_rs2_format, + std::map ds5_color_fourcc_to_rs2_stream) + : synthetic_sensor("RGB Sensor", uvc_sensor, owner, ds5_color_fourcc_to_rs2_format, ds5_color_fourcc_to_rs2_stream), + _owner(owner) {} rs2_intrinsics get_intrinsics(const stream_profile& profile) const override; diff --git a/src/ds5/ds5-device.cpp b/src/ds5/ds5-device.cpp index cfd798c9fe..b9b2047230 100644 --- a/src/ds5/ds5-device.cpp +++ 
b/src/ds5/ds5-device.cpp @@ -26,13 +26,41 @@ #include "proc/threshold.h" #include "proc/disparity-transform.h" #include "proc/spatial-filter.h" +#include "proc/colorizer.h" #include "proc/temporal-filter.h" +#include "proc/y8i-to-y8y8.h" +#include "proc/y12i-to-y16y16.h" +#include "proc/color-formats-converter.h" +#include "proc/syncer-processing-block.h" #include "proc/hole-filling-filter.h" +#include "proc/depth-formats-converter.h" #include "../common/fw/firmware-version.h" #include "fw-update/fw-update-unsigned.h" namespace librealsense { + std::map ds5_depth_fourcc_to_rs2_format = { + {rs_fourcc('G','R','E','Y'), RS2_FORMAT_Y8}, + {rs_fourcc('Y','8','I',' '), RS2_FORMAT_Y8I}, + {rs_fourcc('W','1','0',' '), RS2_FORMAT_W10}, + {rs_fourcc('Y','1','6',' '), RS2_FORMAT_Y16}, + {rs_fourcc('Y','1','2','I'), RS2_FORMAT_Y12I}, + {rs_fourcc('Z','1','6',' '), RS2_FORMAT_Z16}, + {rs_fourcc('U','Y','V','Y'), RS2_FORMAT_UYVY}, + {rs_fourcc('R','G','B','2'), RS2_FORMAT_BGR8} + + }; + std::map ds5_depth_fourcc_to_rs2_stream = { + {rs_fourcc('G','R','E','Y'), RS2_STREAM_INFRARED}, + {rs_fourcc('Y','8','I',' '), RS2_STREAM_INFRARED}, + {rs_fourcc('W','1','0',' '), RS2_STREAM_INFRARED}, + {rs_fourcc('Y','1','6',' '), RS2_STREAM_INFRARED}, + {rs_fourcc('Y','1','2','I'), RS2_STREAM_INFRARED}, + {rs_fourcc('U','Y','V','Y'), RS2_STREAM_INFRARED}, + {rs_fourcc('R','G','B','2'), RS2_STREAM_INFRARED}, + {rs_fourcc('Z','1','6',' '), RS2_STREAM_DEPTH}, + }; + ds5_auto_exposure_roi_method::ds5_auto_exposure_roi_method( const hw_monitor& hwm, ds::fw_cmd cmd) @@ -102,7 +130,8 @@ namespace librealsense std::vector flash; flash.reserve(flash_size); - get_depth_sensor().invoke_powered([&](platform::uvc_device& dev) + uvc_sensor& raw_depth_sensor = get_raw_depth_sensor(); + raw_depth_sensor.invoke_powered([&](platform::uvc_device& dev) { for (int i = 0; i < max_iterations; i++) { @@ -219,7 +248,8 @@ namespace librealsense if (_is_locked) throw std::runtime_error("this camera is locked and doesn't allow direct flash write, for firmware update use rs2_update_firmware method (DFU)"); - get_depth_sensor().invoke_powered([&](platform::uvc_device& dev) + auto& raw_depth_sensor = get_raw_depth_sensor(); + raw_depth_sensor.invoke_powered([&](platform::uvc_device& dev) { command cmdPFD(ds::PFD); cmdPFD.require_response = false; @@ -248,13 +278,14 @@ namespace librealsense }); } - class ds5_depth_sensor : public uvc_sensor, public video_sensor_interface, public depth_stereo_sensor, public roi_sensor_base + class ds5_depth_sensor : public synthetic_sensor, public video_sensor_interface, public depth_stereo_sensor, public roi_sensor_base { public: explicit ds5_depth_sensor(ds5_device* owner, - std::shared_ptr uvc_device, - std::unique_ptr timestamp_reader) - : uvc_sensor(ds::DEPTH_STEREO, uvc_device, move(timestamp_reader), owner), _owner(owner), _depth_units(-1) + std::shared_ptr uvc_sensor) + : synthetic_sensor("Depth Sensor", uvc_sensor, owner, ds5_depth_fourcc_to_rs2_format, ds5_depth_fourcc_to_rs2_stream), + _owner(owner), + _depth_units(-1) {} processing_blocks get_recommended_processing_blocks() const override @@ -283,7 +314,7 @@ namespace librealsense void open(const stream_profiles& requests) override { _depth_units = get_option(RS2_OPTION_DEPTH_UNITS).query(); - uvc_sensor::open(requests); + synthetic_sensor::open(requests); } /* @@ -297,7 +328,7 @@ namespace librealsense { auto lock = environment::get_instance().get_extrinsics_graph().lock(); - auto results = uvc_sensor::init_stream_profiles(); + auto results = 
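[Editor's note] The new fourcc-to-format maps above key on the 4-character codes reported by the backend. The rs_fourcc() helper itself lives in the modified src/types.h, which is not visible in this hunk; the following stand-in is an assumption, written only to make the mapping concrete, and packs the four characters big-endian.

#include <cstdint>

// Assumed equivalent of the rs_fourcc() helper used by the maps above:
// packs four ASCII characters into one 32-bit key, e.g. rs_fourcc('Y','U','Y','2').
constexpr uint32_t rs_fourcc(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
{
    return (static_cast<uint32_t>(a) << 24) |
           (static_cast<uint32_t>(b) << 16) |
           (static_cast<uint32_t>(c) << 8)  |
            static_cast<uint32_t>(d);
}

static_assert(rs_fourcc('Z', '1', '6', ' ') == 0x5A313620, "Z16 fourcc");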
synthetic_sensor::init_stream_profiles(); auto color_dev = dynamic_cast(&get_device()); auto rolling_shutter_dev = dynamic_cast(&get_device()); @@ -357,7 +388,12 @@ namespace librealsense return results; } - float get_depth_scale() const override { if (_depth_units < 0) _depth_units = get_option(RS2_OPTION_DEPTH_UNITS).query(); return _depth_units; } + float get_depth_scale() const override + { + if (_depth_units < 0) + _depth_units = get_option(RS2_OPTION_DEPTH_UNITS).query(); + return _depth_units; + } void set_depth_scale(float val){ _depth_units = val; } @@ -392,16 +428,15 @@ namespace librealsense { public: explicit ds5u_depth_sensor(ds5u_device* owner, - std::shared_ptr uvc_device, - std::unique_ptr timestamp_reader) - : ds5_depth_sensor(owner, uvc_device, move(timestamp_reader)), _owner(owner) + std::shared_ptr uvc_sensor) + : ds5_depth_sensor(owner, uvc_sensor), _owner(owner) {} stream_profiles init_stream_profiles() override { auto lock = environment::get_instance().get_extrinsics_graph().lock(); - auto results = uvc_sensor::init_stream_profiles(); + auto results = synthetic_sensor::init_stream_profiles(); for (auto p : results) { @@ -512,7 +547,7 @@ namespace librealsense return val; } - std::shared_ptr ds5_device::create_depth_device(std::shared_ptr ctx, + std::shared_ptr ds5_device::create_depth_device(std::shared_ptr ctx, const std::vector& all_device_infos) { using namespace ds; @@ -526,15 +561,19 @@ namespace librealsense std::unique_ptr timestamp_reader_backup(new ds5_timestamp_reader(backend.create_time_service())); std::unique_ptr timestamp_reader_metadata(new ds5_timestamp_reader_from_metadata(std::move(timestamp_reader_backup))); auto enable_global_time_option = std::shared_ptr(new global_time_option()); - auto depth_ep = std::make_shared(this, std::make_shared(depth_devices), - std::unique_ptr(new global_timestamp_reader(std::move(timestamp_reader_metadata), _tf_keeper, enable_global_time_option))); + auto raw_depth_ep = std::make_shared("Raw Depth Sensor", std::make_shared(depth_devices), + std::unique_ptr(new global_timestamp_reader(std::move(timestamp_reader_metadata), _tf_keeper, enable_global_time_option)), this); + + raw_depth_ep->register_xu(depth_xu); // make sure the XU is initialized every time we power the camera + auto depth_ep = std::make_shared(this, raw_depth_ep); depth_ep->register_option(RS2_OPTION_GLOBAL_TIME_ENABLED, enable_global_time_option); - depth_ep->register_xu(depth_xu); // make sure the XU is initialized every time we power the camera - depth_ep->register_pixel_format(pf_z16); // Depth - depth_ep->register_pixel_format(pf_y8); // Left Only - Luminance - depth_ep->register_pixel_format(pf_yuyv); // Left Only + depth_ep->register_processing_block(processing_block_factory::create_id_pbf(RS2_FORMAT_Y8, RS2_STREAM_INFRARED, 1)); + depth_ep->register_processing_block(processing_block_factory::create_id_pbf(RS2_FORMAT_Z16, RS2_STREAM_DEPTH)); + + depth_ep->register_processing_block({ {RS2_FORMAT_W10} }, { {RS2_FORMAT_RAW10, RS2_STREAM_INFRARED, 1} }, []() { return std::make_shared(RS2_FORMAT_RAW10); }); + depth_ep->register_processing_block({ {RS2_FORMAT_W10} }, { {RS2_FORMAT_Y10BPACK, RS2_STREAM_INFRARED, 1} }, []() { return std::make_shared(RS2_FORMAT_Y10BPACK); }); return depth_ep; } @@ -557,20 +596,21 @@ namespace librealsense using namespace ds; auto&& backend = ctx->get_backend(); + auto& raw_sensor = get_raw_depth_sensor(); if (group.usb_devices.size() > 0) { _hw_monitor = std::make_shared( std::make_shared( - 
backend.create_usb_device(group.usb_devices.front()), get_depth_sensor())); + backend.create_usb_device(group.usb_devices.front()), raw_sensor)); } else { _hw_monitor = std::make_shared( std::make_shared( std::make_shared( - get_depth_sensor(), depth_xu, DS5_HWMONITOR), - get_depth_sensor())); + raw_sensor, depth_xu, DS5_HWMONITOR), + raw_sensor)); } // Define Left-to-Right extrinsics calculation (lazy) @@ -610,7 +650,9 @@ namespace librealsense if (_fw_version >= firmware_version("5.10.4.0")) _device_capabilities = parse_device_capabilities(pid); - auto& depth_ep = get_depth_sensor(); + auto& depth_sensor = get_depth_sensor(); + auto& raw_depth_sensor = get_raw_depth_sensor(); + auto advanced_mode = is_camera_in_advanced_mode(); using namespace platform; @@ -619,7 +661,7 @@ namespace librealsense bool usb_modality = (_fw_version >= firmware_version("5.9.8.0")); if (usb_modality) { - _usb_mode = depth_ep.get_usb_specification(); + _usb_mode = raw_depth_sensor.get_usb_specification(); if (usb_spec_names.count(_usb_mode) && (usb_undefined != _usb_mode)) usb_type_str = usb_spec_names.at(_usb_mode); else // Backend fails to provide USB descriptor - occurs with RS3 build. Requires further work @@ -628,16 +670,25 @@ namespace librealsense if (advanced_mode && (_usb_mode >= usb3_type)) { - depth_ep.register_pixel_format(pf_y8i); // L+R - depth_ep.register_pixel_format(pf_y12i); // L+R - Calibration not rectified + depth_sensor.register_processing_block( + { {RS2_FORMAT_Y8I} }, + { {RS2_FORMAT_Y8, RS2_STREAM_INFRARED, 1} , {RS2_FORMAT_Y8, RS2_STREAM_INFRARED, 2} }, + []() { return std::make_shared(); } + ); // L+R + + depth_sensor.register_processing_block( + {RS2_FORMAT_Y12I}, + {{RS2_FORMAT_Y16, RS2_STREAM_INFRARED, 1}, {RS2_FORMAT_Y16, RS2_STREAM_INFRARED, 2}}, + []() {return std::make_shared(); } + ); } auto pid_hex_str = hexify(pid); if ((pid == RS416_PID) && _fw_version >= firmware_version("5.9.13.0")) { - depth_ep.register_option(RS2_OPTION_HARDWARE_PRESET, - std::make_shared>(depth_ep, depth_xu, DS5_HARDWARE_PRESET, + depth_sensor.register_option(RS2_OPTION_HARDWARE_PRESET, + std::make_shared>(raw_depth_sensor, depth_xu, DS5_HARDWARE_PRESET, "Hardware pipe configuration")); } @@ -653,26 +704,25 @@ namespace librealsense _hw_monitor = std::make_shared( std::make_shared( std::make_shared( - get_depth_sensor(), depth_xu, DS5_HWMONITOR), - get_depth_sensor())); + raw_depth_sensor, depth_xu, DS5_HWMONITOR), + raw_depth_sensor)); } #endif - depth_ep.register_pu(RS2_OPTION_GAIN); - auto exposure_option = std::make_shared>(depth_ep, + depth_sensor.register_pu(RS2_OPTION_GAIN); + auto exposure_option = std::make_shared>(raw_depth_sensor, depth_xu, DS5_EXPOSURE, "Depth Exposure (usec)"); - depth_ep.register_option(RS2_OPTION_EXPOSURE, exposure_option); + depth_sensor.register_option(RS2_OPTION_EXPOSURE, exposure_option); - auto enable_auto_exposure = std::make_shared>(depth_ep, + auto enable_auto_exposure = std::make_shared>(raw_depth_sensor, depth_xu, DS5_ENABLE_AUTO_EXPOSURE, "Enable Auto Exposure"); - depth_ep.register_option(RS2_OPTION_ENABLE_AUTO_EXPOSURE, enable_auto_exposure); + depth_sensor.register_option(RS2_OPTION_ENABLE_AUTO_EXPOSURE, enable_auto_exposure); - - depth_ep.register_option(RS2_OPTION_EXPOSURE, + depth_sensor.register_option(RS2_OPTION_EXPOSURE, std::make_shared( exposure_option, enable_auto_exposure)); @@ -680,22 +730,22 @@ namespace librealsense if (_fw_version >= firmware_version("5.5.8.0")) { - depth_ep.register_option(RS2_OPTION_OUTPUT_TRIGGER_ENABLED, - 
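[Editor's note] The Y8I and Y12I registrations above are the composite processing blocks named in the commit message: one interleaved raw stream is split into two user-visible infrared streams (index 1 and 2). Below is a scalar sketch of the Y8I case based on the format description in rs_sensor.h ("8-bit left, 8-bit right"); the shipped y8i-to-y8y8 block in proc/y8i-to-y8y8.cpp is the real, optimized implementation, so this is illustration only.

#include <cstddef>
#include <cstdint>

// Y8I holds left/right infrared pixels interleaved: L0 R0 L1 R1 ...
// Splitting it yields two independent Y8 images of the same resolution.
void split_y8i(const uint8_t* src, uint8_t* left, uint8_t* right, size_t pixel_count)
{
    for (size_t i = 0; i < pixel_count; ++i)
    {
        left[i]  = src[2 * i];      // 8-bit left-imager sample
        right[i] = src[2 * i + 1];  // 8-bit right-imager sample
    }
}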
std::make_shared>(depth_ep, depth_xu, DS5_EXT_TRIGGER, + depth_sensor.register_option(RS2_OPTION_OUTPUT_TRIGGER_ENABLED, + std::make_shared>(raw_depth_sensor, depth_xu, DS5_EXT_TRIGGER, "Generate trigger from the camera to external device once per frame")); - auto error_control = std::unique_ptr>(new uvc_xu_option(depth_ep, depth_xu, DS5_ERROR_REPORTING, "Error reporting")); + auto error_control = std::unique_ptr>(new uvc_xu_option(raw_depth_sensor, depth_xu, DS5_ERROR_REPORTING, "Error reporting")); _polling_error_handler = std::unique_ptr( new polling_error_handler(1000, std::move(error_control), - depth_ep.get_notifications_processor(), + raw_depth_sensor.get_notifications_processor(), std::unique_ptr(new ds5_notification_decoder()))); - depth_ep.register_option(RS2_OPTION_ERROR_POLLING_ENABLED, std::make_shared(_polling_error_handler.get())); + depth_sensor.register_option(RS2_OPTION_ERROR_POLLING_ENABLED, std::make_shared(_polling_error_handler.get())); - depth_ep.register_option(RS2_OPTION_ASIC_TEMPERATURE, - std::make_shared(depth_ep, + depth_sensor.register_option(RS2_OPTION_ASIC_TEMPERATURE, + std::make_shared(raw_depth_sensor, RS2_OPTION_ASIC_TEMPERATURE)); } @@ -703,31 +753,31 @@ namespace librealsense auto mask = d400_caps::CAP_GLOBAL_SHUTTER | d400_caps::CAP_ACTIVE_PROJECTOR; if ((_fw_version >= firmware_version("5.11.3.0")) && ((_device_capabilities & mask) == mask)) { - depth_ep.register_option(RS2_OPTION_EMITTER_ON_OFF, std::make_shared(*_hw_monitor, &depth_ep)); + depth_sensor.register_option(RS2_OPTION_EMITTER_ON_OFF, std::make_shared(*_hw_monitor, &raw_depth_sensor)); } else if (_fw_version >= firmware_version("5.10.9.0") && _fw_version.experimental()) // Not yet available in production firmware { - depth_ep.register_option(RS2_OPTION_EMITTER_ON_OFF, std::make_shared(*_hw_monitor, &depth_ep)); + depth_sensor.register_option(RS2_OPTION_EMITTER_ON_OFF, std::make_shared(*_hw_monitor, &raw_depth_sensor)); } if (_fw_version >= firmware_version("5.9.15.1")) { - get_depth_sensor().register_option(RS2_OPTION_INTER_CAM_SYNC_MODE, + depth_sensor.register_option(RS2_OPTION_INTER_CAM_SYNC_MODE, std::make_shared(*_hw_monitor)); } roi_sensor_interface* roi_sensor; - if ((roi_sensor = dynamic_cast(&depth_ep))) + if ((roi_sensor = dynamic_cast(&raw_depth_sensor))) roi_sensor->set_roi_method(std::make_shared(*_hw_monitor)); - depth_ep.register_option(RS2_OPTION_STEREO_BASELINE, std::make_shared("Distance in mm between the stereo imagers", + depth_sensor.register_option(RS2_OPTION_STEREO_BASELINE, std::make_shared("Distance in mm between the stereo imagers", lazy([this]() { return get_stereo_baseline_mm(); }))); if (advanced_mode && _fw_version >= firmware_version("5.6.3.0")) { auto depth_scale = std::make_shared(*_hw_monitor); - auto depth_sensor = As(&depth_ep); + auto depth_sensor = As(&get_depth_sensor()); assert(depth_sensor); depth_scale->add_observer([depth_sensor](float val) @@ -735,21 +785,21 @@ namespace librealsense depth_sensor->set_depth_scale(val); }); - depth_ep.register_option(RS2_OPTION_DEPTH_UNITS, depth_scale); + depth_sensor->register_option(RS2_OPTION_DEPTH_UNITS, depth_scale); } else - depth_ep.register_option(RS2_OPTION_DEPTH_UNITS, std::make_shared("Number of meters represented by a single depth unit", + depth_sensor.register_option(RS2_OPTION_DEPTH_UNITS, std::make_shared("Number of meters represented by a single depth unit", lazy([]() { return 0.001f; }))); // Metadata registration - depth_ep.register_metadata(RS2_FRAME_METADATA_FRAME_TIMESTAMP, 
make_uvc_header_parser(&uvc_header::timestamp)); + depth_sensor.register_metadata(RS2_FRAME_METADATA_FRAME_TIMESTAMP, make_uvc_header_parser(&uvc_header::timestamp)); // attributes of md_capture_timing auto md_prop_offset = offsetof(metadata_raw, mode) + offsetof(md_depth_mode, depth_y_mode) + offsetof(md_depth_y_normal_mode, intel_capture_timing); - depth_ep.register_metadata(RS2_FRAME_METADATA_FRAME_COUNTER, make_attribute_parser(&md_capture_timing::frame_counter, md_capture_timing_attributes::frame_counter_attribute, md_prop_offset)); - depth_ep.register_metadata(RS2_FRAME_METADATA_SENSOR_TIMESTAMP, make_rs400_sensor_ts_parser(make_uvc_header_parser(&uvc_header::timestamp), + depth_sensor.register_metadata(RS2_FRAME_METADATA_FRAME_COUNTER, make_attribute_parser(&md_capture_timing::frame_counter, md_capture_timing_attributes::frame_counter_attribute, md_prop_offset)); + depth_sensor.register_metadata(RS2_FRAME_METADATA_SENSOR_TIMESTAMP, make_rs400_sensor_ts_parser(make_uvc_header_parser(&uvc_header::timestamp), make_attribute_parser(&md_capture_timing::sensor_timestamp, md_capture_timing_attributes::sensor_timestamp_attribute, md_prop_offset))); // attributes of md_capture_stats @@ -757,34 +807,34 @@ namespace librealsense offsetof(md_depth_mode, depth_y_mode) + offsetof(md_depth_y_normal_mode, intel_capture_stats); - depth_ep.register_metadata(RS2_FRAME_METADATA_WHITE_BALANCE, make_attribute_parser(&md_capture_stats::white_balance, md_capture_stat_attributes::white_balance_attribute, md_prop_offset)); + depth_sensor.register_metadata(RS2_FRAME_METADATA_WHITE_BALANCE, make_attribute_parser(&md_capture_stats::white_balance, md_capture_stat_attributes::white_balance_attribute, md_prop_offset)); // attributes of md_depth_control md_prop_offset = offsetof(metadata_raw, mode) + offsetof(md_depth_mode, depth_y_mode) + offsetof(md_depth_y_normal_mode, intel_depth_control); - depth_ep.register_metadata(RS2_FRAME_METADATA_GAIN_LEVEL, make_attribute_parser(&md_depth_control::manual_gain, md_depth_control_attributes::gain_attribute, md_prop_offset)); - depth_ep.register_metadata(RS2_FRAME_METADATA_ACTUAL_EXPOSURE, make_attribute_parser(&md_depth_control::manual_exposure, md_depth_control_attributes::exposure_attribute, md_prop_offset)); - depth_ep.register_metadata(RS2_FRAME_METADATA_AUTO_EXPOSURE, make_attribute_parser(&md_depth_control::auto_exposure_mode, md_depth_control_attributes::ae_mode_attribute, md_prop_offset)); + depth_sensor.register_metadata(RS2_FRAME_METADATA_GAIN_LEVEL, make_attribute_parser(&md_depth_control::manual_gain, md_depth_control_attributes::gain_attribute, md_prop_offset)); + depth_sensor.register_metadata(RS2_FRAME_METADATA_ACTUAL_EXPOSURE, make_attribute_parser(&md_depth_control::manual_exposure, md_depth_control_attributes::exposure_attribute, md_prop_offset)); + depth_sensor.register_metadata(RS2_FRAME_METADATA_AUTO_EXPOSURE, make_attribute_parser(&md_depth_control::auto_exposure_mode, md_depth_control_attributes::ae_mode_attribute, md_prop_offset)); - depth_ep.register_metadata(RS2_FRAME_METADATA_FRAME_LASER_POWER, make_attribute_parser(&md_depth_control::laser_power, md_depth_control_attributes::laser_pwr_attribute, md_prop_offset)); - depth_ep.register_metadata(RS2_FRAME_METADATA_FRAME_LASER_POWER_MODE, make_attribute_parser(&md_depth_control::laserPowerMode, md_depth_control_attributes::laser_pwr_attribute, md_prop_offset)); - depth_ep.register_metadata(RS2_FRAME_METADATA_EXPOSURE_PRIORITY, make_attribute_parser(&md_depth_control::exposure_priority, 
md_depth_control_attributes::exposure_priority_attribute, md_prop_offset)); - depth_ep.register_metadata(RS2_FRAME_METADATA_EXPOSURE_ROI_LEFT, make_attribute_parser(&md_depth_control::exposure_roi_left, md_depth_control_attributes::roi_attribute, md_prop_offset)); - depth_ep.register_metadata(RS2_FRAME_METADATA_EXPOSURE_ROI_RIGHT, make_attribute_parser(&md_depth_control::exposure_roi_right, md_depth_control_attributes::roi_attribute, md_prop_offset)); - depth_ep.register_metadata(RS2_FRAME_METADATA_EXPOSURE_ROI_TOP, make_attribute_parser(&md_depth_control::exposure_roi_top, md_depth_control_attributes::roi_attribute, md_prop_offset)); - depth_ep.register_metadata(RS2_FRAME_METADATA_EXPOSURE_ROI_BOTTOM, make_attribute_parser(&md_depth_control::exposure_roi_bottom, md_depth_control_attributes::roi_attribute, md_prop_offset)); + depth_sensor.register_metadata(RS2_FRAME_METADATA_FRAME_LASER_POWER, make_attribute_parser(&md_depth_control::laser_power, md_depth_control_attributes::laser_pwr_attribute, md_prop_offset)); + depth_sensor.register_metadata(RS2_FRAME_METADATA_FRAME_LASER_POWER_MODE, make_attribute_parser(&md_depth_control::laserPowerMode, md_depth_control_attributes::laser_pwr_attribute, md_prop_offset)); + depth_sensor.register_metadata(RS2_FRAME_METADATA_EXPOSURE_PRIORITY, make_attribute_parser(&md_depth_control::exposure_priority, md_depth_control_attributes::exposure_priority_attribute, md_prop_offset)); + depth_sensor.register_metadata(RS2_FRAME_METADATA_EXPOSURE_ROI_LEFT, make_attribute_parser(&md_depth_control::exposure_roi_left, md_depth_control_attributes::roi_attribute, md_prop_offset)); + depth_sensor.register_metadata(RS2_FRAME_METADATA_EXPOSURE_ROI_RIGHT, make_attribute_parser(&md_depth_control::exposure_roi_right, md_depth_control_attributes::roi_attribute, md_prop_offset)); + depth_sensor.register_metadata(RS2_FRAME_METADATA_EXPOSURE_ROI_TOP, make_attribute_parser(&md_depth_control::exposure_roi_top, md_depth_control_attributes::roi_attribute, md_prop_offset)); + depth_sensor.register_metadata(RS2_FRAME_METADATA_EXPOSURE_ROI_BOTTOM, make_attribute_parser(&md_depth_control::exposure_roi_bottom, md_depth_control_attributes::roi_attribute, md_prop_offset)); // md_configuration - will be used for internal validation only md_prop_offset = offsetof(metadata_raw, mode) + offsetof(md_depth_mode, depth_y_mode) + offsetof(md_depth_y_normal_mode, intel_configuration); - depth_ep.register_metadata((rs2_frame_metadata_value)RS2_FRAME_METADATA_HW_TYPE, make_attribute_parser(&md_configuration::hw_type, md_configuration_attributes::hw_type_attribute, md_prop_offset)); - depth_ep.register_metadata((rs2_frame_metadata_value)RS2_FRAME_METADATA_SKU_ID, make_attribute_parser(&md_configuration::sku_id, md_configuration_attributes::sku_id_attribute, md_prop_offset)); - depth_ep.register_metadata((rs2_frame_metadata_value)RS2_FRAME_METADATA_FORMAT, make_attribute_parser(&md_configuration::format, md_configuration_attributes::format_attribute, md_prop_offset)); - depth_ep.register_metadata((rs2_frame_metadata_value)RS2_FRAME_METADATA_WIDTH, make_attribute_parser(&md_configuration::width, md_configuration_attributes::width_attribute, md_prop_offset)); - depth_ep.register_metadata((rs2_frame_metadata_value)RS2_FRAME_METADATA_HEIGHT, make_attribute_parser(&md_configuration::height, md_configuration_attributes::height_attribute, md_prop_offset)); - depth_ep.register_metadata((rs2_frame_metadata_value)RS2_FRAME_METADATA_ACTUAL_FPS, std::make_shared ()); + 
depth_sensor.register_metadata((rs2_frame_metadata_value)RS2_FRAME_METADATA_HW_TYPE, make_attribute_parser(&md_configuration::hw_type, md_configuration_attributes::hw_type_attribute, md_prop_offset)); + depth_sensor.register_metadata((rs2_frame_metadata_value)RS2_FRAME_METADATA_SKU_ID, make_attribute_parser(&md_configuration::sku_id, md_configuration_attributes::sku_id_attribute, md_prop_offset)); + depth_sensor.register_metadata((rs2_frame_metadata_value)RS2_FRAME_METADATA_FORMAT, make_attribute_parser(&md_configuration::format, md_configuration_attributes::format_attribute, md_prop_offset)); + depth_sensor.register_metadata((rs2_frame_metadata_value)RS2_FRAME_METADATA_WIDTH, make_attribute_parser(&md_configuration::width, md_configuration_attributes::width_attribute, md_prop_offset)); + depth_sensor.register_metadata((rs2_frame_metadata_value)RS2_FRAME_METADATA_HEIGHT, make_attribute_parser(&md_configuration::height, md_configuration_attributes::height_attribute, md_prop_offset)); + depth_sensor.register_metadata((rs2_frame_metadata_value)RS2_FRAME_METADATA_ACTUAL_FPS, std::make_shared ()); register_info(RS2_CAMERA_INFO_NAME, device_name); register_info(RS2_CAMERA_INFO_SERIAL_NUMBER, optic_serial); @@ -861,7 +911,7 @@ namespace librealsense return ts; } - std::shared_ptr ds5u_device::create_ds5u_depth_device(std::shared_ptr ctx, + std::shared_ptr ds5u_device::create_ds5u_depth_device(std::shared_ptr ctx, const std::vector& all_device_infos) { using namespace ds; @@ -876,18 +926,22 @@ namespace librealsense std::unique_ptr ds5_timestamp_reader_metadata(new ds5_timestamp_reader_from_metadata(std::move(ds5_timestamp_reader_backup))); auto enable_global_time_option = std::shared_ptr(new global_time_option()); - auto depth_ep = std::make_shared(this, std::make_shared(depth_devices), - std::unique_ptr(new global_timestamp_reader(std::move(ds5_timestamp_reader_metadata), _tf_keeper, enable_global_time_option))); + auto raw_depth_ep = std::make_shared("Depth Sensor", std::make_shared(depth_devices), std::unique_ptr(new global_timestamp_reader(std::move(ds5_timestamp_reader_metadata), _tf_keeper, enable_global_time_option)), this); + auto depth_ep = std::make_shared(this, raw_depth_ep); depth_ep->register_option(RS2_OPTION_GLOBAL_TIME_ENABLED, enable_global_time_option); - depth_ep->register_xu(depth_xu); // make sure the XU is initialized every time we power the camera + raw_depth_ep->register_xu(depth_xu); // make sure the XU is initialized every time we power the camera - depth_ep->register_pixel_format(pf_z16); // Depth + depth_ep->register_processing_block(processing_block_factory::create_id_pbf(RS2_FORMAT_Z16, RS2_STREAM_DEPTH)); + depth_ep->register_processing_block({ {RS2_FORMAT_W10} }, { {RS2_FORMAT_RAW10, RS2_STREAM_INFRARED, 1} }, []() { return std::make_shared(RS2_FORMAT_RAW10); }); + depth_ep->register_processing_block({ {RS2_FORMAT_W10} }, { {RS2_FORMAT_Y10BPACK, RS2_STREAM_INFRARED, 1} }, []() { return std::make_shared(RS2_FORMAT_Y10BPACK); }); - // Support DS5U-specific pixel format - depth_ep->register_pixel_format(pf_w10); - depth_ep->register_pixel_format(pf_uyvyl); + depth_ep->register_processing_block({ {RS2_FORMAT_UYVY} }, { {RS2_FORMAT_RGB8, RS2_STREAM_INFRARED} }, []() { return std::make_shared(RS2_FORMAT_RGB8, RS2_STREAM_INFRARED); }); + depth_ep->register_processing_block({ {RS2_FORMAT_UYVY} }, { {RS2_FORMAT_RGBA8, RS2_STREAM_INFRARED} }, []() { return std::make_shared(RS2_FORMAT_RGBA8, RS2_STREAM_INFRARED); }); + depth_ep->register_processing_block({ {RS2_FORMAT_UYVY} 
}, { {RS2_FORMAT_BGR8, RS2_STREAM_INFRARED} }, []() { return std::make_shared(RS2_FORMAT_BGR8, RS2_STREAM_INFRARED); }); + depth_ep->register_processing_block({ {RS2_FORMAT_UYVY} }, { {RS2_FORMAT_BGRA8, RS2_STREAM_INFRARED} }, []() { return std::make_shared(RS2_FORMAT_BGRA8, RS2_STREAM_INFRARED); }); + depth_ep->register_processing_block(processing_block_factory::create_id_pbf(RS2_FORMAT_UYVY, RS2_STREAM_INFRARED)); return depth_ep; } @@ -905,12 +959,6 @@ namespace librealsense auto& depth_ep = get_depth_sensor(); - if (!is_camera_in_advanced_mode()) - { - depth_ep.remove_pixel_format(pf_y8i); // L+R - depth_ep.remove_pixel_format(pf_y12i); // L+R - } - // Inhibit specific unresolved options depth_ep.unregister_option(RS2_OPTION_OUTPUT_TRIGGER_ENABLED); depth_ep.unregister_option(RS2_OPTION_ERROR_POLLING_ENABLED); @@ -921,7 +969,7 @@ namespace librealsense auto pid = group.uvc_devices.front().pid; if (pid != RS_USB2_PID) { - auto& depth_ep = get_depth_sensor(); + auto& depth_ep = get_raw_depth_sensor(); auto emitter_enabled = std::make_shared(depth_ep); depth_ep.register_option(RS2_OPTION_EMITTER_ENABLED, emitter_enabled); diff --git a/src/ds5/ds5-device.h b/src/ds5/ds5-device.h index 2ac3ea7cbb..899abf3331 100644 --- a/src/ds5/ds5-device.h +++ b/src/ds5/ds5-device.h @@ -31,12 +31,18 @@ namespace librealsense class ds5_device : public virtual device, public debug_interface, public global_time_interface, public updatable { public: - std::shared_ptr create_depth_device(std::shared_ptr ctx, + std::shared_ptr create_depth_device(std::shared_ptr ctx, const std::vector& all_device_infos); - uvc_sensor& get_depth_sensor() + synthetic_sensor& get_depth_sensor() { - return dynamic_cast(get_sensor(_depth_device_idx)); + return dynamic_cast(get_sensor(_depth_device_idx)); + } + + uvc_sensor& get_raw_depth_sensor() + { + synthetic_sensor& depth_sensor = get_depth_sensor(); + return dynamic_cast(*depth_sensor.get_raw_sensor()); } ds5_device(std::shared_ptr ctx, @@ -94,7 +100,7 @@ namespace librealsense ds5u_device(std::shared_ptr ctx, const platform::backend_device_group& group); - std::shared_ptr create_ds5u_depth_device(std::shared_ptr ctx, + std::shared_ptr create_ds5u_depth_device(std::shared_ptr ctx, const std::vector& all_device_infos); protected: diff --git a/src/ds5/ds5-motion.cpp b/src/ds5/ds5-motion.cpp index 36b427fa03..5063728ef4 100644 --- a/src/ds5/ds5-motion.cpp +++ b/src/ds5/ds5-motion.cpp @@ -1,9 +1,12 @@ // License: Apache 2.0. See LICENSE file in root directory. // Copyright(c) 2016 Intel Corporation. All Rights Reserved. 
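For orientation: the hunks above replace the old register_pixel_format(pf_*) registrations with a two-layer design, where a synthetic_sensor wraps the raw uvc_sensor (XU controls stay on the raw endpoint, reached via get_raw_depth_sensor()) and declares its format conversions up front through register_processing_block(source-formats, target-formats, factory), or processing_block_factory::create_id_pbf() for pass-through formats. The standalone sketch below only models the shape of that split; raw_sensor, wrapped_sensor and conversion are simplified stand-ins, not librealsense's real classes or signatures.

    #include <functional>
    #include <memory>
    #include <string>
    #include <vector>

    // Illustrative stand-ins only; the real classes are librealsense::uvc_sensor,
    // synthetic_sensor and processing_block, whose full interfaces are not shown here.
    struct raw_sensor                       // plays the role of the backend uvc_sensor
    {
        void register_xu(const std::string&) { /* talk to the backend */ }
    };

    struct conversion                       // one "processing block" entry
    {
        std::string from;                   // e.g. "UYVY"
        std::string to;                     // e.g. "RGB8 / INFRARED"
        std::function<void()> make_block;   // factory, invoked when the profile is used
    };

    class wrapped_sensor                    // plays the role of synthetic_sensor
    {
    public:
        explicit wrapped_sensor(std::shared_ptr<raw_sensor> raw) : _raw(std::move(raw)) {}

        std::shared_ptr<raw_sensor> get_raw_sensor() const { return _raw; }

        void register_processing_block(conversion c) { _conversions.push_back(std::move(c)); }

    private:
        std::shared_ptr<raw_sensor> _raw;
        std::vector<conversion> _conversions;
    };

    int main()
    {
        auto raw = std::make_shared<raw_sensor>();
        wrapped_sensor depth(raw);

        // XU / backend controls stay on the raw sensor, as in the patch.
        depth.get_raw_sensor()->register_xu("depth_xu");

        // Conversions are declared up front; identity entries pass Z16 through,
        // converter entries turn e.g. UYVY into RGB8 on demand.
        depth.register_processing_block({ "Z16",  "Z16 / DEPTH",     [] { /* identity */ } });
        depth.register_processing_block({ "UYVY", "RGB8 / INFRARED", [] { /* uyvy -> rgb8 */ } });
        return 0;
    }

The point of keeping a registration table rather than fixed pixel formats is that the synthetic sensor can expose profiles the hardware never produces directly (RGB8/RGBA8/BGR8/BGRA8 from UYVY, RAW10/Y10BPACK from W10) and instantiate only the converter for the profile that is actually opened.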
+#include "ds5-motion.h" + #include #include #include +#include #include #include @@ -15,13 +18,24 @@ #include "ds5-timestamp.h" #include "ds5-options.h" #include "ds5-private.h" -#include "ds5-motion.h" #include "core/motion.h" #include "stream.h" #include "environment.h" +#include "proc/motion-transform.h" +#include "proc/identity-processing-block.h" +#include "proc/auto-exposure-processor.h" namespace librealsense { + std::map fisheye_fourcc_to_rs2_format = { + {rs_fourcc('R','A','W','8'), RS2_FORMAT_RAW8}, + {rs_fourcc('G','R','E','Y'), RS2_FORMAT_RAW8}, + }; + std::map fisheye_fourcc_to_rs2_stream = { + {rs_fourcc('R','A','W','8'), RS2_STREAM_FISHEYE}, + {rs_fourcc('G','R','E','Y'), RS2_STREAM_FISHEYE}, + }; + class fisheye_auto_exposure_roi_method : public region_of_interest_method { public: @@ -45,18 +59,16 @@ namespace librealsense region_of_interest _roi{}; }; - class ds5_hid_sensor : public hid_sensor + class ds5_hid_sensor : public synthetic_sensor { public: - explicit ds5_hid_sensor(ds5_motion* owner, std::shared_ptr hid_device, - std::unique_ptr hid_iio_timestamp_reader, - std::unique_ptr custom_hid_timestamp_reader, - std::map> fps_and_sampling_frequency_per_rs2_stream, - std::vector> sensor_name_and_hid_profiles) - : hid_sensor(hid_device, move(hid_iio_timestamp_reader), move(custom_hid_timestamp_reader), - fps_and_sampling_frequency_per_rs2_stream, sensor_name_and_hid_profiles, owner), _owner(owner) - { - } + explicit ds5_hid_sensor(std::string name, + std::shared_ptr sensor, + device* device, + ds5_motion* owner) + : synthetic_sensor(name, sensor, device), + _owner(owner) + {} rs2_motion_device_intrinsic get_motion_intrinsics(rs2_stream stream) const { @@ -66,7 +78,7 @@ namespace librealsense stream_profiles init_stream_profiles() override { auto lock = environment::get_instance().get_extrinsics_graph().lock(); - auto results = hid_sensor::init_stream_profiles(); + auto results = synthetic_sensor::init_stream_profiles(); for (auto p : results) { @@ -80,7 +92,7 @@ namespace librealsense if (p->get_stream_type() == RS2_STREAM_ACCEL || p->get_stream_type() == RS2_STREAM_GYRO) { auto motion = dynamic_cast(p.get()); - assert(motion); //Expecting to succeed for motion stream (since we are under the "if") + assert(motion); auto st = p->get_stream_type(); motion->set_intrinsics([this, st]() { return get_motion_intrinsics(st); }); } @@ -93,12 +105,14 @@ namespace librealsense const ds5_motion* _owner; }; - class ds5_fisheye_sensor : public uvc_sensor, public video_sensor_interface, public roi_sensor_base + class ds5_fisheye_sensor : public synthetic_sensor, public video_sensor_interface, public roi_sensor_base // TODO - Ariel change to synthetic sensor { public: - explicit ds5_fisheye_sensor(ds5_motion* owner, std::shared_ptr uvc_device, - std::unique_ptr timestamp_reader) - : uvc_sensor("Wide FOV Camera", uvc_device, move(timestamp_reader), owner), _owner(owner) + explicit ds5_fisheye_sensor(std::shared_ptr sensor, + device* device, + ds5_motion* owner) + : synthetic_sensor("Wide FOV Camera", sensor, device, fisheye_fourcc_to_rs2_format, fisheye_fourcc_to_rs2_stream), + _owner(owner) {} rs2_intrinsics get_intrinsics(const stream_profile& profile) const override @@ -113,7 +127,7 @@ namespace librealsense { auto lock = environment::get_instance().get_extrinsics_graph().lock(); - auto results = uvc_sensor::init_stream_profiles(); + auto results = synthetic_sensor::init_stream_profiles(); for (auto p : results) { // Register stream types @@ -137,6 +151,12 @@ namespace librealsense 
return results; } + + std::shared_ptr get_raw_sensor() + { + auto uvc_raw_sensor = As(get_raw_sensor()); + return uvc_raw_sensor; + } private: const ds5_motion* _owner; }; @@ -152,7 +172,7 @@ namespace librealsense throw std::runtime_error(to_string() << "Motion Intrinsics unknown for stream " << rs2_stream_to_string(stream) << "!"); } - std::shared_ptr ds5_motion::create_hid_device(std::shared_ptr ctx, + std::shared_ptr ds5_motion::create_hid_device(std::shared_ptr ctx, const std::vector& all_hid_infos, const firmware_version& camera_fw_version) { @@ -178,79 +198,113 @@ namespace librealsense for (auto&& elem : accel_fps_rates) { - sensor_name_and_hid_profiles.push_back({ accel_sensor_name, { RS2_STREAM_ACCEL, 0, 1, 1, static_cast(elem), RS2_FORMAT_MOTION_XYZ32F } }); + sensor_name_and_hid_profiles.push_back({ accel_sensor_name, { RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_ACCEL, 0, 1, 1, static_cast(elem)} }); fps_and_frequency_map.emplace(unsigned(elem), hid_fps_translation.at(elem)); } fps_and_sampling_frequency_per_rs2_stream[RS2_STREAM_ACCEL] = fps_and_frequency_map; - auto hid_ep = std::make_shared(this, ctx->get_backend().create_hid_device(all_hid_infos.front()), - std::unique_ptr(new global_timestamp_reader(std::move(iio_hid_ts_reader), _tf_keeper, enable_global_time_option)), - std::unique_ptr(new global_timestamp_reader(std::move(custom_hid_ts_reader), _tf_keeper, enable_global_time_option)), - fps_and_sampling_frequency_per_rs2_stream, - sensor_name_and_hid_profiles); + auto raw_hid_ep = std::make_shared(ctx->get_backend().create_hid_device(all_hid_infos.front()), + std::unique_ptr(new global_timestamp_reader(std::move(iio_hid_ts_reader), _tf_keeper, enable_global_time_option)), + std::unique_ptr(new global_timestamp_reader(std::move(custom_hid_ts_reader), _tf_keeper, enable_global_time_option)), + fps_and_sampling_frequency_per_rs2_stream, + sensor_name_and_hid_profiles, + this); + + auto hid_ep = std::make_shared("Motion Module", raw_hid_ep, this, this); hid_ep->register_option(RS2_OPTION_GLOBAL_TIME_ENABLED, enable_global_time_option); - hid_ep->register_pixel_format(pf_accel_axes); - hid_ep->register_pixel_format(pf_gyro_axes); + + // register pre-processing + bool enable_motion_correction = false; + if (hid_ep->supports_option(RS2_OPTION_ENABLE_MOTION_CORRECTION)) + { + auto&& motion_correction_opt = hid_ep->get_option(RS2_OPTION_ENABLE_MOTION_CORRECTION); + enable_motion_correction = motion_correction_opt.is_enabled(); + } + hid_ep->register_processing_block( + { {RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_ACCEL} }, + { {RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_ACCEL} }, + [&, enable_motion_correction]() { return std::make_shared(_mm_calib.get(), enable_motion_correction); + }); + + hid_ep->register_processing_block( + { {RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_GYRO} }, + { {RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_GYRO} }, + [&, enable_motion_correction]() { return std::make_shared(_mm_calib.get(), enable_motion_correction); + }); uint16_t pid = static_cast(strtoul(all_hid_infos.front().pid.data(), nullptr, 16)); if ((camera_fw_version >= firmware_version(custom_sensor_fw_ver)) && (!val_in_range(pid, { ds::RS400_IMU_PID, ds::RS435I_PID, ds::RS430I_PID, ds::RS465_PID }))) { hid_ep->register_option(RS2_OPTION_MOTION_MODULE_TEMPERATURE, - std::make_shared(*hid_ep)); - hid_ep->register_pixel_format(pf_gpio_timestamp); + std::make_shared(*raw_hid_ep)); } return hid_ep; } - std::shared_ptr ds5_motion::register_auto_exposure_options(uvc_sensor* uvc_ep, const platform::extension_unit* fisheye_xu) + 
std::shared_ptr ds5_motion::register_auto_exposure_options(synthetic_sensor* ep, const platform::extension_unit* fisheye_xu) { - auto gain_option = std::make_shared(*uvc_ep, RS2_OPTION_GAIN); + auto uvc_raw_sensor = As(ep->get_raw_sensor()); + auto gain_option = std::make_shared(*uvc_raw_sensor, RS2_OPTION_GAIN); - auto exposure_option = std::make_shared>(*uvc_ep, + auto exposure_option = std::make_shared>(*uvc_raw_sensor, *fisheye_xu, librealsense::ds::FISHEYE_EXPOSURE, "Exposure time of Fisheye camera"); auto ae_state = std::make_shared(); auto auto_exposure = std::make_shared(*gain_option, *exposure_option, *ae_state); - auto auto_exposure_option = std::make_shared(uvc_ep, + auto auto_exposure_option = std::make_shared(ep, auto_exposure, ae_state, option_range{0, 1, 1, 1}); - uvc_ep->register_option(RS2_OPTION_ENABLE_AUTO_EXPOSURE,auto_exposure_option); + ep->register_option(RS2_OPTION_ENABLE_AUTO_EXPOSURE,auto_exposure_option); - uvc_ep->register_option(RS2_OPTION_AUTO_EXPOSURE_MODE, + ep->register_option(RS2_OPTION_AUTO_EXPOSURE_MODE, std::make_shared(auto_exposure, ae_state, option_range{0, 2, 1, 0}, std::map{{0.f, "Static"}, {1.f, "Anti-Flicker"}, {2.f, "Hybrid"}})); - uvc_ep->register_option(RS2_OPTION_AUTO_EXPOSURE_CONVERGE_STEP, + ep->register_option(RS2_OPTION_AUTO_EXPOSURE_CONVERGE_STEP, std::make_shared(auto_exposure, ae_state, option_range{ 0.1f, 1.0f, 0.1f, ae_step_default_value })); - uvc_ep->register_option(RS2_OPTION_POWER_LINE_FREQUENCY, + ep->register_option(RS2_OPTION_POWER_LINE_FREQUENCY, std::make_shared(auto_exposure, ae_state, option_range{50, 60, 10, 60}, std::map{{50.f, "50Hz"}, {60.f, "60Hz"}})); - uvc_ep->register_option(RS2_OPTION_GAIN, + ep->register_option(RS2_OPTION_GAIN, std::make_shared( gain_option, auto_exposure_option)); - uvc_ep->register_option(RS2_OPTION_EXPOSURE, + ep->register_option(RS2_OPTION_EXPOSURE, std::make_shared( exposure_option, auto_exposure_option)); + ep->register_processing_block( + { {RS2_FORMAT_RAW8}}, + { {RS2_FORMAT_RAW8, RS2_STREAM_FISHEYE} }, + [auto_exposure_option]() { + auto id = std::make_shared(); + auto ae = std::make_shared(RS2_STREAM_FISHEYE, *auto_exposure_option); + + auto cpb = std::make_shared(); + cpb->add(id); + cpb->add(ae); + + return cpb; + } + ); return auto_exposure; } @@ -288,47 +342,13 @@ namespace librealsense { _motion_module_device_idx = static_cast(add_sensor(hid_ep)); - std::function align_imu_axes = nullptr; - - // Perform basic IMU transformation to align orientation with Depth sensor CS. 
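The fisheye auto-exposure hook above no longer relies on register_on_before_frame_callback; instead the registered factory returns an identity block chained with an auto-exposure processor inside a composite block (cpb->add(id); cpb->add(ae)). A minimal sketch of that chaining idea follows, using simplified stand-in types; block, composite_block and the lambdas below are illustrative and not the library's composite_processing_block interface.

    #include <functional>
    #include <iostream>
    #include <vector>

    // Stand-in for a processing block: takes a frame payload, returns a processed one.
    using frame_data = std::vector<unsigned char>;
    using block = std::function<frame_data(frame_data)>;

    // Stand-in for a composite block: applies its children in insertion order.
    class composite_block
    {
    public:
        void add(block b) { _blocks.push_back(std::move(b)); }

        frame_data process(frame_data f) const
        {
            for (const auto& b : _blocks)
                f = b(std::move(f));
            return f;
        }

    private:
        std::vector<block> _blocks;
    };

    int main()
    {
        composite_block cpb;

        // First stage: identity, passes the raw fisheye frame through unchanged.
        cpb.add([](frame_data f) { return f; });

        // Second stage: placeholder for the auto-exposure step, which in the patch
        // tags the frame and feeds it to the auto-exposure mechanism.
        cpb.add([](frame_data f) {
            std::cout << "auto-exposure step saw " << f.size() << " bytes\n";
            return f;
        });

        cpb.process(frame_data(640 * 480, 0));  // one dummy RAW8 fisheye frame
        return 0;
    }

Ordering matters here: children run in the order they were added, so the identity stage hands the untouched RAW8 frame to the auto-exposure stage.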
- try - { - float3x3 imu_to_depth = _mm_calib->imu_to_depth_alignment(); - align_imu_axes = [imu_to_depth](rs2_stream stream, frame_interface* fr, callback_invocation_holder callback) - { - if (fr->get_stream()->get_format() == RS2_FORMAT_MOTION_XYZ32F) - { - auto xyz = (float3*)(fr->get_frame_data()); - - // The IMU sensor orientation shall be aligned with depth sensor's coordinate system - *xyz = imu_to_depth * (*xyz); - } - }; - } - catch (const std::exception& ex){ - LOG_INFO("Motion Module - no extrinsic calibration, " << ex.what()); - } - - try - { - hid_ep->register_option(RS2_OPTION_ENABLE_MOTION_CORRECTION, - std::make_shared(hid_ep.get(), - *_accel_intrinsic, - *_gyro_intrinsic, - _depth_to_imu, - align_imu_axes, // The motion correction callback also includes the axes rotation routine - option_range{ 0, 1, 1, 1 })); - } - catch (const std::exception& ex) - { - LOG_INFO("Motion Module - no intrinsic calibration, " << ex.what()); - - // transform IMU axes if supported - hid_ep->register_on_before_frame_callback(align_imu_axes); - } + hid_ep->register_option(RS2_OPTION_ENABLE_MOTION_CORRECTION, + std::make_shared(hid_ep.get(), + _depth_to_imu, + option_range{ 0, 1, 1, 1 })); // HID metadata attributes - hid_ep->register_metadata(RS2_FRAME_METADATA_FRAME_TIMESTAMP, make_hid_header_parser(&platform::hid_header::timestamp)); + hid_ep->get_raw_sensor()->register_metadata(RS2_FRAME_METADATA_FRAME_TIMESTAMP, make_hid_header_parser(&platform::hid_header::timestamp)); } } @@ -362,13 +382,12 @@ namespace librealsense auto&& backend = ctx->get_backend(); std::unique_ptr ds5_timestamp_reader_metadata(new ds5_timestamp_reader_from_metadata(std::move(ds5_timestamp_reader_backup))); auto enable_global_time_option = std::shared_ptr(new global_time_option()); - auto fisheye_ep = std::make_shared(this, backend.create_uvc_device(fisheye_infos.front()), - std::unique_ptr(new global_timestamp_reader(std::move(ds5_timestamp_reader_metadata), _tf_keeper, enable_global_time_option))); + auto raw_fisheye_ep = std::make_shared("FishEye Sensor", backend.create_uvc_device(fisheye_infos.front()), + std::unique_ptr(new global_timestamp_reader(std::move(ds5_timestamp_reader_metadata), _tf_keeper, enable_global_time_option)), this); + auto fisheye_ep = std::make_shared(raw_fisheye_ep, this, this); fisheye_ep->register_option(RS2_OPTION_GLOBAL_TIME_ENABLED, enable_global_time_option); - fisheye_ep->register_xu(fisheye_xu); // make sure the XU is initialized everytime we power the camera - fisheye_ep->register_pixel_format(pf_raw8); - fisheye_ep->register_pixel_format(pf_fe_raw8_unpatched_kernel); // W/O for unpatched kernel + raw_fisheye_ep->register_xu(fisheye_xu); // make sure the XU is initialized everytime we power the camera if (_fw_version >= firmware_version("5.6.3.0")) // Create Auto Exposure controls from FW version 5.6.3.0 { @@ -378,26 +397,26 @@ namespace librealsense else { fisheye_ep->register_option(RS2_OPTION_GAIN, - std::make_shared(*fisheye_ep.get(), + std::make_shared(*raw_fisheye_ep.get(), RS2_OPTION_GAIN)); fisheye_ep->register_option(RS2_OPTION_EXPOSURE, - std::make_shared>(*fisheye_ep.get(), + std::make_shared>(*raw_fisheye_ep.get(), fisheye_xu, librealsense::ds::FISHEYE_EXPOSURE, "Exposure time of Fisheye camera")); } // Metadata registration - fisheye_ep->register_metadata(RS2_FRAME_METADATA_FRAME_TIMESTAMP, make_uvc_header_parser(&platform::uvc_header::timestamp)); - fisheye_ep->register_metadata(RS2_FRAME_METADATA_AUTO_EXPOSURE, 
make_additional_data_parser(&frame_additional_data::fisheye_ae_mode)); + raw_fisheye_ep->register_metadata(RS2_FRAME_METADATA_FRAME_TIMESTAMP, make_uvc_header_parser(&platform::uvc_header::timestamp)); + raw_fisheye_ep->register_metadata(RS2_FRAME_METADATA_AUTO_EXPOSURE, make_additional_data_parser(&frame_additional_data::fisheye_ae_mode)); // attributes of md_capture_timing auto md_prop_offset = offsetof(metadata_raw, mode) + offsetof(md_fisheye_mode, fisheye_mode) + offsetof(md_fisheye_normal_mode, intel_capture_timing); - fisheye_ep->register_metadata(RS2_FRAME_METADATA_FRAME_COUNTER, make_attribute_parser(&md_capture_timing::frame_counter, md_capture_timing_attributes::frame_counter_attribute,md_prop_offset)); - fisheye_ep->register_metadata(RS2_FRAME_METADATA_SENSOR_TIMESTAMP, make_rs400_sensor_ts_parser(make_uvc_header_parser(&platform::uvc_header::timestamp), + raw_fisheye_ep->register_metadata(RS2_FRAME_METADATA_FRAME_COUNTER, make_attribute_parser(&md_capture_timing::frame_counter, md_capture_timing_attributes::frame_counter_attribute,md_prop_offset)); + raw_fisheye_ep->register_metadata(RS2_FRAME_METADATA_SENSOR_TIMESTAMP, make_rs400_sensor_ts_parser(make_uvc_header_parser(&platform::uvc_header::timestamp), make_attribute_parser(&md_capture_timing::sensor_timestamp, md_capture_timing_attributes::sensor_timestamp_attribute, md_prop_offset))); // attributes of md_capture_stats @@ -410,42 +429,22 @@ namespace librealsense offsetof(md_fisheye_mode, fisheye_mode) + offsetof(md_fisheye_normal_mode, intel_configuration); - fisheye_ep->register_metadata((rs2_frame_metadata_value)RS2_FRAME_METADATA_HW_TYPE, make_attribute_parser(&md_configuration::hw_type, md_configuration_attributes::hw_type_attribute, md_prop_offset)); - fisheye_ep->register_metadata((rs2_frame_metadata_value)RS2_FRAME_METADATA_SKU_ID, make_attribute_parser(&md_configuration::sku_id, md_configuration_attributes::sku_id_attribute, md_prop_offset)); - fisheye_ep->register_metadata((rs2_frame_metadata_value)RS2_FRAME_METADATA_FORMAT, make_attribute_parser(&md_configuration::format, md_configuration_attributes::format_attribute, md_prop_offset)); - fisheye_ep->register_metadata((rs2_frame_metadata_value)RS2_FRAME_METADATA_WIDTH, make_attribute_parser(&md_configuration::width, md_configuration_attributes::width_attribute, md_prop_offset)); - fisheye_ep->register_metadata((rs2_frame_metadata_value)RS2_FRAME_METADATA_HEIGHT, make_attribute_parser(&md_configuration::height, md_configuration_attributes::height_attribute, md_prop_offset)); + raw_fisheye_ep->register_metadata((rs2_frame_metadata_value)RS2_FRAME_METADATA_HW_TYPE, make_attribute_parser(&md_configuration::hw_type, md_configuration_attributes::hw_type_attribute, md_prop_offset)); + raw_fisheye_ep->register_metadata((rs2_frame_metadata_value)RS2_FRAME_METADATA_SKU_ID, make_attribute_parser(&md_configuration::sku_id, md_configuration_attributes::sku_id_attribute, md_prop_offset)); + raw_fisheye_ep->register_metadata((rs2_frame_metadata_value)RS2_FRAME_METADATA_FORMAT, make_attribute_parser(&md_configuration::format, md_configuration_attributes::format_attribute, md_prop_offset)); + raw_fisheye_ep->register_metadata((rs2_frame_metadata_value)RS2_FRAME_METADATA_WIDTH, make_attribute_parser(&md_configuration::width, md_configuration_attributes::width_attribute, md_prop_offset)); + raw_fisheye_ep->register_metadata((rs2_frame_metadata_value)RS2_FRAME_METADATA_HEIGHT, make_attribute_parser(&md_configuration::height, md_configuration_attributes::height_attribute, 
md_prop_offset)); // attributes of md_fisheye_control md_prop_offset = offsetof(metadata_raw, mode) + offsetof(md_fisheye_mode, fisheye_mode) + offsetof(md_fisheye_normal_mode, intel_fisheye_control); - fisheye_ep->register_metadata(RS2_FRAME_METADATA_GAIN_LEVEL, make_attribute_parser(&md_fisheye_control::manual_gain, md_depth_control_attributes::gain_attribute, md_prop_offset)); - fisheye_ep->register_metadata(RS2_FRAME_METADATA_ACTUAL_EXPOSURE, make_attribute_parser(&md_fisheye_control::manual_exposure, md_depth_control_attributes::exposure_attribute, md_prop_offset)); + raw_fisheye_ep->register_metadata(RS2_FRAME_METADATA_GAIN_LEVEL, make_attribute_parser(&md_fisheye_control::manual_gain, md_depth_control_attributes::gain_attribute, md_prop_offset)); + raw_fisheye_ep->register_metadata(RS2_FRAME_METADATA_ACTUAL_EXPOSURE, make_attribute_parser(&md_fisheye_control::manual_exposure, md_depth_control_attributes::exposure_attribute, md_prop_offset)); // Add fisheye endpoint _fisheye_device_idx = add_sensor(fisheye_ep); - - // Not applicable for TM1 - //_depth_to_fisheye = std::make_shared>([this]() - //{ - // auto extr = get_fisheye_extrinsics_data(*_fisheye_extrinsics_raw); - // return from_pose(inverse(extr)); - //}); - - //_fisheye_to_imu = std::make_shared>([this]() - //{ - // auto fe_calib = (*_tm1_eeprom).calibration_table.calib_model.fe_calibration; - - // auto rot = fe_calib.fisheye_to_imu.rotation; - // auto trans = fe_calib.fisheye_to_imu.translation; - - // pose ex = { { rot(0,0), rot(1,0),rot(2,0),rot(0,1), rot(1,1),rot(2,1),rot(0,2), rot(1,2),rot(2,2) }, - // { trans[0], trans[1], trans[2] } }; - - // return from_pose(ex); - //}); } mm_calib_handler::mm_calib_handler(std::shared_ptr hw_monitor, ds::d400_caps dev_cap) : diff --git a/src/ds5/ds5-motion.h b/src/ds5/ds5-motion.h index c9dc0377b2..1c2854da83 100644 --- a/src/ds5/ds5-motion.h +++ b/src/ds5/ds5-motion.h @@ -197,7 +197,6 @@ namespace librealsense { public: mm_calib_handler(std::shared_ptr hw_monitor, ds::d400_caps dev_cap); - mm_calib_handler(const mm_calib_handler&); ~mm_calib_handler() {} ds::imu_intrinsic get_intrinsic(rs2_stream); @@ -217,7 +216,7 @@ namespace librealsense class ds5_motion : public virtual ds5_device { public: - std::shared_ptr create_hid_device(std::shared_ptr ctx, + std::shared_ptr create_hid_device(std::shared_ptr ctx, const std::vector& all_hid_infos, const firmware_version& camera_fw_version); @@ -226,7 +225,7 @@ namespace librealsense rs2_motion_device_intrinsic get_motion_intrinsics(rs2_stream) const; - std::shared_ptr register_auto_exposure_options(uvc_sensor* uvc_ep, + std::shared_ptr register_auto_exposure_options(synthetic_sensor* ep, const platform::extension_unit* fisheye_xu); private: @@ -248,8 +247,8 @@ namespace librealsense // Bandwidth parameters required for HID sensors // The Acceleration configuration will be resolved according to the IMU sensor type at run-time std::vector> sensor_name_and_hid_profiles = - { { gyro_sensor_name, {RS2_STREAM_GYRO, 0, 1, 1, int(odr::IMU_FPS_200), RS2_FORMAT_MOTION_XYZ32F}}, - { gyro_sensor_name, {RS2_STREAM_GYRO, 0, 1, 1, int(odr::IMU_FPS_400), RS2_FORMAT_MOTION_XYZ32F}}}; + { { gyro_sensor_name, {RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_GYRO, 0, 1, 1, int(odr::IMU_FPS_200)}}, + { gyro_sensor_name, {RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_GYRO, 0, 1, 1, int(odr::IMU_FPS_400)}}}; // Translate frequency to SENSOR_PROPERTY_CURRENT_REPORT_INTERVAL. 
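These registrations keep the existing metadata-attribute pattern and only change which endpoint they are attached to (the raw fisheye sensor rather than the synthetic wrapper): md_prop_offset is built by chaining offsetof() terms down the nested metadata layout, and make_attribute_parser reads the field at that offset from the raw metadata blob. Below is a self-contained reminder of that offset arithmetic; metadata_raw_t, md_mode_t and md_control_t are made-up packed layouts, not the real ones, and the validity-flag check the real parser performs is omitted.

    #include <cassert>
    #include <cstddef>   // offsetof
    #include <cstdint>
    #include <cstring>

    #pragma pack(push, 1)
    // Made-up, byte-packed stand-ins for the nested metadata layout.
    struct md_control_t   { uint32_t header; uint32_t manual_gain; uint32_t manual_exposure; };
    struct md_mode_t      { uint8_t  mode_id; md_control_t control; };
    struct metadata_raw_t { uint32_t preamble; md_mode_t mode; };
    #pragma pack(pop)

    // Same idea as the patch: walk the nesting with offsetof(), then read the
    // attribute at that offset relative to the start of the metadata blob.
    uint32_t read_manual_gain(const uint8_t* blob, size_t blob_size)
    {
        const size_t md_prop_offset = offsetof(metadata_raw_t, mode)
                                    + offsetof(md_mode_t, control)
                                    + offsetof(md_control_t, manual_gain);
        assert(md_prop_offset + sizeof(uint32_t) <= blob_size);

        uint32_t gain = 0;
        std::memcpy(&gain, blob + md_prop_offset, sizeof(gain));
        return gain;
    }

    int main()
    {
        metadata_raw_t md{};
        md.mode.control.manual_gain = 42;

        return read_manual_gain(reinterpret_cast<const uint8_t*>(&md), sizeof(md)) == 42 ? 0 : 1;
    }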
std::map> fps_and_sampling_frequency_per_rs2_stream = diff --git a/src/ds5/ds5-nonmonochrome.cpp b/src/ds5/ds5-nonmonochrome.cpp index 10f5b4652b..9c6e937423 100644 --- a/src/ds5/ds5-nonmonochrome.cpp +++ b/src/ds5/ds5-nonmonochrome.cpp @@ -16,6 +16,8 @@ #include "ds5-private.h" #include "ds5-options.h" #include "ds5-timestamp.h" +#include "proc/color-formats-converter.h" +#include "proc/depth-formats-converter.h" namespace librealsense { @@ -28,16 +30,26 @@ namespace librealsense auto pid = group.uvc_devices.front().pid; if ((_fw_version >= firmware_version("5.5.8.0")) && (pid != RS_USB2_PID)) { - get_depth_sensor().register_option(RS2_OPTION_ENABLE_AUTO_WHITE_BALANCE, - std::make_shared>(get_depth_sensor(), + auto& depth_ep = get_depth_sensor(); + depth_ep.register_option(RS2_OPTION_ENABLE_AUTO_WHITE_BALANCE, + std::make_shared>(get_raw_depth_sensor(), depth_xu, DS5_ENABLE_AUTO_WHITE_BALANCE, "Enable Auto White Balance")); // RS400 rolling-shutter Skus allow to get low-quality color image from the same viewport as the depth - get_depth_sensor().register_pixel_format(pf_uyvyl); - get_depth_sensor().register_pixel_format(pf_rgb888); - get_depth_sensor().register_pixel_format(pf_w10); + depth_ep.register_processing_block({ {RS2_FORMAT_UYVY} }, { {RS2_FORMAT_RGB8, RS2_STREAM_INFRARED} }, []() { return std::make_shared(RS2_FORMAT_RGB8, RS2_STREAM_INFRARED); }); + depth_ep.register_processing_block({ {RS2_FORMAT_UYVY} }, { {RS2_FORMAT_RGBA8, RS2_STREAM_INFRARED} }, []() { return std::make_shared(RS2_FORMAT_RGBA8, RS2_STREAM_INFRARED); }); + depth_ep.register_processing_block({ {RS2_FORMAT_UYVY} }, { {RS2_FORMAT_BGR8, RS2_STREAM_INFRARED} }, []() { return std::make_shared(RS2_FORMAT_BGR8, RS2_STREAM_INFRARED); }); + depth_ep.register_processing_block({ {RS2_FORMAT_UYVY} }, { {RS2_FORMAT_BGRA8, RS2_STREAM_INFRARED} }, []() { return std::make_shared(RS2_FORMAT_BGRA8, RS2_STREAM_INFRARED); }); + depth_ep.register_processing_block(processing_block_factory::create_id_pbf(RS2_FORMAT_UYVY, RS2_STREAM_INFRARED)); + + depth_ep.register_processing_block({ {RS2_FORMAT_BGR8} }, { {RS2_FORMAT_RGB8, RS2_STREAM_INFRARED} }, []() { return std::make_shared(); }); + + depth_ep.register_processing_block(processing_block_factory::create_id_pbf(RS2_FORMAT_Z16, RS2_STREAM_DEPTH)); + depth_ep.register_processing_block({ {RS2_FORMAT_W10} }, { {RS2_FORMAT_RAW10, RS2_STREAM_INFRARED, 1} }, []() { return std::make_shared(RS2_FORMAT_RAW10); }); + depth_ep.register_processing_block({ {RS2_FORMAT_W10} }, { {RS2_FORMAT_Y10BPACK, RS2_STREAM_INFRARED, 1} }, []() { return std::make_shared(RS2_FORMAT_Y10BPACK); }); + } get_depth_sensor().unregister_option(RS2_OPTION_EMITTER_ON_OFF); diff --git a/src/ds5/ds5-options.cpp b/src/ds5/ds5-options.cpp index 1fd215c42f..2383c5294a 100644 --- a/src/ds5/ds5-options.cpp +++ b/src/ds5/ds5-options.cpp @@ -175,32 +175,10 @@ namespace librealsense } enable_motion_correction::enable_motion_correction(sensor_base* mm_ep, - const ds::imu_intrinsic& accel, - const ds::imu_intrinsic& gyro, std::shared_ptr> depth_to_imu, - on_before_frame_callback frame_callback, const option_range& opt_range) - : option_base(opt_range), _is_enabled(true), _accel(accel), _gyro(gyro), _depth_to_imu(**depth_to_imu) - { - mm_ep->register_on_before_frame_callback( - [this, frame_callback](rs2_stream stream, frame_interface* fr, callback_invocation_holder callback) - { - if (_is_enabled.load() && fr->get_stream()->get_format() == RS2_FORMAT_MOTION_XYZ32F) - { - auto xyz = (float3*)(fr->get_frame_data()); - - if (stream 
== RS2_STREAM_ACCEL) - *xyz = (_accel.sensitivity * (*xyz)) - _accel.bias; - - if (stream == RS2_STREAM_GYRO) - *xyz = _gyro.sensitivity * (*xyz) - _gyro.bias; - } - - // Align IMU axes to the established Coordinates System - if (frame_callback) - frame_callback(stream, fr, std::move(callback)); - }); - } + : option_base(opt_range), _is_enabled(true), _depth_to_imu(**depth_to_imu) + {} void enable_auto_exposure_option::set(float value) { @@ -232,7 +210,7 @@ namespace librealsense return _auto_exposure_state->get_enable_auto_exposure(); } - enable_auto_exposure_option::enable_auto_exposure_option(uvc_sensor* fisheye_ep, + enable_auto_exposure_option::enable_auto_exposure_option(synthetic_sensor* fisheye_ep, std::shared_ptr auto_exposure, std::shared_ptr auto_exposure_state, const option_range& opt_range) @@ -240,19 +218,7 @@ namespace librealsense _auto_exposure_state(auto_exposure_state), _to_add_frames((_auto_exposure_state->get_enable_auto_exposure())), _auto_exposure(auto_exposure) - { - fisheye_ep->register_on_before_frame_callback( - [this](rs2_stream stream, frame_interface* f, callback_invocation_holder callback) - { - if (!_to_add_frames || stream != RS2_STREAM_FISHEYE) - return; - - ((frame*)f)->additional_data.fisheye_ae_mode = true; - - f->acquire(); - _auto_exposure->add_frame(f, std::move(callback)); - }); - } + {} auto_exposure_mode_option::auto_exposure_mode_option(std::shared_ptr auto_exposure, std::shared_ptr auto_exposure_state, diff --git a/src/ds5/ds5-options.h b/src/ds5/ds5-options.h index 611d883db2..38eac52491 100644 --- a/src/ds5/ds5-options.h +++ b/src/ds5/ds5-options.h @@ -61,7 +61,7 @@ namespace librealsense float query() const override; - bool is_enabled() const override { return true; } + bool is_enabled() const override { return _is_enabled.load(); } const char* get_description() const override { @@ -69,16 +69,11 @@ namespace librealsense } enable_motion_correction(sensor_base* mm_ep, - const ds::imu_intrinsic& accel, - const ds::imu_intrinsic& gyro, std::shared_ptr> depth_to_imu, - on_before_frame_callback frame_callback, const option_range& opt_range); private: std::atomic _is_enabled; - ds::imu_intrinsic _accel; - ds::imu_intrinsic _gyro; rs2_extrinsics _depth_to_imu; }; @@ -96,7 +91,10 @@ namespace librealsense return "Enable/disable auto-exposure"; } - enable_auto_exposure_option(uvc_sensor* fisheye_ep, + auto_exposure_mechanism* get_auto_exposure() { return _auto_exposure.get(); } + bool to_add_frames() { return _to_add_frames.load(); } + + enable_auto_exposure_option(synthetic_sensor* fisheye_ep, std::shared_ptr auto_exposure, std::shared_ptr auto_exposure_state, const option_range& opt_range); diff --git a/src/ds5/ds5-timestamp.cpp b/src/ds5/ds5-timestamp.cpp index da7acb17d6..075f194dd8 100644 --- a/src/ds5/ds5-timestamp.cpp +++ b/src/ds5/ds5-timestamp.cpp @@ -21,18 +21,27 @@ namespace librealsense reset(); } - bool ds5_timestamp_reader_from_metadata::has_metadata(const request_mapping& mode, const void * metadata, size_t metadata_size) + bool ds5_timestamp_reader_from_metadata::has_metadata(std::shared_ptr frame) { std::lock_guard lock(_mtx); - if(metadata == nullptr || metadata_size == 0) + auto f = std::dynamic_pointer_cast(frame); + if (!f) { + LOG_ERROR("Frame is not valid. 
Failed to downcast to librealsense::frame."); return false; } + auto md = f->additional_data.metadata_blob; + auto mds = f->additional_data.metadata_size; - for(uint32_t i=0; i frame) { std::lock_guard lock(_mtx); + + auto f = std::dynamic_pointer_cast(frame); + if (!f) + { + LOG_ERROR("Frame is not valid. Failed to downcast to librealsense::frame."); + return 0; + } auto pin_index = 0; - if (mode.pf->fourcc == 0x5a313620) // Z16 + + if (frame->get_stream()->get_format() == RS2_FORMAT_Z16) pin_index = 1; if(!_has_metadata[pin_index]) { - _has_metadata[pin_index] = has_metadata(mode, fo.metadata, fo.metadata_size); + _has_metadata[pin_index] = has_metadata(frame); } - auto md = (librealsense::metadata_intel_basic*)(fo.metadata); + auto md = (librealsense::metadata_intel_basic*)(f->additional_data.metadata_blob.data()); if(_has_metadata[pin_index] && md) { return (double)(md->header.timestamp)*TIMESTAMP_USEC_TO_MSEC; @@ -64,25 +81,33 @@ namespace librealsense LOG_WARNING("UVC metadata payloads not available. Please refer to the installation chapter for details."); one_time_note = true; } - return _backup_timestamp_reader->get_frame_timestamp(mode, fo); + return _backup_timestamp_reader->get_frame_timestamp(frame); } } - unsigned long long ds5_timestamp_reader_from_metadata::get_frame_counter(const request_mapping & mode, const platform::frame_object& fo) const + unsigned long long ds5_timestamp_reader_from_metadata::get_frame_counter(std::shared_ptr frame) const { std::lock_guard lock(_mtx); + + auto f = std::dynamic_pointer_cast(frame); + if (!f) + { + LOG_ERROR("Frame is not valid. Failed to downcast to librealsense::frame."); + return 0; + } auto pin_index = 0; - if (mode.pf->fourcc == 0x5a313620) // Z16 + + if (frame->get_stream()->get_format() == RS2_FORMAT_Z16) pin_index = 1; - if(_has_metadata[pin_index] && fo.metadata_size > platform::uvc_header_size) + if(_has_metadata[pin_index] && f->additional_data.metadata_size > platform::uvc_header_size) { - auto md = (librealsense::metadata_intel_basic*)(fo.metadata); + auto md = (librealsense::metadata_intel_basic*)(f->additional_data.metadata_blob.data()); if (md->capture_valid()) return md->payload.frame_counter; } - return _backup_timestamp_reader->get_frame_counter(mode, fo); + return _backup_timestamp_reader->get_frame_counter(frame); } void ds5_timestamp_reader_from_metadata::reset() @@ -95,15 +120,15 @@ namespace librealsense } } - rs2_timestamp_domain ds5_timestamp_reader_from_metadata::get_frame_timestamp_domain(const request_mapping & mode, const platform::frame_object& fo) const + rs2_timestamp_domain ds5_timestamp_reader_from_metadata::get_frame_timestamp_domain(std::shared_ptr frame) const { std::lock_guard lock(_mtx); auto pin_index = 0; - if (mode.pf->fourcc == 0x5a313620) // Z16 + if (frame->get_stream()->get_format() == RS2_FORMAT_Z16) pin_index = 1; return _has_metadata[pin_index] ? 
RS2_TIMESTAMP_DOMAIN_HARDWARE_CLOCK : - _backup_timestamp_reader->get_frame_timestamp_domain(mode,fo); + _backup_timestamp_reader->get_frame_timestamp_domain(frame); } ds5_timestamp_reader::ds5_timestamp_reader(std::shared_ptr ts) @@ -121,23 +146,23 @@ namespace librealsense } } - rs2_time_t ds5_timestamp_reader::get_frame_timestamp(const request_mapping& mode, const platform::frame_object& fo) + rs2_time_t ds5_timestamp_reader::get_frame_timestamp(std::shared_ptr frame) { std::lock_guard lock(_mtx); return _ts->get_time(); } - unsigned long long ds5_timestamp_reader::get_frame_counter(const request_mapping & mode, const platform::frame_object& fo) const + unsigned long long ds5_timestamp_reader::get_frame_counter(std::shared_ptr frame) const { std::lock_guard lock(_mtx); auto pin_index = 0; - if (mode.pf->fourcc == 0x5a313620) // Z16 + if (frame->get_stream()->get_format() == RS2_FORMAT_Z16) pin_index = 1; return ++counter[pin_index]; } - rs2_timestamp_domain ds5_timestamp_reader::get_frame_timestamp_domain(const request_mapping & mode, const platform::frame_object& fo) const + rs2_timestamp_domain ds5_timestamp_reader::get_frame_timestamp_domain(std::shared_ptr frame) const { return RS2_TIMESTAMP_DOMAIN_SYSTEM_TIME; } @@ -157,30 +182,36 @@ namespace librealsense } } - rs2_time_t ds5_custom_hid_timestamp_reader::get_frame_timestamp(const request_mapping& /*mode*/, const platform::frame_object& fo) + rs2_time_t ds5_custom_hid_timestamp_reader::get_frame_timestamp(std::shared_ptr frame) { std::lock_guard lock(_mtx); static const uint8_t timestamp_offset = 17; + auto f = std::dynamic_pointer_cast(frame); + if (!f) + { + LOG_ERROR("Frame is not valid. Failed to downcast to librealsense::frame."); + return 0; + } // The timewstamp shall be trimmed back to 32 bit to allow HID/UVC intra-stream sync // See ds5_iio_hid_timestamp_reader description - auto timestamp = *((uint32_t*)((const uint8_t*)fo.pixels + timestamp_offset)); + auto timestamp = *((uint32_t*)((const uint8_t*)f->get_frame_data() + timestamp_offset)); // TODO - verify units with custom report return static_cast(timestamp) * TIMESTAMP_USEC_TO_MSEC; } - bool ds5_custom_hid_timestamp_reader::has_metadata(const request_mapping& /*mode*/, const void * /*metadata*/, size_t /*metadata_size*/) const + bool ds5_custom_hid_timestamp_reader::has_metadata(std::shared_ptr frame) const { return true; } - unsigned long long ds5_custom_hid_timestamp_reader::get_frame_counter(const request_mapping & mode, const platform::frame_object& fo) const + unsigned long long ds5_custom_hid_timestamp_reader::get_frame_counter(std::shared_ptr frame) const { std::lock_guard lock(_mtx); return ++counter.front(); } - rs2_timestamp_domain ds5_custom_hid_timestamp_reader::get_frame_timestamp_domain(const request_mapping & /*mode*/, const platform::frame_object& /*fo*/) const + rs2_timestamp_domain ds5_custom_hid_timestamp_reader::get_frame_timestamp_domain(std::shared_ptr frame) const { return RS2_TIMESTAMP_DOMAIN_HARDWARE_CLOCK; } diff --git a/src/ds5/ds5-timestamp.h b/src/ds5/ds5-timestamp.h index d742903ebd..bef6d2d5d6 100644 --- a/src/ds5/ds5-timestamp.h +++ b/src/ds5/ds5-timestamp.h @@ -21,15 +21,15 @@ namespace librealsense public: ds5_timestamp_reader_from_metadata(std::unique_ptr backup_timestamp_reader); - bool has_metadata(const request_mapping& mode, const void * metadata, size_t metadata_size); + bool has_metadata(std::shared_ptr frame); - rs2_time_t get_frame_timestamp(const request_mapping& mode, const platform::frame_object& fo) override; + rs2_time_t 
get_frame_timestamp(std::shared_ptr frame) override; - unsigned long long get_frame_counter(const request_mapping & mode, const platform::frame_object& fo) const override; + unsigned long long get_frame_counter(std::shared_ptr frame) const override; void reset() override; - rs2_timestamp_domain get_frame_timestamp_domain(const request_mapping & mode, const platform::frame_object& fo) const override; + rs2_timestamp_domain get_frame_timestamp_domain(std::shared_ptr frame) const override; }; class ds5_timestamp_reader : public frame_timestamp_reader @@ -43,11 +43,11 @@ namespace librealsense void reset() override; - rs2_time_t get_frame_timestamp(const request_mapping& mode, const platform::frame_object& fo) override; + rs2_time_t get_frame_timestamp(std::shared_ptr frame) override; - unsigned long long get_frame_counter(const request_mapping & mode, const platform::frame_object& fo) const override; + unsigned long long get_frame_counter(std::shared_ptr frame) const override; - rs2_timestamp_domain get_frame_timestamp_domain(const request_mapping & mode, const platform::frame_object& fo) const override; + rs2_timestamp_domain get_frame_timestamp_domain(std::shared_ptr frame) const override; }; class ds5_custom_hid_timestamp_reader : public frame_timestamp_reader @@ -61,12 +61,12 @@ namespace librealsense void reset() override; - rs2_time_t get_frame_timestamp(const request_mapping& mode, const platform::frame_object& fo) override; + rs2_time_t get_frame_timestamp(std::shared_ptr frame) override; - bool has_metadata(const request_mapping& mode, const void * metadata, size_t metadata_size) const; + bool has_metadata(std::shared_ptr frame) const; - unsigned long long get_frame_counter(const request_mapping & mode, const platform::frame_object& fo) const override; + unsigned long long get_frame_counter(std::shared_ptr frame) const override; - rs2_timestamp_domain get_frame_timestamp_domain(const request_mapping & mode, const platform::frame_object& fo) const override; + rs2_timestamp_domain get_frame_timestamp_domain(std::shared_ptr frame) const override; }; } diff --git a/src/frame-archive.h b/src/frame-archive.h index 40f07f610d..1b2e2675af 100644 --- a/src/frame-archive.h +++ b/src/frame-archive.h @@ -122,7 +122,8 @@ namespace librealsense if (new_frame) { - if (max_frames) new_frame->mark_fixed(); + if (max_frames) + new_frame->mark_fixed(); } else { diff --git a/src/gl/rs-gl.cpp b/src/gl/rs-gl.cpp index bc6afc0e30..cc2c1f3d1f 100644 --- a/src/gl/rs-gl.cpp +++ b/src/gl/rs-gl.cpp @@ -12,7 +12,7 @@ #include "upload.h" #include "pc-shader.h" #include "colorizer-gl.h" -#include "proc/yuy2rgb.h" +#include "proc/color-formats-converter.h" #include "proc/colorizer.h" #include "proc/align.h" #include "log.h" @@ -71,7 +71,7 @@ rs2_processing_block* rs2_gl_create_yuy_decoder(int api_version, rs2_error** err verify_version_compatibility(api_version); auto block = std::make_shared(); - auto backup = std::make_shared(); + auto backup = std::make_shared(RS2_FORMAT_RGB8); auto dual = std::make_shared(); dual->add(block); dual->add(backup); diff --git a/src/global_timestamp_reader.cpp b/src/global_timestamp_reader.cpp index 99652023ca..5a0de25e4e 100644 --- a/src/global_timestamp_reader.cpp +++ b/src/global_timestamp_reader.cpp @@ -211,10 +211,10 @@ namespace librealsense { } - double global_timestamp_reader::get_frame_timestamp(const request_mapping& mode, const platform::frame_object& fo) + double global_timestamp_reader::get_frame_timestamp(std::shared_ptr frame) { - double frame_time = 
_device_timestamp_reader->get_frame_timestamp(mode, fo); - rs2_timestamp_domain ts_domain = _device_timestamp_reader->get_frame_timestamp_domain(mode, fo); + double frame_time = _device_timestamp_reader->get_frame_timestamp(frame); + rs2_timestamp_domain ts_domain = _device_timestamp_reader->get_frame_timestamp_domain(frame); if (_option_is_enabled->is_true() && ts_domain == RS2_TIMESTAMP_DOMAIN_HARDWARE_CLOCK) { auto sp = _time_diff_keeper.lock(); @@ -227,14 +227,14 @@ namespace librealsense } - unsigned long long global_timestamp_reader::get_frame_counter(const request_mapping& mode, const platform::frame_object& fo) const + unsigned long long global_timestamp_reader::get_frame_counter(std::shared_ptr frame) const { - return _device_timestamp_reader->get_frame_counter(mode, fo); + return _device_timestamp_reader->get_frame_counter(frame); } - rs2_timestamp_domain global_timestamp_reader::get_frame_timestamp_domain(const request_mapping& mode, const platform::frame_object& fo) const + rs2_timestamp_domain global_timestamp_reader::get_frame_timestamp_domain(std::shared_ptr frame) const { - rs2_timestamp_domain ts_domain = _device_timestamp_reader->get_frame_timestamp_domain(mode, fo); + rs2_timestamp_domain ts_domain = _device_timestamp_reader->get_frame_timestamp_domain(frame); return (_option_is_enabled->is_true() && _ts_is_ready && ts_domain == RS2_TIMESTAMP_DOMAIN_HARDWARE_CLOCK) ? RS2_TIMESTAMP_DOMAIN_GLOBAL_TIME : ts_domain; } diff --git a/src/global_timestamp_reader.h b/src/global_timestamp_reader.h index 639ef98433..afd235e25b 100644 --- a/src/global_timestamp_reader.h +++ b/src/global_timestamp_reader.h @@ -86,9 +86,9 @@ namespace librealsense std::shared_ptr timediff, std::shared_ptr); - rs2_time_t get_frame_timestamp(const request_mapping& mode, const platform::frame_object& fo) override; - unsigned long long get_frame_counter(const request_mapping& mode, const platform::frame_object& fo) const override; - rs2_timestamp_domain get_frame_timestamp_domain(const request_mapping & mode, const platform::frame_object& fo) const override; + rs2_time_t get_frame_timestamp(std::shared_ptr frame) override; + unsigned long long get_frame_counter(std::shared_ptr frame) const override; + rs2_timestamp_domain get_frame_timestamp_domain(std::shared_ptr frame) const override; void reset() override; private: diff --git a/src/image.cpp b/src/image.cpp index c5583b4ed5..e3c7109a82 100644 --- a/src/image.cpp +++ b/src/image.cpp @@ -3,11 +3,6 @@ #include "image.h" #include "image-avx.h" -#include "types.h" - -#define STB_IMAGE_STATIC -#define STB_IMAGE_IMPLEMENTATION -#include "../third-party/stb_image.h" #ifdef RS2_USE_CUDA #include "cuda/cuda-conversion.cuh" @@ -16,32 +11,6 @@ #include // For SSSE3 intrinsics #endif -#if defined (ANDROID) || (defined (__linux__) && !defined (__x86_64__)) - -bool has_avx() { return false; } - -#else - -#ifdef _WIN32 -#include -#define cpuid(info, x) __cpuidex(info, x, 0) -#else -#include -void cpuid(int info[4], int info_type){ - __cpuid_count(info_type, 0, info[0], info[1], info[2], info[3]); -} -#endif - -bool has_avx() -{ - int info[4]; - cpuid(info, 0); - cpuid(info, 0x80000000); - return (info[2] & ((int)1 << 28)) != 0; -} - -#endif - #pragma pack(push, 1) // All structs in this file are assumed to be byte-packed namespace librealsense { @@ -82,963 +51,18 @@ namespace librealsense case RS2_FORMAT_MOTION_XYZ32F: return 1; case RS2_FORMAT_6DOF: return 1; case RS2_FORMAT_MJPEG: return 8; + case RS2_FORMAT_Y8I: return 16; + case RS2_FORMAT_Y12I: return 24; + case 
RS2_FORMAT_INZI: return 32; + case RS2_FORMAT_INVI: return 16; + case RS2_FORMAT_W10: return 32; default: assert(false); return 0; } } - ////////////////////////////// - // Naive unpacking routines // - ////////////////////////////// - - - template void copy_hid_axes(byte * const dest[], const byte * source, double factor) - { - using namespace librealsense; - auto hid = (hid_data*)(source); - - auto res= float3{ float(hid->x), float(hid->y), float(hid->z)} * float(factor); - - librealsense::copy(dest[0], &res, sizeof(float3)); - } - - // The Accelerometer input format: signed int 16bit. data units 1LSB=0.001g; - // Librealsense output format: floating point 32bit. units m/s^2, - template void unpack_accel_axes(byte * const dest[], const byte * source, int width, int height, int actual_size) - { - static constexpr float gravity = 9.80665f; // Standard Gravitation Acceleration - static constexpr double accelerator_transform_factor = 0.001*gravity; - - copy_hid_axes(dest, source, accelerator_transform_factor); - } - - // The Gyro input format: signed int 16bit. data units 1LSB=0.1deg/sec; - // Librealsense output format: floating point 32bit. units rad/sec, - template void unpack_gyro_axes(byte * const dest[], const byte * source, int width, int height, int actual_size) - { - static const double gyro_transform_factor = deg2rad(0.1); - - copy_hid_axes(dest, source, gyro_transform_factor); - } - - void unpack_hid_raw_data(byte * const dest[], const byte * source, int width, int height, int actual_size) - { - librealsense::copy(dest[0] + 0, source + 0, 2); - librealsense::copy(dest[0] + 2, source + 4, 2); - librealsense::copy(dest[0] + 4, source + 8, 2); - librealsense::copy(dest[0] + 6, source + 16, 8); - } - - void unpack_input_reports_data(byte * const dest[], const byte * source, int width, int height, int actual_size) - { - // Input Report Struct - // uint8_t sensor_state - // uint8_t sourceId - // uint32_t customTimestamp - // uint8_t mmCounter - // uint8_t usbCounter - static const int input_reports_size = 9; - static const int input_reports_offset = 15; - librealsense::copy(dest[0], source + input_reports_offset, input_reports_size); - } - - template - void align_l500_image_optimized(byte * const dest[], const byte * source, int width, int height, int actual_size) - { - auto width_out = height; - auto height_out = width; - - auto out = dest[0]; - byte buffer[8][8 * SIZE]; // = { 0 }; - for (int i = 0; i <= height-8; i = i + 8) - { - for (int j = 0; j <= width-8; j = j + 8) - { - for (int ii = 0; ii < 8; ++ii) - { - for (int jj = 0; jj < 8; ++jj) - { - auto source_index = ((j+ jj) + (width * (i + ii))) * SIZE; - memcpy((void*)(&buffer[7-jj][(7-ii) * SIZE]), &source[source_index], SIZE); - } - } - - for (int ii = 0; ii < 8; ++ii) - { - auto out_index = (((height_out - 8 - j + 1) * width_out) - i - 8 + (ii)* width_out); - memcpy(&out[(out_index)* SIZE], &(buffer[ii]), 8 * SIZE); - } - } - } - } - - template - void align_l500_image(byte * const dest[], const byte * source, int width, int height, int actual_size) - { - auto width_out = height; - auto height_out = width; - - auto out = dest[0]; - for (int i = 0; i < height; ++i) - { - auto row_offset = i * width; - for (int j = 0; j < width; ++j) - { - auto out_index = (((height_out - j) * width_out) - i - 1) * SIZE; - librealsense::copy((void*)(&out[out_index]), &(source[(row_offset + j) * SIZE]), SIZE); - } - } - } - - void unpack_confidence(byte * const dest[], const byte * source, int width, int height, int actual_size) - { -#pragma pack 
(push, 1) - struct lsb_msb - { - unsigned lsb : 4; - unsigned msb : 4; - }; -#pragma pack(pop) - - align_l500_image<1>(dest, source, width, height, actual_size); - auto out = dest[0]; - for (int i = (width - 1), out_i = ((width - 1) * 2); i >= 0; --i, out_i-=2) - { - auto row_offset = i * height; - for (int j = 0; j < height; ++j) - { - auto val = *(reinterpret_cast(&out[(row_offset + j)])); - auto out_index = out_i * height + j; - out[out_index] = val.lsb << 4; - out[out_index + height] = val.msb << 4; - } - } - } - - template void copy_pixels(byte * const dest[], const byte * source, int width, int height, int actual_size) - { - auto count = width * height; - librealsense::copy(dest[0], source, SIZE * count); - } - - void copy_raw10(byte * const dest[], const byte * source, int width, int height, int actual_size) - { - auto count = width * height; // num of pixels - librealsense::copy(dest[0], source, size_t(5.0 * (count / 4.0))); - } - - void unpack_y10bpack(byte * const dest[], const byte * source, int width, int height, int actual_size) - { - auto count = width * height / 4; // num of pixels - uint8_t * from = (uint8_t*)(source); - uint16_t * to = (uint16_t*)(dest[0]); - - // Put the 10 bit into the msb of uint16_t - for (int i = 0; i < count; i++, from +=5) // traverse macro-pixels - { - *to++ = ((from[0] << 2) | ( from[4] & 3)) << 6; - *to++ = ((from[1] << 2) | ((from[4] >> 2) & 3)) << 6; - *to++ = ((from[2] << 2) | ((from[4] >> 4) & 3)) << 6; - *to++ = ((from[3] << 2) | ((from[4] >> 6) & 3)) << 6; - } - } - - template void unpack_pixels(byte * const dest[], int count, const SOURCE * source, UNPACK unpack, int actual_size) - { - auto out = reinterpret_cast(dest[0]); - for (int i = 0; i < count; ++i) *out++ = unpack(*source++); - } - - void unpack_y16_from_y8(byte * const d[], const byte * s, int width, int height, int actual_size) { unpack_pixels(d, width * height, reinterpret_cast(s), [](uint8_t pixel) -> uint16_t { return pixel | pixel << 8; }, actual_size); } - void unpack_y16_from_y16_10(byte * const d[], const byte * s, int width, int height, int actual_size) { unpack_pixels(d, width * height, reinterpret_cast(s), [](uint16_t pixel) -> uint16_t { return pixel << 6; }, actual_size); } - void unpack_y8_from_y16_10(byte * const d[], const byte * s, int width, int height, int actual_size) { unpack_pixels(d, width * height, reinterpret_cast(s), [](uint16_t pixel) -> uint8_t { return pixel >> 2; }, actual_size); } - void unpack_rw10_from_rw8(byte * const d[], const byte * s, int width, int height, int actual_size) - { -#ifdef __SSSE3__ - auto src = reinterpret_cast(s); - auto dst = reinterpret_cast<__m128i *>(d[0]); - - __m128i * xin = (__m128i *)src; - __m128i * xout = (__m128i *) dst; - for (int i = 0; i < width * height; i += 16, ++xout, xin += 2) - { - __m128i in1_16 = _mm_load_si128((__m128i *)(xin)); - __m128i in2_16 = _mm_load_si128((__m128i *)(xin + 1)); - __m128i out1_16 = _mm_srli_epi16(in1_16, 2); - __m128i out2_16 = _mm_srli_epi16(in2_16, 2); - __m128i out8 = _mm_packus_epi16(out1_16, out2_16); - _mm_store_si128(xout, out8); - } -#else // Generic code for when SSSE3 is not available. 
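The deleted depth/IR unpackers above move into src/proc/depth-formats-converter.*, but the W10/RAW10 bit layout they handle is unchanged: each 5-byte macro-pixel stores the 8 MSBs of four pixels followed by one byte carrying their 2-bit remainders, and unpack_y10bpack shifts every reassembled 10-bit value into the MSBs of a uint16_t. A standalone sketch of that arithmetic for a single macro-pixel (function and variable names are illustrative):

    #include <array>
    #include <cstdint>
    #include <cstdio>

    // Unpack one 5-byte W10/RAW10 macro-pixel (4 pixels) into four 16-bit values,
    // left-aligning the 10 significant bits, as unpack_y10bpack did.
    std::array<uint16_t, 4> unpack_macro_pixel(const uint8_t raw[5])
    {
        std::array<uint16_t, 4> out{};
        for (int p = 0; p < 4; ++p)
        {
            const uint16_t msb8 = raw[p];                     // 8 MSBs of pixel p
            const uint16_t lsb2 = (raw[4] >> (2 * p)) & 0x3;  // its 2 LSBs, packed in byte 4
            out[p] = static_cast<uint16_t>(((msb8 << 2) | lsb2) << 6);
        }
        return out;
    }

    int main()
    {
        const uint8_t macro[5] = { 0xFF, 0x80, 0x01, 0x00, 0x1B };  // byte 4 = LSBs of pixels 3..0
        for (uint16_t v : unpack_macro_pixel(macro))
            std::printf("%u\n", v);   // 10-bit values left-aligned in 16 bits
        return 0;
    }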
- unsigned short* from = (unsigned short*)s; - byte * to = d[0]; - - for(int i = 0; i < width * height; ++i) - { - byte temp = (byte)(*from >> 2); - *to = temp; - ++from; - ++to; - } -#endif - } - - // Unpack luminocity 8 bit from 10-bit packed macro-pixels (4 pixels in 5 bytes): - // The first four bytes store the 8 MSB of each pixel, and the last byte holds the 2 LSB for each pixel :8888[2222] - void unpack_y8_from_rw10(byte * const d[], const byte * s, int width, int height, int actual_size) - { -#ifdef __SSSE3__ - auto n = width * height; - assert(!(n % 48)); //We process 12 macro-pixels simultaneously to achieve performance boost - auto src = reinterpret_cast(s); - auto dst = reinterpret_cast(d[0]); - - const __m128i * blk_0_in = reinterpret_cast(src); - const __m128i * blk_1_in = reinterpret_cast(src + 15); - const __m128i * blk_2_in = reinterpret_cast(src + 30); - const __m128i * blk_3_in = reinterpret_cast(src + 45); - - auto blk_0_out = reinterpret_cast<__m128i *>(dst); - auto blk_1_out = reinterpret_cast<__m128i *>(dst + 12); - auto blk_2_out = reinterpret_cast<__m128i *>(dst + 24); - auto blk_3_out = reinterpret_cast<__m128i *>(dst + 36); - - __m128i res[4]; - // The mask will reorder the input so the 12 bytes with pixels' MSB values will come first - static const __m128i mask = _mm_setr_epi8(0x0, 0x1, 0x2, 0x3, 0x5, 0x6, 0x7, 0x8, 0xa, 0xb, 0xc, 0xd, -1, -1, -1, -1); - - - for (int i = 0; (i + 48) < n; i += 48, src += 60, dst += 48) - { - blk_0_in = reinterpret_cast(src); - blk_1_in = reinterpret_cast(src + 15); - blk_2_in = reinterpret_cast(src + 30); - blk_3_in = reinterpret_cast(src + 45); - - blk_0_out = reinterpret_cast<__m128i *>(dst); - blk_1_out = reinterpret_cast<__m128i *>(dst + 12); - blk_2_out = reinterpret_cast<__m128i *>(dst + 24); - blk_3_out = reinterpret_cast<__m128i *>(dst + 36); - - res[0] = _mm_shuffle_epi8(_mm_loadu_si128(blk_0_in), mask); - res[1] = _mm_shuffle_epi8(_mm_loadu_si128(blk_1_in), mask); - res[2] = _mm_shuffle_epi8(_mm_loadu_si128(blk_2_in), mask); - res[3] = _mm_shuffle_epi8(_mm_loadu_si128(blk_3_in), mask); - - _mm_storeu_si128(blk_0_out, res[0]); - _mm_storeu_si128(blk_1_out, res[1]); - _mm_storeu_si128(blk_2_out, res[2]); - _mm_storeu_si128(blk_3_out, res[3]); - } -#else // Generic code for when SSSE3 is not available. - auto from = reinterpret_cast(s); - uint8_t * tgt = d[0]; - - for (int i = 0; i < width * height; i += 4, from += 5) - { - *tgt++ = from[0]; - *tgt++ = from[1]; - *tgt++ = from[2]; - *tgt++ = from[3]; - } -#endif - } - - ///////////////////////////// - // YUY2 unpacking routines // - ///////////////////////////// - // This templated function unpacks YUY2 into Y8/Y16/RGB8/RGBA8/BGR8/BGRA8, depending on the compile-time parameter FORMAT. - // It is expected that all branching outside of the loop control variable will be removed due to constant-folding. - template void unpack_yuy2(byte * const d[], const byte * s, int width, int height, int actual_size) - { - auto n = width * height; - assert(n % 16 == 0); // All currently supported color resolutions are multiples of 16 pixels. Could easily extend support to other resolutions by copying final n<16 pixels into a zero-padded buffer and recursively calling self for final iteration. -#ifdef RS2_USE_CUDA - rscuda::unpack_yuy2_cuda(d, s, n); - return; -#endif -#if defined __SSSE3__ && ! 
defined ANDROID - static bool do_avx = has_avx(); - #ifdef __AVX2__ - - if (do_avx) - { - if (FORMAT == RS2_FORMAT_Y8) unpack_yuy2_avx_y8(d, s, n); - if (FORMAT == RS2_FORMAT_Y16) unpack_yuy2_avx_y16(d, s, n); - if (FORMAT == RS2_FORMAT_RGB8) unpack_yuy2_avx_rgb8(d, s, n); - if (FORMAT == RS2_FORMAT_RGBA8) unpack_yuy2_avx_rgba8(d, s, n); - if (FORMAT == RS2_FORMAT_BGR8) unpack_yuy2_avx_bgr8(d, s, n); - if (FORMAT == RS2_FORMAT_BGRA8) unpack_yuy2_avx_bgra8(d, s, n); - } - else - #endif - { - auto src = reinterpret_cast(s); - auto dst = reinterpret_cast<__m128i *>(d[0]); - -#pragma omp parallel for - for (int i = 0; i < n / 16; i++) - { - const __m128i zero = _mm_set1_epi8(0); - const __m128i n100 = _mm_set1_epi16(100 << 4); - const __m128i n208 = _mm_set1_epi16(208 << 4); - const __m128i n298 = _mm_set1_epi16(298 << 4); - const __m128i n409 = _mm_set1_epi16(409 << 4); - const __m128i n516 = _mm_set1_epi16(516 << 4); - const __m128i evens_odds = _mm_setr_epi8(0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15); - - // Load 8 YUY2 pixels each into two 16-byte registers - __m128i s0 = _mm_loadu_si128(&src[i * 2]); - __m128i s1 = _mm_loadu_si128(&src[i * 2 + 1]); - - if (FORMAT == RS2_FORMAT_Y8) - { - // Align all Y components and output 16 pixels (16 bytes) at once - __m128i y0 = _mm_shuffle_epi8(s0, _mm_setr_epi8(1, 3, 5, 7, 9, 11, 13, 15, 0, 2, 4, 6, 8, 10, 12, 14)); - __m128i y1 = _mm_shuffle_epi8(s1, _mm_setr_epi8(0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15)); - _mm_storeu_si128(&dst[i], _mm_alignr_epi8(y0, y1, 8)); - continue; - } - - // Shuffle all Y components to the low order bytes of the register, and all U/V components to the high order bytes - const __m128i evens_odd1s_odd3s = _mm_setr_epi8(0, 2, 4, 6, 8, 10, 12, 14, 1, 5, 9, 13, 3, 7, 11, 15); // to get yyyyyyyyuuuuvvvv - __m128i yyyyyyyyuuuuvvvv0 = _mm_shuffle_epi8(s0, evens_odd1s_odd3s); - __m128i yyyyyyyyuuuuvvvv8 = _mm_shuffle_epi8(s1, evens_odd1s_odd3s); - - // Retrieve all 16 Y components as 16-bit values (8 components per register)) - __m128i y16__0_7 = _mm_unpacklo_epi8(yyyyyyyyuuuuvvvv0, zero); // convert to 16 bit - __m128i y16__8_F = _mm_unpacklo_epi8(yyyyyyyyuuuuvvvv8, zero); // convert to 16 bit - - if (FORMAT == RS2_FORMAT_Y16) - { - // Output 16 pixels (32 bytes) at once - _mm_storeu_si128(&dst[i * 2], _mm_slli_epi16(y16__0_7, 8)); - _mm_storeu_si128(&dst[i * 2 + 1], _mm_slli_epi16(y16__8_F, 8)); - continue; - } - - // Retrieve all 16 U and V components as 16-bit values (8 components per register) - __m128i uv = _mm_unpackhi_epi32(yyyyyyyyuuuuvvvv0, yyyyyyyyuuuuvvvv8); // uuuuuuuuvvvvvvvv - __m128i u = _mm_unpacklo_epi8(uv, uv); // uu uu uu uu uu uu uu uu u's duplicated - __m128i v = _mm_unpackhi_epi8(uv, uv); // vv vv vv vv vv vv vv vv - __m128i u16__0_7 = _mm_unpacklo_epi8(u, zero); // convert to 16 bit - __m128i u16__8_F = _mm_unpackhi_epi8(u, zero); // convert to 16 bit - __m128i v16__0_7 = _mm_unpacklo_epi8(v, zero); // convert to 16 bit - __m128i v16__8_F = _mm_unpackhi_epi8(v, zero); // convert to 16 bit - - // Compute R, G, B values for first 8 pixels - __m128i c16__0_7 = _mm_slli_epi16(_mm_subs_epi16(y16__0_7, _mm_set1_epi16(16)), 4); - __m128i d16__0_7 = _mm_slli_epi16(_mm_subs_epi16(u16__0_7, _mm_set1_epi16(128)), 4); // perhaps could have done these u,v to d,e before the duplication - __m128i e16__0_7 = _mm_slli_epi16(_mm_subs_epi16(v16__0_7, _mm_set1_epi16(128)), 4); - __m128i r16__0_7 = _mm_min_epi16(_mm_set1_epi16(255), _mm_max_epi16(zero, ((_mm_add_epi16(_mm_mulhi_epi16(c16__0_7, n298), 
_mm_mulhi_epi16(e16__0_7, n409)))))); // (298 * c + 409 * e + 128) ; // - __m128i g16__0_7 = _mm_min_epi16(_mm_set1_epi16(255), _mm_max_epi16(zero, ((_mm_sub_epi16(_mm_sub_epi16(_mm_mulhi_epi16(c16__0_7, n298), _mm_mulhi_epi16(d16__0_7, n100)), _mm_mulhi_epi16(e16__0_7, n208)))))); // (298 * c - 100 * d - 208 * e + 128) - __m128i b16__0_7 = _mm_min_epi16(_mm_set1_epi16(255), _mm_max_epi16(zero, ((_mm_add_epi16(_mm_mulhi_epi16(c16__0_7, n298), _mm_mulhi_epi16(d16__0_7, n516)))))); // clampbyte((298 * c + 516 * d + 128) >> 8); - - // Compute R, G, B values for second 8 pixels - __m128i c16__8_F = _mm_slli_epi16(_mm_subs_epi16(y16__8_F, _mm_set1_epi16(16)), 4); - __m128i d16__8_F = _mm_slli_epi16(_mm_subs_epi16(u16__8_F, _mm_set1_epi16(128)), 4); // perhaps could have done these u,v to d,e before the duplication - __m128i e16__8_F = _mm_slli_epi16(_mm_subs_epi16(v16__8_F, _mm_set1_epi16(128)), 4); - __m128i r16__8_F = _mm_min_epi16(_mm_set1_epi16(255), _mm_max_epi16(zero, ((_mm_add_epi16(_mm_mulhi_epi16(c16__8_F, n298), _mm_mulhi_epi16(e16__8_F, n409)))))); // (298 * c + 409 * e + 128) ; // - __m128i g16__8_F = _mm_min_epi16(_mm_set1_epi16(255), _mm_max_epi16(zero, ((_mm_sub_epi16(_mm_sub_epi16(_mm_mulhi_epi16(c16__8_F, n298), _mm_mulhi_epi16(d16__8_F, n100)), _mm_mulhi_epi16(e16__8_F, n208)))))); // (298 * c - 100 * d - 208 * e + 128) - __m128i b16__8_F = _mm_min_epi16(_mm_set1_epi16(255), _mm_max_epi16(zero, ((_mm_add_epi16(_mm_mulhi_epi16(c16__8_F, n298), _mm_mulhi_epi16(d16__8_F, n516)))))); // clampbyte((298 * c + 516 * d + 128) >> 8); - - if (FORMAT == RS2_FORMAT_RGB8 || FORMAT == RS2_FORMAT_RGBA8) - { - // Shuffle separate R, G, B values into four registers storing four pixels each in (R, G, B, A) order - __m128i rg8__0_7 = _mm_unpacklo_epi8(_mm_shuffle_epi8(r16__0_7, evens_odds), _mm_shuffle_epi8(g16__0_7, evens_odds)); // hi to take the odds which are the upper bytes we care about - __m128i ba8__0_7 = _mm_unpacklo_epi8(_mm_shuffle_epi8(b16__0_7, evens_odds), _mm_set1_epi8(-1)); - __m128i rgba_0_3 = _mm_unpacklo_epi16(rg8__0_7, ba8__0_7); - __m128i rgba_4_7 = _mm_unpackhi_epi16(rg8__0_7, ba8__0_7); - - __m128i rg8__8_F = _mm_unpacklo_epi8(_mm_shuffle_epi8(r16__8_F, evens_odds), _mm_shuffle_epi8(g16__8_F, evens_odds)); // hi to take the odds which are the upper bytes we care about - __m128i ba8__8_F = _mm_unpacklo_epi8(_mm_shuffle_epi8(b16__8_F, evens_odds), _mm_set1_epi8(-1)); - __m128i rgba_8_B = _mm_unpacklo_epi16(rg8__8_F, ba8__8_F); - __m128i rgba_C_F = _mm_unpackhi_epi16(rg8__8_F, ba8__8_F); - - if (FORMAT == RS2_FORMAT_RGBA8) - { - // Store 16 pixels (64 bytes) at once - _mm_storeu_si128(&dst[i * 4], rgba_0_3); - _mm_storeu_si128(&dst[i * 4 + 1], rgba_4_7); - _mm_storeu_si128(&dst[i * 4 + 2], rgba_8_B); - _mm_storeu_si128(&dst[i * 4 + 3], rgba_C_F); - } - - if (FORMAT == RS2_FORMAT_RGB8) - { - // Shuffle rgb triples to the start and end of each register - __m128i rgb0 = _mm_shuffle_epi8(rgba_0_3, _mm_setr_epi8(3, 7, 11, 15, 0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14)); - __m128i rgb1 = _mm_shuffle_epi8(rgba_4_7, _mm_setr_epi8(0, 1, 2, 4, 3, 7, 11, 15, 5, 6, 8, 9, 10, 12, 13, 14)); - __m128i rgb2 = _mm_shuffle_epi8(rgba_8_B, _mm_setr_epi8(0, 1, 2, 4, 5, 6, 8, 9, 3, 7, 11, 15, 10, 12, 13, 14)); - __m128i rgb3 = _mm_shuffle_epi8(rgba_C_F, _mm_setr_epi8(0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 3, 7, 11, 15)); - - // Align registers and store 16 pixels (48 bytes) at once - _mm_storeu_si128(&dst[i * 3], _mm_alignr_epi8(rgb1, rgb0, 4)); - _mm_storeu_si128(&dst[i * 3 + 1], 
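// Editorial note (not part of the patch): the SSSE3/AVX paths above implement the
// same integer YUV->RGB formula as the scalar fallback,
//   R = clamp((298*c + 409*e + 128) >> 8), with c = Y - 16 and e = V - 128,
// via a fixed-point trick: both the sample (c) and the coefficient (298) are
// pre-shifted left by 4, so _mm_mulhi_epi16 - which keeps the upper 16 bits of
// the 32-bit product - yields ((c<<4)*(298<<4)) >> 16 == (298*c) >> 8 exactly.
// Note that the +128 rounding term of the scalar path is dropped in the SIMD path.
// The standalone check below only demonstrates that identity.
#include <cassert>
#include <cstdint>

inline int16_t mulhi_scalar(int16_t a, int16_t b)
{
    // what _mm_mulhi_epi16 does per 16-bit lane
    return static_cast<int16_t>((static_cast<int32_t>(a) * b) >> 16);
}

inline void check_fixed_point_identity()
{
    for (int y = 16; y <= 255; ++y)
    {
        const int16_t c    = static_cast<int16_t>(y - 16);
        const int16_t c16  = static_cast<int16_t>(c << 4);    // matches c16__0_7 above
        const int16_t n298 = static_cast<int16_t>(298 << 4);  // matches the n298 constant
        assert(mulhi_scalar(c16, n298) == (298 * c) >> 8);
    }
}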
_mm_alignr_epi8(rgb2, rgb1, 8)); - _mm_storeu_si128(&dst[i * 3 + 2], _mm_alignr_epi8(rgb3, rgb2, 12)); - } - } - - if (FORMAT == RS2_FORMAT_BGR8 || FORMAT == RS2_FORMAT_BGRA8) - { - // Shuffle separate R, G, B values into four registers storing four pixels each in (B, G, R, A) order - __m128i bg8__0_7 = _mm_unpacklo_epi8(_mm_shuffle_epi8(b16__0_7, evens_odds), _mm_shuffle_epi8(g16__0_7, evens_odds)); // hi to take the odds which are the upper bytes we care about - __m128i ra8__0_7 = _mm_unpacklo_epi8(_mm_shuffle_epi8(r16__0_7, evens_odds), _mm_set1_epi8(-1)); - __m128i bgra_0_3 = _mm_unpacklo_epi16(bg8__0_7, ra8__0_7); - __m128i bgra_4_7 = _mm_unpackhi_epi16(bg8__0_7, ra8__0_7); - - __m128i bg8__8_F = _mm_unpacklo_epi8(_mm_shuffle_epi8(b16__8_F, evens_odds), _mm_shuffle_epi8(g16__8_F, evens_odds)); // hi to take the odds which are the upper bytes we care about - __m128i ra8__8_F = _mm_unpacklo_epi8(_mm_shuffle_epi8(r16__8_F, evens_odds), _mm_set1_epi8(-1)); - __m128i bgra_8_B = _mm_unpacklo_epi16(bg8__8_F, ra8__8_F); - __m128i bgra_C_F = _mm_unpackhi_epi16(bg8__8_F, ra8__8_F); - - if (FORMAT == RS2_FORMAT_BGRA8) - { - // Store 16 pixels (64 bytes) at once - _mm_storeu_si128(&dst[i * 4], bgra_0_3); - _mm_storeu_si128(&dst[i * 4 + 1], bgra_4_7); - _mm_storeu_si128(&dst[i * 4 + 2], bgra_8_B); - _mm_storeu_si128(&dst[i * 4 + 3], bgra_C_F); - } - - if (FORMAT == RS2_FORMAT_BGR8) - { - // Shuffle rgb triples to the start and end of each register - __m128i bgr0 = _mm_shuffle_epi8(bgra_0_3, _mm_setr_epi8(3, 7, 11, 15, 0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14)); - __m128i bgr1 = _mm_shuffle_epi8(bgra_4_7, _mm_setr_epi8(0, 1, 2, 4, 3, 7, 11, 15, 5, 6, 8, 9, 10, 12, 13, 14)); - __m128i bgr2 = _mm_shuffle_epi8(bgra_8_B, _mm_setr_epi8(0, 1, 2, 4, 5, 6, 8, 9, 3, 7, 11, 15, 10, 12, 13, 14)); - __m128i bgr3 = _mm_shuffle_epi8(bgra_C_F, _mm_setr_epi8(0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 3, 7, 11, 15)); - - // Align registers and store 16 pixels (48 bytes) at once - _mm_storeu_si128(&dst[i * 3], _mm_alignr_epi8(bgr1, bgr0, 4)); - _mm_storeu_si128(&dst[i * 3 + 1], _mm_alignr_epi8(bgr2, bgr1, 8)); - _mm_storeu_si128(&dst[i * 3 + 2], _mm_alignr_epi8(bgr3, bgr2, 12)); - } - } - } - } -#else // Generic code for when SSSE3 is not available. - auto src = reinterpret_cast(s); - auto dst = reinterpret_cast(d[0]); - for (; n; n -= 16, src += 32) - { - if (FORMAT == RS2_FORMAT_Y8) - { - uint8_t out[16] = { - src[0], src[2], src[4], src[6], - src[8], src[10], src[12], src[14], - src[16], src[18], src[20], src[22], - src[24], src[26], src[28], src[30], - }; - librealsense::copy(dst, out, sizeof out); - dst += sizeof out; - continue; - } - - if (FORMAT == RS2_FORMAT_Y16) - { - // Y16 is little-endian. We output Y << 8. 
- uint8_t out[32] = { - 0, src[0], 0, src[2], 0, src[4], 0, src[6], - 0, src[8], 0, src[10], 0, src[12], 0, src[14], - 0, src[16], 0, src[18], 0, src[20], 0, src[22], - 0, src[24], 0, src[26], 0, src[28], 0, src[30], - }; - librealsense::copy(dst, out, sizeof out); - dst += sizeof out; - continue; - } - - int16_t y[16] = { - src[0], src[2], src[4], src[6], - src[8], src[10], src[12], src[14], - src[16], src[18], src[20], src[22], - src[24], src[26], src[28], src[30], - }, u[16] = { - src[1], src[1], src[5], src[5], - src[9], src[9], src[13], src[13], - src[17], src[17], src[21], src[21], - src[25], src[25], src[29], src[29], - }, v[16] = { - src[3], src[3], src[7], src[7], - src[11], src[11], src[15], src[15], - src[19], src[19], src[23], src[23], - src[27], src[27], src[31], src[31], - }; - - uint8_t r[16], g[16], b[16]; - for (int i = 0; i < 16; i++) - { - int32_t c = y[i] - 16; - int32_t d = u[i] - 128; - int32_t e = v[i] - 128; - - int32_t t; - #define clamp(x) ((t=(x)) > 255 ? 255 : t < 0 ? 0 : t) - r[i] = clamp((298 * c + 409 * e + 128) >> 8); - g[i] = clamp((298 * c - 100 * d - 208 * e + 128) >> 8); - b[i] = clamp((298 * c + 516 * d + 128) >> 8); - #undef clamp - } - - if (FORMAT == RS2_FORMAT_RGB8) - { - uint8_t out[16 * 3] = { - r[0], g[0], b[0], r[1], g[1], b[1], - r[2], g[2], b[2], r[3], g[3], b[3], - r[4], g[4], b[4], r[5], g[5], b[5], - r[6], g[6], b[6], r[7], g[7], b[7], - r[8], g[8], b[8], r[9], g[9], b[9], - r[10], g[10], b[10], r[11], g[11], b[11], - r[12], g[12], b[12], r[13], g[13], b[13], - r[14], g[14], b[14], r[15], g[15], b[15], - }; - librealsense::copy(dst, out, sizeof out); - dst += sizeof out; - continue; - } - - if (FORMAT == RS2_FORMAT_BGR8) - { - uint8_t out[16 * 3] = { - b[0], g[0], r[0], b[1], g[1], r[1], - b[2], g[2], r[2], b[3], g[3], r[3], - b[4], g[4], r[4], b[5], g[5], r[5], - b[6], g[6], r[6], b[7], g[7], r[7], - b[8], g[8], r[8], b[9], g[9], r[9], - b[10], g[10], r[10], b[11], g[11], r[11], - b[12], g[12], r[12], b[13], g[13], r[13], - b[14], g[14], r[14], b[15], g[15], r[15], - }; - librealsense::copy(dst, out, sizeof out); - dst += sizeof out; - continue; - } - - if (FORMAT == RS2_FORMAT_RGBA8) - { - uint8_t out[16 * 4] = { - r[0], g[0], b[0], 255, r[1], g[1], b[1], 255, - r[2], g[2], b[2], 255, r[3], g[3], b[3], 255, - r[4], g[4], b[4], 255, r[5], g[5], b[5], 255, - r[6], g[6], b[6], 255, r[7], g[7], b[7], 255, - r[8], g[8], b[8], 255, r[9], g[9], b[9], 255, - r[10], g[10], b[10], 255, r[11], g[11], b[11], 255, - r[12], g[12], b[12], 255, r[13], g[13], b[13], 255, - r[14], g[14], b[14], 255, r[15], g[15], b[15], 255, - }; - librealsense::copy(dst, out, sizeof out); - dst += sizeof out; - continue; - } - - if (FORMAT == RS2_FORMAT_BGRA8) - { - uint8_t out[16 * 4] = { - b[0], g[0], r[0], 255, b[1], g[1], r[1], 255, - b[2], g[2], r[2], 255, b[3], g[3], r[3], 255, - b[4], g[4], r[4], 255, b[5], g[5], r[5], 255, - b[6], g[6], r[6], 255, b[7], g[7], r[7], 255, - b[8], g[8], r[8], 255, b[9], g[9], r[9], 255, - b[10], g[10], r[10], 255, b[11], g[11], r[11], 255, - b[12], g[12], r[12], 255, b[13], g[13], r[13], 255, - b[14], g[14], r[14], 255, b[15], g[15], r[15], 255, - }; - librealsense::copy(dst, out, sizeof out); - dst += sizeof out; - continue; - } - } - #endif - } - - void unpack_yuy2_y8(byte * const d[], const byte * s, int w, int h, int actual_size) - { - unpack_yuy2(d, s, w, h, actual_size); - } - void unpack_yuy2_y16(byte * const d[], const byte * s, int w, int h, int actual_size) - { - unpack_yuy2(d, s, w, h, actual_size); - } - void 
unpack_yuy2_rgb8(byte * const d[], const byte * s, int w, int h, int actual_size) - { - unpack_yuy2(d, s, w, h, actual_size); - } - void unpack_yuy2_rgba8(byte * const d[], const byte * s, int w, int h, int actual_size) - { - unpack_yuy2(d, s, w, h, actual_size); - } - void unpack_yuy2_bgr8(byte * const d[], const byte * s, int w, int h, int actual_size) - { - unpack_yuy2(d, s, w, h, actual_size); - } - void unpack_yuy2_bgra8(byte * const d[], const byte * s, int w, int h, int actual_size) - { - unpack_yuy2(d, s, w, h, actual_size); - } - - // This templated function unpacks UYVY into RGB8/RGBA8/BGR8/BGRA8, depending on the compile-time parameter FORMAT. - // It is expected that all branching outside of the loop control variable will be removed due to constant-folding. - template void unpack_uyvy(byte * const d[], const byte * s, int width, int height, int actual_size) - { - auto n = width * height; - assert(n % 16 == 0); // All currently supported color resolutions are multiples of 16 pixels. Could easily extend support to other resolutions by copying final n<16 pixels into a zero-padded buffer and recursively calling self for final iteration. -#ifdef __SSSE3__ - auto src = reinterpret_cast(s); - auto dst = reinterpret_cast<__m128i *>(d[0]); - for (; n; n -= 16) - { - const __m128i zero = _mm_set1_epi8(0); - const __m128i n100 = _mm_set1_epi16(100 << 4); - const __m128i n208 = _mm_set1_epi16(208 << 4); - const __m128i n298 = _mm_set1_epi16(298 << 4); - const __m128i n409 = _mm_set1_epi16(409 << 4); - const __m128i n516 = _mm_set1_epi16(516 << 4); - const __m128i evens_odds = _mm_setr_epi8(0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15); - - // Load 8 UYVY pixels each into two 16-byte registers - __m128i s0 = _mm_loadu_si128(src++); - __m128i s1 = _mm_loadu_si128(src++); - - - // Shuffle all Y components to the low order bytes of the register, and all U/V components to the high order bytes - const __m128i evens_odd1s_odd3s = _mm_setr_epi8(1, 3, 5, 7, 9, 11, 13, 15, 0, 4, 8, 12, 2, 6, 10, 14); // to get yyyyyyyyuuuuvvvv - __m128i yyyyyyyyuuuuvvvv0 = _mm_shuffle_epi8(s0, evens_odd1s_odd3s); - __m128i yyyyyyyyuuuuvvvv8 = _mm_shuffle_epi8(s1, evens_odd1s_odd3s); - - // Retrieve all 16 Y components as 16-bit values (8 components per register)) - __m128i y16__0_7 = _mm_unpacklo_epi8(yyyyyyyyuuuuvvvv0, zero); // convert to 16 bit - __m128i y16__8_F = _mm_unpacklo_epi8(yyyyyyyyuuuuvvvv8, zero); // convert to 16 bit - - - // Retrieve all 16 U and V components as 16-bit values (8 components per register) - __m128i uv = _mm_unpackhi_epi32(yyyyyyyyuuuuvvvv0, yyyyyyyyuuuuvvvv8); // uuuuuuuuvvvvvvvv - __m128i u = _mm_unpacklo_epi8(uv, uv); // uu uu uu uu uu uu uu uu u's duplicated - __m128i v = _mm_unpackhi_epi8(uv, uv); // vv vv vv vv vv vv vv vv - __m128i u16__0_7 = _mm_unpacklo_epi8(u, zero); // convert to 16 bit - __m128i u16__8_F = _mm_unpackhi_epi8(u, zero); // convert to 16 bit - __m128i v16__0_7 = _mm_unpacklo_epi8(v, zero); // convert to 16 bit - __m128i v16__8_F = _mm_unpackhi_epi8(v, zero); // convert to 16 bit - - // Compute R, G, B values for first 8 pixels - __m128i c16__0_7 = _mm_slli_epi16(_mm_subs_epi16(y16__0_7, _mm_set1_epi16(16)), 4); - __m128i d16__0_7 = _mm_slli_epi16(_mm_subs_epi16(u16__0_7, _mm_set1_epi16(128)), 4); // perhaps could have done these u,v to d,e before the duplication - __m128i e16__0_7 = _mm_slli_epi16(_mm_subs_epi16(v16__0_7, _mm_set1_epi16(128)), 4); - __m128i r16__0_7 = _mm_min_epi16(_mm_set1_epi16(255), _mm_max_epi16(zero, 
((_mm_add_epi16(_mm_mulhi_epi16(c16__0_7, n298), _mm_mulhi_epi16(e16__0_7, n409)))))); // (298 * c + 409 * e + 128) ; // - __m128i g16__0_7 = _mm_min_epi16(_mm_set1_epi16(255), _mm_max_epi16(zero, ((_mm_sub_epi16(_mm_sub_epi16(_mm_mulhi_epi16(c16__0_7, n298), _mm_mulhi_epi16(d16__0_7, n100)), _mm_mulhi_epi16(e16__0_7, n208)))))); // (298 * c - 100 * d - 208 * e + 128) - __m128i b16__0_7 = _mm_min_epi16(_mm_set1_epi16(255), _mm_max_epi16(zero, ((_mm_add_epi16(_mm_mulhi_epi16(c16__0_7, n298), _mm_mulhi_epi16(d16__0_7, n516)))))); // clampbyte((298 * c + 516 * d + 128) >> 8); - - // Compute R, G, B values for second 8 pixels - __m128i c16__8_F = _mm_slli_epi16(_mm_subs_epi16(y16__8_F, _mm_set1_epi16(16)), 4); - __m128i d16__8_F = _mm_slli_epi16(_mm_subs_epi16(u16__8_F, _mm_set1_epi16(128)), 4); // perhaps could have done these u,v to d,e before the duplication - __m128i e16__8_F = _mm_slli_epi16(_mm_subs_epi16(v16__8_F, _mm_set1_epi16(128)), 4); - __m128i r16__8_F = _mm_min_epi16(_mm_set1_epi16(255), _mm_max_epi16(zero, ((_mm_add_epi16(_mm_mulhi_epi16(c16__8_F, n298), _mm_mulhi_epi16(e16__8_F, n409)))))); // (298 * c + 409 * e + 128) ; // - __m128i g16__8_F = _mm_min_epi16(_mm_set1_epi16(255), _mm_max_epi16(zero, ((_mm_sub_epi16(_mm_sub_epi16(_mm_mulhi_epi16(c16__8_F, n298), _mm_mulhi_epi16(d16__8_F, n100)), _mm_mulhi_epi16(e16__8_F, n208)))))); // (298 * c - 100 * d - 208 * e + 128) - __m128i b16__8_F = _mm_min_epi16(_mm_set1_epi16(255), _mm_max_epi16(zero, ((_mm_add_epi16(_mm_mulhi_epi16(c16__8_F, n298), _mm_mulhi_epi16(d16__8_F, n516)))))); // clampbyte((298 * c + 516 * d + 128) >> 8); - - if (FORMAT == RS2_FORMAT_RGB8 || FORMAT == RS2_FORMAT_RGBA8) - { - // Shuffle separate R, G, B values into four registers storing four pixels each in (R, G, B, A) order - __m128i rg8__0_7 = _mm_unpacklo_epi8(_mm_shuffle_epi8(r16__0_7, evens_odds), _mm_shuffle_epi8(g16__0_7, evens_odds)); // hi to take the odds which are the upper bytes we care about - __m128i ba8__0_7 = _mm_unpacklo_epi8(_mm_shuffle_epi8(b16__0_7, evens_odds), _mm_set1_epi8(-1)); - __m128i rgba_0_3 = _mm_unpacklo_epi16(rg8__0_7, ba8__0_7); - __m128i rgba_4_7 = _mm_unpackhi_epi16(rg8__0_7, ba8__0_7); - - __m128i rg8__8_F = _mm_unpacklo_epi8(_mm_shuffle_epi8(r16__8_F, evens_odds), _mm_shuffle_epi8(g16__8_F, evens_odds)); // hi to take the odds which are the upper bytes we care about - __m128i ba8__8_F = _mm_unpacklo_epi8(_mm_shuffle_epi8(b16__8_F, evens_odds), _mm_set1_epi8(-1)); - __m128i rgba_8_B = _mm_unpacklo_epi16(rg8__8_F, ba8__8_F); - __m128i rgba_C_F = _mm_unpackhi_epi16(rg8__8_F, ba8__8_F); - - if (FORMAT == RS2_FORMAT_RGBA8) - { - // Store 16 pixels (64 bytes) at once - _mm_storeu_si128(dst++, rgba_0_3); - _mm_storeu_si128(dst++, rgba_4_7); - _mm_storeu_si128(dst++, rgba_8_B); - _mm_storeu_si128(dst++, rgba_C_F); - } - - if (FORMAT == RS2_FORMAT_RGB8) - { - // Shuffle rgb triples to the start and end of each register - __m128i rgb0 = _mm_shuffle_epi8(rgba_0_3, _mm_setr_epi8(3, 7, 11, 15, 0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14)); - __m128i rgb1 = _mm_shuffle_epi8(rgba_4_7, _mm_setr_epi8(0, 1, 2, 4, 3, 7, 11, 15, 5, 6, 8, 9, 10, 12, 13, 14)); - __m128i rgb2 = _mm_shuffle_epi8(rgba_8_B, _mm_setr_epi8(0, 1, 2, 4, 5, 6, 8, 9, 3, 7, 11, 15, 10, 12, 13, 14)); - __m128i rgb3 = _mm_shuffle_epi8(rgba_C_F, _mm_setr_epi8(0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 3, 7, 11, 15)); - - // Align registers and store 16 pixels (48 bytes) at once - _mm_storeu_si128(dst++, _mm_alignr_epi8(rgb1, rgb0, 4)); - _mm_storeu_si128(dst++, _mm_alignr_epi8(rgb2, 
rgb1, 8)); - _mm_storeu_si128(dst++, _mm_alignr_epi8(rgb3, rgb2, 12)); - } - } - - if (FORMAT == RS2_FORMAT_BGR8 || FORMAT == RS2_FORMAT_BGRA8) - { - // Shuffle separate R, G, B values into four registers storing four pixels each in (B, G, R, A) order - __m128i bg8__0_7 = _mm_unpacklo_epi8(_mm_shuffle_epi8(b16__0_7, evens_odds), _mm_shuffle_epi8(g16__0_7, evens_odds)); // hi to take the odds which are the upper bytes we care about - __m128i ra8__0_7 = _mm_unpacklo_epi8(_mm_shuffle_epi8(r16__0_7, evens_odds), _mm_set1_epi8(-1)); - __m128i bgra_0_3 = _mm_unpacklo_epi16(bg8__0_7, ra8__0_7); - __m128i bgra_4_7 = _mm_unpackhi_epi16(bg8__0_7, ra8__0_7); - - __m128i bg8__8_F = _mm_unpacklo_epi8(_mm_shuffle_epi8(b16__8_F, evens_odds), _mm_shuffle_epi8(g16__8_F, evens_odds)); // hi to take the odds which are the upper bytes we care about - __m128i ra8__8_F = _mm_unpacklo_epi8(_mm_shuffle_epi8(r16__8_F, evens_odds), _mm_set1_epi8(-1)); - __m128i bgra_8_B = _mm_unpacklo_epi16(bg8__8_F, ra8__8_F); - __m128i bgra_C_F = _mm_unpackhi_epi16(bg8__8_F, ra8__8_F); - - if (FORMAT == RS2_FORMAT_BGRA8) - { - // Store 16 pixels (64 bytes) at once - _mm_storeu_si128(dst++, bgra_0_3); - _mm_storeu_si128(dst++, bgra_4_7); - _mm_storeu_si128(dst++, bgra_8_B); - _mm_storeu_si128(dst++, bgra_C_F); - } - - if (FORMAT == RS2_FORMAT_BGR8) - { - // Shuffle rgb triples to the start and end of each register - __m128i bgr0 = _mm_shuffle_epi8(bgra_0_3, _mm_setr_epi8(3, 7, 11, 15, 0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14)); - __m128i bgr1 = _mm_shuffle_epi8(bgra_4_7, _mm_setr_epi8(0, 1, 2, 4, 3, 7, 11, 15, 5, 6, 8, 9, 10, 12, 13, 14)); - __m128i bgr2 = _mm_shuffle_epi8(bgra_8_B, _mm_setr_epi8(0, 1, 2, 4, 5, 6, 8, 9, 3, 7, 11, 15, 10, 12, 13, 14)); - __m128i bgr3 = _mm_shuffle_epi8(bgra_C_F, _mm_setr_epi8(0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 3, 7, 11, 15)); - - // Align registers and store 16 pixels (48 bytes) at once - _mm_storeu_si128(dst++, _mm_alignr_epi8(bgr1, bgr0, 4)); - _mm_storeu_si128(dst++, _mm_alignr_epi8(bgr2, bgr1, 8)); - _mm_storeu_si128(dst++, _mm_alignr_epi8(bgr3, bgr2, 12)); - } - } - } -#else // Generic code for when SSSE3 is not available. - auto src = reinterpret_cast(s); - auto dst = reinterpret_cast(d[0]); - for (; n; n -= 16, src += 32) - { - int16_t y[16] = { - src[1], src[3], src[5], src[7], - src[9], src[11], src[13], src[15], - src[17], src[19], src[21], src[23], - src[25], src[27], src[29], src[31], - }, u[16] = { - src[0], src[0], src[4], src[4], - src[8], src[8], src[12], src[12], - src[16], src[16], src[20], src[20], - src[24], src[24], src[28], src[28], - }, v[16] = { - src[2], src[2], src[6], src[6], - src[10], src[10], src[14], src[14], - src[18], src[18], src[22], src[22], - src[26], src[26], src[30], src[30], - }; - - uint8_t r[16], g[16], b[16]; - for (int i = 0; i < 16; i++) - { - int32_t c = y[i] - 16; - int32_t d = u[i] - 128; - int32_t e = v[i] - 128; - - int32_t t; -#define clamp(x) ((t=(x)) > 255 ? 255 : t < 0 ? 
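// Editorial note (not part of the patch): the only substantive difference between
// the YUY2 and UYVY unpackers is byte order inside the 4-byte macro-pixel:
//   YUY2: [ Y0, U, Y1, V ]  - luma at even offsets, chroma at odd offsets
//   UYVY: [ U, Y0, V, Y1 ]  - chroma at even offsets, luma at odd offsets
// which is why the SSSE3 shuffle masks and the scalar index patterns in the two
// templates are mirrored. A minimal scalar illustration of both layouts:
#include <cstdint>

struct yuv_pair { uint8_t y0, y1, u, v; }; // two pixels sharing one U/V sample

inline yuv_pair read_yuy2(const uint8_t* p) { return { p[0], p[2], p[1], p[3] }; }
inline yuv_pair read_uyvy(const uint8_t* p) { return { p[1], p[3], p[0], p[2] }; }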
0 : t) - r[i] = clamp((298 * c + 409 * e + 128) >> 8); - g[i] = clamp((298 * c - 100 * d - 208 * e + 128) >> 8); - b[i] = clamp((298 * c + 516 * d + 128) >> 8); -#undef clamp - } - - if (FORMAT == RS2_FORMAT_RGB8) - { - uint8_t out[16 * 3] = { - r[0], g[0], b[0], r[1], g[1], b[1], - r[2], g[2], b[2], r[3], g[3], b[3], - r[4], g[4], b[4], r[5], g[5], b[5], - r[6], g[6], b[6], r[7], g[7], b[7], - r[8], g[8], b[8], r[9], g[9], b[9], - r[10], g[10], b[10], r[11], g[11], b[11], - r[12], g[12], b[12], r[13], g[13], b[13], - r[14], g[14], b[14], r[15], g[15], b[15], - }; - librealsense::copy(dst, out, sizeof out); - dst += sizeof out; - continue; - } - - if (FORMAT == RS2_FORMAT_BGR8) - { - uint8_t out[16 * 3] = { - b[0], g[0], r[0], b[1], g[1], r[1], - b[2], g[2], r[2], b[3], g[3], r[3], - b[4], g[4], r[4], b[5], g[5], r[5], - b[6], g[6], r[6], b[7], g[7], r[7], - b[8], g[8], r[8], b[9], g[9], r[9], - b[10], g[10], r[10], b[11], g[11], r[11], - b[12], g[12], r[12], b[13], g[13], r[13], - b[14], g[14], r[14], b[15], g[15], r[15], - }; - librealsense::copy(dst, out, sizeof out); - dst += sizeof out; - continue; - } - - if (FORMAT == RS2_FORMAT_RGBA8) - { - uint8_t out[16 * 4] = { - r[0], g[0], b[0], 255, r[1], g[1], b[1], 255, - r[2], g[2], b[2], 255, r[3], g[3], b[3], 255, - r[4], g[4], b[4], 255, r[5], g[5], b[5], 255, - r[6], g[6], b[6], 255, r[7], g[7], b[7], 255, - r[8], g[8], b[8], 255, r[9], g[9], b[9], 255, - r[10], g[10], b[10], 255, r[11], g[11], b[11], 255, - r[12], g[12], b[12], 255, r[13], g[13], b[13], 255, - r[14], g[14], b[14], 255, r[15], g[15], b[15], 255, - }; - librealsense::copy(dst, out, sizeof out); - dst += sizeof out; - continue; - } - - if (FORMAT == RS2_FORMAT_BGRA8) - { - uint8_t out[16 * 4] = { - b[0], g[0], r[0], 255, b[1], g[1], r[1], 255, - b[2], g[2], r[2], 255, b[3], g[3], r[3], 255, - b[4], g[4], r[4], 255, b[5], g[5], r[5], 255, - b[6], g[6], r[6], 255, b[7], g[7], r[7], 255, - b[8], g[8], r[8], 255, b[9], g[9], r[9], 255, - b[10], g[10], r[10], 255, b[11], g[11], r[11], 255, - b[12], g[12], r[12], 255, b[13], g[13], r[13], 255, - b[14], g[14], r[14], 255, b[15], g[15], r[15], 255, - }; - librealsense::copy(dst, out, sizeof out); - dst += sizeof out; - continue; - } - } -#endif - } - - void copy_mjpeg(byte * const dest[], const byte * source, int width, int height, int actual_size) - { - librealsense::copy(dest[0], source, actual_size); - } - - void unpack_mjpeg(byte * const dest[], const byte * source, int width, int height, int actual_size) - { - //STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) - int w, h, bpp; - auto uncompressed_rgb = stbi_load_from_memory(source, actual_size, &w, &h, &bpp, false); - if (uncompressed_rgb) - { - auto uncompressed_size = w * h * bpp; - librealsense::copy(dest[0], uncompressed_rgb, uncompressed_size); - stbi_image_free(uncompressed_rgb); - } - else - LOG_ERROR("jpeg decode failed"); - } ////////////////////////////////////// - // 2-in-1 format splitting routines // + // Frame rotation routines // ////////////////////////////////////// - - template void split_frame(byte * const dest[], int count, const SOURCE * source, SPLIT_A split_a, SPLIT_B split_b) - { - auto a = reinterpret_cast(dest[0]); - auto b = reinterpret_cast(dest[1]); - for (int i = 0; i < count; ++i) - { - *a++ = split_a(*source); - *b++ = split_b(*source++); - } - } - - struct y8i_pixel { uint8_t l, r; }; - void unpack_y8_y8_from_y8i(byte * const dest[], const byte * source, int width, int height, 
int actual_size) - { - auto count = width * height; -#ifdef RS2_USE_CUDA - rscuda::split_frame_y8_y8_from_y8i_cuda(dest, count, reinterpret_cast(source)); -#else - split_frame(dest, count, reinterpret_cast(source), - [](const y8i_pixel & p) -> uint8_t { return p.l; }, - [](const y8i_pixel & p) -> uint8_t { return p.r; }); -#endif - } - - struct y12i_pixel { uint8_t rl : 8, rh : 4, ll : 4, lh : 8; int l() const { return lh << 4 | ll; } int r() const { return rh << 8 | rl; } }; - void unpack_y16_y16_from_y12i_10(byte * const dest[], const byte * source, int width, int height, int actual_size) - { - auto count = width * height; -#ifdef RS2_USE_CUDA - rscuda::split_frame_y16_y16_from_y12i_cuda(dest, count, reinterpret_cast(source)); -#else - split_frame(dest, count, reinterpret_cast(source), - [](const y12i_pixel & p) -> uint16_t { return p.l() << 6 | p.l() >> 4; }, // We want to convert 10-bit data to 16-bit data - [](const y12i_pixel & p) -> uint16_t { return p.r() << 6 | p.r() >> 4; }); // Multiply by 64 1/16 to efficiently approximate 65535/1023 -#endif - } - - struct f200_inzi_pixel { uint16_t z16; uint8_t y8; }; - void unpack_z16_y8_from_f200_inzi(byte * const dest[], const byte * source, int width, int height, int actual_size) - { - auto count = width * height; - split_frame(dest, count, reinterpret_cast(source), - [](const f200_inzi_pixel & p) -> uint16_t { return p.z16; }, - [](const f200_inzi_pixel & p) -> uint8_t { return p.y8; }); - } - - void unpack_z16_y16_from_f200_inzi(byte * const dest[], const byte * source, int width, int height, int actual_size) - { - auto count = width * height; - split_frame(dest, count, reinterpret_cast(source), - [](const f200_inzi_pixel & p) -> uint16_t { return p.z16; }, - [](const f200_inzi_pixel & p) -> uint16_t { return p.y8 | p.y8 << 8; }); - } - - void unpack_z16_y8_from_sr300_inzi(byte * const dest[], const byte * source, int width, int height, int actual_size) - { - auto count = width * height; - auto in = reinterpret_cast(source); - auto out_ir = reinterpret_cast(dest[1]); -#ifdef RS2_USE_CUDA - rscuda::unpack_z16_y8_from_sr300_inzi_cuda(out_ir, in, count); -#else - for (int i = 0; i < count; ++i) *out_ir++ = *in++ >> 2; -#endif - librealsense::copy(dest[0], in, count * 2); - } - - void unpack_z16_y16_from_sr300_inzi (byte * const dest[], const byte * source, int width, int height, int actual_size) - { - auto count = width * height; - auto in = reinterpret_cast(source); - auto out_ir = reinterpret_cast(dest[1]); -#ifdef RS2_USE_CUDA - rscuda::unpack_z16_y16_from_sr300_inzi_cuda(out_ir, in, count); -#else - for (int i = 0; i < count; ++i) *out_ir++ = *in++ << 6; -#endif - librealsense::copy(dest[0], in, count * 2); - } - - void unpack_rgb_from_bgr(byte * const dest[], const byte * source, int width, int height, int actual_size) - { - auto count = width * height; - auto in = reinterpret_cast(source); - auto out = reinterpret_cast(dest[0]); - - librealsense::copy(out, in, count * 3); - for (auto i = 0; i < count; i++) - { - std::swap(out[i * 3], out[i * 3 + 2]); - } - } - -#ifdef ZERO_COPY - constexpr bool requires_processing = false; -#else - constexpr bool requires_processing = true; -#endif - resolution rotate_resolution(resolution res) { return resolution{ res.height , res.width}; @@ -1049,86 +73,6 @@ namespace librealsense return resolution{ res.height , res.width * 2 }; } - ////////////////////////// - // Native pixel formats // - ////////////////////////// - const native_pixel_format pf_fe_raw8_unpatched_kernel = { 
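// Editorial note (not part of the patch): split_frame() (retained in image.h in
// this patch) fans one interleaved source out into two destination planes through
// two projection callables - this is how Y8I becomes two Y8 images and Y12I
// becomes two Y16 images. The 10-bit -> 16-bit rescale used for Y12I,
// v << 6 | v >> 4, is a cheap approximation of v * 65535 / 1023 and is exact at
// 0 and 1023. The names below (lr_pixel, split_left_right) are illustrative only.
#include <cstddef>
#include <cstdint>

struct lr_pixel { uint8_t left, right; };              // same idea as the removed y8i_pixel

inline uint16_t scale_10_to_16(uint16_t v)             // v is a 10-bit sample
{
    return static_cast<uint16_t>((v << 6) | (v >> 4)); // 0 -> 0, 1023 -> 65535
}

inline void split_left_right(const lr_pixel* src, size_t count,
                             uint8_t* left_ir, uint8_t* right_ir)
{
    for (size_t i = 0; i < count; ++i)
    {
        left_ir[i]  = src[i].left;                     // first projection
        right_ir[i] = src[i].right;                    // second projection
    }
}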
rs_fourcc('R','A','W','8'), 1, 1, { { false, ©_pixels<1>, { { RS2_STREAM_FISHEYE, RS2_FORMAT_RAW8 } } } } }; - const native_pixel_format pf_raw8 = { rs_fourcc('G','R','E','Y'), 1, 1, { { true, ©_pixels<1>, { { RS2_STREAM_FISHEYE, RS2_FORMAT_RAW8 } } } } }; - const native_pixel_format pf_rw16 = { rs_fourcc('R','W','1','6'), 1, 2, { { false, ©_pixels<2>, { { RS2_STREAM_COLOR, RS2_FORMAT_RAW16 } } } } }; - const native_pixel_format pf_bayer16 = { rs_fourcc('B','Y','R','2'), 1, 2, { { false, ©_pixels<2>, { { RS2_STREAM_COLOR, RS2_FORMAT_RAW16 } } } } }; - const native_pixel_format pf_rw10 = { rs_fourcc('p','R','A','A'), 1, 1, { { false, ©_raw10, { { RS2_STREAM_COLOR, RS2_FORMAT_RAW10 } } } } }; - const native_pixel_format pf_w10 = { rs_fourcc('W','1','0',' '), 1, 2, { { true, ©_raw10, { { { RS2_STREAM_INFRARED, 1 }, RS2_FORMAT_RAW10 } } }, - { true, &unpack_y10bpack, { { { RS2_STREAM_INFRARED, 1 }, RS2_FORMAT_Y10BPACK } } } } }; - - const native_pixel_format pf_yuy2 = { rs_fourcc('Y','U','Y','2'), 1, 2, { { true, &unpack_yuy2, { { RS2_STREAM_COLOR, RS2_FORMAT_RGB8 } } }, - { true, &unpack_yuy2, { { RS2_STREAM_COLOR, RS2_FORMAT_Y16 } } }, - { true, ©_pixels<2>, { { RS2_STREAM_COLOR, RS2_FORMAT_YUYV } } }, - { true, &unpack_yuy2, { { RS2_STREAM_COLOR, RS2_FORMAT_RGBA8 } } }, - { true, &unpack_yuy2, { { RS2_STREAM_COLOR, RS2_FORMAT_BGR8 } } }, - { true, &unpack_yuy2, { { RS2_STREAM_COLOR, RS2_FORMAT_BGRA8 } } } } }; - - const native_pixel_format pf_confidence_l500 = { rs_fourcc('C',' ',' ',' '), 1, 1, { { true, &unpack_confidence, { { RS2_STREAM_CONFIDENCE, RS2_FORMAT_RAW8, l500_confidence_resolution } } }, - { requires_processing, ©_pixels<1>, { { RS2_STREAM_CONFIDENCE, RS2_FORMAT_RAW8 } } } } }; - const native_pixel_format pf_z16_l500 = { rs_fourcc('Z','1','6',' '), 1, 2, { { true, &align_l500_image_optimized<2>, { { RS2_STREAM_DEPTH, RS2_FORMAT_Z16, rotate_resolution } } }, - { requires_processing, ©_pixels<2>, { { RS2_STREAM_DEPTH, RS2_FORMAT_Z16 } } } } }; - const native_pixel_format pf_y8_l500 = { rs_fourcc('G','R','E','Y'), 1, 1, { { true, &align_l500_image_optimized<1>, { { RS2_STREAM_INFRARED, RS2_FORMAT_Y8, rotate_resolution } } }, - { requires_processing, ©_pixels<1>, { { RS2_STREAM_INFRARED, RS2_FORMAT_Y8 } } } } }; - const native_pixel_format pf_y8 = { rs_fourcc('G','R','E','Y'), 1, 1, { { requires_processing, ©_pixels<1>, { { { RS2_STREAM_INFRARED, 1 }, RS2_FORMAT_Y8 } } } } }; - const native_pixel_format pf_y16 = { rs_fourcc('Y','1','6',' '), 1, 2, { { true, &unpack_y16_from_y16_10, { { { RS2_STREAM_INFRARED, 1 }, RS2_FORMAT_Y16 } } } } }; - const native_pixel_format pf_y8i = { rs_fourcc('Y','8','I',' '), 1, 2, { { true, &unpack_y8_y8_from_y8i, { { { RS2_STREAM_INFRARED, 1 }, RS2_FORMAT_Y8 }, - { { RS2_STREAM_INFRARED, 2 }, RS2_FORMAT_Y8 } } } } }; - const native_pixel_format pf_y12i = { rs_fourcc('Y','1','2','I'), 1, 3, { { true, &unpack_y16_y16_from_y12i_10, { { { RS2_STREAM_INFRARED, 1 }, RS2_FORMAT_Y16 }, - { { RS2_STREAM_INFRARED, 2 }, RS2_FORMAT_Y16 } } } } }; - const native_pixel_format pf_z16 = { rs_fourcc('Z','1','6',' '), 1, 2, { { requires_processing, ©_pixels<2>, { { RS2_STREAM_DEPTH, RS2_FORMAT_Z16 } } }, - // The Disparity_Z is not applicable for D4XX. 
TODO - merge with INVZ when confirmed - /*{ false, ©_pixels<2>, { { RS2_STREAM_DEPTH, RS2_FORMAT_DISPARITY16 } } }*/ } }; - const native_pixel_format pf_invz = { rs_fourcc('Z','1','6',' '), 1, 2, { { true, ©_pixels<2>, { { RS2_STREAM_DEPTH, RS2_FORMAT_Z16 } } } } }; - const native_pixel_format pf_f200_invi = { rs_fourcc('I','N','V','I'), 1, 1, { { true, ©_pixels<1>, { { { RS2_STREAM_INFRARED, 1 }, RS2_FORMAT_Y8 } } }, - { true, &unpack_y16_from_y8, { { { RS2_STREAM_INFRARED, 1 }, RS2_FORMAT_Y16 } } } } }; - const native_pixel_format pf_f200_inzi = { rs_fourcc('I','N','Z','I'), 1, 3, { { true, &unpack_z16_y8_from_f200_inzi, { { RS2_STREAM_DEPTH, RS2_FORMAT_Z16 }, - { { RS2_STREAM_INFRARED, 1 }, RS2_FORMAT_Y8 } } }, - { true, &unpack_z16_y16_from_f200_inzi, { { RS2_STREAM_DEPTH, RS2_FORMAT_Z16 }, - { { RS2_STREAM_INFRARED, 1 }, RS2_FORMAT_Y16 } } } } }; - const native_pixel_format pf_sr300_invi = { rs_fourcc('I','N','V','I'), 1, 2, { { true, &unpack_y8_from_y16_10, { { { RS2_STREAM_INFRARED, 1 }, RS2_FORMAT_Y8 } } }, - { true, &unpack_y16_from_y16_10, { { { RS2_STREAM_INFRARED, 1 }, RS2_FORMAT_Y16 } } } } }; - const native_pixel_format pf_sr300_inzi = { rs_fourcc('I','N','Z','I'), 2, 2, { { true, &unpack_z16_y8_from_sr300_inzi, { { RS2_STREAM_DEPTH, RS2_FORMAT_Z16 }, - { { RS2_STREAM_INFRARED, 1 }, RS2_FORMAT_Y8 } } }, - { true, &unpack_z16_y16_from_sr300_inzi, { { RS2_STREAM_DEPTH, RS2_FORMAT_Z16 }, - { { RS2_STREAM_INFRARED, 1 }, RS2_FORMAT_Y16 } } } } }; - - const native_pixel_format pf_uyvyl = { rs_fourcc('U','Y','V','Y'), 1, 2, { { true, &unpack_uyvy, { { RS2_STREAM_INFRARED, RS2_FORMAT_RGB8 } } }, - { true, ©_pixels<2>, { { RS2_STREAM_INFRARED, RS2_FORMAT_UYVY } } }, - { true, &unpack_uyvy, { { RS2_STREAM_INFRARED, RS2_FORMAT_RGBA8} } }, - { true, &unpack_uyvy, { { RS2_STREAM_INFRARED, RS2_FORMAT_BGR8 } } }, - { true, &unpack_uyvy, { { RS2_STREAM_INFRARED, RS2_FORMAT_BGRA8} } } } }; - - const native_pixel_format pf_uyvyc = { rs_fourcc('U','Y','V','Y'), 1, 2, { { true, &unpack_uyvy, { { RS2_STREAM_COLOR, RS2_FORMAT_RGB8 } } }, - { true, ©_pixels<2>, { { RS2_STREAM_COLOR, RS2_FORMAT_UYVY } } }, - { true, &unpack_uyvy, { { RS2_STREAM_COLOR, RS2_FORMAT_RGBA8} } }, - { true, &unpack_uyvy, { { RS2_STREAM_COLOR, RS2_FORMAT_BGR8 } } }, - { true, &unpack_uyvy, { { RS2_STREAM_COLOR, RS2_FORMAT_BGRA8} } } } }; - - const native_pixel_format pf_rgb888 = { rs_fourcc('R','G','B','2'), 1, 2, { { true, &unpack_rgb_from_bgr, { { RS2_STREAM_INFRARED, RS2_FORMAT_RGB8 } } } } }; - - - const native_pixel_format pf_yuyv = { rs_fourcc('Y','U','Y','V'), 1, 2, { { true, &unpack_yuy2, { { RS2_STREAM_COLOR, RS2_FORMAT_RGB8 } } }, - { true, &unpack_yuy2, { { RS2_STREAM_COLOR, RS2_FORMAT_Y16 } } }, - { true, ©_pixels<2>, { { RS2_STREAM_COLOR, RS2_FORMAT_YUYV } } }, - { true, &unpack_yuy2, { { RS2_STREAM_COLOR, RS2_FORMAT_RGBA8 } } }, - { true, &unpack_yuy2, { { RS2_STREAM_COLOR, RS2_FORMAT_BGR8 } } }, - { true, &unpack_yuy2, { { RS2_STREAM_COLOR, RS2_FORMAT_BGRA8 } } } } }; - - const native_pixel_format pf_accel_axes = { rs_fourcc('A','C','C','L'), 1, 1, { { true, &unpack_accel_axes, { { RS2_STREAM_ACCEL, RS2_FORMAT_MOTION_XYZ32F } } } } }; - //{ false, &unpack_hid_raw_data, { { RS2_STREAM_ACCEL, RS2_FORMAT_MOTION_RAW } } } } }; - const native_pixel_format pf_gyro_axes = { rs_fourcc('G','Y','R','O'), 1, 1, { { true, &unpack_gyro_axes, { { RS2_STREAM_GYRO, RS2_FORMAT_MOTION_XYZ32F } } } } }; - //{ false, &unpack_hid_raw_data, { { RS2_STREAM_GYRO, RS2_FORMAT_MOTION_RAW } } } } }; - const native_pixel_format 
pf_gpio_timestamp = { rs_fourcc('G','P','I','O'), 1, 1, { { false, &unpack_input_reports_data, { { { RS2_STREAM_GPIO, 1 }, RS2_FORMAT_GPIO_RAW }, - { { RS2_STREAM_GPIO, 2 }, RS2_FORMAT_GPIO_RAW }, - { { RS2_STREAM_GPIO, 3 }, RS2_FORMAT_GPIO_RAW }, - { { RS2_STREAM_GPIO, 4 }, RS2_FORMAT_GPIO_RAW }} } } }; - - const native_pixel_format pf_mjpg = { rs_fourcc('M','J','P','G'), 1, 1, { { true, ©_mjpeg, { { RS2_STREAM_COLOR, RS2_FORMAT_MJPEG } } }, - { true, &unpack_mjpeg, { { RS2_STREAM_COLOR, RS2_FORMAT_RGB8} } } } }; - } +} #pragma pack(pop) diff --git a/src/image.h b/src/image.h index 71056bd554..63ac9537eb 100644 --- a/src/image.h +++ b/src/image.h @@ -9,58 +9,22 @@ namespace librealsense { - void unpack_yuy2_y8(byte * const d[], const byte * s, int w, int h, int actual_size); - void unpack_yuy2_y16(byte * const d[], const byte * s, int w, int h, int actual_size); - void unpack_yuy2_rgb8(byte * const d[], const byte * s, int w, int h, int actual_size); - void unpack_yuy2_rgba8(byte * const d[], const byte * s, int w, int h, int actual_size); - void unpack_yuy2_bgr8(byte * const d[], const byte * s, int w, int h, int actual_size); - void unpack_yuy2_bgra8(byte * const d[], const byte * s, int w, int h, int actual_size); - size_t get_image_size (int width, int height, rs2_format format); int get_image_bpp (rs2_format format); - void deproject_z (float * points, const rs2_intrinsics & z_intrin, const uint16_t * z_pixels, float z_scale); - void deproject_disparity (float * points, const rs2_intrinsics & disparity_intrin, const uint16_t * disparity_pixels, float disparity_scale); - - void align_z_to_other (byte * z_aligned_to_other, const uint16_t * z_pixels, float z_scale, const rs2_intrinsics & z_intrin, - const rs2_extrinsics & z_to_other, const rs2_intrinsics & other_intrin); - void align_disparity_to_other (byte * disparity_aligned_to_other, const uint16_t * disparity_pixels, float disparity_scale, const rs2_intrinsics & disparity_intrin, - const rs2_extrinsics & disparity_to_other, const rs2_intrinsics & other_intrin); - void align_other_to_z (byte * other_aligned_to_z, const uint16_t * z_pixels, float z_scale, const rs2_intrinsics & z_intrin, - const rs2_extrinsics & z_to_other, const rs2_intrinsics & other_intrin, const byte * other_pixels, rs2_format other_format); - void align_other_to_disparity (byte * other_aligned_to_disparity, const uint16_t * disparity_pixels, float disparity_scale, const rs2_intrinsics & disparity_intrin, - const rs2_extrinsics & disparity_to_other, const rs2_intrinsics & other_intrin, const byte * other_pixels, rs2_format other_format); - - std::vector compute_rectification_table (const rs2_intrinsics & rect_intrin, const rs2_extrinsics & rect_to_unrect, const rs2_intrinsics & unrect_intrin); - void rectify_image (uint8_t * rect_pixels, const std::vector & rectification_table, const uint8_t * unrect_pixels, rs2_format format); - extern const native_pixel_format pf_fe_raw8_unpatched_kernel; // W/O for unpatched kernel - extern const native_pixel_format pf_raw8; // 8 bit luminance - extern const native_pixel_format pf_rw10; // Four 10 bit luminance values in one 40 bit macropixel - extern const native_pixel_format pf_w10; // Four 10 bit luminance values in one 40 bit macropixel - extern const native_pixel_format pf_rw16; // 10 bit in 16 bit WORD with 6 bit unused - extern const native_pixel_format pf_bayer16; // 16-bit Bayer raw - extern const native_pixel_format pf_yuy2; // Y0 U Y1 V ordered chroma subsampled macropixel - extern const native_pixel_format pf_yuyv; 
// Y0 U Y1 V ordered chroma subsampled macropixel - extern const native_pixel_format pf_y8; // 8 bit IR/Luminosity (left) imager - extern const native_pixel_format pf_y8i; // 8 bits left IR + 8 bits right IR per pixel - extern const native_pixel_format pf_y16; // 16 bit (left) IR image - extern const native_pixel_format pf_y12i; // 12 bits left IR + 12 bits right IR per pixel - extern const native_pixel_format pf_z16; // 16 bit Z + Disparity image - extern const native_pixel_format pf_invz; // 16 bit Z image - extern const native_pixel_format pf_f200_invi; // 8-bit IR image - extern const native_pixel_format pf_f200_inzi; // 16-bit Z + 8 bit IR per pixel - extern const native_pixel_format pf_sr300_invi; // 16-bit IR image - extern const native_pixel_format pf_sr300_inzi; // Planar 16-bit IR image followed by 16-bit Z image - extern const native_pixel_format pf_uyvyl; // U Y0 V Y1 ordered chroma subsampled macropixel for Infrared stream - extern const native_pixel_format pf_uyvyc; // U Y0 V Y1 ordered chroma subsampled macropixel for Color stream - extern const native_pixel_format pf_accel_axes; // Parse accel HID raw data to 3 axes - extern const native_pixel_format pf_gyro_axes; // Parse gyro HID raw data to 3 axes - extern const native_pixel_format pf_rgb888; - extern const native_pixel_format pf_gpio_timestamp; // Parse GPIO timestamp - extern const native_pixel_format pf_confidence_l500; - extern const native_pixel_format pf_z16_l500; - extern const native_pixel_format pf_y8_l500; - extern const native_pixel_format pf_mjpg; + template void split_frame(byte * const dest[], int count, const SOURCE * source, SPLIT_A split_a, SPLIT_B split_b) + { + auto a = reinterpret_cast(dest[0]); + auto b = reinterpret_cast(dest[1]); + for (int i = 0; i < count; ++i) + { + *a++ = split_a(*source); + *b++ = split_b(*source++); + } + } + + resolution rotate_resolution(resolution res); + resolution l500_confidence_resolution(resolution res); } #endif diff --git a/src/ivcam/sr300.cpp b/src/ivcam/sr300.cpp index 5b287e8b8a..7332943338 100644 --- a/src/ivcam/sr300.cpp +++ b/src/ivcam/sr300.cpp @@ -9,12 +9,35 @@ #include "proc/spatial-filter.h" #include "proc/temporal-filter.h" #include "proc/hole-filling-filter.h" +#include "proc/depth-formats-converter.h" #include "ds5/ds5-device.h" #include "../../include/librealsense2/h/rs_sensor.h" #include "../common/fw/firmware-version.h" namespace librealsense { + std::map sr300_color_fourcc_to_rs2_format = { + {rs_fourcc('Y','U','Y','2'), RS2_FORMAT_YUYV}, + {rs_fourcc('U','Y','V','Y'), RS2_FORMAT_UYVY} + }; + std::map sr300_color_fourcc_to_rs2_stream = { + {rs_fourcc('Y','U','Y','2'), RS2_STREAM_COLOR}, + {rs_fourcc('U','Y','V','Y'), RS2_STREAM_COLOR} + }; + + std::map sr300_depth_fourcc_to_rs2_format = { + {rs_fourcc('G','R','E','Y'), RS2_FORMAT_Y8}, + {rs_fourcc('Z','1','6',' '), RS2_FORMAT_Z16}, + {rs_fourcc('I','N','V','I'), RS2_FORMAT_INVI}, + {rs_fourcc('I','N','Z','I'), RS2_FORMAT_INZI} + }; + std::map sr300_depth_fourcc_to_rs2_stream = { + {rs_fourcc('G','R','E','Y'), RS2_STREAM_INFRARED}, + {rs_fourcc('Z','1','6',' '), RS2_STREAM_DEPTH}, + {rs_fourcc('I','N','V','I'), RS2_STREAM_INFRARED}, + {rs_fourcc('I','N','Z','I'), RS2_STREAM_DEPTH} + }; + std::shared_ptr sr300_info::create(std::shared_ptr ctx, bool register_device_notifications) const { @@ -66,6 +89,137 @@ namespace librealsense return results; } + std::shared_ptr sr300_camera::create_color_device(std::shared_ptr ctx, + const platform::uvc_device_info& color) + { + auto raw_color_ep = 
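// Editorial note (not part of the patch): the sr300_*_fourcc_to_rs2_* maps defined
// above replace the fourcc knowledge that used to live in the native_pixel_format
// tables - each backend fourcc now resolves to an rs2_format and rs2_stream by a
// plain lookup. The helper below is a stand-in for librealsense's rs_fourcc with an
// assumed character order; only the lookup pattern is the point.
#include <cstdint>

constexpr uint32_t fourcc(char a, char b, char c, char d)
{
    return (uint32_t(a) << 24) | (uint32_t(b) << 16) | (uint32_t(c) << 8) | uint32_t(d);
}

// Usage sketch: translate a backend profile's fourcc into the public format.
//   auto it = sr300_color_fourcc_to_rs2_format.find(fourcc('Y','U','Y','2'));
//   rs2_format fmt = (it != sr300_color_fourcc_to_rs2_format.end()) ? it->second
//                                                                   : RS2_FORMAT_ANY;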
std::make_shared("Raw RGB Sensor", ctx->get_backend().create_uvc_device(color), + std::unique_ptr(new sr300_timestamp_reader_from_metadata()), + this); + auto color_ep = std::make_shared(this, + raw_color_ep, + sr300_color_fourcc_to_rs2_format, + sr300_color_fourcc_to_rs2_stream); + + // register processing blocks + color_ep->register_processing_block({ {RS2_FORMAT_YUYV} }, { {RS2_FORMAT_RGB8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_RGB8); }); + color_ep->register_processing_block({ {RS2_FORMAT_YUYV} }, { {RS2_FORMAT_RGBA8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_RGBA8); }); + color_ep->register_processing_block({ {RS2_FORMAT_YUYV} }, { {RS2_FORMAT_BGR8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_BGR8); }); + color_ep->register_processing_block({ {RS2_FORMAT_YUYV} }, { {RS2_FORMAT_BGRA8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_BGRA8); }); + color_ep->register_processing_block({ {RS2_FORMAT_YUYV} }, { {RS2_FORMAT_Y16, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_Y16); }); + color_ep->register_processing_block(processing_block_factory::create_id_pbf(RS2_FORMAT_YUYV, RS2_STREAM_COLOR)); + + color_ep->register_processing_block({ {RS2_FORMAT_UYVY} }, { {RS2_FORMAT_RGB8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_RGB8); }); + color_ep->register_processing_block({ {RS2_FORMAT_UYVY} }, { {RS2_FORMAT_RGBA8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_RGBA8); }); + color_ep->register_processing_block({ {RS2_FORMAT_UYVY} }, { {RS2_FORMAT_BGR8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_BGR8); }); + color_ep->register_processing_block({ {RS2_FORMAT_UYVY} }, { {RS2_FORMAT_BGRA8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_BGRA8); }); + color_ep->register_processing_block(processing_block_factory::create_id_pbf(RS2_FORMAT_UYVY, RS2_STREAM_COLOR)); + + // register options + color_ep->register_pu(RS2_OPTION_BACKLIGHT_COMPENSATION); + color_ep->register_pu(RS2_OPTION_BRIGHTNESS); + color_ep->register_pu(RS2_OPTION_CONTRAST); + color_ep->register_pu(RS2_OPTION_GAIN); + color_ep->register_pu(RS2_OPTION_GAMMA); + color_ep->register_pu(RS2_OPTION_HUE); + color_ep->register_pu(RS2_OPTION_SATURATION); + color_ep->register_pu(RS2_OPTION_SHARPNESS); + + auto white_balance_option = std::make_shared(*raw_color_ep, RS2_OPTION_WHITE_BALANCE); + auto auto_white_balance_option = std::make_shared(*raw_color_ep, RS2_OPTION_ENABLE_AUTO_WHITE_BALANCE); + color_ep->register_option(RS2_OPTION_WHITE_BALANCE, white_balance_option); + color_ep->register_option(RS2_OPTION_ENABLE_AUTO_WHITE_BALANCE, auto_white_balance_option); + color_ep->register_option(RS2_OPTION_WHITE_BALANCE, + std::make_shared( + white_balance_option, + auto_white_balance_option)); + + auto exposure_option = std::make_shared(*raw_color_ep, RS2_OPTION_EXPOSURE); + auto auto_exposure_option = std::make_shared(*raw_color_ep, RS2_OPTION_ENABLE_AUTO_EXPOSURE); + color_ep->register_option(RS2_OPTION_EXPOSURE, exposure_option); + color_ep->register_option(RS2_OPTION_ENABLE_AUTO_EXPOSURE, auto_exposure_option); + color_ep->register_option(RS2_OPTION_EXPOSURE, + std::make_shared( + exposure_option, + auto_exposure_option)); + + // register metadata + auto md_offset = offsetof(metadata_raw, mode); + color_ep->register_metadata(RS2_FRAME_METADATA_FRAME_TIMESTAMP, make_uvc_header_parser(&platform::uvc_header::timestamp, + [](rs2_metadata_type param) { return static_cast(param * 
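// Editorial note (not part of the patch): each register_processing_block call above
// declares one conversion the synthetic sensor can synthesize - the source formats
// the raw sensor produces, the target format/stream exposed to the user, and a
// factory that creates the converter lazily when that target is actually streamed.
// The toy registry below only illustrates that shape; it is not the librealsense API.
#include <functional>
#include <memory>
#include <utility>
#include <vector>

struct toy_converter { virtual ~toy_converter() = default; };

struct toy_pbf
{
    std::vector<int> source_formats;                      // e.g. { YUYV }
    std::vector<int> target_formats;                      // e.g. { RGB8 }
    std::function<std::shared_ptr<toy_converter>()> make; // invoked on demand
};

struct toy_sensor
{
    std::vector<toy_pbf> factories;
    void register_processing_block(toy_pbf pbf) { factories.push_back(std::move(pbf)); }
};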
TIMESTAMP_10NSEC_TO_MSEC); })); + color_ep->register_metadata(RS2_FRAME_METADATA_FRAME_COUNTER, make_sr300_attribute_parser(&md_sr300_rgb::frame_counter, md_offset)); + color_ep->register_metadata(RS2_FRAME_METADATA_ACTUAL_FPS, make_sr300_attribute_parser(&md_sr300_rgb::actual_fps, md_offset)); + color_ep->register_metadata(RS2_FRAME_METADATA_SENSOR_TIMESTAMP, make_sr300_attribute_parser(&md_sr300_rgb::frame_latency, md_offset)); + color_ep->register_metadata(RS2_FRAME_METADATA_ACTUAL_EXPOSURE, make_sr300_attribute_parser(&md_sr300_rgb::actual_exposure, md_offset, [](rs2_metadata_type param) { return param * 100; })); + color_ep->register_metadata(RS2_FRAME_METADATA_AUTO_EXPOSURE, make_sr300_attribute_parser(&md_sr300_rgb::auto_exp_mode, md_offset, [](rs2_metadata_type param) { return (param != 1); })); + color_ep->register_metadata(RS2_FRAME_METADATA_GAIN_LEVEL, make_sr300_attribute_parser(&md_sr300_rgb::gain, md_offset)); + color_ep->register_metadata(RS2_FRAME_METADATA_WHITE_BALANCE, make_sr300_attribute_parser(&md_sr300_rgb::color_temperature, md_offset)); + + return color_ep; + } + + std::shared_ptr sr300_camera::create_depth_device(std::shared_ptr ctx, + const platform::uvc_device_info& depth) + { + using namespace ivcam; + + auto&& backend = ctx->get_backend(); + + // create uvc-endpoint from backend uvc-device + auto raw_depth_ep = std::make_shared("Raw Depth Sensor", backend.create_uvc_device(depth), + std::unique_ptr(new sr300_timestamp_reader_from_metadata()), + this); + auto depth_ep = std::make_shared(this, + raw_depth_ep, + sr300_depth_fourcc_to_rs2_format, + sr300_depth_fourcc_to_rs2_stream); + raw_depth_ep->register_xu(depth_xu); // make sure the XU is initialized everytime we power the camera + + // register processing blocks factories + depth_ep->register_processing_block( + { { RS2_FORMAT_INVI } }, + { { RS2_FORMAT_Y8, RS2_STREAM_INFRARED, 1 } }, + []() {return std::make_shared(RS2_FORMAT_Y8); }); + depth_ep->register_processing_block( + { { RS2_FORMAT_INVI } }, + { { RS2_FORMAT_Y16, RS2_STREAM_INFRARED, 1 } }, + []() {return std::make_shared(RS2_FORMAT_Y16); }); + depth_ep->register_processing_block( + { { RS2_FORMAT_INZI } }, + { { RS2_FORMAT_Z16, RS2_STREAM_DEPTH }, { RS2_FORMAT_Y8, RS2_STREAM_INFRARED, 1 } }, + []() {return std::make_shared(RS2_FORMAT_Y8); }); + depth_ep->register_processing_block( + { { RS2_FORMAT_INZI } }, + { { RS2_FORMAT_Z16, RS2_STREAM_DEPTH }, { RS2_FORMAT_Y16, RS2_STREAM_INFRARED, 1 } }, + []() {return std::make_shared(RS2_FORMAT_Y16); }); + depth_ep->register_processing_block(processing_block_factory::create_id_pbf(RS2_FORMAT_Y8, RS2_STREAM_INFRARED, 1)); + depth_ep->register_processing_block(processing_block_factory::create_id_pbf(RS2_FORMAT_Z16, RS2_STREAM_DEPTH)); + + register_depth_xu(*depth_ep, RS2_OPTION_LASER_POWER, IVCAM_DEPTH_LASER_POWER, + "Power of the SR300 projector, with 0 meaning projector off"); + register_depth_xu(*depth_ep, RS2_OPTION_ACCURACY, IVCAM_DEPTH_ACCURACY, + "Set the number of patterns projected per frame.\nThe higher the accuracy value the more patterns projected.\nIncreasing the number of patterns help to achieve better accuracy.\nNote that this control is affecting the Depth FPS"); + register_depth_xu(*depth_ep, RS2_OPTION_MOTION_RANGE, IVCAM_DEPTH_MOTION_RANGE, + "Motion vs. 
Range trade-off, with lower values allowing for better motion\nsensitivity and higher values allowing for better depth range"); + register_depth_xu(*depth_ep, RS2_OPTION_CONFIDENCE_THRESHOLD, IVCAM_DEPTH_CONFIDENCE_THRESH, + "The confidence level threshold used by the Depth algorithm pipe to set whether\na pixel will get a valid range or will be marked with invalid range"); + register_depth_xu(*depth_ep, RS2_OPTION_FILTER_OPTION, IVCAM_DEPTH_FILTER_OPTION, + "Set the filter to apply to each depth frame.\nEach one of the filter is optimized per the application requirements"); + + depth_ep->register_option(RS2_OPTION_VISUAL_PRESET, std::make_shared(*this, + option_range{ 0, RS2_SR300_VISUAL_PRESET_COUNT - 1, 1, RS2_SR300_VISUAL_PRESET_DEFAULT })); + + auto md_offset = offsetof(metadata_raw, mode); + + depth_ep->register_metadata(RS2_FRAME_METADATA_FRAME_TIMESTAMP, make_uvc_header_parser(&platform::uvc_header::timestamp, + [](rs2_metadata_type param) { return static_cast(param * TIMESTAMP_10NSEC_TO_MSEC); })); + depth_ep->register_metadata(RS2_FRAME_METADATA_FRAME_COUNTER, make_sr300_attribute_parser(&md_sr300_depth::frame_counter, md_offset)); + depth_ep->register_metadata(RS2_FRAME_METADATA_ACTUAL_EXPOSURE, make_sr300_attribute_parser(&md_sr300_depth::actual_exposure, md_offset, + [](rs2_metadata_type param) { return param * 100; })); + depth_ep->register_metadata(RS2_FRAME_METADATA_ACTUAL_FPS, make_sr300_attribute_parser(&md_sr300_depth::actual_fps, md_offset)); + + return depth_ep; + } + rs2_intrinsics sr300_camera::make_depth_intrinsics(const ivcam::camera_calib_params & c, const int2 & dims) { return{ dims.x, dims.y, (c.Kc[0][2] * 0.5f + 0.5f) * dims.x, @@ -261,7 +415,7 @@ namespace librealsense : device(ctx, group, register_device_notifications), _depth_device_idx(add_sensor(create_depth_device(ctx, depth))), _color_device_idx(add_sensor(create_color_device(ctx, color))), - _hw_monitor(std::make_shared(std::make_shared(ctx->get_backend().create_usb_device(hwm_device), get_depth_sensor()))), + _hw_monitor(std::make_shared(std::make_shared(ctx->get_backend().create_usb_device(hwm_device), get_raw_depth_sensor()))), _depth_stream(new stream(RS2_STREAM_DEPTH)), _ir_stream(new stream(RS2_STREAM_INFRARED)), _color_stream(new stream(RS2_STREAM_COLOR)) @@ -340,39 +494,57 @@ namespace librealsense } - rs2_time_t sr300_timestamp_reader_from_metadata::get_frame_timestamp(const request_mapping& mode, const platform::frame_object& fo) + rs2_time_t sr300_timestamp_reader_from_metadata::get_frame_timestamp(std::shared_ptr frame) { std::lock_guard lock(_mtx); - if(has_metadata_ts(fo)) + if(has_metadata_ts(frame)) { - auto md = (librealsense::metadata_raw*)(fo.metadata); + auto f = std::dynamic_pointer_cast(frame); + if (!f) + { + LOG_ERROR("Frame is not valid. Failed to downcast to librealsense::frame."); + return 0; + } + auto md = (librealsense::metadata_raw*)(f->additional_data.metadata_blob.data()); return (double)(ts_wrap.calc(md->header.timestamp))*TIMESTAMP_10NSEC_TO_MSEC; } else { if (!one_time_note) { + uint32_t fcc; + auto sp = frame->get_stream(); + auto bp = As(sp); + if (bp) + fcc = bp->get_backend_profile().format; + LOG_WARNING("UVC metadata payloads are not available for stream " - << std::hex << mode.pf->fourcc << std::dec << (mode.profile.format) + << std::hex << fcc << std::dec << sp->get_format() << ". 
Please refer to installation chapter for details."); one_time_note = true; } - return _backup_timestamp_reader->get_frame_timestamp(mode, fo); + return _backup_timestamp_reader->get_frame_timestamp(frame); } } - unsigned long long sr300_timestamp_reader_from_metadata::get_frame_counter(const request_mapping & mode, const platform::frame_object& fo) const + unsigned long long sr300_timestamp_reader_from_metadata::get_frame_counter(std::shared_ptr frame) const { std::lock_guard lock(_mtx); - if (has_metadata_fc(fo)) + if (has_metadata_fc(frame)) { - auto md = (librealsense::metadata_raw*)(fo.metadata); + auto f = std::dynamic_pointer_cast(frame); + if (!f) + { + LOG_ERROR("Frame is not valid. Failed to downcast to librealsense::frame."); + return 0; + } + auto md = (librealsense::metadata_raw*)(f->additional_data.metadata_blob.data()); return md->mode.sr300_rgb_mode.frame_counter; // The attribute offset is identical for all sr300-supported streams } - return _backup_timestamp_reader->get_frame_counter(mode, fo); + return _backup_timestamp_reader->get_frame_counter(frame); } void sr300_timestamp_reader_from_metadata::reset() @@ -383,11 +555,11 @@ namespace librealsense ts_wrap.reset(); } - rs2_timestamp_domain sr300_timestamp_reader_from_metadata::get_frame_timestamp_domain(const request_mapping & mode, const platform::frame_object& fo) const + rs2_timestamp_domain sr300_timestamp_reader_from_metadata::get_frame_timestamp_domain(std::shared_ptr frame) const { std::lock_guard lock(_mtx); - return (has_metadata_ts(fo))? RS2_TIMESTAMP_DOMAIN_HARDWARE_CLOCK : _backup_timestamp_reader->get_frame_timestamp_domain(mode,fo); + return (has_metadata_ts(frame))? RS2_TIMESTAMP_DOMAIN_HARDWARE_CLOCK : _backup_timestamp_reader->get_frame_timestamp_domain(frame); } std::shared_ptr sr300_camera::create_matcher(const frame_holder& frame) const diff --git a/src/ivcam/sr300.h b/src/ivcam/sr300.h index 83b870b166..dc95716328 100644 --- a/src/ivcam/sr300.h +++ b/src/ivcam/sr300.h @@ -4,6 +4,7 @@ #pragma once #include +#include #include #include @@ -19,6 +20,7 @@ #include "core/debug.h" #include "stream.h" #include "fw-update/fw-update-device-interface.h" +#include "proc/color-formats-converter.h" namespace librealsense { @@ -50,11 +52,18 @@ namespace librealsense counter = 0; } - double get_frame_timestamp(const request_mapping& /*mode*/, const platform::frame_object& fo) override + double get_frame_timestamp(std::shared_ptr frame) override { std::lock_guard lock(_mtx); + // Timestamps are encoded within the first 32 bits of the image and provided in 10nsec units - uint32_t rolling_timestamp = *reinterpret_cast(fo.pixels); + auto f = std::dynamic_pointer_cast(frame); + if (!f) + { + LOG_ERROR("Frame is not valid. 
Failed to downcast to librealsense::frame."); + return 0; + } + uint32_t rolling_timestamp = *reinterpret_cast(f->get_frame_data()); if (!started) { total = last_timestamp = rolling_timestamp; @@ -69,15 +78,21 @@ namespace librealsense return total * 0.00001; // to msec } - unsigned long long get_frame_counter(const request_mapping & /*mode*/, const platform::frame_object& fo) const override + unsigned long long get_frame_counter(std::shared_ptr frame) const override { std::lock_guard lock(_mtx); return ++counter; } - rs2_timestamp_domain get_frame_timestamp_domain(const request_mapping & mode, const platform::frame_object& fo) const override + rs2_timestamp_domain get_frame_timestamp_domain(std::shared_ptr frame) const override { - if(fo.metadata_size >= platform::uvc_header_size ) + auto f = std::dynamic_pointer_cast(frame); + if (!f) + { + LOG_ERROR("Frame is not valid. Failed to downcast to librealsense::frame."); + return RS2_TIMESTAMP_DOMAIN_COUNT; + } + if(f->additional_data.metadata_size >= platform::uvc_header_size ) return RS2_TIMESTAMP_DOMAIN_HARDWARE_CLOCK; else return RS2_TIMESTAMP_DOMAIN_SYSTEM_TIME; @@ -93,21 +108,33 @@ namespace librealsense protected: - bool has_metadata_ts(const platform::frame_object& fo) const + bool has_metadata_ts(std::shared_ptr frame) const { + auto f = std::dynamic_pointer_cast(frame); + if (!f) + { + LOG_ERROR("Frame is not valid. Failed to downcast to librealsense::frame."); + return false; + } // Metadata support for a specific stream is immutable const bool has_md_ts = [&]{ std::lock_guard lock(_mtx); - return ((fo.metadata != nullptr) && (fo.metadata_size >= platform::uvc_header_size) && ((byte*)fo.metadata)[0] >= platform::uvc_header_size); + return ((f->additional_data.metadata_blob.data() != nullptr) && (f->additional_data.metadata_size >= platform::uvc_header_size) && ((byte*)f->additional_data.metadata_blob.data())[0] >= platform::uvc_header_size); }(); return has_md_ts; } - bool has_metadata_fc(const platform::frame_object& fo) const + bool has_metadata_fc(std::shared_ptr frame) const { + auto f = std::dynamic_pointer_cast(frame); + if (!f) + { + LOG_ERROR("Frame is not valid. 
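// Editorial note (not part of the patch): get_frame_timestamp above reads a 32-bit
// rolling counter in 10 ns units from the first four bytes of the image, accumulates
// the unsigned delta between consecutive frames (so the ~42.9 s wrap of the counter
// does not reset the clock), and multiplies by 1e-5 to report milliseconds.
// A standalone sketch of that accumulation:
#include <cstdint>

struct rolling_ts_accumulator
{
    bool     started = false;
    uint32_t last    = 0;
    uint64_t total   = 0;   // in 10 ns units

    double update(uint32_t rolling_timestamp)   // returns milliseconds
    {
        if (!started)
        {
            total = last = rolling_timestamp;
            started = true;
        }
        else
        {
            total += static_cast<uint32_t>(rolling_timestamp - last); // unsigned diff survives wraparound
            last = rolling_timestamp;
        }
        return total * 0.00001;                  // 10 ns units -> ms
    }
};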
Failed to downcast to librealsense::frame."); + return false; + } // Metadata support for a specific stream is immutable const bool has_md_frame_counter = [&] { std::lock_guard lock(_mtx); - return ((fo.metadata != nullptr) && (fo.metadata_size > platform::uvc_header_size) && ((byte*)fo.metadata)[0] > platform::uvc_header_size); + return ((f->additional_data.metadata_blob.data() != nullptr) && (f->additional_data.metadata_size > platform::uvc_header_size) && ((byte*)f->additional_data.metadata_blob.data())[0] > platform::uvc_header_size); }(); return has_md_frame_counter; @@ -120,13 +147,13 @@ namespace librealsense reset(); } - rs2_time_t get_frame_timestamp(const request_mapping& mode, const platform::frame_object& fo) override; + rs2_time_t get_frame_timestamp(std::shared_ptr frame) override; - unsigned long long get_frame_counter(const request_mapping & mode, const platform::frame_object& fo) const override; + unsigned long long get_frame_counter(std::shared_ptr frame) const override; void reset() override; - rs2_timestamp_domain get_frame_timestamp_domain(const request_mapping & mode, const platform::frame_object& fo) const override; + rs2_timestamp_domain get_frame_timestamp_domain(std::shared_ptr frame) const override; }; class sr300_info : public device_info @@ -209,13 +236,14 @@ namespace librealsense sr300_camera& _owner; }; - class sr300_color_sensor : public uvc_sensor, public video_sensor_interface, public roi_sensor_base + class sr300_color_sensor : public synthetic_sensor, public video_sensor_interface, public roi_sensor_base { public: - explicit sr300_color_sensor(sr300_camera* owner, std::shared_ptr uvc_device, - std::unique_ptr timestamp_reader, - std::shared_ptr ctx) - : uvc_sensor("RGB Camera", uvc_device, move(timestamp_reader), owner), _owner(owner) + explicit sr300_color_sensor(sr300_camera* owner, + std::shared_ptr uvc_sensor, + const std::map& sr300_color_fourcc_to_rs2_format, + const std::map& sr300_color_fourcc_to_rs2_stream) + : synthetic_sensor("RGB Camera", uvc_sensor, owner, sr300_color_fourcc_to_rs2_format, sr300_color_fourcc_to_rs2_stream), _owner(owner) {} rs2_intrinsics get_intrinsics(const stream_profile& profile) const override @@ -227,7 +255,7 @@ namespace librealsense { auto lock = environment::get_instance().get_extrinsics_graph().lock(); - auto results = uvc_sensor::init_stream_profiles(); + auto results = synthetic_sensor::init_stream_profiles(); for (auto p : results) { @@ -265,13 +293,14 @@ namespace librealsense const sr300_camera* _owner; }; - class sr300_depth_sensor : public uvc_sensor, public video_sensor_interface, public depth_sensor + class sr300_depth_sensor : public synthetic_sensor, public video_sensor_interface, public depth_sensor { public: - explicit sr300_depth_sensor(sr300_camera* owner, std::shared_ptr uvc_device, - std::unique_ptr timestamp_reader, - std::shared_ptr ctx) - : uvc_sensor("Coded-Light Depth Sensor", uvc_device, move(timestamp_reader), owner), _owner(owner) + explicit sr300_depth_sensor(sr300_camera* owner, + std::shared_ptr uvc_sensor, + const std::map& sr300_depth_fourcc_to_rs2_format, + const std::map& sr300_depth_fourcc_to_rs2_stream) + : synthetic_sensor("Coded-Light Depth Sensor", uvc_sensor, owner, sr300_depth_fourcc_to_rs2_format, sr300_depth_fourcc_to_rs2_stream), _owner(owner) {} rs2_intrinsics get_intrinsics(const stream_profile& profile) const override @@ -283,7 +312,7 @@ namespace librealsense { auto lock = environment::get_instance().get_extrinsics_graph().lock(); - auto results = 
uvc_sensor::init_stream_profiles(); + auto results = synthetic_sensor::init_stream_profiles(); for (auto p : results) { @@ -341,98 +370,11 @@ namespace librealsense const sr300_camera* _owner; }; - std::shared_ptr create_color_device(std::shared_ptr ctx, - const platform::uvc_device_info& color) - { - auto color_ep = std::make_shared(this, ctx->get_backend().create_uvc_device(color), - std::unique_ptr(new sr300_timestamp_reader_from_metadata()), - ctx); - color_ep->register_pixel_format(pf_yuy2); - color_ep->register_pixel_format(pf_yuyv); - - color_ep->register_pu(RS2_OPTION_BACKLIGHT_COMPENSATION); - color_ep->register_pu(RS2_OPTION_BRIGHTNESS); - color_ep->register_pu(RS2_OPTION_CONTRAST); - color_ep->register_pu(RS2_OPTION_GAIN); - color_ep->register_pu(RS2_OPTION_GAMMA); - color_ep->register_pu(RS2_OPTION_HUE); - color_ep->register_pu(RS2_OPTION_SATURATION); - color_ep->register_pu(RS2_OPTION_SHARPNESS); - - auto white_balance_option = std::make_shared(*color_ep, RS2_OPTION_WHITE_BALANCE); - auto auto_white_balance_option = std::make_shared(*color_ep, RS2_OPTION_ENABLE_AUTO_WHITE_BALANCE); - color_ep->register_option(RS2_OPTION_WHITE_BALANCE, white_balance_option); - color_ep->register_option(RS2_OPTION_ENABLE_AUTO_WHITE_BALANCE, auto_white_balance_option); - color_ep->register_option(RS2_OPTION_WHITE_BALANCE, - std::make_shared( - white_balance_option, - auto_white_balance_option)); - - auto exposure_option = std::make_shared(*color_ep, RS2_OPTION_EXPOSURE); - auto auto_exposure_option = std::make_shared(*color_ep, RS2_OPTION_ENABLE_AUTO_EXPOSURE); - color_ep->register_option(RS2_OPTION_EXPOSURE, exposure_option); - color_ep->register_option(RS2_OPTION_ENABLE_AUTO_EXPOSURE, auto_exposure_option); - color_ep->register_option(RS2_OPTION_EXPOSURE, - std::make_shared( - exposure_option, - auto_exposure_option)); - - auto md_offset = offsetof(metadata_raw, mode); - color_ep->register_metadata(RS2_FRAME_METADATA_FRAME_TIMESTAMP, make_uvc_header_parser(&platform::uvc_header::timestamp, - [](rs2_metadata_type param) { return static_cast(param * TIMESTAMP_10NSEC_TO_MSEC); })); - color_ep->register_metadata(RS2_FRAME_METADATA_FRAME_COUNTER, make_sr300_attribute_parser(&md_sr300_rgb::frame_counter, md_offset)); - color_ep->register_metadata(RS2_FRAME_METADATA_ACTUAL_FPS, make_sr300_attribute_parser(&md_sr300_rgb::actual_fps, md_offset)); - color_ep->register_metadata(RS2_FRAME_METADATA_SENSOR_TIMESTAMP,make_sr300_attribute_parser(&md_sr300_rgb::frame_latency, md_offset)); - color_ep->register_metadata(RS2_FRAME_METADATA_ACTUAL_EXPOSURE, make_sr300_attribute_parser(&md_sr300_rgb::actual_exposure, md_offset, [](rs2_metadata_type param) { return param*100; })); - color_ep->register_metadata(RS2_FRAME_METADATA_AUTO_EXPOSURE, make_sr300_attribute_parser(&md_sr300_rgb::auto_exp_mode, md_offset, [](rs2_metadata_type param) { return (param !=1); })); - color_ep->register_metadata(RS2_FRAME_METADATA_GAIN_LEVEL, make_sr300_attribute_parser(&md_sr300_rgb::gain, md_offset)); - color_ep->register_metadata(RS2_FRAME_METADATA_WHITE_BALANCE, make_sr300_attribute_parser(&md_sr300_rgb::color_temperature, md_offset)); - - return color_ep; - } + std::shared_ptr create_color_device(std::shared_ptr ctx, + const platform::uvc_device_info& color); - std::shared_ptr create_depth_device(std::shared_ptr ctx, - const platform::uvc_device_info& depth) - { - using namespace ivcam; - - auto&& backend = ctx->get_backend(); - - // create uvc-endpoint from backend uvc-device - auto depth_ep = std::make_shared(this, 
backend.create_uvc_device(depth), - std::unique_ptr(new sr300_timestamp_reader_from_metadata()), - ctx); - depth_ep->register_xu(depth_xu); // make sure the XU is initialized everytime we power the camera - depth_ep->register_pixel_format(pf_invz); - depth_ep->register_pixel_format(pf_y8); - depth_ep->register_pixel_format(pf_sr300_invi); - depth_ep->register_pixel_format(pf_sr300_inzi); - - register_depth_xu(*depth_ep, RS2_OPTION_LASER_POWER, IVCAM_DEPTH_LASER_POWER, - "Power of the SR300 projector, with 0 meaning projector off"); - register_depth_xu(*depth_ep, RS2_OPTION_ACCURACY, IVCAM_DEPTH_ACCURACY, - "Set the number of patterns projected per frame.\nThe higher the accuracy value the more patterns projected.\nIncreasing the number of patterns help to achieve better accuracy.\nNote that this control is affecting the Depth FPS"); - register_depth_xu(*depth_ep, RS2_OPTION_MOTION_RANGE, IVCAM_DEPTH_MOTION_RANGE, - "Motion vs. Range trade-off, with lower values allowing for better motion\nsensitivity and higher values allowing for better depth range"); - register_depth_xu(*depth_ep, RS2_OPTION_CONFIDENCE_THRESHOLD, IVCAM_DEPTH_CONFIDENCE_THRESH, - "The confidence level threshold used by the Depth algorithm pipe to set whether\na pixel will get a valid range or will be marked with invalid range"); - register_depth_xu(*depth_ep, RS2_OPTION_FILTER_OPTION, IVCAM_DEPTH_FILTER_OPTION, - "Set the filter to apply to each depth frame.\nEach one of the filter is optimized per the application requirements"); - - depth_ep->register_option(RS2_OPTION_VISUAL_PRESET, std::make_shared(*this, - option_range{0, RS2_SR300_VISUAL_PRESET_COUNT - 1, 1, RS2_SR300_VISUAL_PRESET_DEFAULT})); - - auto md_offset = offsetof(metadata_raw, mode); - - depth_ep->register_metadata(RS2_FRAME_METADATA_FRAME_TIMESTAMP, make_uvc_header_parser(&platform::uvc_header::timestamp, - [](rs2_metadata_type param) { return static_cast(param * TIMESTAMP_10NSEC_TO_MSEC); })); - depth_ep->register_metadata(RS2_FRAME_METADATA_FRAME_COUNTER, make_sr300_attribute_parser(&md_sr300_depth::frame_counter, md_offset)); - depth_ep->register_metadata(RS2_FRAME_METADATA_ACTUAL_EXPOSURE, make_sr300_attribute_parser(&md_sr300_depth::actual_exposure, md_offset, - [](rs2_metadata_type param) { return param * 100; })); - depth_ep->register_metadata(RS2_FRAME_METADATA_ACTUAL_FPS, make_sr300_attribute_parser(&md_sr300_depth::actual_fps, md_offset)); - - return depth_ep; - } + std::shared_ptr create_depth_device(std::shared_ptr ctx, + const platform::uvc_device_info& depth); std::vector send_receive_raw_data(const std::vector& input) override { @@ -444,8 +386,13 @@ namespace librealsense force_hardware_reset(); } - uvc_sensor& get_depth_sensor() { return dynamic_cast(get_sensor(_depth_device_idx)); } - + synthetic_sensor& get_depth_sensor() { return dynamic_cast(get_sensor(_depth_device_idx)); } + + uvc_sensor& get_raw_depth_sensor() + { + synthetic_sensor& depth_sensor = get_depth_sensor(); + return dynamic_cast(*depth_sensor.get_raw_sensor()); + } sr300_camera(std::shared_ptr ctx, const platform::uvc_device_info& color, @@ -536,11 +483,14 @@ namespace librealsense bool _is_locked = true; template - void register_depth_xu(uvc_sensor& depth, rs2_option opt, uint8_t id, std::string desc) const + void register_depth_xu(synthetic_sensor& depth, rs2_option opt, uint8_t id, std::string desc) const { + auto raw_sensor = depth.get_raw_sensor(); + auto raw_uvc_sensor = As(raw_sensor); + assert(raw_uvc_sensor); depth.register_option(opt, std::make_shared>( - depth, 
+ *raw_uvc_sensor, ivcam::depth_xu, id, std::move(desc))); } diff --git a/src/l500/l500-color.cpp b/src/l500/l500-color.cpp index 9a34632de9..45dd48abe0 100644 --- a/src/l500/l500-color.cpp +++ b/src/l500/l500-color.cpp @@ -3,26 +3,42 @@ #include "l500-color.h" #include "l500-private.h" +#include "proc/identity-processing-block.h" +#include "proc/color-formats-converter.h" #include namespace librealsense { using namespace ivcam2; - std::shared_ptr l500_color::create_color_device(std::shared_ptr ctx, const std::vector& color_devices_info) + std::map l500_color_fourcc_to_rs2_format = { + {rs_fourcc('Y','U','Y','2'), RS2_FORMAT_YUYV} + }; + + std::map l500_color_fourcc_to_rs2_stream = { + {rs_fourcc('Y','U','Y','2'), RS2_STREAM_COLOR}, + }; + + std::shared_ptr l500_color::create_color_device(std::shared_ptr ctx, const std::vector& color_devices_info) { auto&& backend = ctx->get_backend(); std::unique_ptr timestamp_reader_metadata(new ivcam2::l500_timestamp_reader_from_metadata(backend.create_time_service())); auto enable_global_time_option = std::shared_ptr(new global_time_option()); - auto color_ep = std::make_shared(this, ctx->get_backend().create_uvc_device(color_devices_info.front()), + auto raw_color_ep = std::make_shared("RGB Sensor", ctx->get_backend().create_uvc_device(color_devices_info.front()), std::unique_ptr(new global_timestamp_reader(std::move(timestamp_reader_metadata), _tf_keeper, enable_global_time_option)), - ctx); - - color_ep->register_option(RS2_OPTION_GLOBAL_TIME_ENABLED, enable_global_time_option); - color_ep->register_pixel_format(pf_yuy2); - color_ep->register_pixel_format(pf_yuyv); - + this); + auto color_ep = std::make_shared(this, raw_color_ep, ctx, l500_color_fourcc_to_rs2_format, l500_color_fourcc_to_rs2_stream); + + // processing blocks + color_ep->register_processing_block({ {RS2_FORMAT_YUYV} }, { {RS2_FORMAT_RGB8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_RGB8); }); + color_ep->register_processing_block({ {RS2_FORMAT_YUYV} }, { {RS2_FORMAT_RGBA8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_RGBA8); }); + color_ep->register_processing_block({ {RS2_FORMAT_YUYV} }, { {RS2_FORMAT_BGR8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_BGR8); }); + color_ep->register_processing_block({ {RS2_FORMAT_YUYV} }, { {RS2_FORMAT_BGRA8, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_BGRA8); }); + color_ep->register_processing_block({ {RS2_FORMAT_YUYV} }, { {RS2_FORMAT_Y16, RS2_STREAM_COLOR} }, []() { return std::make_shared(RS2_FORMAT_Y16); }); + color_ep->register_processing_block(processing_block_factory::create_id_pbf(RS2_FORMAT_YUYV, RS2_STREAM_COLOR)); + + // options color_ep->register_pu(RS2_OPTION_BACKLIGHT_COMPENSATION); color_ep->register_pu(RS2_OPTION_BRIGHTNESS); color_ep->register_pu(RS2_OPTION_CONTRAST); @@ -33,8 +49,10 @@ namespace librealsense color_ep->register_pu(RS2_OPTION_SHARPNESS); color_ep->register_pu(RS2_OPTION_AUTO_EXPOSURE_PRIORITY); - auto white_balance_option = std::make_shared(*color_ep, RS2_OPTION_WHITE_BALANCE); - auto auto_white_balance_option = std::make_shared(*color_ep, RS2_OPTION_ENABLE_AUTO_WHITE_BALANCE); + color_ep->register_option(RS2_OPTION_GLOBAL_TIME_ENABLED, enable_global_time_option); + + auto white_balance_option = std::make_shared(*raw_color_ep, RS2_OPTION_WHITE_BALANCE); + auto auto_white_balance_option = std::make_shared(*raw_color_ep, RS2_OPTION_ENABLE_AUTO_WHITE_BALANCE); color_ep->register_option(RS2_OPTION_WHITE_BALANCE, white_balance_option); 
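// Illustrative sketch of the registration pattern used just above: a synthetic
// sensor wraps the raw UVC endpoint, the fourcc map names the native format of
// each backend profile, and every register_processing_block() call declares
// "from these source formats I can publish these (format, stream) targets,
// built on demand by this factory". The stand-alone model below uses only the
// public rs2_format/rs2_stream enums and the standard library; conversion_rule,
// target_profile and converter are illustrative names, not librealsense types.
#include <librealsense2/h/rs_sensor.h>   // rs2_format, rs2_stream
#include <functional>
#include <memory>
#include <vector>

struct converter { virtual ~converter() = default; };   // stands in for a processing block
struct yuyv_to_rgb8 : converter {};                      // analogue of the YUY2 converter block
struct passthrough  : converter {};                      // analogue of the identity block

struct target_profile { rs2_format format; rs2_stream stream; };

struct conversion_rule
{
    std::vector<rs2_format>     sources;   // formats the raw sensor actually streams
    std::vector<target_profile> targets;   // formats/streams the synthetic sensor advertises
    std::function<std::shared_ptr<converter>()> factory;   // block built only when requested
};

// Mirrors the YUYV registrations above: one native format, several published ones.
inline std::vector<conversion_rule> color_conversion_rules()
{
    return {
        { { RS2_FORMAT_YUYV }, { { RS2_FORMAT_RGB8, RS2_STREAM_COLOR } },
          [] { return std::make_shared<yuyv_to_rgb8>(); } },
        { { RS2_FORMAT_YUYV }, { { RS2_FORMAT_YUYV, RS2_STREAM_COLOR } },
          [] { return std::make_shared<passthrough>(); } },   // create_id_pbf() analogue
    };
}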
color_ep->register_option(RS2_OPTION_ENABLE_AUTO_WHITE_BALANCE, auto_white_balance_option); color_ep->register_option(RS2_OPTION_WHITE_BALANCE, @@ -42,8 +60,8 @@ namespace librealsense white_balance_option, auto_white_balance_option)); - auto exposure_option = std::make_shared(*color_ep, RS2_OPTION_EXPOSURE); - auto auto_exposure_option = std::make_shared(*color_ep, RS2_OPTION_ENABLE_AUTO_EXPOSURE); + auto exposure_option = std::make_shared(*raw_color_ep, RS2_OPTION_EXPOSURE); + auto auto_exposure_option = std::make_shared(*raw_color_ep, RS2_OPTION_ENABLE_AUTO_EXPOSURE); color_ep->register_option(RS2_OPTION_EXPOSURE, exposure_option); color_ep->register_option(RS2_OPTION_ENABLE_AUTO_EXPOSURE, auto_exposure_option); color_ep->register_option(RS2_OPTION_EXPOSURE, @@ -52,14 +70,13 @@ namespace librealsense auto_exposure_option)); color_ep->register_option(RS2_OPTION_POWER_LINE_FREQUENCY, - std::make_shared(*color_ep, RS2_OPTION_POWER_LINE_FREQUENCY, + std::make_shared(*raw_color_ep, RS2_OPTION_POWER_LINE_FREQUENCY, std::map{ { 0.f, "Disabled"}, { 1.f, "50Hz" }, { 2.f, "60Hz" }, { 3.f, "Auto" }, })); - color_ep->register_metadata(RS2_FRAME_METADATA_FRAME_TIMESTAMP, make_uvc_header_parser(&platform::uvc_header::timestamp)); - + // metadata // attributes of md_capture_timing auto md_prop_offset = offsetof(metadata_raw, mode) + offsetof(md_rgb_normal_mode, intel_capture_timing); @@ -82,7 +99,6 @@ namespace librealsense color_ep->register_metadata(RS2_FRAME_METADATA_ACTUAL_EXPOSURE, make_attribute_parser(&md_rgb_control::manual_exp, md_rgb_control_attributes::manual_exp_attribute, md_prop_offset)); color_ep->register_metadata(RS2_FRAME_METADATA_AUTO_EXPOSURE, make_attribute_parser(&md_rgb_control::ae_mode, md_rgb_control_attributes::ae_mode_attribute, md_prop_offset, [](rs2_metadata_type param) { return (param != 1); })); - color_ep->register_metadata(RS2_FRAME_METADATA_BRIGHTNESS, make_attribute_parser(&md_rgb_control::brightness, md_rgb_control_attributes::brightness_attribute, md_prop_offset)); color_ep->register_metadata(RS2_FRAME_METADATA_CONTRAST, make_attribute_parser(&md_rgb_control::contrast, md_rgb_control_attributes::contrast_attribute, md_prop_offset)); color_ep->register_metadata(RS2_FRAME_METADATA_SATURATION, make_attribute_parser(&md_rgb_control::saturation, md_rgb_control_attributes::saturation_attribute, md_prop_offset)); @@ -94,6 +110,7 @@ namespace librealsense color_ep->register_metadata(RS2_FRAME_METADATA_MANUAL_WHITE_BALANCE, make_attribute_parser(&md_rgb_control::manual_wb, md_rgb_control_attributes::manual_wb_attribute, md_prop_offset)); color_ep->register_metadata(RS2_FRAME_METADATA_POWER_LINE_FREQUENCY, make_attribute_parser(&md_rgb_control::power_line_frequency, md_rgb_control_attributes::power_line_frequency_attribute, md_prop_offset)); color_ep->register_metadata(RS2_FRAME_METADATA_LOW_LIGHT_COMPENSATION, make_attribute_parser(&md_rgb_control::low_light_comp, md_rgb_control_attributes::low_light_comp_attribute, md_prop_offset)); + color_ep->register_metadata(RS2_FRAME_METADATA_FRAME_TIMESTAMP, make_uvc_header_parser(&platform::uvc_header::timestamp)); return color_ep; } diff --git a/src/l500/l500-color.h b/src/l500/l500-color.h index 4977d1f5db..f785005338 100644 --- a/src/l500/l500-color.h +++ b/src/l500/l500-color.h @@ -5,6 +5,8 @@ #include #include +#include + #include "l500-device.h" #include "stream.h" #include "l500-depth.h" @@ -14,7 +16,7 @@ namespace librealsense class l500_color : public virtual l500_device { public: - std::shared_ptr 
create_color_device(std::shared_ptr ctx, + std::shared_ptr create_color_device(std::shared_ptr ctx, const std::vector& color_devices_info); l500_color(std::shared_ptr ctx, @@ -38,13 +40,16 @@ namespace librealsense std::vector get_raw_extrinsics_table() const; }; - class l500_color_sensor : public uvc_sensor, public video_sensor_interface + class l500_color_sensor : public synthetic_sensor, public video_sensor_interface { public: - explicit l500_color_sensor(l500_color* owner, std::shared_ptr uvc_device, - std::unique_ptr timestamp_reader, - std::shared_ptr ctx) - : uvc_sensor("RGB Camera", uvc_device, move(timestamp_reader), owner), _owner(owner) + explicit l500_color_sensor(l500_color* owner, + std::shared_ptr uvc_sensor, + std::shared_ptr ctx, + std::map l500_color_fourcc_to_rs2_format, + std::map l500_color_fourcc_to_rs2_stream) + : synthetic_sensor("RGB Sensor", uvc_sensor, owner, l500_color_fourcc_to_rs2_format, l500_color_fourcc_to_rs2_stream), + _owner(owner) {} rs2_intrinsics get_intrinsics(const stream_profile& profile) const override @@ -89,7 +94,7 @@ namespace librealsense { auto lock = environment::get_instance().get_extrinsics_graph().lock(); - auto results = uvc_sensor::init_stream_profiles(); + auto results = synthetic_sensor::init_stream_profiles(); for (auto p : results) { diff --git a/src/l500/l500-depth.cpp b/src/l500/l500-depth.cpp index da9fafc103..a3423e8680 100644 --- a/src/l500/l500-depth.cpp +++ b/src/l500/l500-depth.cpp @@ -58,16 +58,19 @@ namespace librealsense { _calib_table_raw = [this]() { return get_raw_calibration_table(); }; - get_depth_sensor().register_option(RS2_OPTION_LLD_TEMPERATURE, + auto& depth_sensor = get_depth_sensor(); + auto& raw_depth_sensor = get_raw_depth_sensor(); + + depth_sensor.register_option(RS2_OPTION_LLD_TEMPERATURE, std::make_shared (_hw_monitor.get(), RS2_OPTION_LLD_TEMPERATURE)); - get_depth_sensor().register_option(RS2_OPTION_MC_TEMPERATURE, + depth_sensor.register_option(RS2_OPTION_MC_TEMPERATURE, std::make_shared (_hw_monitor.get(), RS2_OPTION_MC_TEMPERATURE)); - get_depth_sensor().register_option(RS2_OPTION_MA_TEMPERATURE, + depth_sensor.register_option(RS2_OPTION_MA_TEMPERATURE, std::make_shared (_hw_monitor.get(), RS2_OPTION_MA_TEMPERATURE)); - get_depth_sensor().register_option(RS2_OPTION_APD_TEMPERATURE, + depth_sensor.register_option(RS2_OPTION_APD_TEMPERATURE, std::make_shared (_hw_monitor.get(), RS2_OPTION_APD_TEMPERATURE)); @@ -77,31 +80,31 @@ namespace librealsense register_stream_to_extrinsic_group(*_depth_stream, 0); register_stream_to_extrinsic_group(*_ir_stream, 0); - auto error_control = std::unique_ptr>(new uvc_xu_option(get_depth_sensor(), ivcam2::depth_xu, L500_ERROR_REPORTING, "Error reporting")); + auto error_control = std::unique_ptr>(new uvc_xu_option(raw_depth_sensor, ivcam2::depth_xu, L500_ERROR_REPORTING, "Error reporting")); _polling_error_handler = std::unique_ptr( new polling_error_handler(1000, std::move(error_control), - get_depth_sensor().get_notifications_processor(), + raw_depth_sensor.get_notifications_processor(), std::unique_ptr(new l500_notification_decoder()))); - get_depth_sensor().register_option(RS2_OPTION_ERROR_POLLING_ENABLED, std::make_shared(_polling_error_handler.get())); + depth_sensor.register_option(RS2_OPTION_ERROR_POLLING_ENABLED, std::make_shared(_polling_error_handler.get())); // attributes of md_capture_timing auto md_prop_offset = offsetof(metadata_raw, mode) + offsetof(md_l500_depth, intel_capture_timing); - 
get_depth_sensor().register_metadata(RS2_FRAME_METADATA_FRAME_TIMESTAMP, make_uvc_header_parser(&platform::uvc_header::timestamp)); - get_depth_sensor().register_metadata(RS2_FRAME_METADATA_FRAME_COUNTER, make_attribute_parser(&l500_md_capture_timing::frame_counter, md_capture_timing_attributes::frame_counter_attribute, md_prop_offset)); - get_depth_sensor().register_metadata(RS2_FRAME_METADATA_SENSOR_TIMESTAMP, make_attribute_parser(&l500_md_capture_timing::sensor_timestamp, md_capture_timing_attributes::sensor_timestamp_attribute, md_prop_offset)); - get_depth_sensor().register_metadata(RS2_FRAME_METADATA_ACTUAL_FPS, make_attribute_parser(&l500_md_capture_timing::exposure_time, md_capture_timing_attributes::sensor_timestamp_attribute, md_prop_offset)); + depth_sensor.register_metadata(RS2_FRAME_METADATA_FRAME_TIMESTAMP, make_uvc_header_parser(&platform::uvc_header::timestamp)); + depth_sensor.register_metadata(RS2_FRAME_METADATA_FRAME_COUNTER, make_attribute_parser(&l500_md_capture_timing::frame_counter, md_capture_timing_attributes::frame_counter_attribute, md_prop_offset)); + depth_sensor.register_metadata(RS2_FRAME_METADATA_SENSOR_TIMESTAMP, make_attribute_parser(&l500_md_capture_timing::sensor_timestamp, md_capture_timing_attributes::sensor_timestamp_attribute, md_prop_offset)); + depth_sensor.register_metadata(RS2_FRAME_METADATA_ACTUAL_FPS, make_attribute_parser(&l500_md_capture_timing::exposure_time, md_capture_timing_attributes::sensor_timestamp_attribute, md_prop_offset)); // attributes of md_depth_control md_prop_offset = offsetof(metadata_raw, mode) + offsetof(md_l500_depth, intel_depth_control); - get_depth_sensor().register_metadata(RS2_FRAME_METADATA_FRAME_LASER_POWER, make_attribute_parser(&md_l500_depth_control::laser_power, md_l500_depth_control_attributes::laser_power, md_prop_offset)); - get_depth_sensor().register_metadata(RS2_FRAME_METADATA_FRAME_LASER_POWER_MODE, make_attribute_parser(&md_l500_depth_control::laser_power_mode, md_rgb_control_attributes::manual_exp_attribute, md_prop_offset)); + depth_sensor.register_metadata(RS2_FRAME_METADATA_FRAME_LASER_POWER, make_attribute_parser(&md_l500_depth_control::laser_power, md_l500_depth_control_attributes::laser_power, md_prop_offset)); + depth_sensor.register_metadata(RS2_FRAME_METADATA_FRAME_LASER_POWER_MODE, make_attribute_parser(&md_l500_depth_control::laser_power_mode, md_rgb_control_attributes::manual_exp_attribute, md_prop_offset)); } std::vector l500_depth::get_profiles_tags() const @@ -183,13 +186,14 @@ namespace librealsense return intrinsic->orient.depth_offset; } - rs2_time_t l500_timestamp_reader_from_metadata::get_frame_timestamp(const request_mapping& mode, const platform::frame_object& fo) + rs2_time_t l500_timestamp_reader_from_metadata::get_frame_timestamp(std::shared_ptr frame) { std::lock_guard lock(_mtx); - if (has_metadata_ts(fo)) + auto f = std::dynamic_pointer_cast(frame); + if (has_metadata_ts(frame)) { - auto md = (librealsense::metadata_raw*)(fo.metadata); + auto md = (librealsense::metadata_raw*)(f->additional_data.metadata_blob.data()); return (double)(md->header.timestamp)*TIMESTAMP_USEC_TO_MSEC; } else @@ -197,26 +201,27 @@ namespace librealsense if (!one_time_note) { LOG_WARNING("UVC metadata payloads are not available for stream " - << std::hex << mode.pf->fourcc << std::dec << (mode.profile.format) + //<< std::hex << mode.pf->fourcc << std::dec << (mode.profile.format) << ". 
Please refer to installation chapter for details."); one_time_note = true; } - return _backup_timestamp_reader->get_frame_timestamp(mode, fo); + return _backup_timestamp_reader->get_frame_timestamp(frame); } } - unsigned long long l500_timestamp_reader_from_metadata::get_frame_counter(const request_mapping & mode, const platform::frame_object& fo) const + unsigned long long l500_timestamp_reader_from_metadata::get_frame_counter(std::shared_ptr frame) const { std::lock_guard lock(_mtx); - if (has_metadata_fc(fo)) + auto f = std::dynamic_pointer_cast(frame); + if (has_metadata_fc(frame)) { - auto md = (byte*)(fo.metadata); + auto md = (byte*)(f->additional_data.metadata_blob.data()); // WA until we will have the struct of metadata return ((int*)md)[7]; } - return _backup_timestamp_reader->get_frame_counter(mode, fo); + return _backup_timestamp_reader->get_frame_counter(frame); } void l500_timestamp_reader_from_metadata::reset() @@ -227,11 +232,11 @@ namespace librealsense ts_wrap.reset(); } - rs2_timestamp_domain l500_timestamp_reader_from_metadata::get_frame_timestamp_domain(const request_mapping & mode, const platform::frame_object& fo) const + rs2_timestamp_domain l500_timestamp_reader_from_metadata::get_frame_timestamp_domain(std::shared_ptr frame) const { std::lock_guard lock(_mtx); - return (has_metadata_ts(fo)) ? RS2_TIMESTAMP_DOMAIN_HARDWARE_CLOCK : _backup_timestamp_reader->get_frame_timestamp_domain(mode, fo); + return (has_metadata_ts(frame)) ? RS2_TIMESTAMP_DOMAIN_HARDWARE_CLOCK : _backup_timestamp_reader->get_frame_timestamp_domain(frame); } processing_blocks l500_depth_sensor::get_l500_recommended_proccesing_blocks() @@ -250,14 +255,14 @@ namespace librealsense void l500_depth_sensor::start(frame_callback_ptr callback) { if(_depth_invalidation_enabled) - uvc_sensor::start(std::make_shared(shared_from_this(), callback, _user_requests, _validator_requests)); + synthetic_sensor::start(std::make_shared(shared_from_this(), callback, _user_requests, _validator_requests)); else - uvc_sensor::start(callback); + synthetic_sensor::start(callback); } void l500_depth_sensor::stop() { - uvc_sensor::stop(); + synthetic_sensor::stop(); _depth_invalidation_option->set_streaming(false); } @@ -285,7 +290,7 @@ namespace librealsense auto user_request_profile = dynamic_cast(user_request->get()); - auto sp = uvc_sensor::get_stream_profiles(); + auto sp = synthetic_sensor::get_stream_profiles(); auto corresponding_ir = std::find_if(sp.begin(), sp.end(), [&](std::shared_ptr sp) { @@ -305,7 +310,7 @@ namespace librealsense _validator_requests = requests; } - uvc_sensor::open(_validator_requests); + synthetic_sensor::open(_validator_requests); } catch (...) 
{ diff --git a/src/l500/l500-depth.h b/src/l500/l500-depth.h index a5d58c1e4f..8f3fa88179 100644 --- a/src/l500/l500-depth.h +++ b/src/l500/l500-depth.h @@ -6,6 +6,8 @@ #include #include #include +#include + #include "l500-device.h" #include "context.h" #include "backend.h" @@ -80,13 +82,11 @@ namespace librealsense float _baseline; }; - class l500_depth_sensor : public uvc_sensor, public video_sensor_interface, public virtual depth_sensor, public virtual l500_depth_sensor_interface + class l500_depth_sensor : public synthetic_sensor, public video_sensor_interface, public virtual depth_sensor, public virtual l500_depth_sensor_interface { public: - explicit l500_depth_sensor(l500_device* owner, std::shared_ptr uvc_device, - std::unique_ptr timestamp_reader) - : uvc_sensor("L500 Depth Sensor", uvc_device, move(timestamp_reader), owner), _owner(owner), - _depth_invalidation_enabled(false) + explicit l500_depth_sensor(l500_device* owner, std::shared_ptr uvc_sensor, std::map l500_depth_fourcc_to_rs2_format_map, std::map l500_depth_fourcc_to_rs2_stream_map) + : synthetic_sensor("Depth Sensor", uvc_sensor, owner, l500_depth_fourcc_to_rs2_format_map, l500_depth_fourcc_to_rs2_stream_map), _owner(owner), _depth_invalidation_enabled(false) { register_option(RS2_OPTION_DEPTH_UNITS, std::make_shared("Number of meters represented by a single depth unit", lazy([&]() { @@ -161,7 +161,7 @@ namespace librealsense { auto lock = environment::get_instance().get_extrinsics_graph().lock(); - auto results = uvc_sensor::init_stream_profiles(); + auto results = synthetic_sensor::init_stream_profiles(); for (auto p : results) { // Register stream types diff --git a/src/l500/l500-device.cpp b/src/l500/l500-device.cpp index b86e6f29a4..c4ad41a741 100644 --- a/src/l500/l500-device.cpp +++ b/src/l500/l500-device.cpp @@ -1,22 +1,42 @@ // License: Apache 2.0. See LICENSE file in root directory. // Copyright(c) 2018 Intel Corporation. All Rights Reserved. 
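// Illustrative sketch: the fourcc keys used in the maps registered below (and in
// sr300.h and l500-color.cpp above) are the four ASCII codes of the pixel-format
// tag packed most-significant byte first. A stand-alone constexpr equivalent,
// assuming the same packing as the 0x5a313620 ("Z16 ") literal that the old L500
// timestamp reader compared against (removed later in this patch); pack_fourcc is
// an illustrative name, not the library's rs_fourcc helper.
#include <cstdint>

constexpr std::uint32_t pack_fourcc(char a, char b, char c, char d)
{
    return (static_cast<std::uint32_t>(static_cast<unsigned char>(a)) << 24) |
           (static_cast<std::uint32_t>(static_cast<unsigned char>(b)) << 16) |
           (static_cast<std::uint32_t>(static_cast<unsigned char>(c)) << 8)  |
            static_cast<std::uint32_t>(static_cast<unsigned char>(d));
}

static_assert(pack_fourcc('Z', '1', '6', ' ') == 0x5a313620, "Z16 tag");
static_assert(pack_fourcc('G', 'R', 'E', 'Y') == 0x47524559, "GREY tag");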
-#include #include "l500-device.h" + +#include + #include "context.h" #include "stream.h" +#include "image.h" + #include "l500-depth.h" #include "l500-private.h" + #include "proc/decimation-filter.h" #include "proc/threshold.h" #include "proc/spatial-filter.h" #include "proc/temporal-filter.h" #include "proc/hole-filling-filter.h" #include "proc/zero-order.h" +#include "proc/syncer-processing-block.h" +#include "proc/identity-processing-block.h" +#include "proc/rotation-transform.h" #include "fw-update/fw-update-unsigned.h" namespace librealsense { + std::map l500_depth_fourcc_to_rs2_format = { + { rs_fourcc('G','R','E','Y'), RS2_FORMAT_Y8 }, + { rs_fourcc('Z','1','6',' '), RS2_FORMAT_Z16 }, + { rs_fourcc('C',' ',' ',' '), RS2_FORMAT_RAW8 }, + }; + + std::map l500_depth_fourcc_to_rs2_stream = { + { rs_fourcc('G','R','E','Y'), RS2_STREAM_INFRARED }, + { rs_fourcc('Z','1','6',' '), RS2_STREAM_DEPTH }, + { rs_fourcc('C',' ',' ',' '), RS2_STREAM_CONFIDENCE } + }; + using namespace ivcam2; l500_device::l500_device(std::shared_ptr ctx, @@ -34,18 +54,21 @@ namespace librealsense auto&& backend = ctx->get_backend(); + auto& depth_sensor = get_depth_sensor(); + auto& raw_depth_sensor = get_raw_depth_sensor(); + if (group.usb_devices.size() > 0) { _hw_monitor = std::make_shared( std::make_shared(backend.create_usb_device(group.usb_devices.front()), - get_depth_sensor())); + raw_depth_sensor)); } else { _hw_monitor = std::make_shared( std::make_shared(std::make_shared( - get_depth_sensor(), depth_xu, L500_HWMONITOR), - get_depth_sensor())); + raw_depth_sensor, depth_xu, L500_HWMONITOR), + raw_depth_sensor)); } #ifdef HWM_OVER_XU @@ -53,8 +76,8 @@ namespace librealsense { _hw_monitor = std::make_shared( std::make_shared(std::make_shared( - get_depth_sensor(), depth_xu, L500_HWMONITOR), - get_depth_sensor())); + raw_depth_sensor, depth_xu, L500_HWMONITOR), + raw_depth_sensor)); } #endif @@ -79,7 +102,7 @@ namespace librealsense using namespace platform; - auto usb_mode = get_depth_sensor().get_usb_specification(); + auto usb_mode = raw_depth_sensor.get_usb_specification(); if (usb_spec_names.count(usb_mode) && (usb_undefined != usb_mode)) { auto usb_type_str = usb_spec_names.at(usb_mode); @@ -98,7 +121,7 @@ namespace librealsense register_info(RS2_CAMERA_INFO_CAMERA_LOCKED, _is_locked ? 
"YES" : "NO"); } - std::shared_ptr l500_device::create_depth_device(std::shared_ptr ctx, + std::shared_ptr l500_device::create_depth_device(std::shared_ptr ctx, const std::vector& all_device_infos) { auto&& backend = ctx->get_backend(); @@ -109,23 +132,75 @@ namespace librealsense std::unique_ptr timestamp_reader_metadata(new l500_timestamp_reader_from_metadata(backend.create_time_service())); auto enable_global_time_option = std::shared_ptr(new global_time_option()); - auto depth_ep = std::make_shared(this, std::make_shared(depth_devices), - std::unique_ptr(new global_timestamp_reader(std::move(timestamp_reader_metadata), _tf_keeper, enable_global_time_option))); + auto raw_depth_ep = std::make_shared("Raw Depth Sensor", std::make_shared(depth_devices), + std::unique_ptr(new global_timestamp_reader(std::move(timestamp_reader_metadata), _tf_keeper, enable_global_time_option)), this); + raw_depth_ep->register_xu(depth_xu); + auto depth_ep = std::make_shared(this, raw_depth_ep, l500_depth_fourcc_to_rs2_format, l500_depth_fourcc_to_rs2_stream); depth_ep->register_option(RS2_OPTION_GLOBAL_TIME_ENABLED, enable_global_time_option); - depth_ep->register_xu(depth_xu); - depth_ep->register_pixel_format(pf_z16_l500); - depth_ep->register_pixel_format(pf_confidence_l500); - depth_ep->register_pixel_format(pf_y8_l500); - depth_ep->register_option(RS2_OPTION_VISUAL_PRESET, std::make_shared>( - *depth_ep, + *raw_depth_ep, ivcam2::depth_xu, ivcam2::L500_DEPTH_VISUAL_PRESET, "Preset to calibrate the camera to short or long range. 1 is long range and 2 is short range", std::map{ { 1, "Long range"}, { 2, "Short range" }})); + depth_ep->register_processing_block( + { {RS2_FORMAT_Z16}, {RS2_FORMAT_Y8} }, + { {RS2_FORMAT_Z16, RS2_STREAM_DEPTH, 0, 0, 0, 0, &rotate_resolution} }, + []() { + auto z16rot = std::make_shared(); + auto y8rot = std::make_shared(); + auto sync = std::make_shared(); + auto zo = std::make_shared(); + + auto cpb = std::make_shared(); + cpb->add(z16rot); + cpb->add(y8rot); + cpb->add(sync); + cpb->add(zo); + + return cpb; + } + ); + + depth_ep->register_processing_block( + { {RS2_FORMAT_Z16}, {RS2_FORMAT_Y8}, {RS2_FORMAT_RAW8} }, + { + {RS2_FORMAT_Z16, RS2_STREAM_DEPTH, 0, 0, 0, 0, &rotate_resolution}, + {RS2_FORMAT_RAW8, RS2_STREAM_CONFIDENCE, 0, 0, 0, 0, &l500_confidence_resolution} + }, + []() { + auto z16rot = std::make_shared(); + auto y8rot = std::make_shared(); + auto conf = std::make_shared(); + auto sync = std::make_shared(); + auto zo = std::make_shared(); + + auto cpb = std::make_shared(); + cpb->add(z16rot); + cpb->add(y8rot); + cpb->add(conf); + cpb->add(sync); + cpb->add(zo); + + return cpb; + } + ); + + depth_ep->register_processing_block( + { {RS2_FORMAT_Y8} }, + { {RS2_FORMAT_Y8, RS2_STREAM_INFRARED, 1, 0, 0, 0, &rotate_resolution} }, + []() { return std::make_shared(); } + ); + + depth_ep->register_processing_block( + { {RS2_FORMAT_RAW8} }, + { {RS2_FORMAT_RAW8, RS2_STREAM_CONFIDENCE, 0, 0, 0, 0, &l500_confidence_resolution} }, + []() { return std::make_shared(); } + ); + return depth_ep; } @@ -191,7 +266,7 @@ namespace librealsense std::vector flash; flash.reserve(flash_size); - get_depth_sensor().invoke_powered([&](platform::uvc_device& dev) + get_raw_depth_sensor().invoke_powered([&](platform::uvc_device& dev) { for (int i = 0; i < max_iterations; i++) { @@ -308,7 +383,7 @@ namespace librealsense if (_is_locked) throw std::runtime_error("this camera is locked and doesn't allow direct flash write, for firmware update use rs2_update_firmware method (DFU)"); - 
get_depth_sensor().invoke_powered([&](platform::uvc_device& dev) + get_raw_depth_sensor().invoke_powered([&](platform::uvc_device& dev) { command cmdPFD(ivcam2::PFD); cmdPFD.require_response = false; diff --git a/src/l500/l500-device.h b/src/l500/l500-device.h index 97443bdd6a..54a97221a5 100644 --- a/src/l500/l500-device.h +++ b/src/l500/l500-device.h @@ -26,10 +26,16 @@ namespace librealsense l500_device(std::shared_ptr ctx, const platform::backend_device_group& group); - std::shared_ptr create_depth_device(std::shared_ptr ctx, + std::shared_ptr create_depth_device(std::shared_ptr ctx, const std::vector& all_device_infos); - uvc_sensor& get_depth_sensor() { return dynamic_cast(get_sensor(_depth_device_idx)); } + synthetic_sensor& get_depth_sensor() { return dynamic_cast(get_sensor(_depth_device_idx)); } + + uvc_sensor& get_raw_depth_sensor() + { + synthetic_sensor& depth_sensor = get_depth_sensor(); + return dynamic_cast(*depth_sensor.get_raw_sensor()); + } std::vector send_receive_raw_data(const std::vector& input) override { diff --git a/src/l500/l500-motion.cpp b/src/l500/l500-motion.cpp index 94878fa920..591d53a36f 100644 --- a/src/l500/l500-motion.cpp +++ b/src/l500/l500-motion.cpp @@ -5,19 +5,20 @@ #include "l500-private.h" #include "l500-motion.h" #include "../backend.h" +#include "proc/motion-transform.h" namespace librealsense { #ifdef _WIN32 std::vector> l500_sensor_name_and_hid_profiles = - {{ "HID Sensor Class Device: Gyroscope", {RS2_STREAM_GYRO, 0, 1, 1, 100, RS2_FORMAT_MOTION_XYZ32F}}, - { "HID Sensor Class Device: Gyroscope", {RS2_STREAM_GYRO, 0, 1, 1, 200, RS2_FORMAT_MOTION_XYZ32F}}, - { "HID Sensor Class Device: Gyroscope", {RS2_STREAM_GYRO, 0, 1, 1, 400, RS2_FORMAT_MOTION_XYZ32F}}, - { "HID Sensor Class Device: Accelerometer", {RS2_STREAM_ACCEL, 0, 1, 1, 50, RS2_FORMAT_MOTION_XYZ32F}}, - { "HID Sensor Class Device: Accelerometer", {RS2_STREAM_ACCEL, 0, 1, 1, 100, RS2_FORMAT_MOTION_XYZ32F}}, - { "HID Sensor Class Device: Accelerometer", {RS2_STREAM_ACCEL, 0, 1, 1, 200, RS2_FORMAT_MOTION_XYZ32F}}, - { "HID Sensor Class Device: Accelerometer", {RS2_STREAM_ACCEL, 0, 1, 1, 400, RS2_FORMAT_MOTION_XYZ32F}} }; + {{ "HID Sensor Class Device: Gyroscope", { RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_GYRO, 0, 1, 1, 100 }}, + { "HID Sensor Class Device: Gyroscope", { RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_GYRO, 0, 1, 1, 200 }}, + { "HID Sensor Class Device: Gyroscope", { RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_GYRO, 0, 1, 1, 400 }}, + { "HID Sensor Class Device: Accelerometer", { RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_ACCEL, 0, 1, 1, 50 }}, + { "HID Sensor Class Device: Accelerometer", { RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_ACCEL, 0, 1, 1, 100 }}, + { "HID Sensor Class Device: Accelerometer", { RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_ACCEL, 0, 1, 1, 200 }}, + { "HID Sensor Class Device: Accelerometer", { RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_ACCEL, 0, 1, 1, 400 }}}; // Translate frequency to SENSOR_PROPERTY_CURRENT_REPORT_INTERVAL std::map> l500_fps_and_sampling_frequency_per_rs2_stream = @@ -31,13 +32,13 @@ namespace librealsense #else std::vector> l500_sensor_name_and_hid_profiles = - {{ "gyro_3d", {RS2_STREAM_GYRO, 0, 1, 1, 100, RS2_FORMAT_MOTION_XYZ32F}}, - { "gyro_3d", {RS2_STREAM_GYRO, 0, 1, 1, 200, RS2_FORMAT_MOTION_XYZ32F}}, - { "gyro_3d", {RS2_STREAM_GYRO, 0, 1, 1, 400, RS2_FORMAT_MOTION_XYZ32F}}, - { "accel_3d", {RS2_STREAM_ACCEL, 0, 1, 1, 50, RS2_FORMAT_MOTION_XYZ32F}}, - { "accel_3d", {RS2_STREAM_ACCEL, 0, 1, 1, 100, RS2_FORMAT_MOTION_XYZ32F}}, - { "accel_3d", {RS2_STREAM_ACCEL, 0, 1, 
1, 200, RS2_FORMAT_MOTION_XYZ32F}}, - { "accel_3d", {RS2_STREAM_ACCEL, 0, 1, 1, 400, RS2_FORMAT_MOTION_XYZ32F}}}; + {{ "gyro_3d", { RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_GYRO, 0, 1, 1, 100 }}, + { "gyro_3d", { RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_GYRO, 0, 1, 1, 200 }}, + { "gyro_3d", { RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_GYRO, 0, 1, 1, 400 }}, + { "accel_3d", { RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_ACCEL, 0, 1, 1, 50 }}, + { "accel_3d", { RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_ACCEL, 0, 1, 1, 100 }}, + { "accel_3d", { RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_ACCEL, 0, 1, 1, 200 }}, + { "accel_3d", { RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_ACCEL, 0, 1, 1, 400 }}}; // The frequency selector is vendor and model-specific std::map> l500_fps_and_sampling_frequency_per_rs2_stream = @@ -50,16 +51,14 @@ namespace librealsense {400, 4}}} }; #endif - class l500_hid_sensor : public hid_sensor + class l500_hid_sensor : public synthetic_sensor { public: - explicit l500_hid_sensor(l500_motion* owner, std::shared_ptr hid_device, - std::unique_ptr hid_iio_timestamp_reader, - std::unique_ptr custom_hid_timestamp_reader, - std::map> fps_and_sampling_frequency_per_rs2_stream, - std::vector> sensor_name_and_hid_profiles) - : hid_sensor(hid_device, move(hid_iio_timestamp_reader), move(custom_hid_timestamp_reader), - fps_and_sampling_frequency_per_rs2_stream, sensor_name_and_hid_profiles, owner), _owner(owner) + explicit l500_hid_sensor(std::string name, + std::shared_ptr sensor, + device* device, + l500_motion* owner) + : synthetic_sensor(name, sensor, device), _owner(owner) { } @@ -71,7 +70,7 @@ namespace librealsense stream_profiles init_stream_profiles() override { auto lock = environment::get_instance().get_extrinsics_graph().lock(); - auto results = hid_sensor::init_stream_profiles(); + auto results = synthetic_sensor::init_stream_profiles(); for (auto p : results) { @@ -98,7 +97,7 @@ namespace librealsense const l500_motion* _owner; }; - std::shared_ptr l500_motion::create_hid_device(std::shared_ptr ctx, const std::vector& all_hid_infos) + std::shared_ptr l500_motion::create_hid_device(std::shared_ptr ctx, const std::vector& all_hid_infos) { if (all_hid_infos.empty()) { @@ -109,17 +108,27 @@ namespace librealsense std::unique_ptr iio_hid_ts_reader(new iio_hid_timestamp_reader()); std::unique_ptr custom_hid_ts_reader(new iio_hid_timestamp_reader()); auto enable_global_time_option = std::shared_ptr(new global_time_option()); - auto hid_ep = std::make_shared(this, ctx->get_backend().create_hid_device(all_hid_infos.front()), + auto hid_ep = std::make_shared(ctx->get_backend().create_hid_device(all_hid_infos.front()), std::unique_ptr(new global_timestamp_reader(std::move(iio_hid_ts_reader), _tf_keeper, enable_global_time_option)), std::unique_ptr(new global_timestamp_reader(std::move(custom_hid_ts_reader), _tf_keeper, enable_global_time_option)), l500_fps_and_sampling_frequency_per_rs2_stream, - l500_sensor_name_and_hid_profiles); - - hid_ep->register_option(RS2_OPTION_GLOBAL_TIME_ENABLED, enable_global_time_option); - hid_ep->register_pixel_format(pf_accel_axes); - hid_ep->register_pixel_format(pf_gyro_axes); - - return hid_ep; + l500_sensor_name_and_hid_profiles, + this); + + auto smart_hid_ep = std::make_shared("Motion Module", hid_ep, this, this); + smart_hid_ep->register_processing_block( + { {RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_ACCEL} }, + { {RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_ACCEL} }, + []() { return std::make_shared(); } + ); + + smart_hid_ep->register_processing_block( + { {RS2_FORMAT_MOTION_XYZ32F, 
RS2_STREAM_GYRO} }, + { {RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_GYRO} }, + []() { return std::make_shared(); } + ); + smart_hid_ep->register_option(RS2_OPTION_GLOBAL_TIME_ENABLED, enable_global_time_option); + return smart_hid_ep; } l500_motion::l500_motion(std::shared_ptr ctx, const platform::backend_device_group & group) @@ -132,7 +141,7 @@ namespace librealsense { _motion_module_device_idx = add_sensor(hid_ep); // HID metadata attributes - hid_ep->register_metadata(RS2_FRAME_METADATA_FRAME_TIMESTAMP, make_hid_header_parser(&platform::hid_header::timestamp)); + hid_ep->get_raw_sensor()->register_metadata(RS2_FRAME_METADATA_FRAME_TIMESTAMP, make_hid_header_parser(&platform::hid_header::timestamp)); } } diff --git a/src/l500/l500-motion.h b/src/l500/l500-motion.h index 9658529c35..aaa276e20e 100644 --- a/src/l500/l500-motion.h +++ b/src/l500/l500-motion.h @@ -14,7 +14,7 @@ namespace librealsense class l500_motion : public virtual l500_device { public: - std::shared_ptr create_hid_device(std::shared_ptr ctx, + std::shared_ptr create_hid_device(std::shared_ptr ctx, const std::vector& all_hid_infos); l500_motion(std::shared_ptr ctx, diff --git a/src/l500/l500-private.h b/src/l500/l500-private.h index 2db3453775..94d9ac9a4f 100644 --- a/src/l500/l500-private.h +++ b/src/l500/l500-private.h @@ -247,25 +247,25 @@ namespace librealsense } } - rs2_time_t get_frame_timestamp(const request_mapping& mode, const platform::frame_object& fo) override + rs2_time_t get_frame_timestamp(std::shared_ptr frame) override { std::lock_guard lock(_mtx); return _ts->get_time(); } - unsigned long long get_frame_counter(const request_mapping & mode, const platform::frame_object& fo) const override + unsigned long long get_frame_counter(std::shared_ptr frame) const override { std::lock_guard lock(_mtx); auto pin_index = 0; - if (mode.pf->fourcc == 0x5a313620) // Z16 + if (frame->get_stream()->get_format() == RS2_FORMAT_Z16) pin_index = 1; - else if (mode.pf->fourcc == 0x43202020) // Confidence + else if (frame->get_stream()->get_stream_type() == RS2_STREAM_CONFIDENCE) pin_index = 2; return ++counter[pin_index]; } - rs2_timestamp_domain get_frame_timestamp_domain(const request_mapping & mode, const platform::frame_object& fo) const override + rs2_timestamp_domain get_frame_timestamp_domain(std::shared_ptr frame) const override { return RS2_TIMESTAMP_DOMAIN_SYSTEM_TIME; } @@ -280,21 +280,23 @@ namespace librealsense protected: - bool has_metadata_ts(const platform::frame_object& fo) const + bool has_metadata_ts(std::shared_ptr frame) const { // Metadata support for a specific stream is immutable + auto f = std::dynamic_pointer_cast(frame); const bool has_md_ts = [&] { std::lock_guard lock(_mtx); - return ((fo.metadata != nullptr) && (fo.metadata_size >= platform::uvc_header_size) && ((byte*)fo.metadata)[0] >= platform::uvc_header_size); + return ((f->additional_data.metadata_size >= platform::uvc_header_size) && ((byte*)f->additional_data.metadata_blob.data())[0] >= platform::uvc_header_size); }(); return has_md_ts; } - bool has_metadata_fc(const platform::frame_object& fo) const + bool has_metadata_fc(std::shared_ptr frame) const { // Metadata support for a specific stream is immutable + auto f = std::dynamic_pointer_cast(frame); const bool has_md_frame_counter = [&] { std::lock_guard lock(_mtx); - return ((fo.metadata != nullptr) && (fo.metadata_size > platform::uvc_header_size) && ((byte*)fo.metadata)[0] > platform::uvc_header_size); + return ((f->additional_data.metadata_size > platform::uvc_header_size) && 
((byte*)f->additional_data.metadata_blob.data())[0] > platform::uvc_header_size); }(); return has_md_frame_counter; @@ -307,13 +309,13 @@ namespace librealsense reset(); } - rs2_time_t get_frame_timestamp(const request_mapping& mode, const platform::frame_object& fo) override; + rs2_time_t get_frame_timestamp(std::shared_ptr frame) override; - unsigned long long get_frame_counter(const request_mapping & mode, const platform::frame_object& fo) const override; + unsigned long long get_frame_counter(std::shared_ptr frame) const override; void reset() override; - rs2_timestamp_domain get_frame_timestamp_domain(const request_mapping & mode, const platform::frame_object& fo) const override; + rs2_timestamp_domain get_frame_timestamp_domain(std::shared_ptr frame) const override; }; } // librealsense::ivcam2 diff --git a/src/media/record/record_device.cpp b/src/media/record/record_device.cpp index 161cd6d993..918bc4cd2b 100644 --- a/src/media/record/record_device.cpp +++ b/src/media/record/record_device.cpp @@ -41,7 +41,9 @@ std::vector> librealsense::record_d auto recording_sensor = std::make_shared(*this, live_sensor); m_on_notification_token = recording_sensor->on_notification += [this, recording_sensor, sensor_index](const notification& n) { write_notification(sensor_index, n); }; auto on_error = [recording_sensor](const std::string& s) {recording_sensor->stop_with_error(s); }; - m_on_frame_token = recording_sensor->on_frame += [this, recording_sensor, sensor_index, on_error](frame_holder f) { write_data(sensor_index, std::move(f), on_error); }; + m_on_frame_token = recording_sensor->on_frame += [this, recording_sensor, sensor_index, on_error](frame_holder f) { + write_data(sensor_index, std::move(f), on_error); + }; m_on_extension_change_token = recording_sensor->on_extension_change += [this, recording_sensor, sensor_index, on_error](rs2_extension ext, std::shared_ptr snapshot) { write_sensor_extension_snapshot(sensor_index, ext, snapshot, on_error); }; recording_sensor->init(); //Calling init AFTER register to the above events record_sensors.emplace_back(recording_sensor); diff --git a/src/pipeline/config.cpp b/src/pipeline/config.cpp index 1cfa17f0b4..7e428b85b1 100644 --- a/src/pipeline/config.cpp +++ b/src/pipeline/config.cpp @@ -16,7 +16,7 @@ namespace librealsense { std::lock_guard lock(_mtx); _resolved_profile.reset(); - _stream_requests[{stream, index}] = { stream, index, width, height, fps, format }; + _stream_requests[{stream, index}] = { format, stream, index, width, height, fps }; } void config::enable_all_stream() diff --git a/src/pipeline/resolver.h b/src/pipeline/resolver.h index e3a781c037..c46efa99c2 100644 --- a/src/pipeline/resolver.h +++ b/src/pipeline/resolver.h @@ -204,7 +204,7 @@ namespace librealsense void enable_stream(rs2_stream stream, int index, uint32_t width, uint32_t height, rs2_format format, uint32_t fps) { - _requests[{stream, index}] = stream_profile{ stream, index, width, height, fps, format }; + _requests[{stream, index}] = stream_profile{ format, stream, index, width, height, fps }; require_all = true; } diff --git a/src/proc/CMakeLists.txt b/src/proc/CMakeLists.txt index e38c7a6434..f0ebdddd0d 100644 --- a/src/proc/CMakeLists.txt +++ b/src/proc/CMakeLists.txt @@ -22,11 +22,18 @@ target_sources(${LRS_TARGET} "${CMAKE_CURRENT_LIST_DIR}/temporal-filter.cpp" "${CMAKE_CURRENT_LIST_DIR}/hole-filling-filter.cpp" "${CMAKE_CURRENT_LIST_DIR}/disparity-transform.cpp" - "${CMAKE_CURRENT_LIST_DIR}/yuy2rgb.cpp" + "${CMAKE_CURRENT_LIST_DIR}/y8i-to-y8y8.cpp" + 
"${CMAKE_CURRENT_LIST_DIR}/y12i-to-y16y16.cpp" + "${CMAKE_CURRENT_LIST_DIR}/identity-processing-block.cpp" "${CMAKE_CURRENT_LIST_DIR}/threshold.cpp" "${CMAKE_CURRENT_LIST_DIR}/rates-printer.cpp" "${CMAKE_CURRENT_LIST_DIR}/zero-order.cpp" "${CMAKE_CURRENT_LIST_DIR}/units-transform.cpp" + "${CMAKE_CURRENT_LIST_DIR}/rotation-transform.cpp" + "${CMAKE_CURRENT_LIST_DIR}/color-formats-converter.cpp" + "${CMAKE_CURRENT_LIST_DIR}/depth-formats-converter.cpp" + "${CMAKE_CURRENT_LIST_DIR}/motion-transform.cpp" + "${CMAKE_CURRENT_LIST_DIR}/auto-exposure-processor.cpp" "${CMAKE_CURRENT_LIST_DIR}/processing-blocks-factory.h" "${CMAKE_CURRENT_LIST_DIR}/align.h" @@ -40,9 +47,16 @@ target_sources(${LRS_TARGET} "${CMAKE_CURRENT_LIST_DIR}/hole-filling-filter.h" "${CMAKE_CURRENT_LIST_DIR}/syncer-processing-block.h" "${CMAKE_CURRENT_LIST_DIR}/disparity-transform.h" - "${CMAKE_CURRENT_LIST_DIR}/yuy2rgb.h" + "${CMAKE_CURRENT_LIST_DIR}/y8i-to-y8y8.h" + "${CMAKE_CURRENT_LIST_DIR}/y12i-to-y16y16.h" + "${CMAKE_CURRENT_LIST_DIR}/identity-processing-block.h" "${CMAKE_CURRENT_LIST_DIR}/threshold.h" "${CMAKE_CURRENT_LIST_DIR}/rates-printer.h" "${CMAKE_CURRENT_LIST_DIR}/zero-order.h" "${CMAKE_CURRENT_LIST_DIR}/units-transform.h" + "${CMAKE_CURRENT_LIST_DIR}/rotation-transform.h" + "${CMAKE_CURRENT_LIST_DIR}/color-formats-converter.h" + "${CMAKE_CURRENT_LIST_DIR}/depth-formats-converter.h" + "${CMAKE_CURRENT_LIST_DIR}/motion-transform.h" + "${CMAKE_CURRENT_LIST_DIR}/auto-exposure-processor.h" ) diff --git a/src/proc/auto-exposure-processor.cpp b/src/proc/auto-exposure-processor.cpp new file mode 100644 index 0000000000..45e58873b9 --- /dev/null +++ b/src/proc/auto-exposure-processor.cpp @@ -0,0 +1,31 @@ +// License: Apache 2.0. See LICENSE file in root directory. +// Copyright(c) 2019 Intel Corporation. All Rights Reserved. + +#include "auto-exposure-processor.h" + +librealsense::auto_exposure_processor::auto_exposure_processor(rs2_stream stream, enable_auto_exposure_option& enable_ae_option) + : auto_exposure_processor("Auto Exposure Processor", stream, enable_ae_option) {} + +librealsense::auto_exposure_processor::auto_exposure_processor(const char * name, rs2_stream stream, enable_auto_exposure_option& enable_ae_option) + : generic_processing_block(name), + _stream(stream), + _enable_ae_option(enable_ae_option) {} + +bool librealsense::auto_exposure_processor::should_process(const rs2::frame & frame) +{ + return _enable_ae_option.to_add_frames() && _stream == RS2_STREAM_FISHEYE; +} + +rs2::frame librealsense::auto_exposure_processor::process_frame(const rs2::frame_source & source, const rs2::frame & f) +{ + // We dont actually modify the frame, only calculate and process the exposure values. + auto&& fi = (frame_interface*)f.get(); + ((librealsense::frame*)fi)->additional_data.fisheye_ae_mode = true; + + fi->acquire(); + auto&& auto_exposure = _enable_ae_option.get_auto_exposure(); + if (auto_exposure) + auto_exposure->add_frame(fi); + + return f; +} diff --git a/src/proc/auto-exposure-processor.h b/src/proc/auto-exposure-processor.h new file mode 100644 index 0000000000..cae1e2c0ab --- /dev/null +++ b/src/proc/auto-exposure-processor.h @@ -0,0 +1,27 @@ +// License: Apache 2.0. See LICENSE file in root directory. +// Copyright(c) 2019 Intel Corporation. All Rights Reserved. 
+ +#pragma once + +#include "synthetic-stream.h" +#include "option.h" +#include "image.h" +#include "ds5/ds5-options.h" + +namespace librealsense +{ + class auto_exposure_processor : public generic_processing_block + { + public: + auto_exposure_processor(rs2_stream stream, enable_auto_exposure_option& enable_ae_option); + + protected: + auto_exposure_processor(const char* name, rs2_stream stream, enable_auto_exposure_option& enable_ae_option); + + bool should_process(const rs2::frame& frame) override; + rs2::frame process_frame(const rs2::frame_source& source, const rs2::frame& f) override; + + enable_auto_exposure_option& _enable_ae_option; + rs2_stream _stream; + }; +} diff --git a/src/proc/color-formats-converter.cpp b/src/proc/color-formats-converter.cpp new file mode 100644 index 0000000000..b946426a21 --- /dev/null +++ b/src/proc/color-formats-converter.cpp @@ -0,0 +1,697 @@ +// License: Apache 2.0. See LICENSE file in root directory. +// Copyright(c) 2019 Intel Corporation. All Rights Reserved. + +#include "color-formats-converter.h" + +#include "option.h" +#include "image.h" + +#define STB_IMAGE_STATIC +#define STB_IMAGE_IMPLEMENTATION +#include "../third-party/stb_image.h" + +#if defined (ANDROID) || (defined (__linux__) && !defined (__x86_64__)) + +bool has_avx() { return false; } + +#else + +#ifdef _WIN32 +#include +#define cpuid(info, x) __cpuidex(info, x, 0) +#else +#include +void cpuid(int info[4], int info_type) { + __cpuid_count(info_type, 0, info[0], info[1], info[2], info[3]); +} +#endif + +bool has_avx() +{ + int info[4]; + cpuid(info, 0); + cpuid(info, 0x80000000); + return (info[2] & ((int)1 << 28)) != 0; +} + +#endif + +namespace librealsense +{ + ///////////////////////////// + // YUY2 unpacking routines // + ///////////////////////////// + // This templated function unpacks YUY2 into Y8/Y16/RGB8/RGBA8/BGR8/BGRA8, depending on the compile-time parameter FORMAT. + // It is expected that all branching outside of the loop control variable will be removed due to constant-folding. + template void unpack_yuy2(byte * const d[], const byte * s, int width, int height, int actual_size) + { + auto n = width * height; + assert(n % 16 == 0); // All currently supported color resolutions are multiples of 16 pixels. Could easily extend support to other resolutions by copying final n<16 pixels into a zero-padded buffer and recursively calling self for final iteration. +#ifdef RS2_USE_CUDA + rscuda::unpack_yuy2_cuda(d, s, n); + return; +#endif +#if defined __SSSE3__ && ! 
defined ANDROID + static bool do_avx = has_avx(); +#ifdef __AVX2__ + + if (do_avx) + { + if (FORMAT == RS2_FORMAT_Y8) unpack_yuy2_avx_y8(d, s, n); + if (FORMAT == RS2_FORMAT_Y16) unpack_yuy2_avx_y16(d, s, n); + if (FORMAT == RS2_FORMAT_RGB8) unpack_yuy2_avx_rgb8(d, s, n); + if (FORMAT == RS2_FORMAT_RGBA8) unpack_yuy2_avx_rgba8(d, s, n); + if (FORMAT == RS2_FORMAT_BGR8) unpack_yuy2_avx_bgr8(d, s, n); + if (FORMAT == RS2_FORMAT_BGRA8) unpack_yuy2_avx_bgra8(d, s, n); + } + else +#endif + { + auto src = reinterpret_cast(s); + auto dst = reinterpret_cast<__m128i *>(d[0]); + +#pragma omp parallel for + for (int i = 0; i < n / 16; i++) + { + const __m128i zero = _mm_set1_epi8(0); + const __m128i n100 = _mm_set1_epi16(100 << 4); + const __m128i n208 = _mm_set1_epi16(208 << 4); + const __m128i n298 = _mm_set1_epi16(298 << 4); + const __m128i n409 = _mm_set1_epi16(409 << 4); + const __m128i n516 = _mm_set1_epi16(516 << 4); + const __m128i evens_odds = _mm_setr_epi8(0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15); + + // Load 8 YUY2 pixels each into two 16-byte registers + __m128i s0 = _mm_loadu_si128(&src[i * 2]); + __m128i s1 = _mm_loadu_si128(&src[i * 2 + 1]); + + if (FORMAT == RS2_FORMAT_Y8) + { + // Align all Y components and output 16 pixels (16 bytes) at once + __m128i y0 = _mm_shuffle_epi8(s0, _mm_setr_epi8(1, 3, 5, 7, 9, 11, 13, 15, 0, 2, 4, 6, 8, 10, 12, 14)); + __m128i y1 = _mm_shuffle_epi8(s1, _mm_setr_epi8(0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15)); + _mm_storeu_si128(&dst[i], _mm_alignr_epi8(y0, y1, 8)); + continue; + } + + // Shuffle all Y components to the low order bytes of the register, and all U/V components to the high order bytes + const __m128i evens_odd1s_odd3s = _mm_setr_epi8(0, 2, 4, 6, 8, 10, 12, 14, 1, 5, 9, 13, 3, 7, 11, 15); // to get yyyyyyyyuuuuvvvv + __m128i yyyyyyyyuuuuvvvv0 = _mm_shuffle_epi8(s0, evens_odd1s_odd3s); + __m128i yyyyyyyyuuuuvvvv8 = _mm_shuffle_epi8(s1, evens_odd1s_odd3s); + + // Retrieve all 16 Y components as 16-bit values (8 components per register)) + __m128i y16__0_7 = _mm_unpacklo_epi8(yyyyyyyyuuuuvvvv0, zero); // convert to 16 bit + __m128i y16__8_F = _mm_unpacklo_epi8(yyyyyyyyuuuuvvvv8, zero); // convert to 16 bit + + if (FORMAT == RS2_FORMAT_Y16) + { + // Output 16 pixels (32 bytes) at once + _mm_storeu_si128(&dst[i * 2], _mm_slli_epi16(y16__0_7, 8)); + _mm_storeu_si128(&dst[i * 2 + 1], _mm_slli_epi16(y16__8_F, 8)); + continue; + } + + // Retrieve all 16 U and V components as 16-bit values (8 components per register) + __m128i uv = _mm_unpackhi_epi32(yyyyyyyyuuuuvvvv0, yyyyyyyyuuuuvvvv8); // uuuuuuuuvvvvvvvv + __m128i u = _mm_unpacklo_epi8(uv, uv); // uu uu uu uu uu uu uu uu u's duplicated + __m128i v = _mm_unpackhi_epi8(uv, uv); // vv vv vv vv vv vv vv vv + __m128i u16__0_7 = _mm_unpacklo_epi8(u, zero); // convert to 16 bit + __m128i u16__8_F = _mm_unpackhi_epi8(u, zero); // convert to 16 bit + __m128i v16__0_7 = _mm_unpacklo_epi8(v, zero); // convert to 16 bit + __m128i v16__8_F = _mm_unpackhi_epi8(v, zero); // convert to 16 bit + + // Compute R, G, B values for first 8 pixels + __m128i c16__0_7 = _mm_slli_epi16(_mm_subs_epi16(y16__0_7, _mm_set1_epi16(16)), 4); + __m128i d16__0_7 = _mm_slli_epi16(_mm_subs_epi16(u16__0_7, _mm_set1_epi16(128)), 4); // perhaps could have done these u,v to d,e before the duplication + __m128i e16__0_7 = _mm_slli_epi16(_mm_subs_epi16(v16__0_7, _mm_set1_epi16(128)), 4); + __m128i r16__0_7 = _mm_min_epi16(_mm_set1_epi16(255), _mm_max_epi16(zero, ((_mm_add_epi16(_mm_mulhi_epi16(c16__0_7, n298), 
_mm_mulhi_epi16(e16__0_7, n409)))))); // (298 * c + 409 * e + 128) ; // + __m128i g16__0_7 = _mm_min_epi16(_mm_set1_epi16(255), _mm_max_epi16(zero, ((_mm_sub_epi16(_mm_sub_epi16(_mm_mulhi_epi16(c16__0_7, n298), _mm_mulhi_epi16(d16__0_7, n100)), _mm_mulhi_epi16(e16__0_7, n208)))))); // (298 * c - 100 * d - 208 * e + 128) + __m128i b16__0_7 = _mm_min_epi16(_mm_set1_epi16(255), _mm_max_epi16(zero, ((_mm_add_epi16(_mm_mulhi_epi16(c16__0_7, n298), _mm_mulhi_epi16(d16__0_7, n516)))))); // clampbyte((298 * c + 516 * d + 128) >> 8); + + // Compute R, G, B values for second 8 pixels + __m128i c16__8_F = _mm_slli_epi16(_mm_subs_epi16(y16__8_F, _mm_set1_epi16(16)), 4); + __m128i d16__8_F = _mm_slli_epi16(_mm_subs_epi16(u16__8_F, _mm_set1_epi16(128)), 4); // perhaps could have done these u,v to d,e before the duplication + __m128i e16__8_F = _mm_slli_epi16(_mm_subs_epi16(v16__8_F, _mm_set1_epi16(128)), 4); + __m128i r16__8_F = _mm_min_epi16(_mm_set1_epi16(255), _mm_max_epi16(zero, ((_mm_add_epi16(_mm_mulhi_epi16(c16__8_F, n298), _mm_mulhi_epi16(e16__8_F, n409)))))); // (298 * c + 409 * e + 128) ; // + __m128i g16__8_F = _mm_min_epi16(_mm_set1_epi16(255), _mm_max_epi16(zero, ((_mm_sub_epi16(_mm_sub_epi16(_mm_mulhi_epi16(c16__8_F, n298), _mm_mulhi_epi16(d16__8_F, n100)), _mm_mulhi_epi16(e16__8_F, n208)))))); // (298 * c - 100 * d - 208 * e + 128) + __m128i b16__8_F = _mm_min_epi16(_mm_set1_epi16(255), _mm_max_epi16(zero, ((_mm_add_epi16(_mm_mulhi_epi16(c16__8_F, n298), _mm_mulhi_epi16(d16__8_F, n516)))))); // clampbyte((298 * c + 516 * d + 128) >> 8); + + if (FORMAT == RS2_FORMAT_RGB8 || FORMAT == RS2_FORMAT_RGBA8) + { + // Shuffle separate R, G, B values into four registers storing four pixels each in (R, G, B, A) order + __m128i rg8__0_7 = _mm_unpacklo_epi8(_mm_shuffle_epi8(r16__0_7, evens_odds), _mm_shuffle_epi8(g16__0_7, evens_odds)); // hi to take the odds which are the upper bytes we care about + __m128i ba8__0_7 = _mm_unpacklo_epi8(_mm_shuffle_epi8(b16__0_7, evens_odds), _mm_set1_epi8(-1)); + __m128i rgba_0_3 = _mm_unpacklo_epi16(rg8__0_7, ba8__0_7); + __m128i rgba_4_7 = _mm_unpackhi_epi16(rg8__0_7, ba8__0_7); + + __m128i rg8__8_F = _mm_unpacklo_epi8(_mm_shuffle_epi8(r16__8_F, evens_odds), _mm_shuffle_epi8(g16__8_F, evens_odds)); // hi to take the odds which are the upper bytes we care about + __m128i ba8__8_F = _mm_unpacklo_epi8(_mm_shuffle_epi8(b16__8_F, evens_odds), _mm_set1_epi8(-1)); + __m128i rgba_8_B = _mm_unpacklo_epi16(rg8__8_F, ba8__8_F); + __m128i rgba_C_F = _mm_unpackhi_epi16(rg8__8_F, ba8__8_F); + + if (FORMAT == RS2_FORMAT_RGBA8) + { + // Store 16 pixels (64 bytes) at once + _mm_storeu_si128(&dst[i * 4], rgba_0_3); + _mm_storeu_si128(&dst[i * 4 + 1], rgba_4_7); + _mm_storeu_si128(&dst[i * 4 + 2], rgba_8_B); + _mm_storeu_si128(&dst[i * 4 + 3], rgba_C_F); + } + + if (FORMAT == RS2_FORMAT_RGB8) + { + // Shuffle rgb triples to the start and end of each register + __m128i rgb0 = _mm_shuffle_epi8(rgba_0_3, _mm_setr_epi8(3, 7, 11, 15, 0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14)); + __m128i rgb1 = _mm_shuffle_epi8(rgba_4_7, _mm_setr_epi8(0, 1, 2, 4, 3, 7, 11, 15, 5, 6, 8, 9, 10, 12, 13, 14)); + __m128i rgb2 = _mm_shuffle_epi8(rgba_8_B, _mm_setr_epi8(0, 1, 2, 4, 5, 6, 8, 9, 3, 7, 11, 15, 10, 12, 13, 14)); + __m128i rgb3 = _mm_shuffle_epi8(rgba_C_F, _mm_setr_epi8(0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 3, 7, 11, 15)); + + // Align registers and store 16 pixels (48 bytes) at once + _mm_storeu_si128(&dst[i * 3], _mm_alignr_epi8(rgb1, rgb0, 4)); + _mm_storeu_si128(&dst[i * 3 + 1], 
_mm_alignr_epi8(rgb2, rgb1, 8)); + _mm_storeu_si128(&dst[i * 3 + 2], _mm_alignr_epi8(rgb3, rgb2, 12)); + } + } + + if (FORMAT == RS2_FORMAT_BGR8 || FORMAT == RS2_FORMAT_BGRA8) + { + // Shuffle separate R, G, B values into four registers storing four pixels each in (B, G, R, A) order + __m128i bg8__0_7 = _mm_unpacklo_epi8(_mm_shuffle_epi8(b16__0_7, evens_odds), _mm_shuffle_epi8(g16__0_7, evens_odds)); // hi to take the odds which are the upper bytes we care about + __m128i ra8__0_7 = _mm_unpacklo_epi8(_mm_shuffle_epi8(r16__0_7, evens_odds), _mm_set1_epi8(-1)); + __m128i bgra_0_3 = _mm_unpacklo_epi16(bg8__0_7, ra8__0_7); + __m128i bgra_4_7 = _mm_unpackhi_epi16(bg8__0_7, ra8__0_7); + + __m128i bg8__8_F = _mm_unpacklo_epi8(_mm_shuffle_epi8(b16__8_F, evens_odds), _mm_shuffle_epi8(g16__8_F, evens_odds)); // hi to take the odds which are the upper bytes we care about + __m128i ra8__8_F = _mm_unpacklo_epi8(_mm_shuffle_epi8(r16__8_F, evens_odds), _mm_set1_epi8(-1)); + __m128i bgra_8_B = _mm_unpacklo_epi16(bg8__8_F, ra8__8_F); + __m128i bgra_C_F = _mm_unpackhi_epi16(bg8__8_F, ra8__8_F); + + if (FORMAT == RS2_FORMAT_BGRA8) + { + // Store 16 pixels (64 bytes) at once + _mm_storeu_si128(&dst[i * 4], bgra_0_3); + _mm_storeu_si128(&dst[i * 4 + 1], bgra_4_7); + _mm_storeu_si128(&dst[i * 4 + 2], bgra_8_B); + _mm_storeu_si128(&dst[i * 4 + 3], bgra_C_F); + } + + if (FORMAT == RS2_FORMAT_BGR8) + { + // Shuffle rgb triples to the start and end of each register + __m128i bgr0 = _mm_shuffle_epi8(bgra_0_3, _mm_setr_epi8(3, 7, 11, 15, 0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14)); + __m128i bgr1 = _mm_shuffle_epi8(bgra_4_7, _mm_setr_epi8(0, 1, 2, 4, 3, 7, 11, 15, 5, 6, 8, 9, 10, 12, 13, 14)); + __m128i bgr2 = _mm_shuffle_epi8(bgra_8_B, _mm_setr_epi8(0, 1, 2, 4, 5, 6, 8, 9, 3, 7, 11, 15, 10, 12, 13, 14)); + __m128i bgr3 = _mm_shuffle_epi8(bgra_C_F, _mm_setr_epi8(0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 3, 7, 11, 15)); + + // Align registers and store 16 pixels (48 bytes) at once + _mm_storeu_si128(&dst[i * 3], _mm_alignr_epi8(bgr1, bgr0, 4)); + _mm_storeu_si128(&dst[i * 3 + 1], _mm_alignr_epi8(bgr2, bgr1, 8)); + _mm_storeu_si128(&dst[i * 3 + 2], _mm_alignr_epi8(bgr3, bgr2, 12)); + } + } + } + } +#else // Generic code for when SSSE3 is not available. + auto src = reinterpret_cast(s); + auto dst = reinterpret_cast(d[0]); + for (; n; n -= 16, src += 32) + { + if (FORMAT == RS2_FORMAT_Y8) + { + uint8_t out[16] = { + src[0], src[2], src[4], src[6], + src[8], src[10], src[12], src[14], + src[16], src[18], src[20], src[22], + src[24], src[26], src[28], src[30], + }; + librealsense::copy(dst, out, sizeof out); + dst += sizeof out; + continue; + } + + if (FORMAT == RS2_FORMAT_Y16) + { + // Y16 is little-endian. We output Y << 8. 
+ uint8_t out[32] = { + 0, src[0], 0, src[2], 0, src[4], 0, src[6], + 0, src[8], 0, src[10], 0, src[12], 0, src[14], + 0, src[16], 0, src[18], 0, src[20], 0, src[22], + 0, src[24], 0, src[26], 0, src[28], 0, src[30], + }; + librealsense::copy(dst, out, sizeof out); + dst += sizeof out; + continue; + } + + int16_t y[16] = { + src[0], src[2], src[4], src[6], + src[8], src[10], src[12], src[14], + src[16], src[18], src[20], src[22], + src[24], src[26], src[28], src[30], + }, u[16] = { + src[1], src[1], src[5], src[5], + src[9], src[9], src[13], src[13], + src[17], src[17], src[21], src[21], + src[25], src[25], src[29], src[29], + }, v[16] = { + src[3], src[3], src[7], src[7], + src[11], src[11], src[15], src[15], + src[19], src[19], src[23], src[23], + src[27], src[27], src[31], src[31], + }; + + uint8_t r[16], g[16], b[16]; + for (int i = 0; i < 16; i++) + { + int32_t c = y[i] - 16; + int32_t d = u[i] - 128; + int32_t e = v[i] - 128; + + int32_t t; +#define clamp(x) ((t=(x)) > 255 ? 255 : t < 0 ? 0 : t) + r[i] = clamp((298 * c + 409 * e + 128) >> 8); + g[i] = clamp((298 * c - 100 * d - 208 * e + 128) >> 8); + b[i] = clamp((298 * c + 516 * d + 128) >> 8); +#undef clamp + } + + if (FORMAT == RS2_FORMAT_RGB8) + { + uint8_t out[16 * 3] = { + r[0], g[0], b[0], r[1], g[1], b[1], + r[2], g[2], b[2], r[3], g[3], b[3], + r[4], g[4], b[4], r[5], g[5], b[5], + r[6], g[6], b[6], r[7], g[7], b[7], + r[8], g[8], b[8], r[9], g[9], b[9], + r[10], g[10], b[10], r[11], g[11], b[11], + r[12], g[12], b[12], r[13], g[13], b[13], + r[14], g[14], b[14], r[15], g[15], b[15], + }; + librealsense::copy(dst, out, sizeof out); + dst += sizeof out; + continue; + } + + if (FORMAT == RS2_FORMAT_BGR8) + { + uint8_t out[16 * 3] = { + b[0], g[0], r[0], b[1], g[1], r[1], + b[2], g[2], r[2], b[3], g[3], r[3], + b[4], g[4], r[4], b[5], g[5], r[5], + b[6], g[6], r[6], b[7], g[7], r[7], + b[8], g[8], r[8], b[9], g[9], r[9], + b[10], g[10], r[10], b[11], g[11], r[11], + b[12], g[12], r[12], b[13], g[13], r[13], + b[14], g[14], r[14], b[15], g[15], r[15], + }; + librealsense::copy(dst, out, sizeof out); + dst += sizeof out; + continue; + } + + if (FORMAT == RS2_FORMAT_RGBA8) + { + uint8_t out[16 * 4] = { + r[0], g[0], b[0], 255, r[1], g[1], b[1], 255, + r[2], g[2], b[2], 255, r[3], g[3], b[3], 255, + r[4], g[4], b[4], 255, r[5], g[5], b[5], 255, + r[6], g[6], b[6], 255, r[7], g[7], b[7], 255, + r[8], g[8], b[8], 255, r[9], g[9], b[9], 255, + r[10], g[10], b[10], 255, r[11], g[11], b[11], 255, + r[12], g[12], b[12], 255, r[13], g[13], b[13], 255, + r[14], g[14], b[14], 255, r[15], g[15], b[15], 255, + }; + librealsense::copy(dst, out, sizeof out); + dst += sizeof out; + continue; + } + + if (FORMAT == RS2_FORMAT_BGRA8) + { + uint8_t out[16 * 4] = { + b[0], g[0], r[0], 255, b[1], g[1], r[1], 255, + b[2], g[2], r[2], 255, b[3], g[3], r[3], 255, + b[4], g[4], r[4], 255, b[5], g[5], r[5], 255, + b[6], g[6], r[6], 255, b[7], g[7], r[7], 255, + b[8], g[8], r[8], 255, b[9], g[9], r[9], 255, + b[10], g[10], r[10], 255, b[11], g[11], r[11], 255, + b[12], g[12], r[12], 255, b[13], g[13], r[13], 255, + b[14], g[14], r[14], 255, b[15], g[15], r[15], 255, + }; + librealsense::copy(dst, out, sizeof out); + dst += sizeof out; + continue; + } + } +#endif + } + + void unpack_yuy2(rs2_format dst_format, rs2_stream dst_stream, byte * const d[], const byte * s, int w, int h, int actual_size) + { + switch (dst_format) + { + case RS2_FORMAT_Y8: + unpack_yuy2(d, s, w, h, actual_size); + break; + case RS2_FORMAT_Y16: + unpack_yuy2(d, s, w, h, actual_size); 
+ break; + case RS2_FORMAT_RGB8: + unpack_yuy2(d, s, w, h, actual_size); + break; + case RS2_FORMAT_RGBA8: + unpack_yuy2(d, s, w, h, actual_size); + break; + case RS2_FORMAT_BGR8: + unpack_yuy2(d, s, w, h, actual_size); + break; + case RS2_FORMAT_BGRA8: + unpack_yuy2(d, s, w, h, actual_size); + break; + default: + LOG_ERROR("Unsupported format for YUY2 conversion."); + break; + } + } + + + ///////////////////////////// + // UYVY unpacking routines // + ///////////////////////////// + // This templated function unpacks UYVY into RGB8/RGBA8/BGR8/BGRA8, depending on the compile-time parameter FORMAT. + // It is expected that all branching outside of the loop control variable will be removed due to constant-folding. + template void unpack_uyvy(byte * const d[], const byte * s, int width, int height, int actual_size) + { + auto n = width * height; + assert(n % 16 == 0); // All currently supported color resolutions are multiples of 16 pixels. Could easily extend support to other resolutions by copying final n<16 pixels into a zero-padded buffer and recursively calling self for final iteration. +#ifdef __SSSE3__ + auto src = reinterpret_cast(s); + auto dst = reinterpret_cast<__m128i *>(d[0]); + for (; n; n -= 16) + { + const __m128i zero = _mm_set1_epi8(0); + const __m128i n100 = _mm_set1_epi16(100 << 4); + const __m128i n208 = _mm_set1_epi16(208 << 4); + const __m128i n298 = _mm_set1_epi16(298 << 4); + const __m128i n409 = _mm_set1_epi16(409 << 4); + const __m128i n516 = _mm_set1_epi16(516 << 4); + const __m128i evens_odds = _mm_setr_epi8(0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15); + + // Load 8 UYVY pixels each into two 16-byte registers + __m128i s0 = _mm_loadu_si128(src++); + __m128i s1 = _mm_loadu_si128(src++); + + + // Shuffle all Y components to the low order bytes of the register, and all U/V components to the high order bytes + const __m128i evens_odd1s_odd3s = _mm_setr_epi8(1, 3, 5, 7, 9, 11, 13, 15, 0, 4, 8, 12, 2, 6, 10, 14); // to get yyyyyyyyuuuuvvvv + __m128i yyyyyyyyuuuuvvvv0 = _mm_shuffle_epi8(s0, evens_odd1s_odd3s); + __m128i yyyyyyyyuuuuvvvv8 = _mm_shuffle_epi8(s1, evens_odd1s_odd3s); + + // Retrieve all 16 Y components as 16-bit values (8 components per register)) + __m128i y16__0_7 = _mm_unpacklo_epi8(yyyyyyyyuuuuvvvv0, zero); // convert to 16 bit + __m128i y16__8_F = _mm_unpacklo_epi8(yyyyyyyyuuuuvvvv8, zero); // convert to 16 bit + + + // Retrieve all 16 U and V components as 16-bit values (8 components per register) + __m128i uv = _mm_unpackhi_epi32(yyyyyyyyuuuuvvvv0, yyyyyyyyuuuuvvvv8); // uuuuuuuuvvvvvvvv + __m128i u = _mm_unpacklo_epi8(uv, uv); // uu uu uu uu uu uu uu uu u's duplicated + __m128i v = _mm_unpackhi_epi8(uv, uv); // vv vv vv vv vv vv vv vv + __m128i u16__0_7 = _mm_unpacklo_epi8(u, zero); // convert to 16 bit + __m128i u16__8_F = _mm_unpackhi_epi8(u, zero); // convert to 16 bit + __m128i v16__0_7 = _mm_unpacklo_epi8(v, zero); // convert to 16 bit + __m128i v16__8_F = _mm_unpackhi_epi8(v, zero); // convert to 16 bit + + // Compute R, G, B values for first 8 pixels + __m128i c16__0_7 = _mm_slli_epi16(_mm_subs_epi16(y16__0_7, _mm_set1_epi16(16)), 4); + __m128i d16__0_7 = _mm_slli_epi16(_mm_subs_epi16(u16__0_7, _mm_set1_epi16(128)), 4); // perhaps could have done these u,v to d,e before the duplication + __m128i e16__0_7 = _mm_slli_epi16(_mm_subs_epi16(v16__0_7, _mm_set1_epi16(128)), 4); + __m128i r16__0_7 = _mm_min_epi16(_mm_set1_epi16(255), _mm_max_epi16(zero, ((_mm_add_epi16(_mm_mulhi_epi16(c16__0_7, n298), _mm_mulhi_epi16(e16__0_7, n409)))))); // 
(298 * c + 409 * e + 128) ; // + __m128i g16__0_7 = _mm_min_epi16(_mm_set1_epi16(255), _mm_max_epi16(zero, ((_mm_sub_epi16(_mm_sub_epi16(_mm_mulhi_epi16(c16__0_7, n298), _mm_mulhi_epi16(d16__0_7, n100)), _mm_mulhi_epi16(e16__0_7, n208)))))); // (298 * c - 100 * d - 208 * e + 128) + __m128i b16__0_7 = _mm_min_epi16(_mm_set1_epi16(255), _mm_max_epi16(zero, ((_mm_add_epi16(_mm_mulhi_epi16(c16__0_7, n298), _mm_mulhi_epi16(d16__0_7, n516)))))); // clampbyte((298 * c + 516 * d + 128) >> 8); + + // Compute R, G, B values for second 8 pixels + __m128i c16__8_F = _mm_slli_epi16(_mm_subs_epi16(y16__8_F, _mm_set1_epi16(16)), 4); + __m128i d16__8_F = _mm_slli_epi16(_mm_subs_epi16(u16__8_F, _mm_set1_epi16(128)), 4); // perhaps could have done these u,v to d,e before the duplication + __m128i e16__8_F = _mm_slli_epi16(_mm_subs_epi16(v16__8_F, _mm_set1_epi16(128)), 4); + __m128i r16__8_F = _mm_min_epi16(_mm_set1_epi16(255), _mm_max_epi16(zero, ((_mm_add_epi16(_mm_mulhi_epi16(c16__8_F, n298), _mm_mulhi_epi16(e16__8_F, n409)))))); // (298 * c + 409 * e + 128) ; // + __m128i g16__8_F = _mm_min_epi16(_mm_set1_epi16(255), _mm_max_epi16(zero, ((_mm_sub_epi16(_mm_sub_epi16(_mm_mulhi_epi16(c16__8_F, n298), _mm_mulhi_epi16(d16__8_F, n100)), _mm_mulhi_epi16(e16__8_F, n208)))))); // (298 * c - 100 * d - 208 * e + 128) + __m128i b16__8_F = _mm_min_epi16(_mm_set1_epi16(255), _mm_max_epi16(zero, ((_mm_add_epi16(_mm_mulhi_epi16(c16__8_F, n298), _mm_mulhi_epi16(d16__8_F, n516)))))); // clampbyte((298 * c + 516 * d + 128) >> 8); + + if (FORMAT == RS2_FORMAT_RGB8 || FORMAT == RS2_FORMAT_RGBA8) + { + // Shuffle separate R, G, B values into four registers storing four pixels each in (R, G, B, A) order + __m128i rg8__0_7 = _mm_unpacklo_epi8(_mm_shuffle_epi8(r16__0_7, evens_odds), _mm_shuffle_epi8(g16__0_7, evens_odds)); // hi to take the odds which are the upper bytes we care about + __m128i ba8__0_7 = _mm_unpacklo_epi8(_mm_shuffle_epi8(b16__0_7, evens_odds), _mm_set1_epi8(-1)); + __m128i rgba_0_3 = _mm_unpacklo_epi16(rg8__0_7, ba8__0_7); + __m128i rgba_4_7 = _mm_unpackhi_epi16(rg8__0_7, ba8__0_7); + + __m128i rg8__8_F = _mm_unpacklo_epi8(_mm_shuffle_epi8(r16__8_F, evens_odds), _mm_shuffle_epi8(g16__8_F, evens_odds)); // hi to take the odds which are the upper bytes we care about + __m128i ba8__8_F = _mm_unpacklo_epi8(_mm_shuffle_epi8(b16__8_F, evens_odds), _mm_set1_epi8(-1)); + __m128i rgba_8_B = _mm_unpacklo_epi16(rg8__8_F, ba8__8_F); + __m128i rgba_C_F = _mm_unpackhi_epi16(rg8__8_F, ba8__8_F); + + if (FORMAT == RS2_FORMAT_RGBA8) + { + // Store 16 pixels (64 bytes) at once + _mm_storeu_si128(dst++, rgba_0_3); + _mm_storeu_si128(dst++, rgba_4_7); + _mm_storeu_si128(dst++, rgba_8_B); + _mm_storeu_si128(dst++, rgba_C_F); + } + + if (FORMAT == RS2_FORMAT_RGB8) + { + // Shuffle rgb triples to the start and end of each register + __m128i rgb0 = _mm_shuffle_epi8(rgba_0_3, _mm_setr_epi8(3, 7, 11, 15, 0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14)); + __m128i rgb1 = _mm_shuffle_epi8(rgba_4_7, _mm_setr_epi8(0, 1, 2, 4, 3, 7, 11, 15, 5, 6, 8, 9, 10, 12, 13, 14)); + __m128i rgb2 = _mm_shuffle_epi8(rgba_8_B, _mm_setr_epi8(0, 1, 2, 4, 5, 6, 8, 9, 3, 7, 11, 15, 10, 12, 13, 14)); + __m128i rgb3 = _mm_shuffle_epi8(rgba_C_F, _mm_setr_epi8(0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 3, 7, 11, 15)); + + // Align registers and store 16 pixels (48 bytes) at once + _mm_storeu_si128(dst++, _mm_alignr_epi8(rgb1, rgb0, 4)); + _mm_storeu_si128(dst++, _mm_alignr_epi8(rgb2, rgb1, 8)); + _mm_storeu_si128(dst++, _mm_alignr_epi8(rgb3, rgb2, 12)); + } + } + + if 
(FORMAT == RS2_FORMAT_BGR8 || FORMAT == RS2_FORMAT_BGRA8) + { + // Shuffle separate R, G, B values into four registers storing four pixels each in (B, G, R, A) order + __m128i bg8__0_7 = _mm_unpacklo_epi8(_mm_shuffle_epi8(b16__0_7, evens_odds), _mm_shuffle_epi8(g16__0_7, evens_odds)); // hi to take the odds which are the upper bytes we care about + __m128i ra8__0_7 = _mm_unpacklo_epi8(_mm_shuffle_epi8(r16__0_7, evens_odds), _mm_set1_epi8(-1)); + __m128i bgra_0_3 = _mm_unpacklo_epi16(bg8__0_7, ra8__0_7); + __m128i bgra_4_7 = _mm_unpackhi_epi16(bg8__0_7, ra8__0_7); + + __m128i bg8__8_F = _mm_unpacklo_epi8(_mm_shuffle_epi8(b16__8_F, evens_odds), _mm_shuffle_epi8(g16__8_F, evens_odds)); // hi to take the odds which are the upper bytes we care about + __m128i ra8__8_F = _mm_unpacklo_epi8(_mm_shuffle_epi8(r16__8_F, evens_odds), _mm_set1_epi8(-1)); + __m128i bgra_8_B = _mm_unpacklo_epi16(bg8__8_F, ra8__8_F); + __m128i bgra_C_F = _mm_unpackhi_epi16(bg8__8_F, ra8__8_F); + + if (FORMAT == RS2_FORMAT_BGRA8) + { + // Store 16 pixels (64 bytes) at once + _mm_storeu_si128(dst++, bgra_0_3); + _mm_storeu_si128(dst++, bgra_4_7); + _mm_storeu_si128(dst++, bgra_8_B); + _mm_storeu_si128(dst++, bgra_C_F); + } + + if (FORMAT == RS2_FORMAT_BGR8) + { + // Shuffle rgb triples to the start and end of each register + __m128i bgr0 = _mm_shuffle_epi8(bgra_0_3, _mm_setr_epi8(3, 7, 11, 15, 0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14)); + __m128i bgr1 = _mm_shuffle_epi8(bgra_4_7, _mm_setr_epi8(0, 1, 2, 4, 3, 7, 11, 15, 5, 6, 8, 9, 10, 12, 13, 14)); + __m128i bgr2 = _mm_shuffle_epi8(bgra_8_B, _mm_setr_epi8(0, 1, 2, 4, 5, 6, 8, 9, 3, 7, 11, 15, 10, 12, 13, 14)); + __m128i bgr3 = _mm_shuffle_epi8(bgra_C_F, _mm_setr_epi8(0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 3, 7, 11, 15)); + + // Align registers and store 16 pixels (48 bytes) at once + _mm_storeu_si128(dst++, _mm_alignr_epi8(bgr1, bgr0, 4)); + _mm_storeu_si128(dst++, _mm_alignr_epi8(bgr2, bgr1, 8)); + _mm_storeu_si128(dst++, _mm_alignr_epi8(bgr3, bgr2, 12)); + } + } + } +#else // Generic code for when SSSE3 is not available. + auto src = reinterpret_cast(s); + auto dst = reinterpret_cast(d[0]); + for (; n; n -= 16, src += 32) + { + int16_t y[16] = { + src[1], src[3], src[5], src[7], + src[9], src[11], src[13], src[15], + src[17], src[19], src[21], src[23], + src[25], src[27], src[29], src[31], + }, u[16] = { + src[0], src[0], src[4], src[4], + src[8], src[8], src[12], src[12], + src[16], src[16], src[20], src[20], + src[24], src[24], src[28], src[28], + }, v[16] = { + src[2], src[2], src[6], src[6], + src[10], src[10], src[14], src[14], + src[18], src[18], src[22], src[22], + src[26], src[26], src[30], src[30], + }; + + uint8_t r[16], g[16], b[16]; + for (int i = 0; i < 16; i++) + { + int32_t c = y[i] - 16; + int32_t d = u[i] - 128; + int32_t e = v[i] - 128; + + int32_t t; +#define clamp(x) ((t=(x)) > 255 ? 255 : t < 0 ? 
0 : t) + r[i] = clamp((298 * c + 409 * e + 128) >> 8); + g[i] = clamp((298 * c - 100 * d - 208 * e + 128) >> 8); + b[i] = clamp((298 * c + 516 * d + 128) >> 8); +#undef clamp + } + + if (FORMAT == RS2_FORMAT_RGB8) + { + uint8_t out[16 * 3] = { + r[0], g[0], b[0], r[1], g[1], b[1], + r[2], g[2], b[2], r[3], g[3], b[3], + r[4], g[4], b[4], r[5], g[5], b[5], + r[6], g[6], b[6], r[7], g[7], b[7], + r[8], g[8], b[8], r[9], g[9], b[9], + r[10], g[10], b[10], r[11], g[11], b[11], + r[12], g[12], b[12], r[13], g[13], b[13], + r[14], g[14], b[14], r[15], g[15], b[15], + }; + librealsense::copy(dst, out, sizeof out); + dst += sizeof out; + continue; + } + + if (FORMAT == RS2_FORMAT_BGR8) + { + uint8_t out[16 * 3] = { + b[0], g[0], r[0], b[1], g[1], r[1], + b[2], g[2], r[2], b[3], g[3], r[3], + b[4], g[4], r[4], b[5], g[5], r[5], + b[6], g[6], r[6], b[7], g[7], r[7], + b[8], g[8], r[8], b[9], g[9], r[9], + b[10], g[10], r[10], b[11], g[11], r[11], + b[12], g[12], r[12], b[13], g[13], r[13], + b[14], g[14], r[14], b[15], g[15], r[15], + }; + librealsense::copy(dst, out, sizeof out); + dst += sizeof out; + continue; + } + + if (FORMAT == RS2_FORMAT_RGBA8) + { + uint8_t out[16 * 4] = { + r[0], g[0], b[0], 255, r[1], g[1], b[1], 255, + r[2], g[2], b[2], 255, r[3], g[3], b[3], 255, + r[4], g[4], b[4], 255, r[5], g[5], b[5], 255, + r[6], g[6], b[6], 255, r[7], g[7], b[7], 255, + r[8], g[8], b[8], 255, r[9], g[9], b[9], 255, + r[10], g[10], b[10], 255, r[11], g[11], b[11], 255, + r[12], g[12], b[12], 255, r[13], g[13], b[13], 255, + r[14], g[14], b[14], 255, r[15], g[15], b[15], 255, + }; + librealsense::copy(dst, out, sizeof out); + dst += sizeof out; + continue; + } + + if (FORMAT == RS2_FORMAT_BGRA8) + { + uint8_t out[16 * 4] = { + b[0], g[0], r[0], 255, b[1], g[1], r[1], 255, + b[2], g[2], r[2], 255, b[3], g[3], r[3], 255, + b[4], g[4], r[4], 255, b[5], g[5], r[5], 255, + b[6], g[6], r[6], 255, b[7], g[7], r[7], 255, + b[8], g[8], r[8], 255, b[9], g[9], r[9], 255, + b[10], g[10], r[10], 255, b[11], g[11], r[11], 255, + b[12], g[12], r[12], 255, b[13], g[13], r[13], 255, + b[14], g[14], r[14], 255, b[15], g[15], r[15], 255, + }; + librealsense::copy(dst, out, sizeof out); + dst += sizeof out; + continue; + } + } +#endif + } + + void unpack_uyvyc(rs2_format dst_format, rs2_stream dst_stream, byte * const d[], const byte * s, int w, int h, int actual_size) + { + switch (dst_format) + { + case RS2_FORMAT_RGB8: + unpack_uyvy(d, s, w, h, actual_size); + break; + case RS2_FORMAT_RGBA8: + unpack_uyvy(d, s, w, h, actual_size); + break; + case RS2_FORMAT_BGR8: + unpack_uyvy(d, s, w, h, actual_size); + break; + case RS2_FORMAT_BGRA8: + unpack_uyvy(d, s, w, h, actual_size); + break; + default: + LOG_ERROR("Unsupported format for UYVY conversion."); + break; + } + } + + ///////////////////////////// + // MJPEG unpacking routines // + ///////////////////////////// + void unpack_mjpeg(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + int w, h, bpp; + auto uncompressed_rgb = stbi_load_from_memory(source, actual_size, &w, &h, &bpp, false); + if (uncompressed_rgb) + { + auto uncompressed_size = w * h * bpp; + librealsense::copy(dest[0], uncompressed_rgb, uncompressed_size); + stbi_image_free(uncompressed_rgb); + } + else + LOG_ERROR("jpeg decode failed"); + } + + ///////////////////////////// + // BGR unpacking routines // + ///////////////////////////// + void unpack_rgb_from_bgr(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + auto count = 
width * height; + auto in = reinterpret_cast(source); + auto out = reinterpret_cast(dest[0]); + + librealsense::copy(out, in, count * 3); + for (auto i = 0; i < count; i++) + { + std::swap(out[i * 3], out[i * 3 + 2]); + } + } + + void yuy2_converter::process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + unpack_yuy2(_target_format, _target_stream, dest, source, width, height, actual_size); + } + + void uyvy_converter::process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + unpack_uyvyc(_target_format, _target_stream, dest, source, width, height, actual_size); + } + + void mjpeg_converter::process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + unpack_mjpeg(dest, source, width, height, actual_size); + } + + void bgr_to_rgb::process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + unpack_rgb_from_bgr(dest, source, width, height, actual_size); + } + +} diff --git a/src/proc/color-formats-converter.h b/src/proc/color-formats-converter.h new file mode 100644 index 0000000000..5c1260350a --- /dev/null +++ b/src/proc/color-formats-converter.h @@ -0,0 +1,64 @@ +// License: Apache 2.0. See LICENSE file in root directory. +// Copyright(c) 2019 Intel Corporation. All Rights Reserved. + +#pragma once + +#include "synthetic-stream.h" + +namespace librealsense +{ + class color_converter : public functional_processing_block + { + protected: + color_converter(const char* name, rs2_format target_format, rs2_stream target_stream = RS2_STREAM_COLOR) : + functional_processing_block(name, target_format, target_stream, RS2_EXTENSION_VIDEO_FRAME) {}; + }; + + class yuy2_converter : public color_converter + { + public: + yuy2_converter(rs2_format target_format) : + yuy2_converter("YUY Converter", target_format) {}; + + protected: + yuy2_converter(const char* name, rs2_format target_format) : + color_converter(name, target_format) {}; + void process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) override; + }; + + class uyvy_converter : public color_converter + { + public: + uyvy_converter(rs2_format target_format, rs2_stream target_stream = RS2_STREAM_COLOR) : + uyvy_converter("UYVY Converter", target_format, target_stream) {}; + + protected: + uyvy_converter(const char* name, rs2_format target_format, rs2_stream target_stream) : + color_converter(name, target_format, target_stream) {}; + void process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) override; + }; + + class mjpeg_converter : public color_converter + { + public: + mjpeg_converter(rs2_format target_format) : + mjpeg_converter("MJPEG Converter", target_format) {}; + + protected: + mjpeg_converter(const char* name, rs2_format target_format) : + color_converter(name, target_format) {}; + void process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) override; + }; + + class bgr_to_rgb : public color_converter + { + public: + bgr_to_rgb() : + bgr_to_rgb("BGR to RGB Converter") {}; + + protected: + bgr_to_rgb(const char* name) : + color_converter(name, RS2_FORMAT_RGB8, RS2_STREAM_INFRARED) {}; + void process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) override; + }; +} \ No newline at end of file diff --git a/src/proc/colorizer.cpp b/src/proc/colorizer.cpp index 4c85832f46..250457deb1 
100644 --- a/src/proc/colorizer.cpp +++ b/src/proc/colorizer.cpp @@ -219,7 +219,7 @@ namespace librealsense if (!frame || frame.is()) return false; - if (frame.get_profile().stream_type() != RS2_STREAM_DEPTH) + if (frame.get_profile().format() != RS2_FORMAT_Z16) return false; return true; diff --git a/src/proc/depth-formats-converter.cpp b/src/proc/depth-formats-converter.cpp new file mode 100644 index 0000000000..a16dc73f35 --- /dev/null +++ b/src/proc/depth-formats-converter.cpp @@ -0,0 +1,139 @@ +// License: Apache 2.0. See LICENSE file in root directory. +// Copyright(c) 2019 Intel Corporation. All Rights Reserved. + +#include "depth-formats-converter.h" + +#include "stream.h" + +namespace librealsense +{ + void unpack_z16_y8_from_sr300_inzi(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + auto count = width * height; + auto in = reinterpret_cast(source); + auto out_ir = reinterpret_cast(dest[1]); +#ifdef RS2_USE_CUDA + rscuda::unpack_z16_y8_from_sr300_inzi_cuda(out_ir, in, count); +#else + for (int i = 0; i < count; ++i) *out_ir++ = *in++ >> 2; +#endif + librealsense::copy(dest[0], in, count * 2); + } + + void unpack_z16_y16_from_sr300_inzi(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + auto count = width * height; + auto in = reinterpret_cast(source); + auto out_ir = reinterpret_cast(dest[1]); +#ifdef RS2_USE_CUDA + rscuda::unpack_z16_y16_from_sr300_inzi_cuda(out_ir, in, count); +#else + for (int i = 0; i < count; ++i) *out_ir++ = *in++ << 6; +#endif + librealsense::copy(dest[0], in, count * 2); + } + + void unpack_inzi(rs2_format dst_ir_format, byte * const d[], const byte * s, int width, int height, int actual_size) + { + switch (dst_ir_format) + { + case RS2_FORMAT_Y8: + unpack_z16_y8_from_sr300_inzi(d, s, width, height, actual_size); + break; + case RS2_FORMAT_Y16: + unpack_z16_y16_from_sr300_inzi(d, s, width, height, actual_size); + break; + default: + LOG_ERROR("Unsupported format for INZI conversion."); + break; + } + } + + template void unpack_pixels(byte * const dest[], int count, const SOURCE * source, UNPACK unpack, int actual_size) + { + auto out = reinterpret_cast(dest[0]); + for (int i = 0; i < count; ++i) *out++ = unpack(*source++); + } + + void unpack_y16_from_y16_10(byte * const d[], const byte * s, int width, int height, int actual_size) { unpack_pixels(d, width * height, reinterpret_cast(s), [](uint16_t pixel) -> uint16_t { return pixel << 6; }, actual_size); } + void unpack_y8_from_y16_10(byte * const d[], const byte * s, int width, int height, int actual_size) { unpack_pixels(d, width * height, reinterpret_cast(s), [](uint16_t pixel) -> uint8_t { return pixel >> 2; }, actual_size); } + + void unpack_invi(rs2_format dst_format, byte * const d[], const byte * s, int width, int height, int actual_size) + { + switch (dst_format) + { + case RS2_FORMAT_Y8: + unpack_y8_from_y16_10(d, s, width, height, actual_size); + break; + case RS2_FORMAT_Y16: + unpack_y16_from_y16_10(d, s, width, height, actual_size); + break; + default: + LOG_ERROR("Unsupported format for INVI conversion."); + break; + } + } + + void copy_raw10(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + auto count = width * height; // num of pixels + librealsense::copy(dest[0], source, size_t(5.0 * (count / 4.0))); + } + + void unpack_y10bpack(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + auto count = width * height / 4; // num of pixels + uint8_t * 
from = (uint8_t*)(source); + uint16_t * to = (uint16_t*)(dest[0]); + + // Put the 10 bit into the msb of uint16_t + for (int i = 0; i < count; i++, from += 5) // traverse macro-pixels + { + *to++ = ((from[0] << 2) | (from[4] & 3)) << 6; + *to++ = ((from[1] << 2) | ((from[4] >> 2) & 3)) << 6; + *to++ = ((from[2] << 2) | ((from[4] >> 4) & 3)) << 6; + *to++ = ((from[3] << 2) | ((from[4] >> 6) & 3)) << 6; + } + } + + void unpack_w10(rs2_format dst_format, byte * const d[], const byte * s, int width, int height, int actual_size) + { + switch (dst_format) + { + case RS2_FORMAT_W10: + copy_raw10(d, s, width, height, actual_size); + break; + case RS2_FORMAT_Y10BPACK: + unpack_y10bpack(d, s, width, height, actual_size); + break; + default: + LOG_ERROR("Unsupported format for W10 unpacking."); + break; + } + } + + // INZI converter + inzi_converter::inzi_converter(const char * name, rs2_format target_ir_format) + : interleaved_functional_processing_block(name, RS2_FORMAT_INZI, RS2_FORMAT_Z16, RS2_STREAM_DEPTH, RS2_EXTENSION_DEPTH_FRAME, 0, + target_ir_format, RS2_STREAM_INFRARED, RS2_EXTENSION_VIDEO_FRAME, 1) + {} + + void inzi_converter::process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + // convension: right frame is IR and left is Z16 + unpack_inzi(_right_target_format, dest, source, width, height, actual_size); + } + + void invi_converter::process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + unpack_invi(_target_format, dest, source, width, height, actual_size); + } + + w10_converter::w10_converter(const char * name, const rs2_format& target_format) : + functional_processing_block(name, target_format, RS2_STREAM_INFRARED, RS2_EXTENSION_VIDEO_FRAME) {} + + void w10_converter::process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + unpack_w10(_target_format, dest, source, width, height, actual_size); + } +} diff --git a/src/proc/depth-formats-converter.h b/src/proc/depth-formats-converter.h new file mode 100644 index 0000000000..60e06e88fe --- /dev/null +++ b/src/proc/depth-formats-converter.h @@ -0,0 +1,45 @@ +// License: Apache 2.0. See LICENSE file in root directory. +// Copyright(c) 2019 Intel Corporation. All Rights Reserved. 
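// Illustrative sketch (not part of the patch): a standalone restatement of the RAW10
// unpacking performed by unpack_y10bpack() above. Each 5-byte macro-pixel carries four
// 10-bit pixels: bytes 0-3 hold the upper 8 bits of each pixel and byte 4 holds the four
// 2-bit remainders. The 10-bit value is then shifted left by 6 so it occupies the MSBs of
// a uint16_t, matching the Y10BPACK convention used here. The function name is hypothetical.
#include <array>
#include <cstdint>

std::array<uint16_t, 4> unpack_raw10_macro_pixel(const uint8_t packed[5])
{
    std::array<uint16_t, 4> out{};
    for (int p = 0; p < 4; ++p)
    {
        auto ten_bits = uint16_t((packed[p] << 2) | ((packed[4] >> (2 * p)) & 3));
        out[p] = uint16_t(ten_bits << 6); // place the 10 significant bits in the MSBs
    }
    return out;
}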
+ +#pragma once + +#include "synthetic-stream.h" +#include "option.h" +#include "image.h" + +namespace librealsense +{ + class inzi_converter : public interleaved_functional_processing_block + { + public: + inzi_converter(rs2_format target_ir_format) : + inzi_converter("INZI to depth and IR Transform", target_ir_format) {}; + + protected: + inzi_converter(const char* name, rs2_format target_ir_format); + void process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) override; + }; + + class invi_converter : public functional_processing_block + { + public: + invi_converter(rs2_format target_format) : + invi_converter("INVI to IR Transform", target_format) {}; + + protected: + invi_converter(const char* name, rs2_format target_format) : + functional_processing_block(name, target_format, RS2_STREAM_INFRARED, RS2_EXTENSION_VIDEO_FRAME) {}; + void process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) override; + }; + + class w10_converter : public functional_processing_block + { + public: + w10_converter(const rs2_format& target_format) : + w10_converter("W10 Transform", target_format) {}; + + protected: + w10_converter(const char* name, const rs2_format& target_format); + void process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) override; + }; +} diff --git a/src/proc/identity-processing-block.cpp b/src/proc/identity-processing-block.cpp new file mode 100644 index 0000000000..d9d75871dc --- /dev/null +++ b/src/proc/identity-processing-block.cpp @@ -0,0 +1,17 @@ +#include "identity-processing-block.h" +#include "image.h" + +librealsense::identity_processing_block::identity_processing_block() : + identity_processing_block("Identity processing block") +{ +} + +librealsense::identity_processing_block::identity_processing_block(const char * name) : + stream_filter_processing_block(name) +{ +} + +rs2::frame librealsense::identity_processing_block::process_frame(const rs2::frame_source & source, const rs2::frame & f) +{ + return f; +} diff --git a/src/proc/identity-processing-block.h b/src/proc/identity-processing-block.h new file mode 100644 index 0000000000..7019dde893 --- /dev/null +++ b/src/proc/identity-processing-block.h @@ -0,0 +1,16 @@ +#pragma once + +#include "synthetic-stream.h" + +namespace librealsense +{ + class LRS_EXTENSION_API identity_processing_block : public stream_filter_processing_block + { + public: + identity_processing_block(); + + protected: + identity_processing_block(const char* name); + rs2::frame process_frame(const rs2::frame_source& source, const rs2::frame& f) override; + }; +} \ No newline at end of file diff --git a/src/proc/motion-transform.cpp b/src/proc/motion-transform.cpp new file mode 100644 index 0000000000..b721934a7c --- /dev/null +++ b/src/proc/motion-transform.cpp @@ -0,0 +1,119 @@ +// License: Apache 2.0. See LICENSE file in root directory. +// Copyright(c) 2019 Intel Corporation. All Rights Reserved. 
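// Illustrative sketch (not part of the patch): the planar split behind inzi_converter,
// as read by unpack_z16_y8_from_sr300_inzi() above. An INZI buffer of `count` pixels
// starts with `count` 16-bit words holding 10-bit IR samples, followed by `count` Z16
// depth values; IR is reduced to Y8 by dropping its two least significant bits and the
// depth plane is copied through unchanged. The function name is hypothetical.
#include <cstddef>
#include <cstdint>
#include <cstring>

void split_inzi(const uint16_t* inzi, std::size_t count, uint8_t* ir_y8, uint16_t* depth_z16)
{
    for (std::size_t i = 0; i < count; ++i)
        ir_y8[i] = uint8_t(inzi[i] >> 2);                           // 10-bit IR -> Y8
    std::memcpy(depth_z16, inzi + count, count * sizeof(uint16_t)); // Z16 plane follows the IR plane
}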
+ +#include "motion-transform.h" + +#include "../include/librealsense2/hpp/rs_sensor.hpp" +#include "../include/librealsense2/hpp/rs_processing.hpp" + +#include "context.h" +#include "environment.h" +#include "image.h" + +namespace librealsense +{ + template void copy_hid_axes(byte * const dest[], const byte * source, double factor) + { + using namespace librealsense; + auto hid = (hid_data*)(source); + + auto res = float3{ float(hid->x), float(hid->y), float(hid->z) } *float(factor); + + librealsense::copy(dest[0], &res, sizeof(float3)); + } + + // The Accelerometer input format: signed int 16bit. data units 1LSB=0.001g; + // Librealsense output format: floating point 32bit. units m/s^2, + template void unpack_accel_axes(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + static constexpr float gravity = 9.80665f; // Standard Gravitation Acceleration + static constexpr double accelerator_transform_factor = 0.001*gravity; + + copy_hid_axes(dest, source, accelerator_transform_factor); + } + + // The Gyro input format: signed int 16bit. data units 1LSB=0.1deg/sec; + // Librealsense output format: floating point 32bit. units rad/sec, + template void unpack_gyro_axes(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + static const double gyro_transform_factor = deg2rad(0.1); + + copy_hid_axes(dest, source, gyro_transform_factor); + } + + motion_transform::motion_transform(rs2_format target_format, rs2_stream target_stream, mm_calib_handler* mm_calib, bool is_motion_correction_enabled) + : motion_transform("Motion Transform", target_format, target_stream, mm_calib, is_motion_correction_enabled) + {} + + motion_transform::motion_transform(const char* name, rs2_format target_format, rs2_stream target_stream, mm_calib_handler* mm_calib, bool is_motion_correction_enabled) + : functional_processing_block(name, target_format, target_stream, RS2_EXTENSION_MOTION_FRAME), + _mm_calib(mm_calib), + _is_motion_correction_enabled(is_motion_correction_enabled) + {} + + rs2::frame motion_transform::process_frame(const rs2::frame_source& source, const rs2::frame& f) + { + auto& ret = functional_processing_block::process_frame(source, f); + correct_motion(&ret); + + return ret; + } + + void motion_transform::correct_motion(rs2::frame* f) + { + if (!_mm_calib) + return; + + auto xyz = (float3*)(f->get_data()); + + try + { + auto accel_intrinsic = _mm_calib->get_intrinsic(RS2_STREAM_ACCEL); + auto gyro_intrinsic = _mm_calib->get_intrinsic(RS2_STREAM_GYRO); + + if (_is_motion_correction_enabled) + { + auto&& s = f->get_profile().stream_type(); + if (s == RS2_STREAM_ACCEL) + *xyz = (accel_intrinsic.sensitivity * (*xyz)) - accel_intrinsic.bias; + + if (s == RS2_STREAM_GYRO) + *xyz = gyro_intrinsic.sensitivity * (*xyz) - gyro_intrinsic.bias; + } + } + catch (const std::exception& ex) + { + LOG_INFO("Motion Module - no intrinsic calibration, " << ex.what()); + } + + // The IMU sensor orientation shall be aligned with depth sensor's coordinate system + *xyz = _mm_calib->imu_to_depth_alignment() * (*xyz); + } + + acceleration_transform::acceleration_transform(mm_calib_handler * mm_calib, bool is_motion_correction_enabled) + : acceleration_transform("Acceleration Transform", mm_calib, is_motion_correction_enabled) + {} + + acceleration_transform::acceleration_transform(const char * name, mm_calib_handler * mm_calib, bool is_motion_correction_enabled) + : motion_transform(name, RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_ACCEL, mm_calib, 
is_motion_correction_enabled) + {} + + void acceleration_transform::process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + unpack_accel_axes(dest, source, width, height, actual_size); + } + + gyroscope_transform::gyroscope_transform(mm_calib_handler * mm_calib, bool is_motion_correction_enabled) + : gyroscope_transform("Gyroscope Transform", mm_calib, is_motion_correction_enabled) + {} + + gyroscope_transform::gyroscope_transform(const char * name, mm_calib_handler * mm_calib, bool is_motion_correction_enabled) + : motion_transform(name, RS2_FORMAT_MOTION_XYZ32F, RS2_STREAM_GYRO, mm_calib, is_motion_correction_enabled) + {} + + void gyroscope_transform::process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + unpack_gyro_axes(dest, source, width, height, actual_size); + } +} + diff --git a/src/proc/motion-transform.h b/src/proc/motion-transform.h new file mode 100644 index 0000000000..29e8029001 --- /dev/null +++ b/src/proc/motion-transform.h @@ -0,0 +1,47 @@ +// License: Apache 2.0. See LICENSE file in root directory. +// Copyright(c) 2019 Intel Corporation. All Rights Reserved. + +#pragma once + +#include "synthetic-stream.h" + +#include "ds5/ds5-motion.h" + +namespace librealsense +{ + class motion_transform : public functional_processing_block + { + public: + motion_transform(rs2_format target_format, rs2_stream target_stream, mm_calib_handler* mm_calib = nullptr, bool is_motion_correction_enabled = false); + + protected: + motion_transform(const char* name, rs2_format target_format, rs2_stream target_stream, mm_calib_handler* mm_calib, bool is_motion_correction_enabled); + rs2::frame process_frame(const rs2::frame_source& source, const rs2::frame& f) override; + + private: + void correct_motion(rs2::frame* f); + + mm_calib_handler* _mm_calib = nullptr; + bool _is_motion_correction_enabled = false; + }; + + class acceleration_transform : public motion_transform + { + public: + acceleration_transform(mm_calib_handler* mm_calib = nullptr, bool is_motion_correction_enabled = false); + + protected: + acceleration_transform(const char* name, mm_calib_handler* mm_calib, bool is_motion_correction_enabled); + void process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) override; + }; + + class gyroscope_transform : public motion_transform + { + public: + gyroscope_transform(mm_calib_handler* mm_calib = nullptr, bool is_motion_correction_enabled = false); + + protected: + gyroscope_transform(const char* name, mm_calib_handler* mm_calib, bool is_motion_correction_enabled); + void process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) override; + }; +} \ No newline at end of file diff --git a/src/proc/processing-blocks-factory.cpp b/src/proc/processing-blocks-factory.cpp index c3e620c905..7c697d8e26 100644 --- a/src/proc/processing-blocks-factory.cpp +++ b/src/proc/processing-blocks-factory.cpp @@ -2,9 +2,13 @@ // Copyright(c) 2017 Intel Corporation. All Rights Reserved. 
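// Illustrative sketch (not part of the patch): the scale factors applied by the motion
// transforms above. Accelerometer samples arrive as signed 16-bit counts at 0.001 g per
// LSB and are reported in m/s^2; gyro samples arrive at 0.1 deg/sec per LSB and are
// reported in rad/sec (the intrinsic sensitivity/bias correction is applied separately by
// motion_transform::correct_motion). The type and function names are hypothetical stand-ins.
#include <cstdint>

struct xyz_f { float x, y, z; };

inline xyz_f scale_accel(int16_t x, int16_t y, int16_t z)
{
    constexpr float g = 9.80665f;   // standard gravity
    constexpr float k = 0.001f * g; // 1 LSB = 0.001 g  ->  m/s^2
    return { x * k, y * k, z * k };
}

inline xyz_f scale_gyro(int16_t x, int16_t y, int16_t z)
{
    constexpr float k = 0.1f * 3.14159265f / 180.f; // 1 LSB = 0.1 deg/sec  ->  rad/sec
    return { x * k, y * k, z * k };
}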
 #include "processing-blocks-factory.h"
+
 #include "sse/sse-align.h"
 #include "cuda/cuda-align.h"
+#include "stream.h"
+#include "proc/identity-processing-block.h"
+
 namespace librealsense
 {
 #ifdef RS2_USE_CUDA
@@ -25,4 +29,72 @@ namespace librealsense
     }
 #endif // __SSSE3__
 #endif // RS2_USE_CUDA
+
+    processing_block_factory::processing_block_factory(const std::vector<stream_profile>& from, const std::vector<stream_profile>& to, std::function<std::shared_ptr<processing_block>(void)> generate_func) :
+        _source_info(from), _target_info(to), generate_processing_block(generate_func)
+    {}
+
+    std::shared_ptr<processing_block> processing_block_factory::generate()
+    {
+        return generate_processing_block();
+    }
+
+    processing_block_factory processing_block_factory::create_id_pbf(rs2_format format, rs2_stream stream, int idx)
+    {
+        processing_block_factory id_pbf = {
+            { {format} },
+            { {format, stream, idx} },
+            []() { return std::make_shared<identity_processing_block>(); }
+        };
+        return id_pbf;
+    }
+
+    bool processing_block_factory::operator==(const processing_block_factory & rhs) const
+    {
+        const auto&& rhs_src_fmts = rhs.get_source_info();
+        for (auto src : _source_info)
+        {
+            if (std::find_if(rhs_src_fmts.begin(), rhs_src_fmts.end(), [&src](auto fmt) {
+                return src == fmt;
+            }) == rhs_src_fmts.end())
+                return false;
+        }
+
+        const auto&& rhs_tgt_fmts = rhs.get_target_info();
+        for (auto tgt : _target_info)
+        {
+            if (std::find_if(rhs_tgt_fmts.begin(), rhs_tgt_fmts.end(), [&tgt](auto rhs_tgt) {
+                return tgt == rhs_tgt;
+            }) == rhs_tgt_fmts.end())
+                return false;
+        }
+
+        return true;
+    }
+
+    bool processing_block_factory::has_source(const std::shared_ptr<stream_profile_interface>& source) const
+    {
+        for (auto s : _source_info)
+        {
+            if (source->get_format() == s.format)
+                return true;
+        }
+        return false;
+    }
+
+    stream_profiles processing_block_factory::find_satisfied_requests(const stream_profiles& requests, const stream_profiles& supported_profiles) const
+    {
+        // Return all requests which are related to this processing block factory.
+
+        stream_profiles satisfied_req;
+        for (auto&& req : requests)
+        {
+            auto equal_profiles_predicate = [&req](const std::shared_ptr<stream_profile_interface>& sp) {
+                return to_profile(req.get()) == to_profile(sp.get());
+            };
+            if (std::any_of(begin(supported_profiles), end(supported_profiles), equal_profiles_predicate))
+                satisfied_req.push_back(req);
+        }
+        return satisfied_req;
+    }
 }
diff --git a/src/proc/processing-blocks-factory.h b/src/proc/processing-blocks-factory.h
index bbad0ba832..d3bd6a781a 100644
--- a/src/proc/processing-blocks-factory.h
+++ b/src/proc/processing-blocks-factory.h
@@ -4,8 +4,36 @@
 #pragma once

 #include "align.h"
+#include "types.h"
+
 namespace librealsense
 {
     std::shared_ptr<processing_block> create_align(rs2_stream align_to);
+
+    class processing_block_factory
+    {
+    public:
+        processing_block_factory() {};
+
+        processing_block_factory(const std::vector<stream_profile>& from,
+            const std::vector<stream_profile>& to,
+            std::function<std::shared_ptr<processing_block>(void)> generate_func);
+
+        bool operator==(const processing_block_factory& rhs) const;
+
+        std::vector<stream_profile> get_source_info() const { return _source_info; }
+        std::vector<stream_profile> get_target_info() const { return _target_info; }
+        std::shared_ptr<processing_block> generate();
+
+        static processing_block_factory create_id_pbf(rs2_format format, rs2_stream stream, int idx = 0);
+
+        stream_profiles find_satisfied_requests(const stream_profiles& sp, const stream_profiles& supported_profiles) const;
+        bool has_source(const std::shared_ptr<stream_profile_interface>& source) const;
+
+    protected:
+        std::vector<stream_profile> _source_info;
+        std::vector<stream_profile> _target_info;
+        std::function<std::shared_ptr<processing_block>(void)> generate_processing_block;
+    };
 }
diff --git a/src/proc/rotation-transform.cpp b/src/proc/rotation-transform.cpp
new file mode 100644
index 0000000000..7091d1b826
--- /dev/null
+++ b/src/proc/rotation-transform.cpp
@@ -0,0 +1,162 @@
+// License: Apache 2.0. See LICENSE file in root directory.
+// Copyright(c) 2019 Intel Corporation. All Rights Reserved.
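// Illustrative sketch (not part of the patch): the idea behind processing_block_factory
// above. A factory couples the raw source formats it can consume with the target profiles
// it can publish, plus a generator callback that creates the concrete processing block on
// demand; create_id_pbf() is the degenerate case where the "conversion" is an identity
// passthrough. All names below are hypothetical simplifications of the librealsense types.
#include <functional>
#include <memory>
#include <vector>

struct mini_profile { int format; int stream; int index; };
struct mini_block {}; // stand-in for a concrete processing block

struct mini_block_factory
{
    std::vector<int> source_formats;                       // formats this factory can consume
    std::vector<mini_profile> target_profiles;             // profiles it can expose to the user
    std::function<std::shared_ptr<mini_block>()> generate; // creates the block only when needed
};

// Identity factory: publish the format unchanged and generate a passthrough block.
inline mini_block_factory make_identity_factory(int format, int stream, int index = 0)
{
    return { { format },
             { { format, stream, index } },
             [] { return std::make_shared<mini_block>(); } };
}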
+ +#include "rotation-transform.h" + +#include "../include/librealsense2/hpp/rs_sensor.hpp" +#include "../include/librealsense2/hpp/rs_processing.hpp" +#include "context.h" +#include "image.h" +#include "stream.h" + +namespace librealsense +{ + //// Unpacking routines //// + template + void unpack_l500_image_optimized(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + auto width_out = height; + auto height_out = width; + + auto out = dest[0]; + byte buffer[8][8 * SIZE]; // = { 0 }; + for (int i = 0; i <= height - 8; i = i + 8) + { + for (int j = 0; j <= width - 8; j = j + 8) + { + for (int ii = 0; ii < 8; ++ii) + { + for (int jj = 0; jj < 8; ++jj) + { + auto source_index = ((j + jj) + (width * (i + ii))) * SIZE; + memcpy((void*)(&buffer[7 - jj][(7 - ii) * SIZE]), &source[source_index], SIZE); + } + } + + for (int ii = 0; ii < 8; ++ii) + { + auto out_index = (((height_out - 8 - j + 1) * width_out) - i - 8 + (ii)* width_out); + memcpy(&out[(out_index)* SIZE], &(buffer[ii]), 8 * SIZE); + } + } + } + } + + template + void unpack_l500_image(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + auto width_out = height; + auto height_out = width; + + auto out = dest[0]; + for (int i = 0; i < height; ++i) + { + auto row_offset = i * width; + for (int j = 0; j < width; ++j) + { + auto out_index = (((height_out - j) * width_out) - i - 1) * SIZE; + librealsense::copy((void*)(&out[out_index]), &(source[(row_offset + j) * SIZE]), SIZE); + } + } + } + + void unpack_confidence(byte * const dest[], const byte * source, int width, int height, int actual_size) + { +#pragma pack (push, 1) + struct lsb_msb + { + unsigned lsb : 4; + unsigned msb : 4; + }; +#pragma pack(pop) + + unpack_l500_image<1>(dest, source, width, height, actual_size); + auto out = dest[0]; + for (int i = (width - 1), out_i = ((width - 1) * 2); i >= 0; --i, out_i -= 2) + { + auto row_offset = i * height; + for (int j = 0; j < height; ++j) + { + auto val = *(reinterpret_cast(&out[(row_offset + j)])); + auto out_index = out_i * height + j; + out[out_index] = val.lsb << 4; + out[out_index + height] = val.msb << 4; + } + } + } + + //// Processing routines//// + rotation_transform::rotation_transform(rs2_format target_format, rs2_stream target_stream, rs2_extension extension_type) + : rotation_transform("Rotation Transform", target_format, target_stream, extension_type) + {} + + rotation_transform::rotation_transform(const char* name, rs2_format target_format, rs2_stream target_stream, rs2_extension extension_type) + : functional_processing_block(name, target_format, target_stream, extension_type) + { + _stream_filter.format = _target_format; + _stream_filter.stream = _target_stream; + } + + void rotation_transform::init_profiles_info(const rs2::frame* f) + { + auto p = f->get_profile(); + if (p.get() != _source_stream_profile.get()) + { + _source_stream_profile = p; + _target_stream_profile = p.clone(p.stream_type(), p.stream_index(), _target_format); + _target_bpp = get_image_bpp(_target_format) / 8; + + // Set the unique ID as the original frame. + // The frames are piped through a syncer and must have the origin UID. 
+ auto target_spi = (stream_profile_interface*)_target_stream_profile.get()->profile; + target_spi->set_unique_id(p.unique_id()); + } + } + + depth_rotation_transform::depth_rotation_transform() : + depth_rotation_transform("Depth Rotation Transform") + {} + + depth_rotation_transform::depth_rotation_transform(const char * name) + : rotation_transform(name, RS2_FORMAT_Z16, RS2_STREAM_DEPTH, RS2_EXTENSION_DEPTH_FRAME) + {} + + void depth_rotation_transform::process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + int rotated_width = height; + int rotated_height = width; + unpack_l500_image_optimized<2>(dest, source, rotated_width, rotated_height, actual_size); + } + + ir_rotation_transform::ir_rotation_transform() : + ir_rotation_transform("IR Rotation Transform") + {} + + ir_rotation_transform::ir_rotation_transform(const char * name) + : rotation_transform(name, RS2_FORMAT_Y8, RS2_STREAM_INFRARED, RS2_EXTENSION_VIDEO_FRAME) + {} + + void ir_rotation_transform::process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + int rotated_width = height; + int rotated_height = width; + unpack_l500_image_optimized<1>(dest, source, rotated_width, rotated_height, actual_size); + } + + confidence_rotation_transform::confidence_rotation_transform() : + confidence_rotation_transform("Confidence Rotation Transform") + {} + + confidence_rotation_transform::confidence_rotation_transform(const char * name) + : rotation_transform(name, RS2_FORMAT_RAW8, RS2_STREAM_CONFIDENCE, RS2_EXTENSION_VIDEO_FRAME) + {} + + void confidence_rotation_transform::process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + int rotated_width = height; + int rotated_height = width; + + // Workaround: the height is given by bytes and not by pixels. + unpack_confidence(dest, source, rotated_width / 2, rotated_height, actual_size); + } +} diff --git a/src/proc/rotation-transform.h b/src/proc/rotation-transform.h new file mode 100644 index 0000000000..adf5e46321 --- /dev/null +++ b/src/proc/rotation-transform.h @@ -0,0 +1,50 @@ +// License: Apache 2.0. See LICENSE file in root directory. +// Copyright(c) 2019 Intel Corporation. All Rights Reserved. + +#pragma once + +#include "synthetic-stream.h" + +namespace librealsense +{ + // Processes rotated frames. 
+ class rotation_transform : public functional_processing_block + { + public: + rotation_transform(rs2_format target_format, rs2_stream target_stream, rs2_extension extension_type); + rotation_transform(const char* name, rs2_format target_format, rs2_stream target_stream, rs2_extension extension_type); + + protected: + void init_profiles_info(const rs2::frame* f) override; + }; + + class depth_rotation_transform : public rotation_transform + { + public: + depth_rotation_transform(); + + protected: + depth_rotation_transform(const char* name); + void process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) override; + }; + + class ir_rotation_transform : public rotation_transform + { + public: + ir_rotation_transform(); + + protected: + ir_rotation_transform(const char* name); + void process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) override; + }; + + class confidence_rotation_transform : public rotation_transform + { + public: + confidence_rotation_transform(); + + protected: + confidence_rotation_transform(const char* name); + void process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) override; + }; +} diff --git a/src/proc/synthetic-stream.cpp b/src/proc/synthetic-stream.cpp index 2e728d38e5..2eb7e80f64 100644 --- a/src/proc/synthetic-stream.cpp +++ b/src/proc/synthetic-stream.cpp @@ -1,9 +1,12 @@ // License: Apache 2.0. See LICENSE file in root directory. // Copyright(c) 2017 Intel Corporation. All Rights Reserved. -#include "core/video.h" #include "proc/synthetic-stream.h" + +#include "core/video.h" #include "option.h" +#include "context.h" +#include "stream.h" namespace librealsense { @@ -206,6 +209,60 @@ namespace librealsense register_option(RS2_OPTION_STREAM_INDEX_FILTER, index_selector); } + functional_processing_block::functional_processing_block(const char * name, rs2_format target_format, rs2_stream target_stream, rs2_extension extension_type) : + stream_filter_processing_block(name), _target_format(target_format), _target_stream(target_stream), _extension_type(extension_type) {} + + void functional_processing_block::init_profiles_info(const rs2::frame * f) + { + auto p = f->get_profile(); + if (p.get() != _source_stream_profile.get()) + { + _source_stream_profile = p; + _target_stream_profile = p.clone(p.stream_type(), p.stream_index(), _target_format); + _target_bpp = get_image_bpp(_target_format) / 8; + } + } + + rs2::frame functional_processing_block::process_frame(const rs2::frame_source & source, const rs2::frame & f) + { + auto&& ret = prepare_frame(source, f); + int width = 0; + int height = 0; + auto vf = ret.as(); + if (vf) + { + width = vf.get_width(); + height = vf.get_height(); + } + byte* planes[1]; + planes[0] = (byte*)ret.get_data(); + + process_function(planes, (const byte*)f.get_data(), width, height, height * width * _target_bpp); + + return ret; + } + + rs2::frame functional_processing_block::prepare_frame(const rs2::frame_source & source, const rs2::frame & f) + { + init_profiles_info(&f); + auto vf = f.as(); + if (vf) + { + int width = vf.get_width(); + int height = vf.get_height(); + return source.allocate_video_frame(_target_stream_profile, f, _target_bpp, + width, height, width * _target_bpp, _extension_type); + } + auto mf = f.as(); + if (mf) + { + int width = f.get_data_size(); + int height = 1; + return source.allocate_motion_frame(_target_stream_profile, f, + width, height, _extension_type); + } + } + bool 
is_z_or_disparity(rs2_format format) { if (format == RS2_FORMAT_Z16 || format == RS2_FORMAT_DISPARITY16 || format == RS2_FORMAT_DISPARITY32) @@ -329,7 +386,7 @@ namespace librealsense frame_additional_data data = of->additional_data; auto res = _actual_source.alloc_frame(frame_type, stride * height, data, true); if (!res) throw wrong_api_call_sequence_exception("Out of frame resources!"); - vf = static_cast(res); + vf = dynamic_cast(res); vf->metadata_parsers = of->metadata_parsers; vf->assign(width, height, stride, bpp); vf->set_sensor(original->get_sensor()); @@ -344,6 +401,24 @@ namespace librealsense return res; } + frame_interface* synthetic_source::allocate_motion_frame(std::shared_ptr stream, + frame_interface* original, + int width, + int height, + rs2_extension frame_type) + { + auto of = dynamic_cast(original); + frame_additional_data data = of->additional_data; + auto res = _actual_source.alloc_frame(frame_type, width * height, data, true); + if (!res) throw wrong_api_call_sequence_exception("Out of frame resources!"); + auto mf = dynamic_cast(res); + mf->metadata_parsers = of->metadata_parsers; + mf->set_sensor(original->get_sensor()); + res->set_stream(stream); + + return res; + } + int get_embeded_frames_size(frame_interface* f) { if (f == nullptr) return 0; @@ -410,4 +485,156 @@ namespace librealsense return res; } + + composite_processing_block::composite_processing_block() : + composite_processing_block("Composite Processing Block") + {} + + composite_processing_block::composite_processing_block(const char * name) : + processing_block(name) + {} + + processing_block & composite_processing_block::get(rs2_option option) + { + // Find the first block which supports the option. + // It doesn't matter which one is selected, as long as it supports the option, because of the + // option propogation caused by bypass_option::set(value) + int i = 0; + for (i = 0; i < _processing_blocks.size(); i++) + { + if (_processing_blocks[i]->supports_option(option)) + { + auto val = _processing_blocks[i]->get_option(option).query(); + if (val > 0.f) break; + } + } + + update_info(RS2_CAMERA_INFO_NAME, _processing_blocks[i]->get_info(RS2_CAMERA_INFO_NAME)); + + return *_processing_blocks[i]; + } + + void composite_processing_block::add(std::shared_ptr block) + { + _processing_blocks.push_back(block); + + const auto&& supported_options = block->get_supported_options(); + for (auto&& opt : supported_options) + { + register_option(opt, std::make_shared(this, opt)); + } + + update_info(RS2_CAMERA_INFO_NAME, block->get_info(RS2_CAMERA_INFO_NAME)); + } + + void composite_processing_block::set_output_callback(frame_callback_ptr callback) + { + // Each processing block will process the preceding processing block output frame. + size_t i = 0; + for (i = 1; i < _processing_blocks.size(); i++) + { + auto output_cb = [i, this](frame_holder fh) { + _processing_blocks[i]->invoke(std::move(fh)); + }; + _processing_blocks[i - 1]->set_output_callback(std::make_shared>(output_cb)); + } + + // Set the output callback of the composite processing block as last processing block in the vector. + _processing_blocks.back()->set_output_callback(callback); + } + + void composite_processing_block::invoke(frame_holder frames) + { + // Invoke the first processing block. + // This will trigger processing the frame in a chain by the order of the given processing blocks vector. 
+ _processing_blocks.front()->invoke(std::move(frames)); + } + + interleaved_functional_processing_block::interleaved_functional_processing_block(const char* name, + rs2_format source_format, + rs2_format left_target_format, + rs2_stream left_target_stream, + rs2_extension left_extension_type, + int left_idx, + rs2_format right_target_format, + rs2_stream right_target_stream, + rs2_extension right_extension_type, + int right_idx) + : processing_block(name), + _source_format(source_format), + _left_target_format(left_target_format), + _left_target_stream(left_target_stream), + _left_extension_type(left_extension_type), + _left_target_profile_idx(left_idx), + _right_target_format(right_target_format), + _right_target_stream(right_target_stream), + _right_extension_type(right_extension_type), + _right_target_profile_idx(right_idx) + { + configure_processing_callback(); + } + + void interleaved_functional_processing_block::configure_processing_callback() + { + // define and set the frame processing callback + auto process_callback = [&](frame_holder frame, synthetic_source_interface* source) + { + auto profile = As(frame.frame->get_stream()); + if (!profile) + { + LOG_ERROR("Failed configuring interleaved funcitonal processing block: ", get_info(RS2_CAMERA_INFO_NAME)); + return; + } + + auto w = profile->get_width(); + auto h = profile->get_height(); + + if (profile.get() != _source_stream_profile.get()) + { + _source_stream_profile = profile; + _right_target_stream_profile = profile->clone(); + _left_target_stream_profile = profile->clone(); + + _left_target_bpp = get_image_bpp(_left_target_format) / 8; + _right_target_bpp = get_image_bpp(_right_target_format) / 8; + + _left_target_stream_profile->set_format(_left_target_format); + _right_target_stream_profile->set_format(_right_target_format); + _left_target_stream_profile->set_stream_type(_left_target_stream); + _right_target_stream_profile->set_stream_type(_right_target_stream); + _left_target_stream_profile->set_stream_index(_left_target_profile_idx); + _left_target_stream_profile->set_unique_id(_left_target_profile_idx); + _right_target_stream_profile->set_stream_index(_right_target_profile_idx); + _right_target_stream_profile->set_unique_id(_right_target_profile_idx); + } + + // passthrough the frame if we don't need to process it. 
+ auto format = profile->get_format(); + if (format != _source_format) + { + source->frame_ready(std::move(frame)); + return; + } + + frame_holder lf, rf; + + lf = source->allocate_video_frame(_left_target_stream_profile, frame, _left_target_bpp, + w, h, w * _left_target_bpp, _left_extension_type); + rf = source->allocate_video_frame(_right_target_stream_profile, frame, _right_target_bpp, + w, h, w * _right_target_bpp, _right_extension_type); + + // process the frame + byte* planes[2]; + planes[0] = (byte*)lf.frame->get_frame_data(); + planes[1] = (byte*)rf.frame->get_frame_data(); + + process_function(planes, (const byte*)frame->get_frame_data(), w, h, 0); + + source->frame_ready(std::move(lf)); + source->frame_ready(std::move(rf)); + }; + + set_processing_callback(std::shared_ptr( + new internal_frame_processor_callback(process_callback))); + } } diff --git a/src/proc/synthetic-stream.h b/src/proc/synthetic-stream.h index c9daa1cb0b..62785c788f 100644 --- a/src/proc/synthetic-stream.h +++ b/src/proc/synthetic-stream.h @@ -27,6 +27,12 @@ namespace librealsense int new_stride = 0, rs2_extension frame_type = RS2_EXTENSION_VIDEO_FRAME) override; + frame_interface* allocate_motion_frame(std::shared_ptr stream, + frame_interface* original, + int width = 0, + int height = 0, + rs2_extension frame_type = RS2_EXTENSION_MOTION_FRAME) override; + frame_interface* allocate_composite_frame(std::vector frames) override; frame_interface* allocate_points(std::shared_ptr stream, @@ -134,6 +140,61 @@ namespace librealsense bool should_process(const rs2::frame& frame) override; }; + // process frames with a given function + class functional_processing_block : public stream_filter_processing_block + { + public: + functional_processing_block(const char* name, rs2_format target_format, rs2_stream target_stream = RS2_STREAM_ANY, rs2_extension extension_type = RS2_EXTENSION_VIDEO_FRAME); + + protected: + virtual void init_profiles_info(const rs2::frame* f); + rs2::frame process_frame(const rs2::frame_source & source, const rs2::frame & f) override; + virtual rs2::frame prepare_frame(const rs2::frame_source& source, const rs2::frame& f); + virtual void process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) = 0; + + rs2::stream_profile _target_stream_profile; + rs2::stream_profile _source_stream_profile; + rs2_format _target_format; + rs2_stream _target_stream; + rs2_extension _extension_type; + int _target_bpp = 0; + }; + + // process interleaved frames with a given function + class interleaved_functional_processing_block : public processing_block + { + public: + interleaved_functional_processing_block(const char* name, + rs2_format source_format, + rs2_format left_target_format, + rs2_stream left_target_stream, + rs2_extension left_extension_type, + int left_idx, + rs2_format right_target_format, + rs2_stream right_target_stream, + rs2_extension right_extension_type, + int right_idx); + + protected: + virtual void process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) = 0; + void configure_processing_callback(); + + std::shared_ptr _source_stream_profile; + std::shared_ptr _left_target_stream_profile; + std::shared_ptr _right_target_stream_profile; + rs2_format _source_format; + rs2_format _left_target_format; + rs2_stream _left_target_stream; + rs2_extension _left_extension_type; + rs2_format _right_target_format; + rs2_stream _right_target_stream; + rs2_extension _right_extension_type; + int _left_target_bpp = 0; + int 
_right_target_bpp = 0; + int _left_target_profile_idx = 1; + int _right_target_profile_idx = 2; + }; + class depth_processing_block : public stream_filter_processing_block { public: @@ -144,6 +205,56 @@ namespace librealsense protected: bool should_process(const rs2::frame& frame) override; }; + + class LRS_EXTENSION_API composite_processing_block : public processing_block + { + public: + class bypass_option : public option + { + public: + bypass_option(composite_processing_block* parent, rs2_option opt) + : _parent(parent), _opt(opt) {} + + void set(float value) override { + // While query and other read operations + // will only read from the currently selected + // block, setting an option will propogate + // to all blocks in the group + for (int i = 0; i < _parent->_processing_blocks.size(); i++) + { + if (_parent->_processing_blocks[i]->supports_option(_opt)) + { + _parent->_processing_blocks[i]->get_option(_opt).set(value); + } + } + } + float query() const override { return get().query(); } + option_range get_range() const override { return get().get_range(); } + bool is_enabled() const override { return get().is_enabled(); } + bool is_read_only() const override { return get().is_read_only(); } + const char* get_description() const override { return get().get_description(); } + const char* get_value_description(float v) const override { return get().get_value_description(v); } + void enable_recording(std::function record_action) override {} + + option& get() { return _parent->get(_opt).get_option(_opt); } + const option& get() const { return _parent->get(_opt).get_option(_opt); } + private: + composite_processing_block* _parent; + rs2_option _opt; + }; + + composite_processing_block(); + composite_processing_block(const char* name); + virtual ~composite_processing_block() { _source.flush(); }; + + processing_block& get(rs2_option option); + void add(std::shared_ptr block); + void set_output_callback(frame_callback_ptr callback) override; + void invoke(frame_holder frames) override; + + protected: + std::vector> _processing_blocks; + }; } // API structures diff --git a/src/proc/y12i-to-y16y16.cpp b/src/proc/y12i-to-y16y16.cpp new file mode 100644 index 0000000000..27cdb4990e --- /dev/null +++ b/src/proc/y12i-to-y16y16.cpp @@ -0,0 +1,35 @@ +// License: Apache 2.0. See LICENSE file in root directory. +// Copyright(c) 2019 Intel Corporation. All Rights Reserved. 
+ +#include "y12i-to-y16y16.h" + +#include "stream.h" + +namespace librealsense +{ + struct y12i_pixel { uint8_t rl : 8, rh : 4, ll : 4, lh : 8; int l() const { return lh << 4 | ll; } int r() const { return rh << 8 | rl; } }; + void unpack_y16_y16_from_y12i_10(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + auto count = width * height; +#ifdef RS2_USE_CUDA + rscuda::split_frame_y16_y16_from_y12i_cuda(dest, count, reinterpret_cast(source)); +#else + split_frame(dest, count, reinterpret_cast(source), + [](const y12i_pixel & p) -> uint16_t { return p.l() << 6 | p.l() >> 4; }, // We want to convert 10-bit data to 16-bit data + [](const y12i_pixel & p) -> uint16_t { return p.r() << 6 | p.r() >> 4; }); // Multiply by 64 1/16 to efficiently approximate 65535/1023 +#endif + } + + y12i_to_y16y16::y12i_to_y16y16(int left_idx, int right_idx) + : y12i_to_y16y16("Y12I to Y16L Y16R Transform", left_idx, right_idx) {}; + + y12i_to_y16y16::y12i_to_y16y16(const char * name, int left_idx, int right_idx) + : interleaved_functional_processing_block(name, RS2_FORMAT_Y12I, RS2_FORMAT_Y16, RS2_STREAM_INFRARED, RS2_EXTENSION_VIDEO_FRAME, 1, + RS2_FORMAT_Y16, RS2_STREAM_INFRARED, RS2_EXTENSION_VIDEO_FRAME, 2) + {} + + void y12i_to_y16y16::process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + unpack_y16_y16_from_y12i_10(dest, source, width, height, actual_size); + } +} \ No newline at end of file diff --git a/src/proc/y12i-to-y16y16.h b/src/proc/y12i-to-y16y16.h new file mode 100644 index 0000000000..7f423c308f --- /dev/null +++ b/src/proc/y12i-to-y16y16.h @@ -0,0 +1,21 @@ +// License: Apache 2.0. See LICENSE file in root directory. +// Copyright(c) 2019 Intel Corporation. All Rights Reserved. + +#pragma once + +#include "synthetic-stream.h" +#include "option.h" +#include "image.h" + +namespace librealsense +{ + class y12i_to_y16y16 : public interleaved_functional_processing_block + { + public: + y12i_to_y16y16(int left_idx = 1, int right_idx = 2); + + protected: + y12i_to_y16y16(const char* name, int left_idx, int right_idx); + void process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) override; + }; +} diff --git a/src/proc/y8i-to-y8y8.cpp b/src/proc/y8i-to-y8y8.cpp new file mode 100644 index 0000000000..edf377fbcf --- /dev/null +++ b/src/proc/y8i-to-y8y8.cpp @@ -0,0 +1,37 @@ +// License: Apache 2.0. See LICENSE file in root directory. +// Copyright(c) 2019 Intel Corporation. All Rights Reserved. 
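+// Splits each interleaved Y8I frame into two Y8 infrared frames (left on stream index 1, right on index 2);
+// the left/right bytes of every pixel pair are copied out as-is, so no scaling is needed.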
+ +#include "y8i-to-y8y8.h" + +#include "stream.h" +#include "context.h" +#include "image.h" + +namespace librealsense +{ + struct y8i_pixel { uint8_t l, r; }; + void unpack_y8_y8_from_y8i(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + auto count = width * height; +#ifdef RS2_USE_CUDA + rscuda::split_frame_y8_y8_from_y8i_cuda(dest, count, reinterpret_cast(source)); +#else + split_frame(dest, count, reinterpret_cast(source), + [](const y8i_pixel & p) -> uint8_t { return p.l; }, + [](const y8i_pixel & p) -> uint8_t { return p.r; }); +#endif + } + + y8i_to_y8y8::y8i_to_y8y8(int left_idx, int right_idx) : + y8i_to_y8y8("Y8i to Y8-Y8 Converter", left_idx, right_idx) {} + + y8i_to_y8y8::y8i_to_y8y8(const char * name, int left_idx, int right_idx) + : interleaved_functional_processing_block(name, RS2_FORMAT_Y8I, RS2_FORMAT_Y8, RS2_STREAM_INFRARED, RS2_EXTENSION_VIDEO_FRAME, 1, + RS2_FORMAT_Y8, RS2_STREAM_INFRARED, RS2_EXTENSION_VIDEO_FRAME, 2) + {} + + void y8i_to_y8y8::process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) + { + unpack_y8_y8_from_y8i(dest, source, width, height, actual_size); + } +} // namespace librealsense diff --git a/src/proc/y8i-to-y8y8.h b/src/proc/y8i-to-y8y8.h new file mode 100644 index 0000000000..109280ac08 --- /dev/null +++ b/src/proc/y8i-to-y8y8.h @@ -0,0 +1,19 @@ +// License: Apache 2.0. See LICENSE file in root directory. +// Copyright(c) 2019 Intel Corporation. All Rights Reserved. + +#pragma once + +#include "synthetic-stream.h" + +namespace librealsense +{ + class LRS_EXTENSION_API y8i_to_y8y8 : public interleaved_functional_processing_block + { + public: + y8i_to_y8y8(int left_idx = 1, int right_idx = 2); + + protected: + y8i_to_y8y8(const char* name, int left_idx, int right_idx); + void process_function(byte * const dest[], const byte * source, int width, int height, int actual_size) override; + }; +} diff --git a/src/proc/yuy2rgb.cpp b/src/proc/yuy2rgb.cpp deleted file mode 100644 index cfa0698135..0000000000 --- a/src/proc/yuy2rgb.cpp +++ /dev/null @@ -1,49 +0,0 @@ -// License: Apache 2.0. See LICENSE file in root directory. -// Copyright(c) 2017 Intel Corporation. All Rights Reserved. 
- -#include "../include/librealsense2/hpp/rs_sensor.hpp" -#include "../include/librealsense2/hpp/rs_processing.hpp" - -#include "proc/synthetic-stream.h" -#include "context.h" -#include "environment.h" -#include "option.h" -#include "yuy2rgb.h" -#include "image.h" - -namespace librealsense -{ - yuy2rgb::yuy2rgb() - : yuy2rgb("YUY Converter") - {} - - yuy2rgb::yuy2rgb(const char* name) - : stream_filter_processing_block(name) - { - _stream_filter.stream = RS2_STREAM_ANY; - _stream_filter.format = RS2_FORMAT_YUYV; - } - - rs2::frame yuy2rgb::process_frame(const rs2::frame_source& source, const rs2::frame& f) - { - auto p = f.get_profile(); - if (p.get() != _source_stream_profile.get()) - { - _source_stream_profile = p; - _target_stream_profile = p.clone(p.stream_type(), p.stream_index(), RS2_FORMAT_RGB8); - } - - rs2::frame ret; - - auto vf = f.as(); - ret = source.allocate_video_frame(_target_stream_profile, f, _traget_bpp, - vf.get_width(), vf.get_height(), vf.get_width() * _traget_bpp, RS2_EXTENSION_VIDEO_FRAME); - - byte* planes[1]; - planes[0] = (byte*)ret.get_data(); - - unpack_yuy2_rgb8(planes, (const byte*)f.get_data(), vf.get_width(), vf.get_height(), vf.get_height() * vf.get_width() * _traget_bpp); - - return ret; - } -} diff --git a/src/proc/yuy2rgb.h b/src/proc/yuy2rgb.h deleted file mode 100644 index b0eedd9421..0000000000 --- a/src/proc/yuy2rgb.h +++ /dev/null @@ -1,29 +0,0 @@ -// License: Apache 2.0. See LICENSE file in root directory. -// Copyright(c) 2017 Intel Corporation. All Rights Reserved. - -#pragma once - -#include "synthetic-stream.h" - -namespace rs2 -{ - class stream_profile; -} - -namespace librealsense -{ - class LRS_EXTENSION_API yuy2rgb : public stream_filter_processing_block - { - public: - yuy2rgb(); - - protected: - yuy2rgb(const char* name); - rs2::frame process_frame(const rs2::frame_source& source, const rs2::frame& f) override; - - private: - rs2::stream_profile _target_stream_profile; - rs2::stream_profile _source_stream_profile; - int _traget_bpp = 3; - }; -} diff --git a/src/rs.cpp b/src/rs.cpp index d34eaac70c..3b84fbec27 100644 --- a/src/rs.cpp +++ b/src/rs.cpp @@ -28,7 +28,7 @@ #include "proc/spatial-filter.h" #include "proc/zero-order.h" #include "proc/hole-filling-filter.h" -#include "proc/yuy2rgb.h" +#include "proc/color-formats-converter.h" #include "proc/rates-printer.h" #include "media/playback/playback_device.h" #include "stream.h" @@ -414,7 +414,6 @@ rs2_stream_profile* rs2_clone_stream_profile(const rs2_stream_profile* mode, rs2 sp->set_stream_type(stream); sp->set_stream_index(stream_idx); sp->set_format(fmt); - return new rs2_stream_profile{ sp.get(), sp }; } HANDLE_EXCEPTIONS_AND_RETURN(nullptr, mode, stream, stream_idx, fmt) @@ -1421,6 +1420,20 @@ rs2_frame* rs2_allocate_synthetic_video_frame(rs2_source* source, const rs2_stre } HANDLE_EXCEPTIONS_AND_RETURN(nullptr, source, new_stream, original, new_bpp, new_width, new_height, new_stride, frame_type) +rs2_frame* rs2_allocate_synthetic_motion_frame(rs2_source* source, const rs2_stream_profile* new_stream, rs2_frame* original, + int new_width, int new_height, rs2_extension frame_type, rs2_error** error) BEGIN_API_CALL +{ + VALIDATE_NOT_NULL(source); + VALIDATE_NOT_NULL(original); + VALIDATE_NOT_NULL(new_stream); + + auto recovered_profile = std::dynamic_pointer_cast(new_stream->profile->shared_from_this()); + + return (rs2_frame*)source->source->allocate_motion_frame(recovered_profile, + (frame_interface*)original, new_width, new_height, frame_type); +} 
+HANDLE_EXCEPTIONS_AND_RETURN(nullptr, source, new_stream, original, new_width, new_height, frame_type) + rs2_frame* rs2_allocate_points(rs2_source* source, const rs2_stream_profile* new_stream, rs2_frame* original, rs2_error** error) BEGIN_API_CALL { VALIDATE_NOT_NULL(source); @@ -1884,7 +1897,7 @@ NOARGS_HANDLE_EXCEPTIONS_AND_RETURN(nullptr) rs2_processing_block* rs2_create_yuy_decoder(rs2_error** error) BEGIN_API_CALL { - return new rs2_processing_block { std::make_shared() }; + return new rs2_processing_block { std::make_shared(RS2_FORMAT_RGB8) }; } NOARGS_HANDLE_EXCEPTIONS_AND_RETURN(nullptr) diff --git a/src/sensor.cpp b/src/sensor.cpp index f4ae83e8d6..7b4178a94d 100644 --- a/src/sensor.cpp +++ b/src/sensor.cpp @@ -1,29 +1,34 @@ // License: Apache 2.0. See LICENSE file in root directory. // Copyright(c) 2015 Intel Corporation. All Rights Reserved. + +#include "sensor.h" + #include #include #include - -#include "source.h" -#include "proc/synthetic-stream.h" #include +#include "source.h" #include "device.h" #include "stream.h" -#include "sensor.h" +#include "metadata.h" +#include "proc/synthetic-stream.h" #include "proc/decimation-filter.h" +#include "proc/identity-processing-block.h" #include "global_timestamp_reader.h" -#include "metadata.h" namespace librealsense { + ////////////////////////////////////////////////////// + /////////////////// Sensor Base ////////////////////// + ////////////////////////////////////////////////////// + sensor_base::sensor_base(std::string name, device* dev, recommended_proccesing_blocks_interface* owner) : recommended_proccesing_blocks_base(owner), _is_streaming(false), _is_opened(false), _notifications_processor(std::shared_ptr(new notifications_processor())), - _on_before_frame_callback(nullptr), _metadata_parsers(std::make_shared()), _on_open(nullptr), _owner(dev), @@ -60,7 +65,7 @@ namespace librealsense { if (supports_option(RS2_OPTION_ERROR_POLLING_ENABLED)) { - auto& opt = get_option(RS2_OPTION_ERROR_POLLING_ENABLED); + auto&& opt = get_option(RS2_OPTION_ERROR_POLLING_ENABLED); opt.set(1.0f); } _notifications_processor->set_callback(std::move(callback)); @@ -95,172 +100,50 @@ namespace librealsense { return _source.set_callback(callback); } - std::shared_ptr sensor_base::get_notifications_processor() + + bool sensor_base::is_streaming() const { - return _notifications_processor; + return _is_streaming; } - void sensor_base::raise_on_before_streaming_changes(bool streaming) + std::shared_ptr sensor_base::get_notifications_processor() const { - on_before_streaming_changes(streaming); + return _notifications_processor; } - void sensor_base::set_active_streams(const stream_profiles& requests) + + void sensor_base::register_metadata(rs2_frame_metadata_value metadata, std::shared_ptr metadata_parser) const { - _active_profiles = requests; + if (_metadata_parsers.get()->end() != _metadata_parsers.get()->find(metadata)) + throw invalid_value_exception(to_string() << "Metadata attribute parser for " << rs2_frame_metadata_to_string(metadata) + << " is already defined"); + + _metadata_parsers.get()->insert(std::pair>(metadata, metadata_parser)); } - bool sensor_base::try_get_pf(const platform::stream_profile& p, native_pixel_format& result) const + std::shared_ptr>& sensor_base::get_fourcc_to_rs2_format_map() { - auto it = std::find_if(begin(_pixel_formats), end(_pixel_formats), - [&p](const native_pixel_format& pf) - { - return pf.fourcc == p.format; - }); - if (it != end(_pixel_formats)) - { - result = *it; - return true; - } - return false; + return 
_fourcc_to_rs2_format; } - void sensor_base::assign_stream(const std::shared_ptr& stream, std::shared_ptr target) const + std::shared_ptr>& sensor_base::get_fourcc_to_rs2_stream_map() { - environment::get_instance().get_extrinsics_graph().register_same_extrinsics(*stream, *target); - target->set_unique_id(stream->get_unique_id()); + return _fourcc_to_rs2_stream; } - std::vector sensor_base::resolve_requests(stream_profiles requests) + void sensor_base::raise_on_before_streaming_changes(bool streaming) { - // per requested profile, find all 4ccs that support that request. - std::map> legal_fourccs; - auto profiles = get_stream_profiles(); - for (auto&& r : requests) - { - auto sp = to_profile(r.get()); - for (auto&& mode : profiles) - { - if (auto backend_profile = dynamic_cast(mode.get())) - { - auto m = to_profile(mode.get()); - - if (m.fps == sp.fps && m.height == sp.height && m.width == sp.width) - legal_fourccs[sp.index].insert(backend_profile->get_backend_profile().format); // TODO: Stread ID??? - } - } - } - - //if you want more efficient data structure use std::unordered_set - //with well-defined hash function - std::set results; - while (!requests.empty() && !_pixel_formats.empty()) - { - auto max = 0; - size_t best_size = 0; - auto best_pf = &_pixel_formats.front(); - auto best_unpacker = &_pixel_formats.front().unpackers.front(); - platform::stream_profile uvc_profile{}; - for (auto&& pf : _pixel_formats) - { - // Speeds up algorithm by skipping obviously useless 4ccs - // if (std::none_of(begin(legal_fourccs), end(legal_fourccs), [&](const uint32_t fourcc) {return fourcc == pf.fourcc; })) continue; - - for (auto&& unpacker : pf.unpackers) - { - auto count = static_cast(std::count_if(begin(requests), end(requests), - [&pf, &legal_fourccs, &unpacker, this](const std::shared_ptr& r) - { - // only count if the 4cc can be unpacked into the relevant stream/format - // and also, the pixel format can be streamed in the requested dimensions/fps. - return unpacker.satisfies(to_profile(r.get()), pf.fourcc, _uvc_profiles) && legal_fourccs[r->get_stream_index()].count(pf.fourcc); - })); - - // Here we check if the current pixel format / unpacker combination is better than the current best. - // We judge on two criteria. A: how many of the requested streams can we supply? B: how many total streams do we open? - // Optimally, we want to find a combination that supplies all the requested streams, and no additional streams. - if ( - count > max // If the current combination supplies more streams, it is better. 
- || (count == max // Alternatively, if it supplies the same number of streams, - && unpacker.outputs.size() < best_size) // but this combination opens fewer total streams, it is also better - ) - { - max = count; - best_size = unpacker.outputs.size(); - best_pf = &pf; - best_unpacker = &unpacker; - } - } - } - - if (max == 0) break; - - requests.erase(std::remove_if(begin(requests), end(requests), - [best_unpacker, best_pf, &results, &legal_fourccs, this](const std::shared_ptr& r) - { - if (best_unpacker->satisfies(to_profile(r.get()), best_pf->fourcc, _uvc_profiles) && legal_fourccs[r->get_stream_index()].count(best_pf->fourcc)) - { - auto request = dynamic_cast(r.get()); - - request_mapping mapping; - mapping.unpacker = best_unpacker; - mapping.pf = best_pf; - auto uvc_profile = best_unpacker->get_uvc_profile(to_profile(r.get()), best_pf->fourcc, _uvc_profiles); - if (!request) { - mapping.profile = { 0, 0, r->get_framerate(), best_pf->fourcc }; - } - else - { - mapping.profile = { uvc_profile.width, uvc_profile.height, uvc_profile.fps, best_pf->fourcc }; - } - - results.insert(mapping); - - auto it = results.find(mapping); - if (it != results.end()) - { - it->original_requests.push_back(map_requests(r)); - } - - return true; - } - return false; - }), end(requests)); - } - - if (requests.empty()) return{ begin(results), end(results) }; - - throw invalid_value_exception("Subdevice unable to satisfy stream requests!"); + on_before_streaming_changes(streaming); } - - std::shared_ptr sensor_base::map_requests(std::shared_ptr request) + void sensor_base::set_active_streams(const stream_profiles& requests) { - stream_profiles results; - auto profiles = get_stream_profiles(); - - auto it = std::find_if(profiles.begin(), profiles.end(), [&](std::shared_ptr p) { - return to_profile(p.get()) == to_profile(request.get()); - }); - - if (it == profiles.end()) - throw invalid_value_exception("Subdevice could not map requests!"); - - return *it; + _active_profiles = requests; } - uvc_sensor::~uvc_sensor() + void sensor_base::assign_stream(const std::shared_ptr& stream, std::shared_ptr target) const { - try - { - if (_is_streaming) - uvc_sensor::stop(); - - if (_is_opened) - uvc_sensor::close(); - } - catch(...) 
- { - LOG_ERROR("An error has occurred while stop_streaming()!"); - } + environment::get_instance().get_extrinsics_graph().register_same_extrinsics(*stream, *target); + auto uid = stream->get_unique_id(); + target->set_unique_id(uid); } stream_profiles sensor_base::get_stream_profiles(int tag) const @@ -304,121 +187,65 @@ namespace librealsense return res; } - stream_profiles uvc_sensor::init_stream_profiles() + device_interface& sensor_base::get_device() { - std::unordered_set> results; - std::set unregistered_formats; - std::set supported_formats; - - power on(std::dynamic_pointer_cast(shared_from_this())); - if (_uvc_profiles.empty()){} - _uvc_profiles = _device->get_profiles(); + return *_owner; + } - for (auto&& p : _uvc_profiles) - { - supported_formats.insert(p.format); - native_pixel_format pf{}; - if (try_get_pf(p, pf)) - { - for (auto&& unpacker : pf.unpackers) - { - for (auto&& output : unpacker.outputs) - { - auto profile = std::make_shared(p); - auto res = output.stream_resolution({ p.width, p.height }); - profile->set_dims(res.width, res.height); - profile->set_stream_type(output.stream_desc.type); - profile->set_stream_index(output.stream_desc.index); - profile->set_format(output.format); - profile->set_framerate(p.fps); - results.insert(profile); - } - } - } - else - { - unregistered_formats.insert(p.format); - } - } + std::shared_ptr sensor_base::generate_frame_from_data(const platform::frame_object& fo, + frame_timestamp_reader* timestamp_reader, + const rs2_time_t& last_timestamp, + const unsigned long long& last_frame_number, + std::shared_ptr profile) + { + auto system_time = environment::get_instance().get_time_service()->get_time(); + auto fr = std::make_shared(); + byte* pix = (byte*)fo.pixels; + std::vector pixels(pix, pix + fo.frame_size); + fr->data = pixels; + fr->set_stream(profile); + + // generate additional data + frame_additional_data additional_data(0, + 0, + system_time, + static_cast(fo.metadata_size), + (const uint8_t*)fo.metadata, + fo.backend_time, + last_timestamp, + last_frame_number, + false); + fr->additional_data = additional_data; + + // update additional data + additional_data.timestamp = timestamp_reader->get_frame_timestamp(fr); + additional_data.last_frame_number = last_frame_number; + additional_data.frame_number = timestamp_reader->get_frame_counter(fr); + fr->additional_data = additional_data; + + return fr; + } + + ////////////////////////////////////////////////////// + /////////////////// UVC Sensor /////////////////////// + ////////////////////////////////////////////////////// - if (unregistered_formats.size()) + uvc_sensor::~uvc_sensor() + { + try { - std::stringstream ss; - ss << "Unregistered Media formats : [ "; - for (auto& elem : unregistered_formats) - { - uint32_t device_fourcc = reinterpret_cast&>(elem); - char fourcc[sizeof(device_fourcc) + 1]; - librealsense::copy(fourcc, &device_fourcc, sizeof(device_fourcc)); - fourcc[sizeof(device_fourcc)] = 0; - ss << fourcc << " "; - } + if (_is_streaming) + uvc_sensor::stop(); - ss << "]; Supported: [ "; - for (auto& elem : supported_formats) - { - uint32_t device_fourcc = reinterpret_cast&>(elem); - char fourcc[sizeof(device_fourcc) + 1]; - librealsense::copy(fourcc, &device_fourcc, sizeof(device_fourcc)); - fourcc[sizeof(device_fourcc)] = 0; - ss << fourcc << " "; - } - ss << "]"; - LOG_INFO(ss.str()); + if (_is_opened) + uvc_sensor::close(); } - - // Sort the results to make sure that the user will receive predictable deterministic output from the API - stream_profiles res{ 
begin(results), end(results) }; - std::sort(res.begin(), res.end(), [](const std::shared_ptr& ap, - const std::shared_ptr& bp) - { - auto a = to_profile(ap.get()); - auto b = to_profile(bp.get()); - - // stream == RS2_STREAM_COLOR && format == RS2_FORMAT_RGB8 element works around the fact that Y16 gets priority over RGB8 when both - // are available for pipeline stream resolution - auto at = std::make_tuple(a.stream, a.width, a.height, a.fps, a.stream == RS2_STREAM_COLOR && a.format == RS2_FORMAT_RGB8, a.format); - auto bt = std::make_tuple(b.stream, b.width, b.height, b.fps, b.stream == RS2_STREAM_COLOR && b.format == RS2_FORMAT_RGB8, b.format); - - return at > bt; - }); - - return res; - } - - rs2_extension uvc_sensor::stream_to_frame_types(rs2_stream stream) const - { - // TODO: explicitly return video_frame for relevant streams and default to an error? - switch (stream) + catch (...) { - case RS2_STREAM_DEPTH: return RS2_EXTENSION_DEPTH_FRAME; - default: return RS2_EXTENSION_VIDEO_FRAME; + LOG_ERROR("An error has occurred while stop_streaming()!"); } } - device_interface& sensor_base::get_device() - { - return *_owner; - } - - void sensor_base::register_pixel_format(native_pixel_format pf) - { - if (_pixel_formats.end() == std::find_if(_pixel_formats.begin(), _pixel_formats.end(), - [&pf](const native_pixel_format& cur) { return cur.fourcc == pf.fourcc; })) - _pixel_formats.push_back(pf); - else - throw invalid_value_exception(to_string() - << "Pixel format " << std::hex << std::setw(8) << std::setfill('0') << pf.fourcc - << " has been already registered with the sensor " << get_info(RS2_CAMERA_INFO_NAME)); - } - - void sensor_base::remove_pixel_format(native_pixel_format pf) - { - auto it = std::find_if(_pixel_formats.begin(), _pixel_formats.end(), [&pf](const native_pixel_format& cur) { return cur.fourcc == pf.fourcc; }); - if (it != _pixel_formats.end()) - _pixel_formats.erase(it); - } - void uvc_sensor::open(const stream_profiles& requests) { std::lock_guard lock(_configure_lock); @@ -430,133 +257,82 @@ namespace librealsense auto on = std::unique_ptr(new power(std::dynamic_pointer_cast(shared_from_this()))); _source.init(_metadata_parsers); - _source.set_sensor(this->shared_from_this()); - auto mapping = resolve_requests(requests); - - auto timestamp_reader = _timestamp_reader.get(); + _source.set_sensor(_source_owner); std::vector commited; - for (auto&& mode : mapping) + for (auto&& req_profile : requests) { + auto&& req_profile_base = std::dynamic_pointer_cast(req_profile); try { unsigned long long last_frame_number = 0; rs2_time_t last_timestamp = 0; - _device->probe_and_commit(mode.profile, - [this, mode, timestamp_reader, requests, last_frame_number, last_timestamp](platform::stream_profile p, platform::frame_object f, std::function continuation) mutable + _device->probe_and_commit(req_profile_base->get_backend_profile(), + [this, req_profile_base, req_profile, last_frame_number, last_timestamp](platform::stream_profile p, platform::frame_object f, std::function continuation) mutable { - auto system_time = environment::get_instance().get_time_service()->get_time(); + const auto&& system_time = environment::get_instance().get_time_service()->get_time(); + const auto&& fr = generate_frame_from_data(f, _timestamp_reader.get(), last_timestamp, last_frame_number, req_profile_base); + const auto&& requires_processing = true; // TODO - Ariel add option + const auto&& timestamp_domain = _timestamp_reader->get_frame_timestamp_domain(fr); + const auto&& bpp = 
get_image_bpp(req_profile_base->get_format()); + auto&& frame_counter = fr->additional_data.frame_number; + auto&& timestamp = fr->additional_data.timestamp; + if (!this->is_streaming()) { LOG_WARNING("Frame received with streaming inactive," - << librealsense::get_string(mode.unpacker->outputs.front().stream_desc.type) - << mode.unpacker->outputs.front().stream_desc.index - << ", Arrived," << std::fixed << f.backend_time << " " << system_time); + << librealsense::get_string(req_profile_base->get_stream_type()) + << req_profile_base->get_stream_index() + << ", Arrived," << std::fixed << f.backend_time << " " << system_time); return; } frame_continuation release_and_enqueue(continuation, f.pixels); - - // Ignore any frames which appear corrupted or invalid - // Determine the timestamp for this frame - auto timestamp = timestamp_reader->get_frame_timestamp(mode, f); - auto timestamp_domain = timestamp_reader->get_frame_timestamp_domain(mode, f); - auto frame_counter = timestamp_reader->get_frame_counter(mode, f); - - auto requires_processing = mode.requires_processing(); - - std::vector dest; - std::vector refs; - - auto&& unpacker = *mode.unpacker; - for (auto&& output : unpacker.outputs) - { - LOG_DEBUG("FrameAccepted," << librealsense::get_string(output.stream_desc.type) - << ",Counter," << std::dec << frame_counter - << ",Index," << output.stream_desc.index + + LOG_DEBUG("FrameAccepted," << librealsense::get_string(req_profile_base->get_stream_type()) + << ",Counter," << std::dec << fr->additional_data.frame_number + << ",Index," << req_profile_base->get_stream_index() << ",BackEndTS," << std::fixed << f.backend_time << ",SystemTime," << std::fixed << system_time - <<" ,diff_ts[Sys-BE],"<< system_time- f.backend_time + << " ,diff_ts[Sys-BE]," << system_time - f.backend_time << ",TS," << std::fixed << timestamp << ",TS_Domain," << rs2_timestamp_domain_to_string(timestamp_domain) - <<",last_frame_number,"<< last_frame_number<<",last_timestamp,"<< last_timestamp); + << ",last_frame_number," << last_frame_number << ",last_timestamp," << last_timestamp); - std::shared_ptr request = nullptr; - for (auto&& original_prof : mode.original_requests) - { - if (original_prof->get_format() == output.format && - original_prof->get_stream_type() == output.stream_desc.type && - original_prof->get_stream_index() == output.stream_desc.index) - { - request = original_prof; - } - } + last_frame_number = frame_counter; + last_timestamp = timestamp; - auto bpp = get_image_bpp(output.format); - frame_additional_data additional_data(timestamp, - frame_counter, - system_time, - static_cast(f.metadata_size), - (const uint8_t*)f.metadata, - f.backend_time, - last_timestamp, - last_frame_number, - false); - - last_frame_number = frame_counter; - last_timestamp = timestamp; - - auto res = output.stream_resolution({ mode.profile.width, mode.profile.height }); - auto width = res.width; - auto height = res.height; - - frame_holder frame = _source.alloc_frame(stream_to_frame_types(output.stream_desc.type), width * height * bpp / 8, additional_data, requires_processing); - if (frame.frame) - { - auto video = (video_frame*)frame.frame; - video->assign(width, height, width * bpp / 8, bpp); - video->set_timestamp_domain(timestamp_domain); - dest.push_back(const_cast(video->get_frame_data())); - frame->set_stream(request); - refs.push_back(std::move(frame)); - } - else - { - LOG_INFO("Dropped frame. alloc_frame(...) returned nullptr"); - return; - } + const auto&& vsp = As(req_profile); + int width = vsp ? 
vsp->get_width() : 0; + int height = vsp ? vsp->get_height() : 0; - // Obtain buffers for unpacking the frame - //dest.push_back(archive->alloc_frame(output.first, additional_data, requires_processing)); + frame_holder fh = _source.alloc_frame(stream_to_frame_types(req_profile_base->get_stream_type()), width * height * bpp / 8, fr->additional_data, requires_processing); + if (fh.frame) + { + memcpy((void*)fh->get_frame_data(), fr->data.data(), sizeof(byte)*fr->data.size()); + auto&& video = (video_frame*)fh.frame; + video->assign(width, height, width * bpp / 8, bpp); + video->set_timestamp_domain(timestamp_domain); + fh->set_stream(req_profile_base); } - - // Unpack the frame - if (requires_processing && (dest.size() > 0)) + else { - unpacker.unpack(dest.data(), reinterpret_cast(f.pixels), mode.profile.width, mode.profile.height, f.frame_size); + LOG_INFO("Dropped frame. alloc_frame(...) returned nullptr"); + return; } - // If any frame callbacks were specified, dispatch them now - for (auto&& pref : refs) + if (!requires_processing) { - if (!requires_processing) - { - pref->attach_continuation(std::move(release_and_enqueue)); - } - - if (_on_before_frame_callback) - { - auto callback = _source.begin_callback(); - auto stream_type = pref->get_stream()->get_stream_type(); - _on_before_frame_callback(stream_type, pref, std::move(callback)); - } + fh->attach_continuation(std::move(release_and_enqueue)); + } - if (pref->get_stream().get()) - _source.invoke_callback(std::move(pref)); + if (fh->get_stream().get()) + { + _source.invoke_callback(std::move(fh)); } }); } - catch(...) + catch (...) { for (auto&& commited_profile : commited) { @@ -564,7 +340,7 @@ namespace librealsense } throw; } - commited.push_back(mode.profile); + commited.push_back(req_profile_base->get_backend_profile()); } _internal_config = commited; @@ -583,7 +359,7 @@ namespace librealsense } catch (...) { - for (auto& profile : _internal_config) + for (auto&& profile : _internal_config) { try { _device->close(profile); @@ -610,7 +386,7 @@ namespace librealsense else if (!_is_opened) throw wrong_api_call_sequence_exception("close() failed. UVC device was not opened!"); - for (auto& profile : _internal_config) + for (auto&& profile : _internal_config) { try // Handle disconnect event { @@ -641,9 +417,9 @@ namespace librealsense else if(!_is_opened) throw wrong_api_call_sequence_exception("start_streaming(...) failed. 
UVC device was not opened!"); + raise_on_before_streaming_changes(true); //Required to be just before actual start allow recording to work _source.set_callback(callback); _is_streaming = true; - raise_on_before_streaming_changes(true); //Required to be just before actual start allow recording to work _device->start_callbacks(); } @@ -658,7 +434,6 @@ namespace librealsense raise_on_before_streaming_changes(false); } - void uvc_sensor::reset_streaming() { _source.flush(); @@ -666,13 +441,39 @@ namespace librealsense _timestamp_reader->reset(); } + rs2_format uvc_sensor::fourcc_to_rs2_format(uint32_t fourcc_format) const + { + rs2_format f = RS2_FORMAT_ANY; + try { + f = _fourcc_to_rs2_format->at(fourcc_format); + } + catch (std::out_of_range) + { + } + + return f; + } + + rs2_stream uvc_sensor::fourcc_to_rs2_stream(uint32_t fourcc_format) const + { + rs2_stream s = RS2_STREAM_ANY; + try { + s = _fourcc_to_rs2_stream->at(fourcc_format); + } + catch (std::out_of_range) + { + } + + return s; + } + void uvc_sensor::acquire_power() { std::lock_guard lock(_power_lock); if (_user_count.fetch_add(1) == 0) { _device->set_power_state(platform::D0); - for (auto& xu : _xus) _device->init_xu(xu); + for (auto&& xu : _xus) _device->init_xu(xu); } } @@ -685,9 +486,46 @@ namespace librealsense } } + stream_profiles uvc_sensor::init_stream_profiles() + { + std::unordered_set> profiles; + power on(std::dynamic_pointer_cast(shared_from_this())); + + if (_uvc_profiles.empty()) {} + _uvc_profiles = _device->get_profiles(); + + for (auto&& p : _uvc_profiles) + { + const auto&& rs2_fmt = fourcc_to_rs2_format(p.format); + if (rs2_fmt == RS2_FORMAT_ANY) + continue; + + auto&& profile = std::make_shared(p); + profile->set_dims(p.width, p.height); + profile->set_stream_type(fourcc_to_rs2_stream(p.format)); + profile->set_stream_index(0); + profile->set_format(rs2_fmt); + profile->set_framerate(p.fps); + profiles.insert(profile); + } + + stream_profiles result{ profiles.begin(), profiles.end() }; + return result; + } + + rs2_extension uvc_sensor::stream_to_frame_types(rs2_stream stream) const + { + // TODO: explicitly return video_frame for relevant streams and default to an error? 
+ switch (stream) + { + case RS2_STREAM_DEPTH: return RS2_EXTENSION_DEPTH_FRAME; + default: return RS2_EXTENSION_VIDEO_FRAME; + } + } + bool info_container::supports_info(rs2_camera_info info) const { - auto it = _camera_info.find(info); + const auto&& it = _camera_info.find(info); return it != _camera_info.end(); } @@ -712,7 +550,7 @@ namespace librealsense } const std::string& info_container::get_info(rs2_camera_info info) const { - auto it = _camera_info.find(info); + const auto&& it = _camera_info.find(info); if (it == _camera_info.end()) throw invalid_value_exception("Selected camera info is not supported for this camera!"); @@ -729,7 +567,7 @@ namespace librealsense void info_container::update(std::shared_ptr ext) { - if (auto info_api = As(ext)) + if (auto&& info_api = As(ext)) { for (int i = 0; i < RS2_CAMERA_INFO_COUNT; ++i) { @@ -767,21 +605,16 @@ namespace librealsense } } - void sensor_base::register_metadata(rs2_frame_metadata_value metadata, std::shared_ptr metadata_parser) const - { - if (_metadata_parsers.get()->end() != _metadata_parsers.get()->find(metadata)) - throw invalid_value_exception( to_string() << "Metadata attribute parser for " << rs2_frame_metadata_to_string(metadata) - << " is already defined"); - - _metadata_parsers.get()->insert(std::pair>(metadata, metadata_parser)); - } + ////////////////////////////////////////////////////// + /////////////////// HID Sensor /////////////////////// + ////////////////////////////////////////////////////// hid_sensor::hid_sensor(std::shared_ptr hid_device, std::unique_ptr hid_iio_timestamp_reader, std::unique_ptr custom_hid_timestamp_reader, std::map> fps_and_sampling_frequency_per_rs2_stream, std::vector> sensor_name_and_hid_profiles, device* dev) - : sensor_base("Motion Module", dev, (recommended_proccesing_blocks_interface*)this), _sensor_name_and_hid_profiles(sensor_name_and_hid_profiles), + : sensor_base("Raw Motion Module", dev, (recommended_proccesing_blocks_interface*)this), _sensor_name_and_hid_profiles(sensor_name_and_hid_profiles), _fps_and_sampling_frequency_per_rs2_stream(fps_and_sampling_frequency_per_rs2_stream), _hid_device(hid_device), _is_configured_stream(RS2_STREAM_COUNT), @@ -790,17 +623,16 @@ namespace librealsense { register_metadata(RS2_FRAME_METADATA_BACKEND_TIMESTAMP, make_additional_data_parser(&frame_additional_data::backend_timestamp)); - std::map frequency_per_sensor; - for (auto& elem : sensor_name_and_hid_profiles) + for (auto&& elem : sensor_name_and_hid_profiles) frequency_per_sensor.insert(make_pair(elem.first, elem.second.fps)); std::vector profiles_vector; - for (auto& elem : frequency_per_sensor) + for (auto&& elem : frequency_per_sensor) profiles_vector.push_back(platform::hid_profile{elem.first, elem.second}); _hid_device->register_profiles(profiles_vector); - for (auto& elem : _hid_device->get_sensors()) + for (auto&& elem : _hid_device->get_sensors()) _hid_sensors.push_back(elem); } @@ -823,11 +655,11 @@ namespace librealsense stream_profiles hid_sensor::get_sensor_profiles(std::string sensor_name) const { stream_profiles profiles{}; - for (auto& elem : _sensor_name_and_hid_profiles) + for (auto&& elem : _sensor_name_and_hid_profiles) { if (!elem.first.compare(sensor_name)) { - auto p = elem.second; + auto&& p = elem.second; platform::stream_profile sp{ 1, 1, p.fps, stream_to_fourcc(p.stream) }; auto profile = std::make_shared(sp); profile->set_stream_index(p.index); @@ -849,37 +681,14 @@ namespace librealsense else if (_is_opened) throw wrong_api_call_sequence_exception("Hid device 
is already opened!"); - auto mapping = resolve_requests(requests); - for (auto& request : requests) - { - auto sensor_name = rs2_stream_to_sensor_name(request->get_stream_type()); - for (auto& map : mapping) - { - auto it = std::find_if(begin(map.unpacker->outputs), end(map.unpacker->outputs), - [&](const stream_output& pair) - { - return pair.stream_desc.type == request->get_stream_type() && - pair.stream_desc.index == request->get_stream_index(); - }); - - if (it != end(map.unpacker->outputs)) - { - _configured_profiles.insert(std::make_pair(sensor_name, - stream_profile{ request->get_stream_type(), request->get_stream_index(), - 0, - 0, - fps_to_sampling_frequency(request->get_stream_type(), request->get_framerate()), - request->get_format()})); - _is_configured_stream[request->get_stream_type()] = true; - _hid_mapping.insert(std::make_pair(sensor_name, map)); - } - } - } - std::vector configured_hid_profiles; - for (auto& elem : _configured_profiles) + for (auto&& request : requests) { - configured_hid_profiles.push_back(platform::hid_profile{elem.first, elem.second.fps}); + auto&& sensor_name = rs2_stream_to_sensor_name(request->get_stream_type()); + _configured_profiles.insert(std::make_pair(sensor_name, request)); + _is_configured_stream[request->get_stream_type()] = true; + configured_hid_profiles.push_back(platform::hid_profile{ sensor_name, + fps_to_sampling_frequency(request->get_stream_type(), request->get_framerate()) }); } _hid_device->open(configured_hid_profiles); if (Is(_owner)) @@ -902,7 +711,6 @@ namespace librealsense _configured_profiles.clear(); _is_configured_stream.clear(); _is_configured_stream.resize(RS2_STREAM_COUNT); - _hid_mapping.clear(); _is_opened = false; if (Is(_owner)) { @@ -932,20 +740,20 @@ namespace librealsense throw wrong_api_call_sequence_exception("start_streaming(...) failed. 
Hid device was not opened!"); _source.set_callback(callback); - _source.init(_metadata_parsers); - _source.set_sensor(this->shared_from_this()); + _source.set_sensor(_source_owner); + unsigned long long last_frame_number = 0; rs2_time_t last_timestamp = 0; raise_on_before_streaming_changes(true); //Required to be just before actual start allow recording to work - _hid_device->start_capture([this,last_frame_number,last_timestamp](const platform::sensor_data& sensor_data) mutable + + _hid_device->start_capture([this, last_frame_number, last_timestamp](const platform::sensor_data& sensor_data) mutable { - auto system_time = environment::get_instance().get_time_service()->get_time(); + const auto&& system_time = environment::get_instance().get_time_service()->get_time(); auto timestamp_reader = _hid_iio_timestamp_reader.get(); - - // TODO: static const std::string custom_sensor_name = "custom"; - auto sensor_name = sensor_data.sensor.name; + auto&& sensor_name = sensor_data.sensor.name; + auto&& request = _configured_profiles[sensor_name]; bool is_custom_sensor = false; static const uint32_t custom_source_id_offset = 16; uint8_t custom_gpio = 0; @@ -967,67 +775,40 @@ namespace librealsense if (!this->is_streaming()) { + auto stream_type = request->get_stream_type(); LOG_INFO("HID Frame received when Streaming is not active," - << get_string(_configured_profiles[sensor_name].stream) - << ",Arrived," << std::fixed << system_time); + << get_string(stream_type) + << ",Arrived," << std::fixed << system_time); return; } - auto mode = _hid_mapping[sensor_name]; - auto request = *(mode.original_requests.begin()); - auto data_size = sensor_data.fo.frame_size; - mode.profile.width = (uint32_t)data_size; - mode.profile.height = 1; - - // Determine the timestamp for this HID frame - auto timestamp = timestamp_reader->get_frame_timestamp(mode, sensor_data.fo); - auto frame_counter = timestamp_reader->get_frame_counter(mode, sensor_data.fo); - auto ts_domain = timestamp_reader->get_frame_timestamp_domain(mode, sensor_data.fo); - - frame_additional_data additional_data(timestamp, - frame_counter, - system_time, - static_cast(sensor_data.fo.metadata_size), - (const uint8_t*)sensor_data.fo.metadata, - sensor_data.fo.backend_time, - last_timestamp, - last_frame_number, - false); - - additional_data.timestamp_domain = ts_domain; - additional_data.backend_timestamp = sensor_data.fo.backend_time; + const auto&& fr = generate_frame_from_data(sensor_data.fo, timestamp_reader, last_timestamp, last_frame_number, request); + auto&& frame_counter = fr->additional_data.frame_number; + const auto&& timestamp_domain = timestamp_reader->get_frame_timestamp_domain(fr); + auto&& timestamp = fr->additional_data.timestamp; + const auto&& bpp = get_image_bpp(request->get_format()); + auto&& data_size = sensor_data.fo.frame_size; LOG_DEBUG("FrameAccepted," << get_string(request->get_stream_type()) - << ",Counter," << std::dec << frame_counter << ",Index,0" - << ",BackEndTS," << std::fixed << sensor_data.fo.backend_time - << ",SystemTime," << std::fixed << system_time - <<" ,diff_ts[Sys-BE],"<< system_time- sensor_data.fo.backend_time - << ",TS," << std::fixed << timestamp << ",TS_Domain," << rs2_timestamp_domain_to_string(ts_domain) - <<",last_frame_number,"<< last_frame_number<<",last_timestamp,"<< last_timestamp); + << ",Counter," << std::dec << frame_counter << ",Index,0" + << ",BackEndTS," << std::fixed << sensor_data.fo.backend_time + << ",SystemTime," << std::fixed << system_time + << " ,diff_ts[Sys-BE]," << system_time - 
sensor_data.fo.backend_time + << ",TS," << std::fixed << timestamp << ",TS_Domain," << rs2_timestamp_domain_to_string(timestamp_domain) + << ",last_frame_number," << last_frame_number << ",last_timestamp," << last_timestamp); last_frame_number = frame_counter; last_timestamp = timestamp; - auto frame = _source.alloc_frame(RS2_EXTENSION_MOTION_FRAME, data_size, additional_data, true); + frame_holder frame = _source.alloc_frame(RS2_EXTENSION_MOTION_FRAME, data_size, fr->additional_data, true); + memcpy((void*)frame->get_frame_data(), fr->data.data(), sizeof(byte)*fr->data.size()); if (!frame) { LOG_INFO("Dropped frame. alloc_frame(...) returned nullptr"); return; } frame->set_stream(request); - - std::vector dest{const_cast(frame->get_frame_data())}; - mode.unpacker->unpack(dest.data(),(const byte*)sensor_data.fo.pixels, mode.profile.width, mode.profile.height, data_size); - - if (_on_before_frame_callback) - { - auto callback = _source.begin_callback(); - auto stream_type = frame->get_stream()->get_stream_type(); - _on_before_frame_callback(stream_type, frame, std::move(callback)); - } - _source.invoke_callback(std::move(frame)); }); - _is_streaming = true; } @@ -1056,7 +837,7 @@ namespace librealsense stream_profiles hid_sensor::init_stream_profiles() { stream_profiles stream_requests; - for (auto it = _hid_sensors.rbegin(); it != _hid_sensors.rend(); ++it) + for (auto&& it = _hid_sensors.rbegin(); it != _hid_sensors.rend(); ++it) { auto profiles = get_sensor_profiles(it->name); stream_requests.insert(stream_requests.end(), profiles.begin() ,profiles.end()); @@ -1067,7 +848,7 @@ namespace librealsense const std::string& hid_sensor::rs2_stream_to_sensor_name(rs2_stream stream) const { - for (auto& elem : _sensor_name_and_hid_profiles) + for (auto&& elem : _sensor_name_and_hid_profiles) { if (stream == elem.second.stream) return elem.first; @@ -1103,7 +884,10 @@ namespace librealsense return fps; } - uvc_sensor::uvc_sensor(std::string name, std::shared_ptr uvc_device, std::unique_ptr timestamp_reader, device* dev) + uvc_sensor::uvc_sensor(std::string name, + std::shared_ptr uvc_device, + std::unique_ptr timestamp_reader, + device* dev) : sensor_base(name, dev, (recommended_proccesing_blocks_interface*)this), _device(move(uvc_device)), _user_count(0), @@ -1128,11 +912,12 @@ namespace librealsense } } - rs2_time_t iio_hid_timestamp_reader::get_frame_timestamp(const request_mapping& mode, const platform::frame_object& fo) + rs2_time_t iio_hid_timestamp_reader::get_frame_timestamp(std::shared_ptr frame) { std::lock_guard lock(_mtx); - if (has_metadata(mode, fo.metadata, fo.metadata_size)) + auto f = std::dynamic_pointer_cast(frame); + if (has_metadata(frame)) { // The timestamps conversions path comprise of: // FW TS (32bit) -> USB Phy Layer (no changes) -> Host Driver TS (Extend to 64bit) -> LRS read as 64 bit @@ -1140,11 +925,12 @@ namespace librealsense // In order to allow for hw timestamp-based synchronization of Depth and IMU streams the latter will be trimmed to 32 bit. // To revert to the extended 64 bit TS uncomment the next line instead //auto timestamp = *((uint64_t*)((const uint8_t*)fo.metadata)); - // The ternary operator is replaced by explicit assignment due to an issue with GCC for RaspberryPi that causes segfauls in optimized build. 
- auto timestamp = *(reinterpret_cast(const_cast(fo.metadata))); - if (fo.metadata_size >= platform::hid_header_size) - timestamp = static_cast(reinterpret_cast(fo.metadata)->timestamp); + // The ternary operator is replaced by explicit assignment due to an issue with GCC for RaspberryPi that causes segfauls in optimized build. + auto timestamp = *(reinterpret_cast(f->additional_data.metadata_blob.data())); + if (f->additional_data.metadata_size >= platform::hid_header_size) + timestamp = static_cast(reinterpret_cast(f->additional_data.metadata_blob.data())->timestamp); + // HID timestamps are aligned to FW Default - usec units return static_cast(timestamp * TIMESTAMP_USEC_TO_MSEC); } @@ -1158,32 +944,536 @@ namespace librealsense return std::chrono::duration(std::chrono::system_clock::now().time_since_epoch()).count(); } - bool iio_hid_timestamp_reader::has_metadata(const request_mapping& mode, const void * metadata, size_t metadata_size) const + bool iio_hid_timestamp_reader::has_metadata(std::shared_ptr frame) const { - if (metadata != nullptr && metadata_size > 0) + auto f = std::dynamic_pointer_cast(frame); + + if (f->additional_data.metadata_size > 0) { return true; } return false; } - unsigned long long iio_hid_timestamp_reader::get_frame_counter(const request_mapping & mode, const platform::frame_object& fo) const + unsigned long long iio_hid_timestamp_reader::get_frame_counter(std::shared_ptr frame) const { std::lock_guard lock(_mtx); - if (nullptr == mode.pf) return 0; // Windows support is limited + int index = 0; - if (mode.pf->fourcc == rs_fourcc('G','Y','R','O')) + if (frame->get_stream()->get_stream_type() == RS2_STREAM_GYRO) index = 1; return ++counter[index]; } - rs2_timestamp_domain iio_hid_timestamp_reader::get_frame_timestamp_domain(const request_mapping & mode, const platform::frame_object& fo) const + rs2_timestamp_domain iio_hid_timestamp_reader::get_frame_timestamp_domain(std::shared_ptr frame) const { - if (has_metadata(mode, fo.metadata, fo.metadata_size)) + if (has_metadata(frame)) { return RS2_TIMESTAMP_DOMAIN_HARDWARE_CLOCK; } return RS2_TIMESTAMP_DOMAIN_SYSTEM_TIME; } + + ////////////////////////////////////////////////////// + ///////////////// Synthetic Sensor /////////////////// + ////////////////////////////////////////////////////// + + synthetic_sensor::synthetic_sensor(std::string name, + std::shared_ptr sensor, + device* device, + const std::map& fourcc_to_rs2_format_map, + const std::map& fourcc_to_rs2_stream_map) + : sensor_base(name, device, (recommended_proccesing_blocks_interface*)this), _raw_sensor(std::move(sensor)) + { + // synthetic sensor and its raw sensor will share the formats and streams mapping + auto& raw_fourcc_to_rs2_format_map = _raw_sensor->get_fourcc_to_rs2_format_map(); + _fourcc_to_rs2_format = std::make_shared>(fourcc_to_rs2_format_map); + raw_fourcc_to_rs2_format_map = _fourcc_to_rs2_format; + + auto& raw_fourcc_to_rs2_stream_map = _raw_sensor->get_fourcc_to_rs2_stream_map(); + _fourcc_to_rs2_stream = std::make_shared>(fourcc_to_rs2_stream_map); + raw_fourcc_to_rs2_stream_map = _fourcc_to_rs2_stream; + } + + synthetic_sensor::~synthetic_sensor() + {} + + void synthetic_sensor::register_option(rs2_option id, std::shared_ptr