diff --git a/core/catkin_ws/src/custom_msgs/CMakeLists.txt b/core/catkin_ws/src/custom_msgs/CMakeLists.txt index 14e04df00..98afd04a6 100644 --- a/core/catkin_ws/src/custom_msgs/CMakeLists.txt +++ b/core/catkin_ws/src/custom_msgs/CMakeLists.txt @@ -22,6 +22,7 @@ add_message_files( PIDInfo.msg PIDTerms.msg PWMAllocs.msg + RectInfo.msg RemoteLaunchInfo.msg RunningNode.msg ServoAngleArray.msg @@ -30,6 +31,7 @@ add_message_files( SonarSweepRequest.msg SonarSweepResponse.msg SystemUsage.msg + TaskUpdate.msg ThrusterAllocs.msg ) diff --git a/core/catkin_ws/src/custom_msgs/msg/RectInfo.msg b/core/catkin_ws/src/custom_msgs/msg/RectInfo.msg new file mode 100644 index 000000000..f856e311a --- /dev/null +++ b/core/catkin_ws/src/custom_msgs/msg/RectInfo.msg @@ -0,0 +1,5 @@ +float64 center_x +float64 center_y +float64 width +float64 height +float64 angle \ No newline at end of file diff --git a/core/catkin_ws/src/custom_msgs/msg/TaskUpdate.msg b/core/catkin_ws/src/custom_msgs/msg/TaskUpdate.msg new file mode 100644 index 000000000..03f9648a2 --- /dev/null +++ b/core/catkin_ws/src/custom_msgs/msg/TaskUpdate.msg @@ -0,0 +1,19 @@ +std_msgs/Header header + +uint64 id + +uint64 parent_id + +string name + +uint8 INITIALIZED = 0 +uint8 PAUSED = 1 +uint8 RESUMED = 2 +uint8 THREW = 3 +uint8 RETURNED = 4 +uint8 CLOSED = 5 +uint8 DELETED = 6 +uint8 ERRORED = 7 +uint8 status + +string data \ No newline at end of file diff --git a/foxglove/package-lock.json b/foxglove/package-lock.json index 248c3a566..74a5a2d5b 100644 --- a/foxglove/package-lock.json +++ b/foxglove/package-lock.json @@ -94,13 +94,11 @@ }, "extensions/thruster-allocs-panel/node_modules/argparse": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" + "license": "Python-2.0" }, "extensions/thruster-allocs-panel/node_modules/js-yaml": { "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "license": "MIT", "dependencies": { "argparse": "^2.0.1" }, @@ -108,9 +106,6 @@ "js-yaml": "bin/js-yaml.js" } }, - "extensions/thruster-speeds-panel": { - "extraneous": true - }, "extensions/toggle-controls-panel": { "name": "@duke-robotics/toggle-controls-panel", "version": "0.0.0" @@ -3271,6 +3266,20 @@ "fsevents": "~2.3.2" } }, + "node_modules/chokidar/node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, "node_modules/chrome-trace-event": { "version": "1.0.3", "dev": true, @@ -6548,18 +6557,6 @@ "dev": true, "license": "ISC" }, - "node_modules/fsevents": { - "version": "2.3.3", - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^8.16.0 || ^10.6.0 || >=11.0.0" - } - }, "node_modules/function-bind": { "version": "1.1.2", "license": "MIT", diff --git a/onboard/catkin_ws/src/controls/config/oogway.yaml b/onboard/catkin_ws/src/controls/config/oogway.yaml index cb7d3d660..3936865e7 100644 --- a/onboard/catkin_ws/src/controls/config/oogway.yaml +++ 
b/onboard/catkin_ws/src/controls/config/oogway.yaml @@ -1,9 +1,9 @@ pid: position: x: - Kp: 4 + Kp: 1.8 Ki: 0 - Kd: 5 + Kd: 1.2 Ff: 0 control_effort: min: -1 @@ -11,9 +11,9 @@ pid: derivative_type: 1 error_ramp_rate: 1000 y: - Kp: 3.5 + Kp: 1.8 Ki: 0 - Kd: 5 + Kd: 1.2 Ff: 0 control_effort: min: -1 @@ -21,9 +21,9 @@ pid: derivative_type: 1 error_ramp_rate: 1000 z: - Kp: 1 + Kp: 1.9 Ki: 0 - Kd: 1.5 + Kd: 1.2 Ff: 0 control_effort: min: -1 @@ -31,9 +31,9 @@ pid: derivative_type: 1 error_ramp_rate: 1000 roll: - Kp: 0.7 + Kp: 0.2 Ki: 0 - Kd: 0.5 + Kd: 0.4 Ff: 0 control_effort: min: -1 @@ -41,9 +41,9 @@ pid: derivative_type: 1 error_ramp_rate: 1000 pitch: - Kp: 0.7 + Kp: 0.2 Ki: 0 - Kd: 0.5 + Kd: 0.4 Ff: 0 control_effort: min: -1 @@ -51,9 +51,9 @@ pid: derivative_type: 1 error_ramp_rate: 1000 yaw: - Kp: 0.5 + Kp: 0.2 Ki: 0 - Kd: 1 + Kd: 0.4 Ff: 0 control_effort: min: -1 @@ -204,7 +204,7 @@ desired_power_limits: static_power_global: x: 0 y: 0 - z: -0.19 + z: -0.1625 power_scale_factor: 1 thrusters: - name: bottom_front_left diff --git a/onboard/catkin_ws/src/cv/README.md b/onboard/catkin_ws/src/cv/README.md index d62109f42..3e57bb380 100644 --- a/onboard/catkin_ws/src/cv/README.md +++ b/onboard/catkin_ws/src/cv/README.md @@ -19,8 +19,9 @@ To stream the feed or perform spatial detection using the OAK camera, use `rosla `scripts/` * `depthai_camera_connect.py`: Connects to the OAK camera and uploads the image pipeline. Used by all other DepthAI scripts. * `depthai_publish_save_streams.py`: Publishes a preview of the image feed from the OAK camera and saves encoded streams. This can be used to verify connection to the camera and to check if there are any issues with the camera feed. -* `depthai_spatial_detection.py`: Publishes live spatial detections using a specified model in `depthai_models.yaml`. -* `depthai_simulate_detection.launch`: Publishes spatial detections using a specified model in `depthai_models.yaml` on a still image or image feed. +* `depthai_spatial_detection.py`: Publishes live detections using a specified model in `depthai_models.yaml`. +* `depthai_mono_detection.launch`: Publishes detections using a specified model in `depthai_models.yaml` on a mono camera image feed. +* `depthai_simulate_detection.launch`: Publishes detections using a specified model in `depthai_models.yaml` on a still image or image feed. Should only be used for DepthAI camera testing (not task planning). * `camera_hard_reset.py`: Used to reset a POE camera if it is not connecting properly. The script creates the topics `/offboard/camera_relay` (for sending commands to the camera relay) and `/offboard/camera_relay_status` (for checking the status of the physical relay). Running `rosservice call /enable_camera ` enables or disables the camera. Note: running `camera_hard_reset.py` requires serial.launch to have been run and this may power cycle the camera upon running. By default, the camera will be enabled unless the topic `/offboard/camera_relay` is set to `false`. See `onboard/catkin_ws/src/offboard_comms/README.md` for more information on the relay service. 
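As a reference for the retuned gains in `oogway.yaml` above, here is a minimal single-axis PID sketch showing how `Kp`/`Ki`/`Kd`/`Ff` and the `control_effort` limits combine. This is an illustration only, not the implementation in the `controls` package; the default gain values are the new z-axis numbers from this diff.

```python
# Minimal single-axis PID sketch (illustration only; the actual controls
# package may use a different formulation, e.g. filtered derivative).
# Default gains mirror the new z-axis values in oogway.yaml: Kp=1.9, Ki=0, Kd=1.2, Ff=0.
def pid_step(error, prev_error, integral, dt,
             kp=1.9, ki=0.0, kd=1.2, ff=0.0,
             effort_min=-1.0, effort_max=1.0):
    integral += error * dt
    derivative = (error - prev_error) / dt if dt > 0 else 0.0
    effort = kp * error + ki * integral + kd * derivative + ff
    # Clamp to the control_effort min/max from the config
    effort = max(effort_min, min(effort_max, effort))
    return effort, integral
```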
`launch/` diff --git a/onboard/catkin_ws/src/cv/assets/polyform-a0-buoy-contour.png b/onboard/catkin_ws/src/cv/assets/polyform-a0-buoy-contour.png new file mode 100644 index 000000000..dd744c60f Binary files /dev/null and b/onboard/catkin_ws/src/cv/assets/polyform-a0-buoy-contour.png differ diff --git a/onboard/catkin_ws/src/cv/assets/polyform-a0-buoy.png b/onboard/catkin_ws/src/cv/assets/polyform-a0-buoy.png new file mode 100644 index 000000000..ce900aee7 Binary files /dev/null and b/onboard/catkin_ws/src/cv/assets/polyform-a0-buoy.png differ diff --git a/onboard/catkin_ws/src/cv/configs/usb_cameras.yaml b/onboard/catkin_ws/src/cv/configs/usb_cameras.yaml new file mode 100644 index 000000000..5f2afb12d --- /dev/null +++ b/onboard/catkin_ws/src/cv/configs/usb_cameras.yaml @@ -0,0 +1,21 @@ +front: + device_path: "/dev/video_front" + topic: "front" + focal_length: 2.65 + sensor_size: + width: 3.054 + height: 1.718 + img_size: + width: 640 + height: 480 + +bottom: + device_path: "/dev/video_bottom" + topic: "bottom" + focal_length: 2.65 + sensor_size: + width: 3.054 + height: 1.718 + img_size: + width: 640 + height: 480 \ No newline at end of file diff --git a/onboard/catkin_ws/src/cv/launch/bin_detector.launch b/onboard/catkin_ws/src/cv/launch/bin_detector.launch new file mode 100755 index 000000000..41c56ccb2 --- /dev/null +++ b/onboard/catkin_ws/src/cv/launch/bin_detector.launch @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/onboard/catkin_ws/src/cv/launch/blue_rectangle_detector.launch b/onboard/catkin_ws/src/cv/launch/blue_rectangle_detector.launch new file mode 100644 index 000000000..47e556c44 --- /dev/null +++ b/onboard/catkin_ws/src/cv/launch/blue_rectangle_detector.launch @@ -0,0 +1,3 @@ + + + diff --git a/onboard/catkin_ws/src/cv/launch/buoy_detector_contour_matching.launch b/onboard/catkin_ws/src/cv/launch/buoy_detector_contour_matching.launch new file mode 100644 index 000000000..b8952d373 --- /dev/null +++ b/onboard/catkin_ws/src/cv/launch/buoy_detector_contour_matching.launch @@ -0,0 +1,3 @@ + + + diff --git a/onboard/catkin_ws/src/cv/launch/depthai_mono_detection.launch b/onboard/catkin_ws/src/cv/launch/depthai_mono_detection.launch new file mode 100644 index 000000000..f8b7083b8 --- /dev/null +++ b/onboard/catkin_ws/src/cv/launch/depthai_mono_detection.launch @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + + + + + + + diff --git a/onboard/catkin_ws/src/cv/launch/depthai_save_video.launch b/onboard/catkin_ws/src/cv/launch/depthai_save_video.launch deleted file mode 100644 index bfdebc00a..000000000 --- a/onboard/catkin_ws/src/cv/launch/depthai_save_video.launch +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - diff --git a/onboard/catkin_ws/src/cv/launch/depthai_simulate_detection.launch b/onboard/catkin_ws/src/cv/launch/depthai_simulate_detection.launch index 6c91d3cff..f880eb0a9 100644 --- a/onboard/catkin_ws/src/cv/launch/depthai_simulate_detection.launch +++ b/onboard/catkin_ws/src/cv/launch/depthai_simulate_detection.launch @@ -1,14 +1,16 @@ - - + + + + diff --git a/onboard/catkin_ws/src/cv/launch/depthai_spatial_detection.launch b/onboard/catkin_ws/src/cv/launch/depthai_spatial_detection.launch index e8f2a19fd..91509e772 100644 --- a/onboard/catkin_ws/src/cv/launch/depthai_spatial_detection.launch +++ b/onboard/catkin_ws/src/cv/launch/depthai_spatial_detection.launch @@ -1,12 +1,13 @@ - + - + + @@ -16,5 +17,6 @@ + diff --git a/onboard/catkin_ws/src/cv/launch/path_marker_detector.launch b/onboard/catkin_ws/src/cv/launch/path_marker_detector.launch new 
file mode 100755 index 000000000..59de5721f --- /dev/null +++ b/onboard/catkin_ws/src/cv/launch/path_marker_detector.launch @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/onboard/catkin_ws/src/cv/launch/pink_bins_detector.launch b/onboard/catkin_ws/src/cv/launch/pink_bins_detector.launch new file mode 100755 index 000000000..adac6e9ff --- /dev/null +++ b/onboard/catkin_ws/src/cv/launch/pink_bins_detector.launch @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/onboard/catkin_ws/src/cv/launch/usb_camera.launch b/onboard/catkin_ws/src/cv/launch/usb_camera.launch index d5c6a693f..634e58b99 100644 --- a/onboard/catkin_ws/src/cv/launch/usb_camera.launch +++ b/onboard/catkin_ws/src/cv/launch/usb_camera.launch @@ -1,11 +1,11 @@ - + - + - + diff --git a/onboard/catkin_ws/src/cv/launch/usb_camera_connect.launch b/onboard/catkin_ws/src/cv/launch/usb_camera_connect.launch new file mode 100755 index 000000000..db8682593 --- /dev/null +++ b/onboard/catkin_ws/src/cv/launch/usb_camera_connect.launch @@ -0,0 +1,3 @@ + + + diff --git a/onboard/catkin_ws/src/cv/models/2024_gate_glyphs.blob b/onboard/catkin_ws/src/cv/models/2024_gate_glyphs.blob new file mode 100644 index 000000000..e7dcbdcd4 Binary files /dev/null and b/onboard/catkin_ws/src/cv/models/2024_gate_glyphs.blob differ diff --git a/onboard/catkin_ws/src/cv/models/depthai_models.yaml b/onboard/catkin_ws/src/cv/models/depthai_models.yaml index 3e1db23d6..8a9cdbe88 100644 --- a/onboard/catkin_ws/src/cv/models/depthai_models.yaml +++ b/onboard/catkin_ws/src/cv/models/depthai_models.yaml @@ -44,4 +44,17 @@ yolov7_tiny_2023_main: anchor_masks: {"side52": [0,1,2], "side26": [3,4,5], "side13": [6,7,8]} iou_threshold: 0.5 confidence_threshold: 0.5 - colors: ["DC143C", "FFA500", "FFFF00", "00FF00", "000080", "8A2BE2"] \ No newline at end of file + colors: ["DC143C", "FFA500", "FFFF00", "00FF00", "000080", "8A2BE2"] + +2024_gate_glyphs: + classes: ["gate_blue_ccw", "gate_red_cw", "gate_tick", "gate_whole"] + sizes: {"gate_blue_ccw": [0.24, 0.3], "gate_red_cw": [0.24, 0.3], "gate_tick": [0.051, 0.61], "gate_whole": [3.048, 1.52]} + topic: cv/ + weights: 2024_gate_glyphs.blob + input_size: [416, 416] + coordinate_size: 4 + anchors: [12.0, 16.0, 19.0, 36.0, 40.0, 28.0, 36.0, 75.0, 76.0, 55.0, 72.0, 146.0, 142.0, 110.0, 192.0, 243.0, 459.0, 401.0] + anchor_masks: {"side52": [0,1,2], "side26": [3,4,5], "side13": [6,7,8]} + iou_threshold: 0.5 + confidence_threshold: 0.9 + colors: ["800000", "3C14DC", "00FFFF", "00A5FF"] diff --git a/onboard/catkin_ws/src/cv/scripts/bin_detector.py b/onboard/catkin_ws/src/cv/scripts/bin_detector.py new file mode 100755 index 000000000..d0cce0cb8 --- /dev/null +++ b/onboard/catkin_ws/src/cv/scripts/bin_detector.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python + +import rospy +import cv2 +import numpy as np +from sensor_msgs.msg import CompressedImage, Image +from cv_bridge import CvBridge +from custom_msgs.msg import CVObject +from geometry_msgs.msg import Point +from utils import compute_yaw, calculate_relative_pose, compute_center_distance + + +class BinDetector: + BIN_WIDTH = 0.3048 # width of one square of the bin, in m + + MONO_CAM_IMG_SHAPE = (640, 480) # Width, height in pixels + MONO_CAM_SENSOR_SIZE = (3.054, 1.718) # Width, height in mm + MONO_CAM_FOCAL_LENGTH = 2.65 # Focal length in mm + + def __init__(self): + self.bridge = CvBridge() + # subscribe to image topic to get images + self.image_sub = rospy.Subscriber("/camera/usb/bottom/compressed", CompressedImage, self.image_callback) + + # 
blue bin publiishers + self.blue_bin_hsv_filtered_pub = rospy.Publisher("/cv/bottom/bin_blue/hsv_filtered", Image, queue_size=10) + self.blue_bin_contour_image_pub = rospy.Publisher("/cv/bottom/bin_blue/contour_image", Image, queue_size=10) + self.blue_bin_bounding_box_pub = rospy.Publisher("/cv/bottom/bin_blue/bounding_box", CVObject, queue_size=10) + self.blue_bin_distance_pub = rospy.Publisher("/cv/bottom/bin_blue/distance", Point, queue_size=10) + + # red bin publishers + self.red_bin_hsv_filtered_pub = rospy.Publisher("/cv/bottom/bin_red/hsv_filtered", Image, queue_size=10) + self.red_bin_contour_image_pub = rospy.Publisher("/cv/bottom/bin_red/contour_image", Image, queue_size=10) + self.red_bin_bounding_box_pub = rospy.Publisher("/cv/bottom/bin_red/bounding_box", CVObject, queue_size=10) + self.red_bin_distance_pub = rospy.Publisher("/cv/bottom/bin_red/distance", Point, queue_size=10) + + # centre bin publsihers (NOTE we don't actually publish to these heheheha) + self.bin_center_hsv_filtered_pub = rospy.Publisher("/cv/bottom/bin_center/hsv_filtered", Image, queue_size=10) + self.bin_center_contour_image_pub = rospy.Publisher("/cv/bottom/bin_center/contour_image", Image, queue_size=10) + self.bin_center_bounding_box_pub = rospy.Publisher("/cv/bottom/bin_center/bounding_box", CVObject, + queue_size=10) + self.bin_center_distance_pub = rospy.Publisher("/cv/bottom/bin_center/distance", Point, queue_size=10) + + def image_callback(self, data): + # Convert the compressed ROS image to OpenCV format + np_arr = np.frombuffer(data.data, np.uint8) + frame = cv2.imdecode(np_arr, cv2.IMREAD_COLOR) + frame = frame[:, :-20] + # Process the frame to find and publish information on the bin + self.process_frame(frame) + + def process_frame(self, frame): + # Convert frame to HSV color space + hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) + + # Define range for blue color and create mask + lower_blue = np.array([90, 150, 50]) + upper_blue = np.array([125, 255, 255]) + mask_blue = cv2.inRange(hsv, lower_blue, upper_blue) + + blue_hsv_filtered_msg = self.bridge.cv2_to_imgmsg(mask_blue, "mono8") + self.blue_bin_hsv_filtered_pub.publish(blue_hsv_filtered_msg) + + # Define the range for HSV filtering on the red bin + lower_red_low = np.array([0, 110, 150]) + upper_red_low = np.array([12, 255, 255]) + lower_red_high = np.array([170, 50, 85]) + upper_red_high = np.array([179, 255, 255]) + + # Apply HSV filtering on the image + mask_red1 = cv2.inRange(hsv, lower_red_low, upper_red_low) + mask_red2 = cv2.inRange(hsv, lower_red_high, upper_red_high) + mask_red = cv2.bitwise_or(mask_red1, mask_red2) + + # convert cv2 image to img message, and publish the image + red_hsv_filtered_msg = self.bridge.cv2_to_imgmsg(mask_red, "mono8") + self.red_bin_hsv_filtered_pub.publish(red_hsv_filtered_msg) + + # Apply morphological operations to clean up the binary image + kernel = np.ones((5, 5), np.uint8) + mask_blue = cv2.morphologyEx(mask_blue, cv2.MORPH_OPEN, kernel) + + # Apply morphological operations to clean up the binary image + mask_red = cv2.morphologyEx(mask_red, cv2.MORPH_OPEN, kernel) + mask_red = cv2.morphologyEx(mask_red, cv2.MORPH_OPEN, kernel) + + # Find contours in the mask + contours_blue, _ = cv2.findContours(mask_blue, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + contours_red, _ = cv2.findContours(mask_red, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + if contours_blue: + # takes largest contour + contours_blue = sorted(contours_blue, key=cv2.contourArea, reverse=True) + contours_blue = contours_blue[0] + + # 
only processes if area (in pixels) if selected contour >500 + # publishes bbox, image, distance + bbox, image, dist = self.process_contours(frame.copy(), contours_blue) + if bbox and image and dist and cv2.contourArea(contours_blue) > 500: + self.blue_bin_contour_image_pub.publish(image) + self.blue_bin_bounding_box_pub.publish(bbox) + self.blue_bin_distance_pub.publish(dist) + + if contours_red: + # takes largest contour + contours_red = sorted(contours_red, key=cv2.contourArea, reverse=True) + contours_red = contours_red[0] + + # only processes if area (in pixels) if selected contour >500 + # publishes bbox, image, distance + bbox, image, dist = self.process_contours(frame.copy(), contours_red) + if bbox and image and dist and cv2.contourArea(contours_red) > 500: + self.red_bin_contour_image_pub.publish(image) + self.red_bin_bounding_box_pub.publish(bbox) + self.red_bin_distance_pub.publish(dist) + + def process_contours(self, frame, contours): + # Combine all contours to form the large rectangle + all_points = np.vstack(contours) + + # Get the minimum area rectangle that encloses the combined contour + rect = cv2.minAreaRect(all_points) + + # Draw the rectangle on the frame + box = cv2.boxPoints(rect) + box = np.int0(box) + cv2.drawContours(frame, [box], 0, (0, 0, 255), 3) + + # Calculate the center of the rectangle + rect_center = rect[0] + + x, y, w, h = (rect_center[0], rect_center[1], rect[1][0], rect[1][1]) + + # edge cases integer rounding idk something + if (w == 0): + return None, None, None + + # get dimensions, attributes of relevant shapes + meters_per_pixel = self.BIN_WIDTH / w + + # create CVObject message, and populate relavent attributes + bounding_box = CVObject() + + bounding_box.header.stamp.secs = rospy.Time.now().secs + bounding_box.header.stamp.nsecs = rospy.Time.now().nsecs + + # gets dimns that CVObject wants for our rectangle + bounding_box.xmin = (x) * meters_per_pixel + bounding_box.ymin = (y) * meters_per_pixel + bounding_box.xmax = (x + w) * meters_per_pixel + bounding_box.ymax = (y + h) * meters_per_pixel + + bounding_box.yaw = compute_yaw(x, x + w, self.MONO_CAM_SENSOR_SIZE[0]) # width of camera in in mm + + bounding_box.width = int(w) + bounding_box.height = int(h) + + # Compute distance between center of bounding box and center of image + # Here, image x is robot's y, and image y is robot's z + dist_x, dist_y = compute_center_distance(x, y, *self.MONO_CAM_IMG_SHAPE, height_adjustment_constant=15, + width_adjustment_constant=10) + + # Compute distance between center of bounding box and center of image in meters + # dist_x_meters = dist_x * meters_per_pixel + # dist_y_meters = dist_y * meters_per_pixel + # dist_z_meters = -1 * self.mono_cam_dist_with_obj_width(w, self.BIN_WIDTH) + + # create Point message type, and populate x and y distances + dist_point = Point() + # dist_point.z = dist_z_meters + dist_point.x = dist_x + dist_point.y = -dist_y + + bbox_bounds = (x / self.MONO_CAM_IMG_SHAPE[0], y / self.MONO_CAM_IMG_SHAPE[1], (x+w) / + self.MONO_CAM_IMG_SHAPE[0], (y+h) / self.MONO_CAM_IMG_SHAPE[1]) + + # Point coords represents the 3D position of the object represented by the bounding box relative to the robot + coords_list = calculate_relative_pose(bbox_bounds, + self.MONO_CAM_IMG_SHAPE, + (self.BIN_WIDTH, 0), + self.MONO_CAM_FOCAL_LENGTH, + self.MONO_CAM_SENSOR_SIZE, 1) + bounding_box.coords.x, bounding_box.coords.y, bounding_box.coords.z = coords_list + + # Convert the image with the bounding box to ROS Image message and publish + cv2.circle(frame, 
(int(x), int(y)), 5, (0, 0, 255), -1) + image_msg = self.bridge.cv2_to_imgmsg(frame, "bgr8") + return bounding_box, image_msg, dist_point + + def mono_cam_dist_with_obj_width(self, width_pixels, width_meters): + return (self.MONO_CAM_FOCAL_LENGTH * width_meters * self.MONO_CAM_IMG_SHAPE[0]) \ + / (width_pixels * self.MONO_CAM_SENSOR_SIZE[0]) + + +if __name__ == "__main__": + rospy.init_node('bin_detector', anonymous=True) + detector = BinDetector() + try: + rospy.spin() + except KeyboardInterrupt: + rospy.loginfo("Shutting down Bin Detector node") diff --git a/onboard/catkin_ws/src/cv/scripts/blue_rectangle_detector.py b/onboard/catkin_ws/src/cv/scripts/blue_rectangle_detector.py new file mode 100755 index 000000000..67177fcc2 --- /dev/null +++ b/onboard/catkin_ws/src/cv/scripts/blue_rectangle_detector.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python + +import rospy +import cv2 +import numpy as np +from sensor_msgs.msg import CompressedImage +from std_msgs.msg import Float64 +from cv_bridge import CvBridge, CvBridgeError +from custom_msgs.msg import RectInfo + + +class BlueRectangleDetector: + def __init__(self): + self.bridge = CvBridge() + self.image_sub = rospy.Subscriber("/camera/usb_camera/compressed", CompressedImage, self.image_callback) + self.angle_pub = rospy.Publisher("/cv/bottom/lane_marker_angle", Float64, queue_size=10) + self.distance_pub = rospy.Publisher("/cv/bottom/lane_marker_dist", Float64, queue_size=10) + self.detections_pub = rospy.Publisher("/cv/bottom/detections/compressed", CompressedImage, queue_size=10) + self.rect_info_pub = rospy.Publisher("/cv/bottom/lane_marker", RectInfo, queue_size=10) + + def image_callback(self, data): + try: + # Convert the compressed ROS image to OpenCV format + np_arr = np.frombuffer(data.data, np.uint8) + frame = cv2.imdecode(np_arr, cv2.IMREAD_COLOR) + + # Process the frame to find the angle of the blue rectangle and draw the rectangle + angle, distance, rect_info, processed_frame = self.get_angle_and_distance_of_rectangle(frame) + if angle is not None: + self.angle_pub.publish(Float64(data=angle)) + + if distance is not None: + self.distance_pub.publish(Float64(data=distance)) + + if rect_info is not None: + self.rect_info_pub.publish(rect_info) + + # Publish the processed frame with the rectangle drawn + try: + compressed_image_msg = self.bridge.cv2_to_compressed_imgmsg(processed_frame) + self.detections_pub.publish(compressed_image_msg) + except CvBridgeError as e: + rospy.logerr(f"Failed to publish processed image: {e}") + + except CvBridgeError as e: + rospy.logerr(f"Could not convert image: {e}") + + def get_angle_and_distance_of_rectangle(self, frame): + # Convert frame to HSV color space + hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) + + # Define range for blue color and create mask + lower_blue = np.array([100, 150, 50]) + upper_blue = np.array([140, 255, 255]) + mask = cv2.inRange(hsv, lower_blue, upper_blue) + + # Find contours in the mask + contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + angle = None + distance = None + rect_info = None + if contours: + # Combine all contours to form the large rectangle + all_points = np.vstack(contours) + + # Get the minimum area rectangle that encloses the combined contour + rect = cv2.minAreaRect(all_points) + + # Draw the rectangle on the frame + box = cv2.boxPoints(rect) + box = np.int0(box) + cv2.drawContours(frame, [box], 0, (0, 0, 255), 3) + + # Sort the points based on their x-coordinates to identify left and right sides + box = sorted(box, key=lambda 
pt: pt[0]) + + # Identify left and right side points + left_pts = box[:2] + right_pts = box[2:] + + # Determine which point is higher on the left side + left_top = min(left_pts, key=lambda pt: pt[1]) + + # Determine which point is higher on the right side + right_top = min(right_pts, key=lambda pt: pt[1]) + + angle = rect[-1] + + # Compare the y-coordinates + if right_top[1] < left_top[1]: + # Right side is higher than left side + angle = rect[-1] - 90 + + if angle == -90 or angle == 90: + angle = 0 + + # Calculate the center of the rectangle + rect_center = rect[0] + + # Calculate the center of the frame + frame_center = (frame.shape[1] / 2, frame.shape[0] / 2) + + # Calculate the vertical distance between the two centers + vertical_distance = rect_center[1] - frame_center[1] + + # The distance is positive if the rectangle is below the centerline, negative if above + distance = -vertical_distance + + # Create RectInfo message + rect_info = RectInfo() + rect_info.center_x = rect_center[0] + rect_info.center_y = rect_center[1] + rect_info.width = rect[1][0] + rect_info.height = rect[1][1] + rect_info.angle = angle + + return angle, distance, rect_info, frame + + +if __name__ == "__main__": + rospy.init_node('blue_rectangle_detector', anonymous=True) + brd = BlueRectangleDetector() + try: + rospy.spin() + except KeyboardInterrupt: + rospy.loginfo("Shutting down Blue Rectangle Detector node") diff --git a/onboard/catkin_ws/src/cv/scripts/buoy_detector_contour_matching.py b/onboard/catkin_ws/src/cv/scripts/buoy_detector_contour_matching.py new file mode 100755 index 000000000..face67b79 --- /dev/null +++ b/onboard/catkin_ws/src/cv/scripts/buoy_detector_contour_matching.py @@ -0,0 +1,192 @@ +#!/usr/bin/env python + +import rospy +import resource_retriever as rr +import cv2 +import numpy as np +from cv_bridge import CvBridge +from sensor_msgs.msg import CompressedImage, Image +from custom_msgs.msg import CVObject +from utils import compute_yaw, calculate_relative_pose + + +class BuoyDetectorContourMatching: + + BUOY_WIDTH = 0.2032 # Width of buoy in meters + + MONO_CAM_IMG_SHAPE = (640, 480) # Width, height in pixels + MONO_CAM_SENSOR_SIZE = (3.054, 1.718) # Width, height in mm + MONO_CAM_FOCAL_LENGTH = 2.65 # Focal length in mm + + def __init__(self): + rospy.init_node('buoy_detector_contour_matching', anonymous=True) + + # ima + path_to_reference_img = rr.get_filename('package://cv/assets/polyform-a0-buoy-contour.png', use_protocol=False) + + # Load the reference image in grayscale (assumes the image is already binary: white and black) + self.reference_image = cv2.imread(path_to_reference_img, cv2.IMREAD_GRAYSCALE) + + # Compute the contours directly on the binary image + self.ref_contours, _ = cv2.findContours(self.reference_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + self.bridge = CvBridge() + self.image_sub = rospy.Subscriber('/camera/usb/front/compressed', CompressedImage, self.image_callback) + self.bounding_box_pub = rospy.Publisher('/cv/front_usb/buoy/bounding_box', CVObject, queue_size=1) + self.hsv_filtered_pub = rospy.Publisher('/cv/front_usb/buoy/hsv_filtered', Image, queue_size=1) + self.contour_image_pub = rospy.Publisher('/cv/front_usb/buoy/contour_image', Image, queue_size=1) + self.contour_image_with_bbox_pub = rospy.Publisher('/cv/front_usb/buoy/detections', Image, queue_size=1) + + self.last_n_bboxes = [] + self.n = 10 # Set the number of last bounding boxes to store + + def image_callback(self, data): + try: + # Convert the image from the compressed format to OpenCV 
format + np_arr = np.frombuffer(data.data, np.uint8) + image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR) + hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) + except Exception as e: + rospy.logerr("Failed to convert image: %s", e) + return + + # Define the range for HSV filtering on the red buoy + lower_red = np.array([0, 110, 190]) + upper_red = np.array([12, 255, 255]) + lower_red_upper = np.array([175, 180, 190]) + upper_red_upper = np.array([179, 255, 255]) + + # Apply HSV filtering on the image + mask1 = cv2.inRange(hsv_image, lower_red, upper_red) + mask2 = cv2.inRange(hsv_image, lower_red_upper, upper_red_upper) + red_hsv = cv2.bitwise_or(mask1, mask2) + + # Apply morphological operations to clean up the binary image + kernel = np.ones((5, 5), np.uint8) + red_hsv = cv2.morphologyEx(red_hsv, cv2.MORPH_CLOSE, kernel) + + # Publish the HSV filtered image + hsv_filtered_msg = self.bridge.cv2_to_imgmsg(red_hsv, "mono8") + self.hsv_filtered_pub.publish(hsv_filtered_msg) + + # Find contours in the image + contours, _ = cv2.findContours(red_hsv, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + # Sort contours by area + contours = sorted(contours, key=cv2.contourArea, reverse=True) + + # Get the top 3 contours with the largest area + contours = contours[:3] + + # (prepares for) publish contours only + image_with_contours = image.copy() + cv2.drawContours(image_with_contours, contours, -1, (255, 0, 0), 2) + contour_image_msg = self.bridge.cv2_to_imgmsg(image_with_contours, "bgr8") + self.contour_image_pub.publish(contour_image_msg) + + # only gets contours w/ area>100 + contours = [contour for contour in contours if cv2.contourArea(contour) > 100] + + best_cnt = None + similar_size_contours = [] + + # Match contours with the reference image contours + for cnt in contours: + match = cv2.matchShapes(self.ref_contours[0], cnt, cv2.CONTOURS_MATCH_I1, 0.0) + if match < 0.2: + similar_size_contours.append(cnt) + + # takes contour w/ greatest y-distance + if similar_size_contours: + similar_size_contours.sort(key=lambda x: cv2.contourArea(x)) + best_cnt = similar_size_contours[0] + for cnt in similar_size_contours: + x, y, w, h = cv2.boundingRect(cnt) + if y + h > cv2.boundingRect(best_cnt)[1] + cv2.boundingRect(best_cnt)[3]: + best_cnt = cnt + + if best_cnt is not None: + x, y, w, h = cv2.boundingRect(best_cnt) + bbox = (x, y, w, h) + self.publish_bbox(bbox, image) + + # self.last_n_bboxes.append(bbox) + # if len(self.last_n_bboxes) > self.n: + # self.last_n_bboxes.pop(0) + + # filtered_bboxes = self.filter_outliers(self.last_n_bboxes) + # if filtered_bboxes: + # most_recent_bbox = filtered_bboxes[-1] # Get the most recent bbox + # self.publish_bbox(most_recent_bbox, image) + + # Draw bounding box on the image + cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2) + + # Convert the image with the bounding box to ROS Image message and publish + image_msg = self.bridge.cv2_to_imgmsg(image, "bgr8") + self.contour_image_with_bbox_pub.publish(image_msg) + + def filter_outliers(self, bboxes): + if len(bboxes) <= 2: + return bboxes + + centers = [(x + w / 2, y + h / 2) for x, y, w, h in bboxes] + mean_center = np.mean(centers, axis=0) + distances = [np.linalg.norm(np.array(center) - mean_center) for center in centers] + std_distance = np.std(distances) + + areas = [w * h for x, y, w, h in bboxes] + mean_area = np.mean(areas) + std_area = np.std(areas) + + filtered_bboxes = [ + bboxes[i] for i in range(len(bboxes)) + if distances[i] <= mean_center[0] + 2 * std_distance and abs(areas[i] - mean_area) <= 1 * 
std_area + ] + return filtered_bboxes + + # we're not commetning the top of this function it's pretty much just a bunch of middle school geometry + def publish_bbox(self, bbox, image): + + x, y, w, h = bbox + + bounding_box = CVObject() + + bounding_box.header.stamp.secs = rospy.Time.now().secs + bounding_box.header.stamp.nsecs = rospy.Time.now().nsecs + + bounding_box.xmin = x + bounding_box.ymin = y + bounding_box.xmax = x + w + bounding_box.ymax = y + h + + bounding_box.yaw = -compute_yaw(x / self.MONO_CAM_IMG_SHAPE[0], (x + w) / self.MONO_CAM_IMG_SHAPE[0], + self.MONO_CAM_IMG_SHAPE[0]) # Update 0 with whatever is self.camera_pixel_width + + bounding_box.width = w + bounding_box.height = h + + # bbox_bounds = (x, y, x + w, y + h) + + bbox_bounds = (x / self.MONO_CAM_IMG_SHAPE[0], y / self.MONO_CAM_IMG_SHAPE[1], + (x+w) / self.MONO_CAM_IMG_SHAPE[0], (y+h) / self.MONO_CAM_IMG_SHAPE[1]) + + # rospy.loginfo(bbox_bounds) + + # Point coords represents the 3D position of the object represented by the bounding box relative to the robot + coords_list = calculate_relative_pose(bbox_bounds, + self.MONO_CAM_IMG_SHAPE, + (self.BUOY_WIDTH, 0), + self.MONO_CAM_FOCAL_LENGTH, + self.MONO_CAM_SENSOR_SIZE, 1) + bounding_box.coords.x, bounding_box.coords.y, bounding_box.coords.z = coords_list + + self.bounding_box_pub.publish(bounding_box) + + +if __name__ == '__main__': + try: + node = BuoyDetectorContourMatching() + rospy.spin() + except rospy.ROSInterruptException: + pass diff --git a/onboard/catkin_ws/src/cv/scripts/correct.py b/onboard/catkin_ws/src/cv/scripts/correct.py new file mode 100644 index 000000000..b4b5c557d --- /dev/null +++ b/onboard/catkin_ws/src/cv/scripts/correct.py @@ -0,0 +1,306 @@ +""" +Adapted from dive-color-corrector by bornfree +https://github.com/bornfree/dive-color-corrector/blob/81f2664f4da4c025454dcff38b957fb43ca41b86/correct.py +""" + +import numpy as np +import cv2 +import math + +THRESHOLD_RATIO = 2000 +MIN_AVG_RED = 60 +MAX_HUE_SHIFT = 120 +BLUE_MAGIC_VALUE = 1.2 +SAMPLE_SECONDS = 2 # Extracts color correction from every N seconds + + +def hue_shift_red(mat, h): + + U = math.cos(h * math.pi / 180) + W = math.sin(h * math.pi / 180) + + r = (0.299 + 0.701 * U + 0.168 * W) * mat[..., 0] + g = (0.587 - 0.587 * U + 0.330 * W) * mat[..., 1] + b = (0.114 - 0.114 * U - 0.497 * W) * mat[..., 2] + + return np.dstack([r, g, b]) + + +def normalizing_interval(array): + + high = 255 + low = 0 + max_dist = 0 + + for i in range(1, len(array)): + dist = array[i] - array[i - 1] + if dist > max_dist: + max_dist = dist + high = array[i] + low = array[i - 1] + + return (low, high) + + +def apply_filter(mat, filt): + + r = mat[..., 0] + g = mat[..., 1] + b = mat[..., 2] + + r = r * filt[0] + g * filt[1] + b * filt[2] + filt[4] * 255 + g = g * filt[6] + filt[9] * 255 + b = b * filt[12] + filt[14] * 255 + + filtered_mat = np.dstack([r, g, b]) + filtered_mat = np.clip(filtered_mat, 0, 255).astype(np.uint8) + + return filtered_mat + + +def get_filter_matrix(mat): + + mat = cv2.resize(mat, (256, 256)) + + # Get average values of RGB + avg_mat = np.array(cv2.mean(mat)[:3], dtype=np.uint8) + + # Find hue shift so that average red reaches MIN_AVG_RED + new_avg_r = avg_mat[0] + hue_shift = 0 + while new_avg_r < MIN_AVG_RED: + + shifted = hue_shift_red(avg_mat, hue_shift) + new_avg_r = np.sum(shifted) + hue_shift += 1 + if hue_shift > MAX_HUE_SHIFT: + new_avg_r = MIN_AVG_RED + + # Apply hue shift to whole image and replace red channel + shifted_mat = hue_shift_red(mat, hue_shift) + new_r_channel = 
np.sum(shifted_mat, axis=2) + new_r_channel = np.clip(new_r_channel, 0, 255) + mat[..., 0] = new_r_channel + + # Get histogram of all channels + hist_r = cv2.calcHist([mat], [0], None, [256], [0, 256]) + hist_g = cv2.calcHist([mat], [1], None, [256], [0, 256]) + hist_b = cv2.calcHist([mat], [2], None, [256], [0, 256]) + + normalize_mat = np.zeros((256, 3)) + threshold_level = (mat.shape[0] * mat.shape[1]) / THRESHOLD_RATIO + for x in range(256): + + if hist_r[x] < threshold_level: + normalize_mat[x][0] = x + + if hist_g[x] < threshold_level: + normalize_mat[x][1] = x + + if hist_b[x] < threshold_level: + normalize_mat[x][2] = x + + normalize_mat[255][0] = 255 + normalize_mat[255][1] = 255 + normalize_mat[255][2] = 255 + + adjust_r_low, adjust_r_high = normalizing_interval(normalize_mat[..., 0]) + adjust_g_low, adjust_g_high = normalizing_interval(normalize_mat[..., 1]) + adjust_b_low, adjust_b_high = normalizing_interval(normalize_mat[..., 2]) + + shifted = hue_shift_red(np.array([1, 1, 1]), hue_shift) + shifted_r, shifted_g, shifted_b = shifted[0][0] + + red_gain = 256 / (adjust_r_high - adjust_r_low) + green_gain = 256 / (adjust_g_high - adjust_g_low) + blue_gain = 256 / (adjust_b_high - adjust_b_low) + + redOffset = (-adjust_r_low / 256) * red_gain + greenOffset = (-adjust_g_low / 256) * green_gain + blueOffset = (-adjust_b_low / 256) * blue_gain + + adjust_red = shifted_r * red_gain + adjust_red_green = shifted_g * red_gain + adjust_red_blue = shifted_b * red_gain * BLUE_MAGIC_VALUE + + return np.array( + [ + adjust_red, + adjust_red_green, + adjust_red_blue, + 0, + redOffset, + 0, + green_gain, + 0, + 0, + greenOffset, + 0, + 0, + blue_gain, + 0, + blueOffset, + 0, + 0, + 0, + 1, + 0, + ] + ) + + +def correct(mat): + original_mat = mat.copy() + + filter_matrix = get_filter_matrix(mat) + + corrected_mat = apply_filter(original_mat, filter_matrix) + corrected_mat = cv2.cvtColor(corrected_mat, cv2.COLOR_RGB2BGR) + + return corrected_mat + + +def correct_image(input_path, output_path): + mat = cv2.imread(input_path) + rgb_mat = cv2.cvtColor(mat, cv2.COLOR_BGR2RGB) + + corrected_mat = correct(rgb_mat) + + cv2.imwrite(output_path, corrected_mat) + + preview = mat.copy() + width = preview.shape[1] // 2 + preview[::, width:] = corrected_mat[::, width:] + + preview = cv2.resize(preview, (960, 540)) + + return cv2.imencode(".png", preview)[1].tobytes() + + +def analyze_video(input_video_path, output_video_path): + + # Initialize new video writer + cap = cv2.VideoCapture(input_video_path) + fps = math.ceil(cap.get(cv2.CAP_PROP_FPS)) + frame_count = math.ceil(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + # Get filter matrices for every 10th frame + filter_matrix_indexes = [] + filter_matrices = [] + count = 0 + + print("Analyzing...") + while cap.isOpened(): + + count += 1 + print(f"{count} frames", end="\r") + ret, frame = cap.read() + if not ret: + # End video read if we have gone beyond reported frame count + if count >= frame_count: + break + + # Failsafe to prevent an infinite loop + if count >= 1e6: + break + + # Otherwise this is just a faulty frame read, try reading next frame + continue + + # Pick filter matrix from every N seconds + if count % (fps * SAMPLE_SECONDS) == 0: + mat = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + filter_matrix_indexes.append(count) + filter_matrices.append(get_filter_matrix(mat)) + + yield count + + cap.release() + + # Build a interpolation function to get filter matrix at any given frame + filter_matrices = np.array(filter_matrices) + + yield { + "input_video_path": 
input_video_path, + "output_video_path": output_video_path, + "fps": fps, + "frame_count": count, + "filters": filter_matrices, + "filter_indices": filter_matrix_indexes, + } + + +def process_video(video_data, yield_preview=False): + + cap = cv2.VideoCapture(video_data["input_video_path"]) + + frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) + frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) + + fourcc = cv2.VideoWriter_fourcc(*"mp4v") + new_video = cv2.VideoWriter( + video_data["output_video_path"], + fourcc, + video_data["fps"], + (int(frame_width), int(frame_height)), + ) + + filter_matrices = video_data["filters"] + filter_indices = video_data["filter_indices"] + + filter_matrix_size = len(filter_matrices[0]) + + def get_interpolated_filter_matrix(frame_number): + + return [ + np.interp(frame_number, filter_indices, filter_matrices[..., x]) + for x in range(filter_matrix_size) + ] + + print("Processing...") + + frame_count = video_data["frame_count"] + + count = 0 + cap = cv2.VideoCapture(video_data["input_video_path"]) + while cap.isOpened(): + + count += 1 + percent = 100 * count / frame_count + print("{:.2f}".format(percent), end=" % \r") + ret, frame = cap.read() + + if not ret: + # End video read if we have gone beyond reported frame count + if count >= frame_count: + break + + # Failsafe to prevent an infinite loop + if count >= 1e6: + break + + # Otherwise this is just a faulty frame read, try reading next + continue + + # Apply the filter + rgb_mat = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + interpolated_filter_matrix = get_interpolated_filter_matrix(count) + corrected_mat = apply_filter(rgb_mat, interpolated_filter_matrix) + corrected_mat = cv2.cvtColor(corrected_mat, cv2.COLOR_RGB2BGR) + + new_video.write(corrected_mat) + + if yield_preview: + preview = frame.copy() + width = preview.shape[1] // 2 + height = preview.shape[0] // 2 + preview[::, width:] = corrected_mat[::, width:] + + preview = cv2.resize(preview, (width, height)) + + yield percent, cv2.imencode(".png", preview)[1].tobytes() + else: + yield None + + cap.release() + new_video.release() diff --git a/onboard/catkin_ws/src/cv/scripts/depthai_camera_connect.py b/onboard/catkin_ws/src/cv/scripts/depthai_camera_connect.py index b89f6fcad..7489b9420 100755 --- a/onboard/catkin_ws/src/cv/scripts/depthai_camera_connect.py +++ b/onboard/catkin_ws/src/cv/scripts/depthai_camera_connect.py @@ -22,19 +22,16 @@ def connect(pipeline): # If device is not None, a successful connection to the camera was made device = None - # IP address used for local connection - device_info = dai.DeviceInfo("169.254.1.222") - # Number of attempts that will be made to connect to the camera total_tries = 5 - for i in range(total_tries): + for _ in range(total_tries): if rospy.is_shutdown(): break try: # Scan for camera IP address using custom autodiscovery - ip = custom_autodiscovery() + ip = custom_autodiscovery() # TODO: use MAC address from camera config file # Try connecting with the discovered IP address device = dai.Device(pipeline, dai.DeviceInfo(ip)) @@ -49,38 +46,6 @@ def connect(pipeline): if rospy.is_shutdown(): break - try: - # Try connecting with DepthAI autodiscovery - device = dai.Device(pipeline) - - # If the execution reaches the following return statement, the line above did not raise an exception, so a - # successful camera connection was made, and device should be returned - return device - - except RuntimeError: - pass - - if rospy.is_shutdown(): - break - - try: - # Try connecting with static IP address - device = 
dai.Device(pipeline, device_info) - - # If the execution reaches the following return statement, the line above did not raise an exception, so a - # successful camera connection was made, and device should be returned - return device - - except RuntimeError as e: - # For all tries before the last one, don't raise the exception and try connecting again - # On the last try, raise the exception so DepthAI code doesn't run without a successful camera connection - if i == total_tries - 1: - raise RuntimeError((f"{total_tries} attempts were made to connect to the DepthAI camera using " - "autodiscovery and static IP address specification. All attempts failed.")) from e - - if rospy.is_shutdown(): - break - # Wait two seconds before trying again # This ensures the script does not terminate if the camera is just temporarily unavailable rospy.sleep(2) diff --git a/onboard/catkin_ws/src/cv/scripts/depthai_mono_detection.py b/onboard/catkin_ws/src/cv/scripts/depthai_mono_detection.py new file mode 100755 index 000000000..23166c4ef --- /dev/null +++ b/onboard/catkin_ws/src/cv/scripts/depthai_mono_detection.py @@ -0,0 +1,424 @@ +#!/usr/bin/env python3 + +import rospy +import resource_retriever as rr +import yaml +import math +import depthai_camera_connect +import depthai as dai +import numpy as np +from utils import DetectionVisualizer, calculate_relative_pose +from image_tools import ImageTools +import cv2 + +from custom_msgs.msg import CVObject, SonarSweepRequest, SonarSweepResponse +from sensor_msgs.msg import CompressedImage +from std_msgs.msg import String + +CAMERA_CONFIG_PATH = 'package://cv/configs/usb_cameras.yaml' +with open(rr.get_filename(CAMERA_CONFIG_PATH, use_protocol=False)) as f: + cameras = yaml.safe_load(f) +CAMERA = cameras['front'] # TODO: use ROS params to select camera + +MM_IN_METER = 1000 +DEPTHAI_OBJECT_DETECTION_MODELS_FILEPATH = 'package://cv/models/depthai_models.yaml' +HORIZONTAL_FOV = 95 +SONAR_DEPTH = 10 +SONAR_RANGE = 1.75 +SONAR_REQUESTS_PATH = 'sonar/request' +SONAR_RESPONSES_PATH = 'sonar/cv/response' +TASK_PLANNING_REQUESTS_PATH = "controls/desired_feature" + +GATE_IMAGE_WIDTH = 0.2452 # Width of gate images in meters +GATE_IMAGE_HEIGHT = 0.2921 # Height of gate images in meters + +# Camera constants +FOCAL_LENGTH = CAMERA['focal_length'] # Focal length of camera in mm +SENSOR_SIZE = (CAMERA['sensor_size']['width'], CAMERA['sensor_size']['height']) # Sensor size in mm + + +# Compute detections on live camera feed and publish spatial coordinates for detected objects +class DepthAISpatialDetector: + def __init__(self): + """ + Initializes the ROS node. 
Loads the yaml file at cv/models/depthai_models.yaml + """ + rospy.init_node('depthai_spatial_detection', anonymous=True) + self.feed_path = rospy.get_param("~feed_path") + self.running_model = rospy.get_param("~model") + self.rgb_raw = rospy.get_param("~rgb_raw") + self.rgb_detections = rospy.get_param("~rgb_detections") + self.queue_depth = rospy.get_param("~depth") # Whether to output depth map + self.sync_nn = rospy.get_param("~sync_nn") + self.using_sonar = rospy.get_param("~using_sonar") + self.show_class_name = rospy.get_param("~show_class_name") + self.show_confidence = rospy.get_param("~show_confidence") + self.device = None + + with open(rr.get_filename(DEPTHAI_OBJECT_DETECTION_MODELS_FILEPATH, + use_protocol=False)) as f: + self.models = yaml.safe_load(f) + + self.camera = 'front' + self.pipeline = None + self.publishers = {} # Keys are the class names of a given model + self.output_queues = {} # Keys are "rgb", "depth", and "detections" + self.connected = False + self.current_model_name = None + self.classes = None + self.camera_pixel_width = None + self.camera_pixel_height = None + self.detection_feed_publisher = None + self.rgb_preview_publisher = None + self.detection_visualizer = None + + self.image_tools = ImageTools() + + self.sonar_response = (0, 0) + self.in_sonar_range = True + + # By default the first task is going through the gate + self.current_priority = "buoy_abydos_serpenscaput" + + # Initialize publishers and subscribers for sonar/task planning + self.sonar_requests_publisher = rospy.Publisher( + SONAR_REQUESTS_PATH, SonarSweepRequest, queue_size=10) + self.sonar_response_subscriber = rospy.Subscriber( + SONAR_RESPONSES_PATH, SonarSweepResponse, self.update_sonar) + self.desired_detection_feature = rospy.Subscriber( + TASK_PLANNING_REQUESTS_PATH, String, self.update_priority) + + rospy.Subscriber(self.feed_path, CompressedImage, self._update_latest_img, queue_size=1) + + def _update_latest_img(self, img_msg): + """ Send an image to the device for detection + + Args: + img_msg (sensor_msgs.msg.CompressedImage): Image to send to the device + """ + model = self.models[self.current_model_name] + + # Format a cv2 image to be sent to the device + def to_planar(arr: np.ndarray, shape: tuple) -> np.ndarray: + return cv2.resize(arr, shape).transpose(2, 0, 1).flatten() + + latest_img = self.image_tools.convert_to_cv2(img_msg) + + # Input queue will be used to send video frames to the device. + if self.device: + input_queue = self.device.getInputQueue("nn_input") + + # Send a message to the ColorCamera to capture a still image + img = dai.ImgFrame() + img.setType(dai.ImgFrame.Type.BGR888p) + + img.setData(to_planar(latest_img, (416, 416))) + + img.setWidth(model['input_size'][0]) + img.setHeight(model['input_size'][1]) + + input_queue.send(img) + + def build_pipeline(self, nn_blob_path, sync_nn): + """ + Get the DepthAI Pipeline for 3D object localization. Inspiration taken from + https://docs.luxonis.com/projects/api/en/latest/samples/SpatialDetection/spatial_tiny_yolo/. + To understand the DepthAI pipeline structure, please see https://docs.luxonis.com/projects/api/en/latest/. + This pipeline computes the depth map using the two mono cameras. This depth map and the RGB feed are fed into + the YoloSpatialDetection Node, which detects objects and computes the average depth within the bounding box + for any detected object. The object detection model for this node is loaded from the nnBlobPath. 
For info + about the YoloSpatialDetection Node, see + https://docs.luxonis.com/projects/api/en/latest/components/nodes/yolo_spatial_detection_network/. + The output queues available from this pipeline are: + - "rgb": contains the 400x400 RGB preview of the camera feed. + - "detections": contains SpatialImgDetections messages (https://docs.luxonis.com/projects/api/en/latest/ + components/messages/spatial_img_detections/#spatialimgdetections), which includes bounding boxes for + detections as well as XYZ coordinates of the detected objects. + - "depth": contains ImgFrame messages with UINT16 values representing the depth in millimeters by default. + See the property depth in https://docs.luxonis.com/projects/api/en/latest/components/nodes/stereo_depth/ + + :param nn_blob_path: Path to blob file used for object detection. + :param sync_nn: If True, sync the RGB output feed with the detection from the neural network. Needed if the RGB + feed output will be used and needs to be synced with the object detections. + :return: depthai.Pipeline object to compute + """ + model = self.models[self.current_model_name] + + pipeline = dai.Pipeline() + + # Define sources and outputs + spatial_detection_network = pipeline.create(dai.node.YoloDetectionNetwork) + feed_out = pipeline.create(dai.node.XLinkOut) + + xout_nn = pipeline.create(dai.node.XLinkOut) + xout_nn.setStreamName("detections") + + xin_nn_input = pipeline.create(dai.node.XLinkIn) + xin_nn_input.setStreamName("nn_input") + xin_nn_input.setNumFrames(2) + xin_nn_input.setMaxDataSize(416*416*3) + + # General spatial detection network parameters + spatial_detection_network.setBlobPath(nn_blob_path) + spatial_detection_network.setConfidenceThreshold(model['confidence_threshold']) + spatial_detection_network.input.setBlocking(False) + + # Yolo specific parameters + spatial_detection_network.setNumClasses(len(model['classes'])) + spatial_detection_network.setCoordinateSize(model['coordinate_size']) + spatial_detection_network.setAnchors(np.array(model['anchors'])) + spatial_detection_network.setAnchorMasks(model['anchor_masks']) + spatial_detection_network.setIouThreshold(model['iou_threshold']) + + # Linking + xin_nn_input.out.link(spatial_detection_network.input) + + spatial_detection_network.out.link(xout_nn.input) + + # Feed the image stream to the neural net input node + feed_out.setStreamName("feed") + spatial_detection_network.passthrough.link(feed_out.input) + + return pipeline + + def init_model(self, model_name): + """ + Creates and assigns the pipeline and sets the current model name. + + :param model_name: Name of the model. The model name should match a key in cv/models/depthai_models.yaml. + For example, if depthai_models.yaml is: + + gate: + classes: ['gate', 'gate_side', 'gate_tick', 'gate_top', 'start_gate'] + topic: cv/ + weights: yolo_v4_tiny_openvino_2021.3_6shave-2022-7-21_416_416.blob + input_size: [416, 416] + coordinate_size: 4 + anchors: [10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319] + anchor_masks: {"side26": [0, 1, 2], "side13": [3, 4, 5]} + + Then, a possible model name is "gate". 
+ """ + # If the model is already set, don't reinitialize + if model_name == self.current_model_name: + return + + self.current_model_name = model_name + + model = self.models[model_name] + + self.classes = model['classes'] + + self.camera_pixel_width, self.camera_pixel_height = model['input_size'] + + self.colors = model['colors'] + + blob_path = rr.get_filename(f"package://cv/models/{model['weights']}", + use_protocol=False) + self.pipeline = self.build_pipeline(blob_path, self.sync_nn) + + def init_publishers(self, model_name): + """ + Initialize the publishers for the node. A publisher is created for each class that the model predicts. + The publishers are created in format: "cv/camera/model_name". + :param model_name: Name of the model that is being used. + """ + model = self.models[model_name] + + # Create a CVObject publisher for each class + publisher_dict = {} + for model_class in model['classes']: + publisher_name = f"cv/{self.camera}/{model_class}" + publisher_dict[model_class] = rospy.Publisher(publisher_name, + CVObject, + queue_size=10) + self.publishers = publisher_dict + + if self.rgb_detections: + self.detection_feed_publisher = rospy.Publisher("cv/front/detections/compressed", CompressedImage, + queue_size=10) + + def init_queues(self, device): + """ + Assigns output queues from the pipeline to dictionary of queues. + :param device: DepthAI.Device object for the connected device. + See https://docs.luxonis.com/projects/api/en/latest/components/device/ + """ + # If the output queues are already set, don't reinitialize + if self.connected: + return + + self.output_queues["detections"] = device.getOutputQueue(name="detections", maxSize=1, blocking=False) + self.output_queues["passthrough"] = device.getOutputQueue( + name="feed", maxSize=1, blocking=False) + + self.input_queue = device.getInputQueue(name="nn_input", maxSize=1, blocking=False) + + self.connected = True # Flag that the output queues have been initialized + + self.detection_visualizer = DetectionVisualizer(self.classes, self.colors, + self.show_class_name, self.show_confidence) + + def detect(self): + """ + Get current detections from output queues and publish. + """ + # init_output_queues must be called before detect + if not self.connected: + rospy.logwarn("Output queues are not initialized so cannot detect. Call init_output_queues first.") + return + + # Get detections from output queues + inDet = self.output_queues["detections"].tryGet() + if not inDet: + return + detections = inDet.detections + + detections_dict = {} + for detection in detections: + prev_conf, prev_detection = detections_dict.get(detection.label, (None, None)) + if (prev_conf is not None and detection.confidence > prev_conf) or prev_conf is None: + detections_dict[detection.label] = detection.confidence, detection + + detections = [detection for _, detection in detections_dict.values()] + model = self.models[self.current_model_name] + + # Publish detections feed + passthrough_feed = self.output_queues["passthrough"].tryGet() + if not passthrough_feed: + return + frame = passthrough_feed.getCvFrame() + + if self.rgb_detections: + detections_visualized = self.detection_visualizer.visualize_detections(frame, detections) + detections_img_msg = self.image_tools.convert_to_ros_compressed_msg(detections_visualized) + self.detection_feed_publisher.publish(detections_img_msg) + + # Process and publish detections. 
If using sonar, override det robot x coordinate + for detection in detections: + + # Bounding box + bbox = (detection.xmin, detection.ymin, + detection.xmax, detection.ymax) + + # Label name + label_idx = detection.label + label = self.classes[label_idx] + + confidence = detection.confidence + + # Calculate relative pose + det_coords_robot_mm = calculate_relative_pose(bbox, model['input_size'], model['sizes'][label], + FOCAL_LENGTH, SENSOR_SIZE, 0.814) + + # Find yaw angle offset + left_end_compute = self.compute_angle_from_x_offset(detection.xmin * self.camera_pixel_width) + right_end_compute = self.compute_angle_from_x_offset(detection.xmax * self.camera_pixel_width) + midpoint = (left_end_compute + right_end_compute) / 2.0 + yaw_offset = -math.radians(midpoint) # Degrees to radians + + self.publish_prediction( + bbox, det_coords_robot_mm, yaw_offset, label, confidence, + (self.camera_pixel_height, self.camera_pixel_width), self.using_sonar) + + def publish_prediction(self, bbox, det_coords, yaw, label, confidence, + shape, using_sonar): + """ + Publish predictions to label-specific topic. Publishes to /cv/[camera]/[label]. + + :param bbox: Tuple for the bounding box. + Values are from 0-1, where X increases left to right and Y increases top to bottom. + :param det_coords: Tuple with the X, Y, and Z values in meters, and in the robot rotational reference frame. + :param label: Predicted label for the detection. + :param confidence: Confidence for the detection, from 0 to 1. + :param shape: Tuple with the (height, width) of the image. NOTE: This is in reverse order from the model. + """ + object_msg = CVObject() + + object_msg.header.stamp.secs = rospy.Time.now().secs + object_msg.header.stamp.nsecs = rospy.Time.now().nsecs + + object_msg.label = label + object_msg.score = confidence + + object_msg.coords.x = det_coords[0] + object_msg.coords.y = det_coords[1] + object_msg.coords.z = det_coords[2] + + object_msg.xmin = bbox[0] + object_msg.ymin = bbox[1] + object_msg.xmax = bbox[2] + object_msg.ymax = bbox[3] + + object_msg.yaw = yaw + + object_msg.height = shape[0] + object_msg.width = shape[1] + + object_msg.sonar = using_sonar + + if self.publishers: + # Flush out 0, 0, 0 values + # if object_msg.coords.x != 0 and object_msg.coords.y != 0 and object_msg.coords.z != 0: + self.publishers[label].publish(object_msg) + + def update_sonar(self, sonar_results): + """ + Callback function for listenting to sonar response + Updates instance variable self.sonar_response based on + what sonar throws back if it is in range (> SONAR_RANGE = 1.75m) + """ + # Check to see if the sonar is in range - are results from sonar valid? + if not sonar_results.is_object: + return + if sonar_results.pose.position.x > SONAR_RANGE and sonar_results.pose.position.x <= SONAR_DEPTH: + self.in_sonar_range = True + self.sonar_response = (sonar_results.pose.position.x, sonar_results.pose.position.y) + else: + self.in_sonar_range = False + + def update_priority(self, object): + """ + Update the current priority class. If the priority class is detected, sonar will be called. + """ + self.current_priority = object + + def run(self): + """ + Runs the selected model on the connected device. + :return: False if the model is not in cv/models/depthai_models.yaml. + Otherwise, the model will be run on the device. 
+ """ + # Check if model is valid + if self.running_model not in self.models: + return False + + # Setup pipeline and publishers + self.init_model(self.running_model) + self.init_publishers(self.running_model) + + with depthai_camera_connect.connect(self.pipeline) as device: + rospy.loginfo("Connected to DepthAI device") + self.device = device + self.init_queues(device) + + while not rospy.is_shutdown(): + self.detect() + + return True + + def compute_angle_from_x_offset(self, x_offset): + """ + See: https://math.stackexchange.com/questions/1320285/convert-a-pixel-displacement-to-angular-rotation + for implementation details. + + :param x_offset: x pixels from center of image + + :return: angle in degrees + """ + image_center_x = self.camera_pixel_width / 2.0 + return math.degrees(math.atan(((x_offset - image_center_x) * 0.005246675486))) + + +if __name__ == '__main__': + DepthAISpatialDetector().run() diff --git a/onboard/catkin_ws/src/cv/scripts/depthai_simulate_detection.py b/onboard/catkin_ws/src/cv/scripts/depthai_simulate_detection.py index 854b1becb..9414018b0 100755 --- a/onboard/catkin_ws/src/cv/scripts/depthai_simulate_detection.py +++ b/onboard/catkin_ws/src/cv/scripts/depthai_simulate_detection.py @@ -85,15 +85,17 @@ def _build_pipeline(self): nn = self.pipeline.create(dai.node.YoloDetectionNetwork) # Neural net / model properties - nn.setConfidenceThreshold(0.5) - nn.setBlobPath(self.nn_path) nn.setNumInferenceThreads(2) + + nn.setBlobPath(self.nn_path) + nn.setConfidenceThreshold(self.model['confidence_threshold']) nn.input.setBlocking(False) + nn.setNumClasses(len(self.model['classes'])) nn.setCoordinateSize(self.model['coordinate_size']) nn.setAnchors(np.array(self.model['anchors'])) nn.setAnchorMasks(self.model['anchor_masks']) - nn.setIouThreshold(0.5) + nn.setIouThreshold(self.model['iou_threshold']) # Create a link between the neural net input and the local image stream output x_in.out.link(nn.input) diff --git a/onboard/catkin_ws/src/cv/scripts/depthai_spatial_detection.py b/onboard/catkin_ws/src/cv/scripts/depthai_spatial_detection.py index 8c8c67cd2..251bd7d2b 100755 --- a/onboard/catkin_ws/src/cv/scripts/depthai_spatial_detection.py +++ b/onboard/catkin_ws/src/cv/scripts/depthai_spatial_detection.py @@ -7,8 +7,10 @@ import depthai_camera_connect import depthai as dai import numpy as np -from utils import DetectionVisualizer +from utils import DetectionVisualizer, calculate_relative_pose from image_tools import ImageTools +import cv2 +import correct from custom_msgs.msg import CVObject, SonarSweepRequest, SonarSweepResponse from sensor_msgs.msg import CompressedImage @@ -24,6 +26,12 @@ SONAR_RESPONSES_PATH = 'sonar/cv/response' TASK_PLANNING_REQUESTS_PATH = "controls/desired_feature" +GATE_IMAGE_WIDTH = 0.2452 # Width of gate images in meters +GATE_IMAGE_HEIGHT = 0.2921 # Height of gate images in meters +FOCAL_LENGTH = 2.75 # Focal length of camera in mm +SENSOR_SIZE = (6.2868, 4.712) # Sensor size in mm +ISP_IMG_SHAPE = (4056, 3040) # Size of ISP image + # Compute detections on live camera feed and publish spatial coordinates for detected objects class DepthAISpatialDetector: @@ -35,12 +43,12 @@ def __init__(self): self.running_model = rospy.get_param("~model") self.rgb_raw = rospy.get_param("~rgb_raw") self.rgb_detections = rospy.get_param("~rgb_detections") - self.queue_rgb = self.rgb_raw or self.rgb_detections # Whether to output RGB feed self.queue_depth = rospy.get_param("~depth") # Whether to output depth map self.sync_nn = rospy.get_param("~sync_nn") 
self.using_sonar = rospy.get_param("~using_sonar") self.show_class_name = rospy.get_param("~show_class_name") self.show_confidence = rospy.get_param("~show_confidence") + self.correct_color = rospy.get_param("~correct_color") with open(rr.get_filename(DEPTHAI_OBJECT_DETECTION_MODELS_FILEPATH, use_protocol=False)) as f: @@ -104,44 +112,42 @@ def build_pipeline(self, nn_blob_path, sync_nn): # Define sources and outputs cam_rgb = pipeline.create(dai.node.ColorCamera) - spatial_detection_network = pipeline.create(dai.node.YoloSpatialDetectionNetwork) - mono_left = pipeline.create(dai.node.MonoCamera) - mono_right = pipeline.create(dai.node.MonoCamera) - stereo = pipeline.create(dai.node.StereoDepth) + spatial_detection_network = pipeline.create(dai.node.YoloDetectionNetwork) + image_manip = pipeline.create(dai.node.ImageManip) xout_nn = pipeline.create(dai.node.XLinkOut) xout_nn.setStreamName("detections") - if self.queue_rgb: - xout_rgb = pipeline.create(dai.node.XLinkOut) - xout_rgb.setStreamName("rgb") + xout_rgb = pipeline.create(dai.node.XLinkOut) + xout_rgb.setStreamName("rgb") - if self.queue_depth: - xout_depth = pipeline.create(dai.node.XLinkOut) - xout_depth.setStreamName("depth") + xin_nn_input = pipeline.create(dai.node.XLinkIn) + xin_nn_input.setStreamName("nn_input") + xin_nn_input.setNumFrames(2) + xin_nn_input.setMaxDataSize(416*416*3) # Camera properties cam_rgb.setPreviewSize(model['input_size']) - cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) + cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_12_MP) cam_rgb.setInterleaved(False) cam_rgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR) + cam_rgb.setPreviewKeepAspectRatio(False) - mono_left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) - mono_left.setBoardSocket(dai.CameraBoardSocket.CAM_B) - mono_right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) - mono_right.setBoardSocket(dai.CameraBoardSocket.CAM_C) + cam_rgb.setIspNumFramesPool(3) # keep this high default + cam_rgb.setPreviewNumFramesPool(1) # breaks if <1 + cam_rgb.setRawNumFramesPool(2) # breaks if <2 + cam_rgb.setStillNumFramesPool(0) + cam_rgb.setVideoNumFramesPool(1) # breaks if <1 - # Stereo properties - stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) - stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A) + image_manip.initialConfig.setResize(model['input_size']) + image_manip.initialConfig.setKeepAspectRatio(False) + image_manip.setMaxOutputFrameSize(model['input_size'][0] * model['input_size'][1] * 3) + image_manip.setNumFramesPool(1) # General spatial detection network parameters spatial_detection_network.setBlobPath(nn_blob_path) spatial_detection_network.setConfidenceThreshold(model['confidence_threshold']) spatial_detection_network.input.setBlocking(False) - spatial_detection_network.setBoundingBoxScaleFactor(0.5) - spatial_detection_network.setDepthLowerThreshold(100) - spatial_detection_network.setDepthUpperThreshold(5000) # Yolo specific parameters spatial_detection_network.setNumClasses(len(model['classes'])) @@ -150,26 +156,13 @@ def build_pipeline(self, nn_blob_path, sync_nn): spatial_detection_network.setAnchorMasks(model['anchor_masks']) spatial_detection_network.setIouThreshold(model['iou_threshold']) - # Linking - mono_left.out.link(stereo.left) - mono_right.out.link(stereo.right) - - cam_rgb.preview.link(spatial_detection_network.input) + xin_nn_input.out.link(spatial_detection_network.input) - if self.queue_rgb: - # To sync 
RGB frames with NN, link passthrough to xout instead of preview - if sync_nn: - spatial_detection_network.passthrough.link(xout_rgb.input) - else: - cam_rgb.preview.link(xout_rgb.input) + cam_rgb.isp.link(image_manip.inputImage) + image_manip.out.link(xout_rgb.input) spatial_detection_network.out.link(xout_nn.input) - if self.queue_depth: - spatial_detection_network.passthroughDepth.link(xout_depth.input) - - stereo.depth.link(spatial_detection_network.inputDepth) - return pipeline def init_model(self, model_name): @@ -234,10 +227,7 @@ def init_publishers(self, model_name): self.detection_feed_publisher = rospy.Publisher("cv/front/detections/compressed", CompressedImage, queue_size=10) - if self.queue_depth: - self.depth_publisher = rospy.Publisher("camera/front/depth/compressed", CompressedImage, queue_size=10) - - def init_output_queues(self, device): + def init_queues(self, device): """ Assigns output queues from the pipeline to dictionary of queues. :param device: DepthAI.Device object for the connected device. @@ -248,14 +238,12 @@ def init_output_queues(self, device): return # Assign output queues - if self.queue_rgb: - self.output_queues["rgb"] = device.getOutputQueue(name="rgb", maxSize=1, blocking=False) - - if self.queue_depth: - self.output_queues["depth"] = device.getOutputQueue(name="depth", maxSize=1, blocking=False) + self.output_queues["rgb"] = device.getOutputQueue(name="rgb", maxSize=1, blocking=False) self.output_queues["detections"] = device.getOutputQueue(name="detections", maxSize=1, blocking=False) + self.input_queue = device.getInputQueue(name="nn_input", maxSize=1, blocking=False) + self.connected = True # Flag that the output queues have been initialized self.detection_visualizer = DetectionVisualizer(self.classes, self.colors, @@ -270,31 +258,51 @@ def detect(self): rospy.logwarn("Output queues are not initialized so cannot detect. 
Call init_output_queues first.") return + # Format a cv2 image to be sent to the device + def to_planar(arr: np.ndarray, shape: tuple) -> np.ndarray: + return cv2.resize(arr, shape).transpose(2, 0, 1).flatten() + + inPreview = self.output_queues["rgb"].get() + frame = inPreview.getCvFrame() + + # Publish raw RGB feed + if self.rgb_raw: + frame_img_msg = self.image_tools.convert_to_ros_compressed_msg(frame) + self.rgb_preview_publisher.publish(frame_img_msg) + + # Underwater color correction + if self.correct_color: + mat = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + frame = correct.correct(mat) + + # Send a message to the ColorCamera to capture a still image + img = dai.ImgFrame() + img.setType(dai.ImgFrame.Type.BGR888p) + img.setData(to_planar(frame, (416, 416))) + img.setWidth(416) + img.setHeight(416) + self.input_queue.send(img) + # Get detections from output queues - inDet = self.output_queues["detections"].get() + inDet = self.output_queues["detections"].tryGet() + if not inDet: + return detections = inDet.detections - if self.queue_rgb: - inPreview = self.output_queues["rgb"].get() - frame = inPreview.getCvFrame() - - # Publish raw RGB feed - if self.rgb_raw: - frame_img_msg = self.image_tools.convert_to_ros_compressed_msg(frame) - self.rgb_preview_publisher.publish(frame_img_msg) + detections_dict = {} + for detection in detections: + prev_conf, _ = detections_dict.get(detection.label, (None, None)) + if (prev_conf is not None and detection.confidence > prev_conf) or prev_conf is None: + detections_dict[detection.label] = detection.confidence, detection - # Publish detections feed - if self.rgb_detections: - detections_visualized = self.detection_visualizer.visualize_detections(frame, detections) - detections_img_msg = self.image_tools.convert_to_ros_compressed_msg(detections_visualized) - self.detection_feed_publisher.publish(detections_img_msg) + detections = [detection for _, detection in detections_dict.values()] + model = self.models[self.current_model_name] - # Publish depth feed - if self.queue_depth: - raw_img_depth = self.output_queues["depth"].get() - img_depth = raw_img_depth.getCvFrame() - image_msg_depth = self.image_tools.convert_depth_to_ros_compressed_msg(img_depth, 'mono16') - self.depth_publisher.publish(image_msg_depth) + # Publish detections feed + if self.rgb_detections: + detections_visualized = self.detection_visualizer.visualize_detections(frame, detections) + detections_img_msg = self.image_tools.convert_to_ros_compressed_msg(detections_visualized) + self.detection_feed_publisher.publish(detections_img_msg) # Process and publish detections. 
If using sonar, override det robot x coordinate for detection in detections: @@ -309,36 +317,20 @@ def detect(self): confidence = detection.confidence - # x is left/right axis, where 0 is in middle of the frame, - # to the left is negative x, and to the right is positive x - # y is down/up axis, where 0 is in the middle of the frame, - # down is negative y, and up is positive y - # z is distance of object from camera in mm - x_cam_mm = detection.spatialCoordinates.x - y_cam_mm = detection.spatialCoordinates.y - z_cam_mm = detection.spatialCoordinates.z - - x_cam_meters = mm_to_meters(x_cam_mm) - y_cam_meters = mm_to_meters(y_cam_mm) - z_cam_meters = mm_to_meters(z_cam_mm) - - det_coords_robot_mm = camera_frame_to_robot_frame(x_cam_meters, - y_cam_meters, - z_cam_meters) + # Calculate relative pose + det_coords_robot_mm = calculate_relative_pose(bbox, model['input_size'], model['sizes'][label], + FOCAL_LENGTH, SENSOR_SIZE, 2) # Find yaw angle offset left_end_compute = self.compute_angle_from_x_offset(detection.xmin * self.camera_pixel_width) right_end_compute = self.compute_angle_from_x_offset(detection.xmax * self.camera_pixel_width) midpoint = (left_end_compute + right_end_compute) / 2.0 - yaw_offset = (midpoint) * (math.pi / 180.0) # Degrees to radians + yaw_offset = math.radians(midpoint) # Degrees to radians # Create a new sonar request msg object if using sonar and the current detected # class is the desired class to be returned to task planning if self.using_sonar and label == self.current_priority: - # top_end_compute = self.compute_angle_from_y_offset(detection.ymin * self.camera_pixel_height) - # bottom_end_compute = self.compute_angle_from_y_offset(detection.ymax * self.camera_pixel_height) - # Construct sonar request message sonar_request_msg = SonarSweepRequest() sonar_request_msg.start_angle = int(left_end_compute) @@ -353,8 +345,8 @@ def detect(self): # else, keep default if not (self.sonar_response == (0, 0)) and self.in_sonar_range: det_coords_robot_mm = (self.sonar_response[0], # Override x - -x_cam_meters, # Maintain original y - y_cam_meters) # Maintain original z + det_coords_robot_mm[1], # Maintain original y + det_coords_robot_mm[2]) # Maintain original z self.publish_prediction( bbox, det_coords_robot_mm, yaw_offset, label, confidence, @@ -398,8 +390,7 @@ def publish_prediction(self, bbox, det_coords, yaw, label, confidence, if self.publishers: # Flush out 0, 0, 0 values - if object_msg.coords.x != 0 and object_msg.coords.y != 0 and object_msg.coords.z != 0: - self.publishers[label].publish(object_msg) + self.publishers[label].publish(object_msg) def update_sonar(self, sonar_results): """ @@ -437,7 +428,7 @@ def run(self): self.init_publishers(self.running_model) with depthai_camera_connect.connect(self.pipeline) as device: - self.init_output_queues(device) + self.init_queues(device) while not rospy.is_shutdown(): self.detect() diff --git a/onboard/catkin_ws/src/cv/scripts/path_marker_detector.py b/onboard/catkin_ws/src/cv/scripts/path_marker_detector.py new file mode 100755 index 000000000..df7148a8a --- /dev/null +++ b/onboard/catkin_ws/src/cv/scripts/path_marker_detector.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python + +import rospy +import cv2 +import numpy as np +import math +from sensor_msgs.msg import CompressedImage, Image +from custom_msgs.msg import CVObject +from geometry_msgs.msg import Point +from cv_bridge import CvBridge + +from utils import compute_center_distance + + +class PathMarkerDetector: + MONO_CAM_IMG_SHAPE = (640, 480) # Width, height in pixels 
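# Overview of the detector below:
#   1. image_callback decodes each frame from /camera/usb/bottom/compressed.
#   2. process_frame thresholds the frame in HSV for the marker's orange color,
#      cleans the mask with a morphological close, and fits an ellipse to the
#      largest remaining contour.
#   3. The ellipse center, dimensions, and orientation are published as a
#      CVObject (yaw, width, height) plus a Point giving the pixel offset from
#      the image center (image x maps to robot y, image y to robot z).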
+ + def __init__(self): + self.bridge = CvBridge() + self.image_sub = rospy.Subscriber("/camera/usb/bottom/compressed", CompressedImage, self.image_callback) + + # define information published for path marker + self.path_marker_hsv_filtered_pub = rospy.Publisher("/cv/bottom/path_marker/hsv_filtered", Image, queue_size=10) + self.path_marker_contour_image_pub = rospy.Publisher("/cv/bottom/path_marker/contour_image", Image, + queue_size=10) + self.path_marker_bounding_box_pub = rospy.Publisher("/cv/bottom/path_marker/bounding_box", CVObject, + queue_size=10) + self.path_marker_distance_pub = rospy.Publisher("/cv/bottom/path_marker/distance", Point, queue_size=10) + + def image_callback(self, data): + # Convert the compressed ROS image to OpenCV format + np_arr = np.frombuffer(data.data, np.uint8) + frame = cv2.imdecode(np_arr, cv2.IMREAD_COLOR) + frame = frame[:, :-20] + + # Process the frame to find and publish information on the bin + self.process_frame(frame) + + def process_frame(self, frame): + # Convert frame to HSV color space + hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) + + # Define range for blue color and create mask + lower_orange = np.array([0, 130, 100]) + upper_orange = np.array([20, 255, 255]) + # lower_orange = np.array([2, 80, 100]) + # upper_orange = np.array([33, 255, 255]) + mask = cv2.inRange(hsv, lower_orange, upper_orange) + + hsv_filtered_msg = self.bridge.cv2_to_imgmsg(mask, "mono8") + self.path_marker_hsv_filtered_pub.publish(hsv_filtered_msg) + + # Apply morphological operations to clean up the binary image + kernel = np.ones((5, 5), np.uint8) + mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel) + + # Find contours in the mask + contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + contours = [contour for contour in contours if len(contour) > 5 and cv2.contourArea(contour) > 500] + + # Fit a line to the largest contour + if len(contours) > 0: + largest_contour = max(contours, key=cv2.contourArea) + center, dimensions, orientation = cv2.fitEllipse(largest_contour) + + # NOTE: CLOCKWISE yaw from 12 o'clock + orientation_in_radians = math.radians(orientation) + + # create CVObject message and populate relevant attributes + bounding_box = CVObject() + + bounding_box.header.stamp = rospy.Time.now() + + orientation_in_radians = math.pi / 2 - orientation_in_radians + + bounding_box.yaw = orientation_in_radians + + bounding_box.width = int(dimensions[0]) + bounding_box.height = int(dimensions[1]) + + # Compute distance between center of bounding box and center of image + # Here, image x is robot's y, and image y is robot's z + dist_x, dist_y = compute_center_distance(center[0], center[1], *self.MONO_CAM_IMG_SHAPE) + + # get distances into publishable format + # also fix axis from camera's POV + dist_point = Point() + dist_point.x = dist_x + dist_point.y = -dist_y + + # publish dist, bbox + self.path_marker_distance_pub.publish(dist_point) + self.path_marker_bounding_box_pub.publish(bounding_box) + + visualized_frame = self.visualize_path_marker_detection(frame, center, bounding_box, + math.pi / 2 - orientation_in_radians) + cv2.circle(visualized_frame, (int(center[0]), int(center[1])), 5, (0, 0, 255), -1) + self.path_marker_contour_image_pub.publish(self.bridge.cv2_to_imgmsg(visualized_frame)) + + def visualize_path_marker_detection(self, frame, center, bounding_box, orientation): + """Returns frame with bounding boxes of the detection.""" + frame_copy = frame.copy() + + center_x, center_y = center + width, height = bounding_box.width, 
bounding_box.height + orientation = orientation if orientation > 0 else orientation + math.pi + + # Calculate the four corners of the rectangle + angle_cos = math.cos(orientation) + angle_sin = math.sin(orientation) + half_width = width / 2 + half_height = height / 2 + + x1 = int(center_x - half_width * angle_cos + half_height * angle_sin) + y1 = int(center_y - half_width * angle_sin - half_height * angle_cos) + x2 = int(center_x + half_width * angle_cos + half_height * angle_sin) + y2 = int(center_y + half_width * angle_sin - half_height * angle_cos) + x3 = int(center_x + half_width * angle_cos - half_height * angle_sin) + y3 = int(center_y + half_width * angle_sin + half_height * angle_cos) + x4 = int(center_x - half_width * angle_cos - half_height * angle_sin) + y4 = int(center_y - half_width * angle_sin + half_height * angle_cos) + + # Draw the rotated rectangle + cv2.line(frame_copy, (x1, y1), (x2, y2), (0, 0, 255), 3) + cv2.line(frame_copy, (x2, y2), (x3, y3), (0, 0, 255), 3) + cv2.line(frame_copy, (x3, y3), (x4, y4), (0, 0, 255), 3) + cv2.line(frame_copy, (x4, y4), (x1, y1), (0, 0, 255), 3) + + return frame_copy + + +if __name__ == "__main__": + rospy.init_node('path_marker_detector', anonymous=True) + detector = PathMarkerDetector() + try: + rospy.spin() + except KeyboardInterrupt: + rospy.loginfo("Shutting down Path Marker Detector node") diff --git a/onboard/catkin_ws/src/cv/scripts/pink_bins_detector.py b/onboard/catkin_ws/src/cv/scripts/pink_bins_detector.py new file mode 100755 index 000000000..4c0b892f6 --- /dev/null +++ b/onboard/catkin_ws/src/cv/scripts/pink_bins_detector.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python + +import rospy +import cv2 +import numpy as np +from functools import reduce +from sklearn.cluster import DBSCAN +from sensor_msgs.msg import CompressedImage, Image +from custom_msgs.msg import CVObject +from cv_bridge import CvBridge +from utils import compute_yaw + + +class PinkBinsDetector: + + MONO_CAM_IMG_SHAPE = (640, 480) # Width, height in pixels + + def __init__(self): + self.bridge = CvBridge() + + rospy.init_node("pink_bins_detector", anonymous=True) + + self.camera = rospy.get_param("~camera") + self.image_sub = rospy.Subscriber(f"/camera/usb/{self.camera}/compressed", CompressedImage, self.image_callback, + queue_size=1) + + self.pink_bins_hsv_filtered_pub = rospy.Publisher(f"/cv/{self.camera}/pink_bins/hsv_filtered", Image, + queue_size=10) + self.pink_bins_dbscan_pub = rospy.Publisher(f"/cv/{self.camera}/pink_bins/dbscan", Image, queue_size=10) + self.pink_bins_detections_pub = rospy.Publisher(f"/cv/{self.camera}/pink_bins/detections", Image, queue_size=10) + self.pink_bins_bounding_box_pub = rospy.Publisher(f"/cv/{self.camera}/pink_bins/bounding_box", CVObject, + queue_size=10) + + def image_callback(self, data): + # Convert the compressed ROS image to OpenCV format + np_arr = np.frombuffer(data.data, np.uint8) + frame = cv2.imdecode(np_arr, cv2.IMREAD_COLOR) + + # Process the frame to find and publish information on the bin + self.process_frame(frame) + + def process_frame(self, frame): + # Convert frame to HSV color space + hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) + + # Define range for blue color and create mask + # lower_pink = np.array([110, 100, 150]) + # upper_pink = np.array([170, 255, 255]) + mask_1 = cv2.inRange(hsv, np.array([110, 50, 130]), np.array([130, 100, 200])) + mask_2 = cv2.inRange(hsv, np.array([130, 80, 130]), np.array([160, 150, 255])) + mask_3 = cv2.inRange(hsv, np.array([155, 100, 150]), np.array([175, 255, 255])) + + 
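# The bins' pink covers a wide hue band (roughly 110-175 on OpenCV's 0-179 hue
# scale) with varying saturation and brightness, so three overlapping HSV
# windows are combined with a bitwise OR; when the bottom camera is used, a
# single, tighter window is applied instead.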
if self.camera == "bottom": + mask = cv2.inRange(hsv, np.array([160, 150, 200]), np.array([170, 255, 255])) + else: + mask = reduce(cv2.bitwise_or, [mask_1, mask_2, mask_3]) + + # apply filter and publish + hsv_filtered_msg = self.bridge.cv2_to_imgmsg(mask, "mono8") + self.pink_bins_hsv_filtered_pub.publish(hsv_filtered_msg) + + # handle if nothing detected + points = np.argwhere(mask > 0) + if len(points) == 0: + self.publish_with_no_detection(frame, hsv_filtered_msg) + return + + dbscan_img = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR) + + db = DBSCAN(eps=2, min_samples=10).fit(points) + labels = db.labels_ + + unique_labels = set(labels) + cluster_counts = {k: np.sum(labels == k) for k in unique_labels if k != -1} + + # handle if no clusters + if cluster_counts == {}: + self.publish_with_no_detection(frame, hsv_filtered_msg) + return + + # TODO: hung + sorted_cluster_labels = sorted(cluster_counts, key=cluster_counts.get, reverse=True) + colors = [(0, 0, 255), (0, 255, 255), (255, 0, 0)] + final_x, final_y = 0, 0 + chosen_label_score = None + for label, color in zip(sorted_cluster_labels[:3], colors): + class_member_mask = (labels == label) + max_clust_points = points[class_member_mask] + + for point in max_clust_points: + dbscan_img[point[0], point[1]] = (0, 255, 0) + + center_x = np.mean(max_clust_points[:, 1]) + center_y = np.mean(max_clust_points[:, 0]) + + if center_y > final_y: + final_x = center_x + final_y = center_y + chosen_label_score = max_clust_points.shape[0] + + if chosen_label_score < 100: + self.publish_with_no_detection(frame, hsv_filtered_msg) + return + + final_point_int = (int(final_x), int(final_y)) + + cv2.circle(dbscan_img, final_point_int, 7, (0, 0, 255), -1) + + dbscan_msg = self.bridge.cv2_to_imgmsg(dbscan_img, "bgr8") + self.pink_bins_dbscan_pub.publish(dbscan_msg) + + # Publish the frame with lowest point marked + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + cv2.circle(frame, final_point_int, 7, (255, 0, 0), -1) + frame_msg = self.bridge.cv2_to_imgmsg(frame, "rgb8") + self.pink_bins_detections_pub.publish(frame_msg) + + # Publish the bounding box of the bin + final_x_normalized = final_x / self.MONO_CAM_IMG_SHAPE[0] + cv_object = CVObject() + cv_object.header.stamp = rospy.Time.now() + cv_object.yaw = -compute_yaw(final_x_normalized, final_x_normalized, self.MONO_CAM_IMG_SHAPE[0]) + cv_object.score = chosen_label_score + self.pink_bins_bounding_box_pub.publish(cv_object) + + # called if no detections are made + def publish_with_no_detection(self, frame, hsv_filtered_msg): + frame_msg = self.bridge.cv2_to_imgmsg(frame, "bgr8") + self.pink_bins_dbscan_pub.publish(hsv_filtered_msg) + self.pink_bins_detections_pub.publish(frame_msg) + + +if __name__ == "__main__": + detector = PinkBinsDetector() + try: + rospy.spin() + except KeyboardInterrupt: + rospy.loginfo("Shutting down Pink Bins Detector node") diff --git a/onboard/catkin_ws/src/cv/scripts/usb_camera.py b/onboard/catkin_ws/src/cv/scripts/usb_camera.py index fa6f25e7d..039824589 100755 --- a/onboard/catkin_ws/src/cv/scripts/usb_camera.py +++ b/onboard/catkin_ws/src/cv/scripts/usb_camera.py @@ -14,23 +14,22 @@ class USBCamera: Launch using: roslaunch cv usb_camera.launch :param topic: rostopic to publish the image feed to; default is set to camera/usb_camera/compressed - :param channel: which device channel to read the stream from (e.g., dev/video0) + :param device_path: path to device to read the stream from (e.g., /dev/video0); can be a symlinked path :param framerate: custom framerate to stream the camera 
at; default is set to device default """ - def __init__(self): - + def __init__(self, topic=None, device_path=None, framerate=None): # Instantiate new USB camera node - rospy.init_node('usb_camera', anonymous=True) + rospy.init_node(f'usb_camera_{topic}', anonymous=True) # Read custom camera configs from launch command - self.topic = rospy.get_param("~topic") - self.topic = f'/camera/{self.topic}/compressed' + self.topic = topic if topic else rospy.get_param("~topic") + self.topic = f'/camera/usb/{self.topic}/compressed' - self.channel = rospy.get_param("~channel") + self.device_path = device_path if device_path else rospy.get_param("~device_path") # If no custom framerate is passed in, set self.framerate to None to trigger default framerate - self.framerate = rospy.get_param("~framerate") + self.framerate = framerate if framerate else rospy.get_param("~framerate") if self.framerate == -1: self.framerate = None @@ -42,38 +41,66 @@ def __init__(self): def run(self): """ - Connect to camera found at self.channel using cv2.VideoCaptures + Connect to camera found at self.device_path using cv2.VideoCaptures and stream every image as it comes in at the device framerate """ - # Try connecting to the camera unless a connection is refused - try: - # Connect to camera at channel - cap = cv2.VideoCapture(self.channel) - # Read first frame - success, img = cap.read() + total_tries = 5 + success = False + + for _ in range(total_tries): + if rospy.is_shutdown(): + break + + # Try connecting to the camera unless a connection is refused + try: + # Connect to camera at device_path + cap = cv2.VideoCapture(self.device_path) + # Read first frame + success, img = cap.read() + + # Set publisher rate (framerate) to custom framerate if specified, otherwise, set to default + loop_rate = None + if self.framerate is None: + loop_rate = rospy.Rate(cap.get(cv2.CAP_PROP_FPS)) + else: + loop_rate = rospy.Rate(self.framerate) + + # If the execution reaches the following statement, and the first frame was successfully read, + # then a successful camera connection was made, and we enter the main while loop + if success: + break + + except Exception: + rospy.loginfo("Failed to connect to USB camera, trying again...") + pass + + if rospy.is_shutdown(): + break - # Set publisher rate (framerate) to custom framerate if specified, otherwise, set to default - loop_rate = None - if self.framerate is None: - loop_rate = rospy.Rate(cap.get(cv2.CAP_PROP_FPS)) - else: - loop_rate = rospy.Rate(self.framerate) + # Wait two seconds before trying again + # This ensures the script does not terminate if the camera is just temporarily unavailable + rospy.sleep(2) + if success: # Including 'not rospy.is_shutdown()' in the loop condition here to ensure if this script is exited # while this loop is running, the script quits without escalating to SIGTERM or SIGKILL - while not rospy.is_shutdown() and success: - # Convert image read from cv2.videoCapture to image message to be published - image_msg = self.image_tools.convert_to_ros_compressed_msg(img) # Compress image - # Publish the image - self.publisher.publish(image_msg) + while not rospy.is_shutdown(): + if success: + # Convert image read from cv2.videoCapture to image message to be published + image_msg = self.image_tools.convert_to_ros_compressed_msg(img) # Compress image + # Publish the image + self.publisher.publish(image_msg) # Read next image success, img = cap.read() # Sleep loop to maintain frame rate loop_rate.sleep() - except Exception: - rospy.logerr(f"Camera not found at 
channel {self.channel}") + else: + rospy.logerr(f"{total_tries} attempts were made to connect to the USB camera. " + f"The camera was not found at device_path {self.device_path}. All attempts failed.") + raise RuntimeError(f"{total_tries} attempts were made to connect to the USB camera. " + f"The camera was not found at device_path {self.device_path}. All attempts failed.") if __name__ == '__main__': diff --git a/onboard/catkin_ws/src/cv/scripts/usb_camera_connect.py b/onboard/catkin_ws/src/cv/scripts/usb_camera_connect.py new file mode 100755 index 000000000..0726cdfde --- /dev/null +++ b/onboard/catkin_ws/src/cv/scripts/usb_camera_connect.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 + +import rospy +import roslaunch +import yaml +import resource_retriever as rr + +CAMERA_CONFIG_PATH = 'package://cv/configs/usb_cameras.yaml' + + +def connect_all(): + # get camera specs + with open(rr.get_filename(CAMERA_CONFIG_PATH, use_protocol=False)) as f: + cameras = yaml.safe_load(f) + + uuid = roslaunch.rlutil.get_or_generate_uuid(None, False) + roslaunch.configure_logging(uuid) + + roslaunch_files = [] + for camera_name in cameras: + # add cli args for each camera to run upon init + camera = cameras[camera_name] + device_path = camera["device_path"] + topic = camera["topic"] + + cli_args = ["cv", "usb_camera.launch", f"topic:={topic}", f"device_path:={device_path}", "framerate:=10"] + roslaunch_file = roslaunch.rlutil.resolve_launch_arguments(cli_args)[0] + roslaunch_files.append((roslaunch_file, cli_args[2:])) + + # actually init + parent = roslaunch.parent.ROSLaunchParent(uuid, roslaunch_files) + parent.start() + + while not rospy.is_shutdown(): + rospy.spin() + + +if __name__ == "__main__": + rospy.init_node("usb_camera_connect") + connect_all() diff --git a/onboard/catkin_ws/src/cv/scripts/utils.py b/onboard/catkin_ws/src/cv/scripts/utils.py index 5649b1a05..05d999425 100755 --- a/onboard/catkin_ws/src/cv/scripts/utils.py +++ b/onboard/catkin_ws/src/cv/scripts/utils.py @@ -1,8 +1,11 @@ #!/usr/bin/env python3 import numpy as np +import math import cv2 import os +from geometry_msgs.msg import Point +from custom_msgs.msg import CVObject def check_file_writable(filepath): @@ -25,6 +28,154 @@ def check_file_writable(filepath): return os.access(pdir, os.W_OK) +def cam_dist_with_obj_width(width_pixels, width_meters, + focal_length, img_shape, sensor_size, adjustment_factor=1): + ''' + Note that adjustment factor is 1 for mono camera and 2 for depthAI camera + ''' + return (focal_length * width_meters * img_shape[0]) \ + / (width_pixels * sensor_size[0]) * adjustment_factor + + +def cam_dist_with_obj_height(height_pixels, height_meters, + focal_length, img_shape, sensor_size, adjustment_factor=1): + return (focal_length * height_meters * img_shape[1]) \ + / (height_pixels * sensor_size[1]) * adjustment_factor + + +def compute_yaw(xmin, xmax, camera_pixel_width): + # Find yaw angle offset + left_end_compute = compute_angle_from_x_offset(xmin * camera_pixel_width, camera_pixel_width) + right_end_compute = compute_angle_from_x_offset(xmax * camera_pixel_width, camera_pixel_width) + midpoint = (left_end_compute + right_end_compute) / 2.0 + return (midpoint) * (math.pi / 180.0) # Degrees to radians + + +def compute_angle_from_x_offset(x_offset, camera_pixel_width): + """ + See: https://math.stackexchange.com/questions/1320285/convert-a-pixel-displacement-to-angular-rotation + for implementation details. 
+ + :param x_offset: x pixels from center of image + + :return: angle in degrees + """ + image_center_x = camera_pixel_width / 2.0 + return math.degrees(math.atan(((x_offset - image_center_x) * 0.005246675486))) + + +def calculate_relative_pose(bbox_bounds, input_size, label_shape, FOCAL_LENGTH, SENSOR_SIZE, adjustment_factor): + """ + Returns rel pose, to be used as a part of the CVObject + + Parameters: + bbox_bounds: the detection object + input_size: array wrt input size ([0] is width, [1] is height) + label_shape: the label shape ([0] is width --> only this is accessed, [1] is height) + FOCAL_LENGTH: a constant to pass in + SENSOR_SIZE: a constant to pass in + adjustment_factor: 1 if mono 2 if depthai + """ + xmin, ymin, xmax, ymax = bbox_bounds + + bbox_width = (xmax - xmin) * input_size[0] + bbox_center_x = (xmin + xmax) / 2 * input_size[0] + bbox_center_y = (ymin + ymax) / 2 * input_size[1] + meters_per_pixel = label_shape[0] / bbox_width + dist_x = bbox_center_x - input_size[0] // 2 + dist_y = bbox_center_y - input_size[1] // 2 + + y_meters = dist_x * meters_per_pixel * -1 + z_meters = dist_y * meters_per_pixel * -1 + + # rospy.loginfo(bbox_width, bbox_center_x, bbox_center_y, ) + + # isp_img_to_det_ratio = ISP_IMG_SHAPE[0] / model['input_size'][0] + + # print(bbox_width) + x_meters = cam_dist_with_obj_width(bbox_width, label_shape[0], FOCAL_LENGTH, input_size, SENSOR_SIZE, + adjustment_factor) + + return [x_meters, y_meters, z_meters] + + +def compute_bbox_dimensions(polygon): + """ + Returns a CVObject messages, containing the following properties of the given Polygon: + width, height, xmin, ymin, xmax, ymax as + + Args: + polygon: Polygon object + """ + + # Ensure there are points in the polygon + if len(polygon.points) < 4: + raise ValueError("Polygon does not represent a bounding box with four points.") + + # Initialize min_x, max_x, min_y, and max_y with the coordinates of the first point + min_x = polygon.points[0].x + max_x = polygon.points[0].x + min_y = polygon.points[0].y + max_y = polygon.points[0].y + + # Iterate through all points to find the min and max x and y coordinates + for point in polygon.points: + if point.x < min_x: + min_x = point.x + if point.x > max_x: + max_x = point.x + if point.y < min_y: + min_y = point.y + if point.y > max_y: + max_y = point.y + + # Compute the width and height + width = max_x - min_x + height = max_y - min_y + + # Create and populate message of CVObject type using polygon fields + msg = CVObject() + + msg.width = width + msg.height = height + + msg.xmin = min_x + msg.xmax = max_x + + msg.ymin = min_y + msg.ymax = max_y + + # TODO double check is coords field of CVObject centre? + center = Point() + center.x = (min_x + max_x) / 2 + center.y = (min_y + max_y) / 2 + + msg.coords = center + + # TODO make this not 0 and actually what its supposed to be + msg.yaw = 0 + + return msg + + +def compute_center_distance(bbox_center_x, bbox_center_y, frame_width, frame_height, width_adjustment_constant=0, + height_adjustment_constant=0): + + ''' + Note that x, y is in the camera's reference frame + ''' + + # Compute the center of the frame + frame_center_x = frame_width / 2 + frame_center_y = frame_height / 2 + + # Compute the distances between the centers + distance_x = bbox_center_x - frame_center_x + height_adjustment_constant + distance_y = bbox_center_y - frame_center_y + width_adjustment_constant + + return distance_x, distance_y + + class DetectionVisualizer: """ Helper methods to visualize detections on an image feed. 
Adapted from class TextHelper: diff --git a/onboard/catkin_ws/src/cv/udev/99-usb-camera.rules b/onboard/catkin_ws/src/cv/udev/99-usb-camera.rules new file mode 100644 index 000000000..da5380b46 --- /dev/null +++ b/onboard/catkin_ws/src/cv/udev/99-usb-camera.rules @@ -0,0 +1,5 @@ +SUBSYSTEM=="video4linux", ENV{ID_PATH_TAG}=="pci-0000_01_00_0-usb-0_1_1_1_0", ATTR{index}=="0", SYMLINK+="video_front" +SUBSYSTEM=="video4linux", ENV{ID_PATH_TAG}=="pci-0000_01_00_0-usb-0_1_1_1_0", ATTR{index}=="4", SYMLINK+="video_front" + +SUBSYSTEM=="video4linux", ENV{ID_PATH_TAG}=="pci-0000_05_00_3-usb-0_2_4_1_0", ATTR{index}=="0", SYMLINK+="video_bottom" +SUBSYSTEM=="video4linux", ENV{ID_PATH_TAG}=="pci-0000_05_00_3-usb-0_2_4_1_0", ATTR{index}=="4", SYMLINK+="video_bottom" diff --git a/onboard/catkin_ws/src/data_pub/README.md b/onboard/catkin_ws/src/data_pub/README.md index 663abd004..a154ad15e 100644 --- a/onboard/catkin_ws/src/data_pub/README.md +++ b/onboard/catkin_ws/src/data_pub/README.md @@ -1,6 +1,8 @@ # Data Pub -The `data_pub` package contains scripts that interface with various sensors, currently interfacing with the DVL (Doppler Velocity Log), pressure sensor (depth), and voltage sensor. You can launch all nodes in this package using the `pub_all.launch` file. +The `data_pub` package contains scripts that interface with various sensors, currently interfacing with the DVL (Doppler Velocity Log), pressure sensor (depth), voltage sensor, and temperature/humidity. You can launch all nodes in this package using the `pub_all.launch` file. + +Additionally, the package allows control over the marker dropper servo. The servo is controlled by publishing a Bool message to the `servo_control` service. ## Config File This package contains a config file for each robot, stored at `config/.yaml`, where `` is the value of the `ROBOT_NAME` enviornment variable corresponding to each robot. Config files for all robots must be structured as below: @@ -20,7 +22,7 @@ There must be only one top-level key, `dvl`. Under it, there must be the followi ## DVL -We use the [Teledyne Pathfinder DVL](https://www.teledynemarine.com/brands/rdi/pathfinder-dvl) for velocity measurements. The DVL is connected to the robot's main computer via USB. +We use the [Teledyne Pathfinder DVL](https://www.teledynemarine.com/brands/rdi/pathfinder-dvl) for velocity measurements. The DVL is connected to the robot's main computer via a USB serial converter. The `dvl_raw` script publishes the raw DVL data to the `/sensors/dvl/raw` topic with type `custom_msgs/DVLRaw`. It obtains the DVL's FTDI string from the config file mentioned above and uses it to find the DVL's port. @@ -32,11 +34,11 @@ You can launch both scripts using the `pub_dvl.launch` file. The pressure Arduino obtains data from the [Blue Robotics Bar02 Pressure Sensor](https://bluerobotics.com/store/sensors-cameras/sensors/bar02-sensor-r1-rp/) and a generic voltage sensor and sends the data as raw serial messages to the robot's main computer. -To connect to the pressure Arduino, the `sensors.py` script obtains its FTDI string from the config file for the current robot (as indicated by the `ROBOT_NAME` environment variable) in the `offboard_comms` package. The FTDI string is used to find the pressure Arduino's port. +To connect to the pressure Arduino, the `pressure_voltage.py` script obtains its FTDI string from the config file for the current robot (as indicated by the `ROBOT_NAME` environment variable) in the `offboard_comms` package. 
The FTDI string is used to find the pressure Arduino's port. The data obtained from serial contains tags `P:` and `V:` identifying pressure (depth) and voltage, respectively. -The `sensors.py` script can be launched using the `pub_sensors.launch` file. +The `pressure_voltage.py` script can be launched using the `pub_pressure_voltage.launch` file. ### Pressure/Depth The depth data obtained is filtered, converted into a `geometry_msgs/PoseWithCovarianceStamped` message, and published to the `/sensors/depth` topic, for use in `sensor_fusion`. @@ -50,4 +52,18 @@ These filters are applied to eliminate noise in the data that would otherwise re All data in this `PoseWithCovarianceStamped` message is set to 0 except for the `pose.pose.position.z` value, which is set to the depth in meters. The `pose.pose.orientation` is set to the identity quaternion. Except for the `pose.pose.position.z` value, all other values are unused in `sensor_fusion`. ### Voltage -The same node also gets voltage data from the voltage sensor on the same arduino and is published as a Float64 to `/sensors/voltage`, without modification. \ No newline at end of file +The same node also gets voltage data from the voltage sensor on the same arduino and is published as a Float64 to `/sensors/voltage`, without modification. + +### Temperature/Humidity +On a different Arduino, both temperature and humidity readings are gathered by a [DHT11 sensor](https://www.adafruit.com/product/386), dumped over serial, and published to the `/sensors/temperature` and `/sensors/humidity` topics, respectively. The data is published as a Float64. + +The data obtained from serial contains tags `T:` and `H:` identifying temperature and humidity, respectively. + +Temperature is measured in degrees Fahrenheit and humidity is measured in percentage. + +The `servo_sensors.py` script can be launched using the `pub_servo_sensors.launch` file. This Arduino also houses the servo for the marker dropper, and is designed to be a general-purpose sensor Arduino for future use. + +### Servo +The `servo_control` service allows control over the marker dropper servo. The marker dropper holds two rounds that can be dropped individually. The service takes a `Bool` message with the following values: +- `True`: Drop the "left" round. +- `False`: Drop the "right" round. 
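For reference, a minimal client sketch for the marker dropper service described above (not part of this diff). It assumes the `servo_sensors.py` node introduced later in this change is running and exposing `servo_control`; the `std_srvs/SetBool` type and the left/right convention are taken from that script.

#!/usr/bin/env python3
# Hypothetical example client for the marker dropper servo (illustration only).
import rospy
from std_srvs.srv import SetBool

rospy.init_node('marker_dropper_client', anonymous=True)
rospy.wait_for_service('servo_control')
drop_marker = rospy.ServiceProxy('servo_control', SetBool)

# data=True drops the "left" round, data=False drops the "right" round.
response = drop_marker(True)
rospy.loginfo(response.message)

The same call can be made from the command line with `rosservice call /servo_control "data: true"`.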
diff --git a/onboard/catkin_ws/src/data_pub/config/oogway.yaml b/onboard/catkin_ws/src/data_pub/config/oogway.yaml index 4c136e69e..4c74efa91 100644 --- a/onboard/catkin_ws/src/data_pub/config/oogway.yaml +++ b/onboard/catkin_ws/src/data_pub/config/oogway.yaml @@ -2,4 +2,6 @@ dvl: ftdi: D309SFWS negate_x_vel: false negate_y_vel: false - negate_z_vel: false \ No newline at end of file + negate_z_vel: false +ping1D: + ftdi: DP05ZD1V \ No newline at end of file diff --git a/onboard/catkin_ws/src/data_pub/launch/pub_all.launch b/onboard/catkin_ws/src/data_pub/launch/pub_all.launch index 805656a53..afff28e18 100644 --- a/onboard/catkin_ws/src/data_pub/launch/pub_all.launch +++ b/onboard/catkin_ws/src/data_pub/launch/pub_all.launch @@ -1,4 +1,5 @@ - + + diff --git a/onboard/catkin_ws/src/data_pub/launch/pub_ping1D.launch b/onboard/catkin_ws/src/data_pub/launch/pub_ping1D.launch new file mode 100644 index 000000000..33804c91e --- /dev/null +++ b/onboard/catkin_ws/src/data_pub/launch/pub_ping1D.launch @@ -0,0 +1,3 @@ + + + diff --git a/onboard/catkin_ws/src/data_pub/launch/pub_pressure_voltage.launch b/onboard/catkin_ws/src/data_pub/launch/pub_pressure_voltage.launch new file mode 100644 index 000000000..54099cfa3 --- /dev/null +++ b/onboard/catkin_ws/src/data_pub/launch/pub_pressure_voltage.launch @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/onboard/catkin_ws/src/data_pub/launch/pub_sensors.launch b/onboard/catkin_ws/src/data_pub/launch/pub_sensors.launch deleted file mode 100644 index 7e16f15b2..000000000 --- a/onboard/catkin_ws/src/data_pub/launch/pub_sensors.launch +++ /dev/null @@ -1,3 +0,0 @@ - - - \ No newline at end of file diff --git a/onboard/catkin_ws/src/data_pub/launch/pub_servo_sensors.launch b/onboard/catkin_ws/src/data_pub/launch/pub_servo_sensors.launch new file mode 100644 index 000000000..8d0aa14a0 --- /dev/null +++ b/onboard/catkin_ws/src/data_pub/launch/pub_servo_sensors.launch @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/onboard/catkin_ws/src/data_pub/scripts/ping1D.py b/onboard/catkin_ws/src/data_pub/scripts/ping1D.py new file mode 100755 index 000000000..b0f74976c --- /dev/null +++ b/onboard/catkin_ws/src/data_pub/scripts/ping1D.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python3 + +import rospy +from brping import Ping1D +import time +import serial.tools.list_ports as list_ports +from std_msgs.msg import Float64 +import yaml +import os +import resource_retriever as rr + + +class Ping1DPublisher: + BAUD_RATE = 115200 + PING_INTERVAL = 100 # ms + SPEED_OF_SOUND = 1482000 # mm/s + RANGE_START = 0 # mm + RANGE_END = 5000 # mm + GAIN_SETTING = 6 + MODE_AUTO = 0 + CONFIDANCE_THRESHOLD = 0 + + CONFIG_FILE_PATH = f'package://data_pub/config/{os.getenv("ROBOT_NAME", "oogway")}.yaml' + _serial_port = None + + PING1D_DEST_TOPIC = 'sensors/ping/distance' + + NODE_NAME = "ping" + + def __init__(self): + rospy.init_node(self.NODE_NAME) + self.ping_publisher = rospy.Publisher(self.PING1D_DEST_TOPIC, Float64, queue_size=10) + + with open(rr.get_filename(self.CONFIG_FILE_PATH, use_protocol=False)) as f: + self._config_data = yaml.safe_load(f) + + def connect(self): + # Make a new Ping + self._ping1D = Ping1D() + while self._serial_port is None and not rospy.is_shutdown(): + try: + ping1D_ftdi_string = self._config_data['ping1D']['ftdi'] + self._serial_port = next(list_ports.grep(ping1D_ftdi_string)).device + except StopIteration: + # rospy.logerr("Ping1D not found, trying again in 0.1 seconds.") # temp removed for competition + rospy.sleep(0.1) + + 
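# Once a serial device matching the configured FTDI string is found, the port
# is opened at 115200 baud, initialize() is retried until the sonar responds,
# and the constants defined above (speed of sound in mm/s, ping interval in ms,
# mode, gain, and scan range in mm) are written to the device.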
self._ping1D.connect_serial(self._serial_port, 115200) + + while self._ping1D.initialize() is False: + print("Failed to init Ping1D, retrying in 0.5 seconds... ") + time.sleep(0.5) + + self._ping1D.set_speed_of_sound(self.SPEED_OF_SOUND) # mm/s + self._ping1D.set_ping_interval(self.PING_INTERVAL) + self._ping1D.set_mode_auto(self.MODE_AUTO) + self._ping1D.set_gain_setting(self.GAIN_SETTING) + self._ping1D.set_range(self.RANGE_START, self.RANGE_END) + + def run(self): + rate = rospy.Rate(10) + self.connect() + while not rospy.is_shutdown(): + data = self._ping1D.get_distance_simple() + if data: + distance = data["distance"] + confidance = data["confidence"] + if confidance >= self.CONFIDANCE_THRESHOLD: + self.ping_publisher.publish(float(distance/1000.0)) + else: + rospy.logerr("Failed to get distance data from Ping1D") + rate.sleep() + + +if __name__ == '__main__': + try: + Ping1DPublisher().run() + except rospy.ROSInterruptException: + pass diff --git a/onboard/catkin_ws/src/data_pub/scripts/pressure_voltage.py b/onboard/catkin_ws/src/data_pub/scripts/pressure_voltage.py new file mode 100755 index 000000000..a7d9dfdd3 --- /dev/null +++ b/onboard/catkin_ws/src/data_pub/scripts/pressure_voltage.py @@ -0,0 +1,130 @@ +#!/usr/bin/env python3 + +import rospy +import os +from std_msgs.msg import Float64 +from serial_publisher import SerialPublisher + +# Used for sensor fusion +from geometry_msgs.msg import PoseWithCovarianceStamped + +CONFIG_FILE_PATH = f'package://offboard_comms/config/{os.getenv("ROBOT_NAME", "oogway")}.yaml' +CONFIG_NAME = 'pressure' + +BAUDRATE = 9600 +NODE_NAME = 'pressure_voltage_pub' +DEPTH_DEST_TOPIC = 'sensors/depth' +VOLTAGE_DEST_TOPIC = 'sensors/voltage' + + +class PressureVoltagePublisher(SerialPublisher): + """ + Serial publisher to publish voltage and pressure data to ROS + """ + + MEDIAN_FILTER_SIZE = 3 + + def __init__(self): + super().__init__(NODE_NAME, BAUDRATE, CONFIG_FILE_PATH, CONFIG_NAME) + + self._pressure = None # Pressure to publish + self._previous_pressure = None # Previous pressure readings for median filter + + self._pub_depth = rospy.Publisher(DEPTH_DEST_TOPIC, PoseWithCovarianceStamped, queue_size=50) + self._pub_voltage = rospy.Publisher(VOLTAGE_DEST_TOPIC, Float64, queue_size=10) + + self._current_pressure_msg = PoseWithCovarianceStamped() + self._current_voltage_msg = Float64() + + self._serial_port = None + self._serial = None + + def process_line(self, line): + """" + Reads and publishes individual lines + + @param line: the line to read + + Assumes data comes in the following format: + + P:0.22 + P:0.23 + P:0.22 + P:0.22 + V:15.85 + P:0.24 + ... 
+ """ + tag = line[0:2] # P for pressure and V for voltage + data = line[2:] + if "P:" in tag: + self._update_pressure(float(data)) # Filter out bad readings + self._parse_pressure() # Parse pressure data + self._publish_current_pressure_msg() # Publish pressure data + if "V:" in tag: + self._current_voltage_msg = float(data) + self._publish_current_voltage_msg() + + def _update_pressure(self, new_reading): + """ + Update pressure reading to publish and filter out bad readings + + @param new_reading: new pressure value to be printed + """ + # Ignore readings that are too large + if abs(new_reading) > 7: + return + + # First reading + elif self._pressure is None: + self._pressure = new_reading + self._previous_pressure = [new_reading] * self.MEDIAN_FILTER_SIZE + + # Median filter + else: + self._previous_pressure.append(new_reading) + self._previous_pressure.pop(0) + self._pressure = sorted(self._previous_pressure)[int(self.MEDIAN_FILTER_SIZE / 2)] + + def _parse_pressure(self): + """ + Parses the pressure into an odom message + """ + self._current_pressure_msg.pose.pose.position.x = 0.0 + self._current_pressure_msg.pose.pose.position.y = 0.0 + self._current_pressure_msg.pose.pose.position.z = -1 * float(self._pressure) + + self._current_pressure_msg.pose.pose.orientation.x = 0.0 + self._current_pressure_msg.pose.pose.orientation.y = 0.0 + self._current_pressure_msg.pose.pose.orientation.z = 0.0 + self._current_pressure_msg.pose.pose.orientation.w = 1.0 + + # Only the z,z covariance + self._current_pressure_msg.pose.covariance[14] = 0.01 + + def _publish_current_pressure_msg(self): + """ + Publishes current pressure to ROS node + """ + if abs(self._current_pressure_msg.pose.pose.position.z) > 7: + self._current_pressure_msg = PoseWithCovarianceStamped() + return + + self._current_pressure_msg.header.stamp = rospy.Time.now() + self._current_pressure_msg.header.frame_id = "odom" # World frame + + self._pub_depth.publish(self._current_pressure_msg) + self._current_pressure_msg = PoseWithCovarianceStamped() + + def _publish_current_voltage_msg(self): + """ + Publishes current voltage to ROS node + """ + self._pub_voltage.publish(self._current_voltage_msg) + + +if __name__ == '__main__': + try: + PressureVoltagePublisher().run() + except rospy.ROSInterruptException: + pass diff --git a/onboard/catkin_ws/src/data_pub/scripts/sensors.py b/onboard/catkin_ws/src/data_pub/scripts/sensors.py deleted file mode 100755 index 34f34716b..000000000 --- a/onboard/catkin_ws/src/data_pub/scripts/sensors.py +++ /dev/null @@ -1,153 +0,0 @@ -#!/usr/bin/env python3 - -import rospy -import os -import time -import serial -import serial.tools.list_ports as list_ports -import yaml -import resource_retriever as rr -import traceback -from std_msgs.msg import Float64 - -# Used for sensor fusion -from geometry_msgs.msg import PoseWithCovarianceStamped - - -class PressureRawPublisher: - - DEPTH_DEST_TOPIC = 'sensors/depth' - VOLTAGE_DEST_TOPIC = 'sensors/voltage' - CONFIG_FILE_PATH = f'package://offboard_comms/config/{os.getenv("ROBOT_NAME", "oogway")}.yaml' - - BAUDRATE = 9600 - NODE_NAME = 'pressure_pub' - - MEDIAN_FILTER_SIZE = 3 - - def __init__(self): - with open(rr.get_filename(self.CONFIG_FILE_PATH, use_protocol=False)) as f: - config_data = yaml.safe_load(f) - self._arduino_config = config_data['arduino'] - - self._pressure = None # Pressure to publish - self._previous_pressure = None # Previous pressure readings for median filter - - self._pub_depth = rospy.Publisher(self.DEPTH_DEST_TOPIC, 
PoseWithCovarianceStamped, queue_size=50) - self._pub_voltage = rospy.Publisher(self.VOLTAGE_DEST_TOPIC, Float64, queue_size=10) - - self._current_pressure_msg = PoseWithCovarianceStamped() - self._current_voltage_msg = Float64() - - self._serial_port = None - self._serial = None - - # Read FTDI strings of all ports in list_ports.grep - def connect(self): - while self._serial_port is None and not rospy.is_shutdown(): - try: - pressure_ftdi_string = self._arduino_config['pressure']['ftdi'] - self._serial_port = next(list_ports.grep(pressure_ftdi_string)).device - self._serial = serial.Serial(self._serial_port, self.BAUDRATE, - timeout=1, write_timeout=None, - bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE, - stopbits=serial.STOPBITS_ONE) - except StopIteration: - rospy.logerr("Pressure sensor not found, trying again in 0.1 seconds.") - rospy.sleep(0.1) - - # Read line from serial port without blocking - def readline_nonblocking(self, tout=1): - start = time.time() - buff = b'' - while ((time.time() - start) < tout) and (b'\r\n' not in buff): - try: - buff += self._serial.read(1) - except serial.SerialException: - pass - - return buff.decode('utf-8', errors='ignore') - - def run(self): - rospy.init_node(self.NODE_NAME) - self.connect() - while not rospy.is_shutdown(): - try: - # Direct read from device - line = self.readline_nonblocking().strip() - - if not line or line == '': - rospy.logerr("Timeout in pressure serial read, trying again in 1 second.") - rospy.sleep(0.1) - continue # Skip and retry - - tag = line[0:2] # P for pressure and V for voltage - data = line[2:] - if "P:" in tag: - self._update_pressure(float(data)) # Filter out bad readings - self._parse_pressure() # Parse pressure data - self._publish_current_pressure_msg() # Publish pressure data - if "V:" in tag: - self._current_voltage_msg = float(data) - self._publish_current_voltage_msg() - - except Exception: - rospy.logerr("Error in reading pressure information. 
Reconnecting.") - rospy.logerr(traceback.format_exc()) - self._serial.close() - self._serial = None - self._serial_port = None - self.connect() - - # Update pressure reading to publish and filter out bad readings - def _update_pressure(self, new_reading): - # Ignore readings that are too large - if abs(new_reading) > 7: - return - - # First reading - elif self._pressure is None: - self._pressure = new_reading - self._previous_pressure = [new_reading] * self.MEDIAN_FILTER_SIZE - - # Median filter - else: - self._previous_pressure.append(new_reading) - self._previous_pressure.pop(0) - self._pressure = sorted(self._previous_pressure)[int(self.MEDIAN_FILTER_SIZE / 2)] - - def _parse_pressure(self): - # Pressure data recieved is positive so must flip sign - - self._current_pressure_msg.pose.pose.position.x = 0.0 - self._current_pressure_msg.pose.pose.position.y = 0.0 - self._current_pressure_msg.pose.pose.position.z = -1 * float(self._pressure) - - self._current_pressure_msg.pose.pose.orientation.x = 0.0 - self._current_pressure_msg.pose.pose.orientation.y = 0.0 - self._current_pressure_msg.pose.pose.orientation.z = 0.0 - self._current_pressure_msg.pose.pose.orientation.w = 1.0 - - # Only the z,z covariance - self._current_pressure_msg.pose.covariance[14] = 0.01 - - def _publish_current_pressure_msg(self): - if abs(self._current_pressure_msg.pose.pose.position.z) > 7: - self._current_pressure_msg = PoseWithCovarianceStamped() - return - - self._current_pressure_msg.header.stamp = rospy.Time.now() - self._current_pressure_msg.header.frame_id = "odom" # World frame - - self._pub_depth.publish(self._current_pressure_msg) - self._current_pressure_msg = PoseWithCovarianceStamped() - - def _publish_current_voltage_msg(self): - self._pub_voltage.publish(self._current_voltage_msg) - - -if __name__ == '__main__': - try: - PressureRawPublisher().run() - except rospy.ROSInterruptException: - pass diff --git a/onboard/catkin_ws/src/data_pub/scripts/serial_publisher.py b/onboard/catkin_ws/src/data_pub/scripts/serial_publisher.py new file mode 100644 index 000000000..5402ebd43 --- /dev/null +++ b/onboard/catkin_ws/src/data_pub/scripts/serial_publisher.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python3 + +from abc import ABC, abstractmethod +import rospy +import time +import serial +import serial.tools.list_ports as list_ports +import yaml +import resource_retriever as rr +import traceback + + +class SerialPublisher(ABC): + """ + Abstract serial publisher for a ROS node + """ + + def __init__(self, node_name, baud, config_file_path, config_name): + """ + @param node_name: The ROS node name + @param baud: the baud rate + @config_file_path: location of serial config + @config_name: name of Arduino to reference from config file + """ + + self.config_file_path = config_file_path + self.baud = baud + self.node_name = node_name + self.config_name = config_name + + with open(rr.get_filename(self.config_file_path, use_protocol=False)) as f: + config_data = yaml.safe_load(f) + self._arduino_config = config_data['arduino'] + + def connect(self): + """ + Read FTDI strings of all ports in list_ports.grep + """ + while self._serial_port is None and not rospy.is_shutdown(): + try: + pressure_ftdi_string = self._arduino_config[self.config_name]['ftdi'] + print(f"Looking for {self.config_name} sensor with FTDI string {pressure_ftdi_string}.") + self._serial_port = next(list_ports.grep(pressure_ftdi_string)).device + print(f"{self.config_name} sensor found at {self._serial_port}.") + self._serial = serial.Serial(self._serial_port, 
self.baud,
+                                          timeout=1, write_timeout=None,
+                                          bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE,
+                                          stopbits=serial.STOPBITS_ONE)
+            except StopIteration:
+                rospy.logerr(f"{self.config_name} sensor not found, trying again in 0.1 seconds.")
+                rospy.sleep(0.1)
+
+    def readline_nonblocking(self, tout=1):
+        """
+        Read line from serial port without blocking
+
+        @param tout: timeout, default = 1 sec
+        """
+        start = time.time()
+        buff = b''
+        while ((time.time() - start) < tout) and (b'\r\n' not in buff):
+            try:
+                buff += self._serial.read(1)
+            except serial.SerialException:
+                pass
+
+        return buff.decode('utf-8', errors='ignore')
+
+    def writeline(self, line):
+        """
+        Write line to serial port
+
+        @param line: the line to write
+        """
+        self._serial.write(line.encode('utf-8') + b'\r\n')
+
+    @abstractmethod
+    def process_line(self, line):
+        """
+        Abstract method defining how a line of serial input is parsed and published.
+
+        @param line: the line to be processed
+        """
+        pass
+
+    def run(self):
+        """
+        Runs the serial publisher
+
+        Initializes ROS node
+        Connects to serial device
+        Processes and publishes the serial data to ROS
+        """
+
+        rospy.init_node(self.node_name)
+        self.connect()
+
+        while not rospy.is_shutdown():
+            try:
+                # Direct read from device
+                line = self.readline_nonblocking().strip()  # Example: "P: 0.22"
+
+                if not line:
+                    rospy.logerr(f"Timeout in {self.config_name} serial read, trying again in 0.1 seconds.")
+                    rospy.sleep(0.1)
+                    continue  # Skip and retry
+
+                self.process_line(line)
+
+            except Exception:
+                rospy.logerr(f"Error reading {self.config_name} serial data; closing port and reconnecting.")
+                rospy.logerr(traceback.format_exc())
+                self._serial.close()
+                self._serial = None
+                self._serial_port = None
+                self.connect()
diff --git a/onboard/catkin_ws/src/data_pub/scripts/servo_sensors.py b/onboard/catkin_ws/src/data_pub/scripts/servo_sensors.py
new file mode 100755
index 000000000..d9b61b37e
--- /dev/null
+++ b/onboard/catkin_ws/src/data_pub/scripts/servo_sensors.py
@@ -0,0 +1,147 @@
+#!/usr/bin/env python3
+
+import rospy
+import os
+from std_msgs.msg import Float64
+from std_srvs.srv import SetBool, SetBoolResponse
+from serial_publisher import SerialPublisher
+
+CONFIG_FILE_PATH = f'package://offboard_comms/config/{os.getenv("ROBOT_NAME", "oogway")}.yaml'
+CONFIG_NAME = 'servo_sensors'
+
+BAUDRATE = 9600
+NODE_NAME = 'servo_sensors_pub'
+HUMIDITY_DEST_TOPIC = 'sensors/humidity'
+TEMPERATURE_DEST_TOPIC = 'sensors/temperature'
+SERVO_SERVICE = 'servo_control'
+
+
+class TemperatureHumidityPublisher(SerialPublisher):
+    """
+    Serial publisher to publish temperature and humidity data to ROS
+    """
+
+    MEDIAN_FILTER_SIZE = 3
+
+    def __init__(self):
+        super().__init__(NODE_NAME, BAUDRATE, CONFIG_FILE_PATH, CONFIG_NAME)
+
+        self._temperature = None  # Temperature to publish
+        self._humidity = None
+        self._previous_temperature = None  # Previous temperature readings for median filter
+
+        self._pub_temperature = rospy.Publisher(TEMPERATURE_DEST_TOPIC, Float64, queue_size=10)
+        self._pub_humidity = rospy.Publisher(HUMIDITY_DEST_TOPIC, Float64, queue_size=10)
+
+        self._servo_service = rospy.Service(SERVO_SERVICE, SetBool, self.servo_control)
+
+        self._current_temperature_msg = Float64()
+        self._current_humidity_msg = Float64()
+
+        self._serial_port = None
+        self._serial = None
+
+    def servo_control(self, req):
+        """
+        Callback for servo control service
+
+        @param req: the request to control the servo
+        """
+        rospy.logdebug('ServoControl received.')
+        if req.data:
+            self.writeline('L')
+        else:
self.writeline('R') + return SetBoolResponse(True, f'Successfully set servo to {"left" if req.data else "right"}.') + + def process_line(self, line): + """" + Reads and publishes individual lines + + @param line: the line to read + + Assumes data comes in the following format: + + T:69.1 + H:30.1 + T:69.2 + H:27.8 + T:69.1 + H:27.8 + T:69.8 + ... + """ + tag = line[0:2] # T for temperature and H for humidity + data = line[2:] + if data == "": + return + if "T:" in tag: + self._update_temperature(float(data)) # Filter out bad readings + self._publish_current_temperature_msg() # Publish temperature data + if "H:" in tag: + self._update_humidity(float(data)) # Filter out bad readings + self._publish_current_humidity_msg() # Publish humidity data + rospy.sleep(1) + + def _update_temperature(self, new_reading): + """ + Update temperature reading to publish and filter out bad readings + + @param new_reading: new temperature value to be printed + """ + # Ignore readings that are too large + if abs(new_reading) > 200: + return + + # First reading + elif self._temperature is None: + self._temperature = new_reading + self._previous_temperature = [new_reading] * self.MEDIAN_FILTER_SIZE + + # Median filter + else: + self._previous_temperature.append(new_reading) + self._previous_temperature.pop(0) + self._temperature = sorted(self._previous_temperature)[int(self.MEDIAN_FILTER_SIZE / 2)] + + def _update_humidity(self, new_reading): + """ + Update humidity reading to publish and filter out bad readings + + @param new_reading: new humidity value to be printed + """ + # Ignore readings that are too large + if abs(new_reading) > 200: + return + + # First reading + elif self._humidity is None: + self._humidity = new_reading + self._previous_humidity = [new_reading] * self.MEDIAN_FILTER_SIZE + + # Median filter + else: + self._previous_humidity.append(new_reading) + self._previous_humidity.pop(0) + self._humidity = sorted(self._previous_humidity)[int(self.MEDIAN_FILTER_SIZE / 2)] + + def _publish_current_temperature_msg(self): + """ + Publishes current temperature to ROS node + """ + self._current_temperature_msg.data = self._temperature + self._pub_temperature.publish(self._current_temperature_msg) + + def _publish_current_humidity_msg(self): + """ + Publishes current humidity to ROS node + """ + self._current_humidity_msg.data = self._humidity + self._pub_humidity.publish(self._current_humidity_msg) + + +if __name__ == '__main__': + try: + TemperatureHumidityPublisher().run() + except rospy.ROSInterruptException: + pass diff --git a/onboard/catkin_ws/src/execute/launch/motion.launch b/onboard/catkin_ws/src/execute/launch/motion.launch index 674a532aa..34de06160 100644 --- a/onboard/catkin_ws/src/execute/launch/motion.launch +++ b/onboard/catkin_ws/src/execute/launch/motion.launch @@ -3,6 +3,7 @@ + @@ -21,7 +22,9 @@ - + + + diff --git a/onboard/catkin_ws/src/execute/launch/sensors.launch b/onboard/catkin_ws/src/execute/launch/sensors.launch index 1f8817b88..4cdc6d447 100644 --- a/onboard/catkin_ws/src/execute/launch/sensors.launch +++ b/onboard/catkin_ws/src/execute/launch/sensors.launch @@ -12,12 +12,36 @@ + - + - + + + + + + + + + + + + + + + + + + + + + + + diff --git a/onboard/catkin_ws/src/offboard_comms/Arduino Sketchbooks/PressureArduino/PressureArduino.ino b/onboard/catkin_ws/src/offboard_comms/Arduino Sketchbooks/PressureArduino/PressureArduino.ino index cb57d5581..4719d2585 100755 --- a/onboard/catkin_ws/src/offboard_comms/Arduino Sketchbooks/PressureArduino/PressureArduino.ino +++ 
b/onboard/catkin_ws/src/offboard_comms/Arduino Sketchbooks/PressureArduino/PressureArduino.ino @@ -6,7 +6,7 @@ #define BAUD_RATE 9600 #define ONBOARD_VOLTAGE 4.655 // Arduino onboard voltage (true output of 5V pin) #define VPIN 3 // voltage pin analog input -#define VOLTAGE_PERIOD 1000 // how often to print out voltage +#define VOLTAGE_PERIOD 100 // how often to print out voltage MS5837 sensor; bool pressureConnected = false; diff --git a/onboard/catkin_ws/src/offboard_comms/Arduino Sketchbooks/ServoSensorArduino/ServoSensorArduino.ino b/onboard/catkin_ws/src/offboard_comms/Arduino Sketchbooks/ServoSensorArduino/ServoSensorArduino.ino new file mode 100644 index 000000000..01b86810d --- /dev/null +++ b/onboard/catkin_ws/src/offboard_comms/Arduino Sketchbooks/ServoSensorArduino/ServoSensorArduino.ino @@ -0,0 +1,71 @@ +// from L to R: data (pin 4), power (5V), ground +#include +#include +#define DHT11PIN 4 // digital pin 4 + +DHT11 dht11(DHT11PIN); +Servo myservo; + +unsigned long myTime; +boolean servoMoved = false; +unsigned long servoTime; +unsigned long rateServo = 1000; // how much to wait before returning servo to default position +unsigned long rateTemp = 0; // how often to print temp/humidity (no delay) +unsigned long prevTime = 0; +String humiditytag = "H:"; +String temperaturetag = "T:"; +String printHumidity = ""; +String printTemp = ""; + + +void setup() { + Serial.begin(9600); + myservo.attach(9); // servo pin 9 + myservo.writeMicroseconds(1500); // 1500 micros = 90 write + +} + +void loop() { + myTime = millis(); + + if (Serial.available() > 0 && !servoMoved) { + char state = Serial.read(); + if (Serial.available() > 0) { + Serial.readString(); + } + + if (state == 'L') { + myservo.writeMicroseconds(1250); // 1200 = 90 degrees left // 1250 micros = 20 write + servoMoved = true; + servoTime = myTime; + } + if (state == 'R') { + myservo.writeMicroseconds(1750); // 1800 = 90 degrees right // 1750 micros = 160 write + servoMoved = true; + servoTime = myTime; + } + } + + // return to default position after 1 second + if(servoMoved && myTime - servoTime > rateServo) { + myservo.writeMicroseconds(1500); // 1500 micros = 90 write + servoMoved = false; + } + + // printing humidity and temperature + if(myTime - prevTime >= rateTemp) { + prevTime = myTime; + int temperature = 0; + int humidity = 0; + int result = dht11.readTemperatureHumidity(temperature, humidity); + + if (result == 0) { + printHumidity = humiditytag + String((float)humidity); + Serial.println(printHumidity); + + printTemp = temperaturetag + String((float)temperature * 1.8 + 32); // convert Celsius to Fahrenheit + Serial.println(printTemp); + } + // else: error reading sensor + } +} \ No newline at end of file diff --git a/onboard/catkin_ws/src/offboard_comms/config/oogway.yaml b/onboard/catkin_ws/src/offboard_comms/config/oogway.yaml index 8bd068f15..587bfdc33 100644 --- a/onboard/catkin_ws/src/offboard_comms/config/oogway.yaml +++ b/onboard/catkin_ws/src/offboard_comms/config/oogway.yaml @@ -6,10 +6,19 @@ arduino: sketch: Arduino Sketchbooks/PressureArduino requires_ros_lib: false thruster: - ftdi: E49AFA8B51514C4B39202020FF024242 - fqbn: arduino:megaavr:nona4809 - core: arduino:megaavr + ftdi: B0004VDI + fqbn: arduino:avr:nano + core: arduino:avr sketch: Arduino Sketchbooks/ThrusterArduino requires_ros_lib: true pre_compile: rosrun offboard_comms copy_offset.sh 1 - post_compile: rosrun offboard_comms copy_offset.sh 0 \ No newline at end of file + post_compile: rosrun offboard_comms copy_offset.sh 0 + servo_sensors: + 
ftdi: A10N1UC1 + fqbn: arduino:avr:nano + core: arduino:avr + sketch: Arduino Sketchbooks/ServoSensorArduino + requires_ros_lib: false + libraries: + - servo + - DHT11 diff --git a/onboard/catkin_ws/src/offboard_comms/scripts/servo_wrapper.py b/onboard/catkin_ws/src/offboard_comms/scripts/servo_wrapper.py old mode 100755 new mode 100644 diff --git a/onboard/catkin_ws/src/sensor_fusion/params/main.yaml b/onboard/catkin_ws/src/sensor_fusion/params/main.yaml index a002ffefe..f6ebf1740 100644 --- a/onboard/catkin_ws/src/sensor_fusion/params/main.yaml +++ b/onboard/catkin_ws/src/sensor_fusion/params/main.yaml @@ -2,6 +2,8 @@ odom_frame: odom base_link_frame: base_link world_frame: odom +frequency: 20 + odom0: sensors/dvl/odom odom0_config: [false, false, false, # x, y, z false, false, false, # roll, pitch, yaw @@ -9,14 +11,6 @@ odom0_config: [false, false, false, # x, y, z false, false, false, # angular velocity false, false, false] # linear acceleration -imu0_queue_size: 20 -frequency: 20 - -imu0_differential: true -pose0_differential: true -smooth_lagged_data: true -history_length: 0.3 - imu0: vectornav/IMU imu0_config: [false, false, false, false, false, false, @@ -31,6 +25,13 @@ pose0_config: [false, false, true, false, false, false, false, false, false] +imu0_queue_size: 20 + +pose0_differential: true +odom0_differential: true +smooth_lagged_data: true +history_length: 0.5 + initial_state: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, @@ -38,20 +39,19 @@ initial_state: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] dynamic_process_noise_covariance: true -process_noise_covariance: [0.00005, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.00005, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.00006, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.00003, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.00003, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.00006, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.000025, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.000025, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00004, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00001, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00001, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00001, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00001, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.000015] - +process_noise_covariance: [0.00005, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.00005, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.000001, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.00003, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.00003, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.00006, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.000025, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.000025, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00004, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00001, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00001, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.001, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00001, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00001, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.000015] diff --git a/onboard/catkin_ws/src/sonar/scripts/sonar.py b/onboard/catkin_ws/src/sonar/scripts/sonar.py index 2fde2b9e5..4eba81762 100755 --- a/onboard/catkin_ws/src/sonar/scripts/sonar.py +++ b/onboard/catkin_ws/src/sonar/scripts/sonar.py @@ -25,6 +25,7 @@ class Sonar: FILTER_INDEX = 100 # First x values are filtered out DEFAULT_RANGE = 5 # m + prev_range = DEFAULT_RANGE DEFAULT_NUMER_OF_SAMPLES = 1200 # 1200 is max resolution SONAR_FTDI_OOGWAY = "DK0C1WF7" @@ -148,6 +149,7 @@ def set_new_range(self, range): Returns: Nothing """ + self.prev_range = range self.sample_period = self.range_to_period(range) self.ping360.set_sample_period(self.sample_period) @@ -351,7 +353,7 @@ def preform_sonar_request(self): rospy.loginfo("Bad sonar request") return - if new_range != self.DEFAULT_RANGE: + if new_range != self.prev_range: self.set_new_range(new_range) object_pose, plot, normal_angle = self.get_xy_of_object_in_sweep(left_gradians, right_gradians) diff --git a/onboard/catkin_ws/src/system_utils/launch/record_bag.launch b/onboard/catkin_ws/src/system_utils/launch/record_bag.launch new file mode 100644 index 000000000..f21835d63 --- /dev/null +++ b/onboard/catkin_ws/src/system_utils/launch/record_bag.launch @@ -0,0 +1,6 @@ + + + + + + diff --git a/onboard/catkin_ws/src/system_utils/launch/system_utils.launch b/onboard/catkin_ws/src/system_utils/launch/system_utils.launch index f8c2cab06..20d8efb1b 100644 --- a/onboard/catkin_ws/src/system_utils/launch/system_utils.launch +++ b/onboard/catkin_ws/src/system_utils/launch/system_utils.launch @@ -1,4 +1,9 @@ + + + + + \ No newline at end of file diff --git a/onboard/catkin_ws/src/system_utils/scripts/record_bag.py b/onboard/catkin_ws/src/system_utils/scripts/record_bag.py new file mode 100755 index 000000000..54d1385e3 --- /dev/null +++ b/onboard/catkin_ws/src/system_utils/scripts/record_bag.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python +from pathlib import Path +import rospy +from std_msgs.msg import Float64 + +import subprocess +import signal +import os +from datetime import datetime + + +class RecordBag: + + # Duration of time to wait for a voltage message before stopping recording + TIMEOUT_DURATION = rospy.Duration(5) + + def __init__(self): + # Initialize variables + self.process = None + + # Initialize last message time to current time + self.last_msg_time = rospy.Time.now() + + # Subscribe to the voltage topic + rospy.Subscriber("/sensors/voltage", Float64, self.voltage_callback) + + # Create a timer to check for timeout; calls check_timeout every second + self.timer = rospy.Timer(rospy.Duration(1), self.check_timeout) + + rospy.loginfo("Record bag node started.") + + def voltage_callback(self, data: Float64): + """ + Callback function for the voltage topic. If the voltage is above 5V and the node is not already recording, + start recording. If the voltage drops below 5V and the node is currently recording, stop recording. + + Args: + data: The voltage value published to the topic. 
+ """ + + # Update last received message time + self.last_msg_time = rospy.Time.now() + + # If voltage is below 5V and the node is currently recording, stop recording + if data.data < 5: + if self.process: + rospy.loginfo("Voltage is below 5V. Stopping recording due to low voltage.") + self.stop_recording() + self.shutdown_node() + + # If voltage is above 5V and the node is not currently recording, start recording + else: + if self.process is None: + self.start_recording() + + def start_recording(self): + """ + Start recording all topics to a bag file by executing the `rosbag record` command in the shell. The bag file is + saved in the bag_files directory in the robosub-ros package. The file name is the current date and time in a + human-readable format. + """ + + # Get the current time in seconds since the Unix epoch + current_time_sec = rospy.Time.now().to_sec() + + # Convert to a human-readable format + human_readable_time = datetime.fromtimestamp(current_time_sec).strftime('%Y.%m.%d_%I-%M-%S_%p') + + # Start recording all topics to a bag file + Path("/root/dev/robosub-ros/bag_files/").mkdir(parents=False, exist_ok=True) + + command = f"rosbag record -a -O /root/dev/robosub-ros/bag_files/{human_readable_time}.bag" + self.process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.DEVNULL, shell=True, + preexec_fn=os.setsid) + + rospy.loginfo("Started recording all topics.") + + def stop_recording(self): + """ + Stop recording the bag file, if it is currently recording. + """ + if self.process: + os.killpg(os.getpgid(self.process.pid), signal.SIGINT) + self.process = None + rospy.loginfo("Recording stopped.") + + def check_timeout(self, event): + """ + Check if the last voltage message received was more than TIMEOUT_DURATION ago. If so, and if the node is + currently recording, stop recording and shutdown the node. + + Args: + event: The timer event that triggered this function. + """ + current_time = rospy.Time.now() + if (current_time - self.last_msg_time) > self.TIMEOUT_DURATION: + if self.process is not None: + rospy.loginfo(f"No voltage messages received for {self.TIMEOUT_DURATION.to_sec()} seconds. " + f"Stopping recording.") + self.stop_recording() + self.shutdown_node() + + def shutdown_node(self): + """ + Shutdown the node and stop recording. + """ + rospy.signal_shutdown("Stopping node due to recording stop.") + + def run(self): + """ + Run the node. The node will wait for voltage messages to start recording. If the node is shutdown, it will stop + recording. + """ + rospy.spin() + + if self.process: + rospy.loginfo("Shutting down. 
Stopping recording.") + self.stop_recording() + + +if __name__ == '__main__': + + rospy.init_node('record_bag', anonymous=True) + + # Get enable_recording launch param + enable_recording = rospy.get_param("~enable_recording", True) + + # If recording is disabled, exit + if enable_recording: + recorder = RecordBag() + + try: + recorder.run() + except rospy.ROSInterruptException: + pass + else: + rospy.loginfo("Recording is disabled.") + rospy.signal_shutdown("Recording is disabled.") diff --git a/onboard/catkin_ws/src/task_planning/launch/task_runner.launch b/onboard/catkin_ws/src/task_planning/launch/task_runner.launch index db12da6e9..84a55f055 100644 --- a/onboard/catkin_ws/src/task_planning/launch/task_runner.launch +++ b/onboard/catkin_ws/src/task_planning/launch/task_runner.launch @@ -1,6 +1,8 @@ - - - - + + + + + + diff --git a/onboard/catkin_ws/src/task_planning/scripts/buoy_task.py b/onboard/catkin_ws/src/task_planning/scripts/buoy_task.py deleted file mode 100644 index daf5e9619..000000000 --- a/onboard/catkin_ws/src/task_planning/scripts/buoy_task.py +++ /dev/null @@ -1,116 +0,0 @@ -from interface.controls import ControlsInterface -from interface.cv import CVInterface -import smach -import cv_tasks -import task_utils -from move_tasks import ( - AllocateVelocityLocalTask, - HoldPositionTask, - MoveToUserDataPoseLocalTask, - MoveToPoseLocalTask, -) - - -class BuoyTask(smach.StateMachine): - CENTER_TOLERANCE = 0.05 - ROTATE_SPEED = 1 - TOO_CLOSE_TO_BUOY = 0.5 - targets = ["buoy_abydos_taurus", "buoy_abydos_sepenscaput"] - - def __init__(self, listener, controls: ControlsInterface, cv: CVInterface): - super().__init__(outcomes=["done"]) # maybe do calls here - also TODO add move backwards at the end of the func - - with self: - for i in range(len(self.targets)): - smach.StateMachine.add( - f"CHOOSE_ROTATE_DIR_{i}", - cv_tasks.SpinDirectionTask( - self.targets[i], self.CENTER_TOLERANCE, cv - ), - transitions={ - "left": f"ROTATE_LEFT_{i}", - "right": f"ROTATE_RIGHT_{i}", - "center": f"BUOY_CENTERED_{i}", - }, - ) - - smach.StateMachine.add( - f"ROTATE_LEFT_{i}", - AllocateVelocityLocalTask( - 0, 0, 0, 0, 0, self.ROTATE_SPEED, controls - ), - transitions={"done": f"CHOOSE_ROTATE_DIR_{i}"}, - ) - smach.StateMachine.add( - f"ROTATE_RIGHT_{i}", - AllocateVelocityLocalTask( - 0, 0, 0, 0, 0, -self.ROTATE_SPEED, controls - ), - transitions={"done": f"CHOOSE_ROTATE_DIR_{i}"}, - ) - - # We are now facing the buoy - smach.StateMachine.add( - f"BUOY_CENTERED_{i}", - HoldPositionTask(controls), - transitions={"done": f"BUOY_VALID_{i}"}, - ) - smach.StateMachine.add( - f"BUOY_VALID_{i}", - cv_tasks.ObjectCoordsTask(self.targets[i], cv), - transitions={ - "valid": f"BUOY_POSE_{i}", - "invalid": f"BUOY_VALID_{i}", - }, - ) - smach.StateMachine.add( - f"BUOY_POSE_{i}", - task_utils.PointToPoseTask(controls), - transitions={"done": f"BUOY_TOO_CLOSE_{i}"}, - ) - smach.StateMachine.add( - f"BUOY_TOO_CLOSE_{i}", - task_utils.LambdaTask( - self.check_too_close, ["close", "far"], input_keys=["pose"] - ), - transitions={ - "close": f"BONK_BUOY_{i}", - "far": f"APPROACH_BUOY_{i}", - }, - ) - smach.StateMachine.add( - f"APPROACH_BUOY_{i}", - MoveToUserDataPoseLocalTask(controls, listener), - transitions={ - "continue": f"BUOY_VALID_{i}", - "done": f"MOVE_BACK_{i}", # We should switch to BONK_BUOY_ before this - }, - ) - smach.StateMachine.add( - f"BONK_BUOY_{i}", - MoveToUserDataPoseLocalTask(controls, listener), - transitions={ - "continue": f"BONK_BUOY_{i}", - "done": f"MOVE_BACK_{i}", - }, - ) - 
smach.StateMachine.add(
-                f"MOVE_BACK_{i}",
-                MoveToPoseLocalTask(-2, 0, 0, 0, 0, 0, controls, listener),
-                transitions={
-                    "continue": f"MOVE_BACK_{i}",
-                    "done": f"CHOOSE_ROTATE_DIR_{i+1}"
-                    if i < len(self.targets) - 1
-                    else "done",
-                },
-            )
-
-    def check_too_close(self, userdata):
-        if (
-            userdata.pose.position.x**2
-            + userdata.pose.position.y**2
-            + userdata.pose.position.z**2
-            > self.TOO_CLOSE_TO_BUOY
-        ):
-            return "far"
-        return "close"
diff --git a/onboard/catkin_ws/src/task_planning/scripts/comp_tasks.py b/onboard/catkin_ws/src/task_planning/scripts/comp_tasks.py
new file mode 100644
index 000000000..1969cc7e6
--- /dev/null
+++ b/onboard/catkin_ws/src/task_planning/scripts/comp_tasks.py
@@ -0,0 +1,1196 @@
+import rospy
+import math
+import copy
+import numpy as np
+
+from transforms3d.euler import quat2euler
+
+from std_srvs.srv import SetBool
+from geometry_msgs.msg import Twist
+from custom_msgs.msg import ControlTypes
+
+from task import Task, task
+import move_tasks
+import cv_tasks
+from utils import geometry_utils
+from task import Yield
+
+from enum import Enum
+
+from interface.state import State
+from interface.cv import CV
+from interface.controls import Controls
+
+from utils.coroutine_utils import sleep
+
+
+# TODO: move stabilize() to move_tasks.py
+#
+# TODO: see if we can remove sleep() since we already have sleep() in coroutine_utils.py
+#
+# TODO: create a common skeleton @task class/interface with all the common functions to remove redundancy:
+#   - move_x
+#   - move_y
+#   - move_z
+#   - correct_x
+#   - correct_y
+#   - correct_z
+#   - correct_yaw
+#   - correct_roll_and_pitch
+#   - get_yaw_correction
+#   - ...
+# These implementations can be overridden by the tasks that use that interface.
+#
+# TODO: look into creating common higher-level routines:
+#   - yaw_to_cv_object
+#   - spiral search (e.g. spiral_bin_search)
+#   - logarithmic search (stretch)
+#   - track and align with object center for bottom camera (e.g. search_for_bins & center_path_marker)
+#   - track and move toward CV object (e.g. move_to_pink_bins & move_to_buoy)
+#     - takes in the termination condition function as a parameter
+#     - can improve on cv_tasks.move_to_cv_obj implementation (or replace it completely)
+
+
+RECT_HEIGHT_METERS = 0.3048
+
+
+@task
+async def gate_style_task(self: Task, depth_level=0.9) -> Task[None, None, None]:
+    """
+    Complete two full barrel rolls.
+ """ + + rospy.loginfo("Started gate style task") + + DEPTH_LEVEL = State().orig_depth - depth_level + + async def sleep(secs): + duration = rospy.Duration(secs) + start_time = rospy.Time.now() + while start_time + duration > rospy.Time.now(): + await Yield() + + async def roll(): + power = Twist() + power.angular.x = 1 + Controls().publish_desired_power(power) + rospy.loginfo("Published roll power") + + await sleep(2.25) + + rospy.loginfo("Completed roll") + + Controls().publish_desired_power(Twist()) + rospy.loginfo("Published zero power") + + await sleep(2) + + rospy.loginfo("Completed zero") + + await move_tasks.depth_correction(DEPTH_LEVEL, parent=self) + await roll() + State().reset_pose() + await roll() + State().reset_pose() + await sleep(3) + await move_tasks.depth_correction(DEPTH_LEVEL, parent=self) + await sleep(3) + + imu_orientation = State().imu.orientation + euler_angles = quat2euler([imu_orientation.w, imu_orientation.x, imu_orientation.y, imu_orientation.z]) + roll_correction = -euler_angles[0] + pitch_correction = -euler_angles[1] + + rospy.loginfo(f"Roll, pitch correction: {roll_correction, pitch_correction}") + await move_tasks.move_to_pose_local(geometry_utils.create_pose(0, 0, 0, roll_correction, pitch_correction, 0), + parent=self) + State().reset_pose() + rospy.loginfo("Reset orientation") + + +@task +async def buoy_task(self: Task, turn_to_face_buoy=False, depth=0.7) -> Task[None, None, None]: + """ + Circumnavigate the buoy. Requires robot to have submerged 0.5 meters. + """ + + rospy.loginfo("Starting buoy task") + + DEPTH_LEVEL = State().orig_depth - depth + + async def correct_y(): + await cv_tasks.correct_y("buoy", parent=self) + + async def correct_z(): + await cv_tasks.correct_z(prop="buoy", parent=self) + + async def correct_depth(): + await move_tasks.correct_depth(desired_depth=DEPTH_LEVEL, parent=self) + self.correct_depth = correct_depth + + async def move_x(step=1): + await move_tasks.move_x(step=step, parent=self) + + def get_step_size(dist, dist_threshold): + if dist > 3: + return 2 + elif dist > 2: + return 1 + elif dist > 1.5: + return 0.5 + else: + return min(dist - dist_threshold + 0.1, 0.25) + + async def move_to_buoy(buoy_dist_threshold=1): + buoy_dist = CV().cv_data["buoy"].coords.x + await correct_y() + await correct_depth() + + while buoy_dist > buoy_dist_threshold: + await move_x(step=get_step_size(buoy_dist, buoy_dist_threshold)) + rospy.loginfo(f"Buoy dist: {CV().cv_data['buoy'].coords.x}") + await correct_y() + if buoy_dist < 3: + await correct_z() + else: + await correct_depth() + + await Yield() + buoy_dist = CV().cv_data["buoy"].coords.x + rospy.loginfo(f"Buoy dist: {CV().cv_data['buoy'].coords.x}") + + await correct_z() + + await move_to_buoy() + + start_imu_orientation = copy.deepcopy(State().imu.orientation) + start_imu_euler_angles = quat2euler(geometry_utils.geometry_quat_to_transforms3d_quat(start_imu_orientation)) + + def get_yaw_correction(): + cur_imu_orientation = copy.deepcopy(State().imu.orientation) + cur_imu_euler_angles = quat2euler(geometry_utils.geometry_quat_to_transforms3d_quat(cur_imu_orientation)) + + return start_imu_euler_angles[2] - cur_imu_euler_angles[2] + + async def correct_yaw(): + yaw_correction = get_yaw_correction() + rospy.loginfo(f"Yaw correction: {yaw_correction}") + sign = 1 if yaw_correction > 0.1 else (-1 if yaw_correction < -0.1 else 0) + await move_tasks.move_to_pose_local( + geometry_utils.create_pose(0, 0, 0, 0, 0, yaw_correction + (sign * 0.1)), + keep_level=True, + parent=self + ) + 
rospy.loginfo("Corrected yaw") + self.correct_yaw = correct_yaw + + async def move_with_directions(directions, correct_yaw=True): + await move_tasks.move_with_directions(directions, correct_yaw=correct_yaw, correct_depth=True, parent=self) + + if turn_to_face_buoy: + def get_step_size_move_away(dist, dist_threshold): + if dist < 0.75: + return -0.5 + else: + return max(dist - dist_threshold - 0.1, -0.25) + + async def move_away_from_buoy(buoy_dist_threshold=1.0): + rospy.loginfo("Moving away from buoy") + buoy_dist = CV().cv_data["buoy"].coords.x + await correct_y() + await correct_z() + while buoy_dist < buoy_dist_threshold: + await move_x(step=get_step_size_move_away(buoy_dist, buoy_dist_threshold)) + + rospy.loginfo(f"Buoy dist: {CV().cv_data['buoy'].coords.x}") + await correct_y() + await correct_z() + await Yield() + buoy_dist = CV().cv_data["buoy"].coords.x + rospy.loginfo(f"Buoy dist: {CV().cv_data['buoy'].coords.x}") + + rospy.loginfo("Moved away from buoy") + + # Circumnavigate buoy + for _ in range(4): + DEPTH_LEVEL = State().depth + directions = [ + (0, 1.5, 0), + (1, 0, 0), + ] + await move_with_directions(directions, correct_yaw=False) + await move_tasks.move_to_pose_local(geometry_utils.create_pose(0, 0, 0, 0, 0, -math.radians(90)), + parent=self) + rospy.loginfo("Yaw 90 deg") + await move_away_from_buoy() + + else: + directions = [ + (0, 1.25, 0), + (2.25, 0, 0), + (0, -2.5, 0), + (-2.5, 0, 0), + (0, 1.25, 0), + ] + await move_with_directions(directions, correct_yaw=False) + + await move_to_buoy() + + +@task +async def after_buoy_task(self: Task): + + DEPTH_LEVEL = State().depth + latency_threshold = 3 + + async def correct_depth(): + await move_tasks.correct_depth(desired_depth=DEPTH_LEVEL, parent=self) + self.correct_depth = correct_depth + + def is_receiving_cv_data(): + return "path_marker" in CV().cv_data and \ + rospy.Time.now().secs - CV().cv_data["path_marker"].header.stamp.secs < latency_threshold + + def stabilize(): + pose_to_hold = copy.deepcopy(State().state.pose.pose) + Controls().publish_desired_position(pose_to_hold) + + directions = [ + (0, -1.25, 0), + (2.25, 0, 0), + (0, 2.5, 0), + (-2.5, 0, 0), + (0, -1.25, 0), + ] + circumnavigate_task = move_tasks.move_with_directions( + directions, correct_yaw=False, correct_depth=True, parent=self) + + while not circumnavigate_task.done: + circumnavigate_task.step() + if is_receiving_cv_data(): + stabilize() + await sleep(5) + break + + await Yield() + + await move_tasks.move_with_directions([(0, 0, 0, 0, 0, -math.radians(90))], parent=self) + + await align_path_marker(direction=-1, parent=self) + + DEPTH_LEVEL = State().orig_depth - 0.7 + + directions = [ + (2, 0, 0), + (2, 0, 0), + (2, 0, 0), + (1, 0, 0) + ] + + await move_tasks.move_with_directions(directions, correct_yaw=False, correct_depth=True, parent=self) + + found_bins = await spiral_bin_search(parent=self) + + if found_bins: + await bin_task(parent=self) + + await yaw_to_cv_object('bin_pink_front', direction=1, yaw_threshold=math.radians(15), + depth_level=1.0, parent=Task.MAIN_ID) + + await octagon_task(direction=1, parent=self) + + +@task +async def buoy_to_octagon(self: Task, direction: int = 1, move_forward: int = 0): + DEPTH_LEVEL = State().orig_depth - 0.7 + + rospy.loginfo("Started buoy to octagon") + + async def move_with_directions(directions): + await move_tasks.move_with_directions(directions, correct_yaw=False, correct_depth=True, parent=self) + + async def correct_depth(): + await move_tasks.correct_depth(desired_depth=DEPTH_LEVEL, parent=self) 
+ self.correct_depth = correct_depth + + # Move towards octagon + directions = [ + (0, 2 * direction, 0), + (0, 2 * direction, 0), + (0, 2 * direction, 0), + (0, 1 * direction, 0), + (move_forward, 0, 0) + ] + await move_with_directions(directions) + + +@task +async def buoy_circumnavigation_power(self: Task, depth=0.7) -> Task[None, None, None]: + + DEPTH_LEVEL = State().orig_depth - depth + + def publish_power(): + power = Twist() + power.linear.y = 0.9 + power.angular.z = -0.1 + Controls().set_axis_control_type(x=ControlTypes.DESIRED_POWER, y=ControlTypes.DESIRED_POWER, + yaw=ControlTypes.DESIRED_POWER) + Controls().publish_desired_power(power, set_control_types=False) + + def stabilize(): + pose_to_hold = copy.deepcopy(State().state.pose.pose) + Controls().publish_desired_position(pose_to_hold) + + async def correct_depth(): + await move_tasks.correct_depth(desired_depth=DEPTH_LEVEL, parent=self) + + for _ in range(4): + publish_power() + rospy.loginfo("Publish power") + await sleep(6) + rospy.loginfo("Sleep 5 (1)") + stabilize() + rospy.loginfo("Stabilized") + await sleep(5) + rospy.loginfo("Sleep 5 (2)") + await correct_depth() + await move_tasks.move_to_pose_local(geometry_utils.create_pose(0, 1, 0, 0, 0, 0), parent=self) + + +@task +async def initial_submerge(self: Task, submerge_dist: float) -> Task[None, None, None]: + """ + Submerge the robot a given amount. + + Args: + submerge_dist: The distance to submerge the robot in meters. + """ + + await move_tasks.move_to_pose_local( + geometry_utils.create_pose(0, 0, submerge_dist, 0, 0, 0), + keep_level=True, + parent=self + ) + rospy.loginfo(f"Submerged {submerge_dist} meters") + + async def correct_roll_and_pitch(): + imu_orientation = State().imu.orientation + euler_angles = quat2euler([imu_orientation.w, imu_orientation.x, imu_orientation.y, imu_orientation.z]) + roll_correction = -euler_angles[0] * 1.2 + pitch_correction = -euler_angles[1] * 1.2 + + rospy.loginfo(f"Roll, pitch correction: {roll_correction, pitch_correction}") + await move_tasks.move_to_pose_local(geometry_utils.create_pose(0, 0, 0, roll_correction, pitch_correction, 0), + parent=self) + + await correct_roll_and_pitch() + + +@task +async def coin_flip(self: Task, depth_level=0.7) -> Task[None, None, None]: + rospy.loginfo("Started coin flip") + DEPTH_LEVEL = State().orig_depth - depth_level + MAXIMUM_YAW = math.radians(30) + + def get_step_size(desired_yaw): + return min(abs(desired_yaw), MAXIMUM_YAW) + + def get_yaw_correction(): + orig_imu_orientation = copy.deepcopy(State().orig_imu.orientation) + orig_imu_euler_angles = quat2euler(geometry_utils.geometry_quat_to_transforms3d_quat(orig_imu_orientation)) + + cur_imu_orientation = copy.deepcopy(State().imu.orientation) + cur_imu_euler_angles = quat2euler(geometry_utils.geometry_quat_to_transforms3d_quat(cur_imu_orientation)) + + correction = orig_imu_euler_angles[2] - cur_imu_euler_angles[2] + + sign_correction = np.sign(correction) + desired_yaw = sign_correction * get_step_size(correction) + rospy.loginfo(f'Coinflip: desired_yaw = {desired_yaw}') + + return correction + # return desired_yaw + + while abs(yaw_correction := get_yaw_correction()) > math.radians(5): + rospy.loginfo(f"Yaw correction: {yaw_correction}") + sign = 1 if yaw_correction > 0.1 else (-1 if yaw_correction < -0.1 else 0) + await move_tasks.move_to_pose_local( + geometry_utils.create_pose(0, 0, 0, 0, 0, yaw_correction + (sign * 0.1)), + keep_level=True, + parent=self + ) + rospy.loginfo("Back to original orientation") + + rospy.loginfo(f"Final 
yaw correction: {get_yaw_correction()}") + + depth_delta = DEPTH_LEVEL - State().depth + await move_tasks.move_to_pose_local(geometry_utils.create_pose(0, 0, depth_delta, 0, 0, 0), parent=self) + rospy.loginfo(f"Corrected depth {depth_delta}") + + rospy.loginfo("Completed coin flip") + + +@task +async def gate_task_dead_reckoning(self: Task) -> Task[None, None, None]: + rospy.loginfo("Started gate task") + DEPTH_LEVEL = State().orig_depth - 0.6 + STEPS = 5 + + for _ in range(STEPS): + await move_tasks.move_x(step=1, parent=self) + await move_tasks.correct_depth(desired_depth=DEPTH_LEVEL, parent=self) + + rospy.loginfo("Moved through gate") + + +@task +async def gate_task(self: Task, offset: int = 0, direction: int = 1) -> Task[None, None, None]: + rospy.loginfo("Started gate task") + DEPTH_LEVEL = State().orig_depth - 0.7 + + async def correct_y(factor=1): + await cv_tasks.correct_y(prop="gate_red_cw", add_factor=0.2 + offset, mult_factor=factor, parent=self) + + async def correct_z(): + await cv_tasks.correct_z(prop="gate_red_cw", parent=self) + + async def correct_depth(): + await move_tasks.correct_depth(desired_depth=DEPTH_LEVEL, parent=self) + self.correct_depth = correct_depth + + async def move_x(step=1): + await move_tasks.move_x(step=step, parent=self) + + def get_step_size(dist): + if dist > 4: + return 1 + else: + return max(dist-3 + 0.25, 0.25) + + async def sleep(secs): + duration = rospy.Duration(secs) + start_time = rospy.Time.now() + while start_time + duration > rospy.Time.now(): + await Yield() + + rospy.loginfo("Begin sleep") + await sleep(2) + rospy.loginfo("End sleep") + + gate_dist = CV().cv_data["gate_red_cw"].coords.x + # await correct_y(factor=0.5) + await correct_depth() + num_corrections = 0 + while gate_dist > 3: + await move_x(step=get_step_size(gate_dist)) + await yaw_to_cv_object('gate_red_cw', direction=-1, yaw_threshold=math.radians(10), + latency_threshold=2, depth_level=0.6, parent=self), + # await correct_y(factor=(0.5 if num_corrections < 0 else 1)) + await correct_depth() + await Yield() + gate_dist = CV().cv_data["gate_red_cw"].coords.x + rospy.loginfo(f"Gate dist: {gate_dist}") + num_corrections += 1 + + directions = [ + (2, 0, 0), + (0, 0.2 * direction, 0), + (2, 0, 0), + (1, 0, 0) + ] + + await move_tasks.move_with_directions(directions, correct_yaw=False, correct_depth=True, parent=self) + + rospy.loginfo("Moved through gate") + + +@task +async def yaw_to_cv_object(self: Task, cv_object: str, direction=1, + yaw_threshold=math.radians(30), latency_threshold=10, + depth_level=0.5) -> Task[None, None, None]: + """ + Corrects the yaw relative to the CV object + """ + DEPTH_LEVEL = State().orig_depth - depth_level + MAXIMUM_YAW = math.radians(20) + + rospy.loginfo("Starting yaw_to_cv_object") + + async def sleep(secs): + duration = rospy.Duration(secs) + start_time = rospy.Time.now() + while start_time + duration > rospy.Time.now(): + await Yield() + + async def correct_depth(): + # await move_tasks.depth_correction(DEPTH_LEVEL, parent=self) + await move_tasks.depth_correction(desired_depth=DEPTH_LEVEL, parent=self) + + def is_receiving_cv_data(): + return cv_object in CV().cv_data and \ + rospy.Time.now().secs - CV().cv_data[cv_object].header.stamp.secs < latency_threshold + + def get_step_size(desired_yaw): + # desired yaw in radians + return min(abs(desired_yaw), MAXIMUM_YAW) + + async def yaw_until_object_detection(): + while not is_receiving_cv_data(): + rospy.loginfo(f"No {cv_object} detection, setting yaw setpoint {MAXIMUM_YAW}") + await 
move_tasks.move_to_pose_local(geometry_utils.create_pose(0, 0, 0, 0, 0, MAXIMUM_YAW * direction), + parent=self) + await correct_depth() + await Yield() + + # Yaw until object detection + await correct_depth() + await yaw_until_object_detection() + + rospy.loginfo(f"{cv_object} detected. Now centering {cv_object} in frame...") + + # Center detected object in camera frame + cv_object_yaw = CV().cv_data[cv_object].yaw + await correct_depth() + rospy.loginfo('abs(cv_object_yaw)='+str(abs(cv_object_yaw))) + rospy.loginfo('yaw_threshold='+str(yaw_threshold)) + while abs(cv_object_yaw) > yaw_threshold: + sign_cv_object_yaw = np.sign(cv_object_yaw) + correction = get_step_size(cv_object_yaw) + desired_yaw = sign_cv_object_yaw * correction + + rospy.loginfo(f"Detected yaw {cv_object_yaw} is greater than threshold {yaw_threshold}. Yawing: {desired_yaw}") + await move_tasks.move_to_pose_local(geometry_utils.create_pose(0, 0, 0, 0, 0, desired_yaw), + parent=self) + await correct_depth() + await Yield() + + if (not is_receiving_cv_data()): + rospy.loginfo(f"{cv_object} detection lost, running yaw_until_object_detection()") + await yaw_until_object_detection() + + cv_object_yaw = CV().cv_data[cv_object].yaw + + rospy.loginfo(f"{cv_object} centered.") + + await correct_depth() + + +@task +async def align_path_marker(self: Task, direction=1) -> Task[None, None, None]: + """ + Corrects the yaw relative to the CV object + """ + DEPTH_LEVEL = State().orig_depth - 0.5 + MAXIMUM_YAW = math.radians(30) + YAW_THRESHOLD = math.radians(5) + PIXEL_THRESHOLD = 70 + + rospy.loginfo("Starting align path marker") + + async def move_x(step=1): + await move_tasks.move_x(step=step, parent=self) + + async def move_y(step=1): + await move_tasks.move_y(step=step, parent=self) + + async def correct_depth(): + await move_tasks.depth_correction(desired_depth=DEPTH_LEVEL, parent=self) + + async def sleep(secs): + duration = rospy.Duration(secs) + start_time = rospy.Time.now() + while start_time + duration > rospy.Time.now(): + await Yield() + + def get_step_size(desired_yaw): + # desired yaw in rads + return min(abs(desired_yaw), MAXIMUM_YAW) + + def get_step_mult_factor(dist, threshold): + if abs(dist) < threshold: + return 0 + elif dist > threshold: + return 1 + else: + return -1 + + async def center_path_marker(pixel_threshold, step_size=0.20, x_offset=0, y_offset=0): + rospy.loginfo(CV().cv_data["path_marker_distance"]) + pixel_x = CV().cv_data["path_marker_distance"].x + x_offset + pixel_y = CV().cv_data["path_marker_distance"].y + y_offset + + count = 1 + while (max(pixel_x, pixel_y) > pixel_threshold or min(pixel_x, pixel_y) < -pixel_threshold): + rospy.loginfo(CV().cv_data["path_marker_distance"]) + + await move_x(step=step_size * get_step_mult_factor(pixel_x, pixel_threshold)) + await move_y(step=step_size * get_step_mult_factor(pixel_y, pixel_threshold)) + + pixel_x = CV().cv_data["path_marker_distance"].x + x_offset + pixel_y = CV().cv_data["path_marker_distance"].y + y_offset + + if count % 3 == 0: + rospy.loginfo("Correcting depth") + await correct_depth() + + await Yield() + + rospy.loginfo(f"x: {pixel_x}, y: {pixel_y}") + + count += 1 + + rospy.loginfo("Finished centering path marker") + rospy.loginfo(f"x: {pixel_x}, y: {pixel_y}") + + rospy.loginfo("Now aligning path marker in frame...") + + # Center detected object in camera frame + path_marker_yaw = CV().cv_data["path_marker"].yaw + await correct_depth() + rospy.loginfo(f"abs(path_marker_yaw) = '{abs(path_marker_yaw)}") + rospy.loginfo(f"yaw_threshold = 
{str(YAW_THRESHOLD)}") + + while abs(path_marker_yaw) > YAW_THRESHOLD: + sign_path_marker_yaw = np.sign(path_marker_yaw) + correction = get_step_size(path_marker_yaw) + desired_yaw = sign_path_marker_yaw * correction + + rospy.loginfo(f"Detected yaw {path_marker_yaw} is greater than threshold {YAW_THRESHOLD}. Yawing: {desired_yaw}" + ) + await move_tasks.move_to_pose_local(geometry_utils.create_pose(0, 0, 0, 0, 0, desired_yaw), + parent=self) + await correct_depth() + await center_path_marker(pixel_threshold=PIXEL_THRESHOLD) + await correct_depth() + + await Yield() + + path_marker_yaw = CV().cv_data["path_marker"].yaw + + rospy.loginfo("Path marker centered.") + + await correct_depth() + + +@task +async def center_path_marker(self: Task): + DEPTH_LEVEL = State().orig_depth - 0.5 + PIXEL_THRESHOLD = 70 + + async def correct_depth(desired_depth=DEPTH_LEVEL): + await move_tasks.correct_depth(desired_depth=desired_depth, parent=self) + self.correct_depth = correct_depth + + async def move_x(step=1): + await move_tasks.move_x(step=step, parent=self) + + async def move_y(step=1): + await move_tasks.move_y(step=step, parent=self) + + def get_step_mult_factor(dist, threshold): + if abs(dist) < threshold: + return 0 + elif dist > threshold: + return 1 + else: + return -1 + + async def center_path_marker(pixel_threshold, step_size=0.20, x_offset=0, y_offset=0): + rospy.loginfo(CV().cv_data["path_marker_distance"]) + pixel_x = CV().cv_data["path_marker_distance"].x + x_offset + pixel_y = CV().cv_data["path_marker_distance"].y + y_offset + + count = 1 + while (max(pixel_x, pixel_y) > pixel_threshold or min(pixel_x, pixel_y) < -pixel_threshold): + rospy.loginfo(CV().cv_data["path_marker_distance"]) + + await move_x(step=step_size * get_step_mult_factor(pixel_x, pixel_threshold)) + await move_y(step=step_size * get_step_mult_factor(pixel_y, pixel_threshold)) + + pixel_x = CV().cv_data["path_marker_distance"].x + x_offset + pixel_y = CV().cv_data["path_marker_distance"].y + y_offset + + if count % 3 == 0: + rospy.loginfo("Correcting depth") + await correct_depth() + + await Yield() + + rospy.loginfo(f"x: {pixel_x}, y: {pixel_y}") + + count += 1 + + rospy.loginfo("Finished centering path marker") + rospy.loginfo(f"x: {pixel_x}, y: {pixel_y}") + + await correct_depth() + await center_path_marker(pixel_threshold=PIXEL_THRESHOLD, x_offset=-120, y_offset=-120) + + +@task +async def path_marker_to_pink_bin(self: Task, maximum_distance: int = 6): + DEPTH_LEVEL = State().orig_depth - 0.5 + AREA_THRESHOLD = 1000 + LATENCY_THRESHOLD = 1 + + rospy.loginfo("Starting path marker to bins") + + async def correct_depth(desired_depth): + await move_tasks.correct_depth(desired_depth=desired_depth, parent=self) + + def is_receiving_bin_data(bin_object, latest_detection_time): + if not latest_detection_time or bin_object not in CV().data: + return False + + width = CV().cv_data[bin_object].width + height = CV().cv_data[bin_object].height + + return width * height >= AREA_THRESHOLD and \ + rospy.Time.now().to_sec() - CV().cv_data[bin_object].header.stamp.secs < LATENCY_THRESHOLD and \ + abs(CV().cv_data[bin_object].header.stamp.secs - latest_detection_time) < LATENCY_THRESHOLD + + async def move_x(step=1): + await move_tasks.move_x(step=step, parent=self) + + def stabilize(): + pose_to_hold = copy.deepcopy(State().state.pose.pose) + Controls().publish_desired_position(pose_to_hold) + + async def sleep(secs): + duration = rospy.Duration(secs) + start_time = rospy.Time.now() + while start_time + duration > rospy.Time.now(): + 
await Yield() + + async def move_to_bins(): + count = 1 + bin_red_time = None + bin_blue_time = None + + await move_x(step=1) + + while not is_receiving_bin_data("bin_red", bin_red_time) \ + or not is_receiving_bin_data("bin_blue", bin_blue_time): + if "bin_red" in CV().cv_data: + bin_red_time = CV().cv_data["bin_red"].header.stamp.secs + if "bin_blue" in CV().cv_data: + bin_blue_time = CV().cv_data["bin_blue"].header.stamp.secs + + await correct_depth(DEPTH_LEVEL) + + is_receiving_red_bin_data = is_receiving_bin_data("bin_red", bin_red_time) + is_receiving_blue_bin_data = is_receiving_bin_data("bin_blue", bin_blue_time) + + rospy.loginfo(f"Receiving red bin data: {is_receiving_red_bin_data}") + rospy.loginfo(f"Receiving blue bin data: {is_receiving_blue_bin_data}") + + step = 0.5 if (is_receiving_red_bin_data or is_receiving_blue_bin_data) else 1 + await move_x(step=step) + + await Yield() + + if count >= maximum_distance: + rospy.loginfo("Bin not spotted, exiting the loop...") + break + + count += 1 + + rospy.loginfo("Reached pink bins, stabilizing...") + stabilize() + await sleep(5) + + await move_to_bins() + + +@task +async def spiral_bin_search(self: Task) -> Task[None, None, None]: + DEPTH_LEVEL = State().orig_depth - 0.5 + AREA_THRESHOLD = 1000 + LATENCY_THRESHOLD = 1 + + class Direction(Enum): + FORWARD = 1, + BACK = 2, + LEFT = 3, + RIGHT = 4 + + def publish_power(direction: Direction): + power = Twist() + + if direction == Direction.FORWARD: + power.linear.x = 0.7 + elif direction == Direction.BACK: + power.linear.x = -0.7 + elif direction == Direction.LEFT: + power.linear.y = 1.0 + elif direction == Direction.RIGHT: + power.linear.y = -1.0 + + if direction in [Direction.FORWARD, Direction.BACK]: + Controls().set_axis_control_type(x=ControlTypes.DESIRED_POWER, y=ControlTypes.DESIRED_POSITION) + elif direction in [Direction.LEFT, Direction.RIGHT]: + Controls().set_axis_control_type(x=ControlTypes.DESIRED_POSITION, y=ControlTypes.DESIRED_POWER) + + Controls().publish_desired_power(power, set_control_types=False) + + async def move_step(direction: Direction, steps: float): + pose = geometry_utils.create_pose(0, 0, 0, 0, 0, 0) + + if direction == Direction.FORWARD: + pose.position.x = steps + elif direction == Direction.BACK: + pose.position.x = -steps + elif direction == Direction.LEFT: + pose.position.y = steps + elif direction == Direction.RIGHT: + pose.position.y = -steps + + await move_tasks.move_to_pose_local(pose, parent=self) + + DIRECTIONS = [ + (Direction.FORWARD, 1), + (Direction.LEFT, 1), + (Direction.BACK, 2), + (Direction.RIGHT, 2), + (Direction.FORWARD, 3), + (Direction.LEFT, 3), + (Direction.BACK, 4), + (Direction.RIGHT, 4), + (Direction.FORWARD, 5), + (Direction.LEFT, 5), + (Direction.BACK, 6), + (Direction.RIGHT, 6), + (Direction.FORWARD, 7), + (Direction.LEFT, 7), + (Direction.BACK, 8), + (Direction.RIGHT, 8), + ] + + async def correct_depth(desired_depth): + await move_tasks.correct_depth(desired_depth=desired_depth, parent=self) + + def is_receiving_bin_data(bin_object, latest_detection_time): + if not latest_detection_time or bin_object not in CV().cv_data: + return False + + width = CV().cv_data[bin_object].width + height = CV().cv_data[bin_object].height + + return width * height >= AREA_THRESHOLD and \ + rospy.Time.now().to_sec() - CV().cv_data[bin_object].header.stamp.secs < LATENCY_THRESHOLD and \ + abs(CV().cv_data[bin_object].header.stamp.secs - latest_detection_time) < LATENCY_THRESHOLD + + def stabilize(): + pose_to_hold = 
copy.deepcopy(State().state.pose.pose) + Controls().publish_desired_position(pose_to_hold) + + async def sleep(secs): + duration = rospy.Duration(secs) + start_time = rospy.Time.now() + while start_time + duration > rospy.Time.now(): + await Yield() + + async def search_for_bins(): + rospy.loginfo("Searching for red/blue bins...") + bin_red_time = None + bin_blue_time = None + + for direction, secs in DIRECTIONS: + bin_found = False + secs *= 0.5 + rospy.loginfo(f"Publishing power: {direction, secs}") + + await move_step(direction, secs) + + iterations = secs / 0.1 + for _ in range(int(iterations)): + if "bin_red" in CV().cv_data: + bin_red_time = CV().cv_data["bin_red"].header.stamp.secs + if "bin_blue" in CV().cv_data: + bin_blue_time = CV().cv_data["bin_blue"].header.stamp.secs + + is_receiving_red_bin_data = is_receiving_bin_data("bin_red", bin_red_time) + is_receiving_blue_bin_data = is_receiving_bin_data("bin_blue", bin_blue_time) + + bin_found = is_receiving_red_bin_data and is_receiving_blue_bin_data + + if bin_found: + break + + await sleep(0.1) + await Yield() + + await correct_depth(DEPTH_LEVEL) + + if bin_found: + rospy.loginfo("Found bin, terminating...") + break + + rospy.loginfo(f"Received red bin data: {is_receiving_red_bin_data}") + rospy.loginfo(f"Received blue bin data: {is_receiving_blue_bin_data}") + + return bin_found + + return (await search_for_bins()) + + +@task +async def bin_task(self: Task) -> Task[None, None, None]: + """ + Detects and drops markers into the red bin. Requires robot to have submerged 0.7 meters. + """ + + rospy.loginfo("Started bin task") + START_DEPTH_LEVEL = State().orig_depth - 0.6 + START_PIXEL_THRESHOLD = 70 + MID_DEPTH_LEVEL = State().orig_depth - 1.0 + MID_PIXEL_THRESHOLD = 30 + + FRAME_AREA = 480 * 600 + + TIMEOUT = rospy.Duration(240) + + start_time = rospy.Time.now() + + drop_marker = rospy.ServiceProxy('servo_control', SetBool) + + async def correct_x(target): + await cv_tasks.correct_x(prop=target, parent=self) + + async def correct_y(target): + await cv_tasks.correct_y(prop=target, parent=self) + + async def correct_z(): + pass + + async def correct_depth(desired_depth): + await move_tasks.correct_depth(desired_depth=desired_depth, parent=self) + self.correct_depth = correct_depth + + async def correct_yaw(): + yaw_correction = CV().cv_data["bin_angle"] + rospy.loginfo(f"Yaw correction: {yaw_correction}") + sign = 1 if yaw_correction > 0.1 else (-1 if yaw_correction < -0.1 else 0) + await move_tasks.move_to_pose_local( + geometry_utils.create_pose(0, 0, 0, 0, 0, yaw_correction + (sign * 0.1)), + keep_level=True, + parent=self + ) + rospy.loginfo("Corrected yaw") + self.correct_yaw = correct_yaw + + async def correct_roll_and_pitch(): + imu_orientation = State().imu.orientation + euler_angles = quat2euler([imu_orientation.w, imu_orientation.x, imu_orientation.y, imu_orientation.z]) + roll_correction = -euler_angles[0] * 1.2 + pitch_correction = -euler_angles[1] * 1.2 + + rospy.loginfo(f"Roll, pitch correction: {roll_correction, pitch_correction}") + await move_tasks.move_to_pose_local(geometry_utils.create_pose(0, 0, 0, roll_correction, pitch_correction, 0), + parent=self) + + async def move_x(step=1): + await move_tasks.move_x(step=step, parent=self) + + async def move_y(step=1): + await move_tasks.move_y(step=step, parent=self) + + def get_step_mult_factor(dist, threshold): + if abs(dist) < threshold: + return 0 + elif dist > threshold: + return 1 + else: + return -1 + + async def sleep(secs): + duration = rospy.Duration(secs) + 
start_time = rospy.Time.now() + while start_time + duration > rospy.Time.now(): + await Yield() + + async def track_bin(target, desired_depth, pixel_threshold, step_size=0.20, x_offset=0, y_offset=0): + rospy.loginfo(CV().cv_data[f"{target}_distance"]) + pixel_x = CV().cv_data[f"{target}_distance"].x + x_offset + pixel_y = CV().cv_data[f"{target}_distance"].y + y_offset + + width = CV().cv_data["bin_red"].width + height = CV().cv_data["bin_red"].height + + count = 1 + while (max(pixel_x, pixel_y) > pixel_threshold or min(pixel_x, pixel_y) < -pixel_threshold) \ + and width * height <= 1/3 * FRAME_AREA: + rospy.loginfo(CV().cv_data[f"{target}_distance"]) + + await move_x(step=step_size * get_step_mult_factor(pixel_x, pixel_threshold)) + await move_y(step=step_size * get_step_mult_factor(pixel_y, pixel_threshold)) + + width = CV().cv_data["bin_red"].width + height = CV().cv_data["bin_red"].height + + pixel_x = CV().cv_data[f"{target}_distance"].x + x_offset + pixel_y = CV().cv_data[f"{target}_distance"].y + y_offset + + if count % 3 == 0: + rospy.loginfo("correcting depth") + await correct_depth(desired_depth=desired_depth) + rospy.loginfo("correcting roll and pitch") + await correct_roll_and_pitch() + + if width * height >= 1/6 * FRAME_AREA and \ + abs(pixel_x) < pixel_threshold * 1.75 and abs(pixel_y) < pixel_threshold * 1.75: + rospy.loginfo(f"Reached area threshold: area = {width * height}") + break + + if rospy.Time.now() - start_time > TIMEOUT: + rospy.logwarn("Track bin timed out") + break + + await Yield() + + rospy.loginfo(f"x: {pixel_x}, y: {pixel_y}, area: {width * height}") + + count += 1 + + rospy.loginfo("Finished tracking bin") + rospy.loginfo(f"x: {pixel_x}, y: {pixel_y}, area: {width * height}") + + await correct_depth(desired_depth=START_DEPTH_LEVEL) + await track_bin(target="bin_red", desired_depth=START_DEPTH_LEVEL, pixel_threshold=START_PIXEL_THRESHOLD) + + await correct_yaw() + + await correct_depth(desired_depth=MID_DEPTH_LEVEL) + await track_bin(target="bin_red", desired_depth=MID_DEPTH_LEVEL, pixel_threshold=MID_PIXEL_THRESHOLD, + step_size=0.18, y_offset=30, x_offset=25) + + # If both balls loaded on the RIGHT, this is False + drop_marker(False) + rospy.loginfo("Dropped first marker") + await sleep(3) + + drop_marker(True) + rospy.loginfo("Dropped second marker") + await sleep(2) + + await correct_depth(desired_depth=START_DEPTH_LEVEL) + rospy.loginfo(f"Corrected depth to {START_DEPTH_LEVEL}") + + rospy.loginfo("Completed bin task") + + +@task +async def octagon_task(self: Task, direction: int = 1) -> Task[None, None, None]: + """ + Detects, move towards the pink bins, then surfaces inside the octagon. Requires robot to have submerged 0.7 meters. 
+ """ + rospy.loginfo("Starting octagon task") + + DEPTH_LEVEL_AT_BINS = State().orig_depth - 1.0 + DEPTH_LEVEL_ABOVE_BINS = State().orig_depth - 0.6 + LATENCY_THRESHOLD = 2 + CONTOUR_SCORE_THRESHOLD = 1000 + CONTOUR_SCORE_THRESHOLD = 1000 + + async def correct_depth(desired_depth): + await move_tasks.correct_depth(desired_depth=desired_depth, parent=self) + + async def correct_yaw(): + yaw_correction = CV().cv_data["bin_pink_front"].yaw + rospy.loginfo(f"Yaw correction: {yaw_correction}") + await move_tasks.move_to_pose_local( + geometry_utils.create_pose(0, 0, 0, 0, 0, yaw_correction * 0.7), + keep_level=True, + parent=self + ) + rospy.loginfo("Corrected yaw") + self.correct_yaw = correct_yaw + + def is_receiving_pink_bin_data(latest_detection_time): + return latest_detection_time and "bin_pink_bottom" in CV().cv_data and \ + CV().cv_data["bin_pink_bottom"].score >= CONTOUR_SCORE_THRESHOLD and \ + rospy.Time.now().to_sec() - CV().cv_data["bin_pink_bottom"].header.stamp.secs < LATENCY_THRESHOLD and \ + abs(CV().cv_data["bin_pink_bottom"].header.stamp.secs - latest_detection_time) < LATENCY_THRESHOLD + + def publish_power(): + power = Twist() + power.linear.x = 0.3 + Controls().set_axis_control_type(x=ControlTypes.DESIRED_POWER) + Controls().publish_desired_power(power, set_control_types=False) + + async def move_x(step=1): + await move_tasks.move_x(step=step, parent=self) + + def stabilize(): + pose_to_hold = copy.deepcopy(State().state.pose.pose) + Controls().publish_desired_position(pose_to_hold) + + async def sleep(secs): + duration = rospy.Duration(secs) + start_time = rospy.Time.now() + while start_time + duration > rospy.Time.now(): + await Yield() + + def get_step_size(last_step_size): + bin_pink_score = CV().cv_data["bin_pink_front"].score + step = 0 + if bin_pink_score < 200: + step = 3 + elif bin_pink_score < 1000: + step = 2 + elif bin_pink_score < 3000: + step = 1 + else: + step = 0.75 + + return min(step, last_step_size) + + async def move_to_pink_bins(): + count = 1 + latest_detection_time = None + moved_above = False + + last_step_size = float('inf') + await move_x(step=1) + while not is_receiving_pink_bin_data(latest_detection_time) and not moved_above: + if "bin_pink_bottom" in CV().cv_data: + latest_detection_time = CV().cv_data["bin_pink_bottom"].header.stamp.secs + + await correct_depth(DEPTH_LEVEL_AT_BINS if not moved_above else DEPTH_LEVEL_ABOVE_BINS) + if not moved_above: + await yaw_to_cv_object('bin_pink_front', direction=direction, yaw_threshold=math.radians(15), + depth_level=0.9, parent=Task.MAIN_ID) + + step = get_step_size(last_step_size) + await move_x(step=step) + last_step_size = step + + rospy.loginfo(f"Bin pink front score: {CV().cv_data['bin_pink_front'].score}") + + if CV().cv_data["bin_pink_front"].score > 4000 and not moved_above: + await correct_depth(DEPTH_LEVEL_ABOVE_BINS + 0.1) + moved_above = True + + rospy.loginfo("Moved above pink bins") + + await Yield() + + count += 1 + + rospy.loginfo(f"Receiving pink bin data: {is_receiving_pink_bin_data(latest_detection_time)}") + + if moved_above: + await move_tasks.move_with_directions([(1.5, 0, 0)], parent=self) + else: + rospy.loginfo("Detected bin_pink_bottom") + + rospy.loginfo("Reached pink bins, stabilizing...") + stabilize() + await sleep(5) + + await move_to_pink_bins() + + rospy.loginfo("Surfacing...") + await move_tasks.move_to_pose_local(geometry_utils.create_pose(0, 0, State().orig_depth - State().depth, 0, 0, 0), + timeout=10, parent=self) + rospy.loginfo("Finished surfacing") diff --git 
a/onboard/catkin_ws/src/task_planning/scripts/cv_tasks.py b/onboard/catkin_ws/src/task_planning/scripts/cv_tasks.py index 7de97aea6..40d682f0d 100644 --- a/onboard/catkin_ws/src/task_planning/scripts/cv_tasks.py +++ b/onboard/catkin_ws/src/task_planning/scripts/cv_tasks.py @@ -1,68 +1,64 @@ -from interface.cv import CVInterface -import smach +from typing import Optional import rospy -import task_utils - - -# Says where to spin to center a given CV object in the frame -class SpinDirectionTask(smach.State): - '''Says where to spin to center a given CV object in the frame''' - def __init__(self, name: str, tolerance: float, cv: CVInterface): - ''' - Says where to spin to center a given CV object in the frame - - Parameters - name : str - Name of the CV object to center - tolerance : float - How close to the center the object must be to be considered centered - cv : CVInterface - CV object to get data from - ''' - super(SpinDirectionTask, self).__init__(outcomes=["left", "right", "center"]) - self.name = name - self.tolerance = tolerance - self.cv = cv - - def execute(self, _): - cv_data = self.cv.get_data(self.name) - if cv_data is None or cv_data.xmin > 0.5 + self.tolerance: - return "left" - if cv_data.xmax < 0.5 - self.tolerance: - return "right" - return "center" - - -class ObjectCoordsTask(smach.State): - def __init__(self, name, cv: CVInterface): - super().__init__(outcomes=["valid", "invalid"], output_keys=["point"]) - self.name = name - self.cv = cv - - def execute(self, userdata): - cv_data = self.cv.get_data(self.name) - if cv_data is None or not cv_data.sonar: - return "invalid" - userdata.point = cv_data.coords - return "valid" - - -# Might need a refactor -class ObjectVisibleTask(smach.State): - def __init__(self, image_name, timeout=0): - super(ObjectVisibleTask, self).__init__(["undetected", "detected"], - input_keys=['image_name'], - output_keys=['image_name']) - self.image_name = image_name - self.timeout = timeout # in seconds - - def execute(self, userdata): - cycles_per_second = 10 - rate = rospy.Rate(cycles_per_second) - total = 0 - while total <= self.timeout * cycles_per_second: - if task_utils.object_vector(self.cv_data[self.image_name]) is not None: - return "detected" - total += 1 - rate.sleep() - return "undetected" + +from interface.cv import CV +from move_tasks import move_to_pose_local +from task import task, Yield, Task +import move_tasks +from utils import geometry_utils + + +# TODO: this task will likely be depleted once we complete the refactoring tasks in comp_tasks.py +@task +async def move_to_cv_obj(self: Task, name: str) -> Task[None, Optional[str], None]: + """ + Move to the pose of an object detected by CV. Returns when the robot is at the object's pose with zero velocity, + within a small tolerance. 
+ + Args: + name: CV class name of the object to move to + + Send: + CV class name of new object to move to + """ + + # Get initial object location and initialize task to move to it + pose = CV().get_pose(name) + move_task = move_to_pose_local(pose, parent=self) + move_task.send(None) + + # Move until the robot has reached the object's pose + # TODO: Stop when stopped recieving decisions + # TODO: Stop within stop_distance (param) + while not move_task.done: + # Update object to move to + updated_obj = await Yield(pose) + + if updated_obj is not None: + name = updated_obj + + pose = CV().get_pose(name) + + # TODO: Add offset + move_task.send(pose) + + +@task +async def correct_x(self: Task, prop: str, add_factor: float = 0, mult_factor: float = 1): + x = (CV().cv_data[prop].coords.x + add_factor) * mult_factor + await move_tasks.move_to_pose_local(geometry_utils.create_pose(x, 0, 0, 0, 0, 0), parent=self) + rospy.loginfo(f"Corrected x {x}") + + +@task +async def correct_y(self: Task, prop: str, add_factor: float = 0, mult_factor: float = 1): + y = (CV().cv_data[prop].coords.y + add_factor) * mult_factor + await move_tasks.move_to_pose_local(geometry_utils.create_pose(0, y, 0, 0, 0, 0), parent=self) + rospy.loginfo(f"Corrected y {y}") + + +@task +async def correct_z(self: Task, prop: str): + z = CV().cv_data[prop].coords.z + await move_tasks.move_to_pose_local(geometry_utils.create_pose(0, 0, z, 0, 0, 0), parent=self) + rospy.loginfo(f"Corrected z {z}") diff --git a/onboard/catkin_ws/src/task_planning/scripts/gate_task.py b/onboard/catkin_ws/src/task_planning/scripts/gate_task.py deleted file mode 100644 index a137cabcf..000000000 --- a/onboard/catkin_ws/src/task_planning/scripts/gate_task.py +++ /dev/null @@ -1,480 +0,0 @@ -#!/usr/bin/env python - -from statistics import mean - -from numpy import array_equal -from task_utils import cv_object_position, object_vector -from cv_tasks import ObjectVisibleTask -import smach -import rospy -from move_tasks import MoveToPoseLocalTask, MoveToPoseGlobalTask -import tf - - -SIDE_THRESHOLD = 0.1 # means gate post is within 1 tenth of the side of the frame -CENTERED_THRESHOLD = 0.1 # means gate will be considered centered if within 1 tenth of the center of the frame -GATE_TICK_CV_NAME = "bin" # change back to gate_tick - -""" -Plan for task restructure: - -INPUT FROM CMD copper or bootlegger, direction to rotate, time to wait and moving average - -1) rotate ~0.25 radians -2) check if image is in camera frame, maybe see if in center - a) if not in frame, go to 1 - b) if in frame, proceed -3) moving average for some time to find location to move to -4) move to position ~0.5 below and ~1 past image -5) done -""" - - -def main(): - rospy.init_node('gate_task') - - listener = tf.TransformListener() - - # needs direction to work - sm = create_simple_gate_task_sm(listener) - - listener.waitForTransform('odom', 'base_link', rospy.Time(), rospy.Duration(10)) - - # Execute SMACH plan - sm.execute() - -# Here to appease automatic testing -# Check if we actually want this later - - -def create_gate_task_sm(controls, listener): - return create_gate_task_sm_DEFUNCT(controls, listener, 1) - -# Rotate direction is +1 or -1 depending on how we should rotate - - -def create_gate_task_sm_DEFUNCT(controls, listener, rotate_direction): - sm = smach.StateMachine(outcomes=['done']) - gate_euler_position = [0, 0, 0, 0, 0, 0] - image_name = "bootleggerbuoy" - with sm: - smach.StateMachine.add('CHECK_IMAGE_VISIBLE', ObjectVisibleTask(image_name, 3), - transitions={ - 
'undetected': 'ROTATE_TO_GATE', - 'detected': 'SURVEY_GATE' - }) - - smach.StateMachine.add('ROTATE_TO_GATE', - MoveToPoseLocalTask(0, 0, 0, 0, 0, 0.25 * rotate_direction, controls, listener), - transitions={ - 'done': 'CHECK_IMAGE_VISIBLE', - 'continue': 'ROTATE_TO_GATE' - }) - - smach.StateMachine.add('SURVEY_GATE', SurveyGateTask(image_name, 20, gate_euler_position), - transitions={ - 'done': 'MOVE_THROUGH_GATE' - }) - - smach.StateMachine.add('MOVE_THROUGH_GATE', MoveToPoseLocalTask(*gate_euler_position, controls, listener), - transitions={ - 'done': 'done', - 'continue': 'MOVE_THROUGH_GATE' - }) - return sm - -# SIMPLE version below - - -def create_simple_gate_task_sm(listener, rotate_direction, image_name): - sm = smach.StateMachine(outcomes=['succeeded', 'failed']) - global_object_position = [0, 0, 0, 0, 0, 0] - with sm: - smach.StateMachine.add('DIVE_TO_GATE', MoveToPoseGlobalTask(0, 0, -2, 0, 0, 0, listener), - transitions={ - 'done': 'SURVEY_GATE_IMAGE_LOCATION' - }) - - smach.StateMachine.add('SURVEY_GATE_IMAGE_LOCATION', SurveyGateImage(image_name, 1, 3), - transitions={ - 'undetected': 'ROTATE_TO_GATE', - 'detected': 'MOVE_TO_GATE' - }) - - smach.StateMachine.add('ROTATE_TO_GATE', MoveToPoseLocalTask(0, 0, 0, 0, 0, 0.25 * rotate_direction, listener), - transitions={ - 'done': 'CHECK_IMAGE_VISIBLE' - }) - - smach.StateMachine.add('MOVE_TO_GATE', MoveToPoseLocalTask(*global_object_position, listener), - transitions={ - 'done': 'succeeded' - }) - - return sm - - -def create_simplest_gate_task_sm(listener): - sm = smach.StateMachine(outcomes=['succeeded', 'failed']) - with sm: - smach.StateMachine.add('MOVE_1', MoveToPoseLocalTask(2.5, 0, -2, 0, 0, 0, listener), - transitions={ - 'done': 'MOVE_2' - }) - - smach.StateMachine.add('MOVE_2', MoveToPoseLocalTask(2.5, 0, 0, 0, 0, 0, listener), - transitions={ - 'done': 'succeeded' - }) - - return sm - - -class SurveyGateImage(smach.State): - def __init__(self, object_name, time, global_object_position): - super(SurveyGateTask, self).__init__(["undetected", "detected"]) - self.object_name = object_name - self.time = time - self.global_object_position = global_object_position - - def execute(self, userdata): - millis = 200 # for 5 times per second - rate = rospy.Rate(millis) - total = 0 - - x_offset = 5 - z_offset = -1 - - last_cv_object_position = cv_object_position(self.cv_data[self.image_name]) - while total < self.time * 1000: - cur_cv_object_position = cv_object_position(self.cv_data[self.image_name]) - if cur_cv_object_position is not None and not array_equal(cur_cv_object_position, last_cv_object_position): - self.global_object_position = [cur_cv_object_position[0] + x_offset, - cur_cv_object_position[1], - cur_cv_object_position[2] + z_offset, - 0, - 0, - 0, ] - return "detected" - last_cv_object_position = cur_cv_object_position - total += millis - rate.sleep() - return "undetected" - - -class SurveyGateTask(smach.State): - def __init__(self, object_name, time, output_euler_position): - super(SurveyGateTask, self).__init__(["done"]) - self.object_name = object_name - self.time = time - self.output_euler_position = output_euler_position - - def execute(self, userdata): - millis = 10 - rate = rospy.Rate(millis) - total = 0 - captured_vectors = [] - - while total < self.time * 1000: - gate_vector = object_vector(self.cv_data[self.object_name]) - if gate_vector is not None: - captured_vectors.append(gate_vector) - total += millis - rate.sleep() - - avg_x = mean([v[0] for v in captured_vectors]) + 1 - avg_y = mean([v[1] for v in 
captured_vectors]) - avg_z = mean([v[2] for v in captured_vectors]) - 0.5 - self.output_euler_position = [avg_x, avg_y, avg_z, 0, 0, 0] - return "done" - -# class PoseFromVectorsTask(Task): -# def __init__(self, direction_arg, *coefficients): -# super(PoseFromVectorsTask, self).__init__(["done"], -# input_keys=['vector1', 'vector2'], -# output_keys=['x', 'y', 'z', 'roll', 'pitch', 'yaw']) -# self.direction_arg = direction_arg -# self.coefficients = coefficients - -# def run(self, userdata): -# pos = Vector3() -# for i in range(1, len(self.coefficients) + 1): -# print(userdata["vector" + str(i)]) -# pos.x += userdata["vector" + str(i)].x * self.coefficients[i-1] -# pos.y += userdata["vector" + str(i)].y * self.coefficients[i-1] -# pos.z += userdata["vector" + str(i)].z * self.coefficients[i-1] - -# dir_vec = userdata["vector" + str(self.direction_arg)] - -# userdata.x = pos.x -# userdata.y = pos.y -# userdata.z = pos.z - -# userdata.roll = 0 -# userdata.pitch = pi / 2 - arccos(-dir_vec.z) -# userdata.yaw = atan2(-dir_vec.y, -dir_vec.x) - -# print(pos.x, pos.y, pos.z) - -# return "done" - - -# class GateSpinDirectionTask(Task): -# def __init__(self, threshold): -# super(GateSpinDirectionTask, self).__init__(["center","right","left"]) -# self.threshold = threshold - -# def run(self, userdata): -# gate_info = _scrutinize_gate(self.cv_data['gate'], self.cv_data[GATE_TICK_CV_NAME]) -# if gate_info: -# if abs(gate_info["offset_h"]) < self.threshold: -# return "center" -# if gate_info["offset_h"] < 0: -# return "right" -# return "left" -# # default to right if we can't find the gate -# return "right" - -# class GateRotationDoneTask(Task): -# def __init__(self, threshold): -# super(GateRotationDoneTask, self).__init__(["done"]) -# self.threshold = threshold - -# def run(self, userdata): -# rate = rospy.Rate(15) -# while True: -# gate_info = _scrutinize_gate(self.cv_data['gate'], self.cv_data[GATE_TICK_CV_NAME]) -# if gate_info and abs(gate_info["offset_h"]) < self.threshold: -# return "done" -# rate.sleep() - -# class RollDoneTask(Task): -# def __init__(self, roll_amount): -# super(RollDoneTask, self).__init__(["done"]) -# self.roll_amount = roll_amount - -# def run(self, userdata): -# startAngle = self.state.pose.pose.roll - -# rate = rospy.Rate(15) -# while abs(self.state.pose.pose.roll - startAngle) < 4 * pi: -# rate.sleep() - -# return "done" - - -# class GateVerticalAlignmentTask(Task): -# def __init__(self, threshold): -# super(GateVerticalAlignmentTask, self).__init__(["center","top","bottom","spin"]) -# self.threshold = threshold - -# def run(self, userdata): -# gate_info = _scrutinize_gate(self.cv_data['gate'], self.cv_data[GATE_TICK_CV_NAME]) -# if gate_info: - -# if abs(gate_info["offset_v"]) < self.threshold: -# return "center" -# if gate_info["offset_v"] < 0: -# return "top" -# return "bottom" -# return "spin" - - # def run(self, userdata): - # gate_info = _scrutinize_gate(self.cv_data['gate'], self.cv_data[GATE_TICK_CV_NAME]) - # if gate_info: - - # if abs(gate_info["offset_v"]) < self.threshold: - # return "center" - # if gate_info["offset_v"] < 0: - # return "top" - # return "bottom" - # return "spin" - - -# class NearGateTask(Task): -# def __init__(self, threshold): -# super(NearGateTask, self).__init__(["true","false","spin"]) -# self.threshold = threshold - -# def run(self, userdata): -# gate_info = _scrutinize_gate(self.cv_data['gate'], self.cv_data[GATE_TICK_CV_NAME]) -# if gate_info: -# if (gate_info["left"] < self.threshold) and (gate_info["right"] < self.threshold): -# 
return "true" -# else: -# return "false" -# return "spin" - -# class CalcGatePoseTask(Task): -# def __init__(self, listener): -# super(CalcGatePoseTask, self).__init__(["done"], output_keys=['vector1','vector2']) -# self.listener = listener - -# def run(self, userdata): -# rate = rospy.Rate(10) -# while self.cv_data['gateleftchild'] == None or self.cv_data['gaterightchild'] == None: -# rate.sleep() -# v1, v2 = _find_gate_normal_and_center(self.cv_data['gateleftchild'], -# self.cv_data['gaterightchild'], self.listener) - -# userdata['vector1'] = v1 -# userdata['vector2'] = v2 - -# return "done" -""" -Algorithm plan for finding gate pos from cv data: - -Get angle of gate point from center of camera using cam_yaw = (x_from_center / width (which is just 1)) * horizontal_fov -For left this would be cam_yaw = (left - 0.5) * horizontal_fov -cam_pitch = (y_from_center / height (once again 1)) * vert_fov -We now have global spherical coordinates for our point -phi = 90 + cam_pitch -theta = -cam_yaw -This has robot oriented facing +x as is usual -Then add result to camera offset -Then convert robot local coord result to global coords - -Use our 4 corners to get 2 vectors, then cross product to get normal -Then position robot along that normal and whatever distance we want -""" - -# past_gate_info = [] - -# def _find_gate_normal_and_center(gate_data_l, gate_data_r, listener): -# global past_gate_info -# top_left = _real_pos_from_cv((gate_data_l.xmin + gate_data_l.xmax)/2, -# gate_data_l.ymin, gate_data_l.distance, listener) -# top_right = _real_pos_from_cv((gate_data_r.xmin + gate_data_r.xmax)/2, -# gate_data_r.ymin, gate_data_r.distance, listener) -# bottom_left = _real_pos_from_cv((gate_data_l.xmin + gate_data_l.xmax)/2, -# gate_data_l.ymax, gate_data_l.distance, listener) -# bottom_right = _real_pos_from_cv((gate_data_r.xmin + gate_data_r.xmax)/2, -# gate_data_r.ymax, gate_data_r.distance, listener) - -# # Midpoint between top_left and bottom_right -# center_pt = Vector3(x=(top_left.position.x + bottom_right.position.x) / 2, -# y=(top_left.position.y + bottom_right.position.y) / 2, -# z=(top_left.position.z + bottom_right.position.z) / 2) - -# diag_1 = Vector3(top_left.position.x - bottom_right.position.x, top_left.position.y - bottom_right.position.y, -# top_left.position.z - bottom_right.position.z) -# diag_2 = Vector3(bottom_left.position.x - top_right.position.x, bottom_left.position.y - top_right.position.y, -# bottom_left.position.z - top_right.position.z) - -# # FIXME temporarily setting z to 0 -# center_pt.z = 0 -# normal = normalize(cross(diag_1,diag_2)) - -# if len(past_gate_info) >= 5: -# sum = [Vector3(), Vector3()] -# for info in past_gate_info: -# sum[0] = add_vectors(sum[0], info[0]) -# sum[1] = add_vectors(sum[1], info[1]) -# sum[0] = divide_vector(sum[0], len(past_gate_info)) -# sum[1] = divide_vector(sum[1], len(past_gate_info)) -# if vect_dist(normal, sum[0]) + vect_dist(center_pt, sum[1]) > 0.5: -# return (sum[0], sum[1]) - -# past_gate_info.append((normal, center_pt)) -# if len(past_gate_info) > 5: -# past_gate_info.pop(0) - -# return (normal, center_pt) - -# # add two vectors -# def add_vectors(v1, v2): -# return Vector3(x=v1.x + v2.x, y=v1.y + v2.y, z=v1.z + v2.z) - -# # divide a vector by a scalar -# def divide_vector(v, s): -# return Vector3(x=v.x / s, y=v.y / s, z=v.z / s) - -# # calculate distance between two vectors -# def vect_dist(v1, v2): -# return sqrt(pow(v1.x - v2.x, 2) + pow(v1.y - v2.y, 2) + pow(v1.z - v2.z, 2)) - -# # calculate cross product of two vectors -# 
def cross(v1, v2): -# return Vector3(x=v1.y*v2.z - v1.z*v2.y, y=v1.z*v2.x - v1.x*v2.z, z=v1.x*v2.y - v1.y*v2.x) - -# # normalize vector -# def normalize(v): -# mag = sqrt(v.x**2 + v.y**2 + v.z**2) -# if mag == 0: -# return Vector3(0, 0, 0) -# return Vector3(x=v.x/mag, y=v.y/mag, z=v.z/mag) - -# """Get the position of a point in global coordinates from its position from the camera -# Parameters: -# x: x position of the point relative to the left of the frame (0 to 1) -# y: y position of the point relative to the top of the frame (0 to 1) -# d: distance from the camera to the point -# """ -# def _real_pos_from_cv(x, y, d, listener): -# # TODO make sure these values are correct -# horizontal_fov = 0.933 -# vertical_fov = 0.586 -# cam_pos_x = 0 -# cam_pos_y = 0 -# cam_pos_z = 0 -# # Fill in camera parameters later -# # Use radians -# cam_yaw = (x - 0.5) * horizontal_fov -# cam_pitch = (y - 0.5) * vertical_fov - -# phi = pi/2 + cam_pitch -# theta = -cam_yaw - -# x_cam = d * sin(phi) * cos(theta) -# y_cam = d * sin(phi) * sin(theta) -# z_cam = d * cos(phi) - -# # Fill in camera position relative to robot -# local_pt = Point(x=x_cam + cam_pos_x, y=y_cam + cam_pos_y, z=z_cam + cam_pos_z) - -# local_pose = Pose(position=local_pt, orientation=Quaternion(0, 0, 0, 1)) - -# return task_utils.transform_pose(listener, 'base_link', 'odom', local_pose) - - -# def _scrutinize_gate(gate_data, gate_tick_data): -# """Finds the distance from the gate to each of the four edges of the frame -# Parameters: -# gate_data (custom_msgs/CVObject): cv data for the gate -# gate_tick_data (custom_msgs/CVObject): cv data for the gate tick -# Returns: -# dict: left - distance from left edge of gate to frame edge (from 0 to 1) -# right - distance from right edge of gate to frame edge (from 0 to 1) -# top - distance from top edge of gate to frame edge (from 0 to 1) -# bottom - distance from bottom edge of gate to frame edge (from 0 to 1) -# offset_h - difference between distances on the right and left sides (from 0 to 1) -# offset_v - difference between distances on the top and bottom sides (from 0 to 1) -# """ -# print(gate_data) -# if not(gate_data) or gate_data.label == 'none': -# return None - -# res = { -# "left": gate_data.xmin, -# "right": 1 - gate_data.xmax, -# "top": gate_data.ymin, -# "bottom": 1 - gate_data.ymax -# } - -# # Adjust the target area if the gate tick is detected -# if gate_tick_data and gate_tick_data.label != 'none' and gate_tick_data.score > 0.5: -# # If the tick is closer to the left side -# if abs(gate_tick_data.xmin - gate_data.xmin) < abs(gate_tick_data.xmax - gate_data.xmax): -# res["right"] = 1 - gate_tick_data.xmax -# else: -# res["left"] = gate_tick_data.xmin - -# res["bottom"] = 1 - gate_tick_data.ymax - -# res["offset_h"] = res["right"] - res["left"] -# res["offset_v"] = res["bottom"] - res["top"] - -# return res - -if __name__ == '__main__': - main() diff --git a/onboard/catkin_ws/src/task_planning/scripts/interface/controls.py b/onboard/catkin_ws/src/task_planning/scripts/interface/controls.py index 151109163..969044f1b 100644 --- a/onboard/catkin_ws/src/task_planning/scripts/interface/controls.py +++ b/onboard/catkin_ws/src/task_planning/scripts/interface/controls.py @@ -1,41 +1,213 @@ import rospy -import actionlib -from nav_msgs.msg import Odometry -from custom_msgs.msg import ControlsDesiredPoseAction, ControlsDesiredTwistAction, \ - ControlsDesiredPoseGoal, ControlsDesiredTwistGoal +from std_srvs.srv import Trigger, SetBool +from geometry_msgs.msg import Pose, Twist +from 
custom_msgs.msg import ControlTypes, ThrusterAllocs +from custom_msgs.srv import SetControlTypes +from utils.other_utils import singleton +import resource_retriever as rr +import os +import yaml -class ControlsInterface: - STATE_TOPIC = 'state' - DESIRED_POSE_ACTION = 'controls/desired_pose' - DESIRED_TWIST_ACTION = 'controls/desired_twist' +@singleton +class Controls: + """ + A singleton class for the controls interface - def __init__(self, listener): - self.listener = listener + Attributes: + _instance: The singleton instance of this class. Is a static attribute. + _set_control_types: The service proxy for setting control types + _all_axes_control_type: The control type for all axes + _reset_pid_loop: The service proxy for resetting the PID loops + _desired_position_pub: The publisher for the desired position + _desired_velocity_pub: The publisher for the desired velocity + _desired_power_pub: The publisher for the desired power + _read_config: The config file + num_thrusters: The number of thrusters + thruster_dict: The thruster dictionary + _thruster_pub: The publisher for thruster allocations + """ - rospy.Subscriber(self.STATE_TOPIC, Odometry, self._on_receive_state) - self.desired_pose_client = actionlib.SimpleActionClient( - self.DESIRED_POSE_ACTION, ControlsDesiredPoseAction) - self.desired_twist_client = actionlib.SimpleActionClient( - self.DESIRED_TWIST_ACTION, ControlsDesiredTwistAction) - self.state = None + # ROS service name for setting control types + CONTROL_TYPES_SERVICE = 'controls/set_control_types' + RESET_PID_LOOPS_SERVICE = 'controls/reset_pid_loops' + ENABLE_CONTROLS_SERVICE = 'controls/enable' + DESIRED_POSITION_TOPIC = 'controls/desired_position' + DESIRED_VELOCITY_TOPIC = 'controls/desired_velocity' + DESIRED_POWER_TOPIC = 'controls/desired_power' + THRUSTER_ALLOCS_TOPIC = 'controls/thruster_allocs' + CONTROL_TYPES_TOPIC = 'controls/control_types' - self.desired_pose_client.wait_for_server() - self.desired_twist_client.wait_for_server() + def __init__(self, bypass: bool = False): - def move_to_pose_global(self, pose): - self.desired_pose_client.send_goal(ControlsDesiredPoseGoal(pose=pose)) + if not bypass: + rospy.wait_for_service(self.CONTROL_TYPES_SERVICE) + self._set_control_types = rospy.ServiceProxy(self.CONTROL_TYPES_SERVICE, SetControlTypes) + # NOTE: if this variable gets out of sync with the actual control types, bad things may happen + self._all_axes_control_type = None - def move_with_velocity(self, twist): - self.desired_twist_client.send_goal(ControlsDesiredTwistGoal(twist=twist)) + self.control_types: ControlTypes = None + rospy.Subscriber(self.CONTROL_TYPES_TOPIC, ControlTypes, self._update_control_types) - def cancel_movement(self): - self.desired_pose_client.cancel_goal() - self.desired_twist_client.cancel_goal() + if not bypass: + rospy.wait_for_service(self.RESET_PID_LOOPS_SERVICE) + self._reset_pid_loops = rospy.ServiceProxy(self.RESET_PID_LOOPS_SERVICE, Trigger) - def _on_receive_state(self, state): - self.state = state + self._enable_controls = rospy.ServiceProxy(self.ENABLE_CONTROLS_SERVICE, SetBool) - def get_state(self): - return self.state + self._desired_position_pub = rospy.Publisher(self.DESIRED_POSITION_TOPIC, Pose, queue_size=1) + self._desired_velocity_pub = rospy.Publisher(self.DESIRED_VELOCITY_TOPIC, Twist, queue_size=1) + self._desired_power_pub = rospy.Publisher(self.DESIRED_POWER_TOPIC, Twist, queue_size=1) + + self._read_config = None + + self.num_thrusters = None + self.thruster_dict = None + + self.get_thruster_dict() + 
self._thruster_pub = rospy.Publisher(self.THRUSTER_ALLOCS_TOPIC, ThrusterAllocs, queue_size=1) + self.bypass = bypass + + def _update_control_types(self, control_types): + self.control_types = control_types + + def get_thruster_dict(self) -> dict: + """ + Get the thruster dictionary + + Returns: + The thruster dictionary + """ + CONFIG_FILE_PATH = 'package://controls/config/%s.yaml' + filename = rr.get_filename(CONFIG_FILE_PATH % os.getenv("ROBOT_NAME", "oogway"), use_protocol=False) + with open(filename) as f: + full_thruster_dict = yaml.safe_load(f) + + thruster_dict = {} + for index, t_dict in enumerate(full_thruster_dict['thrusters']): + thruster_name = t_dict['name'] + thruster_dict[thruster_name] = index + + self.num_thrusters = len(full_thruster_dict['thrusters']) + self.thruster_dict = thruster_dict + return thruster_dict + + def call_enable_controls(self, enable: bool): + """ + Enable or disable controls. + + Args: + enable: Whether to enable (true) or disable (false). + """ + self._enable_controls(enable) + + def _set_all_axes_control_type(self, type: ControlTypes) -> None: + """ + Set the control type for all axes + + Args: + type: The control type to set + """ + if self._all_axes_control_type == type: + return + # TODO: what if this doesn't return success? + if not self.bypass: + self._set_control_types(ControlTypes( + x=type, + y=type, + z=type, + roll=type, + pitch=type, + yaw=type + )) + self._all_axes_control_type = type + self.start_new_move() + + def set_axis_control_type(self, x=None, y=None, z=None, roll=None, pitch=None, yaw=None) -> None: + x = self.control_types.x if x is None else x + y = self.control_types.y if y is None else y + z = self.control_types.z if z is None else z + roll = self.control_types.roll if roll is None else roll + pitch = self.control_types.pitch if pitch is None else pitch + yaw = self.control_types.yaw if yaw is None else yaw + + self._all_axes_control_type = None + + self._set_control_types(ControlTypes(x=x, y=y, z=z, roll=roll, pitch=pitch, yaw=yaw)) + + # Resets the PID loops. 
Should be called for every "new" movement + def start_new_move(self) -> None: + """ + Start a new movement + """ + if not self.bypass: + self._reset_pid_loops() + + # In global coordinates + def publish_desired_position(self, pose: Pose, set_control_types=True) -> None: + """ + Publish the desired position + + Args: + pose: The desired position + set_control_types: Whether all axes should be set to DESIRED_POSITION + """ + if set_control_types: + self._set_all_axes_control_type(ControlTypes.DESIRED_POSITION) + self._desired_position_pub.publish(pose) + + # In local coordinates + def publish_desired_velocity(self, twist: Twist, set_control_types=True) -> None: + """ + Publish the desired velocity + + Args: + twist: The desired velocity + set_control_types: Whether all axes should be set to DESIRED_TWIST + """ + if set_control_types: + self._set_all_axes_control_type(ControlTypes.DESIRED_TWIST) + self._desired_velocity_pub.publish(twist) + + def publish_desired_power(self, power: Twist, set_control_types=True) -> None: + """ + Publish the desired power + + Args: + power: The desired power + set_control_types: Whether all axes should be set to DESIRED_POWER + """ + if set_control_types: + self._set_all_axes_control_type(ControlTypes.DESIRED_POWER) + self._desired_power_pub.publish(power) + + def publish_thruster_allocs(self, **kwargs) -> None: + """ + Publish the thruster allocations + + Args: + kwargs: The thruster allocations + + Raises: + ValueError: If the thruster name is not in thruster_dict + ValueError: If the thruster alloc is not between -1 and 1 inclusive + """ + thruster_allocs = [0] * self.num_thrusters + + for kwarg_name, kwarg_value in kwargs.items(): + + if kwarg_name not in self.thruster_dict: + raise ValueError(f"Thruster name not in thruster_dict {kwarg_name}") + + if kwarg_value > 1 or kwarg_value < -1: + raise ValueError(f"Received {kwarg_value} for thruster {kwarg_name}. Thruster alloc must be between " + + "-1 and 1 inclusive.") + + thruster_allocs[self.thruster_dict[kwarg_name]] = kwarg_value + + thruster_allocs_msg = ThrusterAllocs() + thruster_allocs_msg.header.stamp = rospy.Time.now() + thruster_allocs_msg.allocs = thruster_allocs + + self._thruster_pub.publish(thruster_allocs_msg) diff --git a/onboard/catkin_ws/src/task_planning/scripts/interface/cv.py b/onboard/catkin_ws/src/task_planning/scripts/interface/cv.py index 0dffd0f20..947b5605b 100644 --- a/onboard/catkin_ws/src/task_planning/scripts/interface/cv.py +++ b/onboard/catkin_ws/src/task_planning/scripts/interface/cv.py @@ -1,27 +1,328 @@ import rospy import yaml +import numpy as np import resource_retriever as rr -from custom_msgs.msg import CVObject +from custom_msgs.msg import CVObject, RectInfo +from geometry_msgs.msg import Pose, Point +from std_msgs.msg import Float64 +from vision_msgs.msg import Detection2DArray +from utils.other_utils import singleton -class CVInterface: +@singleton +class CV: + """ + Interface for the CV. + + Attributes: + _instance: The singleton instance of this class. Is a static attribute. 
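# Hypothetical usage sketch of the Controls interface defined above -- not part of the changeset.
# Assumes a running ROS master, an already-initialized node, and the controls services being available;
# the pose value and the thruster name are illustrative only and must match the robot's config YAML.
from geometry_msgs.msg import Pose
from interface.controls import Controls

controls = Controls()                    # @singleton: repeated constructor calls return the same instance
controls.call_enable_controls(True)

pose = Pose()
pose.position.z = -0.7                   # example global-frame setpoint
pose.orientation.w = 1
controls.publish_desired_position(pose)  # switches all axes to DESIRED_POSITION; resets PID loops on a type change

controls.publish_thruster_allocs(bottom_front_left=0.5)  # raises ValueError for an unknown name or out-of-range alloc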
+ cv_data: Dictionary of the data of the objects + """ + MODELS_PATH = "package://cv/models/depthai_models.yaml" CV_CAMERA = "front" - CV_MODEL = "yolov7_tiny_2023" + # TODO: We may want a better way to sync this between here and the cv node + CV_MODEL = "yolov7_tiny_2023_main" + + FRAME_WIDTH = 640 + FRAME_HEIGHT = 320 + + BIN_WIDTH = 0.3048 # Width of single bin in meters + BUOY_WIDTH = 0.2032 # Width of buoy in meters + GATE_IMAGE_WIDTH = 0.2452 # Width of gate images in meters + GATE_IMAGE_HEIGHT = 0.2921 # Height of gate images in meters - def __init__(self): + MONO_CAM_IMG_SHAPE = (640, 480) # Width, height in pixels + MONO_CAM_SENSOR_SIZE = (3.054, 1.718) # Width, height in mm + MONO_CAM_FOCAL_LENGTH = 2.65 # Focal length in mm + + def __init__(self, bypass: bool = False): self.cv_data = {} + self.bypass = bypass with open(rr.get_filename(self.MODELS_PATH, use_protocol=False)) as f: model = yaml.safe_load(f)[self.CV_MODEL] + for model_class in model['classes']: self.cv_data[model_class] = None topic = f"{model['topic']}{self.CV_CAMERA}/{model_class}" rospy.Subscriber(topic, CVObject, self._on_receive_cv_data, model_class) - def _on_receive_cv_data(self, cv_data, object_type): + rospy.Subscriber("/cv/bottom/lane_marker_angle", Float64, self._on_receive_lane_marker_angle) + self.lane_marker_angles = [] + + rospy.Subscriber("/cv/bottom/lane_marker_dist", Float64, self._on_receive_lane_marker_dist) + self.lane_marker_dists = [] + + rospy.Subscriber("/cv/bottom/lane_marker", RectInfo, self._on_receive_lane_marker_info) + self.lane_marker_heights = [] + + self.lane_marker_angle_publisher = rospy.Publisher("/task_planning/cv/bottom/lane_marker_angle", + Float64, queue_size=1) + + rospy.Subscriber("/cv/front_usb/buoy/bounding_box", CVObject, self._on_receive_cv_data, "buoy") + + rospy.Subscriber('/cv/front/gate_red_cw', CVObject, self._on_receive_cv_data, "gate_red_cw") + rospy.Subscriber('/cv/front/gate_whole', CVObject, self._on_receive_cv_data, "gate_whole") + + rospy.Subscriber("/cv/bottom/bin_blue/bounding_box", CVObject, self._on_receive_cv_data, "bin_blue") + rospy.Subscriber("/cv/bottom/bin_red/bounding_box", CVObject, self._on_receive_cv_data, "bin_red") + rospy.Subscriber("/cv/bottom/bin_center/bounding_box", CVObject, self._on_receive_cv_data, "bin_center") + + rospy.Subscriber("/cv/bottom/bin_blue/distance", Point, self._on_receive_distance_data, "bin_blue") + rospy.Subscriber("/cv/bottom/bin_red/distance", Point, self._on_receive_distance_data, "bin_red") + rospy.Subscriber("/cv/bottom/bin_center/distance", Point, self._on_receive_distance_data, "bin_center") + + self.bin_distances = {object_type: {"x": [], "y": []} for object_type in ["bin_red", "bin_blue"]} + + rospy.Subscriber("/cv/bottom/path_marker/bounding_box", CVObject, self._on_receive_cv_data, "path_marker") + rospy.Subscriber("/cv/bottom/path_marker/distance", Point, self._on_receive_distance_data, "path_marker") + + rospy.Subscriber("/cv/front/pink_bins/bounding_box", CVObject, self._on_receive_cv_data, "bin_pink_front") + rospy.Subscriber("/cv/bottom/pink_bins/bounding_box", CVObject, self._on_receive_cv_data, "bin_pink_bottom") + + def _on_receive_cv_data(self, cv_data: CVObject, object_type: str) -> None: + """ + Parse the received CV data and store it + + Args: + cv_data: The received CV data as a CVObject + object_type: The name/type of the object + """ self.cv_data[object_type] = cv_data - # TODO add useful methods for getting data - def get_data(self, name): - return self.cv_data[name] + def 
_on_receive_distance_data(self, distance_data: Point, object_type: str, filter_len=10) -> None: + """ + Parse the received distance data and store it + + Args: + distance_data: The received distance data as a Point + object_type: The name/type of the object + """ + # TODO: migrate all self.{object}_distances type objects into a single self.distances dictionary + # TODO: implement a generic moving average filter + # TODO: integrate _on_receive_lane_marker_dist + if object_type == "path_marker": + self.cv_data["path_marker_distance"] = distance_data + return + + if len(self.bin_distances[object_type]["x"]) == filter_len: + self.bin_distances[object_type]["x"].pop(0) + if len(self.bin_distances[object_type]["y"]) == filter_len: + self.bin_distances[object_type]["y"].pop(0) + + self.bin_distances[object_type]["x"].append(distance_data.x) + self.bin_distances[object_type]["y"].append(distance_data.y) + + if "bin_red_distance" not in self.cv_data: + self.cv_data["bin_red_distance"] = Point() + if "bin_blue_distance" not in self.cv_data: + self.cv_data["bin_blue_distance"] = Point() + + distance_x = sum(self.bin_distances[object_type]["x"]) / len(self.bin_distances[object_type]["x"]) + self.cv_data[f"{object_type}_distance"].x = distance_x + + distance_y = sum(self.bin_distances[object_type]["y"]) / len(self.bin_distances[object_type]["y"]) + self.cv_data[f"{object_type}_distance"].y = distance_y + + red_data = self.cv_data["bin_red_distance"] + blue_data = self.cv_data["bin_blue_distance"] + + if (red_data := self.cv_data["bin_red_distance"]) and (blue_data := self.cv_data["bin_blue_distance"]): + red_x, red_y = red_data.x, red_data.y + blue_x, blue_y = blue_data.x, blue_data.y + + center_red_x = self.FRAME_WIDTH / 2 - red_x + center_red_y = self.FRAME_HEIGHT / 2 - red_y + center_blue_x = self.FRAME_WIDTH / 2 - blue_x + center_blue_y = self.FRAME_HEIGHT / 2 - blue_y + + # TODO: do some mathy stuff to make this cleaner + angle = np.arctan2(center_red_y - center_blue_y, center_red_x - center_blue_x) + if angle > np.pi: + angle -= 2 * np.pi + elif angle < -np.pi: + angle += 2 * np.pi + + if angle > np.pi / 2: + angle -= np.pi + elif angle < -np.pi / 2: + angle += np.pi + + self.cv_data["bin_angle"] = angle + + def _on_receive_lane_marker_angle(self, angle: Float64) -> None: + """ + Parse the received angle of the blue rectangle and store it + + Args: + angle: The received angle of the blue rectangle in degrees + """ + filter_len = 10 + skip = 0 + if len(self.lane_marker_angles) == filter_len: + self.lane_marker_angles.pop(0) + + self.lane_marker_angles.append(angle.data) + + lane_marker_angle = sum(self.lane_marker_angles[skip:filter_len-skip]) / len(self.lane_marker_angles) + self.cv_data["lane_marker_angle"] = lane_marker_angle + self.lane_marker_angle_publisher.publish(self.cv_data["lane_marker_angle"]) + + # TODO: remove this and integrate into _on_receive_distance_data + def _on_receive_lane_marker_dist(self, dist: Float64) -> None: + """ + Parse the received dist of the blue rectangle and store it + + Args: + dist: The received dist of the blue rectangle in pixels + """ + filter_len = 10 + skip = 0 + if len(self.lane_marker_dists) == filter_len: + self.lane_marker_dists.pop(0) + + self.lane_marker_dists.append(dist.data) + + lane_marker_dist = sum(self.lane_marker_dists[skip:filter_len-skip]) / len(self.lane_marker_dists) + self.cv_data["lane_marker_dist"] = lane_marker_dist + + def _on_receive_lane_marker_info(self, lane_marker_info: RectInfo) -> None: + """ + Parse the received info of the 
blue rectangle and store it + + Args: + rect_info: The received info of the blue rectangle + """ + filter_len = 10 + skip = 0 + if len(self.lane_marker_heights) == filter_len: + self.lane_marker_heights.pop(0) + + self.lane_marker_heights.append(lane_marker_info.height) + + lane_marker_height = sum(self.lane_marker_heights[skip:filter_len-skip]) / len(self.lane_marker_heights) + self.cv_data["lane_marker_height"] = lane_marker_height + + # Based on lane_marker_info.center_y and height, determine if lane marker is touching top and/or bottom of frame + self.cv_data["lane_marker_touching_top"] = lane_marker_info.center_y - lane_marker_info.height / 2 <= 0 + self.cv_data["lane_marker_touching_bottom"] = lane_marker_info.center_y + lane_marker_info.height / 2 >= 480 + + def _on_receive_gate_red_cw_detection_depthai(self, msg: CVObject) -> None: + """ + Parse the received detection of the red gate and store it + + Args: + msg: The received detection of the red gate + """ + self.cv_data["gate_red_cw_properties"] = { + "x": msg.coords.x, + "y": msg.coords.y, + "z": msg.coords.z + } + + def _on_receive_gate_whole_detection_depthai(self, msg: CVObject) -> None: + """ + Parse the received detection of the whole gate and store it + + Args: + msg: The received detection of the whole gate + """ + self.cv_data["gate_whole_properties"] = { + "x": msg.coords.x, + "y": msg.coords.y, + "z": msg.coords.z, + "yaw": msg.coords.yaw, + "secs": msg.header.stamp.secs + } + + def _on_receive_gate_detection(self, msg: Detection2DArray) -> None: + for detection in msg.detections: + for result in detection.results: + if result.id == 0: # gate_blue_ccw + self.cv_data["gate_blue_ccw_bbox"] = detection.bbox + elif result.id == 1: # gate_red_cw + self.cv_data["gate_red_cw_bbox"] = detection.bbox + + highest_confidence_blue = -1 + highest_confidence_red = -1 + best_bbox_blue = None + best_bbox_red = None + + for detection in msg.detections: + for result in detection.results: + if result.id == 0 and result.score > highest_confidence_blue: # gate_blue_ccw + highest_confidence_blue = result.score + best_bbox_blue = detection.bbox + elif result.id == 1 and result.score > highest_confidence_red: # gate_red_cw + highest_confidence_red = result.score + best_bbox_red = detection.bbox + + if best_bbox_blue is not None: + self.cv_data["gate_blue_ccw_bbox"] = best_bbox_blue + self.compute_gate_properties("gate_blue_ccw") + if best_bbox_red is not None: + self.cv_data["gate_red_cw_bbox"] = best_bbox_red + self.compute_gate_properties("gate_red_cw") + + def compute_gate_properties(self, gate_class): + if gate_class + "_bbox" not in self.cv_data or self.cv_data[gate_class + "_bbox"] is None: + rospy.logwarn(f"No bounding box data available for {gate_class}") + return None + + bbox = self.cv_data[gate_class + "_bbox"] + + # Assuming bbox is of type vision_msgs/BoundingBox2D + bbox_width, bbox_height = bbox.size_x, bbox.size_y + + # Compute the meters per pixel (assuming GATE_IMAGE_WIDTH is the real width in meters) + meters_per_pixel = self.GATE_IMAGE_WIDTH / bbox_width + + # Use geometry_utils to compute center distances + # Here, image x is robot's y, and image y is robot's z + dist_x = bbox.center.x - self.MONO_CAM_IMG_SHAPE[0] / 2 + dist_y = bbox.center.y - self.MONO_CAM_IMG_SHAPE[1] / 2 + + # Compute distance between center of bounding box and center of image in meters + dist_x_meters = dist_x * meters_per_pixel * -1 + dist_y_meters = dist_y * meters_per_pixel * -1 + + dist_to_obj = self.mono_cam_dist_with_obj_width(bbox_width, 
self.GATE_IMAGE_WIDTH) + + self.cv_data[gate_class + "_properties"] = { + "bbox_width": bbox_width, + "bbox_height": bbox_height, + "meters_per_pixel": meters_per_pixel, + "x": dist_to_obj, + "y": dist_x_meters, + "z": dist_y_meters, + } + + def mono_cam_dist_with_obj_width(self, width_pixels, width_meters): + return (self.MONO_CAM_FOCAL_LENGTH * width_meters * self.MONO_CAM_IMG_SHAPE[0]) \ + / (width_pixels * self.MONO_CAM_SENSOR_SIZE[0]) + + def mono_cam_dist_with_obj_height(self, height_pixels, height_meters): + return (self.MONO_CAM_FOCAL_LENGTH * height_meters * self.MONO_CAM_IMG_SHAPE[1]) \ + / (height_pixels * self.MONO_CAM_SENSOR_SIZE[1]) + + def get_pose(self, name: str) -> Pose: + """ + Get the pose of a detected object + + Args: + name: The name/type of the object + + Returns: + The pose of the object + """ + data = self.cv_data[name] + pose = Pose() + pose.position.x = data.coords.x + pose.position.y = data.coords.y + pose.position.z = data.coords.z + pose.orientation.x = 0 + pose.orientation.y = 0 + pose.orientation.z = 0 + pose.orientation.w = 1 + return pose diff --git a/onboard/catkin_ws/src/task_planning/scripts/interface/state.py b/onboard/catkin_ws/src/task_planning/scripts/interface/state.py new file mode 100644 index 000000000..d3501987b --- /dev/null +++ b/onboard/catkin_ws/src/task_planning/scripts/interface/state.py @@ -0,0 +1,129 @@ +import rospy +from geometry_msgs.msg import PoseWithCovarianceStamped +from nav_msgs.msg import Odometry +from robot_localization.srv import SetPose +from sensor_msgs.msg import Imu +from tf2_ros.buffer import Buffer +from utils.other_utils import singleton + + +@singleton +class State: + """ + Interface for the state of the robot. + + Attributes: + _instance: The singleton instance of this class. Is a static attribute. + state: The current state of the robot. + tfBuffer: The transform buffer for the robot. 
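# Standalone check of the pinhole relation used by mono_cam_dist_with_obj_width above:
#   distance = (focal_length_mm * real_width_m * image_width_px) / (bbox_width_px * sensor_width_mm)
# The constants mirror MONO_CAM_FOCAL_LENGTH, MONO_CAM_IMG_SHAPE and MONO_CAM_SENSOR_SIZE; the 100 px
# bounding-box width and the buoy as the target are assumed example inputs, not values from the diff.
FOCAL_LENGTH_MM = 2.65
IMG_WIDTH_PX = 640
SENSOR_WIDTH_MM = 3.054
BUOY_WIDTH_M = 0.2032

def mono_dist(width_px: float, width_m: float) -> float:
    return (FOCAL_LENGTH_MM * width_m * IMG_WIDTH_PX) / (width_px * SENSOR_WIDTH_MM)

print(round(mono_dist(100, BUOY_WIDTH_M), 2))  # ~1.13 m -> a wider bounding box means a closer object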
+ _reset_pose: The service proxy for resetting the pose + """ + + # ROS topics for the state and resetting the pose + STATE_TOPIC = 'state' + DEPTH_TOPIC = '/sensors/depth' + IMU_TOPIC = '/vectornav/IMU' + RESET_POSE_SERVICE = '/set_pose' + + def __init__(self, bypass: bool = False, tfBuffer: Buffer = None): + """ + Args: + tfBuffer: The transform buffer for the robot + """ + self.bypass = bypass + if tfBuffer: + self._tfBuffer = tfBuffer + + self.received_state = False + self.received_depth = False + self.received_imu = False + + rospy.Subscriber(self.STATE_TOPIC, Odometry, self._on_receive_state) + self._state = None + + if not bypass: + rospy.wait_for_service(self.RESET_POSE_SERVICE) + self._reset_pose = rospy.ServiceProxy(self.RESET_POSE_SERVICE, SetPose) + + rospy.Subscriber(self.DEPTH_TOPIC, PoseWithCovarianceStamped, self._on_receive_depth) + + rospy.Subscriber(self.IMU_TOPIC, Imu, self._on_receive_imu) + + @property + def state(self): + """ + The state + """ + return self._state + + @property + def orig_state(self): + """ + The first state message received + """ + return self._orig_state + + @property + def depth(self): + """ + The depth from the pressure sensor + """ + return self._depth + + @property + def orig_depth(self): + """ + The depth from the pressure sensor + """ + return self._orig_depth + + @property + def imu(self): + """ + The IMU data + """ + return self._imu + + @property + def orig_imu(self): + """ + The first IMU message received + """ + return self._orig_imu + + @property + def tfBuffer(self): + """ + The transform buffer + """ + return self._tfBuffer + + def _on_receive_state(self, state): + self._state = state + + if not self.received_state: + self._orig_state = state + self.received_state = True + + def _on_receive_depth(self, depth_msg): + self._depth = depth_msg.pose.pose.position.z + + if not self.received_depth: + self._orig_depth = depth_msg.pose.pose.position.z + self.received_depth = True + + def _on_receive_imu(self, imu_msg): + self._imu = imu_msg + + if not self.received_imu: + self._orig_imu = imu_msg + self.received_imu = True + + def reset_pose(self): + """ + Reset the pose + """ + poseCov = PoseWithCovarianceStamped() + poseCov.pose.pose.orientation.w = 1 + if not self.bypass: + self._reset_pose(poseCov) diff --git a/onboard/catkin_ws/src/task_planning/scripts/message_conversion/jsonpickle_custom_handlers.py b/onboard/catkin_ws/src/task_planning/scripts/message_conversion/jsonpickle_custom_handlers.py new file mode 100644 index 000000000..bd1911c69 --- /dev/null +++ b/onboard/catkin_ws/src/task_planning/scripts/message_conversion/jsonpickle_custom_handlers.py @@ -0,0 +1,53 @@ +import builtins +import jsonpickle +import traceback + +import genpy + + +from message_conversion.ros_message_converter import convert_ros_message_to_dictionary, \ + convert_dictionary_to_ros_message + + +class ROSMessageHandler(jsonpickle.handlers.BaseHandler): + """ + JSONPickle handler to convert ROS messages to and from dictionaries + """ + def flatten(self, obj, data): + data['ros/type'] = obj._type + data['ros/data'] = convert_ros_message_to_dictionary(obj) + return data + + def restore(self, obj): + message_type = obj['ros/type'] + dictionary = obj['ros/data'] + return convert_dictionary_to_ros_message(message_type, dictionary) + + +class BaseExceptionHandler(jsonpickle.handlers.BaseHandler): + """ + JSONPickle handler to convert exceptions to and from dictionaries + """ + def flatten(self, obj, data): + data['exception/type'] = type(obj).__name__ + 
data['exception/message'] = str(obj) + data['exception/traceback'] = traceback.format_tb(obj.__traceback__) + return data + + def restore(self, obj): + exc_type = obj['exception/type'] + exc_message = obj['exception/message'] + + # Reconstruct the exception object; does not include traceback + exc_class = getattr(builtins, exc_type) + exc_instance = exc_class(exc_message) + + return exc_instance + + +def register_custom_jsonpickle_handlers(): + """ + Register all custom JSONPickle handlers + """ + jsonpickle.handlers.register(genpy.Message, ROSMessageHandler, base=True) + jsonpickle.handlers.register(BaseException, BaseExceptionHandler, base=True) diff --git a/onboard/catkin_ws/src/task_planning/scripts/message_conversion/ros_message_converter.py b/onboard/catkin_ws/src/task_planning/scripts/message_conversion/ros_message_converter.py new file mode 100644 index 000000000..9bc7d234c --- /dev/null +++ b/onboard/catkin_ws/src/task_planning/scripts/message_conversion/ros_message_converter.py @@ -0,0 +1,405 @@ +# -*- coding: utf-8 -*- +# +# Obtained on March 5, 2024, from master branch of: https://github.com/DFKI-NI/rospy_message_converter +# Any edits made to the original are marked with "EDIT" comments. +# +# Software License Agreement (BSD License) +# +# Copyright (c) 2019-2022, Martin Günther (DFKI GmbH) and others +# Copyright (c) 2013-2016, Brandon Alexander +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of this project nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +import logging +import roslib.message +import rospy +import base64 +import sys +import copy +import collections + +python3 = sys.hexversion > 0x03000000 + +python_list_types = [list, tuple] + +if python3: + python_string_types = [str, bytes] + python_int_types = [int] +else: + pass + # EDIT: Code below is commented out and replaced with `pass` above because it results in linter warnings as + # `unicode` and `long` are not defined in Python 3 and ROS Noetic always uses Python 3. 
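# Minimal round-trip sketch for the jsonpickle handlers registered above -- an assumed usage example,
# not code from this changeset. Requires a ROS (Noetic) environment where geometry_msgs is importable.
import jsonpickle
from geometry_msgs.msg import Point
from message_conversion.jsonpickle_custom_handlers import register_custom_jsonpickle_handlers

register_custom_jsonpickle_handlers()

encoded = jsonpickle.encode(Point(x=1.0, y=2.0, z=3.0))  # flattened by ROSMessageHandler into ros/type + ros/data
restored = jsonpickle.decode(encoded)                    # rebuilt via convert_dictionary_to_ros_message
assert restored.x == 1.0 and restored.z == 3.0

err = jsonpickle.decode(jsonpickle.encode(ValueError("bad input")))  # BaseExceptionHandler round-trip
assert isinstance(err, ValueError)                                   # the traceback is not restored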
+ + # python_string_types = [str, unicode] # noqa + # python_int_types = [int, long] # noqa + +python_float_types = [float] + +ros_to_python_type_map = { + 'bool': [bool], + 'float32': copy.deepcopy(python_float_types + python_int_types), + 'float64': copy.deepcopy(python_float_types + python_int_types), + 'int8': copy.deepcopy(python_int_types), + 'int16': copy.deepcopy(python_int_types), + 'int32': copy.deepcopy(python_int_types), + 'int64': copy.deepcopy(python_int_types), + 'uint8': copy.deepcopy(python_int_types), + 'uint16': copy.deepcopy(python_int_types), + 'uint32': copy.deepcopy(python_int_types), + 'uint64': copy.deepcopy(python_int_types), + 'byte': copy.deepcopy(python_int_types), + 'char': copy.deepcopy(python_int_types), + 'string': copy.deepcopy(python_string_types), +} + +try: + import numpy as np + + _ros_to_numpy_type_map = { + 'float32': [np.float32, np.int8, np.int16, np.uint8, np.uint16], + # don't include int32, because conversion to float may change value: + # v = np.iinfo(np.int32).max; np.float32(v) != v + 'float64': [np.float32, np.float64, np.int8, np.int16, np.int32, np.uint8, np.uint16, np.uint32], + 'int8': [np.int8], + 'int16': [np.int8, np.int16, np.uint8], + 'int32': [np.int8, np.int16, np.int32, np.uint8, np.uint16], + 'int64': [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32], + 'uint8': [np.uint8], + 'uint16': [np.uint8, np.uint16], + 'uint32': [np.uint8, np.uint16, np.uint32], + 'uint64': [np.uint8, np.uint16, np.uint32, np.uint64], + 'byte': [np.int8], + 'char': [np.uint8], + } + + # merge type_maps + merged = collections.defaultdict(list, ros_to_python_type_map) + for k, v in _ros_to_numpy_type_map.items(): + merged[k].extend(v) + ros_to_python_type_map = dict(merged) +except ImportError: + pass + + +ros_time_types = ['time', 'duration'] +ros_primitive_types = [ + 'bool', + 'byte', + 'char', + 'int8', + 'uint8', + 'int16', + 'uint16', + 'int32', + 'uint32', + 'int64', + 'uint64', + 'float32', + 'float64', + 'string', +] +ros_header_types = ['Header', 'std_msgs/Header', 'roslib/Header'] + + +def convert_dictionary_to_ros_message( + message_type, + dictionary, + kind='message', + strict_mode=True, + check_missing_fields=False, + check_types=True, + log_level='error', +): + """ + Takes in the message type and a Python dictionary and returns a ROS message. + + Example: + >>> msg_type = "std_msgs/String" + >>> dict_msg = { "data": "Hello, Robot" } + >>> convert_dictionary_to_ros_message(msg_type, dict_msg) + data: "Hello, Robot" + + >>> msg_type = "std_srvs/SetBool" + >>> dict_msg = { "data": True } + >>> kind = "request" + >>> convert_dictionary_to_ros_message(msg_type, dict_msg, kind) + data: True + """ + if kind == 'message': + message_class = roslib.message.get_message_class(message_type) + message = message_class() + elif kind == 'request': + service_class = roslib.message.get_service_class(message_type) + message = service_class._request_class() + elif kind == 'response': + service_class = roslib.message.get_service_class(message_type) + message = service_class._response_class() + else: + raise ValueError('Unknown kind "%s".' 
% kind) + message_fields = dict(_get_message_fields(message)) + + remaining_message_fields = copy.deepcopy(message_fields) + + if dictionary is None: + dictionary = {} + for field_name, field_value in dictionary.items(): + if field_name in message_fields: + field_type = message_fields[field_name] + if field_value is not None: + field_value = _convert_to_ros_type( + field_name, field_type, field_value, strict_mode, check_missing_fields, check_types, log_level + ) + setattr(message, field_name, field_value) + del remaining_message_fields[field_name] + else: + error_message = 'ROS message type "{0}" has no field named "{1}"'.format(message_type, field_name) + if strict_mode: + raise ValueError(error_message) + else: + if log_level not in ["debug", "info", "warning", "error", "critical"]: + log_level = "error" + logger = logging.getLogger('rosout') + log_func = getattr(logger, log_level) + + log_func('{}! It will be ignored.'.format(error_message)) + + if check_missing_fields and remaining_message_fields: + error_message = 'Missing fields "{0}"'.format(remaining_message_fields) + raise ValueError(error_message) + + return message + + +def _convert_to_ros_type( + field_name, + field_type, + field_value, + strict_mode=True, + check_missing_fields=False, + check_types=True, + log_level='error', +): + if _is_ros_binary_type(field_type): + field_value = _convert_to_ros_binary(field_type, field_value) + elif field_type in ros_time_types: + field_value = _convert_to_ros_time(field_type, field_value) + elif field_type in ros_primitive_types: + # Note: one could also use genpy.message.check_type() here, but: + # 1. check_type is "not designed to run fast and is meant only for error diagnosis" + # 2. it doesn't check floats (see ros/genpy#130) + # 3. it rejects numpy types, although they can be serialized + if check_types and type(field_value) not in ros_to_python_type_map[field_type]: + raise TypeError( + "Field '{0}' has wrong type {1} (valid types: {2})".format( + field_name, type(field_value), ros_to_python_type_map[field_type] + ) + ) + field_value = _convert_to_ros_primitive(field_type, field_value) + elif _is_field_type_a_primitive_array(field_type): + field_value = field_value + elif _is_field_type_an_array(field_type): + field_value = _convert_to_ros_array( + field_name, field_type, field_value, strict_mode, check_missing_fields, check_types, log_level + ) + else: + field_value = convert_dictionary_to_ros_message( + field_type, + field_value, + strict_mode=strict_mode, + check_missing_fields=check_missing_fields, + check_types=check_types, + log_level=log_level, + ) + return field_value + + +def _convert_to_ros_binary(field_type, field_value): + if type(field_value) in python_string_types: + if python3: + # base64 in python3 added the `validate` arg: + # If field_value is not properly base64 encoded and there are non-base64-alphabet characters in the input, + # a binascii.Error will be raised. + binary_value_as_string = base64.b64decode(field_value, validate=True) + else: + # base64 in python2 doesn't have the `validate` arg: characters that are not in the base-64 alphabet are + # silently discarded, resulting in garbage output. 
+ binary_value_as_string = base64.b64decode(field_value) + else: + binary_value_as_string = bytes(bytearray(field_value)) + return binary_value_as_string + + +def _convert_to_ros_time(field_type, field_value): + time = None + + if field_type == 'time' and field_value == 'now': + time = rospy.get_rostime() + else: + if field_type == 'time': + time = rospy.rostime.Time() + elif field_type == 'duration': + time = rospy.rostime.Duration() + if 'secs' in field_value and field_value['secs'] is not None: + setattr(time, 'secs', field_value['secs']) + if 'nsecs' in field_value and field_value['nsecs'] is not None: + setattr(time, 'nsecs', field_value['nsecs']) + + return time + + +def _convert_to_ros_primitive(field_type, field_value): + # std_msgs/msg/_String.py always calls encode() on python3, so don't do it here + if field_type == "string" and not python3: + field_value = field_value.encode('utf-8') + return field_value + + +def _convert_to_ros_array( + field_name, + field_type, + list_value, + strict_mode=True, + check_missing_fields=False, + check_types=True, + log_level='error', +): + # use index to raise ValueError if '[' not present + list_type = field_type[: field_type.index('[')] + return [ + _convert_to_ros_type(field_name, list_type, value, strict_mode, check_missing_fields, check_types, log_level) + for value in list_value + ] + + +def convert_ros_message_to_dictionary(message, binary_array_as_bytes=True): + """ + Takes in a ROS message and returns a Python dictionary. + + Example: + >>> import std_msgs.msg + >>> ros_message = std_msgs.msg.UInt32(data=42) + >>> convert_ros_message_to_dictionary(ros_message) + {'data': 42} + """ + dictionary = {} + message_fields = _get_message_fields(message) + for field_name, field_type in message_fields: + field_value = getattr(message, field_name) + dictionary[field_name] = _convert_from_ros_type(field_type, field_value, binary_array_as_bytes) + + return dictionary + + +def _convert_from_ros_type(field_type, field_value, binary_array_as_bytes=True): + if field_type in ros_primitive_types: + field_value = _convert_from_ros_primitive(field_type, field_value) + elif field_type in ros_time_types: + field_value = _convert_from_ros_time(field_type, field_value) + elif _is_ros_binary_type(field_type): + if binary_array_as_bytes: + field_value = _convert_from_ros_binary(field_type, field_value) + # EDIT: If condition below was modifed to use `isinstance` instead of `type` to avoid linter warning. 
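+        # The two branches below are only reached when binary_array_as_bytes is False: the uint8[] / char[]
+        # field is returned as a plain list of ints rather than a base64 string. On Python 2 the serialized
+        # field arrives as a str, so each character is converted with ord(); on Python 3 it is already a
+        # bytes-like object and list() suffices.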
+ elif isinstance(field_value, str): + field_value = [ord(v) for v in field_value] + else: + field_value = list(field_value) + elif _is_field_type_a_primitive_array(field_type): + field_value = list(field_value) + elif _is_field_type_an_array(field_type): + field_value = _convert_from_ros_array(field_type, field_value, binary_array_as_bytes) + else: + field_value = convert_ros_message_to_dictionary(field_value, binary_array_as_bytes) + + return field_value + + +def _is_ros_binary_type(field_type): + """Checks if the field is a binary array one, fixed size or not + + >>> _is_ros_binary_type("uint8") + False + >>> _is_ros_binary_type("uint8[]") + True + >>> _is_ros_binary_type("uint8[3]") + True + >>> _is_ros_binary_type("char") + False + >>> _is_ros_binary_type("char[]") + True + >>> _is_ros_binary_type("char[3]") + True + """ + return field_type.startswith('uint8[') or field_type.startswith('char[') + + +def _convert_from_ros_binary(field_type, field_value): + field_value = base64.b64encode(field_value).decode('utf-8') + return field_value + + +def _convert_from_ros_time(field_type, field_value): + field_value = {'secs': field_value.secs, 'nsecs': field_value.nsecs} + return field_value + + +def _convert_from_ros_primitive(field_type, field_value): + # std_msgs/msg/_String.py always calls decode() on python3, so don't do it here + if field_type == "string" and not python3: + field_value = field_value.decode('utf-8') + return field_value + + +def _convert_from_ros_array(field_type, field_value, binary_array_as_bytes=True): + # use index to raise ValueError if '[' not present + list_type = field_type[: field_type.index('[')] + return [_convert_from_ros_type(list_type, value, binary_array_as_bytes) for value in field_value] + + +def _get_message_fields(message): + return zip(message.__slots__, message._slot_types) + + +def _is_field_type_an_array(field_type): + return field_type.find('[') >= 0 + + +def _is_field_type_a_primitive_array(field_type): + bracket_index = field_type.find('[') + if bracket_index < 0: + return False + else: + list_type = field_type[:bracket_index] + return list_type in ros_primitive_types + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/onboard/catkin_ws/src/task_planning/scripts/move_tasks.py b/onboard/catkin_ws/src/task_planning/scripts/move_tasks.py index 5cfa812f5..8c2e56338 100644 --- a/onboard/catkin_ws/src/task_planning/scripts/move_tasks.py +++ b/onboard/catkin_ws/src/task_planning/scripts/move_tasks.py @@ -1,249 +1,177 @@ -from geometry_msgs.msg import Pose, Quaternion, Twist, Point, Vector3 -from tf.transformations import quaternion_from_euler -import task_utils -import smach +from typing import Optional, List, Tuple, Union +import copy + +from transforms3d.euler import quat2euler, euler2quat + import rospy +from geometry_msgs.msg import Pose, Twist + +from interface.controls import Controls +from interface.state import State +from task import task, Yield, Task +from utils import geometry_utils, coroutine_utils + + +@task +async def move_to_pose_global(self: Task, pose: Pose, timeout: int = 30) -> Task[None, Optional[Pose], None]: + """ + Move to a global pose in the "odom" frame. Returns when the robot is at the given pose with zero velocity, within + a small tolerance. 
+
+    Args:
+        pose: Global pose to move to
+        timeout: Maximum number of seconds to keep trying before giving up and returning
+
+    Send:
+        New global pose to move to
+    """
+    Controls().start_new_move()
+    Controls().publish_desired_position(pose)
+    start_time = rospy.Time.now()
+    while not geometry_utils.stopped_at_pose(State().state.pose.pose, pose, State().state.twist.twist):
+        # Allow users of this task to update the pose
+        new_pose = await Yield()
+        if new_pose is not None:
+            pose = new_pose
+
+        Controls().publish_desired_position(pose)
+
+        # Check if the timeout has been reached
+        if (rospy.Time.now() - start_time).to_sec() > timeout:
+            rospy.logwarn("Move to pose timed out")
+            return None
+
+
+@task
+async def move_to_pose_local(self: Task, pose: Pose, keep_level=False,
+                             timeout: int = 30) -> Task[None, Optional[Pose], None]:
+    """
+    Move to a local pose in the "base_link" frame. Returns when the robot is at the given pose with zero velocity,
+    within a small tolerance.
+
+    Args:
+        pose: Local pose to move to
+        keep_level: If True, use the robot's original roll and pitch instead of the roll and pitch of the given pose
+        timeout: Maximum number of seconds to keep trying before giving up and returning
+
+    Send:
+        New local pose to move to
+    """
+    global_pose = geometry_utils.local_pose_to_global(State().tfBuffer, pose)
+
+    if keep_level:
+        orig_euler_angles = quat2euler(geometry_utils.geometry_quat_to_transforms3d_quat(
+            State().orig_state.pose.pose.orientation))
+        euler_angles = quat2euler(geometry_utils.geometry_quat_to_transforms3d_quat(global_pose.orientation))
+        global_pose.orientation = geometry_utils.transforms3d_quat_to_geometry_quat(
+            euler2quat(orig_euler_angles[0], orig_euler_angles[1], euler_angles[2]))
+
+    return await coroutine_utils.transform(
+        move_to_pose_global(global_pose, timeout=timeout, parent=self),
+        send_transformer=lambda p: geometry_utils.local_pose_to_global(State().tfBuffer, p) if p else p)
+
+
+@task
+async def move_with_velocity(self: Task, twist: Twist) -> Task[None, Optional[Twist], None]:
+    """
+    Move with a given velocity. Returns when the robot is moving with the given velocity.
+
+    Args:
+        twist: Desired velocity
+
+    Send:
+        New desired velocity to move with
+    """
+    Controls().start_new_move()
+    Controls().publish_desired_velocity(twist)
+    while not geometry_utils.at_vel(State().state.twist.twist, twist):
+        new_twist = await Yield()
+        if new_twist is not None:
+            twist = new_twist
+
+        Controls().publish_desired_velocity(twist)
+
+
+@task
+async def move_with_power_for_seconds(self: Task, power: Twist, seconds: float) -> Task[None, Optional[Twist], None]:
+    """
+    Move with a given power for a given number of seconds. Returns when the time has elapsed.
+
+    Args:
+        power: Desired power
+        seconds: Number of seconds to move with the given power
+
+    Send:
+        New desired power to move with
+    """
+    Controls().publish_desired_power(power)
+    # Use rospy.Time and rospy.Duration here: rospy.time has no now(), and a plain float cannot be added to a Time
+    endtime = rospy.Time.now() + rospy.Duration(seconds)
+    while rospy.Time.now() < endtime:
+        new_power = await Yield()
+        if new_power is not None:
+            power = new_power
+
+        Controls().publish_desired_power(power)
+
+
+@task
+async def hold_position(self: Task) -> Task[bool, None, None]:
+    """
+    Hold the position and orientation the robot is at when this task is first run. Does not return.
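+    The pose to hold is captured (deep-copied) from the current state once, when the task first runs, and is
+    re-published on every iteration.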
+ + Yields: + If the robot is at the pose it should be holding with zero velocity, within a small tolerance + """ + pose_to_hold = copy.deepcopy(State().state.pose.pose) + while True: + await Yield(geometry_utils.stopped_at_pose(State().state.pose.pose, pose_to_hold, State().state.twist.twist)) + Controls().publish_desired_position(pose_to_hold) + + +@task +async def depth_correction(self: Task, desired_depth: float) -> Task[None, None, None]: + rospy.loginfo(f"State().depth: {State().depth}") + depth_delta = desired_depth - State().depth + rospy.loginfo(f"depth_delta: {depth_delta}") + + rospy.loginfo(f"Started depth correction {depth_delta}") + await move_to_pose_local( + geometry_utils.create_pose(0, 0, depth_delta, 0, 0, 0), + parent=self) + rospy.loginfo(f"Finished depth correction {depth_delta}") + + +@task +async def correct_depth(self: Task, desired_depth: float): + await depth_correction(desired_depth, parent=self) + + +@task +async def move_x(self: Task, step=1): + await move_to_pose_local(geometry_utils.create_pose(step, 0, 0, 0, 0, 0), parent=self) + rospy.loginfo(f"Moved x {step}") + + +@task +async def move_y(self: Task, step=1): + await move_to_pose_local(geometry_utils.create_pose(0, step, 0, 0, 0, 0), parent=self) + rospy.loginfo(f"Moved y {step}") + + +Direction = Union[Tuple[float, float, float], Tuple[float, float, float, float, float, float]] +Directions = List[Direction] + + +@task +async def move_with_directions(self: Task, directions: Directions, correct_yaw=False, correct_depth=False): + + for direction in directions: + assert len(direction) in [3, 6], "Each tuple in the directions list must be of length 3 or 6. Tuple " + f"{direction} has length {len(direction)}." + await move_to_pose_local( + geometry_utils.create_pose(direction[0], direction[1], direction[2], 0, 0, 0), + parent=self) + rospy.loginfo(f"Moved to {direction}") -class MoveToPoseGlobalTask(smach.State): - """Move to pose given in global coordinates.""" - - def __init__(self, x, y, z, roll, pitch, yaw, controls, input_keys=[]): - """ - Parameters: - x (float): x-component of position - y (float): y-component of position - z (float): z-component of position - roll (float): roll-component of orientation - pitch (float): pitch-component of orientation - yaw (float): yaw-component of orientation - controls (interface.ControlsInterface): interface to interact with controls - """ - super(MoveToPoseGlobalTask, self).__init__(outcomes=['done', 'continue'], input_keys=input_keys) - - self.controls = controls - self.last_pose = None - self.desired_pose = Pose() - self.desired_pose.position = Point(x=x, y=y, z=z) - self.desired_pose.orientation = Quaternion( - * - quaternion_from_euler( - roll, - pitch, - yaw)) - - def execute(self, _): - new_pose = self._get_pose() - # Only resend the movement goal if our desired pose has changed - if self.last_pose is None or not task_utils.at_pose(self.last_pose, new_pose, 0.0001, 0.0001): - self.last_pose = new_pose - self.controls.move_to_pose_global(new_pose) - - def run(self, userdata): - print("moving to ", self.desired_pose) - rate = rospy.Rate(15) - while not ( - self.state and task_utils.stopped_at_pose( - self.state.pose.pose, - self.getPose(), - self.state.twist.twist)): - self.publish_desired_pose_global(self.getPose()) - rate.sleep() - return "done" - - def getPose(self): - return self.desired_pose - - -class MoveToUserDataPoseGlobalTask(MoveToPoseGlobalTask): - """Move to pose passed through userdata given in global coordinates.""" - - def __init__(self, 
controls): - """ - Parameters: - controls (interface.ControlsInterface): interface to interact with controls - """ - super(MoveToUserDataPoseGlobalTask, self).__init__(0, 0, 0, 0, 0, 0, controls, input_keys=['pose']) - - def execute(self, userdata): - # Get pose from userdata - self.desired_pose = userdata.pose - - return super(MoveToUserDataPoseGlobalTask, self).execute(userdata) - - -class MoveToPoseLocalTask(MoveToPoseGlobalTask): - """Move to pose given in local coordinates.""" - - def __init__(self, x, y, z, roll, pitch, yaw, controls, listener, input_keys=[]): - """ - Move to pose given in local coordinates. - - Parameters: - x (float): x-component of position - y (float): y-component of position - z (float): z-component of position - roll (float): roll-component of orientation - pitch (float): pitch-component of orientation - yaw (float): yaw-component of orientation - controls (interface.ControlsInterface): interface to interact with controls - listener (tf.TransformListener): transform listener to go from local to global - """ - super(MoveToPoseLocalTask, self).__init__(x, y, z, roll, pitch, yaw, controls, input_keys=input_keys) - self.listener = listener - self.first_pose = True - - def _get_pose(self): - if self.first_pose: - self.local_pose = task_utils.transform_pose(self.listener, 'base_link', 'odom', self.desired_pose) - self.first_pose = False - return self.local_pose - - -class MoveToUserDataPoseLocalTask(MoveToPoseLocalTask): - """Move to pose passed through userdata given in local coordinates.""" - - def __init__(self, controls, listener): - """ - Move to pose passed through userdata given in local coordinates. - - Parameters: - controls (interface.ControlsInterface): interface to interact with controls - listener (tf.TransformListener): transform listener to go from local to global - """ - super(MoveToUserDataPoseLocalTask, self).__init__(0, 0, 0, 0, 0, 0, controls, listener, input_keys=['pose']) - - def execute(self, userdata): - # Get pose from userdata - self.desired_pose = userdata.pose - - return super(MoveToUserDataPoseLocalTask, self).execute(userdata) - - -class AllocateVelocityLocalTask(smach.State): - """Allocate specified velocity in a direction relative to the robot""" - - def __init__(self, x, y, z, roll, pitch, yaw, controls, input_keys=[]): - """ - Allocate specified velocity in a direction relative to the robot - - Parameters: - x (float): x-component of linear velocity - y (float): y-component of linear velocity - z (float): z-component of linear velocity - roll (float): roll-component of angular velocity - pitch (float): pitch-component of angular velocity - yaw (float): yaw-component of angular velocity - controls (interface.ControlsInterface): interface to interact with controls - """ - super(AllocateVelocityLocalTask, self).__init__(outcomes=['done'], input_keys=input_keys) - self.controls = controls - linear = Vector3(x, y, z) - angular = Vector3(roll, pitch, yaw) - self.desired_twist = Twist(linear=linear, angular=angular) - self.last_twist = None - - def execute(self, _): - new_twist = self._get_twist() - - # Only resend the movement goal if our desired pose has changed - if self.last_twist is None or not task_utils.at_vel(self.last_twist, new_twist, 0.0001, 0.0001): - self.last_twist = new_twist - self.controls.move_with_velocity(new_twist) - - return 'done' - - def _get_twist(self): - return self.desired_twist - - -class AllocateUserDataVelocityLocalTask(AllocateVelocityLocalTask): - """Allocate specified velocity passed through userdata in a 
direction relative to the robot""" - - def __init__(self, controls): - """ - Parameters: - controls (interface.ControlsInterface): interface to interact with controls - """ - super(AllocateUserDataVelocityLocalTask, self).__init__(0, 0, 0, 0, 0, 0, controls, input_keys=['twist']) - self.first_pose = True - - def execute(self, userdata): - # Get pose from userdata if supplied - self.desired_twist = userdata.twist - - def run(self, userdata): - # rospy.loginfo("publishing desired twist...") - rate = rospy.Rate(15) - while True: - if self.preempt_requested(): - self.service_preempt() - return 'preempted' - self.publish_desired_twist(self.desired_twist) - rate.sleep() - - -class AllocateVelocityGlobalTask(AllocateVelocityLocalTask): - """Allocate specified velocity in a direction relative to the global space""" - - def __init__(self, x, y, z, roll, pitch, yaw, controls, listener, input_keys=[]): - """ - Parameters: - x (float): x-component of linear velocity - y (float): y-component of linear velocity - z (float): z-component of linear velocity - roll (float): roll-component of angular velocity - pitch (float): pitch-component of angular velocity - yaw (float): yaw-component of angular velocity - controls (interface.ControlsInterface): interface to interact with controls - listener (tf.TransformListener): transform listener to go from global to local - """ - super(AllocateVelocityGlobalTask, self).__init__(x, y, z, roll, pitch, yaw, controls, input_keys=input_keys) - self.listener = listener - self.first_twist = True - - def _get_twist(self): - if self.first_twist: - self.global_twist = task_utils.transform_pose(self.listener, 'odom', 'base_link', self.desired_twist) - self.first_twist = False - return self.global_twist - - -class AllocateUserDataVelocityGlobalTask(AllocateVelocityGlobalTask): - """Allocate specified velocity passed through userdata in a direction relative to the global space""" - - def __init__(self, controls): - """ - Parameters: - controls (interface.ControlsInterface): interface to interact with controls - """ - super(AllocateUserDataVelocityGlobalTask, self).__init__(0, 0, 0, 0, 0, 0, controls, input_keys=['twist']) - - def execute(self, userdata): - # Get pose from userdata if supplied - self.desired_twist = userdata.twist - - return super(AllocateUserDataVelocityGlobalTask, self).execute(userdata) - - -class HoldPositionTask(smach.State): - """Hold position at the place the robot is at the first time this runs""" - - def __init__(self, controls): - """ - Hold position at the place the robot is at the first time this runs - - Parameters: - controls (interface.ControlsInterface): interface to interact with controls - """ - super(HoldPositionTask, self).__init__(outcomes=['done']) - self.controls = controls - self.first_pose = True - - def execute(self, userdata): - if self.first_pose: - self.hold_pose = self.controls.get_state().pose.pose - self.controls.move_to_pose_global(self.hold_pose) - self.first_pose = False - - return 'done' + if correct_yaw: + await self.parent.correct_yaw() + if correct_depth: + await self.parent.correct_depth() diff --git a/onboard/catkin_ws/src/task_planning/scripts/old/buoy_task.py b/onboard/catkin_ws/src/task_planning/scripts/old/buoy_task.py deleted file mode 100644 index d5b231cb7..000000000 --- a/onboard/catkin_ws/src/task_planning/scripts/old/buoy_task.py +++ /dev/null @@ -1,157 +0,0 @@ -#!/usr/bin/env python - -from statistics import mean -from task_utils import cv_object_position, object_vector -from numpy import array_equal -import 
smach -import rospy -from task import Task -from move_tasks import MoveToPoseLocalTask -from tf import TransformListener -from time import sleep - - -SIDE_THRESHOLD = 0.1 # means buoy post is within 1 tenth of the side of the frame -CENTERED_THRESHOLD = 0.1 # means buoy will be considered centered if within 1 tenth of the center of the frame -buoy_TICK_CV_NAME = "bin" # change back to buoy_tick - -""" -Plan for task restructure: - -INPUT FROM CMD copper or bootlegger, direction to rotate, time to wait and moving average - -1) rotate ~0.25 radians -2) check if image is in camera frame, maybe see if in center - a) if not in frame, go to 1 - b) if in frame, proceed -3) moving average for some time to find location to move to -4) move to position ~0.5 below and ~1 past image -5) done -""" - - -def main(): - rospy.init_node('buoy_task') - - sm = create_buoy_task_sm() - sleep(2) - # Execute SMACH plan - sm.execute() - -# Rotate direction is +1 or -1 depending on how we should rotate - - -def create_buoy_task_sm(rotate_direction, image_name): - sm = smach.StateMachine(outcomes=['succeeded', 'failed']) - listener = TransformListener() - buoy_euler_position = [0, 0, 0, 0, 0, 0] - with sm: - smach.StateMachine.add('SURVEY_BUOY_1', SurveyBuoyImage(image_name, 1, 0.5, buoy_euler_position), - transitions={ - 'detected': 'MOVE_TO_BUOY_1', - 'undetected': 'ROTATE_TO_BUOY' - }) - - smach.StateMachine.add('ROTATE_TO_BUOY', MoveToPoseLocalTask(0, 0, 0, 0, 0, 0.25 * rotate_direction, listener), - transitions={ - 'done': 'SURVEY_BUOY_1' - }) - - smach.StateMachine.add('MOVE_TO_BUOY_1', MoveToPoseLocalTask(*buoy_euler_position, listener), - transitions={ - 'done': 'SURVEY_BUOY_2' - }) - - smach.StateMachine.add('SURVEY_BUOY_2', SurveyBuoyImage(image_name, 1, 0.5, buoy_euler_position), - transitions={ - 'detected': 'MOVE_TO_BUOY_2', - 'undetected': 'failed' - }) - - smach.StateMachine.add('MOVE_TO_BUOY_2', MoveToPoseLocalTask(*buoy_euler_position, listener), - transitions={ - 'done': 'SURVEY_BUOY_3' - }) - - smach.StateMachine.add('SURVEY_BUOY_3', SurveyBuoyImage(image_name, 1, 1, buoy_euler_position), - transitions={ - 'detected': 'MOVE_TO_BUOY_3', - 'undetected': 'failed' - }) - - smach.StateMachine.add('MOVE_TO_BUOY_3', MoveToPoseLocalTask(*buoy_euler_position, listener), - transitions={ - 'done': 'MOVE_AWAY_FROM_BUOY' - }) - - smach.StateMachine.add('MOVE_AWAY_FROM_BUOY', MoveToPoseLocalTask(0, 5, 0, 0, 0, 0, listener), - transitions={ - 'done': 'succeeded' - }) - - return sm - - -class SurveyBuoyImage(Task): - def __init__(self, object_name, time, ratio, global_object_position): - super(SurveyBuoyTask, self).__init__(["undetected", "detected"]) - self.object_name = object_name - self.time = time - self.ratio = ratio - self.global_object_position = global_object_position - - def run(self): - millis = 200 # for 5 times per second - rate = rospy.Rate(millis) - total = 0 - - x_offset = 0 - z_offset = 0 - - last_cv_object_position = cv_object_position(self.cv_data[self.image_name]) - while total < self.time * 1000: - cur_cv_object_position = cv_object_position(self.cv_data[self.image_name]) - if cur_cv_object_position is not None and not array_equal(cur_cv_object_position, last_cv_object_position): - self.global_object_position = [(cur_cv_object_position[0] + x_offset) * self.ratio, - (cur_cv_object_position[1]) * self.ratio, - (cur_cv_object_position[2] + z_offset) * self.ratio, - 0, - 0, - 0, ] - return "detected" - last_cv_object_position = cur_cv_object_position - total += millis - rate.sleep() - return 
"undetected" - - -class SurveyBuoyTask(Task): - def __init__(self, object_name, time, ratio, output_euler_position): - super(SurveyBuoyTask, self).__init__(["done"]) - self.object_name = object_name - self.time = time - self.ratio = ratio - self.output_euler_position = output_euler_position - - def run(self, userdata): - millis = 10 - rate = rospy.Rate(millis) - total = 0 - captured_vectors = [] - - while total < self.time * 1000: - buoy_vector = object_vector(self.cv_data[self.object_name]) - if buoy_vector is not None: - captured_vectors.append(buoy_vector) - total += millis - rate.sleep() - - avg_x = mean([v[0] for v in captured_vectors]) * self.ratio - avg_y = mean([v[1] for v in captured_vectors]) * self.ratio - avg_z = mean([v[2] for v in captured_vectors]) * self.ratio - self.output_euler_position = [avg_x, avg_y, avg_z, 0, 0, 0] - return "done" - - -if __name__ == '__main__': - main() diff --git a/onboard/catkin_ws/src/task_planning/scripts/old/cv_tasks.py b/onboard/catkin_ws/src/task_planning/scripts/old/cv_tasks.py deleted file mode 100644 index 49f851b83..000000000 --- a/onboard/catkin_ws/src/task_planning/scripts/old/cv_tasks.py +++ /dev/null @@ -1,163 +0,0 @@ -from task import Task -from move_tasks import AllocateVelocityLocalTask -import rospy - - -class MoveOneCVPointToAnotherTask(Task): - """Move the robot such that a given point in a CV frame is moved to the specified location in the frame. - method is either "rotate" or "strafe" - camera is either "front" or "down" - coordinates are scaled from 0.0 to 1.0 (e.g. x_curr=0.0 is the left side of the frame), and so is the tolerance - """ - - def __init__(self, cv_obstacle, x_target, y_target, method="strafe", camera="front", tolerance=0.1): - super(MoveOneCVPointToAnotherTask, self).__init__() - - self.cv_obstacle = cv_obstacle - self.x_target = x_target - self.y_target = y_target - self.method = method - self.camera = camera - self.tolerance = tolerance - self.linear_constant = 0.5 - self.rotation_constant = 0.2 - - def _on_task_start(self): - pass - - def _on_task_run(self): # better to use pose once we have depth info - o = self.cv_data[self.cv_obstacle] - if o.label == "none": - # Obstacle not currently in view, or at least not detected by CV - rospy.loginfo("Bounding box not in view! 
Stopping...") - self.finish() - - x_curr = o.xmin + (o.xmax - o.xmin) / 2 # center of object - y_curr = o.ymin + (o.ymax - o.ymin) / 2 - x_diff = self.x_target - x_curr # difference to center of object - y_diff = self.y_target - y_curr - - if abs(x_diff) <= self.tolerance and abs(y_diff) <= self.tolerance: - if self.vel_task: - self.vel_task.finish() - self.finish() - return - - x_linear_vel = self.linear_constant * x_diff - y_linear_vel = self.linear_constant * y_diff - x_angular_vel = self.rotation_constant * x_diff - y_angular_vel = self.rotation_constant * y_diff - - if self.vel_task: - self.vel_task.finish() - - if self.method == "rotate" and self.camera == "front": - self.vel_task = AllocateVelocityLocalTask(0, 0, 0, 0, -y_angular_vel, x_angular_vel) - elif self.method == "rotate" and self.camera == "down": - self.vel_task = AllocateVelocityLocalTask(0, 0, 0, x_angular_vel, -y_angular_vel, 0) - elif self.method == "strafe" and self.camera == "front": - self.vel_task = AllocateVelocityLocalTask(0, x_linear_vel, y_linear_vel, 0, 0, 0) - elif self.method == "strafe" and self.camera == "down": - self.vel_task = AllocateVelocityLocalTask(y_linear_vel, x_linear_vel, 0, 0, 0, 0) - - if self.vel_task: - self.vel_task.run() - - -class MoveOneCVBoxToAnotherTask(Task): - """Move the robot so a given bounding box in a CV frame is moved to the specified size and location in the frame. - method is either "rotate" or "strafe" - camera is either "front" or "down" - coordinates are scaled from 0.0 to 1.0 (e.g. x_curr=0.0 is the left side of the frame), and so is the tolerance - the center of the box will be aligned to x_target and y_target, and will match at least one of either - w_target or h_target in dimension - """ - - def __init__( - self, - cv_obstacle, - x_target, - y_target, - w_target, - h_target, - method="strafe", - camera="front", - tolerance=0.1): - super(MoveOneCVPointToAnotherTask, self).__init__() - - self.cv_obstacle = cv_obstacle - self.x_target = x_target - self.y_target = y_target - self.w_target = w_target - self.h_target = h_target - self.method = method - self.camera = camera - self.tolerance = tolerance - self.linear_constant = 0.5 - self.rotation_constant = 0.2 - - def _on_task_start(self): - pass - - def _on_task_run(self): # better to use pose once we have depth info - o = self.cv_data[self.cv_obstacle] - if o.label == "none": - # Obstacle not currently in view, or at least not detected by CV - rospy.loginfo("Bounding box f") - self.finish() - - x_curr = o.xmin + (o.xmax - o.xmin) / 2 # center of object - y_curr = o.ymin + (o.ymax - o.ymin) / 2 - x_diff = self.x_target - x_curr # difference to center of object - y_diff = self.y_target - y_curr - - w_curr = o.xmax - o.xmin - h_curr = o.ymax - o.ymin - w_diff = self.w_target - w_curr - h_diff = self.h_target - h_curr - - target_area = self.w_target * self.h_target - current_area = w_curr * h_curr - - # continue here:........... 
not done - - # determine if need to scale up or scale down box - resize_box = current_area - target_area # probably want to add tolerance and change if statements accordingly - - if resize_box < 0: # current box smaller than target, so scale up - pass - # move to position and then scale up - # check if position met (w or h is within tolerance) - # then execute scale up - elif resize_box > 0: # current box larger than target, so scale down - pass - # scale down then move to position - # scale down and check if size requirement is met - # then execute movement to put (w or H within tolerance) - - if (abs(x_diff) <= self.tolerance and abs(y_diff) <= self.tolerance and - (abs(w_diff) < self.tolerance or abs(h_diff) < self.tolerance)): - if self.vel_task: - self.vel_task.finish() - self.finish() - return - - x_linear_vel = self.linear_constant * x_diff - y_linear_vel = self.linear_constant * y_diff - x_angular_vel = self.rotation_constant * x_diff - y_angular_vel = self.rotation_constant * y_diff - - if self.vel_task: - self.vel_task.finish() - - if self.method == "rotate" and self.camera == "front": - self.vel_task = AllocateVelocityLocalTask(0, 0, 0, 0, -y_angular_vel, x_angular_vel) - elif self.method == "rotate" and self.camera == "down": - self.vel_task = AllocateVelocityLocalTask(0, 0, 0, x_angular_vel, -y_angular_vel, 0) - elif self.method == "strafe" and self.camera == "front": - self.vel_task = AllocateVelocityLocalTask(0, x_linear_vel, y_linear_vel, 0, 0, 0) - elif self.method == "strafe" and self.camera == "down": - self.vel_task = AllocateVelocityLocalTask(y_linear_vel, x_linear_vel, 0, 0, 0, 0) - - if self.vel_task: - self.vel_task.run() diff --git a/onboard/catkin_ws/src/task_planning/scripts/old/gate_pseudo_code.txt b/onboard/catkin_ws/src/task_planning/scripts/old/gate_pseudo_code.txt deleted file mode 100644 index eff8a320c..000000000 --- a/onboard/catkin_ws/src/task_planning/scripts/old/gate_pseudo_code.txt +++ /dev/null @@ -1,23 +0,0 @@ -while (dist_to_gate > x | cannot see full gate) - while (cannot see gate) - rotate - read vector from CV - move along X and Y until near 0 - while (within tolerance of x and y and dist_to_gate > x) - move to gate - -move through gate (forward) -break - -New algo: - -while (not close to gate (i.e. gate (both sides) not near edge of frame) | cannot see gate (no data)) - while (cannot see gate | gate not centered (small section not centered if visible)) - rotate - while (gate centered (within a tolerance)) - move forward - -move through gate (forward) -break - -Second algo assumes that robot is never at a steep angle to the gate. This is likely a fair assumption as we start approximately in line with the gate's normal vector. 
\ No newline at end of file diff --git a/onboard/catkin_ws/src/task_planning/scripts/old/octagon_task.py b/onboard/catkin_ws/src/task_planning/scripts/old/octagon_task.py deleted file mode 100644 index 3d1dbf469..000000000 --- a/onboard/catkin_ws/src/task_planning/scripts/old/octagon_task.py +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/env python - -from statistics import mean - -import smach -import rospy -from task import Task -from move_tasks import MoveToPoseLocalTask -from tf import TransformListener -from time import sleep - - -SIDE_THRESHOLD = 0.1 # means octagon post is within 1 tenth of the side of the frame -# Octagon will be considered centered if within 1 tenth of the center of the frame -CENTERED_THRESHOLD = 0.1 -octagon_TICK_CV_NAME = "bin" # change back to octagon_tick - -""" -Plan for task restructure: - -INPUT FROM CMD copper or bootlegger, direction to rotate, time to wait and moving average - -1) rotate ~0.25 radians -2) check if image is in camera frame, maybe see if in center - a) if not in frame, go to 1 - b) if in frame, proceed -3) moving average for some time to find location to move to -4) move to position ~0.5 below and ~1 past image -5) done -""" - - -def main(): - rospy.init_node('octagon_task') - - sm = create_eyeball_octagon_task_sm() - sleep(2) - # Execute SMACH plan - sm.execute() - -# Rotate direction is +1 or -1 depending on how we should rotate - - -def create_acoustics_task_sm(rotate_direction): - # NOTE: THIS IS ENTIRELY WRONG - sm = smach.StateMachine(outcomes=['succeeded', 'failed']) - listener = TransformListener() - octagon_euler_position = [0, 0, 0, 0, 0, 0] - image_name = "gun" - with sm: - smach.StateMachine.add('SURVEY_octagon_1', SurveyOctagonTask(image_name, 20, 0.5, octagon_euler_position), - transitions={ - 'done': 'MOVE_TO_octagon_1' - }) - - smach.StateMachine.add('MOVE_TO_octagon_1', MoveToPoseLocalTask(*octagon_euler_position, listener), - transitions={ - 'done': 'SURVEY_octagon_2' - }) - - smach.StateMachine.add('SURVEY_octagon_2', SurveyOctagonTask(image_name, 20, 0.5, octagon_euler_position), - transitions={ - 'done': 'MOVE_TO_octagon_2' - }) - - smach.StateMachine.add('MOVE_TO_octagon_2', MoveToPoseLocalTask(*octagon_euler_position, listener), - transitions={ - 'done': 'SURVEY_octagon_3' - }) - - smach.StateMachine.add('SURVEY_octagon_3', SurveyOctagonTask(image_name, 20, 1, octagon_euler_position), - transitions={ - 'done': 'MOVE_TO_octagon_3' - }) - - smach.StateMachine.add('MOVE_TO_octagon_3', MoveToPoseLocalTask(*octagon_euler_position, listener), - transitions={ - 'done': 'MOVE_AWAY_FROM_octagon' - }) - - smach.StateMachine.add('MOVE_AWAY_FROM_octagon', MoveToPoseLocalTask(0, 0, 4, 0, 0, 0, listener), - transitions={ - 'done': 'succeeded' - }) - - return sm - - -def create_eyeball_octagon_task_sm(x_estimate, y_estimate): - sm = smach.StateMachine(outcomes=['succeeded', 'failed']) - listener = TransformListener() - with sm: - smach.StateMachine.add('MOVE_TO_OCTAGON', MoveToPoseLocalTask(x_estimate, y_estimate, 0, 0, 0, 0, listener), - transitions={ - 'done': 'SURFACE' - }) - - smach.StateMachine.add('SURFACE', MoveToPoseLocalTask(0, 0, 20, 0, 0, 0, listener), - transitions={ - 'done': 'succeeded' - }) - - return sm - - -class SurveyOctagonTask(Task): - def __init__(self, object_name, time, ratio, output_euler_position): - super(SurveyOctagonTask, self).__init__(["done"]) - self.object_name = object_name - self.time = time - self.ratio = ratio - self.output_euler_position = output_euler_position - - def run(self, userdata): - 
millis = 10 - rate = rospy.Rate(millis) - total = 0 - captured_vectors = [] - - while total < self.time * 1000: - octagon_vector = 0 # TODO: get octagon vector - if octagon_vector is not None: - captured_vectors.append(octagon_vector) - total += millis - rate.sleep() - - avg_x = mean([v[0] for v in captured_vectors]) * self.ratio - avg_y = mean([v[1] for v in captured_vectors]) * self.ratio - avg_z = (mean([v[2] for v in captured_vectors]) + 2) * self.ratio - self.output_euler_position = [avg_x, avg_y, avg_z, 0, 0, 0] - return "done" - - -if __name__ == '__main__': - main() diff --git a/onboard/catkin_ws/src/task_planning/scripts/old/prequal_task_sm.py b/onboard/catkin_ws/src/task_planning/scripts/old/prequal_task_sm.py deleted file mode 100644 index c43e40baf..000000000 --- a/onboard/catkin_ws/src/task_planning/scripts/old/prequal_task_sm.py +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/env python - -import smach -import rospy -from task import Task -from move_tasks import MoveToPoseLocalTask -from time import sleep - - -prequal_TICK_CV_NAME = "bin" # change back to prequal_tick - - -def main(): - rospy.init_node('prequal_task') - - sm = create_prequal_task_sm() - sleep(2) - # Execute SMACH plan - sm.execute() - - -def create_prequal_task_sm(velocity=0.2): - sm = smach.StateMachine(outcomes=['prequal_task_succeeded', 'prequal_task_failed']) - with sm: - def concurrence_term_wait_loop_cb(outcome_map): - return outcome_map['WAIT_DONE'] == 'done' - - def concurrence_term_dive_forward_loop_cb(outcome_map): - return outcome_map['DIVE_FORWARD'] == 'done' - - def concurrence_term_move_forward_loop_cb(outcome_map): - return outcome_map['MOVE_FORWARD'] == 'done' - - dive_forward_cc = smach.Concurrence(outcomes=['done'], - default_outcome='done', - child_termination_cb=concurrence_term_wait_loop_cb, - outcome_map={'done': {'WAIT_DONE': 'done'}}) - with dive_forward_cc: - smach.Concurrence.add('WAIT_DONE', WaitTask(5)) - smach.Concurrence.add('DIVE_FORWARD', MoveToPoseLocalTask(2, 0, -2, 0, 0, 0)) - - move_foward_cc = smach.Concurrence(outcomes=['done'], - default_outcome='done', - child_termination_cb=concurrence_term_wait_loop_cb, - outcome_map={'done': {'WAIT_DONE': 'done'}}) - with move_foward_cc: - smach.Concurrence.add('WAIT_DONE', WaitTask(5)) - smach.Concurrence.add('MOVE_FORWARD', MoveToPoseLocalTask(5, 0, 0, 0, 0, 0)) - - smach.StateMachine.add('DIVE_FORWARD', dive_forward_cc, transitions={'done': 'MOVE_FORWARD'}) - smach.StateMachine.add('MOVE_FORWARD', move_foward_cc, transitions={'done': 'prequal_task_succeeded'}) - - return sm - - -class WaitTask(Task): - def __init__(self, time): - super(WaitTask, self).__init__(["done"]) - self.time = time - - def run(self, userdata): - sleep(self.time) - return "done" - - -if __name__ == '__main__': - main() diff --git a/onboard/catkin_ws/src/task_planning/scripts/old/prequal_tasks.py b/onboard/catkin_ws/src/task_planning/scripts/old/prequal_tasks.py deleted file mode 100644 index 83052cb71..000000000 --- a/onboard/catkin_ws/src/task_planning/scripts/old/prequal_tasks.py +++ /dev/null @@ -1,218 +0,0 @@ -#!/usr/bin/env python - -from move_tasks import MoveToPoseLocalTask -import smach -import rospy -import gate_task -from tf import TransformListener -import math -import time -from custom_msgs.msg import ThrusterSpeeds - - -def main(): - rospy.init_node('gate_task') - - jank_prequal_with_turn() - - # sm = create_prequal_task_sm() - # time.sleep(2) - # # Execute SMACH plan - # outcome = sm.execute() - - -SPEED = 80 -DIVE_SPEED = 0 -DEPTH_HOLD_SPEED = 0 
-TURN_TIME = 6 -DIAMOND_TIME = 4 -LONG_TIME = 5 - -# NOTE: The third sign is not what I expected, looking at the robot -# We need to figure out what "flipped" means -# It seems to mean counterclockwise -FORWARD_THRUST = [-SPEED, -SPEED, -SPEED, SPEED, DEPTH_HOLD_SPEED, - - DEPTH_HOLD_SPEED, -DEPTH_HOLD_SPEED, DEPTH_HOLD_SPEED] -BACKWARD_THRUST = [SPEED, SPEED, SPEED, -SPEED, DEPTH_HOLD_SPEED, - - DEPTH_HOLD_SPEED, -DEPTH_HOLD_SPEED, DEPTH_HOLD_SPEED] -DOWNWARD_THRUST = [0, 0, 0, 0, DIVE_SPEED, -DIVE_SPEED, -DIVE_SPEED, DIVE_SPEED] -FORWARD_LEFT_THRUST = [-SPEED, 0, 0, SPEED, DEPTH_HOLD_SPEED, -DEPTH_HOLD_SPEED, -DEPTH_HOLD_SPEED, DEPTH_HOLD_SPEED] -FORWARD_RIGHT_THRUST = [0, -SPEED, -SPEED, 0, DEPTH_HOLD_SPEED, -DEPTH_HOLD_SPEED, -DEPTH_HOLD_SPEED, DEPTH_HOLD_SPEED] -BACKWARD_RIGHT_THRUST = [SPEED, 0, 0, -SPEED, DEPTH_HOLD_SPEED, -DEPTH_HOLD_SPEED, -DEPTH_HOLD_SPEED, DEPTH_HOLD_SPEED] -BACKWARD_LEFT_THRUST = [0, SPEED, SPEED, 0, DEPTH_HOLD_SPEED, -DEPTH_HOLD_SPEED, -DEPTH_HOLD_SPEED, DEPTH_HOLD_SPEED] -SPIN_LEFT_THRUST = [-SPEED, SPEED, SPEED, -SPEED, DEPTH_HOLD_SPEED, - - DEPTH_HOLD_SPEED, -DEPTH_HOLD_SPEED, DEPTH_HOLD_SPEED] -SPIN_RIGHT_THRUST = [SPEED, -SPEED, -SPEED, SPEED, DEPTH_HOLD_SPEED, - - DEPTH_HOLD_SPEED, -DEPTH_HOLD_SPEED, DEPTH_HOLD_SPEED] - - -def move_with_thrust_for_seconds(pub, rate, thrust, seconds): - for i in range(seconds * 10): - t = ThrusterSpeeds() - t.speeds = thrust - pub.publish(t) - rate.sleep() - - -def jank_prequal_with_turn(): - pub = rospy.Publisher('/offboard/thruster_speeds', ThrusterSpeeds, queue_size=3) - rate = rospy.Rate(10) - # time.sleep(10) - # Down for 1 second - move_with_thrust_for_seconds(pub, rate, DOWNWARD_THRUST, 1) - # Forward for 3 seconds - move_with_thrust_for_seconds(pub, rate, FORWARD_THRUST, LONG_TIME) - # Spin left for 1 second - move_with_thrust_for_seconds(pub, rate, SPIN_LEFT_THRUST, 1) - # Forward for 2 seconds - move_with_thrust_for_seconds(pub, rate, FORWARD_THRUST, DIAMOND_TIME) - # Spin right for 1 second - move_with_thrust_for_seconds(pub, rate, SPIN_RIGHT_THRUST, TURN_TIME) - # Forward for 2 seconds - move_with_thrust_for_seconds(pub, rate, FORWARD_THRUST, DIAMOND_TIME) - # Spin right for 1 second - move_with_thrust_for_seconds(pub, rate, SPIN_RIGHT_THRUST, TURN_TIME) - # Forward for 2 seconds - move_with_thrust_for_seconds(pub, rate, FORWARD_THRUST, DIAMOND_TIME) - # Spin right for 1 second - move_with_thrust_for_seconds(pub, rate, SPIN_RIGHT_THRUST, TURN_TIME) - # Forward for 2 seconds - move_with_thrust_for_seconds(pub, rate, FORWARD_THRUST, DIAMOND_TIME) - # Spin left for 1 second - move_with_thrust_for_seconds(pub, rate, SPIN_LEFT_THRUST, 1) - # Forward for 3 seconds - move_with_thrust_for_seconds(pub, rate, FORWARD_THRUST, LONG_TIME) - - -def jank_prequal(): - pub = rospy.Publisher('/offboard/thruster_speeds', ThrusterSpeeds, queue_size=3) - rate = rospy.Rate(10) - time.sleep(10) - # Down for 1 second - for i in range(10): - pub.publish(ThrusterSpeeds(DOWNWARD_THRUST)) - rate.sleep() - # Forward for 3 seconds - for i in range(30): - pub.publish(ThrusterSpeeds(FORWARD_THRUST)) - rate.sleep() - # Forward-left for 2 seconds - for i in range(20): - pub.publish(ThrusterSpeeds(FORWARD_LEFT_THRUST)) - rate.sleep() - # Forward-right for 2 seconds - for i in range(20): - pub.publish(ThrusterSpeeds(FORWARD_RIGHT_THRUST)) - rate.sleep() - # Backward-right for 2 seconds - for i in range(20): - pub.publish(ThrusterSpeeds(BACKWARD_RIGHT_THRUST)) - rate.sleep() - # Backward-left for 2 seconds - for i in range(20): - 
pub.publish(ThrusterSpeeds(BACKWARD_LEFT_THRUST)) - rate.sleep() - # Backward for 3 seconds - for i in range(30): - pub.publish(ThrusterSpeeds(BACKWARD_THRUST)) - rate.sleep() - - -def create_prequal_task_sm(): - sm = smach.StateMachine(outcomes=['prequal_task_succeeded', 'prequal_task_failed']) - listener = TransformListener() - gate_task.create_gate_task_sm() - - DEPTH = -3 - STRETCH_LENGTH = 4 - LOOP_SIZE = 2 - - with sm: - # smach.StateMachine.add('GO_THROUGH_GATE', gate_sm, - # transitions={ - # 'gate_task_succeeded': 'APPROACH_MARKER', - # 'gate_task_failed': 'prequal_task_failed'}) - smach.StateMachine.add('SUBMERGE', MoveToPoseLocalTask(0, 0, DEPTH, 0, 0, 0, listener), - transitions={ - 'done': 'LEFT_OF_MARKER'}) - smach.StateMachine.add('APPROACH_MARKER', MoveToPoseLocalTask(STRETCH_LENGTH, 0, 0, 0, 0, 0, listener), - transitions={ - 'done': 'LEFT_OF_MARKER'}) - smach.StateMachine.add('LEFT_OF_MARKER', MoveToPoseLocalTask(LOOP_SIZE, LOOP_SIZE, 0, 0, 0, 0, listener), - transitions={ - 'done': 'BEHIND_MARKER'}) - smach.StateMachine.add('BEHIND_MARKER', MoveToPoseLocalTask(LOOP_SIZE, -LOOP_SIZE, 0, 0, 0, 0, listener), - transitions={ - 'done': 'RIGHT_OF_MARKER'}) - smach.StateMachine.add('RIGHT_OF_MARKER', MoveToPoseLocalTask(-LOOP_SIZE, -LOOP_SIZE, 0, 0, 0, 0, listener), - transitions={ - 'done': 'BACK_TO_FRONT_OF_MARKER'}) - smach.StateMachine.add('BACK_TO_FRONT_OF_MARKER', MoveToPoseLocalTask(-LOOP_SIZE, LOOP_SIZE, - 0, 0, 0, math.pi, listener), transitions={'done': 'BACK_THROUGH_GATE'}) - smach.StateMachine.add('BACK_THROUGH_GATE', MoveToPoseLocalTask(-STRETCH_LENGTH, 0, 0, 0, 0, 0, listener), - transitions={ - 'done': 'prequal_task_succeeded'}) - # smach.StateMachine.add('BACK_THROUGH_GATE', gate_sm, - # transitions={ - # 'gate_task_succeeded': 'prequal_task_succeeded', - # 'gate_task_failed': 'prequal_task_failed'}) - - return sm - - -if __name__ == '__main__': - main() - - -# class PreQualGlobalTask(Task): -# """ -# Task to complete prequalifying run by moving to global poses -# """ - -# POSE1 = [0, 0, 0, 0, 0, 0] -# POSE2 = [0, 0, 0, 0, 0, 0] -# POSE3 = [0, 0, 0, 0, 0, 0] -# POSE4 = [0, 0, 0, 0, 0, 0] -# POSE5 = [0, 0, 0, 0, 0, 0] - -# def __init__(self): -# super(PreQualGlobalTask, self).__init__() - -# self.list_task = ListTask([MoveToPoseGlobalTask(*self.POSE1), -# MoveToPoseGlobalTask(*self.POSE2), -# MoveToPoseGlobalTask(*self.POSE3), -# MoveToPoseGlobalTask(*self.POSE4), -# MoveToPoseGlobalTask(*self.POSE5)]) - -# def _on_task_run(self): -# self.list_task.run() - -# if self.list_task.finished: -# self.finish() - - -# class PreQualLocalTask(Task): -# """ -# Task to complete prequalifying run by moving to local poses -# """ - -# POSE1 = [0, 0, 0, 0, 0, 0] -# POSE2 = [0, 0, 0, 0, 0, 0] -# POSE3 = [0, 0, 0, 0, 0, 0] -# POSE4 = [0, 0, 0, 0, 0, 0] -# POSE5 = [0, 0, 0, 0, 0, 0] - -# def __init__(self): -# super(PreQualLocalTask, self).__init__() - -# self.list_task = ListTask([MoveToPoseLocalTask(*self.POSE1), -# MoveToPoseLocalTask(*self.POSE2), -# MoveToPoseLocalTask(*self.POSE3), -# MoveToPoseLocalTask(*self.POSE4), -# MoveToPoseLocalTask(*self.POSE5)]) - -# def _on_task_run(self): -# self.list_task.run() - -# if self.list_task.finished: -# self.finish() diff --git a/onboard/catkin_ws/src/task_planning/scripts/old/style_task.py b/onboard/catkin_ws/src/task_planning/scripts/old/style_task.py deleted file mode 100644 index e2d1a8211..000000000 --- a/onboard/catkin_ws/src/task_planning/scripts/old/style_task.py +++ /dev/null @@ -1,105 +0,0 @@ -from task import Task -from 
geometry_msgs.msg import Pose, Quaternion, Point -from tf.transformations import quaternion_from_euler -import task_utils -import move_tasks -import math -import rospy - - -class StyleTask(Task): - def __init__(self, axis, speed, angle=2 * math.pi, num_segments=4): - """Rotate using a given power past a certain angle and check that it reaches a num_segments - - Parameters: - axis (string): orientation (x, y, z) of turn - power (float): desired twist power of turn in (0, 1] - angle (float): desired angle of turn in radians - num_segments (int): number of segments to check position durning turn (must be >= 3) - - """ - super(StyleTask, self).__init__() - - if num_segments < 3: - raise Exception("num_segments must be >= 3 for StyleTask.") - - if speed <= 0 or speed > 1: - raise Exception("power must be between (0, 1] for StyleTask.") - - speed /= 10000 - self.num_segments = num_segments - self.angle = angle - self.seg_rads = angle / num_segments - self.nintey_points = 100 - direction = angle / abs(angle) - rospy.loginfo("Direction is {} Speed is {}".format(direction, speed)) - - if axis == "x": - self.rotate_task = move_tasks.AllocateVelocityLocalTask(0, 0, 0, speed * direction, 0, 0) - self.q_angle = quaternion_from_euler(self.seg_rads, 0, 0) - elif axis == "y": - self.rotate_task = move_tasks.AllocateVelocityLocalTask(0, 0, 0, 0, speed * direction, 0) - self.q_angle = quaternion_from_euler(0, self.seg_rads, 0) - elif axis == "z": - self.rotate_task = move_tasks.AllocateVelocityLocalTask(0, 0, 0, 0, 0, speed * direction) - self.q_angle = quaternion_from_euler(0, 0, self.seg_rads) - else: - raise Exception("axis must be \"x\", \"y\", or \"z\" for StyleTask") - - rospy.loginfo("{} axis turn of {}".format(axis, speed * direction)) - - def _on_task_start(self): - """Set some starting values for the style rotation.""" - self.current_segment = 0 - self.angle_pose = Pose(Point(), Quaternion(*self.q_angle)) - self.target_pose = task_utils.add_poses([self.state.pose.pose, self.angle_pose]) - self.starting_pose = self.state.pose.pose - self.on_finish_segment = False - self.output["rads_turned"] = 0.0 - self.output["points_scored"] = 0 - rospy.loginfo("Now starting turn...") - # self.velocity_task = AllocateVelocityLocalTask(self.twist.linear.x, - # self.twist.linear.y, - # self.twist.linear.z, - # self.twist.angular.x, - # self.twist.angular.y, - # self.twist.angular.z) - - def _on_task_run(self): - """Go through the rotation using power control, checking that we hit certain segments as we go.""" - # self.publish_desired_twist_power(self.twist) # change to ALlocateVelocityTask - # self.publish_desired_twist(self.twist.linear.x, - # self.twist.linear.y, - # self.twist.linear.z, - # self.twist.angular.x, - # self.twist.angular.y, - # self.twist.angular.z - # ) - - self.rotate_task.run() - - if not self.on_finish_segment and task_utils.at_pose( - self.state.pose.pose, self.target_pose, float("inf"), self.seg_rads / 2): - self.current_segment += 1 - self.target_pose = task_utils.add_poses([self.target_pose, self.angle_pose]) - rospy.loginfo("Now on segment " + str(self.current_segment)) - self.output["rads_turned"] += self.seg_rads - self.output["points_scored"] = int(self.output["rads_turned"] / (math.pi / 2)) * self.nintey_points - - # rospy.loginfo("1 clear") - - if not self.on_finish_segment and self.current_segment == self.num_segments: - self.on_finish_segment = True - - # rospy.loginfo("2 clear") - - if self.on_finish_segment and task_utils.at_pose( - self.state.pose.pose, self.target_pose, 
float("inf"), self.seg_rads): - rospy.loginfo("Turn Complete!\n") - self.output["rads_turned"] = self.angle - self.output["points_scored"] = int(self.output["rads_turned"] / (math.pi / 2)) * self.nintey_points - rospy.loginfo("Stopping turn!") - self.rotate_task.finish() - self.finish() - - # rospy.loginfo("all clear, self.finished is {}".format(self.finished)) diff --git a/onboard/catkin_ws/src/task_planning/scripts/prequal_tasks.py b/onboard/catkin_ws/src/task_planning/scripts/prequal_tasks.py new file mode 100644 index 000000000..6c738e883 --- /dev/null +++ b/onboard/catkin_ws/src/task_planning/scripts/prequal_tasks.py @@ -0,0 +1,163 @@ +import rospy +import time +import math + +from task import Task, task +import move_tasks +from utils import geometry_utils +from task import Yield + +from interface.controls import Controls +from interface.cv import CV +from interface.state import State + +RECT_HEIGHT_METERS = 0.3048 + + +@task +async def prequal_task(self: Task) -> Task[None, None, None]: + """ + Complete the prequalification task by moving to a series of local poses. Returns when the robot is at the final pose + with zero velocity, within a small tolerance, completing the prequalifiacation task. + """ + + countdown_length = 10 + for i in range(countdown_length): + rospy.loginfo(f"Starting in {countdown_length - i}") + start_time = time.time() + while time.time() - start_time < 1: + await Yield() + time.sleep(0.01) + + Controls().call_enable_controls(True) + + DEPTH_LEVEL = -0.5 + + await move_tasks.move_to_pose_local( + geometry_utils.create_pose(0, 0, DEPTH_LEVEL, 0, 0, 0), + parent=self) + rospy.loginfo(f"Moved to (0, 0, {DEPTH_LEVEL})") + + DEPTH_LEVEL = State().depth + + async def rotate_deg(angle): + rad_angle = math.radians(angle) + await move_tasks.move_to_pose_local( + geometry_utils.create_pose(0, 0, 0, 0, 0, rad_angle), + parent=self) + rospy.loginfo(f"Rotate {angle}") + + async def track_blue_rectangle(distance, direction): + rospy.loginfo(f"track_blue_rectangle {distance} {direction}") + repeats = math.ceil(distance) + total_dist = 0 + prev_touching_top = False + prev_touching_bottom = False + for i in range(repeats): + step = distance - total_dist if i == repeats-1 else 1 + await move_tasks.move_to_pose_local( + geometry_utils.create_pose(step * direction, 0, 0, 0, 0, 0), + parent=self) + + total_dist += step + rospy.loginfo(f"Moved forward {total_dist}") + + touching_top = CV().cv_data["lane_marker_touching_top"] + touching_bottom = CV().cv_data["lane_marker_touching_bottom"] + if touching_top and not touching_bottom: + await move_tasks.move_to_pose_local( + geometry_utils.create_pose(0, 0.2, 0, 0, 0, 0), + parent=self) + rospy.loginfo("Touching top correction (0, 0.2, 0)") + if prev_touching_top and not prev_touching_bottom: + await rotate_deg(20) + elif not touching_top and touching_bottom: + await move_tasks.move_to_pose_local( + geometry_utils.create_pose(0, -0.2, 0, 0, 0, 0), + parent=self) + rospy.loginfo("Touching bottom correction (0, -0.2, 0)") + if not prev_touching_top and prev_touching_bottom: + await rotate_deg(-20) + + angle = (CV().cv_data["lane_marker_angle"] * -1) + if abs(angle) > 0: + rad_angle = math.radians(angle) + await move_tasks.move_to_pose_local( + geometry_utils.create_pose(0, 0, 0, 0, 0, rad_angle), + parent=self) + rospy.loginfo(f"Yaw correction {angle}") + + dist_pixels = CV().cv_data["lane_marker_dist"] + height_pixels = CV().cv_data["lane_marker_height"] + dist_meters = dist_pixels * RECT_HEIGHT_METERS / height_pixels + if 
abs(dist_meters) > 0: + await move_tasks.move_to_pose_local( + geometry_utils.create_pose(0, dist_meters, 0, 0, 0, 0), + parent=self) + rospy.loginfo(f"Correction {dist_meters}") + + depth_delta = DEPTH_LEVEL - State().depth + if abs(depth_delta) > 0: + await move_tasks.move_to_pose_local( + geometry_utils.create_pose(0, 0, depth_delta, 0, 0, 0), + parent=self) + rospy.loginfo(f"Depth correction {depth_delta}") + + prev_touching_top = touching_top + prev_touching_bottom = touching_bottom + + await track_blue_rectangle(2.5, 1) + + await move_tasks.move_to_pose_local( + geometry_utils.create_pose(0, 0, -0.3, 0, 0, 0), + parent=self) + rospy.loginfo("Moved to (0, 0, -0.3)") + DEPTH_LEVEL = State().depth + + await track_blue_rectangle(2, 1) + + await move_tasks.move_to_pose_local( + geometry_utils.create_pose(0, 0, 0.3, 0, 0, 0), + parent=self) + rospy.loginfo("Moved to (0, 0, 0.3)") + DEPTH_LEVEL = State().depth + + await track_blue_rectangle(7, 1) + + directions = [ + (1, 0, 0), + (1, 0, 0), + (1, 0, 0), + (0, 1, 0), + (0, 1, 0), + (-1, 0, 0), + (-1, 0, 0), + (-1, 0, 0), + (0, -1, 0), + (0, -0.8, 0), + ] + for direction in directions: + await move_tasks.move_to_pose_local( + geometry_utils.create_pose(direction[0], direction[1], direction[2], 0, 0, 0), + parent=self) + rospy.loginfo(f"Moved to {direction}") + + await track_blue_rectangle(7, -1) + + await move_tasks.move_to_pose_local( + geometry_utils.create_pose(0, 0, -0.2, 0, 0, 0), + parent=self) + rospy.loginfo("Moved to (0, 0, -0.2)") + DEPTH_LEVEL = State().depth + + await track_blue_rectangle(2, -1) + + await move_tasks.move_to_pose_local( + geometry_utils.create_pose(0, 0, 0.2, 0, 0, 0), + parent=self) + rospy.loginfo("Moved to (0, 0, 0.2)") + DEPTH_LEVEL = State().depth + + await track_blue_rectangle(3, -1) + + Controls().call_enable_controls(False) diff --git a/onboard/catkin_ws/src/task_planning/scripts/smach_concurrence.py b/onboard/catkin_ws/src/task_planning/scripts/smach_concurrence.py deleted file mode 100644 index 5ac140b47..000000000 --- a/onboard/catkin_ws/src/task_planning/scripts/smach_concurrence.py +++ /dev/null @@ -1,38 +0,0 @@ -from smach import Concurrence -import rospy -import smach - - -class Log(smach.State): - def _init_(self, text, outcomes=['outcome2']): - # - self.text = text - - def execute(self, userdata): - # ff - rospy.loginfo(self.text) - return 'outcome2' - - -cc = Concurrence(outcomes=['outcomey, succeeded'], - default_outcome='outcomey', - input_keys=['sm_input'], - output_keys=['sm_output'], - outcome_map={'succeeded': {'Log1': 'outcome2'}}) - -with cc: - Concurrence.add('Log1', Log('log1')) - Concurrence.add('Log2', Log('log2')) - - -def main(): - rospy.init_node('smach_example_state_machine') - sm_top = smach.StateMachine(outcomes=['outcome6']) - - with sm_top: - smach.StateMachine.add('CON', cc, transitions={'succeeded': 'CON', 'outcomey': 'outcome6'}) - - # smach.StateMachine.add('CON', sm_top, - # transitions = {'succeeded':'CON'}) - - sm_top.execute() diff --git a/onboard/catkin_ws/src/task_planning/scripts/smach_concurrent_sm.py b/onboard/catkin_ws/src/task_planning/scripts/smach_concurrent_sm.py deleted file mode 100644 index b80a7d761..000000000 --- a/onboard/catkin_ws/src/task_planning/scripts/smach_concurrent_sm.py +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env python - -import smach -import rospy -import roslib -roslib.load_manifest('smach_tutorials') - -# define state Foo - - -class Foo(smach.State): - def __init__(self): - smach.State.__init__(self, outcomes=['outcome1', 'outcome2']) - 
self.counter = 0 - - def execute(self, userdata): - rospy.loginfo('Executing state FOO') - if self.counter < 3: - self.counter += 1 - return 'outcome1' - else: - return 'outcome2' - - -# define state Bar -class Bar(smach.State): - def __init__(self): - smach.State.__init__(self, outcomes=['outcome1']) - - def execute(self, userdata): - rospy.loginfo('Executing state BAR') - return 'outcome1' - - -# define state Bas -class Bas(smach.State): - def __init__(self): - smach.State.__init__(self, outcomes=['outcome3']) - - def execute(self, userdata): - rospy.loginfo('Executing state BAS') - return 'outcome3' - - -def main(): - rospy.init_node('smach_example_state_machine') - - # Create the top level SMACH state machine - sm_top = smach.StateMachine(outcomes=['outcome6']) - - # Open the container - with sm_top: - - smach.StateMachine.add('BAS', Bas(), - transitions={'outcome3': 'CON'}) - - # Create the sub SMACH state machine - sm_con = smach.Concurrence(outcomes=['outcome4', 'outcome5'], - default_outcome='outcome4', - outcome_map={'outcome5': - {'FOO': 'outcome2', - 'BAR': 'outcome1'}}) - - # Open the container - with sm_con: - # Add states to the container - smach.Concurrence.add('FOO', Foo()) - smach.Concurrence.add('BAR', Bar()) - - smach.StateMachine.add('CON', sm_con, - transitions={'outcome4': 'CON', - 'outcome5': 'outcome6'}) - - # Execute SMACH plan - sm_top.execute() - - -if __name__ == '__main__': - main() diff --git a/onboard/catkin_ws/src/task_planning/scripts/smach_test.py b/onboard/catkin_ws/src/task_planning/scripts/smach_test.py deleted file mode 100644 index e77c0f24e..000000000 --- a/onboard/catkin_ws/src/task_planning/scripts/smach_test.py +++ /dev/null @@ -1,253 +0,0 @@ -#!/usr/bin/env python - -import rospy -import smach -import random -from move_tasks import MoveToPoseGlobalTask, MoveToPoseLocalTask, AllocateVelocityLocalTask -import cv_tasks -from time import sleep -from geometry_msgs.msg import Vector3 -from tf import TransformListener -# from interface.controls import ControlsInterface -from interface.cv import CVInterface - -# define state Foo - - -def main(): - rospy.init_node('smach_test') - listener = TransformListener() - # controls = ControlsInterface(listener) - cv = CVInterface(listener) - - sm = buoy_cv_test(cv) - - rospy.loginfo("Waiting for transform listener") - listener.waitForTransform('odom', 'base_link', rospy.Time(), rospy.Duration(15)) - - # Execute SMACH plan - sm.execute() - - -def buoy_cv_test(cv): - sm = smach.StateMachine(outcomes=['done']) - - with sm: - smach.StateMachine.add('FindBuoy', cv_tasks.ObjectCoordsTask('bootleggerbuoy', cv), - transitions={ - 'invalid': 'FindBuoy', - 'valid': 'PrintBuoy' - }) - smach.StateMachine.add('PrintBuoy', PrintUserDataTask("coords"), - transitions={ - 'done': 'done' - }) - - return sm - - -class RandomizeOutputPose(smach.State): - def __init__(self): - super(RandomizeOutputPose, self).__init__( - outcomes=["done"], - output_keys=['x', 'y', 'z', 'roll', 'pitch', 'yaw']) - - def execute(self, userdata): - sleep(1) - if self.preempt_requested(): - self.service_preempt() - return 'done' - userdata.x = random.random() * 10 - userdata.y = random.random() * 10 - userdata.z = random.random() * 10 - - userdata.roll = 0 - userdata.pitch = 0 - userdata.yaw = 0 - - return "done" - - -def object_passing(): - sm = smach.StateMachine(outcomes=['done']) - - with sm: - smach.StateMachine.add('calc', OutputVector3Task(Vector3(3, 5, 3)), - transitions={'done': 'print'}) - - smach.StateMachine.add('print', PrintUserDataTask(), - 
transitions={'done': 'done'}) - - return sm - - -def controls_testing(controls, listener): - # Create a SMACH state machine - sm = smach.StateMachine(outcomes=['done']) - - # Open the container - with sm: - # Add states to the container - smach.StateMachine.add('Move1', MoveToPoseLocalTask(2, 0, 0, 0, 0, 0, controls, listener), - transitions={'done': 'done', 'continue': 'Move1'}) - # left square - # smach.StateMachine.add('MoveLeft2', MoveToPoseLocalTask(0, -1.5, 0, 0, 0, 0, controls, listener), - # transitions={'done': 'MoveLeft3', 'continue': 'MoveLeft2'}) - # smach.StateMachine.add('MoveLeft3', MoveToPoseLocalTask(1.5, 0, 0, 0, 0, 0, controls, listener), - # transitions={'done': 'MoveLeft4', 'continue': 'MoveLeft3'}) - # smach.StateMachine.add('MoveLeft4', MoveToPoseLocalTask(0, 1.5, 0, 0, 0, 0, controls, listener), - # transitions={'done': 'finish', 'continue': 'MoveLeft4'}) - # right square - # smach.StateMachine.add('MoveRight2', MoveToPoseGlobalTask(2, -2, 0, 0, 0, 0, controls), - # transitions={'done': 'MoveRight3', 'continue': 'MoveRight2'}) - # smach.StateMachine.add('MoveRight3', MoveToPoseGlobalTask(0, -2, 0, 0, 0, 0, controls), - # transitions={'done': 'MoveRight4', 'continue': 'MoveRight3'}) - # smach.StateMachine.add('MoveRight4', MoveToPoseGlobalTask(0, 0, 0, 0, 0, 0, controls), - # transitions={'done': 'finish', 'continue': 'MoveRight4'}) - - return sm - - -def concurrency(controls): - # Create a SMACH state machine - sm = smach.StateMachine(outcomes=['finish']) - - # Open the container - with sm: - # Add states to the container - smach.StateMachine.add('Move1', MoveToPoseGlobalTask(2, 0, 0, 0, 0, 0, controls), - transitions={'done': 'ConcurrentMove2', 'continue': 'Move1'}) - cc = smach.Concurrence(outcomes=['done'], - default_outcome='done', - outcome_map={'done': {'Move2': 'done', 'Log': 'done'}}) - with cc: - smach.Concurrence.add('Move2', MoveToPoseGlobalTask(2, 2, 0, 0, 0, 0, controls)) - smach.Concurrence.add('Log', LogSomethingUseful()) - - smach.StateMachine.add('ConcurrentMove2', cc, transitions={'done': 'Move3'}) - - smach.StateMachine.add('Move3', MoveToPoseGlobalTask(0, 2, 0, 0, 0, 0, controls), - transitions={'done': 'Move4', 'continue': 'Move3'}) - smach.StateMachine.add('Move4', MoveToPoseGlobalTask(0, 0, 0, 0, 0, 0, controls), - transitions={'done': 'finish', 'continue': 'Move4'}) - - return sm - - -def decision_making(controls): - # Create a SMACH state machine - sm = smach.StateMachine(outcomes=['finish']) - - # Open the container - with sm: - # Add states to the container - smach.StateMachine.add('Move1', MoveToPoseGlobalTask(2, 0, 0, 0, 0, 0, controls), - transitions={'done': 'choice', 'continue': 'Move1'}) - smach.StateMachine.add("choice", RandomChoice(2), - transitions={'0': 'MoveLeft2', '1': 'MoveRight2'}) - # left square - smach.StateMachine.add('MoveLeft2', MoveToPoseGlobalTask(2, 2, 0, 0, 0, 0, controls), - transitions={'done': 'MoveLeft3', 'continue': 'MoveLeft2'}) - smach.StateMachine.add('MoveLeft3', MoveToPoseGlobalTask(0, 2, 0, 0, 0, 0, controls), - transitions={'done': 'MoveLeft4', 'continue': 'MoveLeft3'}) - smach.StateMachine.add('MoveLeft4', MoveToPoseGlobalTask(0, 0, 0, 0, 0, 0, controls), - transitions={'done': 'finish', 'continue': 'MoveLeft4'}) - # right square - smach.StateMachine.add('MoveRight2', MoveToPoseGlobalTask(2, -2, 0, 0, 0, 0, controls), - transitions={'done': 'MoveRight3', 'continue': 'MoveRight2'}) - smach.StateMachine.add('MoveRight3', MoveToPoseGlobalTask(0, -2, 0, 0, 0, 0, controls), - transitions={'done': 
'MoveRight4', 'continue': 'MoveRight3'}) - smach.StateMachine.add('MoveRight4', MoveToPoseGlobalTask(0, 0, 0, 0, 0, 0, controls), - transitions={'done': 'finish', 'continue': 'MoveRight4'}) - - return sm - - -def simple_move_test(controls, listener): - sm = smach.StateMachine(outcomes=['done']) - - with sm: - smach.StateMachine.add("Move", MoveToPoseLocalTask(-2, 0, 0, 0, 0, 0, controls, listener), - transitions={ - 'continue': 'Move', - 'done': 'Surface' - }) - smach.StateMachine.add("Surface", AllocateVelocityLocalTask(0, 0, 1, 0, 0, 0, controls), - transitions={ - 'done': 'Surface' - }) - - return sm - - -# Test if userdata output by a task can be accessed even with another task in between -def userdata_passthrough(): - sm = smach.StateMachine(outcomes=['done']) - - with sm: - smach.StateMachine.add("Output", OutputVector3Task(Vector3(1, 2, 3)), - transitions={ - 'done': 'Print' - }) - smach.StateMachine.add("Print", PrintUserDataTask(), - transitions={ - 'done': 'Print2' - }) - smach.StateMachine.add("Print2", PrintUserDataTask(), - transitions={ - 'done': 'done' - }) - - return sm - - -class OutputVector3Task(smach.State): - def __init__(self, vec): - super().__init__(outcomes=["done"], output_keys=['vec']) - self.vec = vec - - def execute(self, userdata): - userdata.vec = self.vec - return "done" - - -class PrintUserDataTask(smach.State): - def __init__(self, key='vec'): - super().__init__(outcomes=["done"], input_keys=[key]) - self.key = key - - def execute(self, userdata): - print(userdata[self.key]) - return "done" - - -class RandomChoice(smach.State): - """Randomly chooses a number""" - - def __init__(self, options): - super(RandomChoice, self).__init__(outcomes=[str(i) for i in range(options)]) - - self.options = options - - def execute(self, userdata): - res = str(random.randint(0, self.options)) - rospy.loginfo(res) - return res - - -class LogSomethingUseful(smach.State): - """Logs Stuff""" - - def __init__(self): - super(LogSomethingUseful, self).__init__(outcomes=["done"]) - - def execute(self, userdata): - rate = rospy.Rate(1) - for i in range(40): - rospy.loginfo("something important") - rate.sleep() - return "done" - - -if __name__ == '__main__': - main() diff --git a/onboard/catkin_ws/src/task_planning/scripts/task.py b/onboard/catkin_ws/src/task_planning/scripts/task.py index d85f56bb5..52e057af3 100644 --- a/onboard/catkin_ws/src/task_planning/scripts/task.py +++ b/onboard/catkin_ws/src/task_planning/scripts/task.py @@ -1,52 +1,397 @@ -from task_state import TaskState -import dependency_injector.providers as providers -import smach +from __future__ import annotations -from abc import abstractmethod +from enum import IntEnum +import inspect +import jsonpickle +from typing import Any, Callable, Coroutine, Generator, Generic, Optional, Type, TypeVar, Union +import rospy +from custom_msgs.msg import TaskUpdate -class Task(smach.State): - """High level task that represents some function""" +from message_conversion.jsonpickle_custom_handlers import register_custom_jsonpickle_handlers - task_state_provider = providers.Singleton(TaskState) +# Register all JSONPickle handlers for custom classes +register_custom_jsonpickle_handlers() - def __init__(self, outcomes, input_keys=[], output_keys=[], io_keys=[]): + +class TaskStatus(IntEnum): + """ + An enum to represent the status of a task. All values are equivalent to the constants defined in + custom_msgs/TaskUpdate. 
+ + Attributes: + INITIALIZED: The task has been initialized + PAUSED: The task has yielded and paused execution + RESUMED: The task has been sent a value and resumed execution + THREW: An exception has been sent into and raised in the task and it has resumed execution + RETURNED: The task has completed by returning a value + CLOSED: The task has been closed before returning a value + DELETED: The task has been deleted by the garbage collector before returning a value + ERRORED: The task has raised an exception and has been closed + """ + + INITIALIZED = TaskUpdate.INITIALIZED + PAUSED = TaskUpdate.PAUSED + RESUMED = TaskUpdate.RESUMED + THREW = TaskUpdate.THREW + RETURNED = TaskUpdate.RETURNED + CLOSED = TaskUpdate.CLOSED + DELETED = TaskUpdate.DELETED + ERRORED = TaskUpdate.ERRORED + + +class TaskUpdatePublisher: + """ + A singleton class to publish task updates. + + Attributes: + _instance: The singleton instance of this class. Is a static attribute. + publisher: The publisher for the task_updates topic + """ + + _instance = None + + def __new__(cls): + if cls._instance is None: + cls._instance = super(TaskUpdatePublisher, cls).__new__(cls) + cls._instance.__init__() + return cls._instance + + def __init__(self): + self.publisher = rospy.Publisher("/task_planning/updates", TaskUpdate, queue_size=0) + + def publish_update(self, task_id: int, parent_id: int, name: str, status: TaskStatus, data: Any) -> None: + """ + Publish a message to the task_updates topic. + + Args: + task_id: The id of the task + parent_id: The id of the parent task + name: The name of the task + status: The current status of the task + data: The data to publish with the status, which should be encodable to JSON. + + Raises: + AssertionError: If task_id is not an integer + AssertionError: If parent_id is not an integer + AssertionError: If name is not a string + AssertionError: If the status is not a valid TaskStatus + """ + + # Ensure id, parent_id, and name are of the correct types + assert isinstance(task_id, int), "Task id must be an int" + assert isinstance(parent_id, int), "Parent id must be an int" + assert isinstance(name, str), "Name must be a string" + + # Ensure status is one of the valid statuses + assert status in TaskStatus, "Invalid task status" + + # Options for jsonpickle encoding + jsonpickle_options = { + "unpicklable": True, + "make_refs": False, + "warn": True, + "fail_safe": lambda e: "Failed to encode" # If an object fails to encode, replace it with this string + } + + # Encode the data to JSON + msg_data = "" + try: + msg_data = jsonpickle.encode(data, **jsonpickle_options) + except Exception: + rospy.warn(f"Task with id {task_id} failed to encode data to JSON when publishing {status.name}: {data}") + msg_data = jsonpickle.encode(str(data), **jsonpickle_options) + + # Create message header + header = rospy.Header(stamp=rospy.Time.now()) + + # Publish the message + msg = TaskUpdate(header=header, id=task_id, parent_id=parent_id, name=name, status=status, data=msg_data) + self.publisher.publish(msg) + + def __del__(self): + self.publisher.unregister() + + +YieldType = TypeVar("YieldType") +SendType = TypeVar("SendType") +ReturnType = TypeVar("ReturnType") + + +class Task(Generic[YieldType, SendType, ReturnType]): + """ + A wrapper around a coroutine to publish task updates for every action taken on the coroutine. All functions defined + for native coroutines are also defined by this class, including send, throw, close, and __await__. Also defines + step, which sends `None` to the coroutine. 
+ + Attributes: + MAIN_ID: The id of the main task + id: The id of the task + parent_id: The id of the parent task + done: If the coroutine has returned, closed, or deleted + started: If the coroutine has been sent at least one value + name: The name of the coroutine + initialized: If this object has been properly initialized + args: The positional arguments used to initialize the coroutine + kwargs: The keyword arguments used to initialize the coroutine + """ + + MAIN_ID = 0 + + def __init__(self, coroutine: Callable[..., Coroutine[YieldType, SendType, ReturnType]], *args, + parent: Optional[Union[Task, int]] = None, **kwargs): + """ + Initialize the Task object. + + Args: + coroutine: The native coroutine to wrap + args: The positional arguments to pass to the coroutine + parent: The parent task. Must be a Task or Task.MAIN_ID + kwargs: The keyword arguments to pass to the coroutine + + Raises: + ValueError: If the parent is not a Task or Task.MAIN_ID + AssertionError: If the coroutine is not a native coroutine + """ + + self._initialized = False + self._id = id(self) + self._done = False + self._started = False + + # Ensure parent is a Task or the MAIN_ID + if parent == Task.MAIN_ID: + self.parent = None + self._parent_id = Task.MAIN_ID + elif isinstance(parent, Task): + self.parent = parent + self._parent_id = parent.id + else: + raise ValueError("Task parent must be a Task or MAIN_ID. Instead got " + str(parent)) + + # Ensure coroutine is a native coroutine + assert inspect.iscoroutinefunction(coroutine), "Coroutine must be a native coroutine" + + # Initialize the coroutine + self._coroutine = coroutine(self, *args, **kwargs) + self._name = coroutine.__name__ + self._args = args + self._kwargs = kwargs + + initialized_data = {} + if args: + initialized_data["args"] = args + if kwargs: + initialized_data["kwargs"] = kwargs + self._publish_update(TaskStatus.INITIALIZED, initialized_data) + self._initialized = True + + @property + def id(self): + """ + The id of the task + """ + return self._id + + @property + def parent_id(self): + """ + The id of the parent task + """ + return self._parent_id + + @property + def done(self): + """ + If the coroutine has returned, closed, or deleted + """ + return self._done + + @property + def started(self): + """ + If the coroutine has been sent at least one value """ - Create a Task. + return self._started + @property + def name(self): + """ + The name of the coroutine """ - smach.State.__init__(self, outcomes, input_keys, output_keys, io_keys) + return self._name - self.task_state = self.task_state_provider() - self.start_time = None - self.initial_state = None - self.output = {} + @property + def initialized(self): + """ + If this object has been properly initialized + """ + return self._initialized @property - def state(self): - """Wrap task_state.state with just the state property""" - return self.task_state.state + def args(self): + """ + The positional arguments used to initialize the coroutine + """ + return self._args @property - def cv_data(self): - return self.task_state.cv_data + def kwargs(self): + """ + The keyword arguments used to initialize the coroutine + """ + return self._kwargs + + def _publish_update(self, status: TaskStatus, data: Any) -> None: + """ + Publish a message to the task_updates topic. + + Args: + status: The current status of the task + data: The data to publish with the status, which should be encodable to JSON. 
+ + Raises: + AssertionError: If the status is not a valid TaskStatus + """ + + TaskUpdatePublisher().publish_update(self._id, self._parent_id, self._name, status, data) + + def step(self): + """ + Send a None value to the coroutine + """ + return self.send(None) + + def send(self, value: SendType) -> Union[YieldType, ReturnType]: + """ + Send a value to the coroutine. + + Args: + value: The value to send to the coroutine + + Returns: + The value yielded or returned by the coroutine + + Raises: + Type[BaseException]: If the coroutine raises an exception + """ + + self._started = True + try: + self._publish_update(TaskStatus.RESUMED, value) + ret_val = self._coroutine.send(value) + self._publish_update(TaskStatus.PAUSED, ret_val) + return ret_val + except StopIteration as e: + self._done = True + self._publish_update(TaskStatus.RETURNED, e.value) + return e.value + except BaseException as e: + self._done = True + self._publish_update(TaskStatus.ERRORED, e) + raise e + + def throw(self, error: Type[BaseException]) -> Union[YieldType, ReturnType]: + """ + Raise an exception in the coroutine. + + Args: + error: The exception to raise in the coroutine + + Returns: + The value yielded or returned by the coroutine + + Raises: + Type[BaseException]: If the coroutine raises an exception + """ + + self._started = True + try: + self._publish_update(TaskStatus.THREW, error) + ret_val = self._coroutine.throw(error) + self._publish_update(TaskStatus.PAUSED, ret_val) + return ret_val + except StopIteration as e: + self._done = True + self._publish_update(TaskStatus.RETURNED, e.value) + return e.value + except BaseException as e: + self._done = True + self._publish_update(TaskStatus.ERRORED, e) + raise e + + def close(self) -> None: + """ + Close the coroutine. + """ + + if not self._done: + try: + self._publish_update(TaskStatus.CLOSED, None) + self._coroutine.close() + except BaseException as e: + self._publish_update(TaskStatus.ERRORED, e) + raise e + finally: + self._done = True + + def __del__(self) -> None: + """ + Close the coroutine when the object is deleted. + """ + + if self._initialized and not self._done: + try: + self._publish_update(TaskStatus.DELETED, None) + self._coroutine.close() + except BaseException as e: + self._publish_update(TaskStatus.ERRORED, e) + raise e + finally: + self._done = True + + def __await__(self) -> Generator[YieldType, SendType, ReturnType]: + """ + Allow this object to be used in an await expression. Runs the coroutine from where it last left off. + + Yields: + The value yielded by the coroutine + + Returns: + The value returned by the coroutine + + Raises: + Type[BaseException]: If the coroutine raises an exception + """ + + input = None + output = None + while not self._done: + # Yield output and accept input only if the coroutine has been started + if self._started: + input = (yield output) + output = self.send(input) + return output + - @abstractmethod - def run(self, userdata): - """Try to complete the task +def task(func: Callable[..., Coroutine[YieldType, SendType, ReturnType]]) -> \ + Callable[..., Task[YieldType, SendType, ReturnType]]: + """ + A decorator to wrap a coroutine within Task. 
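As a hedged, illustrative sketch of this coroutine-wrapping pattern: a toy task defined with this `task` decorator and driven the same way task_runner.py steps its comp_tasks. The task name `count_up` and its loop are invented for the example; it assumes this task.py module is on the Python path and a roscore is reachable so the TaskUpdate publisher can be created.

import rospy
from task import Task, Yield, task

@task
async def count_up(self: Task, limit: int) -> int:
    # Pause at each value; Task.send publishes PAUSED with whatever is yielded here.
    for i in range(limit):
        await Yield(i)
    return limit

if __name__ == '__main__':
    rospy.init_node('task_example')
    t = count_up(3, parent=Task.MAIN_ID)   # publishes INITIALIZED with the args
    while not t.done:
        print(t.step())                    # prints 0, 1, 2, then the return value 3

Each step also publishes RESUMED and PAUSED TaskUpdate messages on /task_planning/updates with the data field jsonpickle-encoded, so `rostopic echo /task_planning/updates` is enough to watch the lifecycle of a run.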
+ """ - Override this method for a particular task.""" - pass + def wrapper(*args, **kwargs): + return Task(func, *args, **kwargs) + return wrapper - def execute(self, userdata): - self.initial_state = self.state - return self.run(userdata) - def publish_desired_pose_global(self, pose): - self.task_state.desired_pose_global_publisher.publish(pose) +class Yield(Generic[YieldType, SendType]): + """ + A class to allow coroutines to yield and accept input. + """ - def publish_desired_power(self, twist_power): - self.task_state.desired_twist_power_publisher.publish(twist_power) + def __init__(self, value: YieldType = None): + self.value = value - def publish_desired_twist(self, twist_velocity): - self.task_state.desired_twist_velocity_publisher.publish(twist_velocity) + def __await__(self) -> Generator[YieldType, SendType, SendType]: + return (yield self.value) diff --git a/onboard/catkin_ws/src/task_planning/scripts/task_runner.py b/onboard/catkin_ws/src/task_planning/scripts/task_runner.py index 1fe3f2f9a..5c2d87c50 100755 --- a/onboard/catkin_ws/src/task_planning/scripts/task_runner.py +++ b/onboard/catkin_ws/src/task_planning/scripts/task_runner.py @@ -1,71 +1,123 @@ #!/usr/bin/env python3 import rospy -import smach -import tf -# from smach_test import controls_testing -from interface.controls import ControlsInterface -from buoy_task import BuoyTask -from interface.cv import CVInterface -# from move_tasks import MoveToPoseGlobalTask -import random - - -class TaskRunner(smach.StateMachine): - RATE = 30 # Hz - - def __init__(self): - super(TaskRunner, self).__init__(outcomes=['done']) - rospy.init_node("task_planning") - self.listener = tf.TransformListener() - self.controls = ControlsInterface(self.listener) - self.cv = CVInterface() - - self.x, self.y = random.randint(-10, 10), random.randint(-10, 10) - - with self: - smach.StateMachine.add('TEST', BuoyTask(self.listener, self.controls, self.cv), - transitions={'done': 'done'}) - # smach.StateMachine.add('TEST', MoveToPoseGlobalTask(self.x, self.y, 0, 0, 0, 0, self.controls), - # transitions={'done': 'done', 'continue': 'TEST'}) - # smach.StateMachine.add('TEST', controls_testing(self.controls, self.listener), - # transitions={'done': 'done'}) - - def execute(self): - rospy.loginfo("Waiting for transform listener") - self.listener.waitForTransform('odom', 'base_link', rospy.Time(), rospy.Duration(15)) - super(TaskRunner, self).execute() +import tf2_ros +import math +from tqdm import tqdm + +from task import Task, TaskStatus, TaskUpdatePublisher +from interface.controls import Controls +from interface.state import State +from interface.cv import CV +import comp_tasks def main(): + + main_initialized = False + rospy.init_node("task_planning") + bypass = rospy.get_param("~bypass") + untethered = rospy.get_param("~untethered") + + # When rospy is shutdown, if main finished initializing, publish that it has closed + def publish_close(): + if main_initialized: + TaskUpdatePublisher().publish_update(Task.MAIN_ID, Task.MAIN_ID, "main", TaskStatus.CLOSED, None) + + rospy.on_shutdown(publish_close) + + # Initialize transform buffer and listener + tfBuffer = tf2_ros.Buffer() + _ = tf2_ros.TransformListener(tfBuffer) + + # Initialize interfaces + Controls(bypass) + state = State(bypass, tfBuffer) + CV(bypass) + + # Initialize the task update publisher + TaskUpdatePublisher() + + # Wait one second for all publishers and subscribers to start + rospy.sleep(1) + + # Ensure transform from odom to base_link is available try: - TaskRunner().execute() - except 
rospy.ROSInterruptException: + _ = tfBuffer.lookup_transform('odom', 'base_link', rospy.Time(), rospy.Duration(15)) + except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException): + rospy.logerr("Failed to get transform") + return + + # Ensure state is available + while not state.state: pass - # from geometry_msgs.msg import Pose, Quaternion, Point - # from tf.transformations import quaternion_from_euler - # from custom_msgs.msg import ControlsDesiredPoseAction, ControlsDesiredPoseGoal - # import actionlib - - # DESIRED_POSE_ACTION = 'controls/desired_pose' - # DESIRED_TWIST_ACTION = 'controls/desired_twist' - - # desired_pose_client = actionlib.SimpleActionClient( - # DESIRED_POSE_ACTION, ControlsDesiredPoseAction) - - # x, y, z = 5, 0, 0 - # roll, pitch, yaw = 0, 0, 0 - - # desired_pose = Pose() - # desired_pose.position = Point(x=x, y=y, z=z) - # desired_pose.orientation = Quaternion( - # * - # quaternion_from_euler( - # roll, - # pitch, - # yaw)) - - # desired_pose_client.send_goal(ControlsDesiredPoseGoal(pose=desired_pose)) + + # Main has initialized + TaskUpdatePublisher().publish_update(Task.MAIN_ID, Task.MAIN_ID, "main", TaskStatus.INITIALIZED, None) + main_initialized = True + + # Run tasks + try: + # Tasks to run + tasks = [ + comp_tasks.initial_submerge(-0.7, parent=Task.MAIN_ID), + comp_tasks.coin_flip(parent=Task.MAIN_ID), + comp_tasks.yaw_to_cv_object('gate_red_cw', direction=1, yaw_threshold=math.radians(10), + latency_threshold=1, depth_level=0.7, parent=Task.MAIN_ID), + comp_tasks.gate_task(offset=-0.1, direction=-1, parent=Task.MAIN_ID), + comp_tasks.gate_style_task(depth_level=0.9, parent=Task.MAIN_ID), + comp_tasks.yaw_to_cv_object('buoy', direction=1, depth_level=0.7, parent=Task.MAIN_ID), + comp_tasks.buoy_task(turn_to_face_buoy=False, depth=0.7, parent=Task.MAIN_ID), + comp_tasks.after_buoy_task(parent=Task.MAIN_ID) + + + # comp_tasks.align_path_marker(direction=-1, parent=Task.MAIN_ID), + # comp_tasks.center_path_marker(parent=Task.MAIN_ID), + # comp_tasks.yaw_to_cv_object('path_marker', yaw_threshold=math.radians(5), direction=-1, depth_level=0.5, + # parent=Task.MAIN_ID), + # comp_tasks.dead_reckoning_path_marker_to_bin(maximum_distance=4, parent=Task.MAIN_ID), + # comp_tasks.path_marker_to_pink_bin(maximum_distance=6, parent=Task.MAIN_ID), + # comp_tasks.buoy_to_octagon(direction=1, move_forward=0, parent=Task.MAIN_ID), + # comp_tasks.let_search_for_bin_turtlesim_style_because_why_not(parent=Task.MAIN_ID), + # comp_tasks.bin_task(parent=Task.MAIN_ID), + # comp_tasks.yaw_to_cv_object('bin_pink_front', direction=-1, yaw_threshold=math.radians(15), + # depth_level=1.0, parent=Task.MAIN_ID), + # comp_tasks.octagon_task(direction=1, parent=Task.MAIN_ID), + ] + input("Press enter to run tasks...") + + if untethered: + rospy.loginfo("Countdown started...") + for i in tqdm(range(10, 0, -1)): + rospy.sleep(1) + if rospy.is_shutdown(): + break + Controls().call_enable_controls(True) + + rospy.loginfo("Running tasks.") + + # Step through tasks, stopping if rospy is shutdown + rate = rospy.Rate(30) + for t in tasks: + while not t.done and not rospy.is_shutdown(): + t.step() + rate.sleep() + if rospy.is_shutdown(): + break + + if untethered: + Controls().call_enable_controls(False) + + except BaseException as e: + + # Main has errored + TaskUpdatePublisher().publish_update(Task.MAIN_ID, Task.MAIN_ID, "main", TaskStatus.ERRORED, e) + raise + + else: + + # Main has returned + TaskUpdatePublisher().publish_update(Task.MAIN_ID, Task.MAIN_ID, 
"main", TaskStatus.RETURNED, None) if __name__ == '__main__': diff --git a/onboard/catkin_ws/src/task_planning/scripts/task_state.py b/onboard/catkin_ws/src/task_planning/scripts/task_state.py deleted file mode 100644 index ad2c96c1f..000000000 --- a/onboard/catkin_ws/src/task_planning/scripts/task_state.py +++ /dev/null @@ -1,42 +0,0 @@ -import rospy - -from nav_msgs.msg import Odometry -from geometry_msgs.msg import Pose, Twist -from custom_msgs.msg import CVObject - - -class TaskState: - STATE_TOPIC = '/state' - DESIRED_POSE_TOPIC = 'controls/desired_pose' - DESIRED_TWIST_POWER_TOPIC = 'controls/desired_power' - DESIRED_TWIST_VELOCITY_TOPIC = 'controls/desired_twist' - CV_DATA_TOPICS = ["/cv/simulation/bin/left", - "/cv/simulation/bootleggerbuoy/left", - "/cv/simulation/gate/left", - "/cv/simulation/gateleftchild/left", - "/cv/simulation/gaterightchild/left", - "/cv/simulation/gmanbuoy/left", - "/cv/simulation/octagon/left", - "/cv/simulation/pole/left", - "/cv/simulation/straightpathmarker/left"] - - def __init__(self): - self.state_listener = rospy.Subscriber(self.STATE_TOPIC, Odometry, self._on_receive_state) - self.desired_pose_global_publisher = rospy.Publisher(self.DESIRED_POSE_TOPIC, Pose, queue_size=5) - self.desired_twist_power_publisher = rospy.Publisher(self.DESIRED_TWIST_POWER_TOPIC, Twist, queue_size=5) - self.desired_twist_velocity_publisher = rospy.Publisher(self.DESIRED_TWIST_VELOCITY_TOPIC, Twist, queue_size=5) - self.state = None - - self.cv_data = {} - for topic in self.CV_DATA_TOPICS: - name = topic.replace("/left", "").split("/")[-1] - self.cv_data[name] = None - rospy.Subscriber(topic, CVObject, self._on_receive_cv_data, name) - - def _on_receive_state(self, state): - """Receive the state, update initial_state if it is empty - and the task is running""" - self.state = state - - def _on_receive_cv_data(self, cv_data, object_type): - self.cv_data[object_type] = cv_data diff --git a/onboard/catkin_ws/src/task_planning/scripts/task_utils.py b/onboard/catkin_ws/src/task_planning/scripts/task_utils.py deleted file mode 100644 index 87cd3e74c..000000000 --- a/onboard/catkin_ws/src/task_planning/scripts/task_utils.py +++ /dev/null @@ -1,279 +0,0 @@ -import numpy as np -import rospy -import smach -import tf2_geometry_msgs -import tf2_ros -from geometry_msgs.msg import Vector3, Pose, PoseStamped, PoseWithCovariance, \ - Twist, TwistStamped, TwistWithCovariance, Point, Quaternion, PointStamped -from nav_msgs.msg import Odometry -from tf.transformations import euler_from_quaternion, quaternion_multiply -from std_msgs.msg import Header - - -def linear_distance(point1, point2): - """Find linear distance between two points. - - Parameters: - point1 (Point): location 1 - point2 (Point): location 2 - - Returns: - float: magnitude of the linear distance between the points - """ - - vector1 = np.array([point1.x, point1.y, point1.z]) - vector2 = np.array([point2.x, point2.y, point2.z]) - distance = np.linalg.norm(vector2 - vector1) - return distance - - -def angular_distance_quat(quat1, quat2): - """Find the difference between two orientations (quaternions). 
- - Parameters: - quat1 (Quaternion): orientation 1 - quat2 (Quaternion): orientation 2 - - Returns: - Vector3: magnitude of the two orientations' differences in each axis (roll, pitch, yaw), in radians - """ - rpy1 = euler_from_quaternion([quat1.x, quat1.y, quat1.z, quat1.w]) - rpy2 = euler_from_quaternion([quat2.x, quat2.y, quat2.z, quat2.w]) - return angular_distance_rpy(rpy1, rpy2) - - -def angular_distance_rpy(rpy1, rpy2): - """Find the difference between two orientations (roll-pitch-yaw). - - Parameters: - rpy1 (list): orientation 1 (roll, pitch, yaw), in radians - rpy2 (list): orientation 2 (roll, pitch, yaw), in radians - - Returns: - Vector3: magnitude of the two orientations' differences in each axis - """ - roll = np.fabs(rpy1[0] - rpy2[0]) - pitch = np.fabs(rpy1[1] - rpy2[1]) - yaw = np.fabs(rpy1[2] - rpy2[2]) - return Vector3(roll, pitch, yaw) - - -def at_pose(current_pose, desired_pose, linear_tol=0.1, angular_tol=3): - """Check if within tolerance of a pose (position and orientation). - - Parameters: - current_pose (Pose): current pose - desired_pose (Pose): pose to check with - linear_tol (float): allowable linear distance for robot to be considered at pose - angular_tol (float): allowable angular tolerance (radians) for robot to be considered at pose, applied to all axes - - Returns: - Boolean: true if current_pose is within tolerances of desired_pose - """ - linear = linear_distance(current_pose.position, desired_pose.position) < linear_tol - angular_dist = angular_distance_quat(current_pose.orientation, desired_pose.orientation) - angular = np.all(np.array([angular_dist.x, angular_dist.y, angular_dist.z]) < (np.ones((3)) * angular_tol)) - return linear and angular - - -def at_vel(current_twist, desired_twist, linear_tol=0.1, angular_tol=0.3): - """Check if within tolerance of a twist (linear and angular velocity) - - Parameters: - current_twist (Twist): current twist - desired_twist (Twist): twist to check with - linear_tol (float): allowable linear velocity for robot to be considered at twist - angular_tol (float): allowable angular velocity for robot to be considered at twist, applied to all axes - - Returns: - Boolean: true if current_twist is within tolerances of desired_twist - """ - - lin_curr_vel = np.linalg.norm([current_twist.linear.x, current_twist.linear.y, current_twist.linear.z]) - lin_des_vel = np.linalg.norm([desired_twist.linear.x, desired_twist.linear.y, desired_twist.linear.z]) - linear = np.fabs(lin_curr_vel - lin_des_vel) < linear_tol - - ang_curr_vel = np.linalg.norm([current_twist.angular.x, current_twist.angular.y, current_twist.angular.z]) - ang_des_vel = np.linalg.norm([desired_twist.angular.x, desired_twist.angular.y, desired_twist.angular.z]) - angular = np.fabs(ang_curr_vel - ang_des_vel) < angular_tol - - return linear and angular - - -def stopped_at_pose(current_pose, desired_pose, current_twist): - """Check if within tolerance of a pose (position and orientation) and current_twist = 0 - - Parameters: - current_pose (Pose): current pose - desired_pose (Pose): pose to check with - current_twist (Twist): current twist - - Returns: - Boolean: true if stopped (current_twist = 0) at desired_pose - """ - # FIXME revert hackfix - current_pose.position.z = 0 - desired_pose.position.z = 0 - at_desired_pose = at_pose(current_pose, desired_pose, 0.2, 12) - at_desired_vel = at_vel(current_twist, Twist(), 0.6, 6) - - # print("At Pose:", at_desired_pose, " At Vel:", at_desired_vel) - - return at_desired_pose and at_desired_vel - - -def 
transform_pose(listener, base_frame, target_frame, pose): - pose_stamped = PoseStamped() - pose_stamped.pose = pose - pose_stamped.header.frame_id = base_frame - - return listener.transformPose(target_frame, pose_stamped).pose - - -def transform(origin_frame, dest_frame, poseORodom): - """Transforms poseORodom from origin_frame to dest_frame frame - - Arguments: - origin_frame: the starting frame - dest_frame: the frame to trasform to - poseORodom: the Pose, PoseStamped, Odometry, or message to transform - - Returns: - Pose, PoseStamped, Odometry : The transformed position - """ - - tfBuffer = tf2_ros.Buffer() - trans = tfBuffer.lookup_transform(dest_frame, origin_frame, rospy.Time(), rospy.Duration(0.5)) - - if isinstance(poseORodom, PoseStamped): - transformed = tf2_geometry_msgs.do_transform_pose(poseORodom, trans) - return transformed - - elif isinstance(poseORodom, Pose): - temp_pose_stamped = PoseStamped() - temp_pose_stamped.pose = poseORodom - transformed = tf2_geometry_msgs.do_transform_pose(temp_pose_stamped, trans) - return transformed.pose - - elif isinstance(poseORodom, Odometry): - temp_pose_stamped = PoseStamped() - temp_twist_stamped = TwistStamped() - temp_pose_stamped.pose = poseORodom.pose.pose - temp_twist_stamped.twist = poseORodom.twist.twist - transformed_pose_stamped = tf2_geometry_msgs.do_transform_pose(temp_pose_stamped, trans) - - # twist as points - twist_poseORodom = poseORodom.twist.twist - temp_twist_point_linear = Point() - temp_twist_point_angular = Point() - temp_twist_point_linear.x = twist_poseORodom.linear.x - temp_twist_point_linear.y = twist_poseORodom.linear.y - temp_twist_point_linear.z = twist_poseORodom.linear.z - temp_twist_point_angular.x = twist_poseORodom.angular.x - temp_twist_point_angular.y = twist_poseORodom.angular.y - temp_twist_point_angular.z = twist_poseORodom.angular.z - twist_point_linear_stamped = PointStamped(point=temp_twist_point_linear) - twist_point_angular_stamped = PointStamped(point=temp_twist_point_angular) - - # points transformed - transformed_twist_point_linear_stamped = tf2_geometry_msgs.do_transform_point( - twist_point_linear_stamped, trans) - transformed_twist_point_angular_stamped = tf2_geometry_msgs.do_transform_point( - twist_point_angular_stamped, trans) - - # points to twist - transformed_twist = Twist() - transformed_twist.linear.x = transformed_twist_point_linear_stamped.point.x - transformed_twist.linear.y = transformed_twist_point_linear_stamped.point.y - transformed_twist.linear.z = transformed_twist_point_linear_stamped.point.z - transformed_twist.angular.x = transformed_twist_point_angular_stamped.point.x - transformed_twist.angular.y = transformed_twist_point_angular_stamped.point.y - transformed_twist.angular.z = transformed_twist_point_angular_stamped.point.z - - transformed_odometry = Odometry(header=Header(frame_id=dest_frame), child_frame_id=dest_frame, - pose=PoseWithCovariance(pose=transformed_pose_stamped.pose), - twist=TwistWithCovariance(twist=transformed_twist)) - return transformed_odometry - - else: - rospy.logerr("Invalid message type passed to transform()") - return None - - -def add_poses(pose_list): - """Adds a list of poses - - Arguments: - pose_list: list of poses to add - - Returns: - Pose: the sum of the Poses - """ - - p_sum = Point(0, 0, 0) - q_sum = [0, 0, 0, 1] - - for pose in pose_list: - p_sum.x += pose.position.x - p_sum.y += pose.position.y - p_sum.z += pose.position.z - - q_sum = quaternion_multiply( - [pose.orientation.x, pose.orientation.y, pose.orientation.z, 
pose.orientation.w], q_sum) - - return Pose(p_sum, Quaternion(*q_sum)) - - -def parse_pose(pose): - pose_dict = {'x': pose.position.x, 'y': pose.position.y, 'z': pose.position.z} - pose_dict['roll'], pose_dict['pitch'], pose_dict['yaw'] = euler_from_quaternion( - [pose.orientation.x, - pose.orientation.y, - pose.orientation.z, - pose.orientation.w]) - return pose_dict - - -def object_vector(cv_obj_data): - print(cv_obj_data) - if not cv_obj_data or cv_obj_data.label == 'none': - return None - - return [cv_obj_data.x, cv_obj_data.y, cv_obj_data.z] - - -def cv_object_position(cv_obj_data): - if not cv_obj_data or cv_obj_data.label == 'none': - return None - return [cv_obj_data.x, cv_obj_data.y, cv_obj_data.z] - - -class PointToPoseTask(smach.State): - """ - Converts a point to a pose with the current orientation of the robot (optionally multiplied by a quaternion) - """ - def __init__(self, controls, rotation=[0, 0, 0, 1]): - super().__init__(outcomes=["done"], input_keys=["point"], output_keys=["pose"]) - self.rotation = rotation - self.controls = controls - - def execute(self, userdata): - pose = Pose() - pose.position = userdata.point - quat = self.controls.get_state().pose.pose.orientation - pose.orientation = Quaternion(*quaternion_multiply( - [quat.x, quat.y, quat.z, quat.w], self.rotation)) - userdata.pose = pose - return "done" - - -class LambdaTask(smach.State): - """ - A task to wrap a function as a simple task - """ - def __init__(self, func, outcomes, input_keys=[], output_keys=[]): - super().__init__(outcomes=outcomes, input_keys=input_keys, output_keys=output_keys) - self.func = func - - def execute(self, userdata): - return self.func(userdata) diff --git a/onboard/catkin_ws/src/task_planning/scripts/test_move.py b/onboard/catkin_ws/src/task_planning/scripts/test_move.py deleted file mode 100644 index de1b10ad8..000000000 --- a/onboard/catkin_ws/src/task_planning/scripts/test_move.py +++ /dev/null @@ -1,7 +0,0 @@ -import random -import move_tasks - - -def main(): - x, y = random.randint(-10, 10), random.randint(-10, 10) - move_tasks.MoveToPoseGlobalTask(x, y, 0, 0, 0, 0, None, None).execute(None) diff --git a/onboard/catkin_ws/src/task_planning/scripts/test_move_2.py b/onboard/catkin_ws/src/task_planning/scripts/test_move_2.py deleted file mode 100644 index ac5570399..000000000 --- a/onboard/catkin_ws/src/task_planning/scripts/test_move_2.py +++ /dev/null @@ -1,20 +0,0 @@ -import rospy -from custom_msgs.msg import CVObject -from move_tasks import MoveToPoseGlobalTask - - -class TestMoveSubscriber: - def __init__(self): - # gate, gate_side, gate_tick, gate_top, start_gate, buoy_bootlegger, buoy_gman - path = 'gate' - self.sub = rospy.Subscriber(f"/move_base/result/{path}", CVObject, self.callback) - - def callback(self, obj: CVObject): - MoveToPoseGlobalTask(*self.imlazy(obj.pose.position, 'x', 'y', 'z'), 0, 0, 0).execute() - - def imlazy(obj, *args): - '''Returns a list of attributes of an object.''' - items = [] - for arg in args: - items.append(getattr(obj, arg)) - return obj diff --git a/onboard/catkin_ws/src/task_planning/scripts/utils/coroutine_utils.py b/onboard/catkin_ws/src/task_planning/scripts/utils/coroutine_utils.py new file mode 100644 index 000000000..334bc9cb9 --- /dev/null +++ b/onboard/catkin_ws/src/task_planning/scripts/utils/coroutine_utils.py @@ -0,0 +1,79 @@ +import rospy +from typing import Callable, Coroutine, Optional, TypeVar + +from task import Task, Yield + + +SendType = TypeVar('SendType') +TransformedSendType = TypeVar('TransformedSendType') 
+YieldType = TypeVar('YieldType') +TransformedYieldType = TypeVar('TransformedYieldType') +ReturnType = TypeVar('ReturnType') +TransformedReturnType = TypeVar('TransformedReturnType') + + +async def transform(task: Task[YieldType, TransformedSendType, ReturnType], + send_transformer: Optional[Callable[[SendType], TransformedSendType]] = None, + yield_transformer: Optional[Callable[[YieldType], TransformedYieldType]] = None, + return_transformer: Optional[Callable[[ReturnType], TransformedReturnType]] = None) -> \ + Coroutine[TransformedYieldType, SendType, TransformedReturnType]: + """ + Transform the input and output of a task. + + Args: + task: The task to transform + send_transformer: A function to transform the values sent into the task. If None, the values are not transformed + (SendType and TransformedSendType are the same). + yield_transformer: A function to transform the values yielded by the task. If None, the values are not + transformed (YieldType and TransformedYieldType are the same). + return_transformer: A function to transform the value returned by the task. If None, the value is not + transformed (ReturnType and TransformedReturnType are the same). + + Yields: + The transformed yielded value of the task + + Returns: + The transformed returned value of the task + """ + + input = None + output = None + while not task.done: + # Send non-None input only if the task has been started + if task.started: + + # Yield transformed output and accept input + input = await Yield(output) + + # Transform the input + if send_transformer: + input = send_transformer(input) + + # Send transformed input + output = task.send(input) + + # If the task is done, output is the return value, so don't transform it with yield_transformer + # Instead, break the loop and transform the return value + if task.done: + break + + # Transform the yielded value + if yield_transformer: + output = yield_transformer(output) + + # Transform the return value + if return_transformer: + output = return_transformer(output) + + return output + + +async def sleep(secs: float): + """ + Sleep for a given number of seconds. Yields frequently, then returns when the time has elapsed. + """ + + duration = rospy.Duration(secs) + start_time = rospy.Time.now() + while start_time + duration > rospy.Time.now(): + await Yield() diff --git a/onboard/catkin_ws/src/task_planning/scripts/utils/geometry_utils.py b/onboard/catkin_ws/src/task_planning/scripts/utils/geometry_utils.py new file mode 100644 index 000000000..5184486cd --- /dev/null +++ b/onboard/catkin_ws/src/task_planning/scripts/utils/geometry_utils.py @@ -0,0 +1,304 @@ +import numpy as np +from typing import List, Tuple + +import rospy +from geometry_msgs.msg import Vector3, Pose, PoseStamped, \ + Twist, Point, Quaternion +import tf2_geometry_msgs +import tf2_ros + +from transforms3d.euler import euler2quat, quat2euler +from transforms3d.quaternions import qmult + + +def vector3_to_numpy(vector: Vector3) -> np.ndarray: + """ + Convert a Vector3 to a numpy array. + + Args: + vector: The Vector3 to convert. + + Returns: + The numpy array. The order of the elements is [x, y, z]. + """ + return np.array([vector.x, vector.y, vector.z]) + + +def point_to_numpy(point: Point) -> np.ndarray: + """ + Convert a Point to a numpy array. + + Args: + point: The Point to convert. + + Returns: + The numpy array. The order of the elements is [x, y, z]. 
+ """ + return np.array([point.x, point.y, point.z]) + + +def transforms3d_quat_to_geometry_quat(quat: np.ndarray) -> Quaternion: + """ + Convert a quaternion from the transforms3d library to a geometry_msgs/Quaternion. + + Args: + quat: The transforms3d quaternion to convert. + + Returns: + The converted geometry_msgs/Quaternion. + """ + return Quaternion(quat[1], quat[2], quat[3], quat[0]) + + +def geometry_quat_to_transforms3d_quat(quat: Quaternion) -> np.ndarray: + """ + Convert a quaternion from a geometry_msgs/Quaternion to the format used by the transforms3d library. + + Args: + quat: The geometry_msgs/Quaternion to convert. + + Returns: + The converted quaternion in the format used by the transforms3d library. + """ + return np.array([quat.w, quat.x, quat.y, quat.z]) + + +def point_linear_distance(point1: Point, point2: Point) -> float: + """ + Find the linear distance between two points in 3D space. + + Args: + point1: The first point. + point2: The second point. + + Returns: + The linear (Euclidean) distance between point1 and point2. + """ + return np.linalg.norm(point_to_numpy(point1) - point_to_numpy(point2)) + + +def vector3_linear_distance(vector1: Vector3, vector2: Vector3) -> float: + """ + Find the linear distance between two vectors in 3D space. + + Args: + vector1: The first vector. + vector2: The second vector. + + Returns: + The linear (Euclidean) distance between vector1 and vector2. + """ + return np.linalg.norm(vector3_to_numpy(vector1) - vector3_to_numpy(vector2)) + + +def angular_distance_quat(quat1: Quaternion, quat2: Quaternion) -> Vector3: + """ + Find the difference between two orientations (quaternions). + + Args: + quat1: The first orientation. + quat2: The second orientation. + + Returns: + The magnitude of the two orientations' differences in each axis. + """ + rpy1 = quat2euler(geometry_quat_to_transforms3d_quat(quat1)) + rpy2 = quat2euler(geometry_quat_to_transforms3d_quat(quat2)) + return angular_distance_rpy(rpy1, rpy2) + + +def angular_distance_rpy(rpy1: Tuple[float, float, float], rpy2: Tuple[float, float, float]) -> Vector3: + """ + Find the difference between two orientations, provided as (roll, pitch, yaw). All arguments must be in radians, + or they all must be in degrees. The return value will be in the same units as the input. + + Args: + rpy1: The first orientation with three values, in order: roll, pitch, yaw. + rpy2: The second orientation with three values, in order: roll, pitch, yaw. + + Returns: + The magnitude of the differences between the two orientations in each axis, in the same units as the input. + """ + roll = np.fabs(rpy1[0] - rpy2[0]) + pitch = np.fabs(rpy1[1] - rpy2[1]) + yaw = np.fabs(rpy1[2] - rpy2[2]) + return Vector3(roll, pitch, yaw) + + +def at_pose(current_pose: Pose, desired_pose: Pose, linear_tol: float = 0.15, roll_tol: float = 0.2, + pitch_tol: float = 0.3, yaw_tol: float = 0.10) -> bool: + """ + Check if current pose is within tolerance of a desired pose (position and orientation). + + Args: + current_pose: The current pose. + desired_pose: The desired pose. + linear_tol: The allowable linear distance in meters for the robot to be considered at the desired pose. + angular_tol: The allowable angular distance in radians for each axis for the robot to be considered at the + desired pose. + + Returns: + True if the current pose is within the tolerances of the desired pose. 
+ """ + + linear = point_linear_distance(current_pose.position, desired_pose.position) < linear_tol + angular_dist = angular_distance_quat(current_pose.orientation, desired_pose.orientation) + angular = np.all(vector3_to_numpy(angular_dist) < np.array([roll_tol, pitch_tol, yaw_tol])) + return linear and angular + + +def at_vel(current_twist: Twist, desired_twist: Twist, linear_tol: float = 0.02, angular_tol: float = 0.02) -> bool: + """ + Check if current twist within tolerance of a desired twist (linear and angular velocities). + + Args: + current_twist: The current twist. + desired_twist: The desired twist. + linear_tol: The allowable distance between the current and desired linear velocity vectors in m/s for the robot + to be considered at the desired velocity. + angular_tol: The allowable difference between the current and desired angular velocity vectors in rad/s for the + robot to be considered at the desired velocity. + + Returns: + True if the current twist is within the tolerances of the desired twist. + """ + + linear = vector3_linear_distance(current_twist.linear, desired_twist.linear) < linear_tol + angular = vector3_linear_distance(current_twist.angular, desired_twist.angular) < angular_tol + return linear and angular + + +def stopped_at_pose(current_pose: Pose, desired_pose: Pose, current_twist: Twist) -> bool: + """ + Check if the robot is at the desired pose and has stopped moving. + + Args: + current_pose: The current pose. + desired_pose: The desired pose. + current_twist: The current twist. + + Returns: + True if the robot is at the desired pose and has stopped moving, within the default tolerances. + """ + + at_desired_pose = at_pose(current_pose, desired_pose) + at_desired_vel = at_vel(current_twist, Twist()) + + return at_desired_pose and at_desired_vel + + +def transform_pose(tfBuffer: tf2_ros.Buffer, base_frame: str, target_frame: str, pose: Pose) -> Pose: + """ + Transform a pose from one frame to another. Uses the latest transform available at the time of the call. + + Args: + tfBuffer: The transform buffer. + base_frame: The frame of the input pose. + target_frame: The frame to transform the pose to. + pose: The pose to transform. + + Returns: + The transformed pose. + """ + + pose_stamped = PoseStamped() + pose_stamped.pose = pose + pose_stamped.header.frame_id = base_frame + + trans = tfBuffer.lookup_transform(target_frame, base_frame, rospy.Time(0)) + transformed = tf2_geometry_msgs.do_transform_pose(pose_stamped, trans) + + return transformed.pose + + +def add_poses(pose_list: List[Pose]) -> Pose: + """ + Add a list of poses together. Sums the positions and multiplies the orientations. + + Args: + pose_list: A list of poses to add together. + + Returns: + The sum of the poses. + """ + + p_sum = Point(0, 0, 0) + q_sum = np.array([1, 0, 0, 0]) + + for pose in pose_list: + p_sum.x += pose.position.x + p_sum.y += pose.position.y + p_sum.z += pose.position.z + q_sum = qmult(geometry_quat_to_transforms3d_quat(pose.orientation), q_sum) + + return Pose(p_sum, transforms3d_quat_to_geometry_quat(q_sum)) + + +def parse_pose(pose: Pose) -> dict: + """ + Convert a Pose message to a dictionary. + + Args: + pose: The Pose message to convert. + + Returns: + A dictionary with the position and orientation of the pose. It has the keys 'x', 'y', 'z', 'roll', 'pitch', and + 'yaw'. Roll, pitch, and yaw are in radians. 
+ """ + + pose_dict = {'x': pose.position.x, 'y': pose.position.y, 'z': pose.position.z} + pose_dict['roll'], pose_dict['pitch'], pose_dict['yaw'] = quat2euler( + geometry_quat_to_transforms3d_quat(pose.orientation)) + return pose_dict + + +def create_pose(x: float, y: float, z: float, roll: float, pitch: float, yaw: float) -> Pose: + """ + Create a Pose message from position and orientation values. + + Args: + x: The x position. + y: The y position. + z: The z position. + roll: The roll orientation in radians. + pitch: The pitch orientation in radians. + yaw: The yaw orientation in radians. + + Returns: + The Pose message. + """ + + pose = Pose() + pose.position = Point(x=x, y=y, z=z) + pose.orientation = transforms3d_quat_to_geometry_quat(euler2quat(roll, pitch, yaw)) + return pose + + +def local_pose_to_global(tfBuffer: tf2_ros.Buffer, pose: Pose) -> Pose: + """ + Convert a pose from local coordinates to global coordinates. + + Args: + tfBuffer: The transform buffer. + pose: The pose to convert, in "base_link" frame. + + Returns: + The global pose in "odom" frame. + """ + + return transform_pose(tfBuffer, 'base_link', 'odom', pose) + + +def global_pose_to_local(tfBuffer: tf2_ros.Buffer, pose: Pose) -> Pose: + """ + Convert a pose from global coordinates to local coordinates. + + Args: + tfBuffer: The transform buffer. + pose: The pose to convert, in "odom" frame. + + Returns: + The local pose in "base_link" frame. + """ + + return transform_pose(tfBuffer, 'odom', 'base_link', pose) diff --git a/onboard/catkin_ws/src/task_planning/scripts/utils/other_utils.py b/onboard/catkin_ws/src/task_planning/scripts/utils/other_utils.py new file mode 100644 index 000000000..c9af4fbe1 --- /dev/null +++ b/onboard/catkin_ws/src/task_planning/scripts/utils/other_utils.py @@ -0,0 +1,8 @@ +def singleton(cls: type): + instances = {} + + def getinstance(*args, **kwargs): + if cls not in instances: + instances[cls] = cls(*args, **kwargs) + return instances[cls] + return getinstance diff --git a/scripts/comp.sh b/scripts/comp.sh new file mode 100755 index 000000000..e7271cef8 --- /dev/null +++ b/scripts/comp.sh @@ -0,0 +1,67 @@ +#!/bin/bash + +# A tmux script that opens all the necessary terminals to control our robot. Uses untethered:=True in task_runner.launch. 
+# Usage: ./comp.sh + +session="pool-test" +tmux new-session -d -s $session + +tmux split-window -h +tmux split-window -h +tmux split-window -h +tmux select-layout even-horizontal + +tmux select-pane -t 0 +tmux split-window -v -l 50 + +tmux select-pane -t 2 +tmux split-window -v -l 50 + +tmux select-pane -t 4 +tmux split-window -v -l 50 + +tmux select-pane -t 6 +tmux split-window -v -l 50 + +tmux select-pane -t 0 +tmux send-keys 'tmux kill-server' + +tmux select-pane -t 2 +tmux send-keys 'dkill' 'Enter' +tmux send-keys 'onboard' 'Enter' +tmux send-keys 'sleep 0.5' C-m # need shorter delay here so that roscore is started first +tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' +tmux send-keys 'roscore' 'Enter' + +tmux select-pane -t 4 +tmux send-keys 'sleep 2' C-m +tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' +tmux send-keys 'fg-ws' 'Enter' + +tmux select-pane -t 1 +tmux send-keys 'sleep 2' C-m +tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' +tmux send-keys 'roslaunch execute motion.launch enable_recording:=true' + +tmux select-pane -t 3 +tmux send-keys 'sleep 2' C-m +tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' +tmux send-keys 'roslaunch task_planning task_runner.launch untethered:=True' + +tmux select-pane -t 5 +tmux send-keys 'sleep 2' C-m +tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' +tmux send-keys 'rosservice call /controls/enable 1' + +tmux select-pane -t 6 +tmux send-keys 'sleep 2' C-m +tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' +tmux send-keys 'roslaunch cv depthai_mono_detection.launch' 'Enter' + +tmux select-pane -t 7 +tmux send-keys 'sleep 2' C-m +tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' + +tmux select-pane -t 1 + +tmux attach-session -t $session diff --git a/scripts/docker-pool-test.sh b/scripts/docker-pool-test.sh deleted file mode 100755 index cdad58d53..000000000 --- a/scripts/docker-pool-test.sh +++ /dev/null @@ -1,71 +0,0 @@ -#!/bin/bash -# shellcheck disable=SC2016 - -# A tmux script that starts onboard and landside docker containers, -# then opens all the necessary terminals to control our robot. 
-# Usage: ./docker-pool-test.sh - -session="pool-test" - -tmux new-session -d -s $session - -tmux split-window -h -p 50 -tmux split-window -v -p 0 -tmux select-pane -t 0 -tmux split-window -v -p 100 -tmux split-window -v -p 83 -tmux split-window -v -p 66 -tmux split-window -v -p 50 -tmux split-window -v -p 33 - -tmux select-pane -t 0 -tmux send-keys 'ssh robot@192.168.1.1' 'Enter' -tmux send-keys 'docker run -td --privileged --net=host -e ROBOT_NAME=oogway -v /home/robot/robosub-ros:/root/dev/robosub-ros -v /dev:/dev dukerobotics/robosub-ros:onboard' 'Enter' -tmux send-keys 'cd robosub-ros' 'Enter' -tmux send-keys 'docker run --privileged --net=host -td -p 2201:2201 -v ${PWD}:/root/dev/robosub-ros dukerobotics/robosub-ros:landside' 'Enter' - -tmux select-pane -t 1 -tmux send-keys 'sleep 5' C-m -tmux send-keys 'ssh robot@192.168.1.1' 'Enter' -tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' -tmux send-keys 'roscore' 'Enter' - -tmux select-pane -t 2 -tmux send-keys 'sleep 5' C-m -tmux send-keys 'ssh robot@192.168.1.1' 'Enter' -tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' -tmux send-keys 'roslaunch execute motion.launch' - -tmux select-pane -t 3 -tmux send-keys 'sleep 5' C-m -tmux send-keys 'ssh -XY robot@192.168.1.1' 'Enter' -tmux send-keys 'sshpass -p robotics ssh -XY -p 2201 root@192.168.1.1' 'Enter' -tmux send-keys 'roslaunch gui cv_gui.launch' - -tmux select-pane -t 4 -tmux send-keys 'sleep 5' C-m -tmux send-keys 'ssh robot@192.168.1.1' 'Enter' -tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' -tmux send-keys 'rosservice call /enable_controls True' - -tmux select-pane -t 5 -tmux send-keys 'sleep 5' C-m -tmux send-keys 'ssh robot@192.168.1.1' 'Enter' -tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' -tmux send-keys './record.sh .bag' - -tmux select-pane -t 6 -tmux send-keys 'sleep 5' C-m -tmux send-keys 'ssh robot@192.168.1.1' 'Enter' -tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' -tmux send-keys 'rostopic echo /state' - -tmux select-pane -t 7 -tmux send-keys 'sleep 5' C-m -tmux send-keys 'ssh robot@192.168.1.1' 'Enter' -tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' -tmux send-keys 'rosrun controls test_state_publisher.py' - -tmux select-pane -t 1 - -tmux attach-session -t $session \ No newline at end of file diff --git a/scripts/pool-test.sh b/scripts/pool-test.sh deleted file mode 100755 index f05b65f00..000000000 --- a/scripts/pool-test.sh +++ /dev/null @@ -1,60 +0,0 @@ -#!/bin/bash - -# A tmux script that opens all the necessary terminals to control our robot. 
-# Usage: ./pool-test.sh - -session="pool-test" - -tmux new-session -d -s $session - -tmux split-window -h -p 50 -tmux split-window -v -p 0 -tmux select-pane -t 0 -tmux split-window -v -p 100 -tmux split-window -v -p 83 -tmux split-window -v -p 66 -tmux split-window -v -p 50 -tmux split-window -v -p 33 - -tmux select-pane -t 0 -tmux send-keys 'ssh robot@192.168.1.1' 'Enter' -tmux send-keys 'cd robosub-ros' 'Enter' - -tmux select-pane -t 1 -tmux send-keys 'ssh robot@192.168.1.1' 'Enter' -tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' -tmux send-keys 'roscore' 'Enter' - -tmux select-pane -t 2 -tmux send-keys 'ssh robot@192.168.1.1' 'Enter' -tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' -tmux send-keys 'roslaunch execute motion.launch' - -tmux select-pane -t 3 -tmux send-keys 'ssh -XY robot@192.168.1.1' 'Enter' -tmux send-keys 'sshpass -p robotics ssh -XY -p 2201 root@192.168.1.1' 'Enter' -tmux send-keys 'roslaunch gui cv_gui.launch' - -tmux select-pane -t 4 -tmux send-keys 'ssh robot@192.168.1.1' 'Enter' -tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' -tmux send-keys 'rosservice call /enable_controls True' - -tmux select-pane -t 5 -tmux send-keys 'ssh robot@192.168.1.1' 'Enter' -tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' -tmux send-keys './record.sh .bag' - -tmux select-pane -t 6 -tmux send-keys 'ssh robot@192.168.1.1' 'Enter' -tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' -tmux send-keys 'rostopic echo /state' - -tmux select-pane -t 7 -tmux send-keys 'ssh robot@192.168.1.1' 'Enter' -tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' -tmux send-keys 'rosrun controls test_state_publisher.py' - -tmux select-pane -t 1 - -tmux attach-session -t $session \ No newline at end of file diff --git a/scripts/test-launch.py b/scripts/test-launch.py index 35fc07b17..6fc055a1b 100755 --- a/scripts/test-launch.py +++ b/scripts/test-launch.py @@ -20,7 +20,9 @@ 'vectornav/launch/vectornav.launch', 'execute/launch/tasks.launch', 'execute/launch/motion.launch', - 'system_utils/launch/sensor_check.launch' + 'system_utils/launch/sensor_check.launch', + 'system_utils/launch/record_bag.launch', + 'system_utils/launch/system_utils.launch', ] diff --git a/scripts/test.sh b/scripts/test.sh new file mode 100755 index 000000000..1e65459f0 --- /dev/null +++ b/scripts/test.sh @@ -0,0 +1,67 @@ +#!/bin/bash + +# A tmux script that opens all the necessary terminals to control our robot. 
+# Usage: ./test.sh + +session="pool-test" +tmux new-session -d -s $session + +tmux split-window -h +tmux split-window -h +tmux split-window -h +tmux select-layout even-horizontal + +tmux select-pane -t 0 +tmux split-window -v -l 50 + +tmux select-pane -t 2 +tmux split-window -v -l 50 + +tmux select-pane -t 4 +tmux split-window -v -l 50 + +tmux select-pane -t 6 +tmux split-window -v -l 50 + +tmux select-pane -t 0 +tmux send-keys 'tmux kill-server' + +tmux select-pane -t 2 +tmux send-keys 'dkill' 'Enter' +tmux send-keys 'onboard' 'Enter' +tmux send-keys 'sleep 0.5' C-m # need shorter delay here so that roscore is started first +tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' +tmux send-keys 'roscore' 'Enter' + +tmux select-pane -t 4 +tmux send-keys 'sleep 2' C-m +tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' +tmux send-keys 'fg-ws' 'Enter' + +tmux select-pane -t 1 +tmux send-keys 'sleep 2' C-m +tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' +tmux send-keys 'roslaunch execute motion.launch enable_recording:=true' + +tmux select-pane -t 3 +tmux send-keys 'sleep 2' C-m +tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' +tmux send-keys 'roslaunch task_planning task_runner.launch untethered:=False' + +tmux select-pane -t 5 +tmux send-keys 'sleep 2' C-m +tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' +tmux send-keys 'rosservice call /controls/enable 1' + +tmux select-pane -t 6 +tmux send-keys 'sleep 2' C-m +tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' +tmux send-keys 'roslaunch cv depthai_mono_detection.launch' 'Enter' + +tmux select-pane -t 7 +tmux send-keys 'sleep 2' C-m +tmux send-keys 'sshpass -p robotics ssh -p 2200 root@192.168.1.1' 'Enter' + +tmux select-pane -t 1 + +tmux attach-session -t $session
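As a hedged aside on the coroutine utilities added in utils/coroutine_utils.py above: a sketch of how `sleep` and `transform` might be combined inside another task. The task name, the inner task, and the radians-to-degrees conversion are illustrative assumptions, as is importing `utils` as a package from the task_planning scripts directory; `sleep` also assumes it runs inside an initialized ROS node since it uses rospy time.

import math
from task import Task, task
from utils.coroutine_utils import sleep, transform

@task
async def delayed_degrees(self: Task, inner: Task, secs: float):
    # Wait without blocking other tasks, then run `inner`, converting each value it
    # yields from radians to degrees before passing it on to whoever drives this task.
    await sleep(secs)
    return await transform(inner, yield_transformer=math.degrees)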
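Similarly, a hedged sketch of the pure-math helpers in utils/geometry_utils.py above; these need only transforms3d, numpy, and geometry_msgs, with no running ROS node. The import path and the specific numbers are assumptions for illustration.

import math
from utils.geometry_utils import add_poses, at_pose, create_pose, parse_pose

# A 1 m step in x combined with a 90-degree yaw, applied twice.
step = create_pose(1.0, 0.0, 0.0, 0.0, 0.0, math.radians(90))
goal = add_poses([step, step])   # positions sum, orientations multiply
print(parse_pose(goal))          # x is 2.0, roll/pitch ~0, yaw ~pi radians
print(at_pose(goal, goal))       # True: zero error, well within the default tolerances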