From 06d157859f3e28ee0415a104564cae6694058ccd Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Tue, 3 Dec 2024 15:48:09 -0800 Subject: [PATCH 01/38] refactor backend to use np for better single-threaded efficiency --- omnigibson/controllers/controller_base.py | 126 +- omnigibson/controllers/dd_controller.py | 9 +- omnigibson/controllers/ik_controller.py | 84 +- omnigibson/controllers/joint_controller.py | 18 +- .../multi_finger_gripper_controller.py | 39 +- .../controllers/null_joint_controller.py | 9 +- omnigibson/controllers/osc_controller.py | 174 ++- omnigibson/macros.py | 4 + omnigibson/objects/controllable_object.py | 87 +- omnigibson/robots/holonomic_base_robot.py | 35 +- omnigibson/simulator.py | 4 + omnigibson/utils/control_utils.py | 30 - omnigibson/utils/python_utils.py | 18 + omnigibson/utils/transform_utils.py | 31 + omnigibson/utils/transform_utils_np.py | 1229 +++++++++++++++++ omnigibson/utils/usd_utils.py | 89 +- 16 files changed, 1725 insertions(+), 261 deletions(-) create mode 100644 omnigibson/utils/transform_utils_np.py diff --git a/omnigibson/controllers/controller_base.py b/omnigibson/controllers/controller_base.py index 7bb6feeb8..a17131e9f 100644 --- a/omnigibson/controllers/controller_base.py +++ b/omnigibson/controllers/controller_base.py @@ -3,8 +3,12 @@ from enum import IntEnum import torch as th +import numpy as np -from omnigibson.utils.python_utils import Recreatable, Registerable, Serializable, assert_valid_key, classproperty +from omnigibson.utils.python_utils import Recreatable, Registerable, Serializable, assert_valid_key, classproperty, \ + recursively_convert_from_torch +import omnigibson.utils.transform_utils as TT +import omnigibson.utils.transform_utils_np as NT # Global dicts that will contain mappings REGISTERED_CONTROLLERS = dict() @@ -34,6 +38,108 @@ class IsGraspingState(IntEnum): FALSE = -1 +class _ControllerBackend: + array = None + int_array = None + prod = None + cat = None + zeros = None + ones = None + to_numpy = None + from_numpy = None + to_torch = None + from_torch = None + from_torch_recursive = None + allclose = None + arr_type = None + as_int = None + as_float32 = None + pinv = None + meshgrid = None + full = None + logical_or = None + all = None + abs = None + sqrt = None + mean = None + copy = None + eye = None + view = None + T = None + + @classmethod + def set_methods(cls, backend): + for attr, fcn in backend.__dict__.items(): + # Do not override reserved functions + if attr.startswith("__"): + continue + # Set function to this backend + setattr(cls, attr, fcn) + + +class _ControllerTorchBackend(_ControllerBackend): + array = lambda *args: th.tensor(*args, dtype=th.float32) + int_array = lambda *args: th.tensor(*args, dtype=th.int) + prod = th.prod + cat = th.cat + zeros = lambda *args: th.zeros(*args, dtype=th.float32) + ones = lambda *args: th.ones(*args, dtype=th.float32) + to_numpy = lambda x: x.numpy() + from_numpy = lambda x: th.from_numpy(x) + to_torch = lambda x: x + from_torch = lambda x: x + from_torch_recursive = lambda dic: dic + allclose = th.allclose + arr_type = th.Tensor + as_int = lambda arr: arr.int() + as_float32 = lambda arr: arr.float() + pinv = th.linalg.pinv + meshgrid = lambda idx_a, idx_b: th.meshgrid(idx_a, idx_b, indexing="xy") + full = lambda shape, fill_value: th.full(shape, fill_value, dtype=th.float32) + logical_or = th.logical_or + all = th.all + abs = th.abs + sqrt = th.sqrt + mean = lambda val, dim=None, keepdim=False: th.mean(val, dim=dim, keepdim=keepdim) + copy = lambda 
arr: arr.clone() + eye = th.eye + view = lambda arr, shape: arr.view(shape) + T = TT + + +class _ControllerNumpyBackend(_ControllerBackend): + array = lambda *args: np.array(*args, dtype=np.float32) + int_array = lambda *args: np.array(*args, dtype=int) + prod = np.prod + cat = np.concatenate + zeros = lambda *args: np.zeros(*args, dtype=np.float32) + ones = lambda *args: np.ones(*args, dtype=np.float32) + to_numpy = lambda x: x + from_numpy = lambda x: x + to_torch = lambda x: th.from_numpy(x) + from_torch = lambda x: x.numpy() + from_torch_recursive = recursively_convert_from_torch + allclose = np.allclose + arr_type = np.ndarray + as_int = lambda arr: arr.astype(int) + as_float32 = lambda arr: arr.astype(np.float32) + pinv = np.linalg.pinv + meshgrid = lambda idx_a, idx_b: np.ix_(idx_a, idx_b) + full = lambda shape, fill_value: np.full(shape, fill_value, dtype=np.float32) + logical_or = np.logical_or + all = np.all + abs = np.abs + sqrt = np.sqrt + mean = lambda val, dim=None, keepdim=False: np.mean(val, axis=dim, keepdims=keepdim) + copy = lambda arr: np.array(arr) + eye = np.eye + view = lambda arr, shape: arr.reshape(shape) + T = NT + + +_controller_backend = _ControllerBackend + + # Define macros class ControlType: NONE = -1 @@ -112,11 +218,11 @@ def __init__( ] assert "has_limit" in control_limits, "Expected has_limit specified in control_limits, but does not exist." self._dof_has_limits = control_limits["has_limit"] - self._dof_idx = dof_idx.int() + self._dof_idx = _controller_backend.as_int(dof_idx) # Generate goal information self._goal_shapes = self._get_goal_shapes() - self._goal_dim = int(sum(th.prod(th.tensor(shape)) for shape in self._goal_shapes.values())) + self._goal_dim = int(sum(_controller_backend.prod(_controller_backend.array(shape)) for shape in self._goal_shapes.values())) # Initialize some other variables that will be filled in during runtime self._control = None @@ -169,7 +275,7 @@ def _preprocess_command(self, command): Array[float]: Processed command vector """ # Make sure command is a th.tensor - command = th.tensor([command], dtype=th.float32) if type(command) in {int, float} else command + command = _controller_backend.array([command]) if type(command) in {int, float} else command # We only clip and / or scale if self.command_input_limits exists if self._command_input_limits is not None: # Clip @@ -365,12 +471,12 @@ def _load_state(self, state): def serialize(self, state): # Make sure size of the state is consistent, even if we have no goal goal_state_flattened = ( - th.cat([goal_state.flatten() for goal_state in self._goal.values()]) + _controller_backend.cat([goal_state.flatten() for goal_state in self._goal.values()]) if (state)["goal_is_valid"] - else th.zeros(self.goal_dim) + else _controller_backend.zeros(self.goal_dim) ) - return th.cat([th.tensor([state["goal_is_valid"]]), goal_state_flattened]) + return _controller_backend.cat([_controller_backend.array([state["goal_is_valid"]]), goal_state_flattened]) def deserialize(self, state): goal_is_valid = bool(state[0]) @@ -419,11 +525,11 @@ def nums2array(nums, dim): # Else, input is a single value, so we map to a numpy array of correct size and return return ( nums - if isinstance(nums, th.Tensor) + if isinstance(nums, _controller_backend.arr_type) else ( - th.tensor(nums, dtype=th.float32) + _controller_backend.array(nums) if isinstance(nums, Iterable) - else th.ones(dim, dtype=th.float32) * nums + else _controller_backend.ones(dim) * nums ) ) diff --git a/omnigibson/controllers/dd_controller.py 
b/omnigibson/controllers/dd_controller.py index 38207b417..da35e3f86 100644 --- a/omnigibson/controllers/dd_controller.py +++ b/omnigibson/controllers/dd_controller.py @@ -1,6 +1,5 @@ -import torch as th - from omnigibson.controllers import ControlType, LocomotionController +from omnigibson.controllers.controller_base import _controller_backend as cb class DifferentialDriveController(LocomotionController): @@ -107,14 +106,14 @@ def compute_control(self, goal_dict, control_dict): right_wheel_joint_vel = (lin_vel + ang_vel * self._wheel_axle_halflength) / self._wheel_radius # Return desired velocities - return th.tensor([left_wheel_joint_vel, right_wheel_joint_vel]) + return cb.array([left_wheel_joint_vel, right_wheel_joint_vel]) def compute_no_op_goal(self, control_dict): # This is zero-vector, since we want zero linear / angular velocity - return dict(vel=th.zeros(2)) + return dict(vel=cb.zeros(2)) def _compute_no_op_action(self, control_dict): - return th.zeros(2, dtype=th.float32) + return cb.zeros(2) def _get_goal_shapes(self): # Add (2, )-array representing linear, angular velocity diff --git a/omnigibson/controllers/ik_controller.py b/omnigibson/controllers/ik_controller.py index fa0e18b2a..3e2c46d94 100644 --- a/omnigibson/controllers/ik_controller.py +++ b/omnigibson/controllers/ik_controller.py @@ -1,12 +1,10 @@ import math +from collections.abc import Iterable +from omnigibson.controllers.controller_base import _controller_backend as cb -import torch as th - -import omnigibson.utils.transform_utils as T from omnigibson.controllers import ControlType, ManipulationController from omnigibson.controllers.joint_controller import JointController from omnigibson.macros import create_module_macros, gm -from omnigibson.utils.control_utils import orientation_error from omnigibson.utils.processing_utils import MovingAverageFilter from omnigibson.utils.python_utils import assert_valid_key from omnigibson.utils.ui_utils import create_module_logger @@ -44,8 +42,8 @@ def __init__( dof_idx, command_input_limits="default", command_output_limits=( - th.tensor([-0.2, -0.2, -0.2, -0.5, -0.5, -0.5], dtype=th.float32), - th.tensor([0.2, 0.2, 0.2, 0.5, 0.5, 0.5], dtype=th.float32), + (-0.2, -0.2, -0.2, -0.5, -0.5, -0.5), + (0.2, 0.2, 0.2, 0.5, 0.5, 0.5), ), pos_kp=None, pos_damping_ratio=None, @@ -118,11 +116,11 @@ def limiter(target_pos: Array[float], target_quat: Array[float], control_dict: D """ # Store arguments control_dim = len(dof_idx) - self.control_filter = ( - None - if smoothing_filter_size in {None, 0} - else MovingAverageFilter(obs_dim=control_dim, filter_width=smoothing_filter_size) - ) + self.control_filter = None #( + # None + # if smoothing_filter_size in {None, 0} + # else MovingAverageFilter(obs_dim=control_dim, filter_width=smoothing_filter_size) + # ) assert mode in IK_MODES, f"Invalid ik mode specified! 
Valid options are: {IK_MODES}, got: {mode}" # If mode is absolute pose, make sure command input limits / output limits are None @@ -147,29 +145,23 @@ def limiter(target_pos: Array[float], target_quat: Array[float], control_dict: D if command_input_limits is not None: if type(command_input_limits) == str and command_input_limits == "default": command_input_limits = [ - th.tensor([-1.0, -1.0, -1.0, -math.pi, -math.pi, -math.pi], dtype=th.float32), - th.tensor([1.0, 1.0, 1.0, math.pi, math.pi, math.pi], dtype=th.float32), + cb.array([-1.0, -1.0, -1.0, -math.pi, -math.pi, -math.pi]), + cb.array([1.0, 1.0, 1.0, math.pi, math.pi, math.pi]), ] else: - command_input_limits[0][3:] = th.tensor( - [-math.pi] * len(command_input_limits[0][3:]), dtype=th.float32 - ) - command_input_limits[1][3:] = th.tensor( - [math.pi] * len(command_input_limits[1][3:]), dtype=th.float32 - ) + command_input_limits[0][3:] = cb.array([-math.pi] * len(command_input_limits[0][3:])) + command_input_limits[1][3:] = cb.array([math.pi] * len(command_input_limits[1][3:])) if command_output_limits is not None: + if not isinstance(command_output_limits, str) and isinstance(command_output_limits, Iterable): + command_output_limits = cb.array(command_output_limits) if type(command_output_limits) == str and command_output_limits == "default": command_output_limits = [ - th.tensor([-1.0, -1.0, -1.0, -math.pi, -math.pi, -math.pi], dtype=th.float32), - th.tensor([1.0, 1.0, 1.0, math.pi, math.pi, math.pi], dtype=th.float32), + cb.array([-1.0, -1.0, -1.0, -math.pi, -math.pi, -math.pi]), + cb.array([1.0, 1.0, 1.0, math.pi, math.pi, math.pi]), ] else: - command_output_limits[0][3:] = th.tensor( - [-math.pi] * len(command_output_limits[0][3:]), dtype=th.float32 - ) - command_output_limits[1][3:] = th.tensor( - [math.pi] * len(command_output_limits[1][3:]), dtype=th.float32 - ) + command_output_limits[0][3:] = cb.array([-math.pi] * len(command_output_limits[0][3:])) + command_output_limits[1][3:] = cb.array([math.pi] * len(command_output_limits[1][3:])) # Run super init super().__init__( control_freq=control_freq, @@ -197,14 +189,15 @@ def reset(self): @property def state_size(self): # Add state size from the control filter - return super().state_size + self.control_filter.state_size + return super().state_size + (0 if self.control_filter is None else self.control_filter.state_size) def _dump_state(self): # Run super first state = super()._dump_state() # Add internal quaternion target and filter state - state["control_filter"] = self.control_filter.dump_state(serialized=False) + if self.control_filter is not None: + state["control_filter"] = self.control_filter.dump_state(serialized=False) return state @@ -217,17 +210,18 @@ def _load_state(self, state): self._fixed_quat_target = self._goal["target_quat"] # Load relevant info for this controller - self.control_filter.load_state(state["control_filter"], serialized=False) + if self.control_filter is not None: + self.control_filter.load_state(state["control_filter"], serialized=False) def serialize(self, state): # Run super first state_flat = super().serialize(state=state) # Serialize state for this controller - return th.cat( + return cb.cat( [ state_flat, - self.control_filter.serialize(state=state["control_filter"]), + cb.array([]) if self.control_filter is None else self.control_filter.serialize(state=state["control_filter"]), ] ) @@ -236,9 +230,11 @@ def deserialize(self, state): state_dict, idx = super().deserialize(state=state) # Deserialize state for this controller - 
state_dict["control_filter"], deserialized_items = self.control_filter.deserialize(state=state[idx:]) + if self.control_filter is not None: + state_dict["control_filter"], deserialized_items = self.control_filter.deserialize(state=state[idx:]) + idx += deserialized_items - return state_dict, idx + deserialized_items + return state_dict, idx def _update_goal(self, command, control_dict): # Grab important info from control dict @@ -263,11 +259,11 @@ def _update_goal(self, command, control_dict): target_quat = quat_relative elif self.mode == "pose_absolute_ori" or self.mode == "absolute_pose": # Received "delta" ori is in fact the desired absolute orientation - target_quat = T.axisangle2quat(command[3:6]) + target_quat = cb.T.axisangle2quat(command[3:6]) else: # pose_delta_ori control # Grab dori and compute target ori - dori = T.quat2mat(T.axisangle2quat(command[3:6])) - target_quat = T.mat2quat(dori @ T.quat2mat(quat_relative)) + dori = cb.T.quat2mat(cb.T.axisangle2quat(command[3:6])) + target_quat = cb.T.mat2quat(dori @ cb.T.quat2mat(quat_relative)) # Possibly limit to workspace if specified if self.workspace_pose_limiter is not None: @@ -314,23 +310,23 @@ def compute_control(self, goal_dict, control_dict): # If the delta is really small, we just keep the current joint position. This avoids joint # drift caused by IK solver inaccuracy even when zero delta actions are provided. - if th.allclose(pos_relative, target_pos, atol=1e-4) and th.allclose(quat_relative, target_quat, atol=1e-4): + if cb.allclose(pos_relative, target_pos, atol=1e-4) and cb.allclose(quat_relative, target_quat, atol=1e-4): target_joint_pos = current_joint_pos else: # Compute the pose error. Note that this is computed NOT in the EEF frame but still # in the base frame. pos_err = target_pos - pos_relative - ori_err = orientation_error(T.quat2mat(target_quat), T.quat2mat(quat_relative)) - err = th.cat([pos_err, ori_err]) + ori_err = cb.T.orientation_error(cb.T.quat2mat(target_quat), cb.T.quat2mat(quat_relative)) + err = cb.cat([pos_err, ori_err]) # Use the jacobian to compute a local approximation j_eef = control_dict[f"{self.task_name}_jacobian_relative"][:, self.dof_idx] - j_eef_pinv = th.linalg.pinv(j_eef) + j_eef_pinv = cb.pinv(j_eef) delta_j = j_eef_pinv @ err target_joint_pos = current_joint_pos + delta_j # Clip values to be within the joint limits - target_joint_pos = target_joint_pos.clamp( + target_joint_pos = target_joint_pos.clip( min=self._control_limits[ControlType.get_type("position")][0][self.dof_idx], max=self._control_limits[ControlType.get_type("position")][1][self.dof_idx], ) @@ -353,7 +349,7 @@ def _compute_no_op_action(self, control_dict): pos_relative = control_dict[f"{self.task_name}_pos_relative"] quat_relative = control_dict[f"{self.task_name}_quat_relative"] - command = th.zeros(6, dtype=th.float32, device=pos_relative.device) + command = cb.zeros(6) # Handle position if self.mode == "absolute_pose": @@ -364,7 +360,7 @@ def _compute_no_op_action(self, control_dict): # Handle orientation if self.mode in ("pose_absolute_ori", "absolute_pose"): - command[3:] = T.quat2axisangle(quat_relative) + command[3:] = cb.T.quat2axisangle(quat_relative) else: # For these modes, we don't need to add orientation to the command pass diff --git a/omnigibson/controllers/joint_controller.py b/omnigibson/controllers/joint_controller.py index 4ee046cd6..31bb3f5aa 100644 --- a/omnigibson/controllers/joint_controller.py +++ b/omnigibson/controllers/joint_controller.py @@ -1,8 +1,6 @@ import math -import torch as th - 
-import omnigibson.utils.transform_utils as T +from omnigibson.controllers.controller_base import _controller_backend as cb from omnigibson.controllers import ( ControlType, GripperController, @@ -158,10 +156,10 @@ def _update_goal(self, command, control_dict): delta_rots = command[[rx_ind, ry_ind, rz_ind]] # Compute the final rotations in the quaternion space. - _, end_quat = T.pose_transform( - th.zeros(3), T.euler2quat(delta_rots), th.zeros(3), T.euler2quat(start_rots) + _, end_quat = cb.T.pose_transform( + cb.zeros(3), cb.T.euler2quat(delta_rots), cb.zeros(3), cb.T.euler2quat(start_rots) ) - end_rots = T.quat2euler(end_quat) + end_rots = cb.T.quat2euler(end_quat) # Update the command target[[rx_ind, ry_ind, rz_ind]] = end_rots @@ -212,7 +210,7 @@ def compute_control(self, goal_dict, control_dict): else: # effort u = target - dof_idxs_mat = th.meshgrid(self.dof_idx, self.dof_idx, indexing="xy") + dof_idxs_mat = cb.meshgrid(self.dof_idx, self.dof_idx) mm = control_dict["mass_matrix"][dof_idxs_mat] u = mm @ u @@ -238,21 +236,21 @@ def compute_no_op_goal(self, control_dict): target = control_dict[f"joint_{self._motor_type}"][self.dof_idx] else: # For velocity / effort, directly set to 0 - target = th.zeros(self.control_dim) + target = cb.zeros(self.control_dim) return dict(target=target) def _compute_no_op_action(self, control_dict): if self.motor_type == "position": if self._use_delta_commands: - return th.zeros(self.command_dim) + return cb.zeros(self.command_dim) else: return control_dict[f"joint_position"][self.dof_idx] elif self.motor_type == "velocity": if self._use_delta_commands: return -control_dict[f"joint_velocity"][self.dof_idx] else: - return th.zeros(self.command_dim) + return cb.zeros(self.command_dim) raise ValueError("Cannot compute noop action for effort motor type.") diff --git a/omnigibson/controllers/multi_finger_gripper_controller.py b/omnigibson/controllers/multi_finger_gripper_controller.py index 9eb70241f..4c8ff6bf4 100644 --- a/omnigibson/controllers/multi_finger_gripper_controller.py +++ b/omnigibson/controllers/multi_finger_gripper_controller.py @@ -1,6 +1,4 @@ -import torch as th - -import omnigibson.utils.transform_utils as T +from omnigibson.controllers.controller_base import _controller_backend as cb from omnigibson.controllers import ControlType, GripperController, IsGraspingState from omnigibson.macros import create_module_macros from omnigibson.utils.python_utils import assert_valid_key @@ -95,8 +93,8 @@ def __init__( self._inverted = inverted self._mode = mode self._limit_tolerance = limit_tolerance - self._open_qpos = open_qpos if open_qpos is None else th.tensor(open_qpos) - self._closed_qpos = closed_qpos if closed_qpos is None else th.tensor(closed_qpos) + self._open_qpos = open_qpos if open_qpos is None else cb.array(open_qpos) + self._closed_qpos = closed_qpos if closed_qpos is None else cb.array(closed_qpos) # Create other args to be filled in at runtime self._is_grasping = IsGraspingState.FALSE @@ -125,9 +123,9 @@ def _preprocess_command(self, command): # We extend this method to make sure command is always n-dimensional if self._mode != "independent": command = ( - th.tensor([command] * self.command_dim) + cb.array([command] * self.command_dim) if type(command) in {int, float} - else th.tensor([command[0]] * self.command_dim) + else cb.array([command[0]] * self.command_dim) ) # Flip the command if the direction is inverted. @@ -178,7 +176,7 @@ def compute_control(self, goal_dict, control_dict): ) else: # Use continuous signal. 
Make sure to go from command to control dim. - u = th.full((self.control_dim,), target[0]) if len(target) == 1 else target + u = cb.full((self.control_dim,), target[0]) if len(target) == 1 else target # If we're near the joint limits and we're using velocity / torque control, we zero out the action if self._motor_type in {"velocity", "torque"}: @@ -188,7 +186,7 @@ def compute_control(self, goal_dict, control_dict): violate_lower_limit = ( joint_pos < self._control_limits[ControlType.POSITION][0][self.dof_idx] + self._limit_tolerance ) - violation = th.logical_or(violate_upper_limit * (u > 0), violate_lower_limit * (u < 0)) + violation = cb.logical_or(violate_upper_limit * (u > 0), violate_lower_limit * (u < 0)) u *= ~violation # Update whether we're grasping or not @@ -209,7 +207,6 @@ def _update_grasping_state(self, control_dict): joint_velocity: Array of current joint velocities """ # Calculate grasping state based on mode of this controller - # Independent mode of MultiFingerGripperController does not have any good heuristics to determine is_grasping if self._mode == "independent": is_grasping = IsGraspingState.UNKNOWN @@ -219,7 +216,7 @@ def _update_grasping_state(self, control_dict): is_grasping = IsGraspingState.FALSE # Different values in the command for non-independent mode - cannot use heuristics - elif not th.all(self._control == self._control[0]): + elif not cb.all(self._control == self._control[0]): is_grasping = IsGraspingState.UNKNOWN # Joint position tolerance for is_grasping heuristics checking is smaller than or equal to the gripper @@ -231,11 +228,11 @@ def _update_grasping_state(self, control_dict): finger_pos = control_dict["joint_position"][self.dof_idx] # For joint position control, if the desired positions are the same as the current positions, is_grasping unknown - if self._motor_type == "position" and th.mean(th.abs(finger_pos - self._control)) < m.POS_TOLERANCE: + if self._motor_type == "position" and cb.abs(finger_pos - self._control).mean() < m.POS_TOLERANCE: is_grasping = IsGraspingState.UNKNOWN # For joint velocity / torque control, if the desired velocities / torques are zeros, is_grasping unknown - elif self._motor_type in {"velocity", "torque"} and th.mean(th.abs(self._control)) < m.VEL_TOLERANCE: + elif self._motor_type in {"velocity", "torque"} and cb.abs(self._control).mean() < m.VEL_TOLERANCE: is_grasping = IsGraspingState.UNKNOWN # Otherwise, the last control signal intends to "move" the gripper @@ -245,7 +242,7 @@ def _update_grasping_state(self, control_dict): max_pos = self._control_limits[ControlType.POSITION][1][self.dof_idx] # Make sure we don't have any invalid values (i.e.: fingers should be within the limits) - finger_pos = th.clip(finger_pos, min_pos, max_pos) + finger_pos = finger_pos.clip(min_pos, max_pos) # Check distance from both ends of the joint limits dist_from_lower_limit = finger_pos - min_pos @@ -253,12 +250,12 @@ def _update_grasping_state(self, control_dict): # If the joint positions are not near the joint limits with some tolerance (m.POS_TOLERANCE) valid_grasp_pos = ( - th.mean(dist_from_lower_limit) > m.POS_TOLERANCE - and th.mean(dist_from_upper_limit) > m.POS_TOLERANCE + dist_from_lower_limit.mean() > m.POS_TOLERANCE + and dist_from_upper_limit.mean() > m.POS_TOLERANCE ) # And the joint velocities are close to zero with some tolerance (m.VEL_TOLERANCE) - valid_grasp_vel = th.all(th.abs(finger_vel) < m.VEL_TOLERANCE) + valid_grasp_vel = cb.all(cb.abs(finger_vel) < m.VEL_TOLERANCE) # Then the gripper is grasping something, 
which stops the gripper from reaching its desired state is_grasping = IsGraspingState.TRUE if valid_grasp_pos and valid_grasp_vel else IsGraspingState.FALSE @@ -268,7 +265,7 @@ def _update_grasping_state(self, control_dict): def compute_no_op_goal(self, control_dict): # Just use a zero vector - return dict(target=th.zeros(self.command_dim)) + return dict(target=cb.zeros(self.command_dim)) def _compute_no_op_action(self, control_dict): # Take care of the special case of binary control @@ -276,18 +273,18 @@ def _compute_no_op_action(self, control_dict): command_val = -1 if self.is_grasping() == IsGraspingState.TRUE else 1 if self._inverted: command_val = -1 * command_val - return th.tensor([command_val], dtype=th.float32) + return cb.array([command_val]) if self._motor_type == "position": command = control_dict[f"joint_position"][self.dof_idx] elif self._motor_type == "velocity": - command = th.zeros(self.command_dim) + command = cb.zeros(self.command_dim) else: raise ValueError("Cannot compute noop action for effort motor type.") # Convert to binary / smooth mode if necessary if self._mode == "smooth": - command = th.mean(command, dim=-1, keepdim=True) + command = cb.mean(command, dim=-1, keepdim=True) return command diff --git a/omnigibson/controllers/null_joint_controller.py b/omnigibson/controllers/null_joint_controller.py index 4c92cbf9a..eea678a0a 100644 --- a/omnigibson/controllers/null_joint_controller.py +++ b/omnigibson/controllers/null_joint_controller.py @@ -1,5 +1,4 @@ -import torch as th - +from omnigibson.controllers.controller_base import _controller_backend as cb from omnigibson.controllers import JointController @@ -58,7 +57,7 @@ def __init__( applied """ # Store values - self._default_command = th.zeros(len(dof_idx)) if default_command is None else default_command + self._default_command = cb.zeros(len(dof_idx)) if default_command is None else default_command # Run super init super().__init__( @@ -81,7 +80,7 @@ def compute_no_op_goal(self, control_dict): def _preprocess_command(self, command): # Override super and force the processed command to be internal stored default value - return th.tensor(self._default_command) + return cb.array(self._default_command) def update_default_goal(self, target): """ @@ -95,4 +94,4 @@ def update_default_goal(self, target): len(target) == self.control_dim ), f"Default control must be length: {self.control_dim}, got length: {len(target)}" - self._default_command = th.tensor(target) + self._default_command = cb.array(target) diff --git a/omnigibson/controllers/osc_controller.py b/omnigibson/controllers/osc_controller.py index 01515fc3e..539a89bef 100644 --- a/omnigibson/controllers/osc_controller.py +++ b/omnigibson/controllers/osc_controller.py @@ -1,12 +1,10 @@ import math -import torch as th - -import omnigibson.utils.transform_utils as T +from omnigibson.controllers.controller_base import _ControllerBackend, _ControllerTorchBackend, _ControllerNumpyBackend +from omnigibson.controllers.controller_base import _controller_backend as cb from omnigibson.controllers import ControlType, ManipulationController -from omnigibson.utils.control_utils import orientation_error from omnigibson.utils.processing_utils import MovingAverageFilter -from omnigibson.utils.python_utils import assert_valid_key, nums2array +from omnigibson.utils.python_utils import assert_valid_key from omnigibson.utils.ui_utils import create_module_logger # Create module logger @@ -144,13 +142,13 @@ def limiter(target_pos: Array[float], target_quat: Array[float], control_dict: D 
) # Store gains - self.kp = nums2array(nums=kp, dim=6, dtype=th.float32) if kp is not None else None + self.kp = self.nums2array(nums=kp, dim=6) if kp is not None else None self.damping_ratio = damping_ratio - self.kp_null = nums2array(nums=kp_null, dim=control_dim, dtype=th.float32) if kp_null is not None else None - self.kd_null = 2 * th.sqrt(self.kp_null) if kp_null is not None else None # critically damped - self.kp_limits = th.tensor(kp_limits, dtype=th.float32) - self.damping_ratio_limits = th.tensor(damping_ratio_limits, dtype=th.float32) - self.kp_null_limits = th.tensor(kp_null_limits, dtype=th.float32) + self.kp_null = self.nums2array(nums=kp_null, dim=control_dim) if kp_null is not None else None + self.kd_null = 2 * cb.sqrt(self.kp_null) if kp_null is not None else None # critically damped + self.kp_limits = cb.array(kp_limits) + self.damping_ratio_limits = cb.array(damping_ratio_limits) + self.kp_null_limits = cb.array(kp_null_limits) # Store settings for whether we're learning gains or not self.variable_kp = self.kp is None @@ -199,12 +197,12 @@ def limiter(target_pos: Array[float], target_quat: Array[float], control_dict: D is_input_limits_numeric = not (command_input_limits is None or isinstance(command_input_limits, str)) is_output_limits_numeric = not (command_output_limits is None or isinstance(command_output_limits, str)) command_input_limits = ( - [nums2array(lim, dim=6, dtype=th.float32) for lim in command_input_limits] + [self.nums2array(lim, dim=6) for lim in command_input_limits] if is_input_limits_numeric else command_input_limits ) command_output_limits = ( - [nums2array(lim, dim=6, dtype=th.float32) for lim in command_output_limits] + [self.nums2array(lim, dim=6) for lim in command_output_limits] if is_output_limits_numeric else command_output_limits ) @@ -220,12 +218,12 @@ def limiter(target_pos: Array[float], target_quat: Array[float], control_dict: D # Add this to input / output limits if is_input_limits_numeric: command_input_limits = [ - th.cat([lim, nums2array(nums=val, dim=dim, dtype=th.float32)]) + cb.cat([lim, self.nums2array(nums=val, dim=dim)]) for lim, val in zip(command_input_limits, (-1, 1)) ] if is_output_limits_numeric: command_output_limits = [ - th.cat([lim, nums2array(nums=val, dim=dim, dtype=th.float32)]) + cb.cat([lim, self.nums2array(nums=val, dim=dim)]) for lim, val in zip(command_output_limits, gain_limits) ] # Update command dim @@ -293,7 +291,7 @@ def _update_variable_gains(self, gains): idx += 6 if self.variable_kp_null: self.kp_null = gains[:, idx : idx + self.control_dim] - self.kd_null = 2 * th.sqrt(self.kp_null) # critically damped + self.kd_null = 2 * cb.sqrt(self.kp_null) # critically damped idx += self.control_dim def _update_goal(self, command, control_dict): @@ -315,8 +313,8 @@ def _update_goal(self, command, control_dict): frame to control, computed in its local frame (e.g.: robot base frame) """ # Grab important info from control dict - pos_relative = control_dict[f"{self.task_name}_pos_relative"].clone() - quat_relative = control_dict[f"{self.task_name}_quat_relative"].clone() + pos_relative = cb.copy(control_dict[f"{self.task_name}_pos_relative"]) + quat_relative = cb.copy(control_dict[f"{self.task_name}_quat_relative"]) # Convert position command to absolute values if needed if self.mode == "absolute_pose": @@ -336,11 +334,11 @@ def _update_goal(self, command, control_dict): target_quat = quat_relative elif self.mode == "pose_absolute_ori" or self.mode == "absolute_pose": # Received "delta" ori is in fact the desired 
absolute orientation - target_quat = T.axisangle2quat(command[3:6]) + target_quat = cb.T.axisangle2quat(command[3:6]) else: # pose_delta_ori control # Grab dori and compute target ori - dori = T.quat2mat(T.axisangle2quat(command[3:6])) - target_quat = T.mat2quat(dori @ T.quat2mat(quat_relative)) + dori = cb.T.quat2mat(cb.T.axisangle2quat(command[3:6])) + target_quat = cb.T.mat2quat(dori @ cb.T.quat2mat(quat_relative)) # Possibly limit to workspace if specified if self.workspace_pose_limiter is not None: @@ -352,8 +350,8 @@ def _update_goal(self, command, control_dict): # Set goals and return return dict( - target_pos=target_pos, - target_ori_mat=T.quat2mat(target_quat), + target_pos=cb.as_float32(target_pos), + target_ori_mat=cb.as_float32(cb.T.quat2mat(target_quat)), ) def compute_control(self, goal_dict, control_dict): @@ -389,31 +387,32 @@ def compute_control(self, goal_dict, control_dict): # For now, always use internal values kp = self.kp damping_ratio = self.damping_ratio - kd = 2 * th.sqrt(kp) * damping_ratio + kd = 2 * cb.sqrt(kp) * damping_ratio # Extract relevant values from the control dict - dof_idxs_mat = tuple(th.meshgrid(self.dof_idx, self.dof_idx)) + dof_idxs_mat = tuple(cb.meshgrid(self.dof_idx, self.dof_idx)) q = control_dict["joint_position"][self.dof_idx] qd = control_dict["joint_velocity"][self.dof_idx] mm = control_dict["mass_matrix"][dof_idxs_mat] j_eef = control_dict[f"{self.task_name}_jacobian_relative"][:, self.dof_idx] ee_pos = control_dict[f"{self.task_name}_pos_relative"] ee_quat = control_dict[f"{self.task_name}_quat_relative"] - ee_vel = th.cat( + ee_vel = cb.cat( [control_dict[f"{self.task_name}_lin_vel_relative"], control_dict[f"{self.task_name}_ang_vel_relative"]] ) base_lin_vel = control_dict["root_rel_lin_vel"] base_ang_vel = control_dict["root_rel_ang_vel"] # Calculate torques - u = _compute_osc_torques( + u = cb.compute_osc_torques( q=q, qd=qd, mm=mm, j_eef=j_eef, - ee_pos=ee_pos, - ee_mat=T.quat2mat(ee_quat), - ee_vel=ee_vel, + ee_pos=cb.as_float32(ee_pos), + ee_mat=cb.as_float32(cb.T.quat2mat(ee_quat)), + ee_lin_vel=cb.as_float32(ee_vel[:3]), + ee_ang_vel_err=cb.as_float32(cb.T.quat2axisangle(cb.T.quat_multiply(cb.T.axisangle2quat(-ee_vel[3:]), cb.T.axisangle2quat(base_ang_vel)))), goal_pos=goal_dict["target_pos"], goal_ori_mat=goal_dict["target_ori_mat"], kp=kp, @@ -423,8 +422,8 @@ def compute_control(self, goal_dict, control_dict): rest_qpos=self.reset_joint_pos, control_dim=self.control_dim, decouple_pos_ori=self.decouple_pos_ori, - base_lin_vel=base_lin_vel, - base_ang_vel=base_ang_vel, + base_lin_vel=cb.as_float32(base_lin_vel), + base_ang_vel=cb.as_float32(base_ang_vel), ).flatten() # Add gravity compensation @@ -440,20 +439,20 @@ def compute_control(self, goal_dict, control_dict): def compute_no_op_goal(self, control_dict): # No-op is maintaining current pose - target_pos = control_dict[f"{self.task_name}_pos_relative"].clone() - target_quat = control_dict[f"{self.task_name}_quat_relative"].clone() + target_pos = cb.copy(control_dict[f"{self.task_name}_pos_relative"]) + target_quat = cb.copy(control_dict[f"{self.task_name}_quat_relative"]) # Convert quat into eef ori mat return dict( - target_pos=target_pos, - target_ori_mat=T.quat2mat(target_quat), + target_pos=cb.as_float32(target_pos), + target_ori_mat=cb.as_float32(cb.T.quat2mat(target_quat)), ) def _compute_no_op_action(self, control_dict): pos_relative = control_dict[f"{self.task_name}_pos_relative"] quat_relative = control_dict[f"{self.task_name}_quat_relative"] - command = th.zeros(6, 
dtype=th.float32, device=pos_relative.device) + command = cb.zeros(6) # Handle position if self.mode == "absolute_pose": @@ -464,7 +463,7 @@ def _compute_no_op_action(self, control_dict): # Handle orientation if self.mode in ("pose_absolute_ori", "absolute_pose"): - command[3:] = T.quat2axisangle(quat_relative) + command[3:] = cb.T.quat2axisangle(quat_relative) else: # For these modes, we don't need to add orientation to the command pass @@ -486,15 +485,18 @@ def command_dim(self): return self._command_dim +import torch as th +import omnigibson.utils.transform_utils as TT @th.jit.script -def _compute_osc_torques( +def _compute_osc_torques_torch( q: th.Tensor, qd: th.Tensor, mm: th.Tensor, j_eef: th.Tensor, ee_pos: th.Tensor, ee_mat: th.Tensor, - ee_vel: th.Tensor, + ee_lin_vel: th.Tensor, + ee_ang_vel_err: th.Tensor, goal_pos: th.Tensor, goal_ori_mat: th.Tensor, kp: th.Tensor, @@ -512,7 +514,7 @@ def _compute_osc_torques( # Calculate error pos_err = goal_pos - ee_pos - ori_err = orientation_error(goal_ori_mat, ee_mat) + ori_err = TT.orientation_error(goal_ori_mat, ee_mat) err = th.cat((pos_err, ori_err)) # Vel target is the base velocity as experienced by the end effector @@ -521,9 +523,8 @@ def _compute_osc_torques( # due to the base linear velocity # For angular velocity, we need to make sure we compute the difference between the base and eef velocity # properly, not simply "subtraction" as in the linear case - lin_vel_err = base_lin_vel + th.linalg.cross(base_ang_vel, ee_pos) - ee_vel[:3] - ang_vel_err = T.quat2axisangle(T.quat_multiply(T.axisangle2quat(-ee_vel[3:]), T.axisangle2quat(base_ang_vel))) - vel_err = th.cat((lin_vel_err, ang_vel_err)) + lin_vel_err = base_lin_vel + th.linalg.cross(base_ang_vel, ee_pos) - ee_lin_vel + vel_err = th.cat((lin_vel_err, ee_ang_vel_err)) # Determine desired wrench err = th.unsqueeze(kp * err + kd * vel_err, dim=-1) @@ -559,3 +560,88 @@ def _compute_osc_torques( u += (th.eye(control_dim, dtype=th.float32) - j_eef.T @ j_eef_inv) @ u_null return u + + +import numpy as np +from numba import jit +import omnigibson.utils.transform_utils_np as NT +# Use numba since faster +@jit(nopython=True) +def _compute_osc_torques_numpy( + q, + qd, + mm, + j_eef, + ee_pos, + ee_mat, + ee_lin_vel, + ee_ang_vel_err, + goal_pos, + goal_ori_mat, + kp, + kd, + kp_null, + kd_null, + rest_qpos, + control_dim, + decouple_pos_ori, + base_lin_vel, + base_ang_vel, +): + # Compute the inverse + mm_inv = np.linalg.inv(mm) + + # Calculate error + pos_err = goal_pos - ee_pos + ori_err = NT.orientation_error(goal_ori_mat, ee_mat).astype(np.float32) + err = np.concatenate((pos_err, ori_err)) + + # Vel target is the base velocity as experienced by the end effector + # For angular velocity, this is just the base angular velocity + # For linear velocity, this is the base linear velocity PLUS the net linear velocity experienced + # due to the base linear velocity + # For angular velocity, we need to make sure we compute the difference between the base and eef velocity + # properly, not simply "subtraction" as in the linear case + lin_vel_err = base_lin_vel + np.cross(base_ang_vel, ee_pos) - ee_lin_vel + vel_err = np.concatenate((lin_vel_err, ee_ang_vel_err)) + + # Determine desired wrench + err = np.expand_dims(kp * err + kd * vel_err, axis=-1) + m_eef_inv = j_eef @ mm_inv @ j_eef.T + m_eef = np.linalg.inv(m_eef_inv) + + if decouple_pos_ori: + # # More efficient, but numba doesn't support 3D tensor operations yet + # j_eef_batch = j_eef.reshape(2, 3, -1) + # m_eef_pose_inv = 
np.matmul(np.matmul(j_eef_batch, np.expand_dims(mm_inv, axis=0)), np.transpose(j_eef_batch, (0, 2, 1))) + # m_eef_pose = np.linalg.inv(m_eef_pose_inv) # Shape (2, 3, 3) + # wrench = np.matmul(m_eef_pose, err.reshape(2, 3, 1)).flatten() + m_eef_pos_inv = j_eef[:3, :] @ mm_inv @ j_eef[:3, :].T + m_eef_ori_inv = j_eef[3:, :] @ mm_inv @ j_eef[3:, :].T + m_eef_pos = np.linalg.inv(m_eef_pos_inv) + m_eef_ori = np.linalg.inv(m_eef_ori_inv) + wrench_pos = m_eef_pos @ err[:3, :] + wrench_ori = m_eef_ori @ err[3:, :] + wrench = np.concatenate((wrench_pos, wrench_ori)) + else: + wrench = m_eef @ err + + # Compute OSC torques + u = j_eef.T @ wrench + + # Nullspace control torques `u_null` prevents large changes in joint configuration + # They are added into the nullspace of OSC so that the end effector orientation remains constant + # roboticsproceedings.org/rss07/p31.pdf + if rest_qpos is not None: + j_eef_inv = m_eef @ j_eef @ mm_inv + u_null = kd_null * -qd + kp_null * ((rest_qpos - q + np.pi) % (2 * np.pi) - np.pi) + u_null = mm @ np.expand_dims(u_null, axis=-1).astype(np.float32) + u += (np.eye(control_dim, dtype=np.float32) - j_eef.T @ j_eef_inv) @ u_null + + return u + + +# Set these as part of the backend values +setattr(_ControllerBackend, "compute_osc_torques", None) +setattr(_ControllerTorchBackend, "compute_osc_torques", _compute_osc_torques_torch) +setattr(_ControllerNumpyBackend, "compute_osc_torques", _compute_osc_torques_numpy) diff --git a/omnigibson/macros.py b/omnigibson/macros.py index de90df920..852f9c7a7 100644 --- a/omnigibson/macros.py +++ b/omnigibson/macros.py @@ -79,6 +79,10 @@ def determine_gm_path(default_path, env_var_name): gm.HTTP_PORT = os.getenv("OMNIGIBSON_HTTP_PORT", 8211) gm.WEBRTC_PORT = os.getenv("OMNIGIBSON_WEBRTC_PORT", 49100) +# Whether to use numpy or torch controller backend. 
Numpy is significantly faster and should be used +# for single-threaded (i.e.: non-large scale parallelized env) purposes +gm.USE_NUMPY_CONTROLLER_BACKEND = True + # Whether only the viewport should be shown in the GUI or not (if not, other peripherals are additionally shown) # CANNOT be set at runtime gm.GUI_VIEWPORT_ONLY = False diff --git a/omnigibson/objects/controllable_object.py b/omnigibson/objects/controllable_object.py index 8feb77008..a47d50f5f 100644 --- a/omnigibson/objects/controllable_object.py +++ b/omnigibson/objects/controllable_object.py @@ -10,6 +10,7 @@ import omnigibson as og from omnigibson.controllers import create_controller from omnigibson.controllers.controller_base import ControlType +from omnigibson.controllers.controller_base import _controller_backend as cb from omnigibson.objects.object_base import BaseObject from omnigibson.utils.constants import JointType, PrimType from omnigibson.utils.numpy_utils import NumpyTypes @@ -277,7 +278,7 @@ def _load_controllers(self): cfg["command_input_limits"] = "default" # default is normalized (-1, 1) # Create the controller - controller = create_controller(**cfg) + controller = create_controller(**cb.from_torch_recursive(cfg)) # Verify the controller's DOFs can all be driven for idx in controller.dof_idx: assert self._joints[ @@ -409,8 +410,8 @@ def _create_continuous_action_space(self): return gym.spaces.Box( shape=(self.action_dim,), - low=th.cat(low).cpu().numpy(), - high=th.cat(high).cpu().numpy(), + low=cb.to_numpy(cb.cat(low)), + high=cb.to_numpy(cb.cat(high)), dtype=NumpyTypes.FLOAT32, ) @@ -441,6 +442,9 @@ def apply_action(self, action): self.action_dim, len(action) ) + # Convert action from torch if necessary + action = cb.from_torch(action) + # First, loop over all controllers, and update the desired command idx = 0 @@ -488,9 +492,9 @@ def step(self): idx += controller.command_dim # Compose controls - u_vec = th.zeros(self.n_dof) + u_vec = cb.zeros(self.n_dof) # By default, the control type is None and the control value is 0 (th.zeros) - i.e. no control applied - u_type_vec = th.tensor([ControlType.NONE] * self.n_dof) + u_type_vec = cb.array([ControlType.NONE] * self.n_dof) for group, ctrl in control.items(): idx = self._controllers[group].dof_idx u_vec[idx] = ctrl["value"] @@ -549,9 +553,12 @@ def deploy_control(self, control, control_type): """ # Run sanity check assert len(control) == len(control_type) == self.n_dof, ( - "Control signals, control types, and number of DOF should all be the same!" - "Got {}, {}, and {} respectively.".format(len(control), len(control_type), self.n_dof) + f"Control signals, control types, and number of DOF should all be the same!" + f"Got {len(control)}, {len(control_type)}, and {self.n_dof} respectively." 
) + # Cast control type to numpy to avoid having to call .item() individually on torch tensors + control_type = cb.to_numpy(control_type) + # Set indices manually so that we're standardized indices = range(self.n_dof) @@ -561,9 +568,24 @@ def deploy_control(self, control, control_type): # Loop through controls and deploy # We have to use delicate logic to account for the edge cases where a single joint may contain > 1 DOF # (e.g.: spherical joint) - pos_vec, pos_idxs, using_pos = [], [], False - vel_vec, vel_idxs, using_vel = [], [], False - eff_vec, eff_idxs, using_eff = [], [], False + ctrl_to_execute = { + ControlType.EFFORT: { + "vals": [], + "idxs": [], + }, + ControlType.POSITION: { + "vals": [], + "idxs": [], + }, + ControlType.VELOCITY: { + "vals": [], + "idxs": [], + }, + ControlType.NONE: { + "vals": [], + "idxs": [], + }, + } cur_indices_idx = 0 while cur_indices_idx != n_indices: # Grab the current DOF index we're controlling and find the corresponding joint @@ -596,40 +618,29 @@ def deploy_control(self, control, control_type): ctrl_type = control_type[ cur_ctrl_idx ] # In multi-DOF joint case all values were already checked to be the same - if ctrl_type == ControlType.EFFORT: - eff_vec.append(ctrl) - eff_idxs.append(cur_ctrl_idx) - using_eff = True - elif ctrl_type == ControlType.VELOCITY: - vel_vec.append(ctrl) - vel_idxs.append(cur_ctrl_idx) - using_vel = True - elif ctrl_type == ControlType.POSITION: - pos_vec.append(ctrl) - pos_idxs.append(cur_ctrl_idx) - using_pos = True - elif ctrl_type == ControlType.NONE: - # Set zero efforts - eff_vec.append(0) - eff_idxs.append(cur_ctrl_idx) - using_eff = True - else: - raise ValueError("Invalid control type specified: {}".format(ctrl_type)) + ctrl_to_execute[ctrl_type]["vals"].append(ctrl) + ctrl_to_execute[ctrl_type]["idxs"].append(cur_ctrl_idx) # Finally, increment the current index based on how many DOFs were just controlled cur_indices_idx += joint_dof # set the targets for joints - if using_pos: + if len(ctrl_to_execute[ControlType.POSITION]["idxs"]) > 0: ControllableObjectViewAPI.set_joint_position_targets( - self.articulation_root_path, positions=th.tensor(pos_vec, dtype=th.float), indices=th.tensor(pos_idxs) + self.articulation_root_path, + positions=cb.array(ctrl_to_execute[ControlType.POSITION]["vals"]), + indices=cb.int_array(ctrl_to_execute[ControlType.POSITION]["idxs"]), ) - if using_vel: + if len(ctrl_to_execute[ControlType.VELOCITY]["idxs"]) > 0: ControllableObjectViewAPI.set_joint_velocity_targets( - self.articulation_root_path, velocities=th.tensor(vel_vec, dtype=th.float), indices=th.tensor(vel_idxs) + self.articulation_root_path, + velocities=cb.array(ctrl_to_execute[ControlType.VELOCITY]["vals"]), + indices=cb.int_array(ctrl_to_execute[ControlType.VELOCITY]["idxs"]), ) - if using_eff: + if len(ctrl_to_execute[ControlType.EFFORT]["idxs"]) > 0 or len(ctrl_to_execute[ControlType.NONE]["idxs"]) > 0: ControllableObjectViewAPI.set_joint_efforts( - self.articulation_root_path, efforts=th.tensor(eff_vec, dtype=th.float), indices=th.tensor(eff_idxs) + self.articulation_root_path, + efforts=cb.cat([cb.array(ctrl_to_execute[ControlType.EFFORT]["vals"]), cb.array(ctrl_to_execute[ControlType.NONE]["vals"])]), + indices=cb.cat([cb.int_array(ctrl_to_execute[ControlType.EFFORT]["idxs"]), cb.int_array(ctrl_to_execute[ControlType.NONE]["idxs"])]), ) def get_control_dict(self): @@ -756,9 +767,9 @@ def serialize(self, state): state_flat = super().serialize(state=state) # Serialize the controller states sequentially - controller_states_flat = th.cat( + 
controller_states_flat = cb.to_torch(cb.cat( [c.serialize(state=state["controllers"][c_name]) for c_name, c in self._controllers.items()] - ) + )) # Concatenate and return return th.cat([state_flat, controller_states_flat]) @@ -770,7 +781,7 @@ def deserialize(self, state): # Deserialize the controller states sequentially controller_states = dict() for c_name, c in self._controllers.items(): - controller_states[c_name], deserialized_items = c.deserialize(state=state[idx:]) + controller_states[c_name], deserialized_items = c.deserialize(state=cb.from_torch(state[idx:])) idx += deserialized_items state_dict["controllers"] = controller_states diff --git a/omnigibson/robots/holonomic_base_robot.py b/omnigibson/robots/holonomic_base_robot.py index 7aa506108..1174d9197 100644 --- a/omnigibson/robots/holonomic_base_robot.py +++ b/omnigibson/robots/holonomic_base_robot.py @@ -6,10 +6,12 @@ import omnigibson as og import omnigibson.lazy as lazy -import omnigibson.utils.transform_utils as T +from omnigibson.controllers.controller_base import _controller_backend as cb from omnigibson.macros import create_module_macros from omnigibson.robots.locomotion_robot import LocomotionRobot +from omnigibson.robots.manipulation_robot import ManipulationRobot from omnigibson.utils.python_utils import classproperty +from omnigibson.utils.usd_utils import ControllableObjectViewAPI m = create_module_macros(module_path=__file__) m.MAX_LINEAR_VELOCITY = 1.5 # linear velocity in meters/second @@ -249,10 +251,11 @@ def set_position_orientation(self, position=None, orientation=None, frame: Liter # ("base_footprint_x") frame. Assign it to the 6 1DoF joints that control the base. # Note that the 6 1DoF joints are originated from the root_link ("base_footprint_x") frame. joint_pos, joint_orn = self.root_link.get_position_orientation() - inv_joint_pos, inv_joint_orn = T.mat2pose(T.pose_inv(T.pose2mat((joint_pos, joint_orn)))) + joint_pos, joint_orn = cb.from_torch(joint_pos), cb.from_torch(joint_orn) + inv_joint_pos, inv_joint_orn = cb.T.mat2pose(cb.T.pose_inv(cb.T.pose2mat((joint_pos, joint_orn)))) - relative_pos, relative_orn = T.pose_transform(inv_joint_pos, inv_joint_orn, position, orientation) - relative_rpy = T.quat2euler(relative_orn) + relative_pos, relative_orn = cb.T.pose_transform(inv_joint_pos, inv_joint_orn, position, orientation) + relative_rpy = cb.T.quat2euler(relative_orn) self.joints["base_footprint_x_joint"].set_pos(relative_pos[0], drive=False) self.joints["base_footprint_y_joint"].set_pos(relative_pos[1], drive=False) self.joints["base_footprint_z_joint"].set_pos(relative_pos[2], drive=False) @@ -275,8 +278,8 @@ def set_linear_velocity(self, velocity: th.Tensor): # Transform the desired linear velocity from the world frame to the root_link ("base_footprint_x") frame # Note that this will also set the target to be the desired linear velocity (i.e. the robot will try to maintain # such velocity), which is different from the default behavior of set_linear_velocity for all other objects. 
- orn = self.root_link.get_position_orientation()[1] - velocity_in_root_link = T.quat2mat(orn).T @ velocity + orn = cb.from_torch(self.root_link.get_position_orientation()[1]) + velocity_in_root_link = cb.T.quat2mat(orn).T @ cb.from_torch(velocity) self.joints["base_footprint_x_joint"].set_vel(velocity_in_root_link[0], drive=False) self.joints["base_footprint_y_joint"].set_vel(velocity_in_root_link[1], drive=False) self.joints["base_footprint_z_joint"].set_vel(velocity_in_root_link[2], drive=False) @@ -287,8 +290,8 @@ def get_linear_velocity(self) -> th.Tensor: def set_angular_velocity(self, velocity: th.Tensor) -> None: # See comments of self.set_linear_velocity - orn = self.root_link.get_position_orientation()[1] - velocity_in_root_link = T.quat2mat(orn).T @ velocity + orn = cb.from_torch(self.root_link.get_position_orientation()[1]) + velocity_in_root_link = cb.T.quat2mat(orn).T @ cb.from_torch(velocity) self.joints["base_footprint_rx_joint"].set_vel(velocity_in_root_link[0], drive=False) self.joints["base_footprint_ry_joint"].set_vel(velocity_in_root_link[1], drive=False) self.joints["base_footprint_rz_joint"].set_vel(velocity_in_root_link[2], drive=False) @@ -302,23 +305,23 @@ def _postprocess_control(self, control, control_type): u_vec, u_type_vec = super()._postprocess_control(control=control, control_type=control_type) # Change the control from base_footprint_link ("base_footprint") frame to root_link ("base_footprint_x") frame - base_orn = self.base_footprint_link.get_position_orientation()[1] - root_link_orn = self.root_link.get_position_orientation()[1] + base_orn = ControllableObjectViewAPI.get_position_orientation(self.articulation_root_path)[1] + root_link_orn = ControllableObjectViewAPI.get_root_position_orientation(self.articulation_root_path)[1] - cur_orn_mat = T.quat2mat(root_link_orn).T @ T.quat2mat(base_orn) - cur_pose = th.zeros((2, 4, 4)) + cur_orn_mat = cb.T.quat2mat(root_link_orn).T @ cb.T.quat2mat(base_orn) + cur_pose = cb.zeros((2, 4, 4)) cur_pose[:, :3, :3] = cur_orn_mat cur_pose[:, 3, 3] = 1.0 - local_pose = th.zeros((2, 4, 4)) - local_pose[:] = th.eye(4) - local_pose[:, :3, 3] = u_vec[self.base_idx].view(2, 3) + local_pose = cb.zeros((2, 4, 4)) + local_pose[:] = cb.eye(4) + local_pose[:, :3, 3] = cb.view(u_vec[cb.from_torch(self.base_idx)], (2, 3)) # Rotate the linear and angular velocity to the desired frame global_pose = cur_pose @ local_pose lin_vel_global, ang_vel_global = global_pose[0, :3, 3], global_pose[1, :3, 3] - u_vec[self.base_control_idx] = th.tensor([lin_vel_global[0], lin_vel_global[1], ang_vel_global[2]]) + u_vec[cb.from_torch(self.base_control_idx)] = cb.array([lin_vel_global[0], lin_vel_global[1], ang_vel_global[2]]) return u_vec, u_type_vec diff --git a/omnigibson/simulator.py b/omnigibson/simulator.py index ce06d7a21..17cf95dd7 100644 --- a/omnigibson/simulator.py +++ b/omnigibson/simulator.py @@ -239,6 +239,10 @@ def _launch_app(): # Loading Isaac Sim disables Ctrl+C, so we need to re-enable it signal.signal(signal.SIGINT, og.shutdown_handler) + # Set controller backend + import omnigibson.controllers.controller_base as CB + CB._controller_backend.set_methods(CB._ControllerNumpyBackend if gm.USE_NUMPY_CONTROLLER_BACKEND else CB._ControllerTorchBackend) + return app diff --git a/omnigibson/utils/control_utils.py b/omnigibson/utils/control_utils.py index 7585bbbb1..00350688b 100644 --- a/omnigibson/utils/control_utils.py +++ b/omnigibson/utils/control_utils.py @@ -138,33 +138,3 @@ def solve( return th.tensor(ik_results.cspace_position, 
dtype=th.float32) else: return None - - -@th.jit.script -def orientation_error(desired, current): - """ - This function calculates a 3-dimensional orientation error vector for use in the - impedance controller. It does this by computing the delta rotation between the - inputs and converting that rotation to exponential coordinates (axis-angle - representation, where the 3d vector is axis * angle). - See https://en.wikipedia.org/wiki/Axis%E2%80%93angle_representation for more information. - Optimized function to determine orientation error from matrices - - Args: - desired (tensor): (..., 3, 3) where final two dims are 2d array representing target orientation matrix - current (tensor): (..., 3, 3) where final two dims are 2d array representing current orientation matrix - Returns: - tensor: (..., 3) where final dim is (ax, ay, az) axis-angle representing orientation error - """ - # Compute batch size - batch_size = desired.numel() // 9 # Each 3x3 matrix has 9 elements - - desired_flat = desired.reshape(batch_size, 3, 3) - current_flat = current.reshape(batch_size, 3, 3) - - rc1, rc2, rc3 = current_flat[:, :, 0], current_flat[:, :, 1], current_flat[:, :, 2] - rd1, rd2, rd3 = desired_flat[:, :, 0], desired_flat[:, :, 1], desired_flat[:, :, 2] - - error = 0.5 * (th.linalg.cross(rc1, rd1) + th.linalg.cross(rc2, rd2) + th.linalg.cross(rc3, rd3)) - - return error.reshape(desired.shape[:-2] + (3,)) diff --git a/omnigibson/utils/python_utils.py b/omnigibson/utils/python_utils.py index ad6ed3d7f..f9c4c8a8a 100644 --- a/omnigibson/utils/python_utils.py +++ b/omnigibson/utils/python_utils.py @@ -783,6 +783,24 @@ def recursively_convert_to_torch(state): return state +def recursively_convert_from_torch(state): + # For all the lists in state dict, convert from torch tensor -> numpy array + import numpy as np + for key, value in state.items(): + if isinstance(value, dict): + state[key] = recursively_convert_from_torch(value) + elif isinstance(value, th.Tensor): + state[key] = value.cpu().numpy() + elif (isinstance(value, list) or isinstance(value, tuple)) and len(value) > 0: + if isinstance(value[0], dict): + state[key] = [recursively_convert_from_torch(val) for val in value] + elif isinstance(value[0], th.Tensor): + state[key] = [tensor.numpy() for tensor in value] + elif isinstance(value[0], int) or isinstance(value[0], float): + state[key] = np.array(value) + return state + + def h5py_group_to_torch(group): state = {} for key, value in group.items(): diff --git a/omnigibson/utils/transform_utils.py b/omnigibson/utils/transform_utils.py index fadbc0f6a..e4feba876 100644 --- a/omnigibson/utils/transform_utils.py +++ b/omnigibson/utils/transform_utils.py @@ -1362,3 +1362,34 @@ def quaternions_close(q1: torch.Tensor, q2: torch.Tensor, atol: float = 1e-3) -> Whether the quaternions are close """ return torch.allclose(q1, q2, atol=atol) or torch.allclose(q1, -q2, atol=atol) + + +@torch.compile +def orientation_error(desired, current): + """ + This function calculates a 3-dimensional orientation error vector for use in the + impedance controller. It does this by computing the delta rotation between the + inputs and converting that rotation to exponential coordinates (axis-angle + representation, where the 3d vector is axis * angle). + See https://en.wikipedia.org/wiki/Axis%E2%80%93angle_representation for more information. 
+ Optimized function to determine orientation error from matrices + + Args: + desired (tensor): (..., 3, 3) where final two dims are 2d array representing target orientation matrix + current (tensor): (..., 3, 3) where final two dims are 2d array representing current orientation matrix + Returns: + tensor: (..., 3) where final dim is (ax, ay, az) axis-angle representing orientation error + """ + # Compute batch size + batch_size = desired.numel() // 9 # Each 3x3 matrix has 9 elements + + desired_flat = desired.reshape(batch_size, 3, 3) + current_flat = current.reshape(batch_size, 3, 3) + + rc1, rc2, rc3 = current_flat[:, :, 0], current_flat[:, :, 1], current_flat[:, :, 2] + rd1, rd2, rd3 = desired_flat[:, :, 0], desired_flat[:, :, 1], desired_flat[:, :, 2] + + error = 0.5 * (torch.linalg.cross(rc1, rd1) + torch.linalg.cross(rc2, rd2) + torch.linalg.cross(rc3, rd3)) + + return error.reshape(desired.shape[:-2] + (3,)) + diff --git a/omnigibson/utils/transform_utils_np.py b/omnigibson/utils/transform_utils_np.py new file mode 100644 index 000000000..8c5942985 --- /dev/null +++ b/omnigibson/utils/transform_utils_np.py @@ -0,0 +1,1229 @@ +""" +Utility functions of matrix and vector transformations. + +NOTE: convention for quaternions is (x, y, z, w) +""" + +import math + +import numpy as np +from numba import jit +from scipy.spatial.transform import Rotation as R + +PI = np.pi +EPS = np.finfo(float).eps * 4.0 + +# axis sequences for Euler angles +_NEXT_AXIS = [1, 2, 0, 1] + +# map axes strings to/from tuples of inner axis, parity, repetition, frame +_AXES2TUPLE = { + "sxyz": (0, 0, 0, 0), + "sxyx": (0, 0, 1, 0), + "sxzy": (0, 1, 0, 0), + "sxzx": (0, 1, 1, 0), + "syzx": (1, 0, 0, 0), + "syzy": (1, 0, 1, 0), + "syxz": (1, 1, 0, 0), + "syxy": (1, 1, 1, 0), + "szxy": (2, 0, 0, 0), + "szxz": (2, 0, 1, 0), + "szyx": (2, 1, 0, 0), + "szyz": (2, 1, 1, 0), + "rzyx": (0, 0, 0, 1), + "rxyx": (0, 0, 1, 1), + "ryzx": (0, 1, 0, 1), + "rxzx": (0, 1, 1, 1), + "rxzy": (1, 0, 0, 1), + "ryzy": (1, 0, 1, 1), + "rzxy": (1, 1, 0, 1), + "ryxy": (1, 1, 1, 1), + "ryxz": (2, 0, 0, 1), + "rzxz": (2, 0, 1, 1), + "rxyz": (2, 1, 0, 1), + "rzyz": (2, 1, 1, 1), +} + +_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items()) + + +def ewma_vectorized(data, alpha, offset=None, dtype=None, order="C", out=None): + """ + Calculates the exponential moving average over a vector. + Will fail for large inputs. + + Args: + data (Iterable): Input data + alpha (float): scalar in range (0,1) + The alpha parameter for the moving average. + offset (None or float): If specified, the offset for the moving average. None defaults to data[0]. + dtype (None or type): Data type used for calculations. If None, defaults to float64 unless + data.dtype is float32, then it will use float32. + order (None or str): Order to use when flattening the data. Valid options are {'C', 'F', 'A'}. + None defaults to 'C'. + out (None or np.array): If specified, the location into which the result is stored. If provided, it must have + the same shape as the input. If not provided or `None`, + a freshly-allocated array is returned. 
+ + Returns: + np.array: Exponential moving average from @data + """ + data = np.array(data, copy=False) + + if dtype is None: + if data.dtype == np.float32: + dtype = np.float32 + else: + dtype = np.float64 + else: + dtype = np.dtype(dtype) + + if data.ndim > 1: + # flatten input + data = data.reshape(-1, order) + + if out is None: + out = np.empty_like(data, dtype=dtype) + else: + assert out.shape == data.shape + assert out.dtype == dtype + + if data.size < 1: + # empty input, return empty array + return out + + if offset is None: + offset = data[0] + + alpha = np.array(alpha, copy=False).astype(dtype, copy=False) + + # scaling_factors -> 0 as len(data) gets large + # this leads to divide-by-zeros below + scaling_factors = np.power(1.0 - alpha, np.arange(data.size + 1, dtype=dtype), dtype=dtype) + # create cumulative sum array + np.multiply(data, (alpha * scaling_factors[-2]) / scaling_factors[:-1], dtype=dtype, out=out) + np.cumsum(out, dtype=dtype, out=out) + + # cumsums / scaling + out /= scaling_factors[-2::-1] + + if offset != 0: + offset = np.array(offset, copy=False).astype(dtype, copy=False) + # add offsets + out += offset * scaling_factors[1:] + + return out + + +def convert_quat(q, to="xyzw"): + """ + Converts quaternion from one convention to another. + The convention to convert TO is specified as an optional argument. + If to == 'xyzw', then the input is in 'wxyz' format, and vice-versa. + + Args: + q (np.array): a 4-dim array corresponding to a quaternion + to (str): either 'xyzw' or 'wxyz', determining which convention to convert to. + """ + if to == "xyzw": + return q[[1, 2, 3, 0]] + if to == "wxyz": + return q[[3, 0, 1, 2]] + raise Exception("convert_quat: choose a valid `to` argument (xyzw or wxyz)") + + +@jit(nopython=True) +def quat_multiply(quaternion1, quaternion0): + """ + Return multiplication of two quaternions (q1 * q0). + + E.g.: + >>> q = quat_multiply([1, -2, 3, 4], [-5, 6, 7, 8]) + >>> np.allclose(q, [-44, -14, 48, 28]) + True + + Args: + quaternion1 (np.array): (x,y,z,w) quaternion + quaternion0 (np.array): (x,y,z,w) quaternion + + Returns: + np.array: (x,y,z,w) multiplied quaternion + """ + x0, y0, z0, w0 = quaternion0 + x1, y1, z1, w1 = quaternion1 + return np.array( + ( + x1 * w0 + y1 * z0 - z1 * y0 + w1 * x0, + -x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0, + x1 * y0 - y1 * x0 + z1 * w0 + w1 * z0, + -x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0, + ), + dtype=np.float32, + ) + + +def quat_conjugate(quaternion): + """ + Return conjugate of quaternion. + + E.g.: + >>> q0 = random_quaternion() + >>> q1 = quat_conjugate(q0) + >>> q1[3] == q0[3] and all(q1[:3] == -q0[:3]) + True + + Args: + quaternion (np.array): (x,y,z,w) quaternion + + Returns: + np.array: (x,y,z,w) quaternion conjugate + """ + return np.array( + (-quaternion[0], -quaternion[1], -quaternion[2], quaternion[3]), + dtype=np.float32, + ) + + +def quat_inverse(quaternion): + """ + Return inverse of quaternion. 
+ + E.g.: + >>> q0 = random_quaternion() + >>> q1 = quat_inverse(q0) + >>> np.allclose(quat_multiply(q0, q1), [0, 0, 0, 1]) + True + + Args: + quaternion (np.array): (x,y,z,w) quaternion + + Returns: + np.array: (x,y,z,w) quaternion inverse + """ + return quat_conjugate(quaternion) / np.dot(quaternion, quaternion) + + +def quat_distance(quaternion1, quaternion0): + """ + Returns distance between two quaternions, such that distance * quaternion0 = quaternion1 + + Args: + quaternion1 (np.array): (x,y,z,w) quaternion + quaternion0 (np.array): (x,y,z,w) quaternion + + Returns: + np.array: (x,y,z,w) quaternion distance + """ + return quat_multiply(quaternion1, quat_inverse(quaternion0)) + + +def quat_slerp(quat0, quat1, fraction, shortestpath=True): + """ + Return spherical linear interpolation between two quaternions. + + E.g.: + >>> q0 = random_quat() + >>> q1 = random_quat() + >>> q = quat_slerp(q0, q1, 0.0) + >>> np.allclose(q, q0) + True + + >>> q = quat_slerp(q0, q1, 1.0) + >>> np.allclose(q, q1) + True + + >>> q = quat_slerp(q0, q1, 0.5) + >>> angle = math.acos(np.dot(q0, q)) + >>> np.allclose(2.0, math.acos(np.dot(q0, q1)) / angle) or \ + np.allclose(2.0, math.acos(-np.dot(q0, q1)) / angle) + True + + Args: + quat0 (np.array): (x,y,z,w) quaternion startpoint + quat1 (np.array): (x,y,z,w) quaternion endpoint + fraction (float): fraction of interpolation to calculate + shortestpath (bool): If True, will calculate the shortest path + + Returns: + np.array: (x,y,z,w) quaternion distance + """ + q0 = unit_vector(quat0[:4]) + q1 = unit_vector(quat1[:4]) + if fraction == 0.0: + return q0 + elif fraction == 1.0: + return q1 + d = np.dot(q0, q1) + if abs(abs(d) - 1.0) < EPS: + return q0 + if shortestpath and d < 0.0: + # invert rotation + d = -d + q1 *= -1.0 + angle = math.acos(np.clip(d, -1, 1)) + if abs(angle) < EPS: + return q0 + isin = 1.0 / math.sin(angle) + q0 *= math.sin((1.0 - fraction) * angle) * isin + q1 *= math.sin(fraction * angle) * isin + q0 += q1 + return q0 + + +def random_quat(rand=None): + """ + Return uniform random unit quaternion. + + E.g.: + >>> q = random_quat() + >>> np.allclose(1.0, vector_norm(q)) + True + >>> q = random_quat(np.random.random(3)) + >>> q.shape + (4,) + + Args: + rand (3-array or None): If specified, must be three independent random variables that are uniformly distributed + between 0 and 1. + + Returns: + np.array: (x,y,z,w) random quaternion + """ + if rand is None: + rand = np.random.rand(3) + else: + assert len(rand) == 3 + r1 = np.sqrt(1.0 - rand[0]) + r2 = np.sqrt(rand[0]) + pi2 = math.pi * 2.0 + t1 = pi2 * rand[1] + t2 = pi2 * rand[2] + return np.array( + (np.sin(t1) * r1, np.cos(t1) * r1, np.sin(t2) * r2, np.cos(t2) * r2), + dtype=np.float32, + ) + + +def random_axis_angle(angle_limit=None, random_state=None): + """ + Samples an axis-angle rotation by first sampling a random axis + and then sampling an angle. If @angle_limit is provided, the size + of the rotation angle is constrained. + + If @random_state is provided (instance of np.random.RandomState), it + will be used to generate random numbers. 
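+
+    E.g. (illustrative sketch, assuming a seeded RandomState):
+        >>> axis, angle = random_axis_angle(angle_limit=0.5, random_state=np.random.RandomState(0))
+        >>> np.allclose(np.linalg.norm(axis), 1.0) and 0.0 <= angle <= 0.5
+        True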
+ + Args: + angle_limit (None or float): If set, determines magnitude limit of angles to generate + random_state (None or RandomState): RNG to use if specified + + Raises: + AssertionError: [Invalid RNG] + """ + if angle_limit is None: + angle_limit = 2.0 * np.pi + + if random_state is not None: + assert isinstance(random_state, np.random.RandomState) + npr = random_state + else: + npr = np.random + + # sample random axis using a normalized sample from spherical Gaussian. + # see (http://extremelearning.com.au/how-to-generate-uniformly-random-points-on-n-spheres-and-n-balls/) + # for why it works. + random_axis = npr.randn(3) + random_axis /= np.linalg.norm(random_axis) + random_angle = npr.uniform(low=0.0, high=angle_limit) + return random_axis, random_angle + + +def vec(values): + """ + Converts value tuple into a numpy vector. + + Args: + values (n-array): a tuple of numbers + + Returns: + np.array: vector of given values + """ + return np.array(values, dtype=np.float32) + + +def mat4(array): + """ + Converts an array to 4x4 matrix. + + Args: + array (n-array): the array in form of vec, list, or tuple + + Returns: + np.array: a 4x4 numpy matrix + """ + return np.array(array, dtype=np.float32).reshape((4, 4)) + + +def mat2pose(hmat): + """ + Converts a homogeneous 4x4 matrix into pose. + + Args: + hmat (np.array): a 4x4 homogeneous matrix + + Returns: + 2-tuple: + - (np.array) (x,y,z) position array in cartesian coordinates + - (np.array) (x,y,z,w) orientation array in quaternion form + """ + pos = hmat[:3, 3] + orn = mat2quat(hmat[:3, :3]) + return pos, orn + + +def mat2quat(rmat): + """ + Converts given rotation matrix to quaternion. + + Args: + rmat (np.array): (..., 3, 3) rotation matrix + + Returns: + np.array: (..., 4) (x,y,z,w) float quaternion angles + """ + return R.from_matrix(rmat).as_quat() + + +def vec2quat(vec, up=(0, 0, 1.0)): + """ + Converts given 3d-direction vector @vec to quaternion orientation with respect to another direction vector @up + + Args: + vec (3-array): (x,y,z) direction vector (possible non-normalized) + up (3-array): (x,y,z) direction vector representing the canonical up direction (possible non-normalized) + """ + # See https://stackoverflow.com/questions/15873996/converting-a-direction-vector-to-a-quaternion-rotation + # Take cross product of @up and @vec to get @s_n, and then cross @vec and @s_n to get @u_n + # Then compose 3x3 rotation matrix and convert into quaternion + vec_n = vec / np.linalg.norm(vec) # x + up_n = up / np.linalg.norm(up) + s_n = np.cross(up_n, vec_n) # y + u_n = np.cross(vec_n, s_n) # z + return mat2quat(np.array([vec_n, s_n, u_n]).T) + + +def euler2mat(euler): + """ + Converts euler angles into rotation matrix form + + Args: + euler (np.array): (r,p,y) angles + + Returns: + np.array: 3x3 rotation matrix + + Raises: + AssertionError: [Invalid input shape] + """ + + euler = np.asarray(euler, dtype=np.float64) + assert euler.shape[-1] == 3, "Invalid shaped euler {}".format(euler) + + return R.from_euler("xyz", euler).as_matrix() + + +def mat2euler(rmat): + """ + Converts given rotation matrix to euler angles in radian. + + Args: + rmat (np.array): 3x3 rotation matrix + + Returns: + np.array: (r,p,y) converted euler angles in radian vec3 float + """ + M = np.array(rmat, dtype=np.float32, copy=False)[:3, :3] + return R.from_matrix(M).as_euler("xyz") + + +def pose2mat(pose): + """ + Converts pose to homogeneous matrix. + + Args: + pose (2-tuple): a (pos, orn) tuple where pos is vec3 float cartesian, and + orn is vec4 float quaternion. 
+
+    Returns:
+        np.array: 4x4 homogeneous matrix
+    """
+    homo_pose_mat = np.zeros((4, 4), dtype=np.float32)
+    homo_pose_mat[:3, :3] = quat2mat(pose[1])
+    homo_pose_mat[:3, 3] = np.array(pose[0], dtype=np.float32)
+    homo_pose_mat[3, 3] = 1.0
+    return homo_pose_mat
+
+
+def quat2mat(quaternion):
+    """
+    Converts given quaternion to matrix.
+
+    Args:
+        quaternion (np.array): (..., 4) (x,y,z,w) float quaternion angles
+
+    Returns:
+        np.array: (..., 3, 3) rotation matrix
+    """
+    return R.from_quat(quaternion).as_matrix()
+
+
+def quat2axisangle(quat):
+    """
+    Converts quaternion to axis-angle format.
+    Returns a unit vector direction scaled by its angle in radians.
+
+    Args:
+        quat (np.array): (x,y,z,w) vec4 float angles
+
+    Returns:
+        np.array: (ax,ay,az) axis-angle exponential coordinates
+    """
+    return R.from_quat(quat).as_rotvec()
+
+
+def axisangle2quat(vec):
+    """
+    Converts scaled axis-angle to quat.
+
+    Args:
+        vec (np.array): (ax,ay,az) axis-angle exponential coordinates
+
+    Returns:
+        np.array: (x,y,z,w) vec4 float angles
+    """
+    return R.from_rotvec(vec).as_quat()
+
+
+def euler2quat(euler):
+    """
+    Converts euler angles into quaternion form
+
+    Args:
+        euler (np.array): (r,p,y) angles
+
+    Returns:
+        np.array: (x,y,z,w) float quaternion angles
+
+    Raises:
+        AssertionError: [Invalid input shape]
+    """
+    return R.from_euler("xyz", euler).as_quat()
+
+
+def quat2euler(quat):
+    """
+    Converts given quaternion into euler angles in radian.
+
+    Args:
+        quat (np.array): (x,y,z,w) float quaternion angles
+
+    Returns:
+        np.array: (r,p,y) angles
+
+    Raises:
+        AssertionError: [Invalid input shape]
+    """
+    return R.from_quat(quat).as_euler("xyz")
+
+
+def pose_in_A_to_pose_in_B(pose_A, pose_A_in_B):
+    """
+    Converts a homogeneous matrix corresponding to a point C in frame A
+    to a homogeneous matrix corresponding to the same point C in frame B.
+
+    Args:
+        pose_A (np.array): 4x4 matrix corresponding to the pose of C in frame A
+        pose_A_in_B (np.array): 4x4 matrix corresponding to the pose of A in frame B
+
+    Returns:
+        np.array: 4x4 matrix corresponding to the pose of C in frame B
+    """
+
+    # pose of A in B takes a point in A and transforms it to a point in B.
+
+    # pose of C in B = pose of A in B * pose of C in A
+    # take a point in C, transform it to A, then to B
+    # T_B^C = T_A^C * T_B^A
+    return pose_A_in_B.dot(pose_A)
+
+
+def pose_inv(pose_mat):
+    """
+    Computes the inverse of a homogeneous matrix corresponding to the pose of some
+    frame B in frame A. The inverse is the pose of frame A in frame B.
+
+    Args:
+        pose_mat (np.array): 4x4 matrix for the pose to inverse
+
+    Returns:
+        np.array: 4x4 matrix for the inverse pose
+    """
+
+    # Note, the inverse of a pose matrix is the following
+    # [R t; 0 1]^-1 = [R.T -R.T*t; 0 1]
+
+    # Intuitively, this makes sense.
+    # The original pose matrix translates by t, then rotates by R.
+    # We just invert the rotation by applying R-1 = R.T, and also translate back.
+    # Since we apply translation first before rotation, we need to translate by
+    # -t in the original frame, which is -R-1*t in the new frame, and then rotate back by
+    # R-1 to align the axis again.
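+    # E.g. (illustrative sketch): for a pure translation T = make_pose([1., 2., 3.], np.eye(3)),
+    # pose_inv(T) @ T recovers the 4x4 identity.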
+ + pose_inv = np.zeros((4, 4)) + pose_inv[:3, :3] = pose_mat[:3, :3].T + pose_inv[:3, 3] = -pose_inv[:3, :3].dot(pose_mat[:3, 3]) + pose_inv[3, 3] = 1.0 + return pose_inv + + +def pose_transform(pos1, quat1, pos0, quat0): + """ + Conducts forward transform from pose (pos0, quat0) to pose (pos1, quat1): + + pose1 @ pose0, NOT pose0 @ pose1 + + Args: + pos1: (x,y,z) position to transform + quat1: (x,y,z,w) orientation to transform + pos0: (x,y,z) initial position + quat0: (x,y,z,w) initial orientation + + Returns: + 2-tuple: + - (np.array) (x,y,z) position array in cartesian coordinates + - (np.array) (x,y,z,w) orientation array in quaternion form + """ + # Get poses + mat0 = pose2mat((pos0, quat0)) + mat1 = pose2mat((pos1, quat1)) + + # Multiply and convert back to pos, quat + return mat2pose(mat1 @ mat0) + + +def invert_pose_transform(pos, quat): + """ + Inverts a pose transform + + Args: + pos: (x,y,z) position to transform + quat: (x,y,z,w) orientation to transform + + Returns: + 2-tuple: + - (np.array) (x,y,z) position array in cartesian coordinates + - (np.array) (x,y,z,w) orientation array in quaternion form + """ + # Get pose + mat = pose2mat((pos, quat)) + + # Invert pose and convert back to pos, quat + return mat2pose(pose_inv(mat)) + + +def relative_pose_transform(pos1, quat1, pos0, quat0): + """ + Computes relative forward transform from pose (pos0, quat0) to pose (pos1, quat1), i.e.: solves: + + pose1 = pose0 @ transform + + Args: + pos1: (x,y,z) position to transform + quat1: (x,y,z,w) orientation to transform + pos0: (x,y,z) initial position + quat0: (x,y,z,w) initial orientation + + Returns: + 2-tuple: + - (np.array) (x,y,z) position array in cartesian coordinates + - (np.array) (x,y,z,w) orientation array in quaternion form + """ + # Get poses + mat0 = pose2mat((pos0, quat0)) + mat1 = pose2mat((pos1, quat1)) + + # Invert pose0 and calculate transform + return mat2pose(pose_inv(mat0) @ mat1) + + +def _skew_symmetric_translation(pos_A_in_B): + """ + Helper function to get a skew symmetric translation matrix for converting quantities + between frames. + + Args: + pos_A_in_B (np.array): (x,y,z) position of A in frame B + + Returns: + np.array: 3x3 skew symmetric translation matrix + """ + return np.array( + [ + 0.0, + -pos_A_in_B[2], + pos_A_in_B[1], + pos_A_in_B[2], + 0.0, + -pos_A_in_B[0], + -pos_A_in_B[1], + pos_A_in_B[0], + 0.0, + ] + ).reshape((3, 3)) + + +def vel_in_A_to_vel_in_B(vel_A, ang_vel_A, pose_A_in_B): + """ + Converts linear and angular velocity of a point in frame A to the equivalent in frame B. + + Args: + vel_A (np.array): (vx,vy,vz) linear velocity in A + ang_vel_A (np.array): (wx,wy,wz) angular velocity in A + pose_A_in_B (np.array): 4x4 matrix corresponding to the pose of A in frame B + + Returns: + 2-tuple: + + - (np.array) (vx,vy,vz) linear velocities in frame B + - (np.array) (wx,wy,wz) angular velocities in frame B + """ + pos_A_in_B = pose_A_in_B[:3, 3] + rot_A_in_B = pose_A_in_B[:3, :3] + skew_symm = _skew_symmetric_translation(pos_A_in_B) + vel_B = rot_A_in_B.dot(vel_A) + skew_symm.dot(rot_A_in_B.dot(ang_vel_A)) + ang_vel_B = rot_A_in_B.dot(ang_vel_A) + return vel_B, ang_vel_B + + +def force_in_A_to_force_in_B(force_A, torque_A, pose_A_in_B): + """ + Converts linear and rotational force at a point in frame A to the equivalent in frame B. 
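+
+    E.g. (illustrative sketch): with an identity relative pose the wrench is unchanged:
+        >>> f_b, t_b = force_in_A_to_force_in_B(np.ones(3), np.zeros(3), np.eye(4))
+        >>> np.allclose(f_b, np.ones(3)) and np.allclose(t_b, np.zeros(3))
+        True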
+ + Args: + force_A (np.array): (fx,fy,fz) linear force in A + torque_A (np.array): (tx,ty,tz) rotational force (moment) in A + pose_A_in_B (np.array): 4x4 matrix corresponding to the pose of A in frame B + + Returns: + 2-tuple: + + - (np.array) (fx,fy,fz) linear forces in frame B + - (np.array) (tx,ty,tz) moments in frame B + """ + pos_A_in_B = pose_A_in_B[:3, 3] + rot_A_in_B = pose_A_in_B[:3, :3] + skew_symm = _skew_symmetric_translation(pos_A_in_B) + force_B = rot_A_in_B.T.dot(force_A) + torque_B = -rot_A_in_B.T.dot(skew_symm.dot(force_A)) + rot_A_in_B.T.dot(torque_A) + return force_B, torque_B + + +def rotation_matrix(angle, direction, point=None): + """ + Returns matrix to rotate about axis defined by point and direction. + + E.g.: + >>> angle = (random.random() - 0.5) * (2*math.pi) + >>> direc = numpy.random.random(3) - 0.5 + >>> point = numpy.random.random(3) - 0.5 + >>> R0 = rotation_matrix(angle, direc, point) + >>> R1 = rotation_matrix(angle-2*math.pi, direc, point) + >>> is_same_transform(R0, R1) + True + + >>> R0 = rotation_matrix(angle, direc, point) + >>> R1 = rotation_matrix(-angle, -direc, point) + >>> is_same_transform(R0, R1) + True + + >>> I = numpy.identity(4, numpy.float32) + >>> numpy.allclose(I, rotation_matrix(math.pi*2, direc)) + True + + >>> numpy.allclose(2., numpy.trace(rotation_matrix(math.pi/2, + ... direc, point))) + True + + Args: + angle (float): Magnitude of rotation + direction (np.array): (ax,ay,az) axis about which to rotate + point (None or np.array): If specified, is the (x,y,z) point about which the rotation will occur + + Returns: + np.array: 4x4 homogeneous matrix that includes the desired rotation + """ + sina = math.sin(angle) + cosa = math.cos(angle) + direction = unit_vector(direction[:3]) + # rotation matrix around unit vector + R = np.array(((cosa, 0.0, 0.0), (0.0, cosa, 0.0), (0.0, 0.0, cosa)), dtype=np.float32) + R += np.outer(direction, direction) * (1.0 - cosa) + direction *= sina + R += np.array( + ( + (0.0, -direction[2], direction[1]), + (direction[2], 0.0, -direction[0]), + (-direction[1], direction[0], 0.0), + ), + dtype=np.float32, + ) + M = np.identity(4) + M[:3, :3] = R + if point is not None: + # rotation not around origin + point = np.array(point[:3], dtype=np.float32, copy=False) + M[:3, 3] = point - np.dot(R, point) + return M + + +def clip_translation(dpos, limit): + """ + Limits a translation (delta position) to a specified limit + + Scales down the norm of the dpos to 'limit' if norm(dpos) > limit, else returns immediately + + Args: + dpos (n-array): n-dim Translation being clipped (e,g.: (x, y, z)) -- numpy array + limit (float): Value to limit translation by -- magnitude (scalar, in same units as input) + + Returns: + 2-tuple: + + - (np.array) Clipped translation (same dimension as inputs) + - (bool) whether the value was clipped or not + """ + input_norm = np.linalg.norm(dpos) + return (dpos * limit / input_norm, True) if input_norm > limit else (dpos, False) + + +def clip_rotation(quat, limit): + """ + Limits a (delta) rotation to a specified limit + + Converts rotation to axis-angle, clips, then re-converts back into quaternion + + Args: + quat (np.array): (x,y,z,w) rotation being clipped + limit (float): Value to limit rotation by -- magnitude (scalar, in radians) + + Returns: + 2-tuple: + + - (np.array) Clipped rotation quaternion (x, y, z, w) + - (bool) whether the value was clipped or not + """ + clipped = False + + # First, normalize the quaternion + quat = quat / np.linalg.norm(quat) + + den = np.sqrt(max(1 - 
quat[3] * quat[3], 0)) + if den == 0: + # This is a zero degree rotation, immediately return + return quat, clipped + else: + # This is all other cases + x = quat[0] / den + y = quat[1] / den + z = quat[2] / den + a = 2 * math.acos(quat[3]) + + # Clip rotation if necessary and return clipped quat + if abs(a) > limit: + a = limit * np.sign(a) / 2 + sa = math.sin(a) + ca = math.cos(a) + quat = np.array([x * sa, y * sa, z * sa, ca]) + clipped = True + + return quat, clipped + + +def make_pose(translation, rotation): + """ + Makes a homogeneous pose matrix from a translation vector and a rotation matrix. + + Args: + translation (np.array): (x,y,z) translation value + rotation (np.array): a 3x3 matrix representing rotation + + Returns: + pose (np.array): a 4x4 homogeneous matrix + """ + pose = np.zeros((4, 4)) + pose[:3, :3] = rotation + pose[:3, 3] = translation + pose[3, 3] = 1.0 + return pose + + +def unit_vector(data, axis=None, out=None): + """ + Returns ndarray normalized by length, i.e. eucledian norm, along axis. + + E.g.: + >>> v0 = numpy.random.random(3) + >>> v1 = unit_vector(v0) + >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0)) + True + + >>> v0 = numpy.random.rand(5, 4, 3) + >>> v1 = unit_vector(v0, axis=-1) + >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2) + >>> numpy.allclose(v1, v2) + True + + >>> v1 = unit_vector(v0, axis=1) + >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1) + >>> numpy.allclose(v1, v2) + True + + >>> v1 = numpy.empty((5, 4, 3), dtype=numpy.float32) + >>> unit_vector(v0, axis=1, out=v1) + >>> numpy.allclose(v1, v2) + True + + >>> list(unit_vector([])) + [] + + >>> list(unit_vector([1.0])) + [1.0] + + Args: + data (np.array): data to normalize + axis (None or int): If specified, determines specific axis along data to normalize + out (None or np.array): If specified, will store computation in this variable + + Returns: + None or np.array: If @out is not specified, will return normalized vector. Otherwise, stores the output in @out + """ + if out is None: + data = np.array(data, dtype=np.float32, copy=True) + if data.ndim == 1: + data /= math.sqrt(np.dot(data, data)) + return data + else: + if out is not data: + out[:] = np.array(data, copy=False) + data = out + length = np.atleast_1d(np.sum(data * data, axis)) + np.sqrt(length, length) + if axis is not None: + length = np.expand_dims(length, axis) + data /= length + if out is None: + return data + + +def get_orientation_error(target_orn, current_orn): + """ + Returns the difference between two quaternion orientations as a 3 DOF numpy array. + For use in an impedance controller / task-space PD controller. 
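+
+    E.g. (illustrative sketch): identical orientations give zero error:
+        >>> q = np.array([0.0, 0.0, 0.0, 1.0])
+        >>> np.allclose(get_orientation_error(q, q), np.zeros(3))
+        True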
+ + Args: + target_orn (np.array): (x, y, z, w) desired quaternion orientation + current_orn (np.array): (x, y, z, w) current quaternion orientation + + Returns: + orn_error (np.array): (ax,ay,az) current orientation error, corresponds to + (target_orn - current_orn) + """ + current_orn = np.array([current_orn[3], current_orn[0], current_orn[1], current_orn[2]]) + target_orn = np.array([target_orn[3], target_orn[0], target_orn[1], target_orn[2]]) + + pinv = np.zeros((3, 4)) + pinv[0, :] = [-current_orn[1], current_orn[0], -current_orn[3], current_orn[2]] + pinv[1, :] = [-current_orn[2], current_orn[3], current_orn[0], -current_orn[1]] + pinv[2, :] = [-current_orn[3], -current_orn[2], current_orn[1], current_orn[0]] + orn_error = 2.0 * pinv.dot(np.array(target_orn)) + return orn_error + + +def get_orientation_diff_in_radian(orn0, orn1): + """ + Returns the difference between two quaternion orientations in radian + + Args: + orn0 (np.array): (x, y, z, w) + orn1 (np.array): (x, y, z, w) + + Returns: + orn_diff (float): orientation difference in radian + """ + vec0 = quat2axisangle(orn0) + vec0 /= np.linalg.norm(vec0) + vec1 = quat2axisangle(orn1) + vec1 /= np.linalg.norm(vec1) + return np.arccos(np.dot(vec0, vec1)) + + +def get_pose_error(target_pose, current_pose): + """ + Computes the error corresponding to target pose - current pose as a 6-dim vector. + The first 3 components correspond to translational error while the last 3 components + correspond to the rotational error. + + Args: + target_pose (np.array): a 4x4 homogenous matrix for the target pose + current_pose (np.array): a 4x4 homogenous matrix for the current pose + + Returns: + np.array: 6-dim pose error. + """ + error = np.zeros(6) + + # compute translational error + target_pos = target_pose[:3, 3] + current_pos = current_pose[:3, 3] + pos_err = target_pos - current_pos + + # compute rotational error + r1 = current_pose[:3, 0] + r2 = current_pose[:3, 1] + r3 = current_pose[:3, 2] + r1d = target_pose[:3, 0] + r2d = target_pose[:3, 1] + r3d = target_pose[:3, 2] + rot_err = 0.5 * (np.cross(r1, r1d) + np.cross(r2, r2d) + np.cross(r3, r3d)) + + error[:3] = pos_err + error[3:] = rot_err + return error + + +def matrix_inverse(matrix): + """ + Helper function to have an efficient matrix inversion function. 
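+
+    E.g. (illustrative sketch):
+        >>> M = np.diag([1.0, 2.0, 4.0])
+        >>> np.allclose(matrix_inverse(M) @ M, np.eye(3))
+        True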
+ + Args: + matrix (np.array): 2d-array representing a matrix + + Returns: + np.array: 2d-array representing the matrix inverse + """ + return np.linalg.inv(matrix) + + +def vecs2axisangle(vec0, vec1): + """ + Converts the angle from unnormalized 3D vectors @vec0 to @vec1 into an axis-angle representation of the angle + + Args: + vec0 (np.array): (..., 3) (x,y,z) 3D vector, possibly unnormalized + vec1 (np.array): (..., 3) (x,y,z) 3D vector, possibly unnormalized + """ + # Normalize vectors + vec0 = normalize(vec0, axis=-1) + vec1 = normalize(vec1, axis=-1) + + # Get cross product for direction of angle, and multiply by arcos of the dot product which is the angle + return np.cross(vec0, vec1) * np.arccos((vec0 * vec1).sum(-1, keepdims=True)) + + +def vecs2quat(vec0, vec1, normalized=False): + """ + Converts the angle from unnormalized 3D vectors @vec0 to @vec1 into a quaternion representation of the angle + + Args: + vec0 (np.array): (..., 3) (x,y,z) 3D vector, possibly unnormalized + vec1 (np.array): (..., 3) (x,y,z) 3D vector, possibly unnormalized + normalized (bool): If True, @vec0 and @vec1 are assumed to already be normalized and we will skip the + normalization step (more efficient) + """ + # Normalize vectors if requested + if not normalized: + vec0 = normalize(vec0, axis=-1) + vec1 = normalize(vec1, axis=-1) + + # Half-way Quaternion Solution -- see https://stackoverflow.com/a/11741520 + cos_theta = np.sum(vec0 * vec1, axis=-1, keepdims=True) + quat_unnormalized = np.where( + cos_theta == -1, np.array([1.0, 0, 0, 0]), np.concatenate([np.cross(vec0, vec1), 1 + cos_theta], axis=-1) + ) + return quat_unnormalized / np.linalg.norm(quat_unnormalized, axis=-1, keepdims=True) + + +def l2_distance(v1, v2): + """Returns the L2 distance between vector v1 and v2.""" + return np.linalg.norm(np.array(v1) - np.array(v2)) + + +def frustum(left, right, bottom, top, znear, zfar): + """Create view frustum matrix.""" + assert right != left + assert bottom != top + assert znear != zfar + + M = np.zeros((4, 4), dtype=np.float32) + M[0, 0] = +2.0 * znear / (right - left) + M[2, 0] = (right + left) / (right - left) + M[1, 1] = +2.0 * znear / (top - bottom) + # TODO: Put this back to 3,1 + # M[3, 1] = (top + bottom) / (top - bottom) + M[2, 1] = (top + bottom) / (top - bottom) + M[2, 2] = -(zfar + znear) / (zfar - znear) + M[3, 2] = -2.0 * znear * zfar / (zfar - znear) + M[2, 3] = -1.0 + return M + + +def ortho(left, right, bottom, top, znear, zfar): + """Create orthonormal projection matrix.""" + assert right != left + assert bottom != top + assert znear != zfar + + M = np.zeros((4, 4), dtype=np.float32) + M[0, 0] = 2.0 / (right - left) + M[1, 1] = 2.0 / (top - bottom) + M[2, 2] = -2.0 / (zfar - znear) + M[3, 0] = -(right + left) / (right - left) + M[3, 1] = -(top + bottom) / (top - bottom) + M[3, 2] = -(zfar + znear) / (zfar - znear) + M[3, 3] = 1.0 + return M + + +def perspective(fovy, aspect, znear, zfar): + """Create perspective projection matrix.""" + # fovy is in degree + assert znear != zfar + h = np.tan(fovy / 360.0 * np.pi) * znear + w = h * aspect + return frustum(-w, w, -h, h, znear, zfar) + + +def anorm(x, axis=None, keepdims=False): + """Compute L2 norms alogn specified axes.""" + return np.linalg.norm(x, axis=axis, keepdims=keepdims) + + +def normalize(v, axis=None, eps=1e-10): + """L2 Normalize along specified axes.""" + norm = anorm(v, axis=axis, keepdims=True) + return v / np.where(norm < eps, eps, norm) + + +def cartesian_to_polar(x, y): + """Convert cartesian coordinate to polar 
coordinate"""
+    rho = np.sqrt(x**2 + y**2)
+    phi = np.arctan2(y, x)
+    return rho, phi
+
+
+def deg2rad(deg):
+    return deg * np.pi / 180.0
+
+
+def rad2deg(rad):
+    return rad * 180.0 / np.pi
+
+
+def check_quat_right_angle(quat, atol=5e-2):
+    """
+    Check by making sure the quaternion is some permutation of +/- (1, 0, 0, 0),
+    +/- (0.707, 0.707, 0, 0), or +/- (0.5, 0.5, 0.5, 0.5)
+    Because orientations are all normalized (same L2-norm), every orientation should have a unique L1-norm
+    So we check the L1-norm of the absolute value of the orientation as a proxy for verifying these values
+
+    Args:
+        quat (4-array): (x,y,z,w) quaternion orientation to check
+        atol (float): Absolute tolerance permitted
+
+    Returns:
+        bool: Whether the quaternion is a right angle or not
+    """
+    return np.any(np.isclose(np.abs(quat).sum(), np.array([1.0, 1.414, 2.0]), atol=atol))
+
+
+def z_angle_from_quat(quat):
+    """Get the angle around the Z axis produced by the quaternion."""
+    rotated_X_axis = R.from_quat(quat).apply([1, 0, 0])
+    return np.arctan2(rotated_X_axis[1], rotated_X_axis[0])
+
+
+def z_rotation_from_quat(quat):
+    """Get the quaternion for the rotation around the Z axis produced by the quaternion."""
+    return R.from_euler("z", z_angle_from_quat(quat)).as_quat()
+
+
+def integer_spiral_coordinates(n):
+    """A function to map integers to 2D coordinates in a spiral pattern around the origin."""
+    # Map integers from Z to Z^2 in a spiral pattern around the origin.
+    # Sources:
+    # https://www.reddit.com/r/askmath/comments/18vqorf/find_the_nth_coordinate_of_a_square_spiral/
+    # https://oeis.org/A174344
+    m = np.floor(np.sqrt(n))
+    x = ((-1) ** m) * ((n - m * (m + 1)) * (np.floor(2 * np.sqrt(n)) % 2) - np.ceil(m / 2))
+    y = ((-1) ** (m + 1)) * ((n - m * (m + 1)) * (np.floor(2 * np.sqrt(n) + 1) % 2) + np.ceil(m / 2))
+    return int(x), int(y)
+
+
+def calculate_xy_plane_angle(quaternion):
+    """
+    Compute the 2D orientation angle from a quaternion assuming the initial forward vector is along the x-axis.
+
+    Args:
+        quaternion (np.array): (x, y, z, w) quaternion representing the rotation
+
+    Returns:
+        float: The angle (in radians) of the projection of the forward vector onto the XY plane.
+            Returns 0.0 if the projected vector's magnitude is negligibly small.
+    """
+    fwd = R.from_quat(quaternion).apply([1, 0, 0])
+    fwd[2] = 0.0
+
+    if np.linalg.norm(fwd) < 1e-4:
+        return 0.0
+
+    fwd /= np.linalg.norm(fwd)
+    return np.arctan2(fwd[1], fwd[0])
+
+
+@jit(nopython=True)
+def orientation_error(desired, current):
+    """
+    This function calculates a 3-dimensional orientation error vector for use in the
+    impedance controller. It does this by computing the delta rotation between the
+    inputs and converting that rotation to exponential coordinates (axis-angle
+    representation, where the 3d vector is axis * angle).
+    See https://en.wikipedia.org/wiki/Axis%E2%80%93angle_representation for more information.
+ Optimized function to determine orientation error from matrices + + Args: + desired (tensor): (..., 3, 3) where final two dims are 2d array representing target orientation matrix + current (tensor): (..., 3, 3) where final two dims are 2d array representing current orientation matrix + Returns: + tensor: (..., 3) where final dim is (ax, ay, az) axis-angle representing orientation error + """ + # convert input shapes + input_shape = desired.shape[:-2] + desired = desired.reshape(-1, 3, 3) + current = current.reshape(-1, 3, 3) + + # grab relevant info + rc1 = current[:, :, 0] + rc2 = current[:, :, 1] + rc3 = current[:, :, 2] + rd1 = desired[:, :, 0] + rd2 = desired[:, :, 1] + rd3 = desired[:, :, 2] + + error = 0.5 * (np.cross(rc1, rd1) + np.cross(rc2, rd2) + np.cross(rc3, rd3)) + + # Reshape + error = error.reshape(*input_shape, 3) + + return error diff --git a/omnigibson/utils/usd_utils.py b/omnigibson/utils/usd_utils.py index 10939d81e..f80792972 100644 --- a/omnigibson/utils/usd_utils.py +++ b/omnigibson/utils/usd_utils.py @@ -11,6 +11,7 @@ import omnigibson as og import omnigibson.lazy as lazy +from omnigibson.controllers.controller_base import _controller_backend as cb import omnigibson.utils.transform_utils as T from omnigibson.macros import gm from omnigibson.utils.constants import PRIMITIVE_MESH_TYPES, JointType, PrimType @@ -851,19 +852,19 @@ def clear(self): def flush_control(self): if "dof_position_targets" in self._write_idx_cache: - pos_indices = th.tensor(sorted(self._write_idx_cache["dof_position_targets"])) + pos_indices = cb.int_array(sorted(self._write_idx_cache["dof_position_targets"])) pos_targets = self._read_cache["dof_position_targets"] - self._view.set_dof_position_targets(pos_targets, pos_indices) + self._view.set_dof_position_targets(cb.to_torch(pos_targets), cb.to_torch(pos_indices)) if "dof_velocity_targets" in self._write_idx_cache: - vel_indices = th.tensor(sorted(self._write_idx_cache["dof_velocity_targets"])) + vel_indices = cb.int_array(sorted(self._write_idx_cache["dof_velocity_targets"])) vel_targets = self._read_cache["dof_velocity_targets"] - self._view.set_dof_velocity_targets(vel_targets, vel_indices) + self._view.set_dof_velocity_targets(cb.to_torch(vel_targets), cb.to_torch(vel_indices)) if "dof_actuation_forces" in self._write_idx_cache: - eff_indices = th.tensor(sorted(self._write_idx_cache["dof_actuation_forces"])) + eff_indices = cb.int_array(sorted(self._write_idx_cache["dof_actuation_forces"])) eff_targets = self._read_cache["dof_actuation_forces"] - self._view.set_dof_actuation_forces(eff_targets, eff_indices) + self._view.set_dof_actuation_forces(cb.to_torch(eff_targets), cb.to_torch(eff_indices)) def initialize_view(self): # First, get all of the controllable objects in the scene (avoiding circular import) @@ -913,10 +914,10 @@ def set_joint_position_targets(self, prim_path, positions, indices): # Load the current targets. if "dof_position_targets" not in self._read_cache: - self._read_cache["dof_position_targets"] = self._view.get_dof_position_targets() + self._read_cache["dof_position_targets"] = cb.from_torch(self._view.get_dof_position_targets()) # Update the target - self._read_cache["dof_position_targets"][idx][indices] = positions + self._read_cache["dof_position_targets"][idx, indices] = positions # Add this index to the write cache self._write_idx_cache["dof_position_targets"].add(idx) @@ -927,10 +928,10 @@ def set_joint_velocity_targets(self, prim_path, velocities, indices): # Load the current targets. 
if "dof_velocity_targets" not in self._read_cache: - self._read_cache["dof_velocity_targets"] = self._view.get_dof_velocity_targets() + self._read_cache["dof_velocity_targets"] = cb.from_torch(self._view.get_dof_velocity_targets()) # Update the target - self._read_cache["dof_velocity_targets"][idx][indices] = velocities + self._read_cache["dof_velocity_targets"][idx, indices] = velocities # Add this index to the write cache self._write_idx_cache["dof_velocity_targets"].add(idx) @@ -941,17 +942,17 @@ def set_joint_efforts(self, prim_path, efforts, indices): # Load the current targets. if "dof_actuation_forces" not in self._read_cache: - self._read_cache["dof_actuation_forces"] = self._view.get_dof_actuation_forces() + self._read_cache["dof_actuation_forces"] = cb.from_torch(self._view.get_dof_actuation_forces()) # Update the target - self._read_cache["dof_actuation_forces"][idx][indices] = efforts + self._read_cache["dof_actuation_forces"][idx, indices] = efforts # Add this index to the write cache self._write_idx_cache["dof_actuation_forces"].add(idx) def get_root_transform(self, prim_path): if "root_transforms" not in self._read_cache: - self._read_cache["root_transforms"] = self._view.get_root_transforms() + self._read_cache["root_transforms"] = cb.from_torch(self._view.get_root_transforms()) idx = self._idx[prim_path] pose = self._read_cache["root_transforms"][idx] @@ -982,79 +983,79 @@ def get_angular_velocity(self, prim_path): def get_root_linear_velocity(self, prim_path): if "root_velocities" not in self._read_cache: - self._read_cache["root_velocities"] = self._view.get_root_velocities() + self._read_cache["root_velocities"] = cb.from_torch(self._view.get_root_velocities()) idx = self._idx[prim_path] - return self._read_cache["root_velocities"][idx][:3] + return self._read_cache["root_velocities"][idx, :3] def get_root_angular_velocity(self, prim_path): if "root_velocities" not in self._read_cache: - self._read_cache["root_velocities"] = self._view.get_root_velocities() + self._read_cache["root_velocities"] = cb.from_torch(self._view.get_root_velocities()) idx = self._idx[prim_path] - return self._read_cache["root_velocities"][idx][3:] + return self._read_cache["root_velocities"][idx, 3:] def get_relative_linear_velocity(self, prim_path): orn = self.get_position_orientation(prim_path)[1] linvel = self.get_linear_velocity(prim_path) # x.T --> transpose (inverse) orientation - return T.quat2mat(orn).T @ linvel + return cb.T.quat2mat(orn).T @ linvel def get_relative_angular_velocity(self, prim_path): orn = self.get_position_orientation(prim_path)[1] angvel = self.get_angular_velocity(prim_path) # x.T --> transpose (inverse) orientation - return T.quat2mat(orn).T @ angvel + return cb.T.quat2mat(orn).T @ angvel def get_joint_positions(self, prim_path): if "dof_positions" not in self._read_cache: - self._read_cache["dof_positions"] = self._view.get_dof_positions() + self._read_cache["dof_positions"] = cb.from_torch(self._view.get_dof_positions()) idx = self._idx[prim_path] return self._read_cache["dof_positions"][idx] def get_joint_velocities(self, prim_path): if "dof_velocities" not in self._read_cache: - self._read_cache["dof_velocities"] = self._view.get_dof_velocities() + self._read_cache["dof_velocities"] = cb.from_torch(self._view.get_dof_velocities()) idx = self._idx[prim_path] return self._read_cache["dof_velocities"][idx] def get_joint_efforts(self, prim_path): if "dof_projected_joint_forces" not in self._read_cache: - self._read_cache["dof_projected_joint_forces"] = 
self._view.get_dof_projected_joint_forces() + self._read_cache["dof_projected_joint_forces"] = cb.from_torch(self._view.get_dof_projected_joint_forces()) idx = self._idx[prim_path] return self._read_cache["dof_projected_joint_forces"][idx] def get_mass_matrix(self, prim_path): if "mass_matrices" not in self._read_cache: - self._read_cache["mass_matrices"] = self._view.get_mass_matrices() + self._read_cache["mass_matrices"] = cb.from_torch(self._view.get_mass_matrices()) idx = self._idx[prim_path] return self._read_cache["mass_matrices"][idx] def get_generalized_gravity_forces(self, prim_path): if "generalized_gravity_forces" not in self._read_cache: - self._read_cache["generalized_gravity_forces"] = self._view.get_generalized_gravity_forces() + self._read_cache["generalized_gravity_forces"] = cb.from_torch(self._view.get_generalized_gravity_forces()) idx = self._idx[prim_path] return self._read_cache["generalized_gravity_forces"][idx] def get_coriolis_and_centrifugal_forces(self, prim_path): if "coriolis_and_centrifugal_forces" not in self._read_cache: - self._read_cache["coriolis_and_centrifugal_forces"] = self._view.get_coriolis_and_centrifugal_forces() + self._read_cache["coriolis_and_centrifugal_forces"] = cb.from_torch(self._view.get_coriolis_and_centrifugal_forces()) idx = self._idx[prim_path] return self._read_cache["coriolis_and_centrifugal_forces"][idx] def get_link_transform(self, prim_path, link_name): if "link_transforms" not in self._read_cache: - self._read_cache["link_transforms"] = self._view.get_link_transforms() + self._read_cache["link_transforms"] = cb.from_torch(self._view.get_link_transforms()) idx = self._idx[prim_path] link_idx = self._link_idx[idx][link_name] - pose = self._read_cache["link_transforms"][idx][link_idx] + pose = self._read_cache["link_transforms"][idx, link_idx] return pose[:3], pose[3:] def get_link_relative_position_orientation(self, prim_path, link_name): @@ -1064,15 +1065,15 @@ def get_link_relative_position_orientation(self, prim_path, link_name): world_pos, world_orn = self.get_position_orientation(prim_path) # Compute the relative position and orientation - return T.relative_pose_transform(pos, orn, world_pos, world_orn) + return cb.T.relative_pose_transform(pos, orn, world_pos, world_orn) def get_link_linear_velocity(self, prim_path, link_name): if "link_velocities" not in self._read_cache: - self._read_cache["link_velocities"] = self._view.get_link_velocities() + self._read_cache["link_velocities"] = cb.from_torch(self._view.get_link_velocities()) idx = self._idx[prim_path] link_idx = self._link_idx[idx][link_name] - vel = self._read_cache["link_velocities"][idx][link_idx] + vel = self._read_cache["link_velocities"][idx, link_idx] linvel = vel[:3] return linvel @@ -1084,15 +1085,15 @@ def get_link_relative_linear_velocity(self, prim_path, link_name): _, world_orn = self.get_position_orientation(prim_path) # Compute the relative position and orientation - return T.quat2mat(world_orn).T @ linvel + return cb.T.quat2mat(world_orn).T @ linvel def get_link_angular_velocity(self, prim_path, link_name): if "link_velocities" not in self._read_cache: - self._read_cache["link_velocities"] = self._view.get_link_velocities() + self._read_cache["link_velocities"] = cb.from_torch(self._view.get_link_velocities()) idx = self._idx[prim_path] link_idx = self._link_idx[idx][link_name] - vel = self._read_cache["link_velocities"][idx][link_idx] + vel = self._read_cache["link_velocities"][idx, link_idx] angvel = vel[3:] return angvel @@ -1104,22 +1105,24 @@ def 
get_link_relative_angular_velocity(self, prim_path, link_name): _, world_orn = self.get_position_orientation(prim_path) # Compute the relative position and orientation - return T.quat2mat(world_orn).T @ angvel + return cb.T.quat2mat(world_orn).T @ angvel def get_jacobian(self, prim_path): if "jacobians" not in self._read_cache: - self._read_cache["jacobians"] = self._view.get_jacobians() + self._read_cache["jacobians"] = cb.from_torch(self._view.get_jacobians()) idx = self._idx[prim_path] return self._read_cache["jacobians"][idx] def get_relative_jacobian(self, prim_path): jacobian = self.get_jacobian(prim_path) - ori_t = T.quat2mat(self.get_position_orientation(prim_path)[1]).T - tf = th.zeros((1, 6, 6), dtype=th.float32) + ori_t = cb.T.quat2mat(self.get_position_orientation(prim_path)[1]).T + tf = cb.zeros((1, 6, 6)) tf[:, :3, :3] = ori_t tf[:, 3:, 3:] = ori_t - return tf @ jacobian + # Run this explicitly in pytorch since it's order of magnitude faster than numpy! + # e.g.: 2e-4 vs. 3e-5 on R1 + return cb.from_torch(cb.to_torch(tf) @ cb.to_torch(jacobian)) class ControllableObjectViewAPI: @@ -1233,6 +1236,10 @@ def set_joint_efforts(cls, prim_path, efforts, indices): def get_position_orientation(cls, prim_path): return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_position_orientation(prim_path) + @classmethod + def get_root_position_orientation(cls, prim_path): + return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_root_transform(prim_path) + @classmethod def get_linear_velocity(cls, prim_path): return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_linear_velocity(prim_path) @@ -1279,6 +1286,12 @@ def get_coriolis_and_centrifugal_forces(cls, prim_path): prim_path ) + @classmethod + def get_link_position_orientation(cls, prim_path, link_name): + return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_link_transform( + prim_path, link_name + ) + @classmethod def get_link_relative_position_orientation(cls, prim_path, link_name): return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_link_relative_position_orientation( From 38b2a473cb299ab4f914368e420816ccad620e14 Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Tue, 3 Dec 2024 15:48:22 -0800 Subject: [PATCH 02/38] fix small typo --- .../action_primitives/starter_semantic_action_primitives.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 1b1aec0d0..d9c5e9ef5 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -36,7 +36,7 @@ from omnigibson.robots.locomotion_robot import LocomotionRobot from omnigibson.robots.manipulation_robot import ManipulationRobot from omnigibson.tasks.behavior_task import BehaviorTask -from omnigibson.utils.control_utils import FKSolver, IKSolver, orientation_error +from omnigibson.utils.control_utils import FKSolver, IKSolver from omnigibson.utils.grasping_planning_utils import get_grasp_poses_for_object_sticky, get_grasp_position_for_open from omnigibson.utils.motion_planning_utils import ( detect_robot_collision_in_sim, @@ -1472,7 +1472,7 @@ def _empty_action(self, follow_arm_targets=True): ) delta_pos = target_pos - current_pos if controller.mode == "pose_delta_ori": - delta_orn = orientation_error( + delta_orn = 
T.orientation_error( T.quat2mat(T.axisangle2quat(target_orn_axisangle)), T.quat2mat(current_orn) ) partial_action = th.cat((delta_pos, delta_orn)) From 429cfeb5df46af79ce35492b5726fa291f3d3bda Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Tue, 3 Dec 2024 15:48:36 -0800 Subject: [PATCH 03/38] update setup requirements --- setup.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/setup.py b/setup.py index feaeae568..13ac4cba3 100644 --- a/setup.py +++ b/setup.py @@ -48,6 +48,8 @@ "graphviz>=0.20", "matplotlib>=3.0.0", "lxml", + "numba", + "cffi==1.16.0", ], extras_require={ "dev": [ From 8fa44a0c6a39bd6d4650ec14c5079a3896147d6d Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Wed, 4 Dec 2024 12:02:35 -0800 Subject: [PATCH 04/38] cache indexing values for better efficiency --- omnigibson/robots/active_camera_robot.py | 3 ++- omnigibson/robots/articulated_trunk_robot.py | 3 ++- omnigibson/robots/holonomic_base_robot.py | 2 +- omnigibson/robots/locomotion_robot.py | 3 ++- omnigibson/robots/manipulation_robot.py | 5 +++-- 5 files changed, 10 insertions(+), 6 deletions(-) diff --git a/omnigibson/robots/active_camera_robot.py b/omnigibson/robots/active_camera_robot.py index cb56e4c77..3bf8317d2 100644 --- a/omnigibson/robots/active_camera_robot.py +++ b/omnigibson/robots/active_camera_robot.py @@ -1,4 +1,5 @@ from abc import abstractmethod +from functools import cached_property import torch as th @@ -126,7 +127,7 @@ def camera_joint_names(self): """ raise NotImplementedError - @property + @cached_property def camera_control_idx(self): """ Returns: diff --git a/omnigibson/robots/articulated_trunk_robot.py b/omnigibson/robots/articulated_trunk_robot.py index 889d0e10d..3323db110 100644 --- a/omnigibson/robots/articulated_trunk_robot.py +++ b/omnigibson/robots/articulated_trunk_robot.py @@ -1,4 +1,5 @@ import torch as th +from functools import cached_property from omnigibson.robots.manipulation_robot import ManipulationRobot from omnigibson.utils.python_utils import classproperty @@ -45,7 +46,7 @@ def trunk_link_names(self): def trunk_joint_names(self): raise NotImplementedError("trunk_joint_names must be implemented in subclass") - @property + @cached_property def trunk_control_idx(self): """ Returns: diff --git a/omnigibson/robots/holonomic_base_robot.py b/omnigibson/robots/holonomic_base_robot.py index 1174d9197..7a230ddf4 100644 --- a/omnigibson/robots/holonomic_base_robot.py +++ b/omnigibson/robots/holonomic_base_robot.py @@ -175,7 +175,7 @@ def _initialize(self): # Reload the controllers to update their command_output_limits and control_limits self.reload_controllers(self._controller_config) - @property + @cached_property def base_idx(self): """ Returns: diff --git a/omnigibson/robots/locomotion_robot.py b/omnigibson/robots/locomotion_robot.py index a723be8e8..6e8274509 100644 --- a/omnigibson/robots/locomotion_robot.py +++ b/omnigibson/robots/locomotion_robot.py @@ -1,4 +1,5 @@ from abc import abstractmethod +from functools import cached_property import torch as th @@ -218,7 +219,7 @@ def base_joint_names(self): """ raise NotImplementedError - @property + @cached_property def base_control_idx(self): """ Returns: diff --git a/omnigibson/robots/manipulation_robot.py b/omnigibson/robots/manipulation_robot.py index f3f6d55ff..d101e7feb 100644 --- a/omnigibson/robots/manipulation_robot.py +++ b/omnigibson/robots/manipulation_robot.py @@ -1,5 +1,6 @@ import math from abc import abstractmethod +from functools import cached_property from 
collections import namedtuple from typing import Literal @@ -592,7 +593,7 @@ def finger_joint_names(self): """ raise NotImplementedError - @property + @cached_property def arm_control_idx(self): """ Returns: @@ -604,7 +605,7 @@ def arm_control_idx(self): for arm in self.arm_names } - @property + @cached_property def gripper_control_idx(self): """ Returns: From 5d3bce50d6d6396ccd591a0d92566098343fe1ea Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Wed, 4 Dec 2024 12:04:08 -0800 Subject: [PATCH 05/38] improve control loop efficiency, cache relative tf computations --- omnigibson/controllers/controller_base.py | 13 +- omnigibson/objects/controllable_object.py | 91 ++-------- omnigibson/utils/usd_utils.py | 198 ++++++++++++++-------- 3 files changed, 157 insertions(+), 145 deletions(-) diff --git a/omnigibson/controllers/controller_base.py b/omnigibson/controllers/controller_base.py index a17131e9f..b9f3c38a0 100644 --- a/omnigibson/controllers/controller_base.py +++ b/omnigibson/controllers/controller_base.py @@ -65,6 +65,9 @@ class _ControllerBackend: copy = None eye = None view = None + arange = None + where = None + squeeze = None T = None @classmethod @@ -79,7 +82,7 @@ def set_methods(cls, backend): class _ControllerTorchBackend(_ControllerBackend): array = lambda *args: th.tensor(*args, dtype=th.float32) - int_array = lambda *args: th.tensor(*args, dtype=th.int) + int_array = lambda *args: th.tensor(*args, dtype=th.int32) prod = th.prod cat = th.cat zeros = lambda *args: th.zeros(*args, dtype=th.float32) @@ -104,12 +107,15 @@ class _ControllerTorchBackend(_ControllerBackend): copy = lambda arr: arr.clone() eye = th.eye view = lambda arr, shape: arr.view(shape) + arange = th.arange + where = th.where + squeeze = lambda arr, dim=None: arr.squeeze(dim=dim) T = TT class _ControllerNumpyBackend(_ControllerBackend): array = lambda *args: np.array(*args, dtype=np.float32) - int_array = lambda *args: np.array(*args, dtype=int) + int_array = lambda *args: np.array(*args, dtype=np.int32) prod = np.prod cat = np.concatenate zeros = lambda *args: np.zeros(*args, dtype=np.float32) @@ -134,6 +140,9 @@ class _ControllerNumpyBackend(_ControllerBackend): copy = lambda arr: np.array(arr) eye = np.eye view = lambda arr, shape: arr.reshape(shape) + arange = np.arange + where = np.where + squeeze = lambda arr, dim=None: arr.squeeze(axis=dim) T = NT diff --git a/omnigibson/objects/controllable_object.py b/omnigibson/objects/controllable_object.py index a47d50f5f..bcad603a8 100644 --- a/omnigibson/objects/controllable_object.py +++ b/omnigibson/objects/controllable_object.py @@ -493,8 +493,8 @@ def step(self): # Compose controls u_vec = cb.zeros(self.n_dof) - # By default, the control type is None and the control value is 0 (th.zeros) - i.e. no control applied - u_type_vec = cb.array([ControlType.NONE] * self.n_dof) + # By default, the control type is Effort and the control value is 0 (th.zeros) - i.e. no control applied + u_type_vec = cb.array([ControlType.EFFORT] * self.n_dof) for group, ctrl in control.items(): idx = self._controllers[group].dof_idx u_vec[idx] = ctrl["value"] @@ -556,91 +556,28 @@ def deploy_control(self, control, control_type): f"Control signals, control types, and number of DOF should all be the same!" f"Got {len(control)}, {len(control_type)}, and {self.n_dof} respectively." 
) - # Cast control type to numpy to avoid having to call .item() individually on torch tensors - control_type = cb.to_numpy(control_type) - - # Set indices manually so that we're standardized - indices = range(self.n_dof) - - # Standardize normalized input - n_indices = len(indices) - - # Loop through controls and deploy - # We have to use delicate logic to account for the edge cases where a single joint may contain > 1 DOF - # (e.g.: spherical joint) - ctrl_to_execute = { - ControlType.EFFORT: { - "vals": [], - "idxs": [], - }, - ControlType.POSITION: { - "vals": [], - "idxs": [], - }, - ControlType.VELOCITY: { - "vals": [], - "idxs": [], - }, - ControlType.NONE: { - "vals": [], - "idxs": [], - }, - } - cur_indices_idx = 0 - while cur_indices_idx != n_indices: - # Grab the current DOF index we're controlling and find the corresponding joint - joint = self._dof_to_joints[indices[cur_indices_idx]] - cur_ctrl_idx = indices[cur_indices_idx] - joint_dof = joint.n_dof - if joint_dof > 1: - # Run additional sanity checks since the joint has more than one DOF to make sure our controls, - # control types, and indices all match as expected - - # Make sure the indices are mapped correctly - assert ( - indices[cur_indices_idx + joint_dof] == cur_ctrl_idx + joint_dof - ), "Got mismatched control indices for a single joint!" - # Check to make sure all joints, control_types, and normalized as all the same over n-DOF for the joint - for group_name, group in zip( - ("joints", "control_types"), - (self._dof_to_joints, control_type), - ): - assert ( - len({group[indices[cur_indices_idx + i]] for i in range(joint_dof)}) == 1 - ), f"Not all {group_name} were the same when trying to deploy control for a single joint!" - # Assuming this all passes, we grab the control subvector, type, and normalized value accordingly - ctrl = control[cur_ctrl_idx : cur_ctrl_idx + joint_dof] - else: - # Grab specific control. 
No need to do checks since this is a single value - ctrl = control[cur_ctrl_idx] - - # Deploy control based on type - ctrl_type = control_type[ - cur_ctrl_idx - ] # In multi-DOF joint case all values were already checked to be the same - ctrl_to_execute[ctrl_type]["vals"].append(ctrl) - ctrl_to_execute[ctrl_type]["idxs"].append(cur_ctrl_idx) - # Finally, increment the current index based on how many DOFs were just controlled - cur_indices_idx += joint_dof # set the targets for joints - if len(ctrl_to_execute[ControlType.POSITION]) > 0: + pos_idxs = cb.where(control_type == ControlType.POSITION)[0] + if len(pos_idxs) > 0: ControllableObjectViewAPI.set_joint_position_targets( self.articulation_root_path, - positions=cb.array(ctrl_to_execute[ControlType.POSITION]["vals"]), - indices=cb.int_array(ctrl_to_execute[ControlType.POSITION]["idxs"]), + positions=control[pos_idxs], + indices=pos_idxs, ) - if len(ctrl_to_execute[ControlType.VELOCITY]) > 0: + vel_idxs = cb.where(control_type == ControlType.VELOCITY)[0] + if len(vel_idxs) > 0: ControllableObjectViewAPI.set_joint_velocity_targets( self.articulation_root_path, - velocities=cb.array(ctrl_to_execute[ControlType.VELOCITY]["vals"]), - indices=cb.int_array(ctrl_to_execute[ControlType.VELOCITY]["idxs"]), + velocities=control[vel_idxs], + indices=vel_idxs, ) - if len(ctrl_to_execute[ControlType.VELOCITY]) > 0 or len(ctrl_to_execute[ControlType.NONE]) > 0: + eff_idxs = cb.where(control_type == ControlType.EFFORT)[0] + if len(eff_idxs) > 0: ControllableObjectViewAPI.set_joint_efforts( self.articulation_root_path, - efforts=cb.cat([cb.array(ctrl_to_execute[ControlType.EFFORT]["vals"]), cb.array(ctrl_to_execute[ControlType.NONE]["vals"])]), - indices=cb.cat([cb.int_array(ctrl_to_execute[ControlType.EFFORT]["idxs"]), cb.int_array(ctrl_to_execute[ControlType.NONE]["idxs"])]), + efforts=control[eff_idxs], + indices=eff_idxs, ) def get_control_dict(self): diff --git a/omnigibson/utils/usd_utils.py b/omnigibson/utils/usd_utils.py index f80792972..64c0b6e1e 100644 --- a/omnigibson/utils/usd_utils.py +++ b/omnigibson/utils/usd_utils.py @@ -850,21 +850,54 @@ def clear(self): self._read_cache = {} self._write_idx_cache = collections.defaultdict(set) + def _set_dof_position_targets(self, data, indices, cast=True): + # No casting results in better efficiency + if cast: + data = self._view._frontend.as_contiguous_float32(data) + indices = self._view._frontend.as_contiguous_uint32(indices) + data_desc = self._view._frontend.get_tensor_desc(data) + indices_desc = self._view._frontend.get_tensor_desc(indices) + + if not self._view._backend.set_dof_position_targets(data_desc, indices_desc): + raise Exception("Failed to set DOF positions in backend") + + def _set_dof_velocity_targets(self, data, indices, cast=True): + # No casting results in better efficiency + if cast: + data = self._view._frontend.as_contiguous_float32(data) + indices = self._view._frontend.as_contiguous_uint32(indices) + data_desc = self._view._frontend.get_tensor_desc(data) + indices_desc = self._view._frontend.get_tensor_desc(indices) + + if not self._view._backend.set_dof_velocity_targets(data_desc, indices_desc): + raise Exception("Failed to set DOF velocities in backend") + + def _set_dof_actuation_forces(self, data, indices, cast=True): + # No casting results in better efficiency + if cast: + data = self._view._frontend.as_contiguous_float32(data) + indices = self._view._frontend.as_contiguous_uint32(indices) + data_desc = self._view._frontend.get_tensor_desc(data) + indices_desc = 
self._view._frontend.get_tensor_desc(indices) + + if not self._view._backend.set_dof_actuation_forces(data_desc, indices_desc): + raise Exception("Failed to set DOF actuation forces in backend") + def flush_control(self): if "dof_position_targets" in self._write_idx_cache: pos_indices = cb.int_array(sorted(self._write_idx_cache["dof_position_targets"])) pos_targets = self._read_cache["dof_position_targets"] - self._view.set_dof_position_targets(cb.to_torch(pos_targets), cb.to_torch(pos_indices)) + self._set_dof_position_targets(cb.to_torch(pos_targets), cb.to_torch(pos_indices), cast=False) if "dof_velocity_targets" in self._write_idx_cache: vel_indices = cb.int_array(sorted(self._write_idx_cache["dof_velocity_targets"])) vel_targets = self._read_cache["dof_velocity_targets"] - self._view.set_dof_velocity_targets(cb.to_torch(vel_targets), cb.to_torch(vel_indices)) + self._set_dof_velocity_targets(cb.to_torch(vel_targets), cb.to_torch(vel_indices), cast=False) if "dof_actuation_forces" in self._write_idx_cache: eff_indices = cb.int_array(sorted(self._write_idx_cache["dof_actuation_forces"])) eff_targets = self._read_cache["dof_actuation_forces"] - self._view.set_dof_actuation_forces(cb.to_torch(eff_targets), cb.to_torch(eff_indices)) + self._set_dof_actuation_forces(cb.to_torch(eff_targets), cb.to_torch(eff_indices), cast=False) def initialize_view(self): # First, get all of the controllable objects in the scene (avoiding circular import) @@ -967,45 +1000,58 @@ def get_position_orientation(self, prim_path): else: return self.get_root_transform(prim_path) - def get_linear_velocity(self, prim_path): + def _get_velocities(self, prim_path): if self._base_footprint_link_names[prim_path] is not None: link_name = self._base_footprint_link_names[prim_path] - return self.get_link_linear_velocity(prim_path, link_name) + return self._get_link_velocities(prim_path, link_name) else: - return self.get_root_linear_velocity(prim_path) + return self._get_root_velocities(prim_path) - def get_angular_velocity(self, prim_path): - if self._base_footprint_link_names[prim_path] is not None: - link_name = self._base_footprint_link_names[prim_path] - return self.get_link_angular_velocity(prim_path, link_name) - else: - return self.get_root_angular_velocity(prim_path) + def _get_relative_velocities(self, prim_path): + if "relative_velocities" not in self._read_cache: + self._read_cache["relative_velocities"] = {} - def get_root_linear_velocity(self, prim_path): - if "root_velocities" not in self._read_cache: - self._read_cache["root_velocities"] = cb.from_torch(self._view.get_root_velocities()) + if prim_path not in self._read_cache["relative_velocities"]: + # Compute all tfs at once, including base as well as all links + if "link_velocities" not in self._read_cache: + self._read_cache["link_velocities"] = cb.from_torch(self._view.get_link_velocities()) - idx = self._idx[prim_path] - return self._read_cache["root_velocities"][idx, :3] + idx = self._idx[prim_path] + vels = cb.zeros((len(self._link_idx[idx]) + 1, 6, 1)) + # base vel is the final -1 index + vels[:-1, :, 0] = self._read_cache["link_velocities"][idx, :] + vels[-1, :, 0] = self._get_velocities(prim_path=prim_path) + + tf = cb.zeros((1, 6, 6)) + orn_t = cb.T.quat2mat(self.get_position_orientation(prim_path)[1]).T + tf[0, :3, :3] = orn_t + tf[0, 3:, 3:] = orn_t + # x.T --> transpose (inverse) orientation + # (1, 6, 6) @ (n_links, 6, 1) -> (n_links, 6, 1) -> (n_links, 6) + self._read_cache["relative_velocities"][prim_path] = cb.squeeze(tf @ vels, dim=-1) - 
def get_root_angular_velocity(self, prim_path): + return self._read_cache["relative_velocities"][prim_path] + + def get_linear_velocity(self, prim_path): + return self._get_velocities(prim_path)[:3] + + def get_angular_velocity(self, prim_path): + return self._get_velocities(prim_path)[3:] + + def _get_root_velocities(self, prim_path): if "root_velocities" not in self._read_cache: self._read_cache["root_velocities"] = cb.from_torch(self._view.get_root_velocities()) idx = self._idx[prim_path] - return self._read_cache["root_velocities"][idx, 3:] + return self._read_cache["root_velocities"][idx] def get_relative_linear_velocity(self, prim_path): - orn = self.get_position_orientation(prim_path)[1] - linvel = self.get_linear_velocity(prim_path) - # x.T --> transpose (inverse) orientation - return cb.T.quat2mat(orn).T @ linvel + # base corresponds to final index + return self._get_relative_velocities(prim_path)[-1, :3] def get_relative_angular_velocity(self, prim_path): - orn = self.get_position_orientation(prim_path)[1] - angvel = self.get_angular_velocity(prim_path) - # x.T --> transpose (inverse) orientation - return cb.T.quat2mat(orn).T @ angvel + # base corresponds to final index + return self._get_relative_velocities(prim_path)[-1, 3:] def get_joint_positions(self, prim_path): if "dof_positions" not in self._read_cache: @@ -1058,54 +1104,67 @@ def get_link_transform(self, prim_path, link_name): pose = self._read_cache["link_transforms"][idx, link_idx] return pose[:3], pose[3:] - def get_link_relative_position_orientation(self, prim_path, link_name): - pos, orn = self.get_link_transform(prim_path, link_name) + def _get_relative_poses(self, prim_path): + if "relative_poses" not in self._read_cache: + self._read_cache["relative_poses"] = {} - # Get the root world transform too - world_pos, world_orn = self.get_position_orientation(prim_path) + if prim_path not in self._read_cache["relative_poses"]: + # Compute all tfs at once, including base as well as all links + if "link_transforms" not in self._read_cache: + self._read_cache["link_transforms"] = cb.from_torch(self._view.get_link_transforms()) - # Compute the relative position and orientation - return cb.T.relative_pose_transform(pos, orn, world_pos, world_orn) + idx = self._idx[prim_path] + tfs = cb.zeros((len(self._link_idx[idx]), 4, 4)) + # base vel is the final -1 index + link_tfs = self._read_cache["link_transforms"][idx, :] + tfs[:, 3, 3] = 1.0 + tfs[:, :3, 3] = link_tfs[:, :3] + tfs[:, :3, :3] = cb.T.quat2mat(link_tfs[:, 3:]) + base_tf_inv = cb.zeros((1, 4, 4)) + base_tf_inv[0, :, :] = cb.T.pose_inv(cb.T.pose2mat(self.get_position_orientation(prim_path=prim_path))) - def get_link_linear_velocity(self, prim_path, link_name): - if "link_velocities" not in self._read_cache: - self._read_cache["link_velocities"] = cb.from_torch(self._view.get_link_velocities()) + # (1, 4, 4) @ (n_links, 4, 4) -> (n_links, 4, 4) + rel_tfs = base_tf_inv @ tfs - idx = self._idx[prim_path] - link_idx = self._link_idx[idx][link_name] - vel = self._read_cache["link_velocities"][idx, link_idx] - linvel = vel[:3] - - return linvel - - def get_link_relative_linear_velocity(self, prim_path, link_name): - linvel = self.get_link_linear_velocity(prim_path, link_name) + # Re-convert to quat form + rel_poses = cb.zeros((len(self._link_idx[idx]), 7)) + rel_poses[:, :3] = rel_tfs[:, :3, 3] + rel_poses[:, 3:] = cb.T.mat2quat(rel_tfs[:, :3, :3]) + self._read_cache["relative_poses"][prim_path] = rel_poses - # Get the root world transform too - _, world_orn = 
self.get_position_orientation(prim_path) + return self._read_cache["relative_poses"][prim_path] - # Compute the relative position and orientation - return cb.T.quat2mat(world_orn).T @ linvel + def get_link_relative_position_orientation(self, prim_path, link_name): + idx = self._idx[prim_path] + link_idx = self._link_idx[idx][link_name] + rel_pose = self._get_relative_poses(prim_path)[link_idx] + return rel_pose[:3], rel_pose[3:] - def get_link_angular_velocity(self, prim_path, link_name): + def _get_link_velocities(self, prim_path, link_name): if "link_velocities" not in self._read_cache: self._read_cache["link_velocities"] = cb.from_torch(self._view.get_link_velocities()) idx = self._idx[prim_path] link_idx = self._link_idx[idx][link_name] vel = self._read_cache["link_velocities"][idx, link_idx] - angvel = vel[3:] - return angvel + return vel - def get_link_relative_angular_velocity(self, prim_path, link_name): - angvel = self.get_link_angular_velocity(prim_path, link_name) + def get_link_linear_velocity(self, prim_path, link_name): + return self._get_link_velocities(prim_path, link_name)[:3] - # Get the root world transform too - _, world_orn = self.get_position_orientation(prim_path) + def get_link_relative_linear_velocity(self, prim_path, link_name): + idx = self._idx[prim_path] + link_idx = self._link_idx[idx][link_name] + return self._get_relative_velocities(prim_path)[link_idx, :3] - # Compute the relative position and orientation - return cb.T.quat2mat(world_orn).T @ angvel + def get_link_angular_velocity(self, prim_path, link_name): + return self._get_link_velocities(prim_path, link_name)[3:] + + def get_link_relative_angular_velocity(self, prim_path, link_name): + idx = self._idx[prim_path] + link_idx = self._link_idx[idx][link_name] + return self._get_relative_velocities(prim_path)[link_idx, 3:] def get_jacobian(self, prim_path): if "jacobians" not in self._read_cache: @@ -1115,14 +1174,21 @@ def get_jacobian(self, prim_path): return self._read_cache["jacobians"][idx] def get_relative_jacobian(self, prim_path): - jacobian = self.get_jacobian(prim_path) - ori_t = cb.T.quat2mat(self.get_position_orientation(prim_path)[1]).T - tf = cb.zeros((1, 6, 6)) - tf[:, :3, :3] = ori_t - tf[:, 3:, 3:] = ori_t - # Run this explicitly in pytorch since it's order of magnitude faster than numpy! - # e.g.: 2e-4 vs. 3e-5 on R1 - return cb.from_torch(cb.to_torch(tf) @ cb.to_torch(jacobian)) + if "relative_jacobians" not in self._read_cache: + self._read_cache["relative_jacobians"] = {} + + if prim_path not in self._read_cache["relative_jacobians"]: + jacobian = self.get_jacobian(prim_path) + ori_t = cb.T.quat2mat(self.get_position_orientation(prim_path)[1]).T + tf = cb.zeros((1, 6, 6)) + tf[:, :3, :3] = ori_t + tf[:, 3:, 3:] = ori_t + # Run this explicitly in pytorch since it's order of magnitude faster than numpy! + # e.g.: 2e-4 vs. 
3e-5 on R1 + rel_jac = cb.from_torch(cb.to_torch(tf) @ cb.to_torch(jacobian)) + self._read_cache["relative_jacobians"][prim_path] = rel_jac + + return self._read_cache["relative_jacobians"][prim_path] class ControllableObjectViewAPI: From 6787df3d63796f3ee8332e3d5ff74e101066d782 Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Wed, 4 Dec 2024 16:44:29 -0800 Subject: [PATCH 06/38] optimize controller computation functions --- omnigibson/controllers/ik_controller.py | 131 +++++++++++++++------ omnigibson/controllers/joint_controller.py | 59 +++++++++- omnigibson/controllers/osc_controller.py | 2 +- 3 files changed, 152 insertions(+), 40 deletions(-) diff --git a/omnigibson/controllers/ik_controller.py b/omnigibson/controllers/ik_controller.py index 3e2c46d94..88bd506a5 100644 --- a/omnigibson/controllers/ik_controller.py +++ b/omnigibson/controllers/ik_controller.py @@ -1,5 +1,6 @@ import math from collections.abc import Iterable +from omnigibson.controllers.controller_base import _ControllerBackend, _ControllerTorchBackend, _ControllerNumpyBackend from omnigibson.controllers.controller_base import _controller_backend as cb from omnigibson.controllers import ControlType, ManipulationController @@ -270,8 +271,8 @@ def _update_goal(self, command, control_dict): target_pos, target_quat = self.workspace_pose_limiter(target_pos, target_quat, control_dict) goal_dict = dict( - target_pos=target_pos, - target_quat=target_quat, + target_pos=cb.as_float32(target_pos), + target_ori_mat=cb.as_float32(cb.T.quat2mat(target_quat)), ) return goal_dict @@ -285,7 +286,7 @@ def compute_control(self, goal_dict, control_dict): goal_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped goals necessary for controller computation. Must include the following keys: target_pos: robot-frame (x,y,z) desired end effector position - target_quat: robot-frame (x,y,z,w) desired end effector quaternion orientation + target_ori_mat: robot-frame desired end effector quaternion orientation matrix control_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped states necessary for controller computation. Must include the following keys: joint_position: Array of current joint positions @@ -299,37 +300,23 @@ def compute_control(self, goal_dict, control_dict): Returns: Array[float]: outputted (non-clipped!) velocity control signal to deploy """ - # Grab important info from control dict - pos_relative = control_dict[f"{self.task_name}_pos_relative"] - quat_relative = control_dict[f"{self.task_name}_quat_relative"] - target_pos = goal_dict["target_pos"] - target_quat = goal_dict["target_quat"] - # Calculate and return IK-backed out joint angles - current_joint_pos = control_dict["joint_position"][self.dof_idx] - - # If the delta is really small, we just keep the current joint position. This avoids joint - # drift caused by IK solver inaccuracy even when zero delta actions are provided. - if cb.allclose(pos_relative, target_pos, atol=1e-4) and cb.allclose(quat_relative, target_quat, atol=1e-4): - target_joint_pos = current_joint_pos - else: - # Compute the pose error. Note that this is computed NOT in the EEF frame but still - # in the base frame. 
- pos_err = target_pos - pos_relative - ori_err = cb.T.orientation_error(cb.T.quat2mat(target_quat), cb.T.quat2mat(quat_relative)) - err = cb.cat([pos_err, ori_err]) - - # Use the jacobian to compute a local approximation - j_eef = control_dict[f"{self.task_name}_jacobian_relative"][:, self.dof_idx] - j_eef_pinv = cb.pinv(j_eef) - delta_j = j_eef_pinv @ err - target_joint_pos = current_joint_pos + delta_j - - # Clip values to be within the joint limits - target_joint_pos = target_joint_pos.clip( - min=self._control_limits[ControlType.get_type("position")][0][self.dof_idx], - max=self._control_limits[ControlType.get_type("position")][1][self.dof_idx], - ) + q = control_dict["joint_position"][self.dof_idx] + j_eef = control_dict[f"{self.task_name}_jacobian_relative"][:, self.dof_idx] + ee_pos = control_dict[f"{self.task_name}_pos_relative"] + ee_quat = control_dict[f"{self.task_name}_quat_relative"] + + # Calculate desired joint positions + target_joint_pos = cb.compute_ik_qpos( + q=q, + j_eef=j_eef, + ee_pos=cb.as_float32(ee_pos), + ee_mat=cb.as_float32(cb.T.quat2mat(ee_quat)), + goal_pos=goal_dict["target_pos"], + goal_ori_mat=goal_dict["target_ori_mat"], + q_lower_limit=self._control_limits[ControlType.get_type("position")][0][self.dof_idx], + q_upper_limit=self._control_limits[ControlType.get_type("position")][1][self.dof_idx], + ) # Optionally pass through smoothing filter for better stability if self.control_filter is not None: @@ -340,9 +327,13 @@ def compute_control(self, goal_dict, control_dict): def compute_no_op_goal(self, control_dict): # No-op is maintaining current pose + target_pos = cb.copy(control_dict[f"{self.task_name}_pos_relative"]) + target_quat = cb.copy(control_dict[f"{self.task_name}_quat_relative"]) + + # Convert quat into eef ori mat return dict( - target_pos=control_dict[f"{self.task_name}_pos_relative"], - target_quat=control_dict[f"{self.task_name}_quat_relative"], + target_pos=cb.as_float32(target_pos), + target_ori_mat=cb.as_float32(cb.T.quat2mat(target_quat)), ) def _compute_no_op_action(self, control_dict): @@ -370,9 +361,77 @@ def _compute_no_op_action(self, control_dict): def _get_goal_shapes(self): return dict( target_pos=(3,), - target_quat=(4,), + target_ori_mat=(3, 3), ) @property def command_dim(self): return IK_MODE_COMMAND_DIMS[self.mode] + + +import torch as th +import omnigibson.utils.transform_utils as TT +@th.jit.script +def _compute_ik_qpos_torch( + q: th.Tensor, + j_eef: th.Tensor, + ee_pos: th.Tensor, + ee_mat: th.Tensor, + goal_pos: th.Tensor, + goal_ori_mat: th.Tensor, + q_lower_limit: th.Tensor, + q_upper_limit: th.Tensor, +): + # Compute the pose error. Note that this is computed NOT in the EEF frame but still + # in the base frame. + pos_err = goal_pos - ee_pos + ori_err = TT.orientation_error(goal_ori_mat, ee_mat) + err = th.cat([pos_err, ori_err]) + + # Use the jacobian to compute a local approximation + j_eef_pinv = th.linalg.pinv(j_eef) + delta_j = j_eef_pinv @ err + target_joint_pos = q + delta_j + + # Clip values to be within the joint limits + return target_joint_pos.clip( + min=q_lower_limit, + max=q_upper_limit, + ) + + +import numpy as np +from numba import jit +import omnigibson.utils.transform_utils_np as NT +# Use numba since faster +@jit(nopython=True) +def _compute_ik_qpos_numpy( + q, + j_eef, + ee_pos, + ee_mat, + goal_pos, + goal_ori_mat, + q_lower_limit, + q_upper_limit, +): + # Compute the pose error. Note that this is computed NOT in the EEF frame but still + # in the base frame. 
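+    # The 6D pose error below stacks the 3D position error with the 3D orientation error; the Jacobian pseudoinverse then maps it to a joint-position update.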
+ pos_err = goal_pos - ee_pos + ori_err = NT.orientation_error(goal_ori_mat, ee_mat).astype(np.float32) + err = np.concatenate((pos_err, ori_err)) + + # Use the jacobian to compute a local approximation + j_eef_pinv = np.linalg.pinv(j_eef) + delta_j = j_eef_pinv @ err + target_joint_pos = q + delta_j + + # Clip values to be within the joint limits + return target_joint_pos.clip(q_lower_limit, q_upper_limit) + + +# Set these as part of the backend values +setattr(_ControllerBackend, "compute_ik_qpos", None) +setattr(_ControllerTorchBackend, "compute_ik_qpos", _compute_ik_qpos_torch) +setattr(_ControllerNumpyBackend, "compute_ik_qpos", _compute_ik_qpos_numpy) + diff --git a/omnigibson/controllers/joint_controller.py b/omnigibson/controllers/joint_controller.py index 31bb3f5aa..662422741 100644 --- a/omnigibson/controllers/joint_controller.py +++ b/omnigibson/controllers/joint_controller.py @@ -1,5 +1,6 @@ import math +from omnigibson.controllers.controller_base import _ControllerBackend, _ControllerTorchBackend, _ControllerNumpyBackend from omnigibson.controllers.controller_base import _controller_backend as cb from omnigibson.controllers import ( ControlType, @@ -210,9 +211,7 @@ def compute_control(self, goal_dict, control_dict): else: # effort u = target - dof_idxs_mat = cb.meshgrid(self.dof_idx, self.dof_idx) - mm = control_dict["mass_matrix"][dof_idxs_mat] - u = mm @ u + u = cb.compute_joint_torques(u, control_dict["mass_matrix"], self.dof_idx) # Add gravity compensation if self._use_gravity_compensation: @@ -284,3 +283,57 @@ def control_type(self): @property def command_dim(self): return len(self.dof_idx) + + +import torch as th +@th.compile +def _compute_joint_torques_torch( + u: th.Tensor, + mm: th.Tensor, + dof_idx: th.Tensor, +): + dof_idxs_mat = th.meshgrid(dof_idx, dof_idx, indexing="xy") + return mm[dof_idxs_mat] @ u + + + +import numpy as np +from numba import jit +# Use numba since faster +@jit(nopython=True) +def numba_ix(arr, rows, cols): + """ + Numba compatible implementation of arr[np.ix_(rows, cols)] for 2D arrays. + + Implementation from: + https://github.com/numba/numba/issues/5894#issuecomment-974701551 + + :param arr: 2D array to be indexed + :param rows: Row indices + :param cols: Column indices + :return: 2D array with the given rows and columns of the input array + """ + one_d_index = np.zeros(len(rows) * len(cols), dtype=np.int32) + for i, r in enumerate(rows): + start = i * len(cols) + one_d_index[start: start + len(cols)] = cols + arr.shape[1] * r + + arr_1d = arr.reshape((arr.shape[0] * arr.shape[1], 1)) + slice_1d = np.take(arr_1d, one_d_index) + return slice_1d.reshape((len(rows), len(cols))) + + +@jit(nopython=True) +def _compute_joint_torques_numpy( + u, + mm, + dof_idx, +): + return numba_ix(mm, dof_idx, dof_idx) @ u + + +# Set these as part of the backend values +setattr(_ControllerBackend, "compute_joint_torques", None) +setattr(_ControllerTorchBackend, "compute_joint_torques", _compute_joint_torques_torch) +setattr(_ControllerNumpyBackend, "compute_joint_torques", _compute_joint_torques_numpy) + diff --git a/omnigibson/controllers/osc_controller.py b/omnigibson/controllers/osc_controller.py index 539a89bef..dc74f1178 100644 --- a/omnigibson/controllers/osc_controller.py +++ b/omnigibson/controllers/osc_controller.py @@ -362,7 +362,7 @@ def compute_control(self, goal_dict, control_dict): goal_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped goals necessary for controller computation. 
Must include the following keys: target_pos: robot-frame (x,y,z) desired end effector position - target_quat: robot-frame (x,y,z,w) desired end effector quaternion orientation + target_ori_mat: robot-frame desired end effector quaternion orientation matrix control_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped states necessary for controller computation. Must include the following keys: joint_position: Array of current joint positions From 692df78a0e226ecb31188f531a767309dd323fa4 Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Wed, 4 Dec 2024 16:45:27 -0800 Subject: [PATCH 07/38] cache properties that should not change during runtime --- omnigibson/objects/object_base.py | 2 +- omnigibson/prims/cloth_prim.py | 3 ++- omnigibson/prims/entity_prim.py | 2 +- omnigibson/prims/joint_prim.py | 16 +++++++++++---- omnigibson/robots/a1.py | 11 +++++----- omnigibson/robots/active_camera_robot.py | 2 +- omnigibson/robots/articulated_trunk_robot.py | 6 +++--- omnigibson/robots/behavior_robot.py | 17 ++++++++-------- omnigibson/robots/fetch.py | 21 ++++++++++---------- omnigibson/robots/franka.py | 13 ++++++------ omnigibson/robots/freight.py | 3 ++- omnigibson/robots/holonomic_base_robot.py | 4 ++-- omnigibson/robots/husky.py | 3 ++- omnigibson/robots/locobot.py | 3 ++- omnigibson/robots/locomotion_robot.py | 8 ++++---- omnigibson/robots/manipulation_robot.py | 18 ++++++++--------- omnigibson/robots/r1.py | 17 ++++++++-------- omnigibson/robots/stretch.py | 15 +++++++------- omnigibson/robots/tiago.py | 21 ++++++++++---------- omnigibson/robots/turtlebot.py | 3 ++- omnigibson/robots/vx300s.py | 13 ++++++------ 21 files changed, 111 insertions(+), 90 deletions(-) diff --git a/omnigibson/objects/object_base.py b/omnigibson/objects/object_base.py index c34231551..a4101b42f 100644 --- a/omnigibson/objects/object_base.py +++ b/omnigibson/objects/object_base.py @@ -234,7 +234,7 @@ def _initialize(self): "emissive_intensity": material.emissive_intensity, } - @property + @cached_property def articulation_root_path(self): has_articulated_joints, has_fixed_joints = self.n_joints > 0, self.n_fixed_joints > 0 if self.kinematic_only or ((not has_articulated_joints) and (not has_fixed_joints)): diff --git a/omnigibson/prims/cloth_prim.py b/omnigibson/prims/cloth_prim.py index 0177d8fab..9c416eaeb 100644 --- a/omnigibson/prims/cloth_prim.py +++ b/omnigibson/prims/cloth_prim.py @@ -9,6 +9,7 @@ import math from collections.abc import Iterable +from functools import cached_property import torch as th @@ -160,7 +161,7 @@ def n_particles(self): """ return self._n_particles - @property + @cached_property def kinematic_only(self): """ Returns: diff --git a/omnigibson/prims/entity_prim.py b/omnigibson/prims/entity_prim.py index 6676097fc..b1dc20cc1 100644 --- a/omnigibson/prims/entity_prim.py +++ b/omnigibson/prims/entity_prim.py @@ -1399,7 +1399,7 @@ def self_collisions(self, flag): self.articulation_root_path, "physxArticulation:enabledSelfCollisions", flag ) - @property + @cached_property def kinematic_only(self): """ Returns: diff --git a/omnigibson/prims/joint_prim.py b/omnigibson/prims/joint_prim.py index 18ac6a867..c3769231b 100644 --- a/omnigibson/prims/joint_prim.py +++ b/omnigibson/prims/joint_prim.py @@ -75,6 +75,8 @@ def __init__( self._joint_type = None self._control_type = None self._driven = None + self._body0 = None + self._body1 = None # The following values will only be valid if this joint is part of an articulation self._n_dof = None # The number 
of degrees of freedom this joint provides @@ -232,8 +234,10 @@ def body0(self): None or str: Absolute prim path to the body prim to set as this joint's parent link, or None if there is no body0 specified. """ - targets = self._prim.GetRelationship("physics:body0").GetTargets() - return targets[0].__str__() if len(targets) > 0 else None + if self._body0 is None: + targets = self._prim.GetRelationship("physics:body0").GetTargets() + self._body0 = targets[0].__str__() + return self._body0 @body0.setter def body0(self, body0): @@ -246,6 +250,7 @@ def body0(self, body0): # Make sure prim path is valid assert lazy.omni.isaac.core.utils.prims.is_prim_path_valid(body0), f"Invalid body0 path specified: {body0}" self._prim.GetRelationship("physics:body0").SetTargets([lazy.pxr.Sdf.Path(body0)]) + self._body0 = None @property def body1(self): @@ -256,8 +261,10 @@ def body1(self): None or str: Absolute prim path to the body prim to set as this joint's child link, or None if there is no body1 specified. """ - targets = self._prim.GetRelationship("physics:body1").GetTargets() - return targets[0].__str__() + if self._body1 is None: + targets = self._prim.GetRelationship("physics:body1").GetTargets() + self._body1 = targets[0].__str__() + return self._body1 @body1.setter def body1(self, body1): @@ -270,6 +277,7 @@ def body1(self, body1): # Make sure prim path is valid assert lazy.omni.isaac.core.utils.prims.is_prim_path_valid(body1), f"Invalid body1 path specified: {body1}" self._prim.GetRelationship("physics:body1").SetTargets([lazy.pxr.Sdf.Path(body1)]) + self._body1 = None @property def local_orientation(self): diff --git a/omnigibson/robots/a1.py b/omnigibson/robots/a1.py index 1b3e233c2..fcfd6370f 100644 --- a/omnigibson/robots/a1.py +++ b/omnigibson/robots/a1.py @@ -1,4 +1,5 @@ import os +from functools import cached_property import torch as th @@ -167,23 +168,23 @@ def _default_joint_pos(self): def finger_lengths(self): return {self.default_arm: 0.087} - @property + @cached_property def arm_link_names(self): return {self.default_arm: [f"arm_seg{i+1}" for i in range(5)]} - @property + @cached_property def arm_joint_names(self): return {self.default_arm: [f"arm_joint{i+1}" for i in range(6)]} - @property + @cached_property def eef_link_names(self): return {self.default_arm: self._eef_link_names} - @property + @cached_property def finger_link_names(self): return {self.default_arm: self._finger_link_names} - @property + @cached_property def finger_joint_names(self): return {self.default_arm: self._finger_joint_names} diff --git a/omnigibson/robots/active_camera_robot.py b/omnigibson/robots/active_camera_robot.py index 3bf8317d2..6d330b489 100644 --- a/omnigibson/robots/active_camera_robot.py +++ b/omnigibson/robots/active_camera_robot.py @@ -115,7 +115,7 @@ def _default_controller_config(self): return cfg - @property + @cached_property @abstractmethod def camera_joint_names(self): """ diff --git a/omnigibson/robots/articulated_trunk_robot.py b/omnigibson/robots/articulated_trunk_robot.py index 3323db110..d4906c65f 100644 --- a/omnigibson/robots/articulated_trunk_robot.py +++ b/omnigibson/robots/articulated_trunk_robot.py @@ -34,15 +34,15 @@ def get_control_dict(self): return fcns - @property + @cached_property def trunk_links(self): return [self.links[name] for name in self.trunk_link_names] - @property + @cached_property def trunk_link_names(self): raise NotImplementedError - @property + @cached_property def trunk_joint_names(self): raise NotImplementedError("trunk_joint_names must be implemented in 
subclass") diff --git a/omnigibson/robots/behavior_robot.py b/omnigibson/robots/behavior_robot.py index b41aa0cf3..7d3693412 100644 --- a/omnigibson/robots/behavior_robot.py +++ b/omnigibson/robots/behavior_robot.py @@ -1,4 +1,5 @@ import itertools +from functools import cached_property import math import os from abc import ABC @@ -153,18 +154,18 @@ def n_arms(cls): def arm_names(cls): return ["left", "right"] - @property + @cached_property def eef_link_names(self): dic = {arm: f"{arm}_{m.PALM_LINK_NAME}" for arm in self.arm_names} dic["head"] = "head" return dic - @property + @cached_property def arm_link_names(self): """The head counts as a arm since it has the same 33 joint configuration""" return {arm: [f"{arm}_{component}" for component in m.COMPONENT_SUFFIXES] for arm in self.arm_names + ["head"]} - @property + @cached_property def finger_link_names(self): return { arm: [ @@ -173,22 +174,22 @@ def finger_link_names(self): for arm in self.arm_names } - @property + @cached_property def base_joint_names(self): return [f"base_{component}_joint" for component in m.COMPONENT_SUFFIXES] - @property + @cached_property def camera_joint_names(self): return [f"head_{component}_joint" for component in m.COMPONENT_SUFFIXES] - @property + @cached_property def arm_joint_names(self): """The head counts as a arm since it has the same 33 joint configuration""" return { eef: [f"{eef}_{component}_joint" for component in m.COMPONENT_SUFFIXES] for eef in self.arm_names + ["head"] } - @property + @cached_property def finger_joint_names(self): return { arm: ( @@ -352,7 +353,7 @@ def update_controller_mode(self): self.joints[joint_name].max_effort = m.FINGER_JOINT_MAX_EFFORT self.joints[joint_name].max_velocity = m.FINGER_JOINT_MAX_VELOCITY - @property + @cached_property def base_footprint_link_name(self): """ Name of the actual root link that we are interested in. 
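The hunks above and below replace @property with functools.cached_property on robot metadata that cannot change at runtime, such as link and joint name lists. A minimal sketch of the caching behavior, using a hypothetical standalone class rather than one of OmniGibson's robot classes:

from functools import cached_property

class ExampleRobot:
    @cached_property
    def arm_joint_names(self):
        # Runs only on the first access; the result is then stored in the
        # instance __dict__, so later lookups are plain attribute reads.
        return [f"arm_joint{i}" for i in range(1, 7)]

robot = ExampleRobot()
first = robot.arm_joint_names   # getter executes once and caches the list
second = robot.arm_joint_names  # served straight from the cache
assert first is second

The trade-off is that anything decorated this way must genuinely stay constant for the object's lifetime, which is why only static metadata is converted in this commit.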
diff --git a/omnigibson/robots/fetch.py b/omnigibson/robots/fetch.py index 0fd6496b2..4d6a199a0 100644 --- a/omnigibson/robots/fetch.py +++ b/omnigibson/robots/fetch.py @@ -1,5 +1,6 @@ import math import os +from functools import cached_property import torch as th @@ -271,19 +272,19 @@ def disabled_collision_pairs(self): ["wrist_roll_link", "gripper_link"], ] - @property + @cached_property def base_joint_names(self): return ["l_wheel_joint", "r_wheel_joint"] - @property + @cached_property def camera_joint_names(self): return ["head_pan_joint", "head_tilt_joint"] - @property + @cached_property def trunk_joint_names(self): return ["torso_lift_joint"] - @property + @cached_property def manipulation_link_names(self): return [ "torso_lift_link", @@ -301,7 +302,7 @@ def manipulation_link_names(self): "r_gripper_finger_link", ] - @property + @cached_property def arm_link_names(self): return { self.default_arm: [ @@ -315,7 +316,7 @@ def arm_link_names(self): ] } - @property + @cached_property def arm_joint_names(self): return { self.default_arm: [ @@ -329,21 +330,21 @@ def arm_joint_names(self): ] } - @property + @cached_property def eef_link_names(self): return {self.default_arm: "gripper_link"} - @property + @cached_property def finger_link_names(self): return {self.default_arm: ["r_gripper_finger_link", "l_gripper_finger_link"]} - @property + @cached_property def finger_joint_names(self): return {self.default_arm: ["r_gripper_finger_joint", "l_gripper_finger_joint"]} @property def usd_path(self): - return os.path.join(gm.ASSET_PATH, "models/fetch/fetch/fetch.usd") + return os.path.join(gm.ASSET_PATH, "models/fetch/fetch/fetch.usda") @property def robot_arm_descriptor_yamls(self): diff --git a/omnigibson/robots/franka.py b/omnigibson/robots/franka.py index 358d4c2bd..33b825ea5 100644 --- a/omnigibson/robots/franka.py +++ b/omnigibson/robots/franka.py @@ -1,4 +1,5 @@ import os +from functools import cached_property import torch as th @@ -231,23 +232,23 @@ def _default_joint_pos(self): def finger_lengths(self): return {self.default_arm: 0.1} - @property + @cached_property def arm_link_names(self): return {self.default_arm: [f"panda_link{i}" for i in range(8)]} - @property + @cached_property def arm_joint_names(self): return {self.default_arm: [f"panda_joint{i+1}" for i in range(7)]} - @property + @cached_property def eef_link_names(self): return {self.default_arm: self._eef_link_names} - @property + @cached_property def finger_link_names(self): return {self.default_arm: self._finger_link_names} - @property + @cached_property def finger_joint_names(self): return {self.default_arm: self._finger_joint_names} @@ -271,7 +272,7 @@ def curobo_path(self): ), f"Only franka_panda is currently supported for curobo. 
Got: {self._model_name}" return os.path.join(gm.ASSET_PATH, f"models/franka/{self.model_name}_description_curobo.yaml") - @property + @cached_property def curobo_attached_object_link_names(self): return {self._eef_link_names: "attached_object"} diff --git a/omnigibson/robots/freight.py b/omnigibson/robots/freight.py index 0b3df7ff4..abc44e89b 100644 --- a/omnigibson/robots/freight.py +++ b/omnigibson/robots/freight.py @@ -1,4 +1,5 @@ import os +from functools import cached_property import torch as th @@ -37,7 +38,7 @@ def wheel_radius(self): def wheel_axle_length(self): return 0.372 - @property + @cached_property def base_joint_names(self): return ["r_wheel_joint", "l_wheel_joint"] diff --git a/omnigibson/robots/holonomic_base_robot.py b/omnigibson/robots/holonomic_base_robot.py index 7a230ddf4..915d595d3 100644 --- a/omnigibson/robots/holonomic_base_robot.py +++ b/omnigibson/robots/holonomic_base_robot.py @@ -186,7 +186,7 @@ def base_idx(self): [joints.index(f"base_footprint_{component}_joint") for component in ["x", "y", "z", "rx", "ry", "rz"]] ) - @property + @cached_property def base_joint_names(self): return [f"base_footprint_{component}_joint" for component in ("x", "y", "rz")] @@ -330,7 +330,7 @@ def teleop_data_to_action(self, teleop_action) -> th.Tensor: action[self.base_action_idx] = th.tensor(teleop_action.base).float() * 0.1 return action - @property + @cached_property def base_footprint_link_name(self): raise NotImplementedError("base_footprint_link_name is not implemented for HolonomicBaseRobot") diff --git a/omnigibson/robots/husky.py b/omnigibson/robots/husky.py index fe036e452..2cc829bea 100644 --- a/omnigibson/robots/husky.py +++ b/omnigibson/robots/husky.py @@ -1,4 +1,5 @@ import os +from functools import cached_property import torch as th @@ -23,7 +24,7 @@ def wheel_radius(self): def wheel_axle_length(self): return 0.670 - @property + @cached_property def base_joint_names(self): return ["front_left_wheel", "front_right_wheel", "rear_left_wheel", "rear_right_wheel"] diff --git a/omnigibson/robots/locobot.py b/omnigibson/robots/locobot.py index ff753c6fa..3614c5694 100644 --- a/omnigibson/robots/locobot.py +++ b/omnigibson/robots/locobot.py @@ -1,4 +1,5 @@ import os +from functools import cached_property import torch as th @@ -20,7 +21,7 @@ def wheel_radius(self): def wheel_axle_length(self): return 0.230 - @property + @cached_property def base_joint_names(self): return ["wheel_right_joint", "wheel_left_joint"] diff --git a/omnigibson/robots/locomotion_robot.py b/omnigibson/robots/locomotion_robot.py index 6e8274509..7eafea0e6 100644 --- a/omnigibson/robots/locomotion_robot.py +++ b/omnigibson/robots/locomotion_robot.py @@ -185,19 +185,19 @@ def turn_right(self, delta=0.03): quat = quat_multiply((euler2quat(delta, 0, 0)), quat) self.set_position_orientation(orientation=quat) - @property + @cached_property def non_floor_touching_base_links(self): return [self.links[name] for name in self.non_floor_touching_base_link_names] - @property + @cached_property def non_floor_touching_base_link_names(self): return [self.base_footprint_link_name] - @property + @cached_property def floor_touching_base_links(self): return [self.links[name] for name in self.floor_touching_base_link_names] - @property + @cached_property def floor_touching_base_link_names(self): raise NotImplementedError diff --git a/omnigibson/robots/manipulation_robot.py b/omnigibson/robots/manipulation_robot.py index d101e7feb..87e6ce1cb 100644 --- a/omnigibson/robots/manipulation_robot.py +++ 
b/omnigibson/robots/manipulation_robot.py @@ -531,7 +531,7 @@ def gripper_action_idx(self): ) return gripper_action_idx - @property + @cached_property @abstractmethod def arm_link_names(self): """ @@ -544,7 +544,7 @@ def arm_link_names(self): """ raise NotImplementedError - @property + @cached_property @abstractmethod def arm_joint_names(self): """ @@ -557,7 +557,7 @@ def arm_joint_names(self): """ raise NotImplementedError - @property + @cached_property @abstractmethod def eef_link_names(self): """ @@ -567,7 +567,7 @@ def eef_link_names(self): """ raise NotImplementedError - @property + @cached_property @abstractmethod def finger_link_names(self): """ @@ -580,7 +580,7 @@ def finger_link_names(self): """ raise NotImplementedError - @property + @cached_property @abstractmethod def finger_joint_names(self): """ @@ -617,7 +617,7 @@ def gripper_control_idx(self): for arm in self.arm_names } - @property + @cached_property def arm_links(self): """ Returns: @@ -626,7 +626,7 @@ def arm_links(self): """ return {arm: [self._links[link] for link in self.arm_link_names[arm]] for arm in self.arm_names} - @property + @cached_property def eef_links(self): """ Returns: @@ -635,7 +635,7 @@ def eef_links(self): """ return {arm: self._links[self.eef_link_names[arm]] for arm in self.arm_names} - @property + @cached_property def finger_links(self): """ Returns: @@ -644,7 +644,7 @@ def finger_links(self): """ return {arm: [self._links[link] for link in self.finger_link_names[arm]] for arm in self.arm_names} - @property + @cached_property def finger_joints(self): """ Returns: diff --git a/omnigibson/robots/r1.py b/omnigibson/robots/r1.py index 21e014a38..ebf96ea63 100644 --- a/omnigibson/robots/r1.py +++ b/omnigibson/robots/r1.py @@ -1,4 +1,5 @@ import os +from functools import cached_property import torch as th @@ -193,15 +194,15 @@ def assisted_grasp_end_points(self): for arm in self.arm_names } - @property + @cached_property def floor_touching_base_link_names(self): return ["wheel_link1", "wheel_link2", "wheel_link3"] - @property + @cached_property def trunk_link_names(self): return ["torso_link1", "torso_link2", "torso_link3", "torso_link4"] - @property + @cached_property def trunk_joint_names(self): return [f"torso_joint{i}" for i in range(1, 5)] @@ -213,23 +214,23 @@ def n_arms(cls): def arm_names(cls): return ["left", "right"] - @property + @cached_property def arm_link_names(self): return {arm: [f"{arm}_arm_link{i}" for i in range(1, 7)] for arm in self.arm_names} - @property + @cached_property def arm_joint_names(self): return {arm: [f"{arm}_arm_joint{i}" for i in range(1, 7)] for arm in self.arm_names} - @property + @cached_property def eef_link_names(self): return {arm: f"{arm}_eef_link" for arm in self.arm_names} - @property + @cached_property def finger_link_names(self): return {arm: [f"{arm}_gripper_link{i}" for i in range(1, 3)] for arm in self.arm_names} - @property + @cached_property def finger_joint_names(self): return {arm: [f"{arm}_gripper_axis{i}" for i in range(1, 3)] for arm in self.arm_names} diff --git a/omnigibson/robots/stretch.py b/omnigibson/robots/stretch.py index b2c5c38fc..293db78dc 100644 --- a/omnigibson/robots/stretch.py +++ b/omnigibson/robots/stretch.py @@ -1,5 +1,6 @@ import math import os +from functools import cached_property import torch as th @@ -150,15 +151,15 @@ def disabled_collision_pairs(self): ["respeaker_base", "link_mast"], ] - @property + @cached_property def base_joint_names(self): return ["joint_left_wheel", "joint_right_wheel"] - @property + @cached_property 
def camera_joint_names(self): return ["joint_head_pan", "joint_head_tilt"] - @property + @cached_property def arm_link_names(self): return { self.default_arm: [ @@ -178,7 +179,7 @@ def arm_link_names(self): ] } - @property + @cached_property def arm_joint_names(self): return { self.default_arm: [ @@ -193,11 +194,11 @@ def arm_joint_names(self): ] } - @property + @cached_property def eef_link_names(self): return {self.default_arm: "link_grasp_center"} - @property + @cached_property def finger_link_names(self): return { self.default_arm: [ @@ -208,7 +209,7 @@ def finger_link_names(self): ] } - @property + @cached_property def finger_joint_names(self): return {self.default_arm: ["joint_gripper_finger_right", "joint_gripper_finger_left"]} diff --git a/omnigibson/robots/tiago.py b/omnigibson/robots/tiago.py index 7ec03042c..1cde7c87b 100644 --- a/omnigibson/robots/tiago.py +++ b/omnigibson/robots/tiago.py @@ -1,6 +1,7 @@ import math import os from typing import Literal +from functools import cached_property import torch as th @@ -139,7 +140,7 @@ def __init__( **kwargs, ) - @property + @cached_property def arm_joint_names(self): names = dict() for arm in self.arm_names: @@ -208,7 +209,7 @@ def _post_load(self): def base_footprint_link_name(self): return "base_footprint" - @property + @cached_property def floor_touching_base_link_names(self): return [ "wheel_front_left_link", @@ -363,15 +364,15 @@ def disabled_collision_pairs(self): ["arm_right_tool_link", "wrist_right_ft_tool_link"], ] - @property + @cached_property def camera_joint_names(self): return ["head_1_joint", "head_2_joint"] - @property + @cached_property def trunk_joint_names(self): return ["torso_lift_joint"] - @property + @cached_property def manipulation_link_names(self): return [ "torso_fixed_link", @@ -411,19 +412,19 @@ def manipulation_link_names(self): "xtion_link", ] - @property + @cached_property def arm_link_names(self): return {arm: [f"arm_{arm}_{i}_link" for i in range(1, 8)] for arm in self.arm_names} - @property + @cached_property def eef_link_names(self): return {arm: f"{arm}_eef_link" for arm in self.arm_names} - @property + @cached_property def finger_link_names(self): return {arm: [f"gripper_{arm}_right_finger_link", f"gripper_{arm}_left_finger_link"] for arm in self.arm_names} - @property + @cached_property def finger_joint_names(self): return { arm: [f"gripper_{arm}_right_finger_joint", f"gripper_{arm}_left_finger_joint"] for arm in self.arm_names @@ -447,7 +448,7 @@ def curobo_path(self): for emb_sel in CuroboEmbodimentSelection } - @property + @cached_property def curobo_attached_object_link_names(self): return {eef_link_name: f"attached_object_{eef_link_name}" for eef_link_name in self.eef_link_names.values()} diff --git a/omnigibson/robots/turtlebot.py b/omnigibson/robots/turtlebot.py index f545103c4..a365ab9b4 100644 --- a/omnigibson/robots/turtlebot.py +++ b/omnigibson/robots/turtlebot.py @@ -1,4 +1,5 @@ import os +from functools import cached_property import torch as th @@ -21,7 +22,7 @@ def wheel_radius(self): def wheel_axle_length(self): return 0.23 - @property + @cached_property def base_joint_names(self): return ["wheel_left_joint", "wheel_right_joint"] diff --git a/omnigibson/robots/vx300s.py b/omnigibson/robots/vx300s.py index facb36e27..9839f0ac4 100644 --- a/omnigibson/robots/vx300s.py +++ b/omnigibson/robots/vx300s.py @@ -1,5 +1,6 @@ import math import os +from functools import cached_property import torch as th @@ -141,7 +142,7 @@ def disabled_collision_pairs(self): ["gripper_bar_link", 
"gripper_link"], ] - @property + @cached_property def arm_link_names(self): return { self.default_arm: [ @@ -156,7 +157,7 @@ def arm_link_names(self): ] } - @property + @cached_property def arm_joint_names(self): return { self.default_arm: [ @@ -169,15 +170,15 @@ def arm_joint_names(self): ] } - @property + @cached_property def eef_link_names(self): return {self.default_arm: "ee_gripper_link"} - @property + @cached_property def finger_link_names(self): return {self.default_arm: ["left_finger_link", "right_finger_link"]} - @property + @cached_property def finger_joint_names(self): return {self.default_arm: ["left_finger", "right_finger"]} @@ -197,7 +198,7 @@ def urdf_path(self): def curobo_path(self): return os.path.join(gm.ASSET_PATH, "models/vx300s/vx300s_description_curobo.yaml") - @property + @cached_property def curobo_attached_object_link_names(self): return {"ee_gripper_link": "attached_object"} From d092a6fb4444aa09f2ad6783d13064aa76fdaebd Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Wed, 4 Dec 2024 16:46:32 -0800 Subject: [PATCH 08/38] optimize np numba transform functions --- omnigibson/utils/transform_utils_np.py | 156 ++++++++++++++++++++++--- omnigibson/utils/usd_utils.py | 87 +++++++++++--- 2 files changed, 209 insertions(+), 34 deletions(-) diff --git a/omnigibson/utils/transform_utils_np.py b/omnigibson/utils/transform_utils_np.py index 8c5942985..237d98377 100644 --- a/omnigibson/utils/transform_utils_np.py +++ b/omnigibson/utils/transform_utils_np.py @@ -7,7 +7,7 @@ import math import numpy as np -from numba import jit +from numba import jit, prange from scipy.spatial.transform import Rotation as R PI = np.pi @@ -394,6 +394,78 @@ def mat2quat(rmat): return R.from_matrix(rmat).as_quat() +@jit(nopython=True, fastmath=True) +def _norm_2d_final_dim(mat): + n_elements = mat.shape[0] + out = np.zeros(n_elements, dtype=np.float32) + for i in prange(n_elements): + vec = mat[i] + out[i] = np.sqrt(np.sum(vec * vec)) + return out + + +@jit(nopython=True) +def mat2quat_batch(rmat): + """ + Converts given rotation matrix to quaternion. 
+ Args: + rmat (torch.Tensor): (3, 3) or (..., 3, 3) rotation matrix + Returns: + torch.Tensor: (4,) or (..., 4) (x,y,z,w) float quaternion angles + """ + batch_shape = rmat.shape[:-2] + mat_flat = rmat.reshape(-1, 3, 3) + + m00, m01, m02 = mat_flat[:, 0, 0], mat_flat[:, 0, 1], mat_flat[:, 0, 2] + m10, m11, m12 = mat_flat[:, 1, 0], mat_flat[:, 1, 1], mat_flat[:, 1, 2] + m20, m21, m22 = mat_flat[:, 2, 0], mat_flat[:, 2, 1], mat_flat[:, 2, 2] + + trace = m00 + m11 + m22 + + trace_positive = trace > 0 + cond1 = (m00 > m11) & (m00 > m22) & ~trace_positive + cond2 = (m11 > m22) & ~(trace_positive | cond1) + cond3 = ~(trace_positive | cond1 | cond2) + + # Trace positive condition + sq = np.where(trace_positive, np.sqrt(trace + 1.0) * 2.0, np.zeros_like(trace)) + qw = np.where(trace_positive, 0.25 * sq, np.zeros_like(trace)) + qx = np.where(trace_positive, (m21 - m12) / sq, np.zeros_like(trace)) + qy = np.where(trace_positive, (m02 - m20) / sq, np.zeros_like(trace)) + qz = np.where(trace_positive, (m10 - m01) / sq, np.zeros_like(trace)) + + # Condition 1 + sq = np.where(cond1, np.sqrt(1.0 + m00 - m11 - m22) * 2.0, sq) + qw = np.where(cond1, (m21 - m12) / sq, qw) + qx = np.where(cond1, 0.25 * sq, qx) + qy = np.where(cond1, (m01 + m10) / sq, qy) + qz = np.where(cond1, (m02 + m20) / sq, qz) + + # Condition 2 + sq = np.where(cond2, np.sqrt(1.0 + m11 - m00 - m22) * 2.0, sq) + qw = np.where(cond2, (m02 - m20) / sq, qw) + qx = np.where(cond2, (m01 + m10) / sq, qx) + qy = np.where(cond2, 0.25 * sq, qy) + qz = np.where(cond2, (m12 + m21) / sq, qz) + + # Condition 3 + sq = np.where(cond3, np.sqrt(1.0 + m22 - m00 - m11) * 2.0, sq) + qw = np.where(cond3, (m10 - m01) / sq, qw) + qx = np.where(cond3, (m02 + m20) / sq, qx) + qy = np.where(cond3, (m12 + m21) / sq, qy) + qz = np.where(cond3, 0.25 * sq, qz) + + quat = np.stack((qx, qy, qz, qw), axis=-1) + + # Normalize the quaternion + quat = quat / _norm_2d_final_dim(quat)[..., np.newaxis] + + # Reshape to match input batch shape + quat = quat.reshape(batch_shape + (4,)) + + return quat + + def vec2quat(vec, up=(0, 0, 1.0)): """ Converts given 3d-direction vector @vec to quaternion orientation with respect to another direction vector @up @@ -446,35 +518,78 @@ def mat2euler(rmat): return R.from_matrix(M).as_euler("xyz") -def pose2mat(pose): +def quat2mat(quaternion): + if quaternion.dtype != np.float32: + quaternion = quaternion.astype(np.float32) + return _quat2mat(quaternion) + + +@jit(nopython=True) +def _quat2mat(quaternion): """ - Converts pose to homogeneous matrix. + Convert quaternions into rotation matrices. Args: - pose (2-tuple): a (pos, orn) tuple where pos is vec3 float cartesian, and - orn is vec4 float quaternion. + quaternion (torch.Tensor): A tensor of shape (..., 4) representing batches of quaternions (w, x, y, z). Returns: - np.array: 4x4 homogeneous matrix + torch.Tensor: A tensor of shape (..., 3, 3) representing batches of rotation matrices. 
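+
+    The matrix entries are assembled from the quaternion outer product
+    (broadcast with np.broadcast_arrays), which keeps the conversion fully
+    vectorized over arbitrary batch shapes.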
""" - homo_pose_mat = np.zeros((4, 4), dtype=np.float32) - homo_pose_mat[:3, :3] = quat2mat(pose[1]) - homo_pose_mat[:3, 3] = np.array(pose[0], dtype=np.float32) - homo_pose_mat[3, 3] = 1.0 - return homo_pose_mat + # broadcast array is necessary to use numba parallel mode + q1, q2 = np.broadcast_arrays(quaternion[..., np.newaxis], quaternion[..., np.newaxis, :]) + outer = q1 * q2 + # Extract the necessary components + xx = outer[..., 0, 0] + yy = outer[..., 1, 1] + zz = outer[..., 2, 2] + xy = outer[..., 0, 1] + xz = outer[..., 0, 2] + yz = outer[..., 1, 2] + xw = outer[..., 0, 3] + yw = outer[..., 1, 3] + zw = outer[..., 2, 3] -def quat2mat(quaternion): + rotation_matrix = np.empty(quaternion.shape[:-1] + (3, 3), dtype=np.float32) + + rotation_matrix[..., 0, 0] = 1 - 2 * (yy + zz) + rotation_matrix[..., 0, 1] = 2 * (xy - zw) + rotation_matrix[..., 0, 2] = 2 * (xz + yw) + + rotation_matrix[..., 1, 0] = 2 * (xy + zw) + rotation_matrix[..., 1, 1] = 1 - 2 * (xx + zz) + rotation_matrix[..., 1, 2] = 2 * (yz - xw) + + rotation_matrix[..., 2, 0] = 2 * (xz - yw) + rotation_matrix[..., 2, 1] = 2 * (yz + xw) + rotation_matrix[..., 2, 2] = 1 - 2 * (xx + yy) + + return rotation_matrix + + +# def pose2mat(pose): +# if pose.dtype != np.float32: +# pose = pose.astype(np.float32) +# return _pose2mat(pose) + + +@jit(nopython=True) +def pose2mat(pose): """ - Converts given quaternion to matrix. + Converts pose to homogeneous matrix. Args: - quaternion (np.array): (..., 4) (x,y,z,w) float quaternion angles + pose (2-tuple): a (pos, orn) tuple where pos is vec3 float cartesian, and + orn is vec4 float quaternion. Returns: - np.array: (..., 3, 3) rotation matrix + np.array: 4x4 homogeneous matrix """ - return R.from_quat(quaternion).as_matrix() + homo_pose_mat = np.zeros((4, 4), dtype=np.float32) + homo_pose_mat[:3, :3] = _quat2mat(pose[1]) + homo_pose_mat[:3, 3] = pose[0] + homo_pose_mat[3, 3] = 1.0 + return homo_pose_mat def quat2axisangle(quat): @@ -558,6 +673,13 @@ def pose_in_A_to_pose_in_B(pose_A, pose_A_in_B): def pose_inv(pose_mat): + if pose_mat.dtype != np.float32: + pose_mat = pose_mat.astype(np.float32) + return _pose_inv(pose_mat) + + +@jit(nopython=True) +def _pose_inv(pose_mat): """ Computes the inverse of a homogeneous matrix corresponding to the pose of some frame B in frame A. The inverse is the pose of frame A in frame B. @@ -579,7 +701,7 @@ def pose_inv(pose_mat): # -t in the original frame, which is -R-1*t in the new frame, and then rotate back by # R-1 to align the axis again. 
- pose_inv = np.zeros((4, 4)) + pose_inv = np.zeros((4, 4), dtype=np.float32) pose_inv[:3, :3] = pose_mat[:3, :3].T pose_inv[:3, 3] = -pose_inv[:3, :3].dot(pose_mat[:3, 3]) pose_inv[3, 3] = 1.0 diff --git a/omnigibson/utils/usd_utils.py b/omnigibson/utils/usd_utils.py index 64c0b6e1e..037f669b1 100644 --- a/omnigibson/utils/usd_utils.py +++ b/omnigibson/utils/usd_utils.py @@ -6,11 +6,13 @@ from collections.abc import Iterable import numpy as np +from numba import jit, prange import torch as th import trimesh import omnigibson as og import omnigibson.lazy as lazy +from omnigibson.controllers.controller_base import _ControllerBackend, _ControllerTorchBackend, _ControllerNumpyBackend from omnigibson.controllers.controller_base import _controller_backend as cb import omnigibson.utils.transform_utils as T from omnigibson.macros import gm @@ -1114,23 +1116,12 @@ def _get_relative_poses(self, prim_path): self._read_cache["link_transforms"] = cb.from_torch(self._view.get_link_transforms()) idx = self._idx[prim_path] - tfs = cb.zeros((len(self._link_idx[idx]), 4, 4)) - # base vel is the final -1 index - link_tfs = self._read_cache["link_transforms"][idx, :] - tfs[:, 3, 3] = 1.0 - tfs[:, :3, 3] = link_tfs[:, :3] - tfs[:, :3, :3] = cb.T.quat2mat(link_tfs[:, 3:]) - base_tf_inv = cb.zeros((1, 4, 4)) - base_tf_inv[0, :, :] = cb.T.pose_inv(cb.T.pose2mat(self.get_position_orientation(prim_path=prim_path))) - - # (1, 4, 4) @ (n_links, 4, 4) -> (n_links, 4, 4) - rel_tfs = base_tf_inv @ tfs - - # Re-convert to quat form - rel_poses = cb.zeros((len(self._link_idx[idx]), 7)) - rel_poses[:, :3] = rel_tfs[:, :3, 3] - rel_poses[:, 3:] = cb.T.mat2quat(rel_tfs[:, :3, :3]) - self._read_cache["relative_poses"][prim_path] = rel_poses + self._read_cache["relative_poses"][prim_path] = cb.compute_relative_poses( + idx, + len(self._link_idx[idx]), + self._read_cache["link_transforms"], + self.get_position_orientation(prim_path=prim_path), + ) return self._read_cache["relative_poses"][prim_path] @@ -1881,3 +1872,65 @@ def delete_or_deactivate_prim(prim_path): lazy.omni.usd.commands.DeletePrimsCommand([prim_path], destructive=False).do() return True + + +import omnigibson.utils.transform_utils as TT +@th.compile +def _compute_relative_poses_torch( + idx: int, + n_links: int, + all_tfs: th.Tensor, + base_pose: th.Tensor, +): + tfs = th.zeros((n_links, 4, 4), dtype=th.float32) + # base vel is the final -1 index + link_tfs = all_tfs[idx, :] + tfs[:, 3, 3] = 1.0 + tfs[:, :3, 3] = link_tfs[:, :3] + tfs[:, :3, :3] = TT.quat2mat(link_tfs[:, 3:]) + base_tf_inv = th.zeros((1, 4, 4), dtype=th.float32) + base_tf_inv[0, :, :] = TT.pose_inv(TT.pose2mat(base_pose)) + + # (1, 4, 4) @ (n_links, 4, 4) -> (n_links, 4, 4) + rel_tfs = base_tf_inv @ tfs + + # Re-convert to quat form + rel_poses = th.zeros((n_links, 7), dtype=th.float32) + rel_poses[:, :3] = rel_tfs[:, :3, 3] + rel_poses[:, 3:] = TT.mat2quat(rel_tfs[:, :3, :3]) + + return rel_poses + + +import omnigibson.utils.transform_utils_np as NT +@jit(nopython=True) +def _compute_relative_poses_numpy(idx, n_links, all_tfs, base_pose): + tfs = np.zeros((n_links, 4, 4), dtype=np.float32) + # base vel is the final -1 index + link_tfs = all_tfs[idx, :] + tfs[:, 3, 3] = 1.0 + tfs[:, :3, 3] = link_tfs[:, :3] + tfs[:, :3, :3] = NT._quat2mat(link_tfs[:, 3:]) + # base_tf_inv = np.zeros((1, 4, 4), dtype=np.float32) + # base_tf_inv[0, :, :] = NT._pose_inv(NT.pose2mat(base_pose)) + base_tf_inv = NT._pose_inv(NT.pose2mat(base_pose)) + + # (1, 4, 4) @ (n_links, 4, 4) -> (n_links, 4, 4) + rel_tfs = 
np.zeros((n_links, 4, 4), dtype=np.float32) + for i in prange(n_links): + rel_tfs[i, :, :] = base_tf_inv @ tfs[i, :, :] + # rel_tfs = base_tf_inv @ tfs + + # Re-convert to quat form + rel_poses = np.zeros((n_links, 7), dtype=np.float32) + rel_poses[:, :3] = rel_tfs[:, :3, 3] + rel_poses[:, 3:] = NT.mat2quat_batch(rel_tfs[:, :3, :3].copy()) + + return rel_poses + + +# Set these as part of the backend values +setattr(_ControllerBackend, "compute_relative_poses", None) +setattr(_ControllerTorchBackend, "compute_relative_poses", _compute_relative_poses_torch) +setattr(_ControllerNumpyBackend, "compute_relative_poses", _compute_relative_poses_numpy) + From 69df439dc8ffa8b3a56881be97c9d8117302f4d8 Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Wed, 11 Dec 2024 17:24:36 -0800 Subject: [PATCH 09/38] refactor controller backend and general compute backend --- omnigibson/controllers/controller_base.py | 180 +++++------------- omnigibson/controllers/dd_controller.py | 2 +- omnigibson/controllers/ik_controller.py | 26 ++- omnigibson/controllers/joint_controller.py | 10 +- .../multi_finger_gripper_controller.py | 2 +- .../controllers/null_joint_controller.py | 2 +- omnigibson/controllers/osc_controller.py | 10 +- omnigibson/objects/controllable_object.py | 2 +- omnigibson/robots/holonomic_base_robot.py | 2 +- omnigibson/utils/backend_utils.py | 116 +++++++++++ omnigibson/utils/usd_utils.py | 10 +- 11 files changed, 195 insertions(+), 167 deletions(-) create mode 100644 omnigibson/utils/backend_utils.py diff --git a/omnigibson/controllers/controller_base.py b/omnigibson/controllers/controller_base.py index b9f3c38a0..86500641a 100644 --- a/omnigibson/controllers/controller_base.py +++ b/omnigibson/controllers/controller_base.py @@ -3,12 +3,8 @@ from enum import IntEnum import torch as th -import numpy as np - -from omnigibson.utils.python_utils import Recreatable, Registerable, Serializable, assert_valid_key, classproperty, \ - recursively_convert_from_torch -import omnigibson.utils.transform_utils as TT -import omnigibson.utils.transform_utils_np as NT +from omnigibson.utils.python_utils import Recreatable, Registerable, Serializable, assert_valid_key, classproperty +from omnigibson.utils.backend_utils import _compute_backend as cb # Global dicts that will contain mappings REGISTERED_CONTROLLERS = dict() @@ -38,115 +34,7 @@ class IsGraspingState(IntEnum): FALSE = -1 -class _ControllerBackend: - array = None - int_array = None - prod = None - cat = None - zeros = None - ones = None - to_numpy = None - from_numpy = None - to_torch = None - from_torch = None - from_torch_recursive = None - allclose = None - arr_type = None - as_int = None - as_float32 = None - pinv = None - meshgrid = None - full = None - logical_or = None - all = None - abs = None - sqrt = None - mean = None - copy = None - eye = None - view = None - arange = None - where = None - squeeze = None - T = None - @classmethod - def set_methods(cls, backend): - for attr, fcn in backend.__dict__.items(): - # Do not override reserved functions - if attr.startswith("__"): - continue - # Set function to this backend - setattr(cls, attr, fcn) - - -class _ControllerTorchBackend(_ControllerBackend): - array = lambda *args: th.tensor(*args, dtype=th.float32) - int_array = lambda *args: th.tensor(*args, dtype=th.int32) - prod = th.prod - cat = th.cat - zeros = lambda *args: th.zeros(*args, dtype=th.float32) - ones = lambda *args: th.ones(*args, dtype=th.float32) - to_numpy = lambda x: x.numpy() - from_numpy = lambda x: 
th.from_numpy() - to_torch = lambda x: x - from_torch = lambda x: x - from_torch_recursive = lambda dic: dic - allclose = th.allclose - arr_type = th.Tensor - as_int = lambda arr: arr.int() - as_float32 = lambda arr: arr.float() - pinv = th.linalg.pinv - meshgrid = lambda idx_a, idx_b: th.meshgrid(idx_a, idx_b, indexing="xy") - full = lambda shape, fill_value: th.full(shape, fill_value, dtype=th.float32) - logical_or = th.logical_or - all = th.all - abs = th.abs - sqrt = th.sqrt - mean = lambda val, dim=None, keepdim=False: th.mean(val, dim=dim, keepdim=keepdim) - copy = lambda arr: arr.clone() - eye = th.eye - view = lambda arr, shape: arr.view(shape) - arange = th.arange - where = th.where - squeeze = lambda arr, dim=None: arr.squeeze(dim=dim) - T = TT - - -class _ControllerNumpyBackend(_ControllerBackend): - array = lambda *args: np.array(*args, dtype=np.float32) - int_array = lambda *args: np.array(*args, dtype=np.int32) - prod = np.prod - cat = np.concatenate - zeros = lambda *args: np.zeros(*args, dtype=np.float32) - ones = lambda *args: np.ones(*args, dtype=np.float32) - to_numpy = lambda x: x - from_numpy = lambda x: x - to_torch = lambda x: th.from_numpy(x) - from_torch = lambda x: x.numpy() - from_torch_recursive = recursively_convert_from_torch - allclose = np.allclose - arr_type = np.ndarray - as_int = lambda arr: arr.astype(int) - as_float32 = lambda arr: arr.astype(np.float32) - pinv = np.linalg.pinv - meshgrid = lambda idx_a, idx_b: np.ix_(idx_a, idx_b) - full = lambda shape, fill_value: np.full(shape, fill_value, dtype=np.float32) - logical_or = np.logical_or - all = np.all - abs = np.abs - sqrt = np.sqrt - mean = lambda val, dim=None, keepdim=False: np.mean(val, axis=dim, keepdims=keepdim) - copy = lambda arr: np.array(arr) - eye = np.eye - view = lambda arr, shape: arr.reshape(shape) - arange = np.arange - where = np.where - squeeze = lambda arr, dim=None: arr.squeeze(axis=dim) - T = NT - - -_controller_backend = _ControllerBackend # Define macros @@ -227,11 +115,11 @@ def __init__( ] assert "has_limit" in control_limits, "Expected has_limit specified in control_limits, but does not exist." 
self._dof_has_limits = control_limits["has_limit"] - self._dof_idx = _controller_backend.as_int(dof_idx) + self._dof_idx = cb.as_int(dof_idx) # Generate goal information self._goal_shapes = self._get_goal_shapes() - self._goal_dim = int(sum(_controller_backend.prod(_controller_backend.array(shape)) for shape in self._goal_shapes.values())) + self._goal_dim = int(sum(cb.prod(cb.array(shape)) for shape in self._goal_shapes.values())) # Initialize some other variables that will be filled in during runtime self._control = None @@ -242,15 +130,12 @@ def __init__( # Standardize command input / output limits to be (min_array, max_array) command_input_limits = ( - (-1.0, 1.0) + self._generate_default_command_input_limits() if type(command_input_limits) == str and command_input_limits == "default" else command_input_limits ) command_output_limits = ( - ( - self._control_limits[self.control_type][0][self.dof_idx], - self._control_limits[self.control_type][1][self.dof_idx], - ) + self._generate_default_command_output_limits() if type(command_output_limits) == str and command_output_limits == "default" else command_output_limits ) @@ -271,6 +156,31 @@ def __init__( ) ) + def _generate_default_command_input_limits(self): + """ + Generates default command input limits based on the control limits + + Returns: + 2-tuple: + - int or array: min command input limits + - int or array: max command input limits + """ + return (-1.0, 1.0) + + def _generate_default_command_output_limits(self): + """ + Generates default command output limits based on the control limits + + Returns: + 2-tuple: + - int or array: min command output limits + - int or array: max command output limits + """ + return ( + self._control_limits[self.control_type][0][self.dof_idx], + self._control_limits[self.control_type][1][self.dof_idx], + ) + def _preprocess_command(self, command): """ Clips + scales inputted @command according to self.command_input_limits and self.command_output_limits. 
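The _generate_default_command_input_limits / _generate_default_command_output_limits hooks introduced in the hunk above move the previously hard-coded defaults out of __init__, so a controller subclass can change its defaults by overriding a single method instead of re-deriving them from control_limits. A minimal sketch of such an override follows (illustrative only, not part of this patch; MeanLimitJointController is a hypothetical name). It simply post-processes whatever the base hook returns, which is the same pattern MultiFingerGripperController adopts later in this series via cb.mean():

    # Illustrative sketch -- MeanLimitJointController is hypothetical and is not
    # introduced anywhere in this patch series.
    from omnigibson.controllers.joint_controller import JointController
    from omnigibson.utils.backend_utils import _compute_backend as cb


    class MeanLimitJointController(JointController):
        def _generate_default_command_output_limits(self):
            # The base hook returns the per-DOF control limits indexed by self.dof_idx
            low, high = super()._generate_default_command_output_limits()
            # Collapse them to a single scalar pair using the active compute backend,
            # mirroring the MultiFingerGripperController override added later on.
            return cb.mean(low), cb.mean(high)
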
@@ -284,7 +194,7 @@ def _preprocess_command(self, command): Array[float]: Processed command vector """ # Make sure command is a th.tensor - command = _controller_backend.array([command]) if type(command) in {int, float} else command + command = cb.array([command]) if type(command) in {int, float} else command # We only clip and / or scale if self.command_input_limits exists if self._command_input_limits is not None: # Clip @@ -469,23 +379,31 @@ def _dump_state(self): # Default is just the command return dict( goal_is_valid=self._goal is not None, - goal=self._goal, + goal=None if self._goal is None else {k: cb.to_torch(v) for k, v in self._goal.items()}, ) def _load_state(self, state): # Make sure every entry in goal is a numpy array # Load goal - self._goal = None if state["goal"] is None else {name: goal_state for name, goal_state in state["goal"].items()} + if state["goal"] is None: + self._goal = None + else: + self._goal = dict() + for name, goal_state in state["goal"].items(): + if isinstance(goal_state, th.Tensor): + self._goal[name] = cb.from_torch(goal_state) + else: + self._goal[name] = goal_state def serialize(self, state): # Make sure size of the state is consistent, even if we have no goal goal_state_flattened = ( - _controller_backend.cat([goal_state.flatten() for goal_state in self._goal.values()]) + th.cat([goal_state.flatten() for goal_state in state["goal"].values()]) if (state)["goal_is_valid"] - else _controller_backend.zeros(self.goal_dim) + else th.zeros(self.goal_dim) ) - return _controller_backend.cat([_controller_backend.array([state["goal_is_valid"]]), goal_state_flattened]) + return cb.cat([cb.array([state["goal_is_valid"]]), goal_state_flattened]) def deserialize(self, state): goal_is_valid = bool(state[0]) @@ -534,12 +452,8 @@ def nums2array(nums, dim): # Else, input is a single value, so we map to a numpy array of correct size and return return ( nums - if isinstance(nums, _controller_backend.arr_type) - else ( - _controller_backend.array(nums) - if isinstance(nums, Iterable) - else _controller_backend.ones(dim) * nums - ) + if isinstance(nums, cb.arr_type) + else cb.array(nums)if isinstance(nums, Iterable) else cb.ones(dim) * nums ) @property diff --git a/omnigibson/controllers/dd_controller.py b/omnigibson/controllers/dd_controller.py index da35e3f86..c4f15a246 100644 --- a/omnigibson/controllers/dd_controller.py +++ b/omnigibson/controllers/dd_controller.py @@ -1,5 +1,5 @@ from omnigibson.controllers import ControlType, LocomotionController -from omnigibson.controllers.controller_base import _controller_backend as cb +from omnigibson.utils.backend_utils import _compute_backend as cb class DifferentialDriveController(LocomotionController): diff --git a/omnigibson/controllers/ik_controller.py b/omnigibson/controllers/ik_controller.py index 88bd506a5..a42e46228 100644 --- a/omnigibson/controllers/ik_controller.py +++ b/omnigibson/controllers/ik_controller.py @@ -1,13 +1,11 @@ import math from collections.abc import Iterable -from omnigibson.controllers.controller_base import _ControllerBackend, _ControllerTorchBackend, _ControllerNumpyBackend -from omnigibson.controllers.controller_base import _controller_backend as cb +from omnigibson.utils.backend_utils import _ComputeBackend, _ComputeTorchBackend, _ComputeNumpyBackend +from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.controllers import ControlType, ManipulationController from omnigibson.controllers.joint_controller import JointController -from omnigibson.macros import 
create_module_macros, gm from omnigibson.utils.processing_utils import MovingAverageFilter -from omnigibson.utils.python_utils import assert_valid_key from omnigibson.utils.ui_utils import create_module_logger # Create module logger @@ -43,8 +41,8 @@ def __init__( dof_idx, command_input_limits="default", command_output_limits=( - (-0.2, -0.2, -0.2, -0.5, -0.5, -0.5), - (0.2, 0.2, 0.2, 0.5, 0.5, 0.5), + (-0.2, -0.2, -0.2, -0.5, -0.5, -0.5), + (0.2, 0.2, 0.2, 0.5, 0.5, 0.5), ), pos_kp=None, pos_damping_ratio=None, @@ -117,11 +115,11 @@ def limiter(target_pos: Array[float], target_quat: Array[float], control_dict: D """ # Store arguments control_dim = len(dof_idx) - self.control_filter = None #( - # None - # if smoothing_filter_size in {None, 0} - # else MovingAverageFilter(obs_dim=control_dim, filter_width=smoothing_filter_size) - # ) + self.control_filter = ( + None + if smoothing_filter_size in {None, 0} + else MovingAverageFilter(obs_dim=control_dim, filter_width=smoothing_filter_size) + ) assert mode in IK_MODES, f"Invalid ik mode specified! Valid options are: {IK_MODES}, got: {mode}" # If mode is absolute pose, make sure command input limits / output limits are None @@ -431,7 +429,7 @@ def _compute_ik_qpos_numpy( # Set these as part of the backend values -setattr(_ControllerBackend, "compute_ik_qpos", None) -setattr(_ControllerTorchBackend, "compute_ik_qpos", _compute_ik_qpos_torch) -setattr(_ControllerNumpyBackend, "compute_ik_qpos", _compute_ik_qpos_numpy) +setattr(_ComputeBackend, "compute_ik_qpos", None) +setattr(_ComputeTorchBackend, "compute_ik_qpos", _compute_ik_qpos_torch) +setattr(_ComputeNumpyBackend, "compute_ik_qpos", _compute_ik_qpos_numpy) diff --git a/omnigibson/controllers/joint_controller.py b/omnigibson/controllers/joint_controller.py index 662422741..c11eaaf64 100644 --- a/omnigibson/controllers/joint_controller.py +++ b/omnigibson/controllers/joint_controller.py @@ -1,7 +1,7 @@ import math -from omnigibson.controllers.controller_base import _ControllerBackend, _ControllerTorchBackend, _ControllerNumpyBackend -from omnigibson.controllers.controller_base import _controller_backend as cb +from omnigibson.utils.backend_utils import _ComputeBackend, _ComputeTorchBackend, _ComputeNumpyBackend +from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.controllers import ( ControlType, GripperController, @@ -333,7 +333,7 @@ def _compute_joint_torques_numpy( # Set these as part of the backend values -setattr(_ControllerBackend, "compute_joint_torques", None) -setattr(_ControllerTorchBackend, "compute_joint_torques", _compute_joint_torques_torch) -setattr(_ControllerNumpyBackend, "compute_joint_torques", _compute_joint_torques_numpy) +setattr(_ComputeBackend, "compute_joint_torques", None) +setattr(_ComputeTorchBackend, "compute_joint_torques", _compute_joint_torques_torch) +setattr(_ComputeNumpyBackend, "compute_joint_torques", _compute_joint_torques_numpy) diff --git a/omnigibson/controllers/multi_finger_gripper_controller.py b/omnigibson/controllers/multi_finger_gripper_controller.py index c07a8f51b..87c356b7a 100644 --- a/omnigibson/controllers/multi_finger_gripper_controller.py +++ b/omnigibson/controllers/multi_finger_gripper_controller.py @@ -1,4 +1,4 @@ -from omnigibson.controllers.controller_base import _controller_backend as cb +from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.controllers import ControlType, GripperController, IsGraspingState from omnigibson.macros import create_module_macros from 
omnigibson.utils.python_utils import assert_valid_key diff --git a/omnigibson/controllers/null_joint_controller.py b/omnigibson/controllers/null_joint_controller.py index eea678a0a..83782c7db 100644 --- a/omnigibson/controllers/null_joint_controller.py +++ b/omnigibson/controllers/null_joint_controller.py @@ -1,4 +1,4 @@ -from omnigibson.controllers.controller_base import _controller_backend as cb +from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.controllers import JointController diff --git a/omnigibson/controllers/osc_controller.py b/omnigibson/controllers/osc_controller.py index dc74f1178..53c396a1d 100644 --- a/omnigibson/controllers/osc_controller.py +++ b/omnigibson/controllers/osc_controller.py @@ -1,7 +1,7 @@ import math -from omnigibson.controllers.controller_base import _ControllerBackend, _ControllerTorchBackend, _ControllerNumpyBackend -from omnigibson.controllers.controller_base import _controller_backend as cb +from omnigibson.utils.backend_utils import _ComputeBackend, _ComputeTorchBackend, _ComputeNumpyBackend +from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.controllers import ControlType, ManipulationController from omnigibson.utils.processing_utils import MovingAverageFilter from omnigibson.utils.python_utils import assert_valid_key @@ -642,6 +642,6 @@ def _compute_osc_torques_numpy( # Set these as part of the backend values -setattr(_ControllerBackend, "compute_osc_torques", None) -setattr(_ControllerTorchBackend, "compute_osc_torques", _compute_osc_torques_torch) -setattr(_ControllerNumpyBackend, "compute_osc_torques", _compute_osc_torques_numpy) +setattr(_ComputeBackend, "compute_osc_torques", None) +setattr(_ComputeTorchBackend, "compute_osc_torques", _compute_osc_torques_torch) +setattr(_ComputeNumpyBackend, "compute_osc_torques", _compute_osc_torques_numpy) diff --git a/omnigibson/objects/controllable_object.py b/omnigibson/objects/controllable_object.py index bcad603a8..b312ec7b0 100644 --- a/omnigibson/objects/controllable_object.py +++ b/omnigibson/objects/controllable_object.py @@ -10,7 +10,7 @@ import omnigibson as og from omnigibson.controllers import create_controller from omnigibson.controllers.controller_base import ControlType -from omnigibson.controllers.controller_base import _controller_backend as cb +from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.objects.object_base import BaseObject from omnigibson.utils.constants import JointType, PrimType from omnigibson.utils.numpy_utils import NumpyTypes diff --git a/omnigibson/robots/holonomic_base_robot.py b/omnigibson/robots/holonomic_base_robot.py index 915d595d3..25ff76691 100644 --- a/omnigibson/robots/holonomic_base_robot.py +++ b/omnigibson/robots/holonomic_base_robot.py @@ -6,7 +6,7 @@ import omnigibson as og import omnigibson.lazy as lazy -from omnigibson.controllers.controller_base import _controller_backend as cb +from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.macros import create_module_macros from omnigibson.robots.locomotion_robot import LocomotionRobot from omnigibson.robots.manipulation_robot import ManipulationRobot diff --git a/omnigibson/utils/backend_utils.py b/omnigibson/utils/backend_utils.py new file mode 100644 index 000000000..ad93563fe --- /dev/null +++ b/omnigibson/utils/backend_utils.py @@ -0,0 +1,116 @@ +import torch as th +import numpy as np +import omnigibson.utils.transform_utils as TT +import omnigibson.utils.transform_utils_np as NT +from 
omnigibson.utils.python_utils import recursively_convert_from_torch + + +class _ComputeBackend: + array = None + int_array = None + prod = None + cat = None + zeros = None + ones = None + to_numpy = None + from_numpy = None + to_torch = None + from_torch = None + from_torch_recursive = None + allclose = None + arr_type = None + as_int = None + as_float32 = None + pinv = None + meshgrid = None + full = None + logical_or = None + all = None + abs = None + sqrt = None + mean = None + copy = None + eye = None + view = None + arange = None + where = None + squeeze = None + T = None + + @classmethod + def set_methods(cls, backend): + for attr, fcn in backend.__dict__.items(): + # Do not override reserved functions + if attr.startswith("__"): + continue + # Set function to this backend + setattr(cls, attr, fcn) + + +class _ComputeTorchBackend(_ComputeBackend): + array = lambda *args: th.tensor(*args, dtype=th.float32) + int_array = lambda *args: th.tensor(*args, dtype=th.int32) + prod = th.prod + cat = th.cat + zeros = lambda *args: th.zeros(*args, dtype=th.float32) + ones = lambda *args: th.ones(*args, dtype=th.float32) + to_numpy = lambda x: x.numpy() + from_numpy = lambda x: th.from_numpy() + to_torch = lambda x: x + from_torch = lambda x: x + from_torch_recursive = lambda dic: dic + allclose = th.allclose + arr_type = th.Tensor + as_int = lambda arr: arr.int() + as_float32 = lambda arr: arr.float() + pinv = th.linalg.pinv + meshgrid = lambda idx_a, idx_b: th.meshgrid(idx_a, idx_b, indexing="xy") + full = lambda shape, fill_value: th.full(shape, fill_value, dtype=th.float32) + logical_or = th.logical_or + all = th.all + abs = th.abs + sqrt = th.sqrt + mean = lambda val, dim=None, keepdim=False: th.mean(val, dim=dim, keepdim=keepdim) + copy = lambda arr: arr.clone() + eye = th.eye + view = lambda arr, shape: arr.view(shape) + arange = th.arange + where = th.where + squeeze = lambda arr, dim=None: arr.squeeze(dim=dim) + T = TT + + +class _ComputeNumpyBackend(_ComputeBackend): + array = lambda *args: np.array(*args, dtype=np.float32) + int_array = lambda *args: np.array(*args, dtype=np.int32) + prod = np.prod + cat = np.concatenate + zeros = lambda *args: np.zeros(*args, dtype=np.float32) + ones = lambda *args: np.ones(*args, dtype=np.float32) + to_numpy = lambda x: x + from_numpy = lambda x: x + to_torch = lambda x: th.from_numpy(x) + from_torch = lambda x: x.numpy() + from_torch_recursive = recursively_convert_from_torch + allclose = np.allclose + arr_type = np.ndarray + as_int = lambda arr: arr.astype(int) + as_float32 = lambda arr: arr.astype(np.float32) + pinv = np.linalg.pinv + meshgrid = lambda idx_a, idx_b: np.ix_(idx_a, idx_b) + full = lambda shape, fill_value: np.full(shape, fill_value, dtype=np.float32) + logical_or = np.logical_or + all = np.all + abs = np.abs + sqrt = np.sqrt + mean = lambda val, dim=None, keepdim=False: np.mean(val, axis=dim, keepdims=keepdim) + copy = lambda arr: np.array(arr) + eye = np.eye + view = lambda arr, shape: arr.reshape(shape) + arange = np.arange + where = np.where + squeeze = lambda arr, dim=None: arr.squeeze(axis=dim) + T = NT + + +_compute_backend = _ComputeBackend diff --git a/omnigibson/utils/usd_utils.py b/omnigibson/utils/usd_utils.py index 037f669b1..b02af97ba 100644 --- a/omnigibson/utils/usd_utils.py +++ b/omnigibson/utils/usd_utils.py @@ -12,8 +12,8 @@ import omnigibson as og import omnigibson.lazy as lazy -from omnigibson.controllers.controller_base import _ControllerBackend, _ControllerTorchBackend, _ControllerNumpyBackend -from 
omnigibson.controllers.controller_base import _controller_backend as cb +from omnigibson.utils.backend_utils import _ComputeBackend, _ComputeTorchBackend, _ComputeNumpyBackend +from omnigibson.utils.backend_utils import _compute_backend as cb import omnigibson.utils.transform_utils as T from omnigibson.macros import gm from omnigibson.utils.constants import PRIMITIVE_MESH_TYPES, JointType, PrimType @@ -1930,7 +1930,7 @@ def _compute_relative_poses_numpy(idx, n_links, all_tfs, base_pose): # Set these as part of the backend values -setattr(_ControllerBackend, "compute_relative_poses", None) -setattr(_ControllerTorchBackend, "compute_relative_poses", _compute_relative_poses_torch) -setattr(_ControllerNumpyBackend, "compute_relative_poses", _compute_relative_poses_numpy) +setattr(_ComputeBackend, "compute_relative_poses", None) +setattr(_ComputeTorchBackend, "compute_relative_poses", _compute_relative_poses_torch) +setattr(_ComputeNumpyBackend, "compute_relative_poses", _compute_relative_poses_numpy) From 3b8d114f20fd82678bdf485b95153e67afb3cf8f Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Wed, 11 Dec 2024 17:27:50 -0800 Subject: [PATCH 10/38] update filters to use compute backend --- omnigibson/utils/processing_utils.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/omnigibson/utils/processing_utils.py b/omnigibson/utils/processing_utils.py index bb32f35f5..8577c7232 100644 --- a/omnigibson/utils/processing_utils.py +++ b/omnigibson/utils/processing_utils.py @@ -1,6 +1,7 @@ import torch as th from omnigibson.utils.python_utils import Serializable +from omnigibson.utils.backend_utils import _compute_backend as cb class Filter(Serializable): @@ -59,7 +60,7 @@ def __init__(self, obs_dim, filter_width): self.obs_dim = obs_dim assert filter_width > 0, f"MovingAverageFilter must have a non-zero size! 
Got: {filter_width}" self.filter_width = filter_width - self.past_samples = th.zeros((filter_width, obs_dim)) + self.past_samples = cb.zeros((filter_width, obs_dim)) self.current_idx = 0 self.fully_filled = False # Whether the entire filter buffer is filled or not @@ -103,7 +104,7 @@ def _dump_state(self): state = super()._dump_state() # Add info from this filter - state["past_samples"] = self.past_samples + state["past_samples"] = cb.to_torch(self.past_samples) state["current_idx"] = self.current_idx state["fully_filled"] = self.fully_filled @@ -114,7 +115,7 @@ def _load_state(self, state): super()._load_state(state=state) # Load relevant info for this filter - self.past_samples = state["past_samples"] + self.past_samples = cb.from_torch(state["past_samples"]) self.current_idx = state["current_idx"] self.fully_filled = state["fully_filled"] @@ -159,7 +160,7 @@ def __init__(self, obs_dim, alpha=0.9): alpha (float): The relative weighting of new samples relative to older samples """ self.obs_dim = obs_dim - self.avg = th.zeros(obs_dim) + self.avg = cb.zeros(obs_dim) self.num_samples = 0 self.alpha = alpha @@ -178,7 +179,7 @@ def estimate(self, observation): self.avg = self.alpha * observation + (1.0 - self.alpha) * self.avg self.num_samples += 1 - return th.tensor(self.avg) + return cb.copy(self.avg) def reset(self): # Clear internal state @@ -190,7 +191,7 @@ def _dump_state(self): state = super()._dump_state() # Add info from this filter - state["avg"] = th.tensor(self.avg) + state["avg"] = cb.to_torch(self.avg) state["num_samples"] = self.num_samples return state @@ -200,7 +201,7 @@ def _load_state(self, state): super()._load_state(state=state) # Load relevant info for this filter - self.avg = th.tensor(state["avg"]) + self.avg = cb.from_torch(state["avg"]) self.num_samples = state["num_samples"] def serialize(self, state): From e45e719fc3956435693842ef867011337bfe5ffd Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Wed, 11 Dec 2024 17:34:03 -0800 Subject: [PATCH 11/38] fix controller bugs --- omnigibson/controllers/ik_controller.py | 4 ++-- .../controllers/multi_finger_gripper_controller.py | 9 ++++++--- omnigibson/objects/dataset_object.py | 2 +- omnigibson/simulator.py | 11 ++++++++--- omnigibson/utils/config_utils.py | 1 + 5 files changed, 18 insertions(+), 9 deletions(-) diff --git a/omnigibson/controllers/ik_controller.py b/omnigibson/controllers/ik_controller.py index a42e46228..b4a8502a4 100644 --- a/omnigibson/controllers/ik_controller.py +++ b/omnigibson/controllers/ik_controller.py @@ -217,10 +217,10 @@ def serialize(self, state): state_flat = super().serialize(state=state) # Serialize state for this controller - return cb.cat( + return th.cat( [ state_flat, - cb.array([]) if self.control_filter is None else self.control_filter.serialize(state=state["control_filter"]), + th.tensor([]) if self.control_filter is None else self.control_filter.serialize(state=state["control_filter"]), ] ) diff --git a/omnigibson/controllers/multi_finger_gripper_controller.py b/omnigibson/controllers/multi_finger_gripper_controller.py index 87c356b7a..933721b05 100644 --- a/omnigibson/controllers/multi_finger_gripper_controller.py +++ b/omnigibson/controllers/multi_finger_gripper_controller.py @@ -112,6 +112,11 @@ def __init__( command_output_limits=command_output_limits, ) + def _generate_default_command_output_limits(self): + command_output_limits = super()._generate_default_command_output_limits() + + return cb.mean(command_output_limits[0]), 
cb.mean(command_output_limits[1]) + def reset(self): # Call super first super().reset() @@ -249,9 +254,7 @@ def _update_grasping_state(self, control_dict): dist_from_upper_limit = max_pos - finger_pos # If either of the joint positions are not near the joint limits with some tolerance (m.POS_TOLERANCE) - valid_grasp_pos = ( - dist_from_lower_limit.mean() > m.POS_TOLERANCE or dist_from_upper_limit.mean() > m.POS_TOLERANCE - ) + valid_grasp_pos = dist_from_lower_limit.mean() > m.POS_TOLERANCE or dist_from_upper_limit.mean() > m.POS_TOLERANCE # And the joint velocities are close to zero with some tolerance (m.VEL_TOLERANCE) valid_grasp_vel = cb.all(cb.abs(finger_vel) < m.VEL_TOLERANCE) diff --git a/omnigibson/objects/dataset_object.py b/omnigibson/objects/dataset_object.py index a2058af2b..fc1e747e2 100644 --- a/omnigibson/objects/dataset_object.py +++ b/omnigibson/objects/dataset_object.py @@ -244,7 +244,7 @@ def _post_load(self): scale = th.ones(3) if self._load_config["scale"] is None else self._load_config["scale"] # Assert that the scale does not have too small dimensions - assert th.all(scale > 1e-4), f"Scale of {self.name} is too small: {scale}" + assert th.all(th.tensor(scale) > 1e-4), f"Scale of {self.name} is too small: {scale}" # Set this scale in the load config -- it will automatically scale the object during self.initialize() self._load_config["scale"] = scale diff --git a/omnigibson/simulator.py b/omnigibson/simulator.py index 17cf95dd7..183cb5926 100644 --- a/omnigibson/simulator.py +++ b/omnigibson/simulator.py @@ -221,6 +221,7 @@ def _launch_app(): "Content", "Flow", "Semantics Schema Editor", + "VR", ] ) @@ -239,9 +240,13 @@ def _launch_app(): # Loading Isaac Sim disables Ctrl+C, so we need to re-enable it signal.signal(signal.SIGINT, og.shutdown_handler) - # Set controller backend - import omnigibson.controllers.controller_base as CB - CB._controller_backend.set_methods(CB._ControllerNumpyBackend if gm.USE_NUMPY_CONTROLLER_BACKEND else CB._ControllerTorchBackend) + # Set compute backend + import omnigibson.utils.backend_utils as _backend_utils + _backend_utils._compute_backend.set_methods( + _backend_utils._ComputeNumpyBackend + if gm.USE_NUMPY_CONTROLLER_BACKEND + else _backend_utils._ComputeTorchBackend + ) return app diff --git a/omnigibson/utils/config_utils.py b/omnigibson/utils/config_utils.py index dd467c382..82f9b1e91 100644 --- a/omnigibson/utils/config_utils.py +++ b/omnigibson/utils/config_utils.py @@ -2,6 +2,7 @@ import json import os +import numpy as np import torch as th import yaml From e9219c22efdef50640ad4b78b61ef71389fd4782 Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Wed, 11 Dec 2024 18:55:03 -0800 Subject: [PATCH 12/38] fix proprioception dict generation --- omnigibson/robots/active_camera_robot.py | 4 ++-- omnigibson/robots/articulated_trunk_robot.py | 4 ++-- omnigibson/robots/locomotion_robot.py | 4 ++-- omnigibson/robots/manipulation_robot.py | 20 ++++++++++---------- omnigibson/robots/robot_base.py | 12 +++++++----- 5 files changed, 23 insertions(+), 21 deletions(-) diff --git a/omnigibson/robots/active_camera_robot.py b/omnigibson/robots/active_camera_robot.py index 6d330b489..73347b797 100644 --- a/omnigibson/robots/active_camera_robot.py +++ b/omnigibson/robots/active_camera_robot.py @@ -38,8 +38,8 @@ def _get_proprioception_dict(self): dic = super()._get_proprioception_dict() # Add camera pos info - joint_positions = ControllableObjectViewAPI.get_joint_positions(self.articulation_root_path) - joint_velocities = 
ControllableObjectViewAPI.get_joint_velocities(self.articulation_root_path) + joint_positions = dic["joint_qpos"] + joint_velocities = dic["joint_qvel"] dic["camera_qpos"] = joint_positions[self.camera_control_idx] dic["camera_qpos_sin"] = th.sin(joint_positions[self.camera_control_idx]) dic["camera_qpos_cos"] = th.cos(joint_positions[self.camera_control_idx]) diff --git a/omnigibson/robots/articulated_trunk_robot.py b/omnigibson/robots/articulated_trunk_robot.py index d4906c65f..7387d7a11 100644 --- a/omnigibson/robots/articulated_trunk_robot.py +++ b/omnigibson/robots/articulated_trunk_robot.py @@ -165,8 +165,8 @@ def _get_proprioception_dict(self): dic = super()._get_proprioception_dict() # Add trunk info - joint_positions = ControllableObjectViewAPI.get_joint_positions(self.articulation_root_path) - joint_velocities = ControllableObjectViewAPI.get_joint_velocities(self.articulation_root_path) + joint_positions = dic["joint_qpos"] + joint_velocities = dic["joint_qvel"] dic["trunk_qpos"] = joint_positions[self.trunk_control_idx] dic["trunk_qvel"] = joint_velocities[self.trunk_control_idx] diff --git a/omnigibson/robots/locomotion_robot.py b/omnigibson/robots/locomotion_robot.py index 7eafea0e6..9cf9d1a54 100644 --- a/omnigibson/robots/locomotion_robot.py +++ b/omnigibson/robots/locomotion_robot.py @@ -40,8 +40,8 @@ def _validate_configuration(self): def _get_proprioception_dict(self): dic = super()._get_proprioception_dict() - joint_positions = ControllableObjectViewAPI.get_joint_positions(self.articulation_root_path) - joint_velocities = ControllableObjectViewAPI.get_joint_velocities(self.articulation_root_path) + joint_positions = dic["joint_qpos"] + joint_velocities = dic["joint_qvel"] # Add base info dic["base_qpos"] = joint_positions[self.base_control_idx] diff --git a/omnigibson/robots/manipulation_robot.py b/omnigibson/robots/manipulation_robot.py index 48450afea..8fa160208 100644 --- a/omnigibson/robots/manipulation_robot.py +++ b/omnigibson/robots/manipulation_robot.py @@ -10,6 +10,7 @@ import omnigibson as og import omnigibson.lazy as lazy +from omnigibson.utils.backend_utils import _compute_backend as cb import omnigibson.utils.transform_utils as T from omnigibson.controllers import ( ControlType, @@ -414,8 +415,8 @@ def _get_proprioception_dict(self): dic = super()._get_proprioception_dict() # Loop over all arms to grab proprio info - joint_positions = ControllableObjectViewAPI.get_joint_positions(self.articulation_root_path) - joint_velocities = ControllableObjectViewAPI.get_joint_velocities(self.articulation_root_path) + joint_positions = dic["joint_qpos"] + joint_velocities = dic["joint_qvel"] for arm in self.arm_names: # Add arm info dic["arm_{}_qpos".format(arm)] = joint_positions[self.arm_control_idx[arm]] @@ -424,11 +425,10 @@ def _get_proprioception_dict(self): dic["arm_{}_qvel".format(arm)] = joint_velocities[self.arm_control_idx[arm]] # Add eef and grasping info - dic["eef_{}_pos".format(arm)], dic["eef_{}_quat".format(arm)] = ( - ControllableObjectViewAPI.get_link_relative_position_orientation( - self.articulation_root_path, self.eef_link_names[arm] - ) + eef_pos, eef_quat = ControllableObjectViewAPI.get_link_relative_position_orientation( + self.articulation_root_path, self.eef_link_names[arm] ) + dic["eef_{}_pos".format(arm)], dic["eef_{}_quat".format(arm)] = cb.to_torch(eef_pos), cb.to_torch(eef_quat) dic["grasp_{}".format(arm)] = th.tensor([self.is_grasping(arm)]) dic["gripper_{}_qpos".format(arm)] = joint_positions[self.gripper_control_idx[arm]] 
dic["gripper_{}_qvel".format(arm)] = joint_velocities[self.gripper_control_idx[arm]] @@ -979,11 +979,11 @@ def _freeze_gripper(self, arm="default"): def curobo_path(self): """ Returns: - str or Dict[CuroboEmbodimentSelection, str]: file path to the robot curobo file or a mapping from - CuroboEmbodimentSelection to the file path + str or Dict[CuRoboEmbodimentSelection, str]: file path to the robot curobo file or a mapping from + CuRoboEmbodimentSelection to the file path """ # Import here to avoid circular imports - from omnigibson.action_primitives.curobo import CuroboEmbodimentSelection + from omnigibson.action_primitives.curobo import CuRoboEmbodimentSelection # By default, sets the standardized path model = self.model_name.lower() @@ -991,7 +991,7 @@ def curobo_path(self): emb_sel: os.path.join( gm.ASSET_PATH, f"models/{model}/curobo/{model}_description_curobo_{emb_sel.value}.yaml" ) - for emb_sel in CuroboEmbodimentSelection + for emb_sel in CuRoboEmbodimentSelection } @property diff --git a/omnigibson/robots/robot_base.py b/omnigibson/robots/robot_base.py index 834b9bbb8..969a8d0b1 100644 --- a/omnigibson/robots/robot_base.py +++ b/omnigibson/robots/robot_base.py @@ -14,6 +14,7 @@ VisionSensor, create_sensor, ) +from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.utils.constants import PrimType from omnigibson.utils.gym_utils import GymObservable from omnigibson.utils.numpy_utils import NumpyTypes @@ -295,10 +296,11 @@ def _get_proprioception_dict(self): dict: keyword-mapped proprioception observations available for this robot. Can be extended by subclasses """ - joint_positions = ControllableObjectViewAPI.get_joint_positions(self.articulation_root_path) - joint_velocities = ControllableObjectViewAPI.get_joint_velocities(self.articulation_root_path) - joint_efforts = ControllableObjectViewAPI.get_joint_efforts(self.articulation_root_path) + joint_positions = cb.to_torch(ControllableObjectViewAPI.get_joint_positions(self.articulation_root_path)) + joint_velocities = cb.to_torch(ControllableObjectViewAPI.get_joint_velocities(self.articulation_root_path)) + joint_efforts = cb.to_torch(ControllableObjectViewAPI.get_joint_efforts(self.articulation_root_path)) pos, quat = ControllableObjectViewAPI.get_position_orientation(self.articulation_root_path) + pos, quat = cb.to_torch(pos), cb.to_torch(quat) ori = T.quat2euler(quat) ori_2d = T.z_angle_from_quat(quat) @@ -316,8 +318,8 @@ def _get_proprioception_dict(self): robot_2d_ori=ori_2d, robot_2d_ori_cos=th.cos(ori_2d), robot_2d_ori_sin=th.sin(ori_2d), - robot_lin_vel=ControllableObjectViewAPI.get_linear_velocity(self.articulation_root_path), - robot_ang_vel=ControllableObjectViewAPI.get_angular_velocity(self.articulation_root_path), + robot_lin_vel=cb.to_torch(ControllableObjectViewAPI.get_linear_velocity(self.articulation_root_path)), + robot_ang_vel=cb.to_torch(ControllableObjectViewAPI.get_angular_velocity(self.articulation_root_path)), ) def _load_observation_space(self): From 50d390a48b8c9ea76647c8314b34fbae96283e7f Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Wed, 11 Dec 2024 18:56:06 -0800 Subject: [PATCH 13/38] fix holonomic robot pose setting --- omnigibson/robots/holonomic_base_robot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/omnigibson/robots/holonomic_base_robot.py b/omnigibson/robots/holonomic_base_robot.py index 25ff76691..fca8391f7 100644 --- a/omnigibson/robots/holonomic_base_robot.py +++ b/omnigibson/robots/holonomic_base_robot.py @@ -254,7 
+254,7 @@ def set_position_orientation(self, position=None, orientation=None, frame: Liter joint_pos, joint_orn = cb.from_torch(joint_pos), cb.from_torch(joint_orn) inv_joint_pos, inv_joint_orn = cb.T.mat2pose(cb.T.pose_inv(cb.T.pose2mat((joint_pos, joint_orn)))) - relative_pos, relative_orn = cb.T.pose_transform(inv_joint_pos, inv_joint_orn, position, orientation) + relative_pos, relative_orn = cb.T.pose_transform(inv_joint_pos, inv_joint_orn, cb.from_torch(position), cb.from_torch(orientation)) relative_rpy = cb.T.quat2euler(relative_orn) self.joints["base_footprint_x_joint"].set_pos(relative_pos[0], drive=False) self.joints["base_footprint_y_joint"].set_pos(relative_pos[1], drive=False) From fd07d8a8bd50829a5c4f7d0b35444e21878df177 Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Wed, 11 Dec 2024 18:56:48 -0800 Subject: [PATCH 14/38] fix controllable object and filters state de/serialization --- omnigibson/controllers/controller_base.py | 2 +- omnigibson/objects/controllable_object.py | 6 +++--- omnigibson/utils/processing_utils.py | 4 ++-- tests/test_controllers.py | 19 ++++++++----------- 4 files changed, 14 insertions(+), 17 deletions(-) diff --git a/omnigibson/controllers/controller_base.py b/omnigibson/controllers/controller_base.py index 86500641a..07157bad0 100644 --- a/omnigibson/controllers/controller_base.py +++ b/omnigibson/controllers/controller_base.py @@ -403,7 +403,7 @@ def serialize(self, state): else th.zeros(self.goal_dim) ) - return cb.cat([cb.array([state["goal_is_valid"]]), goal_state_flattened]) + return th.cat([th.tensor([state["goal_is_valid"]]), goal_state_flattened]) def deserialize(self, state): goal_is_valid = bool(state[0]) diff --git a/omnigibson/objects/controllable_object.py b/omnigibson/objects/controllable_object.py index b312ec7b0..60e19fa74 100644 --- a/omnigibson/objects/controllable_object.py +++ b/omnigibson/objects/controllable_object.py @@ -704,9 +704,9 @@ def serialize(self, state): state_flat = super().serialize(state=state) # Serialize the controller states sequentially - controller_states_flat = cb.to_torch(cb.cat( + controller_states_flat = th.cat( [c.serialize(state=state["controllers"][c_name]) for c_name, c in self._controllers.items()] - )) + ) # Concatenate and return return th.cat([state_flat, controller_states_flat]) @@ -718,7 +718,7 @@ def deserialize(self, state): # Deserialize the controller states sequentially controller_states = dict() for c_name, c in self._controllers.items(): - controller_states[c_name], deserialized_items = c.deserialize(state=cb.from_torch(state[idx:])) + controller_states[c_name], deserialized_items = c.deserialize(state=state[idx:]) idx += deserialized_items state_dict["controllers"] = controller_states diff --git a/omnigibson/utils/processing_utils.py b/omnigibson/utils/processing_utils.py index 8577c7232..6d6045626 100644 --- a/omnigibson/utils/processing_utils.py +++ b/omnigibson/utils/processing_utils.py @@ -81,12 +81,12 @@ def estimate(self, observation): # Compute value based on whether we're fully filled or not if not self.fully_filled: - val = self.past_samples[: self.current_idx + 1, :].mean(dim=0) + val = cb.mean(self.past_samples[: self.current_idx + 1, :], dim=0) # Denote that we're fully filled if we're at the end of the buffer if self.current_idx == self.filter_width - 1: self.fully_filled = True else: - val = self.past_samples.mean(dim=0) + val = cb.mean(self.past_samples, dim=0) # Increment the index to write the next sample to self.current_idx = (self.current_idx + 
1) % self.filter_width diff --git a/tests/test_controllers.py b/tests/test_controllers.py index 7b13674aa..5b2fda7ff 100644 --- a/tests/test_controllers.py +++ b/tests/test_controllers.py @@ -4,6 +4,7 @@ import omnigibson as og import omnigibson.utils.transform_utils as T +from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.robots import LocomotionRobot @@ -18,7 +19,7 @@ def test_arm_control(): { "type": "FrankaPanda", "name": "robot0", - "obs_modalities": "rgb", + "obs_modalities": [], "position": [150, 150, 100], "orientation": [0, 0, 0, 1], "action_normalize": False, @@ -26,7 +27,7 @@ def test_arm_control(): { "type": "Fetch", "name": "robot1", - "obs_modalities": "rgb", + "obs_modalities": [], "position": [150, 150, 105], "orientation": [0, 0, 0, 1], "action_normalize": False, @@ -34,7 +35,7 @@ def test_arm_control(): { "type": "Tiago", "name": "robot2", - "obs_modalities": "rgb", + "obs_modalities": [], "position": [150, 150, 110], "orientation": [0, 0, 0, 1], "action_normalize": False, @@ -42,7 +43,7 @@ def test_arm_control(): { "type": "A1", "name": "robot3", - "obs_modalities": "rgb", + "obs_modalities": [], "position": [150, 150, 115], "orientation": [0, 0, 0, 1], "action_normalize": False, @@ -50,7 +51,7 @@ def test_arm_control(): { "type": "R1", "name": "robot4", - "obs_modalities": "rgb", + "obs_modalities": [], "position": [150, 150, 120], "orientation": [0, 0, 0, 1], "action_normalize": False, @@ -296,12 +297,8 @@ def check_ori_error(curr_orientation, init_orientation, tol=0.1): curr_pos, curr_quat = robot.get_relative_eef_pose(arm=arm) arm_controller = robot.controllers[f"arm_{arm}"] arm_goal = arm_controller.goal - target_pos = arm_goal["target_pos"] - target_quat = ( - arm_goal["target_quat"] - if controller == "InverseKinematicsController" - else T.mat2quat(arm_goal["target_ori_mat"]) - ) + target_pos = cb.to_torch(arm_goal["target_pos"]) + target_quat = T.mat2quat(cb.to_torch(arm_goal["target_ori_mat"])) pos_check = err_checks[controller_mode][action_name]["pos"] if pos_check is not None: is_valid_pos = pos_check(target_pos, curr_pos, init_pos) From 940192279edc476084a9e65d6aa7102f3958a59c Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Wed, 11 Dec 2024 18:57:03 -0800 Subject: [PATCH 15/38] make np tf use numba --- omnigibson/utils/transform_utils_np.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/omnigibson/utils/transform_utils_np.py b/omnigibson/utils/transform_utils_np.py index 237d98377..5c4997374 100644 --- a/omnigibson/utils/transform_utils_np.py +++ b/omnigibson/utils/transform_utils_np.py @@ -567,12 +567,6 @@ def _quat2mat(quaternion): return rotation_matrix -# def pose2mat(pose): -# if pose.dtype != np.float32: -# pose = pose.astype(np.float32) -# return _pose2mat(pose) - - @jit(nopython=True) def pose2mat(pose): """ From 920d1618558bcb6c26cbbdf51c632981c3ac5657 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 12 Dec 2024 03:07:34 +0000 Subject: [PATCH 16/38] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- omnigibson/controllers/controller_base.py | 8 +++---- omnigibson/controllers/ik_controller.py | 17 ++++++++++---- omnigibson/controllers/joint_controller.py | 12 ++++++---- .../multi_finger_gripper_controller.py | 6 +++-- .../controllers/null_joint_controller.py | 2 +- omnigibson/controllers/osc_controller.py | 16 ++++++++++--- omnigibson/objects/controllable_object.py | 
2 +- omnigibson/robots/articulated_trunk_robot.py | 3 ++- omnigibson/robots/behavior_robot.py | 2 +- omnigibson/robots/holonomic_base_robot.py | 10 +++++--- omnigibson/robots/manipulation_robot.py | 4 ++-- omnigibson/robots/tiago.py | 2 +- omnigibson/simulator.py | 5 ++-- omnigibson/utils/backend_utils.py | 3 ++- omnigibson/utils/processing_utils.py | 2 +- omnigibson/utils/python_utils.py | 1 + omnigibson/utils/transform_utils.py | 1 - omnigibson/utils/usd_utils.py | 23 +++++++++++-------- tests/test_controllers.py | 2 +- 19 files changed, 76 insertions(+), 45 deletions(-) diff --git a/omnigibson/controllers/controller_base.py b/omnigibson/controllers/controller_base.py index 07157bad0..a863c49cf 100644 --- a/omnigibson/controllers/controller_base.py +++ b/omnigibson/controllers/controller_base.py @@ -3,8 +3,9 @@ from enum import IntEnum import torch as th -from omnigibson.utils.python_utils import Recreatable, Registerable, Serializable, assert_valid_key, classproperty + from omnigibson.utils.backend_utils import _compute_backend as cb +from omnigibson.utils.python_utils import Recreatable, Registerable, Serializable, assert_valid_key, classproperty # Global dicts that will contain mappings REGISTERED_CONTROLLERS = dict() @@ -34,9 +35,6 @@ class IsGraspingState(IntEnum): FALSE = -1 - - - # Define macros class ControlType: NONE = -1 @@ -453,7 +451,7 @@ def nums2array(nums, dim): return ( nums if isinstance(nums, cb.arr_type) - else cb.array(nums)if isinstance(nums, Iterable) else cb.ones(dim) * nums + else cb.array(nums) if isinstance(nums, Iterable) else cb.ones(dim) * nums ) @property diff --git a/omnigibson/controllers/ik_controller.py b/omnigibson/controllers/ik_controller.py index b4a8502a4..1983f846c 100644 --- a/omnigibson/controllers/ik_controller.py +++ b/omnigibson/controllers/ik_controller.py @@ -1,10 +1,10 @@ import math from collections.abc import Iterable -from omnigibson.utils.backend_utils import _ComputeBackend, _ComputeTorchBackend, _ComputeNumpyBackend -from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.controllers import ControlType, ManipulationController from omnigibson.controllers.joint_controller import JointController +from omnigibson.utils.backend_utils import _compute_backend as cb +from omnigibson.utils.backend_utils import _ComputeBackend, _ComputeNumpyBackend, _ComputeTorchBackend from omnigibson.utils.processing_utils import MovingAverageFilter from omnigibson.utils.ui_utils import create_module_logger @@ -220,7 +220,11 @@ def serialize(self, state): return th.cat( [ state_flat, - th.tensor([]) if self.control_filter is None else self.control_filter.serialize(state=state["control_filter"]), + ( + th.tensor([]) + if self.control_filter is None + else self.control_filter.serialize(state=state["control_filter"]) + ), ] ) @@ -368,7 +372,10 @@ def command_dim(self): import torch as th + import omnigibson.utils.transform_utils as TT + + @th.jit.script def _compute_ik_qpos_torch( q: th.Tensor, @@ -400,7 +407,10 @@ def _compute_ik_qpos_torch( import numpy as np from numba import jit + import omnigibson.utils.transform_utils_np as NT + + # Use numba since faster @jit(nopython=True) def _compute_ik_qpos_numpy( @@ -432,4 +442,3 @@ def _compute_ik_qpos_numpy( setattr(_ComputeBackend, "compute_ik_qpos", None) setattr(_ComputeTorchBackend, "compute_ik_qpos", _compute_ik_qpos_torch) setattr(_ComputeNumpyBackend, "compute_ik_qpos", _compute_ik_qpos_numpy) - diff --git a/omnigibson/controllers/joint_controller.py 
b/omnigibson/controllers/joint_controller.py index c11eaaf64..39dcb9a65 100644 --- a/omnigibson/controllers/joint_controller.py +++ b/omnigibson/controllers/joint_controller.py @@ -1,7 +1,5 @@ import math -from omnigibson.utils.backend_utils import _ComputeBackend, _ComputeTorchBackend, _ComputeNumpyBackend -from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.controllers import ( ControlType, GripperController, @@ -10,6 +8,8 @@ ManipulationController, ) from omnigibson.macros import create_module_macros +from omnigibson.utils.backend_utils import _compute_backend as cb +from omnigibson.utils.backend_utils import _ComputeBackend, _ComputeNumpyBackend, _ComputeTorchBackend from omnigibson.utils.python_utils import assert_valid_key from omnigibson.utils.ui_utils import create_module_logger @@ -286,6 +286,8 @@ def command_dim(self): import torch as th + + @th.compile def _compute_joint_torques_torch( u: th.Tensor, @@ -296,9 +298,10 @@ def _compute_joint_torques_torch( return mm[dof_idxs_mat] @ u - import numpy as np from numba import jit + + # Use numba since faster @jit(nopython=True) def numba_ix(arr, rows, cols): @@ -316,7 +319,7 @@ def numba_ix(arr, rows, cols): one_d_index = np.zeros(len(rows) * len(cols), dtype=np.int32) for i, r in enumerate(rows): start = i * len(cols) - one_d_index[start: start + len(cols)] = cols + arr.shape[1] * r + one_d_index[start : start + len(cols)] = cols + arr.shape[1] * r arr_1d = arr.reshape((arr.shape[0] * arr.shape[1], 1)) slice_1d = np.take(arr_1d, one_d_index) @@ -336,4 +339,3 @@ def _compute_joint_torques_numpy( setattr(_ComputeBackend, "compute_joint_torques", None) setattr(_ComputeTorchBackend, "compute_joint_torques", _compute_joint_torques_torch) setattr(_ComputeNumpyBackend, "compute_joint_torques", _compute_joint_torques_numpy) - diff --git a/omnigibson/controllers/multi_finger_gripper_controller.py b/omnigibson/controllers/multi_finger_gripper_controller.py index 933721b05..864eb24d4 100644 --- a/omnigibson/controllers/multi_finger_gripper_controller.py +++ b/omnigibson/controllers/multi_finger_gripper_controller.py @@ -1,6 +1,6 @@ -from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.controllers import ControlType, GripperController, IsGraspingState from omnigibson.macros import create_module_macros +from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.utils.python_utils import assert_valid_key VALID_MODES = { @@ -254,7 +254,9 @@ def _update_grasping_state(self, control_dict): dist_from_upper_limit = max_pos - finger_pos # If either of the joint positions are not near the joint limits with some tolerance (m.POS_TOLERANCE) - valid_grasp_pos = dist_from_lower_limit.mean() > m.POS_TOLERANCE or dist_from_upper_limit.mean() > m.POS_TOLERANCE + valid_grasp_pos = ( + dist_from_lower_limit.mean() > m.POS_TOLERANCE or dist_from_upper_limit.mean() > m.POS_TOLERANCE + ) # And the joint velocities are close to zero with some tolerance (m.VEL_TOLERANCE) valid_grasp_vel = cb.all(cb.abs(finger_vel) < m.VEL_TOLERANCE) diff --git a/omnigibson/controllers/null_joint_controller.py b/omnigibson/controllers/null_joint_controller.py index 83782c7db..42bfd47d6 100644 --- a/omnigibson/controllers/null_joint_controller.py +++ b/omnigibson/controllers/null_joint_controller.py @@ -1,5 +1,5 @@ -from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.controllers import JointController +from omnigibson.utils.backend_utils import _compute_backend as cb class 
NullJointController(JointController): diff --git a/omnigibson/controllers/osc_controller.py b/omnigibson/controllers/osc_controller.py index 53c396a1d..032e501f0 100644 --- a/omnigibson/controllers/osc_controller.py +++ b/omnigibson/controllers/osc_controller.py @@ -1,8 +1,8 @@ import math -from omnigibson.utils.backend_utils import _ComputeBackend, _ComputeTorchBackend, _ComputeNumpyBackend -from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.controllers import ControlType, ManipulationController +from omnigibson.utils.backend_utils import _compute_backend as cb +from omnigibson.utils.backend_utils import _ComputeBackend, _ComputeNumpyBackend, _ComputeTorchBackend from omnigibson.utils.processing_utils import MovingAverageFilter from omnigibson.utils.python_utils import assert_valid_key from omnigibson.utils.ui_utils import create_module_logger @@ -412,7 +412,11 @@ def compute_control(self, goal_dict, control_dict): ee_pos=cb.as_float32(ee_pos), ee_mat=cb.as_float32(cb.T.quat2mat(ee_quat)), ee_lin_vel=cb.as_float32(ee_vel[:3]), - ee_ang_vel_err=cb.as_float32(cb.T.quat2axisangle(cb.T.quat_multiply(cb.T.axisangle2quat(-ee_vel[3:]), cb.T.axisangle2quat(base_ang_vel)))), + ee_ang_vel_err=cb.as_float32( + cb.T.quat2axisangle( + cb.T.quat_multiply(cb.T.axisangle2quat(-ee_vel[3:]), cb.T.axisangle2quat(base_ang_vel)) + ) + ), goal_pos=goal_dict["target_pos"], goal_ori_mat=goal_dict["target_ori_mat"], kp=kp, @@ -486,7 +490,10 @@ def command_dim(self): import torch as th + import omnigibson.utils.transform_utils as TT + + @th.jit.script def _compute_osc_torques_torch( q: th.Tensor, @@ -564,7 +571,10 @@ def _compute_osc_torques_torch( import numpy as np from numba import jit + import omnigibson.utils.transform_utils_np as NT + + # Use numba since faster @jit(nopython=True) def _compute_osc_torques_numpy( diff --git a/omnigibson/objects/controllable_object.py b/omnigibson/objects/controllable_object.py index 60e19fa74..ad96d7f10 100644 --- a/omnigibson/objects/controllable_object.py +++ b/omnigibson/objects/controllable_object.py @@ -10,8 +10,8 @@ import omnigibson as og from omnigibson.controllers import create_controller from omnigibson.controllers.controller_base import ControlType -from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.objects.object_base import BaseObject +from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.utils.constants import JointType, PrimType from omnigibson.utils.numpy_utils import NumpyTypes from omnigibson.utils.python_utils import CachedFunctions, assert_valid_key, merge_nested_dicts diff --git a/omnigibson/robots/articulated_trunk_robot.py b/omnigibson/robots/articulated_trunk_robot.py index 7387d7a11..20a9e9ec1 100644 --- a/omnigibson/robots/articulated_trunk_robot.py +++ b/omnigibson/robots/articulated_trunk_robot.py @@ -1,6 +1,7 @@ -import torch as th from functools import cached_property +import torch as th + from omnigibson.robots.manipulation_robot import ManipulationRobot from omnigibson.utils.python_utils import classproperty from omnigibson.utils.usd_utils import ControllableObjectViewAPI diff --git a/omnigibson/robots/behavior_robot.py b/omnigibson/robots/behavior_robot.py index 7d3693412..b1eebd3de 100644 --- a/omnigibson/robots/behavior_robot.py +++ b/omnigibson/robots/behavior_robot.py @@ -1,9 +1,9 @@ import itertools -from functools import cached_property import math import os from abc import ABC from collections import OrderedDict +from functools import 
cached_property from typing import Iterable, List, Literal, Tuple import torch as th diff --git a/omnigibson/robots/holonomic_base_robot.py b/omnigibson/robots/holonomic_base_robot.py index fca8391f7..89abac3ee 100644 --- a/omnigibson/robots/holonomic_base_robot.py +++ b/omnigibson/robots/holonomic_base_robot.py @@ -6,10 +6,10 @@ import omnigibson as og import omnigibson.lazy as lazy -from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.macros import create_module_macros from omnigibson.robots.locomotion_robot import LocomotionRobot from omnigibson.robots.manipulation_robot import ManipulationRobot +from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.utils.python_utils import classproperty from omnigibson.utils.usd_utils import ControllableObjectViewAPI @@ -254,7 +254,9 @@ def set_position_orientation(self, position=None, orientation=None, frame: Liter joint_pos, joint_orn = cb.from_torch(joint_pos), cb.from_torch(joint_orn) inv_joint_pos, inv_joint_orn = cb.T.mat2pose(cb.T.pose_inv(cb.T.pose2mat((joint_pos, joint_orn)))) - relative_pos, relative_orn = cb.T.pose_transform(inv_joint_pos, inv_joint_orn, cb.from_torch(position), cb.from_torch(orientation)) + relative_pos, relative_orn = cb.T.pose_transform( + inv_joint_pos, inv_joint_orn, cb.from_torch(position), cb.from_torch(orientation) + ) relative_rpy = cb.T.quat2euler(relative_orn) self.joints["base_footprint_x_joint"].set_pos(relative_pos[0], drive=False) self.joints["base_footprint_y_joint"].set_pos(relative_pos[1], drive=False) @@ -321,7 +323,9 @@ def _postprocess_control(self, control, control_type): global_pose = cur_pose @ local_pose lin_vel_global, ang_vel_global = global_pose[0, :3, 3], global_pose[1, :3, 3] - u_vec[cb.from_torch(self.base_control_idx)] = cb.array([lin_vel_global[0], lin_vel_global[1], ang_vel_global[2]]) + u_vec[cb.from_torch(self.base_control_idx)] = cb.array( + [lin_vel_global[0], lin_vel_global[1], ang_vel_global[2]] + ) return u_vec, u_type_vec diff --git a/omnigibson/robots/manipulation_robot.py b/omnigibson/robots/manipulation_robot.py index 8fa160208..7f5cec354 100644 --- a/omnigibson/robots/manipulation_robot.py +++ b/omnigibson/robots/manipulation_robot.py @@ -1,8 +1,8 @@ import math import os from abc import abstractmethod -from functools import cached_property from collections import namedtuple +from functools import cached_property from typing import Literal import networkx as nx @@ -10,7 +10,6 @@ import omnigibson as og import omnigibson.lazy as lazy -from omnigibson.utils.backend_utils import _compute_backend as cb import omnigibson.utils.transform_utils as T from omnigibson.controllers import ( ControlType, @@ -24,6 +23,7 @@ from omnigibson.macros import create_module_macros, gm from omnigibson.object_states import ContactBodies from omnigibson.robots.robot_base import BaseRobot +from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.utils.constants import JointType, PrimType from omnigibson.utils.geometry_utils import generate_points_in_volume_checker_function from omnigibson.utils.python_utils import assert_valid_key, classproperty diff --git a/omnigibson/robots/tiago.py b/omnigibson/robots/tiago.py index 3b4ec586f..2cfa8876a 100644 --- a/omnigibson/robots/tiago.py +++ b/omnigibson/robots/tiago.py @@ -1,7 +1,7 @@ import math import os -from typing import Literal from functools import cached_property +from typing import Literal import torch as th diff --git a/omnigibson/simulator.py b/omnigibson/simulator.py 
index 183cb5926..165f22c96 100644 --- a/omnigibson/simulator.py +++ b/omnigibson/simulator.py @@ -242,10 +242,9 @@ def _launch_app(): # Set compute backend import omnigibson.utils.backend_utils as _backend_utils + _backend_utils._compute_backend.set_methods( - _backend_utils._ComputeNumpyBackend - if gm.USE_NUMPY_CONTROLLER_BACKEND - else _backend_utils._ComputeTorchBackend + _backend_utils._ComputeNumpyBackend if gm.USE_NUMPY_CONTROLLER_BACKEND else _backend_utils._ComputeTorchBackend ) return app diff --git a/omnigibson/utils/backend_utils.py b/omnigibson/utils/backend_utils.py index ad93563fe..879d2771d 100644 --- a/omnigibson/utils/backend_utils.py +++ b/omnigibson/utils/backend_utils.py @@ -1,5 +1,6 @@ -import torch as th import numpy as np +import torch as th + import omnigibson.utils.transform_utils as TT import omnigibson.utils.transform_utils_np as NT from omnigibson.utils.python_utils import recursively_convert_from_torch diff --git a/omnigibson/utils/processing_utils.py b/omnigibson/utils/processing_utils.py index 6d6045626..0aa341c1d 100644 --- a/omnigibson/utils/processing_utils.py +++ b/omnigibson/utils/processing_utils.py @@ -1,7 +1,7 @@ import torch as th -from omnigibson.utils.python_utils import Serializable from omnigibson.utils.backend_utils import _compute_backend as cb +from omnigibson.utils.python_utils import Serializable class Filter(Serializable): diff --git a/omnigibson/utils/python_utils.py b/omnigibson/utils/python_utils.py index f9c4c8a8a..9f1e96bbe 100644 --- a/omnigibson/utils/python_utils.py +++ b/omnigibson/utils/python_utils.py @@ -786,6 +786,7 @@ def recursively_convert_to_torch(state): def recursively_convert_from_torch(state): # For all the lists in state dict, convert from torch tensor -> numpy array import numpy as np + for key, value in state.items(): if isinstance(value, dict): state[key] = recursively_convert_from_torch(value) diff --git a/omnigibson/utils/transform_utils.py b/omnigibson/utils/transform_utils.py index be82d8025..c2301bb61 100644 --- a/omnigibson/utils/transform_utils.py +++ b/omnigibson/utils/transform_utils.py @@ -1402,4 +1402,3 @@ def orientation_error(desired, current): error = 0.5 * (torch.linalg.cross(rc1, rd1) + torch.linalg.cross(rc2, rd2) + torch.linalg.cross(rc3, rd3)) return error.reshape(desired.shape[:-2] + (3,)) - diff --git a/omnigibson/utils/usd_utils.py b/omnigibson/utils/usd_utils.py index b02af97ba..04e7fc651 100644 --- a/omnigibson/utils/usd_utils.py +++ b/omnigibson/utils/usd_utils.py @@ -6,16 +6,16 @@ from collections.abc import Iterable import numpy as np -from numba import jit, prange import torch as th import trimesh +from numba import jit, prange import omnigibson as og import omnigibson.lazy as lazy -from omnigibson.utils.backend_utils import _ComputeBackend, _ComputeTorchBackend, _ComputeNumpyBackend -from omnigibson.utils.backend_utils import _compute_backend as cb import omnigibson.utils.transform_utils as T from omnigibson.macros import gm +from omnigibson.utils.backend_utils import _compute_backend as cb +from omnigibson.utils.backend_utils import _ComputeBackend, _ComputeNumpyBackend, _ComputeTorchBackend from omnigibson.utils.constants import PRIMITIVE_MESH_TYPES, JointType, PrimType from omnigibson.utils.numpy_utils import vtarray_to_torch from omnigibson.utils.python_utils import assert_valid_key @@ -1092,7 +1092,9 @@ def get_generalized_gravity_forces(self, prim_path): def get_coriolis_and_centrifugal_forces(self, prim_path): if "coriolis_and_centrifugal_forces" not in self._read_cache: - 
self._read_cache["coriolis_and_centrifugal_forces"] = cb.from_torch(self._view.get_coriolis_and_centrifugal_forces()) + self._read_cache["coriolis_and_centrifugal_forces"] = cb.from_torch( + self._view.get_coriolis_and_centrifugal_forces() + ) idx = self._idx[prim_path] return self._read_cache["coriolis_and_centrifugal_forces"][idx] @@ -1875,12 +1877,14 @@ def delete_or_deactivate_prim(prim_path): import omnigibson.utils.transform_utils as TT + + @th.compile def _compute_relative_poses_torch( - idx: int, - n_links: int, - all_tfs: th.Tensor, - base_pose: th.Tensor, + idx: int, + n_links: int, + all_tfs: th.Tensor, + base_pose: th.Tensor, ): tfs = th.zeros((n_links, 4, 4), dtype=th.float32) # base vel is the final -1 index @@ -1903,6 +1907,8 @@ def _compute_relative_poses_torch( import omnigibson.utils.transform_utils_np as NT + + @jit(nopython=True) def _compute_relative_poses_numpy(idx, n_links, all_tfs, base_pose): tfs = np.zeros((n_links, 4, 4), dtype=np.float32) @@ -1933,4 +1939,3 @@ def _compute_relative_poses_numpy(idx, n_links, all_tfs, base_pose): setattr(_ComputeBackend, "compute_relative_poses", None) setattr(_ComputeTorchBackend, "compute_relative_poses", _compute_relative_poses_torch) setattr(_ComputeNumpyBackend, "compute_relative_poses", _compute_relative_poses_numpy) - diff --git a/tests/test_controllers.py b/tests/test_controllers.py index 5b2fda7ff..a25c9d1ba 100644 --- a/tests/test_controllers.py +++ b/tests/test_controllers.py @@ -4,8 +4,8 @@ import omnigibson as og import omnigibson.utils.transform_utils as T -from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.robots import LocomotionRobot +from omnigibson.utils.backend_utils import _compute_backend as cb def test_arm_control(): From 5121aa587a1cccf2919858a7ca3fbb78c9b4a7e7 Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Wed, 11 Dec 2024 19:08:26 -0800 Subject: [PATCH 17/38] ensure actions and control dict is handled properly externally --- .../starter_semantic_action_primitives.py | 7 ++++--- omnigibson/controllers/controller_base.py | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index d9c5e9ef5..898769da6 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -36,6 +36,7 @@ from omnigibson.robots.locomotion_robot import LocomotionRobot from omnigibson.robots.manipulation_robot import ManipulationRobot from omnigibson.tasks.behavior_task import BehaviorTask +from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.utils.control_utils import FKSolver, IKSolver from omnigibson.utils.grasping_planning_utils import get_grasp_poses_for_object_sticky, get_grasp_position_for_open from omnigibson.utils.motion_planning_utils import ( @@ -336,13 +337,13 @@ def __init__( arm = f"arm_{arm_name}" arm_ctrl = self.robot.controllers[arm] if isinstance(arm_ctrl, InverseKinematicsController): - pos_relative = control_dict[f"{eef}_pos_relative"] - quat_relative = control_dict[f"{eef}_quat_relative"] + pos_relative = cb.to_torch(control_dict[f"{eef}_pos_relative"]) + quat_relative = cb.to_torch(control_dict[f"{eef}_quat_relative"]) quat_relative_axis_angle = T.quat2axisangle(quat_relative) self._arm_targets[arm] = (pos_relative, quat_relative_axis_angle) else: - arm_target = 
control_dict["joint_position"][arm_ctrl.dof_idx] + arm_target = cb.to_torch(control_dict["joint_position"])[arm_ctrl.dof_idx] self._arm_targets[arm] = arm_target self.robot_copy = self._load_robot_copy() diff --git a/omnigibson/controllers/controller_base.py b/omnigibson/controllers/controller_base.py index 07157bad0..d285a033a 100644 --- a/omnigibson/controllers/controller_base.py +++ b/omnigibson/controllers/controller_base.py @@ -367,7 +367,7 @@ def compute_no_op_action(self, control_dict): if self._goal is None: self._goal = self.compute_no_op_goal(control_dict=control_dict) command = self._compute_no_op_action(control_dict=control_dict) - return self._reverse_preprocess_command(command) + return cb.to_torch(self._reverse_preprocess_command(command)) def _compute_no_op_action(self, control_dict): """ From 8d6f25f57c410d9c679a6a408f875bc8deb3fa1a Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Wed, 11 Dec 2024 19:36:42 -0800 Subject: [PATCH 18/38] fix control torch reference --- omnigibson/robots/manipulation_robot.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/omnigibson/robots/manipulation_robot.py b/omnigibson/robots/manipulation_robot.py index 7f5cec354..b92853df7 100644 --- a/omnigibson/robots/manipulation_robot.py +++ b/omnigibson/robots/manipulation_robot.py @@ -1315,23 +1315,24 @@ def _handle_assisted_grasping(self): # a zero action will actually keep the AG setting where it already is. controller = self._controllers[f"gripper_{arm}"] controlled_joints = controller.dof_idx + control = cb.to_torch(controller.control) threshold = th.mean( th.stack([self.joint_lower_limits[controlled_joints], self.joint_upper_limits[controlled_joints]]), dim=0, ) - if controller.control is None: + if control is None: applying_grasp = False elif self._grasping_direction == "lower": applying_grasp = ( - th.any(controller.control < threshold) + th.any(control < threshold) if controller.control_type == ControlType.POSITION - else th.any(controller.control < 0) + else th.any(control < 0) ) else: applying_grasp = ( - th.any(controller.control > threshold) + th.any(control > threshold) if controller.control_type == ControlType.POSITION - else th.any(controller.control > 0) + else th.any(control > 0) ) # Execute gradual release of object if self._ag_obj_in_hand[arm]: From 62d08b9b57f5515a17223e9f1ad0333b9eea17fd Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Thu, 12 Dec 2024 11:01:39 -0800 Subject: [PATCH 19/38] use compute backend for filter product --- omnigibson/utils/processing_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/omnigibson/utils/processing_utils.py b/omnigibson/utils/processing_utils.py index c47621317..bd81ceba8 100644 --- a/omnigibson/utils/processing_utils.py +++ b/omnigibson/utils/processing_utils.py @@ -109,7 +109,7 @@ def reset(self): @property def state_size(self): # This is the size of the internal buffer plus the current index and fully filled single values - return th.prod(self.past_samples.shape) + 2 + return cb.prod(self.past_samples.shape) + 2 def _dump_state(self): # Run super init first From 5d672a6a9d3682b421fd3d603a92667b85c06b02 Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Thu, 12 Dec 2024 12:05:14 -0800 Subject: [PATCH 20/38] use 0-dim command for null joint controller --- omnigibson/controllers/null_joint_controller.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/omnigibson/controllers/null_joint_controller.py 
b/omnigibson/controllers/null_joint_controller.py index 42bfd47d6..e4afe1f29 100644 --- a/omnigibson/controllers/null_joint_controller.py +++ b/omnigibson/controllers/null_joint_controller.py @@ -95,3 +95,12 @@ def update_default_goal(self, target): ), f"Default control must be length: {self.control_dim}, got length: {len(target)}" self._default_command = cb.array(target) + + def _compute_no_op_action(self, control_dict): + # Empty tensor since no action should be received + return cb.array([]) + + @property + def command_dim(self): + # Auto-generates control, so no command should be received + return 0 From 0fca8d69aeefa919c37b2e53052caa71569944f0 Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Thu, 12 Dec 2024 13:54:20 -0800 Subject: [PATCH 21/38] improve curobo test determinism --- tests/test_curobo.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/test_curobo.py b/tests/test_curobo.py index 4161a1e8a..b5b0ef606 100644 --- a/tests/test_curobo.py +++ b/tests/test_curobo.py @@ -361,6 +361,10 @@ def test_curobo(): print(f"Planning for {len(target_pos[robot.eef_link_names[robot.default_arm]])} eef targets...") + # Make sure robot is kept still for better determinism before planning + robot.keep_still() + og.sim.step_physics() + # Generate collision-free trajectories to the sampled eef poses (including self-collisions) successes, traj_paths = cmg.compute_trajectories( target_pos=target_pos, From c83772d5c79bbba3626dd39c521a83efc6733f5b Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Thu, 12 Dec 2024 14:22:18 -0800 Subject: [PATCH 22/38] make collision activation settable for better reproducability in curobo unit test --- omnigibson/action_primitives/curobo.py | 6 +++++- tests/test_curobo.py | 1 + 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/omnigibson/action_primitives/curobo.py b/omnigibson/action_primitives/curobo.py index c203bd17d..3450c98a4 100644 --- a/omnigibson/action_primitives/curobo.py +++ b/omnigibson/action_primitives/curobo.py @@ -150,6 +150,7 @@ def __init__( motion_cfg_kwargs=None, batch_size=2, use_cuda_graph=True, + collision_activation_distance=0.005, debug=False, use_default_embodiment_only=False, ): @@ -165,6 +166,9 @@ def __init__( MotionGenConfig.load_from_robot_config(...) batch_size (int): Size of batches for computing trajectories. This must be FIXED use_cuda_graph (bool): Whether to use CUDA graph for motion generation or not + collision_activation_distance (float): Distance threshold at which a collision is detected. 
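Patches 21 and 22 work toward a reproducible CuRobo test from two sides: the robot is explicitly settled before any planning call, and the collision activation distance becomes a constructor argument so the test can widen it from the 0.005 default to 0.01. A minimal sketch of the settling step only (the motion-generator construction itself is elided here, since its class name is not shown in this hunk):

import omnigibson as og

def settle_before_planning(robot):
    # Zero out residual joint velocities, then advance physics once so the state
    # the planner reads is static and identical from run to run.
    robot.keep_still()
    og.sim.step_physics()

The wider activation distance then keeps borderline targets from flipping between colliding and collision-free across otherwise identical runs.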
+ Increasing this value will make the motion planner more conservative in its planning with respect + to the underlying sphere representation of the robot debug (bool): Whether to debug generation or not, setting this True will set use_cuda_graph to False implicitly use_default_embodiment_only (bool): Whether to use only the default embodiment for the robot or not """ @@ -227,7 +231,7 @@ def __init__( num_trajopt_seeds=4, num_graph_seeds=4, interpolation_dt=0.03, - collision_activation_distance=0.005, + collision_activation_distance=collision_activation_distance, self_collision_check=True, maximum_trajectory_dt=None, fixed_iters_trajopt=True, diff --git a/tests/test_curobo.py b/tests/test_curobo.py index b5b0ef606..e20f77505 100644 --- a/tests/test_curobo.py +++ b/tests/test_curobo.py @@ -236,6 +236,7 @@ def test_curobo(): batch_size=batch_size, debug=False, use_cuda_graph=True, + collision_activation_distance=0.01, # Use larger activation distance for better reproducibility use_default_embodiment_only=True, ) From 11b9876c28328aa67a49b691a78476e3368e23ad Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 12 Dec 2024 22:22:44 +0000 Subject: [PATCH 23/38] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- tests/test_curobo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_curobo.py b/tests/test_curobo.py index e20f77505..caf230df4 100644 --- a/tests/test_curobo.py +++ b/tests/test_curobo.py @@ -236,7 +236,7 @@ def test_curobo(): batch_size=batch_size, debug=False, use_cuda_graph=True, - collision_activation_distance=0.01, # Use larger activation distance for better reproducibility + collision_activation_distance=0.01, # Use larger activation distance for better reproducibility use_default_embodiment_only=True, ) From b60600179a8f8add37a3aa8e76ad50fcd70f6212 Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Thu, 12 Dec 2024 15:30:18 -0800 Subject: [PATCH 24/38] fix IK controller arg parsing --- omnigibson/controllers/ik_controller.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/omnigibson/controllers/ik_controller.py b/omnigibson/controllers/ik_controller.py index 1983f846c..1059fa082 100644 --- a/omnigibson/controllers/ik_controller.py +++ b/omnigibson/controllers/ik_controller.py @@ -152,7 +152,10 @@ def limiter(target_pos: Array[float], target_quat: Array[float], control_dict: D command_input_limits[1][3:] = cb.array([math.pi] * len(command_input_limits[1][3:])) if command_output_limits is not None: if not isinstance(command_output_limits, str) and isinstance(command_output_limits, Iterable): - command_output_limits = cb.array(command_output_limits) + command_output_limits = [ + cb.array(command_output_limits[0]), + cb.array(command_output_limits[1]), + ] if type(command_output_limits) == str and command_output_limits == "default": command_output_limits = [ cb.array([-1.0, -1.0, -1.0, -math.pi, -math.pi, -math.pi]), From 474aa0dd5b4f8fe35294e5ff5b10f1ce386bd9b6 Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Thu, 12 Dec 2024 15:33:13 -0800 Subject: [PATCH 25/38] update comments external -> custom dataset --- omnigibson/examples/objects/import_custom_object.py | 4 ++-- omnigibson/utils/asset_conversion_utils.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/omnigibson/examples/objects/import_custom_object.py 
b/omnigibson/examples/objects/import_custom_object.py index 688b95d29..959d80281 100644 --- a/omnigibson/examples/objects/import_custom_object.py +++ b/omnigibson/examples/objects/import_custom_object.py @@ -61,8 +61,8 @@ def import_custom_object( overwrite: bool, ): """ - Imports an externally-defined object asset into an OmniGibson-compatible USD format and saves the imported asset - files to the external dataset directory (gm.CUSTOM_DATASET_PATH) + Imports a custom-defined object asset into an OmniGibson-compatible USD format and saves the imported asset + files to the custom dataset directory (gm.CUSTOM_DATASET_PATH) """ assert len(model) == 6 and model.isalpha(), "Model name must be 6 characters long and contain only letters." collision_method = None if collision_method == "none" else collision_method diff --git a/omnigibson/utils/asset_conversion_utils.py b/omnigibson/utils/asset_conversion_utils.py index 85a5bc2ef..dac1fb5f6 100644 --- a/omnigibson/utils/asset_conversion_utils.py +++ b/omnigibson/utils/asset_conversion_utils.py @@ -2308,7 +2308,7 @@ def import_og_asset_from_urdf( decomposition applied. This will only use the convex hull visual_only_links (None or list of str): If specified, links that should have no colliders associated with it merge_fixed_joints (bool): Whether to merge fixed joints or not - dataset_root (str): Dataset root directory to use for writing imported USD file. Default is external dataset + dataset_root (str): Dataset root directory to use for writing imported USD file. Default is custom dataset path set from the global macros hull_count (int): Maximum number of convex hulls to decompose individual visual meshes into. Only relevant if @collision_method is "coacd" From 04c37c5faad69b311522e0c51cf447baa522b432 Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Thu, 12 Dec 2024 15:48:31 -0800 Subject: [PATCH 26/38] update version and remove extraneous np import --- omnigibson/utils/config_utils.py | 1 - setup.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/omnigibson/utils/config_utils.py b/omnigibson/utils/config_utils.py index 82f9b1e91..dd467c382 100644 --- a/omnigibson/utils/config_utils.py +++ b/omnigibson/utils/config_utils.py @@ -2,7 +2,6 @@ import json import os -import numpy as np import torch as th import yaml diff --git a/setup.py b/setup.py index fee68fb0e..56564cb50 100644 --- a/setup.py +++ b/setup.py @@ -49,7 +49,7 @@ "matplotlib>=3.0.0", "lxml>=5.3.0", "numba>=0.60.0", - "cffi~=1.16.0", + "cffi>=1.16.0", "pillow~=10.2.0", ], extras_require={ From e31c12a14f71c091d2280de12b9251112b2eeb20 Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Fri, 13 Dec 2024 10:47:01 -0800 Subject: [PATCH 27/38] make urdf paths relative during custom asset import --- omnigibson/utils/asset_conversion_utils.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/omnigibson/utils/asset_conversion_utils.py b/omnigibson/utils/asset_conversion_utils.py index dac1fb5f6..ed49e3130 100644 --- a/omnigibson/utils/asset_conversion_utils.py +++ b/omnigibson/utils/asset_conversion_utils.py @@ -1914,6 +1914,7 @@ def get_collision_approximation_for_urdf( during collision generation process """ # Load URDF + urdf_dir = os.path.dirname(urdf_path) tree = ET.parse(urdf_path) root = tree.getroot() @@ -1923,7 +1924,8 @@ def get_collision_approximation_for_urdf( no_decompose_links = set() if no_decompose_links is None else set(no_decompose_links) visual_only_links = set() if visual_only_links is None 
else set(visual_only_links) ignore_links = set() if ignore_links is None else set(ignore_links) - col_mesh_folder = pathlib.Path(os.path.dirname(urdf_path)) / "meshes" / "collision" + col_mesh_rel_folder = "meshes/collision" + col_mesh_folder = pathlib.Path(urdf_dir) / col_mesh_rel_folder col_mesh_folder.mkdir(exist_ok=True, parents=True) for link in root.findall("link"): link_name = link.attrib["name"] @@ -1996,7 +1998,7 @@ def get_collision_approximation_for_urdf( collision_geometry_xml = ET.SubElement(collision_xml, "geometry") collision_mesh_xml = ET.SubElement(collision_geometry_xml, "mesh") collision_mesh_xml.attrib = { - "filename": str(col_mesh_folder / collision_filename), + "filename": os.path.join(col_mesh_rel_folder, collision_filename), "scale": " ".join([str(item) for item in collision_scale]), } From 6275ab9087d7201ad336d739e67206749b6e414b Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Fri, 13 Dec 2024 11:34:20 -0800 Subject: [PATCH 28/38] handle vision sensor cleanup properly --- omnigibson/sensors/vision_sensor.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/omnigibson/sensors/vision_sensor.py b/omnigibson/sensors/vision_sensor.py index 1ebdc2729..e4902b68b 100644 --- a/omnigibson/sensors/vision_sensor.py +++ b/omnigibson/sensors/vision_sensor.py @@ -575,16 +575,28 @@ def _remove_modality_from_backend(self, modality): modality (str): Name of the modality to remove from the Replicator backend """ if self._annotators.get(modality, None) is not None: - self._annotators[modality].detach([self._render_product]) + # Passing an explicit list is bugged -- see omni source code + # So we only pass in the product directly, which gets post-processed correctly + self._annotators[modality].detach(self._render_product) self._annotators[modality] = None def remove(self): # Remove from global sensors dictionary self.SENSORS.pop(self.prim_path) + # Remove all modalities + for modality in tuple(self.modalities): + self.remove_modality(modality) + + # Destroy the render product + self._render_product.destroy() + # Remove the viewport if it's not the main viewport if self._viewport.name != "Viewport": self._viewport.destroy() + else: + # We're deleting our camera, so set the normal viewport camera to the default /Perspective camera + self.active_camera_path = "/OmniverseKit_Persp" # Run super super().remove() @@ -882,10 +894,9 @@ def clear(cls): """ Clear all the class-wide variables. 
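Both cleanup paths in the vision-sensor patch iterate over tuple(...) snapshots, tuple(self.modalities) in remove() and tuple(cls.SENSORS.values()) in clear(), because each removal shrinks the container being walked. A standalone illustration of why the snapshot is needed:

registry = {"cam0": "sensor_0", "cam1": "sensor_1"}

for name in tuple(registry):  # iterate over a snapshot of the keys
    registry.pop(name)        # safe: the live dict may now shrink freely

assert not registry

Iterating the live dict directly while popping would raise a "dictionary changed size during iteration" error partway through teardown.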
""" - for sensor in cls.SENSORS.values(): - # Destroy any sensor that is not attached to the main viewport window - if sensor._viewport.name != "Viewport": - sensor._viewport.destroy() + # Remove all sensors + for sensor in tuple(cls.SENSORS.values()): + sensor.remove() # Render to update render() From 42e1d9d4a5c37bcf3ec155b29de0aee06300c040 Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Fri, 13 Dec 2024 16:48:48 -0800 Subject: [PATCH 29/38] fix default command output behavior for joint controllers --- omnigibson/controllers/joint_controller.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/omnigibson/controllers/joint_controller.py b/omnigibson/controllers/joint_controller.py index 39dcb9a65..9dcb7ea33 100644 --- a/omnigibson/controllers/joint_controller.py +++ b/omnigibson/controllers/joint_controller.py @@ -139,6 +139,14 @@ def __init__( command_output_limits=command_output_limits, ) + def _generate_default_command_output_limits(self): + # Use motor type instead of default control type, since, e.g, use_impedances is commanding joint positions + # but controls low-level efforts + return ( + self._control_limits[ControlType.get_type(self._motor_type)][0][self.dof_idx], + self._control_limits[ControlType.get_type(self._motor_type)][1][self.dof_idx], + ) + def _update_goal(self, command, control_dict): # If we're using delta commands, add this value if self._use_delta_commands: From 2f856aac376334ab38cd9d9a7279d5ecdefd2364 Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Thu, 19 Dec 2024 11:22:01 -0800 Subject: [PATCH 30/38] refactor compute backend to allow for explicit externally-defined functions --- omnigibson/controllers/ik_controller.py | 15 +++------ omnigibson/controllers/osc_controller.py | 8 ++--- omnigibson/simulator.py | 2 +- omnigibson/utils/backend_utils.py | 43 +++++++++++++++++++++++- 4 files changed, 51 insertions(+), 17 deletions(-) diff --git a/omnigibson/controllers/ik_controller.py b/omnigibson/controllers/ik_controller.py index 1059fa082..5a2b73b21 100644 --- a/omnigibson/controllers/ik_controller.py +++ b/omnigibson/controllers/ik_controller.py @@ -4,7 +4,7 @@ from omnigibson.controllers import ControlType, ManipulationController from omnigibson.controllers.joint_controller import JointController from omnigibson.utils.backend_utils import _compute_backend as cb -from omnigibson.utils.backend_utils import _ComputeBackend, _ComputeNumpyBackend, _ComputeTorchBackend +from omnigibson.utils.backend_utils import add_compute_function from omnigibson.utils.processing_utils import MovingAverageFilter from omnigibson.utils.ui_utils import create_module_logger @@ -312,7 +312,7 @@ def compute_control(self, goal_dict, control_dict): ee_quat = control_dict[f"{self.task_name}_quat_relative"] # Calculate desired joint positions - target_joint_pos = cb.compute_ik_qpos( + target_joint_pos = cb.get_custom_method("compute_ik_qpos")( q=q, j_eef=j_eef, ee_pos=cb.as_float32(ee_pos), @@ -332,13 +332,10 @@ def compute_control(self, goal_dict, control_dict): def compute_no_op_goal(self, control_dict): # No-op is maintaining current pose - target_pos = cb.copy(control_dict[f"{self.task_name}_pos_relative"]) - target_quat = cb.copy(control_dict[f"{self.task_name}_quat_relative"]) - # Convert quat into eef ori mat return dict( - target_pos=cb.as_float32(target_pos), - target_ori_mat=cb.as_float32(cb.T.quat2mat(target_quat)), + target_pos=cb.as_float32(control_dict[f"{self.task_name}_pos_relative"]), + 
target_ori_mat=cb.as_float32(cb.T.quat2mat(control_dict[f"{self.task_name}_quat_relative"])), ) def _compute_no_op_action(self, control_dict): @@ -442,6 +439,4 @@ def _compute_ik_qpos_numpy( # Set these as part of the backend values -setattr(_ComputeBackend, "compute_ik_qpos", None) -setattr(_ComputeTorchBackend, "compute_ik_qpos", _compute_ik_qpos_torch) -setattr(_ComputeNumpyBackend, "compute_ik_qpos", _compute_ik_qpos_numpy) +add_compute_function(name="compute_ik_qpos", np_function=_compute_ik_qpos_numpy, th_function=_compute_ik_qpos_torch) diff --git a/omnigibson/controllers/osc_controller.py b/omnigibson/controllers/osc_controller.py index 032e501f0..c97a4fbae 100644 --- a/omnigibson/controllers/osc_controller.py +++ b/omnigibson/controllers/osc_controller.py @@ -2,7 +2,7 @@ from omnigibson.controllers import ControlType, ManipulationController from omnigibson.utils.backend_utils import _compute_backend as cb -from omnigibson.utils.backend_utils import _ComputeBackend, _ComputeNumpyBackend, _ComputeTorchBackend +from omnigibson.utils.backend_utils import add_compute_function from omnigibson.utils.processing_utils import MovingAverageFilter from omnigibson.utils.python_utils import assert_valid_key from omnigibson.utils.ui_utils import create_module_logger @@ -404,7 +404,7 @@ def compute_control(self, goal_dict, control_dict): base_ang_vel = control_dict["root_rel_ang_vel"] # Calculate torques - u = cb.compute_osc_torques( + u = cb.get_custom_method("compute_osc_torques")( q=q, qd=qd, mm=mm, @@ -652,6 +652,4 @@ def _compute_osc_torques_numpy( # Set these as part of the backend values -setattr(_ComputeBackend, "compute_osc_torques", None) -setattr(_ComputeTorchBackend, "compute_osc_torques", _compute_osc_torques_torch) -setattr(_ComputeNumpyBackend, "compute_osc_torques", _compute_osc_torques_numpy) +add_compute_function(name="compute_osc_torques", np_function=_compute_osc_torques_numpy, th_function=_compute_osc_torques_torch) diff --git a/omnigibson/simulator.py b/omnigibson/simulator.py index 165f22c96..ebfc01508 100644 --- a/omnigibson/simulator.py +++ b/omnigibson/simulator.py @@ -243,7 +243,7 @@ def _launch_app(): # Set compute backend import omnigibson.utils.backend_utils as _backend_utils - _backend_utils._compute_backend.set_methods( + _backend_utils._compute_backend.set_methods_from_backend( _backend_utils._ComputeNumpyBackend if gm.USE_NUMPY_CONTROLLER_BACKEND else _backend_utils._ComputeTorchBackend ) diff --git a/omnigibson/utils/backend_utils.py b/omnigibson/utils/backend_utils.py index 879d2771d..43c4303a6 100644 --- a/omnigibson/utils/backend_utils.py +++ b/omnigibson/utils/backend_utils.py @@ -6,7 +6,25 @@ from omnigibson.utils.python_utils import recursively_convert_from_torch +# Global function for adding custom compute functions +def add_compute_function(name, np_function, th_function): + """ + Adds a custom compute function defined by @name + + Args: + name (str): name of the function to add + np_function (callable): numpy version of the function + th_function (callable): torch version of the function + """ + _ComputeNumpyBackend.set_custom_method(name, np_function) + _ComputeTorchBackend.set_custom_method(name, th_function) + + class _ComputeBackend: + + # Dictionary mapping custom externally-defined function name to function + _custom_fcns = None + array = None int_array = None prod = None @@ -39,7 +57,30 @@ class _ComputeBackend: T = None @classmethod - def set_methods(cls, backend): + def get_custom_method(cls, method_name): + """ + Gets a custom method from the 
internal backend + + Args: + method_name (str): Name of the method to get + """ + return cls._custom_fcns[method_name] + + @classmethod + def set_custom_method(cls, method_name, method): + """ + Sets a custom method to the internal backend + + Args: + method_name (str): Name of the method + method (callable): Custom method to add + """ + if cls._custom_fcns is None: + cls._custom_fcns = dict() + cls._custom_fcns[method_name] = method + + @classmethod + def set_methods_from_backend(cls, backend): for attr, fcn in backend.__dict__.items(): # Do not override reserved functions if attr.startswith("__"): From a2d35c9cc6f59acea135ba1b9c3bb938cb7a7dd2 Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Thu, 19 Dec 2024 11:22:26 -0800 Subject: [PATCH 31/38] fix default command output generation for multi finger controller --- .../controllers/multi_finger_gripper_controller.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/omnigibson/controllers/multi_finger_gripper_controller.py b/omnigibson/controllers/multi_finger_gripper_controller.py index ab888f2fe..1774e3c69 100644 --- a/omnigibson/controllers/multi_finger_gripper_controller.py +++ b/omnigibson/controllers/multi_finger_gripper_controller.py @@ -105,9 +105,9 @@ def __init__( # Create ring buffer for velocity history to avoid high frequency nosie during grasp state inference self._vel_filter = MovingAverageFilter(obs_dim=len(dof_idx), filter_width=5) - # If we're using binary signal, we override the command output limits + # If we're using binary signal, these values will be overridden manually, so set to default for now if mode == "binary": - command_output_limits = (-1.0, 1.0) + command_output_limits = "default" # Run super init super().__init__( @@ -119,9 +119,17 @@ def __init__( ) def _generate_default_command_output_limits(self): + # By default (independent mode), this is simply the super call command_output_limits = super()._generate_default_command_output_limits() - return cb.mean(command_output_limits[0]), cb.mean(command_output_limits[1]) + # If we're in binary mode, output limits should just be (-1.0, 1.0) + if self._mode == "binary": + command_output_limits = (-1.0, 1.0) + # If we're in smoothing mode, output limits should be the average of the independent limits + elif self._mode == "smoothing": + command_output_limits = cb.mean(command_output_limits[0]), cb.mean(command_output_limits[1]) + + return command_output_limits def reset(self): # Call super first From 475cbc78ada61da1a4b3b2d25321accf09975d46 Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Thu, 19 Dec 2024 11:22:36 -0800 Subject: [PATCH 32/38] fix semantics for null joint controller --- .../controllers/null_joint_controller.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/omnigibson/controllers/null_joint_controller.py b/omnigibson/controllers/null_joint_controller.py index e4afe1f29..3f76c7bad 100644 --- a/omnigibson/controllers/null_joint_controller.py +++ b/omnigibson/controllers/null_joint_controller.py @@ -17,7 +17,7 @@ def __init__( dof_idx, command_input_limits="default", command_output_limits="default", - default_command=None, + default_goal=None, pos_kp=None, pos_damping_ratio=None, vel_kp=None, @@ -45,8 +45,8 @@ def __init__( then all inputted command values will be scaled from the input range to the output range. If either is None, no scaling will be used. 
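Putting the pieces of patch 30 together: a module defines a numpy/torch pair, registers it once through add_compute_function, and call sites resolve it through the active backend, exactly as the IK/OSC controllers and usd_utils now do. The name "scaled_norm" below is purely illustrative and not part of the codebase:

import numpy as np
import torch as th

from omnigibson.utils.backend_utils import _compute_backend as cb
from omnigibson.utils.backend_utils import add_compute_function

def _scaled_norm_numpy(x, scale=2.0):
    return scale * np.linalg.norm(x)

def _scaled_norm_torch(x, scale=2.0):
    return scale * th.linalg.norm(x)

# Register the pair once at import time, mirroring ik_controller / osc_controller / usd_utils
add_compute_function(name="scaled_norm", np_function=_scaled_norm_numpy, th_function=_scaled_norm_torch)

# At call time (once the app has bound a backend), the same name resolves to whichever
# implementation matches gm.USE_NUMPY_CONTROLLER_BACKEND
scaled_norm = cb.get_custom_method("scaled_norm")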
If "default", then this range will automatically be set to the @control_limits entry corresponding to self.control_type - default_command (None or n-array): if specified, should be the same length as @dof_idx, specifying - the default control for this controller to output + default_goal (None or n-array): if specified, should be the same length as @dof_idx, specifying + the default joint goal for this controller to converge to pos_kp (None or float): If @motor_type is "position" and @use_impedances=True, this is the proportional gain applied to the joint controller. If None, a default value will be used. pos_damping_ratio (None or float): If @motor_type is "position" and @use_impedances=True, this is the @@ -57,7 +57,7 @@ def __init__( applied """ # Store values - self._default_command = cb.zeros(len(dof_idx)) if default_command is None else default_command + self.default_goal = cb.zeros(len(dof_idx)) if default_goal is None else default_goal # Run super init super().__init__( @@ -76,11 +76,11 @@ def __init__( def compute_no_op_goal(self, control_dict): # Set the goal to be internal stored default value - return dict(target=self._default_command) + return dict(target=self.default_goal) def _preprocess_command(self, command): - # Override super and force the processed command to be internal stored default value - return cb.array(self._default_command) + # Override super and force the processed goal to be internal stored default value + return cb.array(self.default_goal) def update_default_goal(self, target): """ @@ -92,9 +92,9 @@ def update_default_goal(self, target): """ assert ( len(target) == self.control_dim - ), f"Default control must be length: {self.control_dim}, got length: {len(target)}" + ), f"Default goal must be length: {self.control_dim}, got length: {len(target)}" - self._default_command = cb.array(target) + self.default_goal = cb.array(target) def _compute_no_op_action(self, control_dict): # Empty tensor since no action should be received From 8faa0a901e19b60e123757d644a9a4260cf08476 Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Thu, 19 Dec 2024 12:04:16 -0800 Subject: [PATCH 33/38] verify that th and np transform utils have 1-to-1 corresponding functions --- omnigibson/utils/transform_utils.py | 117 ++---- omnigibson/utils/transform_utils_np.py | 546 +++++++++++-------------- omnigibson/utils/usd_utils.py | 8 +- 3 files changed, 276 insertions(+), 395 deletions(-) diff --git a/omnigibson/utils/transform_utils.py b/omnigibson/utils/transform_utils.py index c2301bb61..f9db861db 100644 --- a/omnigibson/utils/transform_utils.py +++ b/omnigibson/utils/transform_utils.py @@ -1,6 +1,8 @@ """ Utility functions of matrix and vector transformations. +NOTE: This file has a 1-to-1 correspondence to transform_utils_np.py + NOTE: convention for quaternions is (x, y, z, w) """ @@ -42,7 +44,7 @@ @torch.compile -def copysign(a, b): +def _copysign(a, b): # type: (float, torch.Tensor) -> torch.Tensor a = torch.tensor(a, device=b.device, dtype=torch.float).repeat(b.shape[0]) return torch.abs(a) * torch.sign(b) @@ -308,6 +310,32 @@ def quat_slerp(quat0, quat1, frac, shortestpath=True, eps=1.0e-15): return val.reshape(list(quat_shape)) +@torch.compile +def random_quaternion(num_quaternions: int = 1) -> torch.Tensor: + """ + Generate random rotation quaternions, uniformly distributed over SO(3). 
+ + Arguments: + num_quaternions (int): number of quaternions to generate (default: 1) + + Returns: + torch.Tensor: A tensor of shape (num_quaternions, 4) containing random unit quaternions. + """ + # Generate four random numbers between 0 and 1 + rand = torch.rand(num_quaternions, 4) + + # Use the formula from Ken Shoemake's "Uniform Random Rotations" + r1 = torch.sqrt(1.0 - rand[:, 0]) + r2 = torch.sqrt(rand[:, 0]) + t1 = 2 * torch.pi * rand[:, 1] + t2 = 2 * torch.pi * rand[:, 2] + + quaternions = torch.stack([r1 * torch.sin(t1), r1 * torch.cos(t1), r2 * torch.sin(t2), r2 * torch.cos(t2)], dim=1) + + return quaternions + + + @torch.compile def random_axis_angle(angle_limit: float = 2.0 * math.pi): """ @@ -444,6 +472,20 @@ def mat2quat(rmat: torch.Tensor) -> torch.Tensor: return quat +def mat2quat_batch(rmat: torch.Tensor) -> torch.Tensor: + """ + Converts given rotation matrix to quaternion. Version optimized for batch operations + + Args: + rmat (torch.Tensor): (3, 3) or (..., 3, 3) rotation matrix + + Returns: + torch.Tensor: (4,) or (..., 4) (x,y,z,w) float quaternion angles + """ + # For torch, no different than basic version + return mat2quat(rmat) + + @torch.compile def mat2pose(hmat): """ @@ -547,7 +589,7 @@ def quat2euler(q): roll = torch.atan2(sinr_cosp, cosr_cosp) # pitch (y-axis rotation) sinp = 2.0 * (q[:, qw] * q[:, qy] - q[:, qz] * q[:, qx]) - pitch = torch.where(torch.abs(sinp) >= 1, copysign(math.pi / 2.0, sinp), torch.asin(sinp)) + pitch = torch.where(torch.abs(sinp) >= 1, _copysign(math.pi / 2.0, sinp), torch.asin(sinp)) # yaw (z-axis rotation) siny_cosp = 2.0 * (q[:, qw] * q[:, qz] + q[:, qx] * q[:, qy]) cosy_cosp = q[:, qw] * q[:, qw] + q[:, qx] * q[:, qx] - q[:, qy] * q[:, qy] - q[:, qz] * q[:, qz] @@ -940,31 +982,6 @@ def rotation_matrix(angle: float, direction: torch.Tensor) -> torch.Tensor: return R -@torch.compile -def transformation_matrix(angle: float, direction: torch.Tensor, point: Optional[torch.Tensor] = None) -> torch.Tensor: - """ - Returns a 4x4 homogeneous transformation matrix to rotate about axis defined by point and direction. - - Args: - angle (float): Magnitude of rotation in radians - direction (torch.Tensor): (ax,ay,az) axis about which to rotate - point (Optional[torch.Tensor]): If specified, is the (x,y,z) point about which the rotation will occur - - Returns: - torch.Tensor: 4x4 homogeneous transformation matrix - """ - R = rotation_matrix(angle, direction) - - M = torch.eye(4, dtype=torch.float32, device=direction.device) - M[:3, :3] = R - - if point is not None: - # Rotation not about origin - point = point.to(dtype=torch.float32) - M[:3, 3] = point - R @ point - return M - - @torch.compile def clip_translation(dpos, limit): """ @@ -1292,31 +1309,6 @@ def integer_spiral_coordinates(n: int) -> Tuple[int, int]: return int(x), int(y) -@torch.compile -def random_quaternion(num_quaternions: int = 1) -> torch.Tensor: - """ - Generate random rotation quaternions, uniformly distributed over SO(3). - - Arguments: - num_quaternions: int, number of quaternions to generate (default: 1) - - Returns: - torch.Tensor: A tensor of shape (num_quaternions, 4) containing random unit quaternions. 
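Since random_quaternion is being relocated here and mirrored on the numpy side, a cheap property check of the Shoemake construction is worth keeping in mind: with r1 = sqrt(1 - u) and r2 = sqrt(u), every sample already lies on the unit sphere in R^4.

import torch as th
import omnigibson.utils.transform_utils as TT

qs = TT.random_quaternion(1000)
assert qs.shape == (1000, 4)
assert th.allclose(th.linalg.norm(qs, dim=-1), th.ones(1000), atol=1e-5)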
- """ - # Generate four random numbers between 0 and 1 - rand = torch.rand(num_quaternions, 4) - - # Use the formula from Ken Shoemake's "Uniform Random Rotations" - r1 = torch.sqrt(1.0 - rand[:, 0]) - r2 = torch.sqrt(rand[:, 0]) - t1 = 2 * torch.pi * rand[:, 1] - t2 = 2 * torch.pi * rand[:, 2] - - quaternions = torch.stack([r1 * torch.sin(t1), r1 * torch.cos(t1), r2 * torch.sin(t2), r2 * torch.cos(t2)], dim=1) - - return quaternions - - @torch.compile def transform_points(points: torch.Tensor, matrix: torch.Tensor, translate: bool = True) -> torch.Tensor: """ @@ -1353,27 +1345,6 @@ def transform_points(points: torch.Tensor, matrix: torch.Tensor, translate: bool return torch.mm(matrix[:dim, :dim], points.t()).t() -@torch.compile -def quaternions_close(q1: torch.Tensor, q2: torch.Tensor, atol: float = 1e-3) -> bool: - """ - Whether two quaternions represent the same rotation, - allowing for the possibility that one is the negative of the other. - - Arguments: - q1: torch.Tensor - First quaternion - q2: torch.Tensor - Second quaternion - atol: float - Absolute tolerance for comparison - - Returns: - bool - Whether the quaternions are close - """ - return torch.allclose(q1, q2, atol=atol) or torch.allclose(q1, -q2, atol=atol) - - @torch.compile def orientation_error(desired, current): """ diff --git a/omnigibson/utils/transform_utils_np.py b/omnigibson/utils/transform_utils_np.py index 5c4997374..d0d67ea84 100644 --- a/omnigibson/utils/transform_utils_np.py +++ b/omnigibson/utils/transform_utils_np.py @@ -1,6 +1,9 @@ """ Utility functions of matrix and vector transformations. +NOTE: This file has a 1-to-1 correspondence to transform_utils.py. By default, we use scipy for most transform-related + operations, but we optionally implement numba versions for functions that are often called in "batch" mode + NOTE: convention for quaternions is (x, y, z, w) """ @@ -47,72 +50,90 @@ _TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items()) -def ewma_vectorized(data, alpha, offset=None, dtype=None, order="C", out=None): - """ - Calculates the exponential moving average over a vector. - Will fail for large inputs. +def anorm(x, axis=None, keepdims=False): + """Compute L2 norms alogn specified axes.""" + return np.linalg.norm(x, axis=axis, keepdims=keepdims) - Args: - data (Iterable): Input data - alpha (float): scalar in range (0,1) - The alpha parameter for the moving average. - offset (None or float): If specified, the offset for the moving average. None defaults to data[0]. - dtype (None or type): Data type used for calculations. If None, defaults to float64 unless - data.dtype is float32, then it will use float32. - order (None or str): Order to use when flattening the data. Valid options are {'C', 'F', 'A'}. - None defaults to 'C'. - out (None or np.array): If specified, the location into which the result is stored. If provided, it must have - the same shape as the input. If not provided or `None`, - a freshly-allocated array is returned. - Returns: - np.array: Exponential moving average from @data +def normalize(v, axis=None, eps=1e-10): + """L2 Normalize along specified axes.""" + norm = anorm(v, axis=axis, keepdims=True) + return v / np.where(norm < eps, eps, norm) + + +def dot(v1, v2, dim=-1, keepdim=False): + return np.sum(v1 * v2, axis=dim, keepdims=keepdim) + + +def unit_vector(data, axis=None, out=None): """ - data = np.array(data, copy=False) + Returns ndarray normalized by length, i.e. eucledian norm, along axis. 
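The new "1-to-1 correspondence" note is load-bearing: the compute backend exposes one of these two modules as cb.T, so any name a controller reaches for through cb.T must exist in both files with matching behavior. A lightweight parity check over names used elsewhere in this PR (only a sample; the unit tests are the real guarantee):

import omnigibson.utils.transform_utils as TT
import omnigibson.utils.transform_utils_np as NT

for name in ("euler2quat", "quat2euler", "quat2mat", "mat2quat",
             "mat2pose", "pose2mat", "pose_inv", "pose_transform"):
    assert hasattr(TT, name) and hasattr(NT, name), name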
- if dtype is None: - if data.dtype == np.float32: - dtype = np.float32 - else: - dtype = np.float64 - else: - dtype = np.dtype(dtype) + E.g.: + >>> v0 = numpy.random.random(3) + >>> v1 = unit_vector(v0) + >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0)) + True - if data.ndim > 1: - # flatten input - data = data.reshape(-1, order) + >>> v0 = numpy.random.rand(5, 4, 3) + >>> v1 = unit_vector(v0, axis=-1) + >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2) + >>> numpy.allclose(v1, v2) + True - if out is None: - out = np.empty_like(data, dtype=dtype) - else: - assert out.shape == data.shape - assert out.dtype == dtype + >>> v1 = unit_vector(v0, axis=1) + >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1) + >>> numpy.allclose(v1, v2) + True - if data.size < 1: - # empty input, return empty array - return out + >>> v1 = numpy.empty((5, 4, 3), dtype=numpy.float32) + >>> unit_vector(v0, axis=1, out=v1) + >>> numpy.allclose(v1, v2) + True - if offset is None: - offset = data[0] + >>> list(unit_vector([])) + [] - alpha = np.array(alpha, copy=False).astype(dtype, copy=False) + >>> list(unit_vector([1.0])) + [1.0] + + Args: + data (np.array): data to normalize + axis (None or int): If specified, determines specific axis along data to normalize + out (None or np.array): If specified, will store computation in this variable - # scaling_factors -> 0 as len(data) gets large - # this leads to divide-by-zeros below - scaling_factors = np.power(1.0 - alpha, np.arange(data.size + 1, dtype=dtype), dtype=dtype) - # create cumulative sum array - np.multiply(data, (alpha * scaling_factors[-2]) / scaling_factors[:-1], dtype=dtype, out=out) - np.cumsum(out, dtype=dtype, out=out) + Returns: + None or np.array: If @out is not specified, will return normalized vector. Otherwise, stores the output in @out + """ + if out is None: + data = np.array(data, dtype=np.float32, copy=True) + if data.ndim == 1: + data /= math.sqrt(np.dot(data, data)) + return data + else: + if out is not data: + out[:] = np.array(data, copy=False) + data = out + length = np.atleast_1d(np.sum(data * data, axis)) + np.sqrt(length, length) + if axis is not None: + length = np.expand_dims(length, axis) + data /= length + if out is None: + return data - # cumsums / scaling - out /= scaling_factors[-2::-1] - if offset != 0: - offset = np.array(offset, copy=False).astype(dtype, copy=False) - # add offsets - out += offset * scaling_factors[1:] +def quat_apply(quat, vec): + """ + Apply a quaternion rotation to a vector (equivalent to R.from_quat(x).apply(y)) + Args: + quat (np.array): (4,) or (N, 4) or (N, 1, 4) quaternion in (x, y, z, w) format + vec (np.array): (3,) or (M, 3) or (1, M, 3) vector to rotate - return out + Returns: + np.array: (M, 3) or (N, M, 3) rotated vector + """ + return R.from_quat(quat).apply(vec) def convert_quat(q, to="xyzw"): @@ -270,38 +291,29 @@ def quat_slerp(quat0, quat1, fraction, shortestpath=True): return q0 -def random_quat(rand=None): +@jit(nopython=True) +def random_quaternion(num_quaternions=1): """ - Return uniform random unit quaternion. + Generate random rotation quaternions, uniformly distributed over SO(3). - E.g.: - >>> q = random_quat() - >>> np.allclose(1.0, vector_norm(q)) - True - >>> q = random_quat(np.random.random(3)) - >>> q.shape - (4,) - - Args: - rand (3-array or None): If specified, must be three independent random variables that are uniformly distributed - between 0 and 1. 
+ Arguments: + num_quaternions (int): number of quaternions to generate (default: 1) Returns: - np.array: (x,y,z,w) random quaternion + np.array: A tensor of shape (num_quaternions, 4) containing random unit quaternions. """ - if rand is None: - rand = np.random.rand(3) - else: - assert len(rand) == 3 - r1 = np.sqrt(1.0 - rand[0]) - r2 = np.sqrt(rand[0]) - pi2 = math.pi * 2.0 - t1 = pi2 * rand[1] - t2 = pi2 * rand[2] - return np.array( - (np.sin(t1) * r1, np.cos(t1) * r1, np.sin(t2) * r2, np.cos(t2) * r2), - dtype=np.float32, - ) + # Generate four random numbers between 0 and 1 + rand = np.random.rand(num_quaternions, 4) + + # Use the formula from Ken Shoemake's "Uniform Random Rotations" + r1 = np.sqrt(1.0 - rand[:, 0]) + r2 = np.sqrt(rand[:, 0]) + t1 = 2 * np.pi * rand[:, 1] + t2 = 2 * np.pi * rand[:, 2] + + quaternions = np.stack((r1 * np.sin(t1), r1 * np.cos(t1), r2 * np.sin(t2), r2 * np.cos(t2)), axis=1) + + return quaternions def random_axis_angle(angle_limit=None, random_state=None): @@ -338,47 +350,53 @@ def random_axis_angle(angle_limit=None, random_state=None): return random_axis, random_angle -def vec(values): - """ - Converts value tuple into a numpy vector. - - Args: - values (n-array): a tuple of numbers - - Returns: - np.array: vector of given values - """ - return np.array(values, dtype=np.float32) +def quat2mat(quaternion): + if quaternion.dtype != np.float32: + quaternion = quaternion.astype(np.float32) + return _quat2mat(quaternion) -def mat4(array): +@jit(nopython=True) +def _quat2mat(quaternion): """ - Converts an array to 4x4 matrix. + Convert quaternions into rotation matrices. Args: - array (n-array): the array in form of vec, list, or tuple + quaternion (torch.Tensor): A tensor of shape (..., 4) representing batches of quaternions (w, x, y, z). Returns: - np.array: a 4x4 numpy matrix + torch.Tensor: A tensor of shape (..., 3, 3) representing batches of rotation matrices. """ - return np.array(array, dtype=np.float32).reshape((4, 4)) + # broadcast array is necessary to use numba parallel mode + q1, q2 = np.broadcast_arrays(quaternion[..., np.newaxis], quaternion[..., np.newaxis, :]) + outer = q1 * q2 + + # Extract the necessary components + xx = outer[..., 0, 0] + yy = outer[..., 1, 1] + zz = outer[..., 2, 2] + xy = outer[..., 0, 1] + xz = outer[..., 0, 2] + yz = outer[..., 1, 2] + xw = outer[..., 0, 3] + yw = outer[..., 1, 3] + zw = outer[..., 2, 3] + rotation_matrix = np.empty(quaternion.shape[:-1] + (3, 3), dtype=np.float32) -def mat2pose(hmat): - """ - Converts a homogeneous 4x4 matrix into pose. + rotation_matrix[..., 0, 0] = 1 - 2 * (yy + zz) + rotation_matrix[..., 0, 1] = 2 * (xy - zw) + rotation_matrix[..., 0, 2] = 2 * (xz + yw) - Args: - hmat (np.array): a 4x4 homogeneous matrix + rotation_matrix[..., 1, 0] = 2 * (xy + zw) + rotation_matrix[..., 1, 1] = 1 - 2 * (xx + zz) + rotation_matrix[..., 1, 2] = 2 * (yz - xw) - Returns: - 2-tuple: - - (np.array) (x,y,z) position array in cartesian coordinates - - (np.array) (x,y,z,w) orientation array in quaternion form - """ - pos = hmat[:3, 3] - orn = mat2quat(hmat[:3, :3]) - return pos, orn + rotation_matrix[..., 2, 0] = 2 * (xz - yw) + rotation_matrix[..., 2, 1] = 2 * (yz + xw) + rotation_matrix[..., 2, 2] = 1 - 2 * (xx + yy) + + return rotation_matrix def mat2quat(rmat): @@ -466,6 +484,23 @@ def mat2quat_batch(rmat): return quat +def mat2pose(hmat): + """ + Converts a homogeneous 4x4 matrix into pose. 
+ + Args: + hmat (np.array): a 4x4 homogeneous matrix + + Returns: + 2-tuple: + - (np.array) (x,y,z) position array in cartesian coordinates + - (np.array) (x,y,z,w) orientation array in quaternion form + """ + pos = hmat[:3, 3] + orn = mat2quat(hmat[:3, :3]) + return pos, orn + + def vec2quat(vec, up=(0, 0, 1.0)): """ Converts given 3d-direction vector @vec to quaternion orientation with respect to another direction vector @up @@ -484,87 +519,70 @@ def vec2quat(vec, up=(0, 0, 1.0)): return mat2quat(np.array([vec_n, s_n, u_n]).T) -def euler2mat(euler): +def euler2quat(euler): """ - Converts euler angles into rotation matrix form + Converts euler angles into quaternion form Args: euler (np.array): (r,p,y) angles Returns: - np.array: 3x3 rotation matrix + np.array: (x,y,z,w) float quaternion angles Raises: AssertionError: [Invalid input shape] """ - - euler = np.asarray(euler, dtype=np.float64) - assert euler.shape[-1] == 3, "Invalid shaped euler {}".format(euler) - - return R.from_euler("xyz", euler).as_matrix() + return R.from_euler("xyz", euler).as_quat() -def mat2euler(rmat): +def quat2euler(quat): """ - Converts given rotation matrix to euler angles in radian. + Converts euler angles into quaternion form Args: - rmat (np.array): 3x3 rotation matrix + quat (np.array): (x,y,z,w) float quaternion angles Returns: - np.array: (r,p,y) converted euler angles in radian vec3 float - """ - M = np.array(rmat, dtype=np.float32, copy=False)[:3, :3] - return R.from_matrix(M).as_euler("xyz") - + np.array: (r,p,y) angles -def quat2mat(quaternion): - if quaternion.dtype != np.float32: - quaternion = quaternion.astype(np.float32) - return _quat2mat(quaternion) + Raises: + AssertionError: [Invalid input shape] + """ + return R.from_quat(quat).as_euler("xyz") -@jit(nopython=True) -def _quat2mat(quaternion): +def euler2mat(euler): """ - Convert quaternions into rotation matrices. + Converts euler angles into rotation matrix form Args: - quaternion (torch.Tensor): A tensor of shape (..., 4) representing batches of quaternions (w, x, y, z). + euler (np.array): (r,p,y) angles Returns: - torch.Tensor: A tensor of shape (..., 3, 3) representing batches of rotation matrices. + np.array: 3x3 rotation matrix + + Raises: + AssertionError: [Invalid input shape] """ - # broadcast array is necessary to use numba parallel mode - q1, q2 = np.broadcast_arrays(quaternion[..., np.newaxis], quaternion[..., np.newaxis, :]) - outer = q1 * q2 - # Extract the necessary components - xx = outer[..., 0, 0] - yy = outer[..., 1, 1] - zz = outer[..., 2, 2] - xy = outer[..., 0, 1] - xz = outer[..., 0, 2] - yz = outer[..., 1, 2] - xw = outer[..., 0, 3] - yw = outer[..., 1, 3] - zw = outer[..., 2, 3] + euler = np.asarray(euler, dtype=np.float64) + assert euler.shape[-1] == 3, "Invalid shaped euler {}".format(euler) - rotation_matrix = np.empty(quaternion.shape[:-1] + (3, 3), dtype=np.float32) + return R.from_euler("xyz", euler).as_matrix() - rotation_matrix[..., 0, 0] = 1 - 2 * (yy + zz) - rotation_matrix[..., 0, 1] = 2 * (xy - zw) - rotation_matrix[..., 0, 2] = 2 * (xz + yw) - rotation_matrix[..., 1, 0] = 2 * (xy + zw) - rotation_matrix[..., 1, 1] = 1 - 2 * (xx + zz) - rotation_matrix[..., 1, 2] = 2 * (yz - xw) +def mat2euler(rmat): + """ + Converts given rotation matrix to euler angles in radian. 
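The reordered euler/quaternion helpers are thin scipy wrappers, so a round trip is an easy sanity check that the "xyz" convention survives the move:

import numpy as np
import omnigibson.utils.transform_utils_np as NT

rpy = np.array([0.1, -0.2, 0.3])
assert np.allclose(NT.quat2euler(NT.euler2quat(rpy)), rpy, atol=1e-6)
Rm = NT.euler2mat(rpy)
assert np.allclose(NT.mat2euler(Rm), rpy, atol=1e-5)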
- rotation_matrix[..., 2, 0] = 2 * (xz - yw) - rotation_matrix[..., 2, 1] = 2 * (yz + xw) - rotation_matrix[..., 2, 2] = 1 - 2 * (xx + yy) + Args: + rmat (np.array): 3x3 rotation matrix - return rotation_matrix + Returns: + np.array: (r,p,y) converted euler angles in radian vec3 float + """ + M = np.array(rmat, dtype=np.float32, copy=False)[:3, :3] + return R.from_matrix(M).as_euler("xyz") @jit(nopython=True) @@ -613,38 +631,6 @@ def axisangle2quat(vec): return R.from_rotvec(vec).as_quat() -def euler2quat(euler): - """ - Converts euler angles into quaternion form - - Args: - euler (np.array): (r,p,y) angles - - Returns: - np.array: (x,y,z,w) float quaternion angles - - Raises: - AssertionError: [Invalid input shape] - """ - return R.from_euler("xyz", euler).as_quat() - - -def quat2euler(quat): - """ - Converts euler angles into quaternion form - - Args: - quat (np.array): (x,y,z,w) float quaternion angles - - Returns: - np.array: (r,p,y) angles - - Raises: - AssertionError: [Invalid input shape] - """ - return R.from_quat(quat).as_euler("xyz") - - def pose_in_A_to_pose_in_B(pose_A, pose_A_in_B): """ Converts a homogenous matrix corresponding to a point C in frame A @@ -983,64 +969,6 @@ def make_pose(translation, rotation): return pose -def unit_vector(data, axis=None, out=None): - """ - Returns ndarray normalized by length, i.e. eucledian norm, along axis. - - E.g.: - >>> v0 = numpy.random.random(3) - >>> v1 = unit_vector(v0) - >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0)) - True - - >>> v0 = numpy.random.rand(5, 4, 3) - >>> v1 = unit_vector(v0, axis=-1) - >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2) - >>> numpy.allclose(v1, v2) - True - - >>> v1 = unit_vector(v0, axis=1) - >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1) - >>> numpy.allclose(v1, v2) - True - - >>> v1 = numpy.empty((5, 4, 3), dtype=numpy.float32) - >>> unit_vector(v0, axis=1, out=v1) - >>> numpy.allclose(v1, v2) - True - - >>> list(unit_vector([])) - [] - - >>> list(unit_vector([1.0])) - [1.0] - - Args: - data (np.array): data to normalize - axis (None or int): If specified, determines specific axis along data to normalize - out (None or np.array): If specified, will store computation in this variable - - Returns: - None or np.array: If @out is not specified, will return normalized vector. Otherwise, stores the output in @out - """ - if out is None: - data = np.array(data, dtype=np.float32, copy=True) - if data.ndim == 1: - data /= math.sqrt(np.dot(data, data)) - return data - else: - if out is not data: - out[:] = np.array(data, copy=False) - data = out - length = np.atleast_1d(np.sum(data * data, axis)) - np.sqrt(length, length) - if axis is not None: - length = np.expand_dims(length, axis) - data /= length - if out is None: - return data - - def get_orientation_error(target_orn, current_orn): """ Returns the difference between two quaternion orientations as a 3 DOF numpy array. @@ -1169,65 +1097,45 @@ def vecs2quat(vec0, vec1, normalized=False): return quat_unnormalized / np.linalg.norm(quat_unnormalized, axis=-1, keepdims=True) -def l2_distance(v1, v2): - """Returns the L2 distance between vector v1 and v2.""" - return np.linalg.norm(np.array(v1) - np.array(v2)) - +def align_vector_sets(vec_set1, vec_set2): + """ + Computes a single quaternion representing the rotation that best aligns vec_set1 to vec_set2. 
-def frustum(left, right, bottom, top, znear, zfar): - """Create view frustum matrix.""" - assert right != left - assert bottom != top - assert znear != zfar - - M = np.zeros((4, 4), dtype=np.float32) - M[0, 0] = +2.0 * znear / (right - left) - M[2, 0] = (right + left) / (right - left) - M[1, 1] = +2.0 * znear / (top - bottom) - # TODO: Put this back to 3,1 - # M[3, 1] = (top + bottom) / (top - bottom) - M[2, 1] = (top + bottom) / (top - bottom) - M[2, 2] = -(zfar + znear) / (zfar - znear) - M[3, 2] = -2.0 * znear * zfar / (zfar - znear) - M[2, 3] = -1.0 - return M + Args: + vec_set1 (np.array): (N, 3) tensor of N 3D vectors + vec_set2 (np.array): (N, 3) tensor of N 3D vectors + Returns: + np.array: (4,) Normalized quaternion representing the overall rotation + """ + # Compute the cross-covariance matrix + H = vec_set2.T @ vec_set1 -def ortho(left, right, bottom, top, znear, zfar): - """Create orthonormal projection matrix.""" - assert right != left - assert bottom != top - assert znear != zfar - - M = np.zeros((4, 4), dtype=np.float32) - M[0, 0] = 2.0 / (right - left) - M[1, 1] = 2.0 / (top - bottom) - M[2, 2] = -2.0 / (zfar - znear) - M[3, 0] = -(right + left) / (right - left) - M[3, 1] = -(top + bottom) / (top - bottom) - M[3, 2] = -(zfar + znear) / (zfar - znear) - M[3, 3] = 1.0 - return M + # Compute the elements for the quaternion + trace = H.trace() + w = trace + 1 + x = H[1, 2] - H[2, 1] + y = H[2, 0] - H[0, 2] + z = H[0, 1] - H[1, 0] + # Construct the quaternion + quat = np.stack([x, y, z, w]) -def perspective(fovy, aspect, znear, zfar): - """Create perspective projection matrix.""" - # fovy is in degree - assert znear != zfar - h = np.tan(fovy / 360.0 * np.pi) * znear - w = h * aspect - return frustum(-w, w, -h, h, znear, zfar) + # Handle the case where w is close to zero + if quat[3] < 1e-4: + quat[3] = 0 + max_idx = np.argmax(quat[:3].abs()) + 1 + quat[max_idx] = 1 + # Normalize the quaternion + quat = quat / (np.linalg.norm(quat) + 1e-8) # Add epsilon to avoid division by zero -def anorm(x, axis=None, keepdims=False): - """Compute L2 norms alogn specified axes.""" - return np.linalg.norm(x, axis=axis, keepdims=keepdims) + return quat -def normalize(v, axis=None, eps=1e-10): - """L2 Normalize along specified axes.""" - norm = anorm(v, axis=axis, keepdims=True) - return v / np.where(norm < eps, eps, norm) +def l2_distance(v1, v2): + """Returns the L2 distance between vector v1 and v2.""" + return np.linalg.norm(np.array(v1) - np.array(v2)) def cartesian_to_polar(x, y): @@ -1268,11 +1176,6 @@ def z_angle_from_quat(quat): return np.arctan2(rotated_X_axis[1], rotated_X_axis[0]) -def z_rotation_from_quat(quat): - """Get the quaternion for the rotation around the Z axis produced by the quaternion.""" - return R.from_euler("z", z_angle_from_quat(quat)).as_quat() - - def integer_spiral_coordinates(n): """A function to map integers to 2D coordinates in a spiral pattern around the origin.""" # Map integers from Z to Z^2 in a spiral pattern around the origin. @@ -1285,27 +1188,36 @@ def integer_spiral_coordinates(n): return int(x), int(y) -def calculate_xy_plane_angle(quaternion): +@jit(nopython=True) +def transform_points(points, matrix, translate=True): """ - Compute the 2D orientation angle from a quaternion assuming the initial forward vector is along the x-axis. + Returns points rotated by a homogeneous + transformation matrix. 
+ If points are (n, 2) matrix must be (3, 3) + If points are (n, 3) matrix must be (4, 4) - Parameters: - quaternion : array_like - The quaternion (w, x, y, z) representing the rotation. + Arguments: + points (np.array): (n, dim) where `dim` is 2 or 3. + matrix (np.array): (3, 3) or (4, 4) homogeneous rotation matrix. + translate (bool): whether to apply translation from matrix or not. Returns: - float - The angle (in radians) of the projection of the forward vector onto the XY plane. - Returns 0.0 if the projected vector's magnitude is negligibly small. + np.array: (n, dim) transformed points. """ - fwd = R.from_quat(quaternion).apply([1, 0, 0]) - fwd[2] = 0.0 + if len(points) == 0 or matrix is None: + return points.copy() - if np.linalg.norm(fwd) < 1e-4: - return 0.0 + count, dim = points.shape + # Check if the matrix is close to an identity matrix + identity = np.eye(dim + 1) + if np.abs(matrix - identity[: dim + 1, : dim + 1]).max() < 1e-8: + return points.copy() - fwd /= np.linalg.norm(fwd) - return np.arctan2(fwd[1], fwd[0]) + if translate: + stack = np.ascontiguousarray(np.concatenate((points, np.ones((count, 1))), axis=1)) + return (matrix @ stack.T).T[:, :dim] + else: + return (matrix[:dim, :dim] @ points.T).T @jit(nopython=True) diff --git a/omnigibson/utils/usd_utils.py b/omnigibson/utils/usd_utils.py index 04e7fc651..0c2ff7dd5 100644 --- a/omnigibson/utils/usd_utils.py +++ b/omnigibson/utils/usd_utils.py @@ -15,7 +15,7 @@ import omnigibson.utils.transform_utils as T from omnigibson.macros import gm from omnigibson.utils.backend_utils import _compute_backend as cb -from omnigibson.utils.backend_utils import _ComputeBackend, _ComputeNumpyBackend, _ComputeTorchBackend +from omnigibson.utils.backend_utils import add_compute_function from omnigibson.utils.constants import PRIMITIVE_MESH_TYPES, JointType, PrimType from omnigibson.utils.numpy_utils import vtarray_to_torch from omnigibson.utils.python_utils import assert_valid_key @@ -1118,7 +1118,7 @@ def _get_relative_poses(self, prim_path): self._read_cache["link_transforms"] = cb.from_torch(self._view.get_link_transforms()) idx = self._idx[prim_path] - self._read_cache["relative_poses"][prim_path] = cb.compute_relative_poses( + self._read_cache["relative_poses"][prim_path] = cb.get_custom_method("compute_relative_poses")( idx, len(self._link_idx[idx]), self._read_cache["link_transforms"], @@ -1936,6 +1936,4 @@ def _compute_relative_poses_numpy(idx, n_links, all_tfs, base_pose): # Set these as part of the backend values -setattr(_ComputeBackend, "compute_relative_poses", None) -setattr(_ComputeTorchBackend, "compute_relative_poses", _compute_relative_poses_torch) -setattr(_ComputeNumpyBackend, "compute_relative_poses", _compute_relative_poses_numpy) +add_compute_function(name="compute_relative_poses", np_function=_compute_relative_poses_numpy, th_function=_compute_relative_poses_torch) From 1c67bba6c21579d37c4d354563b7f8444d4864b2 Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Thu, 19 Dec 2024 12:04:38 -0800 Subject: [PATCH 34/38] update null joint controller default args in robot classes --- omnigibson/robots/active_camera_robot.py | 2 +- omnigibson/robots/articulated_trunk_robot.py | 2 +- omnigibson/robots/locomotion_robot.py | 2 +- omnigibson/robots/manipulation_robot.py | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/omnigibson/robots/active_camera_robot.py b/omnigibson/robots/active_camera_robot.py index 73347b797..10153bdc8 100644 --- a/omnigibson/robots/active_camera_robot.py 
+++ b/omnigibson/robots/active_camera_robot.py @@ -96,7 +96,7 @@ def _default_camera_null_joint_controller_config(self): "motor_type": "position", "control_limits": self.control_limits, "dof_idx": self.camera_control_idx, - "default_command": self.reset_joint_pos[self.camera_control_idx], + "default_goal": self.reset_joint_pos[self.camera_control_idx], "use_impedances": False, } diff --git a/omnigibson/robots/articulated_trunk_robot.py b/omnigibson/robots/articulated_trunk_robot.py index 20a9e9ec1..7491c05bd 100644 --- a/omnigibson/robots/articulated_trunk_robot.py +++ b/omnigibson/robots/articulated_trunk_robot.py @@ -143,7 +143,7 @@ def _default_trunk_null_joint_controller_config(self): "motor_type": "position", "control_limits": self.control_limits, "dof_idx": self.trunk_control_idx, - "default_command": self.reset_joint_pos[self.trunk_control_idx], + "default_goal": self.reset_joint_pos[self.trunk_control_idx], "use_impedances": False, } diff --git a/omnigibson/robots/locomotion_robot.py b/omnigibson/robots/locomotion_robot.py index 9cf9d1a54..5f5d06926 100644 --- a/omnigibson/robots/locomotion_robot.py +++ b/omnigibson/robots/locomotion_robot.py @@ -100,7 +100,7 @@ def _default_base_null_joint_controller_config(self): "motor_type": "velocity", "control_limits": self.control_limits, "dof_idx": self.base_control_idx, - "default_command": th.zeros(len(self.base_control_idx)), + "default_goal": th.zeros(len(self.base_control_idx)), "use_impedances": False, } diff --git a/omnigibson/robots/manipulation_robot.py b/omnigibson/robots/manipulation_robot.py index b92853df7..752447992 100644 --- a/omnigibson/robots/manipulation_robot.py +++ b/omnigibson/robots/manipulation_robot.py @@ -1090,7 +1090,7 @@ def _default_arm_null_joint_controller_configs(self): "motor_type": "position", "control_limits": self.control_limits, "dof_idx": self.arm_control_idx[arm], - "default_command": self.reset_joint_pos[self.arm_control_idx[arm]], + "default_goal": self.reset_joint_pos[self.arm_control_idx[arm]], "use_impedances": False, } return dic @@ -1153,7 +1153,7 @@ def _default_gripper_null_controller_configs(self): "motor_type": "velocity", "control_limits": self.control_limits, "dof_idx": self.gripper_control_idx[arm], - "default_command": th.zeros(len(self.gripper_control_idx[arm])), + "default_goal": th.zeros(len(self.gripper_control_idx[arm])), "use_impedances": False, } return dic From 66cfff777256b9e2a658e72297cb2ab90512f7c0 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 19 Dec 2024 20:05:30 +0000 Subject: [PATCH 35/38] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- omnigibson/controllers/osc_controller.py | 4 +++- omnigibson/utils/transform_utils.py | 1 - omnigibson/utils/usd_utils.py | 4 +++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/omnigibson/controllers/osc_controller.py b/omnigibson/controllers/osc_controller.py index c97a4fbae..d7165f931 100644 --- a/omnigibson/controllers/osc_controller.py +++ b/omnigibson/controllers/osc_controller.py @@ -652,4 +652,6 @@ def _compute_osc_torques_numpy( # Set these as part of the backend values -add_compute_function(name="compute_osc_torques", np_function=_compute_osc_torques_numpy, th_function=_compute_osc_torques_torch) +add_compute_function( + name="compute_osc_torques", np_function=_compute_osc_torques_numpy, th_function=_compute_osc_torques_torch +) diff --git a/omnigibson/utils/transform_utils.py 
b/omnigibson/utils/transform_utils.py index f9db861db..ddbf4a0c2 100644 --- a/omnigibson/utils/transform_utils.py +++ b/omnigibson/utils/transform_utils.py @@ -335,7 +335,6 @@ def random_quaternion(num_quaternions: int = 1) -> torch.Tensor: return quaternions - @torch.compile def random_axis_angle(angle_limit: float = 2.0 * math.pi): """ diff --git a/omnigibson/utils/usd_utils.py b/omnigibson/utils/usd_utils.py index 0c2ff7dd5..41c847fb7 100644 --- a/omnigibson/utils/usd_utils.py +++ b/omnigibson/utils/usd_utils.py @@ -1936,4 +1936,6 @@ def _compute_relative_poses_numpy(idx, n_links, all_tfs, base_pose): # Set these as part of the backend values -add_compute_function(name="compute_relative_poses", np_function=_compute_relative_poses_numpy, th_function=_compute_relative_poses_torch) +add_compute_function( + name="compute_relative_poses", np_function=_compute_relative_poses_numpy, th_function=_compute_relative_poses_torch +) From 374513bcc69d9647a5fa82e7cd4352422b8c55a0 Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Thu, 19 Dec 2024 12:39:36 -0800 Subject: [PATCH 36/38] re-add quaternions_close --- omnigibson/utils/transform_utils.py | 21 +++++++++++++++++++++ omnigibson/utils/transform_utils_np.py | 16 ++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/omnigibson/utils/transform_utils.py b/omnigibson/utils/transform_utils.py index f9db861db..0cfcb6606 100644 --- a/omnigibson/utils/transform_utils.py +++ b/omnigibson/utils/transform_utils.py @@ -1345,6 +1345,27 @@ def transform_points(points: torch.Tensor, matrix: torch.Tensor, translate: bool return torch.mm(matrix[:dim, :dim], points.t()).t() +@torch.compile +def quaternions_close(q1: torch.Tensor, q2: torch.Tensor, atol: float = 1e-3) -> bool: + """ + Whether two quaternions represent the same rotation, + allowing for the possibility that one is the negative of the other. + + Arguments: + q1: torch.Tensor + First quaternion + q2: torch.Tensor + Second quaternion + atol: float + Absolute tolerance for comparison + + Returns: + bool + Whether the quaternions are close + """ + return torch.allclose(q1, q2, atol=atol) or torch.allclose(q1, -q2, atol=atol) + + @torch.compile def orientation_error(desired, current): """ diff --git a/omnigibson/utils/transform_utils_np.py b/omnigibson/utils/transform_utils_np.py index d0d67ea84..6cd278322 100644 --- a/omnigibson/utils/transform_utils_np.py +++ b/omnigibson/utils/transform_utils_np.py @@ -1220,6 +1220,22 @@ def transform_points(points, matrix, translate=True): return (matrix[:dim, :dim] @ points.T).T +def quaternions_close(q1, q2, atol=1e-3): + """ + Whether two quaternions represent the same rotation, + allowing for the possibility that one is the negative of the other. 
+ + Arguments: + q1 (np.array): First quaternion + q2 (np.array): Second quaternion + atol (float): Absolute tolerance for comparison + + Returns: + bool: Whether the quaternions are close + """ + return np.allclose(q1, q2, atol=atol) or np.allclose(q1, -q2, atol=atol) + + @jit(nopython=True) def orientation_error(desired, current): """ From 46c6ecb1d9a9aa5afd2bf44cf6bc34cc408ba7ac Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Thu, 19 Dec 2024 12:41:43 -0800 Subject: [PATCH 37/38] re-add transformation_matrix --- omnigibson/utils/transform_utils.py | 23 +++++++++++++++++++++++ omnigibson/utils/transform_utils_np.py | 22 ++++++++++++++++++++++ 2 files changed, 45 insertions(+) diff --git a/omnigibson/utils/transform_utils.py b/omnigibson/utils/transform_utils.py index 98eb6bb73..985716e9d 100644 --- a/omnigibson/utils/transform_utils.py +++ b/omnigibson/utils/transform_utils.py @@ -981,6 +981,29 @@ def rotation_matrix(angle: float, direction: torch.Tensor) -> torch.Tensor: return R +@torch.compile +def transformation_matrix(angle: float, direction: torch.Tensor, point: Optional[torch.Tensor] = None) -> torch.Tensor: + """ + Returns a 4x4 homogeneous transformation matrix to rotate about axis defined by point and direction. + Args: + angle (float): Magnitude of rotation in radians + direction (torch.Tensor): (ax,ay,az) axis about which to rotate + point (Optional[torch.Tensor]): If specified, is the (x,y,z) point about which the rotation will occur + Returns: + torch.Tensor: 4x4 homogeneous transformation matrix + """ + R = rotation_matrix(angle, direction) + + M = torch.eye(4, dtype=torch.float32, device=direction.device) + M[:3, :3] = R + + if point is not None: + # Rotation not about origin + point = point.to(dtype=torch.float32) + M[:3, 3] = point - R @ point + return M + + @torch.compile def clip_translation(dpos, limit): """ diff --git a/omnigibson/utils/transform_utils_np.py b/omnigibson/utils/transform_utils_np.py index 6cd278322..b530f9a49 100644 --- a/omnigibson/utils/transform_utils_np.py +++ b/omnigibson/utils/transform_utils_np.py @@ -888,6 +888,28 @@ def rotation_matrix(angle, direction, point=None): return M +def transformation_matrix(angle, direction, point=None): + """ + Returns a 4x4 homogeneous transformation matrix to rotate about axis defined by point and direction. 
+ Args: + angle (float): Magnitude of rotation in radians + direction (np.array): (ax,ay,az) axis about which to rotate + point (bool): If specified, is the (x,y,z) point about which the rotation will occur + + Returns: + np.array: 4x4 homogeneous transformation matrix + """ + R = rotation_matrix(angle, direction) + + M = np.eye(4) + M[:3, :3] = R + + if point is not None: + # Rotation not about origin + M[:3, 3] = point - R @ point + return M + + def clip_translation(dpos, limit): """ Limits a translation (delta position) to a specified limit From a1fbadd6e4f17527963f6ad7729bb85b273d9625 Mon Sep 17 00:00:00 2001 From: cremebrule <84cremebrule@gmail.com> Date: Thu, 19 Dec 2024 14:01:56 -0800 Subject: [PATCH 38/38] add copysign to np transform utils --- omnigibson/utils/transform_utils.py | 4 ++-- omnigibson/utils/transform_utils_np.py | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/omnigibson/utils/transform_utils.py b/omnigibson/utils/transform_utils.py index 985716e9d..a89a25491 100644 --- a/omnigibson/utils/transform_utils.py +++ b/omnigibson/utils/transform_utils.py @@ -44,7 +44,7 @@ @torch.compile -def _copysign(a, b): +def copysign(a, b): # type: (float, torch.Tensor) -> torch.Tensor a = torch.tensor(a, device=b.device, dtype=torch.float).repeat(b.shape[0]) return torch.abs(a) * torch.sign(b) @@ -588,7 +588,7 @@ def quat2euler(q): roll = torch.atan2(sinr_cosp, cosr_cosp) # pitch (y-axis rotation) sinp = 2.0 * (q[:, qw] * q[:, qy] - q[:, qz] * q[:, qx]) - pitch = torch.where(torch.abs(sinp) >= 1, _copysign(math.pi / 2.0, sinp), torch.asin(sinp)) + pitch = torch.where(torch.abs(sinp) >= 1, copysign(math.pi / 2.0, sinp), torch.asin(sinp)) # yaw (z-axis rotation) siny_cosp = 2.0 * (q[:, qw] * q[:, qz] + q[:, qx] * q[:, qy]) cosy_cosp = q[:, qw] * q[:, qw] + q[:, qx] * q[:, qx] - q[:, qy] * q[:, qy] - q[:, qz] * q[:, qz] diff --git a/omnigibson/utils/transform_utils_np.py b/omnigibson/utils/transform_utils_np.py index b530f9a49..ddf2ac0c3 100644 --- a/omnigibson/utils/transform_utils_np.py +++ b/omnigibson/utils/transform_utils_np.py @@ -50,6 +50,11 @@ _TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items()) +def copysign(a, b): + a = np.array(a).repeat(b.shape[0]) + return np.abs(a) * np.sign(b) + + def anorm(x, axis=None, keepdims=False): """Compute L2 norms alogn specified axes.""" return np.linalg.norm(x, axis=axis, keepdims=keepdims)
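
The numpy helpers restored in the last three patches (quaternions_close, transformation_matrix, copysign) are small enough to exercise in isolation. The sketch below is illustrative only and is not part of the patch series: it inlines copysign and quaternions_close as they appear in the diffs above, rebuilds transformation_matrix with scipy's Rotation standing in for the module's own rotation_matrix (an assumption, not what transform_utils_np actually calls), and adds a hypothetical __main__ check of the expected behaviour.

    # Illustrative sketch only -- not part of the patches above.
    # Assumes numpy and scipy are installed; scipy's Rotation is a
    # stand-in for transform_utils_np.rotation_matrix.
    import numpy as np
    from scipy.spatial.transform import Rotation as R


    def copysign(a, b):
        # Broadcast scalar `a` to b's length, keep |a| but take b's sign
        a = np.array(a).repeat(b.shape[0])
        return np.abs(a) * np.sign(b)


    def quaternions_close(q1, q2, atol=1e-3):
        # q and -q encode the same rotation, so accept either match
        return np.allclose(q1, q2, atol=atol) or np.allclose(q1, -q2, atol=atol)


    def transformation_matrix(angle, direction, point=None):
        # 4x4 homogeneous rotation about an axis through `point`
        rot = R.from_rotvec(angle * np.asarray(direction, dtype=np.float64)).as_matrix()
        M = np.eye(4)
        M[:3, :3] = rot
        if point is not None:
            point = np.asarray(point, dtype=np.float64)
            M[:3, 3] = point - rot @ point
        return M


    if __name__ == "__main__":
        # (x, y, z, w) convention, matching the quaternions used in the diffs
        q = R.from_euler("z", 0.3).as_quat()
        assert quaternions_close(q, -q)  # sign flip is the same rotation

        # copysign keeps the magnitude of the scalar, takes the sign of b
        print(copysign(np.pi / 2.0, np.array([-2.0, 0.5, 3.0])))  # [-pi/2, pi/2, pi/2]

        # Rotating about an axis through `point` leaves that point fixed
        M = transformation_matrix(0.5, np.array([0.0, 0.0, 1.0]), point=np.array([1.0, 2.0, 0.0]))
        print(M @ np.array([1.0, 2.0, 0.0, 1.0]))  # ~[1, 2, 0, 1]

The properties checked here are the ones the re-added helpers are meant to guarantee: q and -q compare as the same rotation, and a rotation about an axis through point maps that point to itself, which is exactly the point - R @ point translation term in transformation_matrix.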