diff --git a/docs/assets/robots/A1.png b/docs/assets/robots/A1.png index 856daa0cb..19d0c55bc 100644 Binary files a/docs/assets/robots/A1.png and b/docs/assets/robots/A1.png differ diff --git a/docs/assets/robots/R1.png b/docs/assets/robots/R1.png index 8733d8836..29cf0e549 100644 Binary files a/docs/assets/robots/R1.png and b/docs/assets/robots/R1.png differ diff --git a/docs/assets/robots/Stretch.png b/docs/assets/robots/Stretch.png index 79553b17c..192f116f4 100644 Binary files a/docs/assets/robots/Stretch.png and b/docs/assets/robots/Stretch.png differ diff --git a/omnigibson/action_primitives/curobo.py b/omnigibson/action_primitives/curobo.py index c203bd17d..3450c98a4 100644 --- a/omnigibson/action_primitives/curobo.py +++ b/omnigibson/action_primitives/curobo.py @@ -150,6 +150,7 @@ def __init__( motion_cfg_kwargs=None, batch_size=2, use_cuda_graph=True, + collision_activation_distance=0.005, debug=False, use_default_embodiment_only=False, ): @@ -165,6 +166,9 @@ def __init__( MotionGenConfig.load_from_robot_config(...) batch_size (int): Size of batches for computing trajectories. This must be FIXED use_cuda_graph (bool): Whether to use CUDA graph for motion generation or not + collision_activation_distance (float): Distance threshold at which a collision is detected. + Increasing this value will make the motion planner more conservative in its planning with respect + to the underlying sphere representation of the robot debug (bool): Whether to debug generation or not, setting this True will set use_cuda_graph to False implicitly use_default_embodiment_only (bool): Whether to use only the default embodiment for the robot or not """ @@ -227,7 +231,7 @@ def __init__( num_trajopt_seeds=4, num_graph_seeds=4, interpolation_dt=0.03, - collision_activation_distance=0.005, + collision_activation_distance=collision_activation_distance, self_collision_check=True, maximum_trajectory_dt=None, fixed_iters_trajopt=True, diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index d79c074c2..2f11fc5b8 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -36,7 +36,8 @@ from omnigibson.robots.locomotion_robot import LocomotionRobot from omnigibson.robots.manipulation_robot import ManipulationRobot from omnigibson.tasks.behavior_task import BehaviorTask -from omnigibson.utils.control_utils import FKSolver, IKSolver, orientation_error +from omnigibson.utils.backend_utils import _compute_backend as cb +from omnigibson.utils.control_utils import FKSolver, IKSolver from omnigibson.utils.grasping_planning_utils import get_grasp_poses_for_object_sticky, get_grasp_position_for_open from omnigibson.utils.motion_planning_utils import ( detect_robot_collision_in_sim, @@ -336,13 +337,13 @@ def __init__( arm = f"arm_{arm_name}" arm_ctrl = self.robot.controllers[arm] if isinstance(arm_ctrl, InverseKinematicsController): - pos_relative = control_dict[f"{eef}_pos_relative"] - quat_relative = control_dict[f"{eef}_quat_relative"] + pos_relative = cb.to_torch(control_dict[f"{eef}_pos_relative"]) + quat_relative = cb.to_torch(control_dict[f"{eef}_quat_relative"]) quat_relative_axis_angle = T.quat2axisangle(quat_relative) self._arm_targets[arm] = (pos_relative, quat_relative_axis_angle) else: - arm_target = control_dict["joint_position"][arm_ctrl.dof_idx] + arm_target = 
cb.to_torch(control_dict["joint_position"])[arm_ctrl.dof_idx] self._arm_targets[arm] = arm_target self.robot_copy = self._load_robot_copy() @@ -1472,7 +1473,7 @@ def _empty_action(self, follow_arm_targets=True): ) delta_pos = target_pos - current_pos if controller.mode == "pose_delta_ori": - delta_orn = orientation_error( + delta_orn = T.orientation_error( T.quat2mat(T.axisangle2quat(target_orn_axisangle)), T.quat2mat(current_orn) ) partial_action = th.cat((delta_pos, delta_orn)) diff --git a/omnigibson/controllers/controller_base.py b/omnigibson/controllers/controller_base.py index 7bb6feeb8..60e5e7adc 100644 --- a/omnigibson/controllers/controller_base.py +++ b/omnigibson/controllers/controller_base.py @@ -4,6 +4,7 @@ import torch as th +from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.utils.python_utils import Recreatable, Registerable, Serializable, assert_valid_key, classproperty # Global dicts that will contain mappings @@ -112,11 +113,11 @@ def __init__( ] assert "has_limit" in control_limits, "Expected has_limit specified in control_limits, but does not exist." self._dof_has_limits = control_limits["has_limit"] - self._dof_idx = dof_idx.int() + self._dof_idx = cb.as_int(dof_idx) # Generate goal information self._goal_shapes = self._get_goal_shapes() - self._goal_dim = int(sum(th.prod(th.tensor(shape)) for shape in self._goal_shapes.values())) + self._goal_dim = int(sum(cb.prod(cb.array(shape)) for shape in self._goal_shapes.values())) # Initialize some other variables that will be filled in during runtime self._control = None @@ -127,15 +128,12 @@ def __init__( # Standardize command input / output limits to be (min_array, max_array) command_input_limits = ( - (-1.0, 1.0) + self._generate_default_command_input_limits() if type(command_input_limits) == str and command_input_limits == "default" else command_input_limits ) command_output_limits = ( - ( - self._control_limits[self.control_type][0][self.dof_idx], - self._control_limits[self.control_type][1][self.dof_idx], - ) + self._generate_default_command_output_limits() if type(command_output_limits) == str and command_output_limits == "default" else command_output_limits ) @@ -156,6 +154,31 @@ def __init__( ) ) + def _generate_default_command_input_limits(self): + """ + Generates default command input limits based on the control limits + + Returns: + 2-tuple: + - int or array: min command input limits + - int or array: max command input limits + """ + return (-1.0, 1.0) + + def _generate_default_command_output_limits(self): + """ + Generates default command output limits based on the control limits + + Returns: + 2-tuple: + - int or array: min command output limits + - int or array: max command output limits + """ + return ( + self._control_limits[self.control_type][0][self.dof_idx], + self._control_limits[self.control_type][1][self.dof_idx], + ) + def _preprocess_command(self, command): """ Clips + scales inputted @command according to self.command_input_limits and self.command_output_limits. 
@@ -169,7 +192,7 @@ def _preprocess_command(self, command): Array[float]: Processed command vector """ # Make sure command is a th.tensor - command = th.tensor([command], dtype=th.float32) if type(command) in {int, float} else command + command = cb.array([command]) if type(command) in {int, float} else command # We only clip and / or scale if self.command_input_limits exists if self._command_input_limits is not None: # Clip @@ -342,7 +365,7 @@ def compute_no_op_action(self, control_dict): if self._goal is None: self._goal = self.compute_no_op_goal(control_dict=control_dict) command = self._compute_no_op_action(control_dict=control_dict) - return self._reverse_preprocess_command(command) + return cb.to_torch(self._reverse_preprocess_command(command)) def _compute_no_op_action(self, control_dict): """ @@ -354,18 +377,26 @@ def _dump_state(self): # Default is just the command return dict( goal_is_valid=self._goal is not None, - goal=self._goal, + goal=None if self._goal is None else {k: cb.to_torch(v) for k, v in self._goal.items()}, ) def _load_state(self, state): # Make sure every entry in goal is a numpy array # Load goal - self._goal = None if state["goal"] is None else {name: goal_state for name, goal_state in state["goal"].items()} + if state["goal"] is None: + self._goal = None + else: + self._goal = dict() + for name, goal_state in state["goal"].items(): + if isinstance(goal_state, th.Tensor): + self._goal[name] = cb.from_torch(goal_state) + else: + self._goal[name] = goal_state def serialize(self, state): # Make sure size of the state is consistent, even if we have no goal goal_state_flattened = ( - th.cat([goal_state.flatten() for goal_state in self._goal.values()]) + th.cat([goal_state.flatten() for goal_state in state["goal"].values()]) if (state)["goal_is_valid"] else th.zeros(self.goal_dim) ) @@ -419,12 +450,8 @@ def nums2array(nums, dim): # Else, input is a single value, so we map to a numpy array of correct size and return return ( nums - if isinstance(nums, th.Tensor) - else ( - th.tensor(nums, dtype=th.float32) - if isinstance(nums, Iterable) - else th.ones(dim, dtype=th.float32) * nums - ) + if isinstance(nums, cb.arr_type) + else cb.array(nums) if isinstance(nums, Iterable) else cb.ones(dim) * nums ) @property diff --git a/omnigibson/controllers/dd_controller.py b/omnigibson/controllers/dd_controller.py index 38207b417..c4f15a246 100644 --- a/omnigibson/controllers/dd_controller.py +++ b/omnigibson/controllers/dd_controller.py @@ -1,6 +1,5 @@ -import torch as th - from omnigibson.controllers import ControlType, LocomotionController +from omnigibson.utils.backend_utils import _compute_backend as cb class DifferentialDriveController(LocomotionController): @@ -107,14 +106,14 @@ def compute_control(self, goal_dict, control_dict): right_wheel_joint_vel = (lin_vel + ang_vel * self._wheel_axle_halflength) / self._wheel_radius # Return desired velocities - return th.tensor([left_wheel_joint_vel, right_wheel_joint_vel]) + return cb.array([left_wheel_joint_vel, right_wheel_joint_vel]) def compute_no_op_goal(self, control_dict): # This is zero-vector, since we want zero linear / angular velocity - return dict(vel=th.zeros(2)) + return dict(vel=cb.zeros(2)) def _compute_no_op_action(self, control_dict): - return th.zeros(2, dtype=th.float32) + return cb.zeros(2) def _get_goal_shapes(self): # Add (2, )-array representing linear, angular velocity diff --git a/omnigibson/controllers/ik_controller.py b/omnigibson/controllers/ik_controller.py index fa0e18b2a..1059fa082 100644 --- 
a/omnigibson/controllers/ik_controller.py +++ b/omnigibson/controllers/ik_controller.py @@ -1,14 +1,11 @@ import math +from collections.abc import Iterable -import torch as th - -import omnigibson.utils.transform_utils as T from omnigibson.controllers import ControlType, ManipulationController from omnigibson.controllers.joint_controller import JointController -from omnigibson.macros import create_module_macros, gm -from omnigibson.utils.control_utils import orientation_error +from omnigibson.utils.backend_utils import _compute_backend as cb +from omnigibson.utils.backend_utils import _ComputeBackend, _ComputeNumpyBackend, _ComputeTorchBackend from omnigibson.utils.processing_utils import MovingAverageFilter -from omnigibson.utils.python_utils import assert_valid_key from omnigibson.utils.ui_utils import create_module_logger # Create module logger @@ -44,8 +41,8 @@ def __init__( dof_idx, command_input_limits="default", command_output_limits=( - th.tensor([-0.2, -0.2, -0.2, -0.5, -0.5, -0.5], dtype=th.float32), - th.tensor([0.2, 0.2, 0.2, 0.5, 0.5, 0.5], dtype=th.float32), + (-0.2, -0.2, -0.2, -0.5, -0.5, -0.5), + (0.2, 0.2, 0.2, 0.5, 0.5, 0.5), ), pos_kp=None, pos_damping_ratio=None, @@ -147,29 +144,26 @@ def limiter(target_pos: Array[float], target_quat: Array[float], control_dict: D if command_input_limits is not None: if type(command_input_limits) == str and command_input_limits == "default": command_input_limits = [ - th.tensor([-1.0, -1.0, -1.0, -math.pi, -math.pi, -math.pi], dtype=th.float32), - th.tensor([1.0, 1.0, 1.0, math.pi, math.pi, math.pi], dtype=th.float32), + cb.array([-1.0, -1.0, -1.0, -math.pi, -math.pi, -math.pi]), + cb.array([1.0, 1.0, 1.0, math.pi, math.pi, math.pi]), ] else: - command_input_limits[0][3:] = th.tensor( - [-math.pi] * len(command_input_limits[0][3:]), dtype=th.float32 - ) - command_input_limits[1][3:] = th.tensor( - [math.pi] * len(command_input_limits[1][3:]), dtype=th.float32 - ) + command_input_limits[0][3:] = cb.array([-math.pi] * len(command_input_limits[0][3:])) + command_input_limits[1][3:] = cb.array([math.pi] * len(command_input_limits[1][3:])) if command_output_limits is not None: + if not isinstance(command_output_limits, str) and isinstance(command_output_limits, Iterable): + command_output_limits = [ + cb.array(command_output_limits[0]), + cb.array(command_output_limits[1]), + ] if type(command_output_limits) == str and command_output_limits == "default": command_output_limits = [ - th.tensor([-1.0, -1.0, -1.0, -math.pi, -math.pi, -math.pi], dtype=th.float32), - th.tensor([1.0, 1.0, 1.0, math.pi, math.pi, math.pi], dtype=th.float32), + cb.array([-1.0, -1.0, -1.0, -math.pi, -math.pi, -math.pi]), + cb.array([1.0, 1.0, 1.0, math.pi, math.pi, math.pi]), ] else: - command_output_limits[0][3:] = th.tensor( - [-math.pi] * len(command_output_limits[0][3:]), dtype=th.float32 - ) - command_output_limits[1][3:] = th.tensor( - [math.pi] * len(command_output_limits[1][3:]), dtype=th.float32 - ) + command_output_limits[0][3:] = cb.array([-math.pi] * len(command_output_limits[0][3:])) + command_output_limits[1][3:] = cb.array([math.pi] * len(command_output_limits[1][3:])) # Run super init super().__init__( control_freq=control_freq, @@ -197,14 +191,15 @@ def reset(self): @property def state_size(self): # Add state size from the control filter - return super().state_size + self.control_filter.state_size + return super().state_size + (0 if self.control_filter is None else self.control_filter.state_size) def _dump_state(self): # Run super first state = 
super()._dump_state() # Add internal quaternion target and filter state - state["control_filter"] = self.control_filter.dump_state(serialized=False) + if self.control_filter is not None: + state["control_filter"] = self.control_filter.dump_state(serialized=False) return state @@ -217,7 +212,8 @@ def _load_state(self, state): self._fixed_quat_target = self._goal["target_quat"] # Load relevant info for this controller - self.control_filter.load_state(state["control_filter"], serialized=False) + if self.control_filter is not None: + self.control_filter.load_state(state["control_filter"], serialized=False) def serialize(self, state): # Run super first @@ -227,7 +223,11 @@ def serialize(self, state): return th.cat( [ state_flat, - self.control_filter.serialize(state=state["control_filter"]), + ( + th.tensor([]) + if self.control_filter is None + else self.control_filter.serialize(state=state["control_filter"]) + ), ] ) @@ -236,9 +236,11 @@ def deserialize(self, state): state_dict, idx = super().deserialize(state=state) # Deserialize state for this controller - state_dict["control_filter"], deserialized_items = self.control_filter.deserialize(state=state[idx:]) + if self.control_filter is not None: + state_dict["control_filter"], deserialized_items = self.control_filter.deserialize(state=state[idx:]) + idx += deserialized_items - return state_dict, idx + deserialized_items + return state_dict, idx def _update_goal(self, command, control_dict): # Grab important info from control dict @@ -263,19 +265,19 @@ def _update_goal(self, command, control_dict): target_quat = quat_relative elif self.mode == "pose_absolute_ori" or self.mode == "absolute_pose": # Received "delta" ori is in fact the desired absolute orientation - target_quat = T.axisangle2quat(command[3:6]) + target_quat = cb.T.axisangle2quat(command[3:6]) else: # pose_delta_ori control # Grab dori and compute target ori - dori = T.quat2mat(T.axisangle2quat(command[3:6])) - target_quat = T.mat2quat(dori @ T.quat2mat(quat_relative)) + dori = cb.T.quat2mat(cb.T.axisangle2quat(command[3:6])) + target_quat = cb.T.mat2quat(dori @ cb.T.quat2mat(quat_relative)) # Possibly limit to workspace if specified if self.workspace_pose_limiter is not None: target_pos, target_quat = self.workspace_pose_limiter(target_pos, target_quat, control_dict) goal_dict = dict( - target_pos=target_pos, - target_quat=target_quat, + target_pos=cb.as_float32(target_pos), + target_ori_mat=cb.as_float32(cb.T.quat2mat(target_quat)), ) return goal_dict @@ -289,7 +291,7 @@ def compute_control(self, goal_dict, control_dict): goal_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped goals necessary for controller computation. Must include the following keys: target_pos: robot-frame (x,y,z) desired end effector position - target_quat: robot-frame (x,y,z,w) desired end effector quaternion orientation + target_ori_mat: robot-frame desired end effector quaternion orientation matrix control_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped states necessary for controller computation. Must include the following keys: joint_position: Array of current joint positions @@ -303,37 +305,23 @@ def compute_control(self, goal_dict, control_dict): Returns: Array[float]: outputted (non-clipped!) 
velocity control signal to deploy """ - # Grab important info from control dict - pos_relative = control_dict[f"{self.task_name}_pos_relative"] - quat_relative = control_dict[f"{self.task_name}_quat_relative"] - target_pos = goal_dict["target_pos"] - target_quat = goal_dict["target_quat"] - # Calculate and return IK-backed out joint angles - current_joint_pos = control_dict["joint_position"][self.dof_idx] - - # If the delta is really small, we just keep the current joint position. This avoids joint - # drift caused by IK solver inaccuracy even when zero delta actions are provided. - if th.allclose(pos_relative, target_pos, atol=1e-4) and th.allclose(quat_relative, target_quat, atol=1e-4): - target_joint_pos = current_joint_pos - else: - # Compute the pose error. Note that this is computed NOT in the EEF frame but still - # in the base frame. - pos_err = target_pos - pos_relative - ori_err = orientation_error(T.quat2mat(target_quat), T.quat2mat(quat_relative)) - err = th.cat([pos_err, ori_err]) - - # Use the jacobian to compute a local approximation - j_eef = control_dict[f"{self.task_name}_jacobian_relative"][:, self.dof_idx] - j_eef_pinv = th.linalg.pinv(j_eef) - delta_j = j_eef_pinv @ err - target_joint_pos = current_joint_pos + delta_j - - # Clip values to be within the joint limits - target_joint_pos = target_joint_pos.clamp( - min=self._control_limits[ControlType.get_type("position")][0][self.dof_idx], - max=self._control_limits[ControlType.get_type("position")][1][self.dof_idx], - ) + q = control_dict["joint_position"][self.dof_idx] + j_eef = control_dict[f"{self.task_name}_jacobian_relative"][:, self.dof_idx] + ee_pos = control_dict[f"{self.task_name}_pos_relative"] + ee_quat = control_dict[f"{self.task_name}_quat_relative"] + + # Calculate desired joint positions + target_joint_pos = cb.compute_ik_qpos( + q=q, + j_eef=j_eef, + ee_pos=cb.as_float32(ee_pos), + ee_mat=cb.as_float32(cb.T.quat2mat(ee_quat)), + goal_pos=goal_dict["target_pos"], + goal_ori_mat=goal_dict["target_ori_mat"], + q_lower_limit=self._control_limits[ControlType.get_type("position")][0][self.dof_idx], + q_upper_limit=self._control_limits[ControlType.get_type("position")][1][self.dof_idx], + ) # Optionally pass through smoothing filter for better stability if self.control_filter is not None: @@ -344,16 +332,20 @@ def compute_control(self, goal_dict, control_dict): def compute_no_op_goal(self, control_dict): # No-op is maintaining current pose + target_pos = cb.copy(control_dict[f"{self.task_name}_pos_relative"]) + target_quat = cb.copy(control_dict[f"{self.task_name}_quat_relative"]) + + # Convert quat into eef ori mat return dict( - target_pos=control_dict[f"{self.task_name}_pos_relative"], - target_quat=control_dict[f"{self.task_name}_quat_relative"], + target_pos=cb.as_float32(target_pos), + target_ori_mat=cb.as_float32(cb.T.quat2mat(target_quat)), ) def _compute_no_op_action(self, control_dict): pos_relative = control_dict[f"{self.task_name}_pos_relative"] quat_relative = control_dict[f"{self.task_name}_quat_relative"] - command = th.zeros(6, dtype=th.float32, device=pos_relative.device) + command = cb.zeros(6) # Handle position if self.mode == "absolute_pose": @@ -364,7 +356,7 @@ def _compute_no_op_action(self, control_dict): # Handle orientation if self.mode in ("pose_absolute_ori", "absolute_pose"): - command[3:] = T.quat2axisangle(quat_relative) + command[3:] = cb.T.quat2axisangle(quat_relative) else: # For these modes, we don't need to add orientation to the command pass @@ -374,9 +366,82 @@ def 
_compute_no_op_action(self, control_dict): def _get_goal_shapes(self): return dict( target_pos=(3,), - target_quat=(4,), + target_ori_mat=(3, 3), ) @property def command_dim(self): return IK_MODE_COMMAND_DIMS[self.mode] + + +import torch as th + +import omnigibson.utils.transform_utils as TT + + +@th.jit.script +def _compute_ik_qpos_torch( + q: th.Tensor, + j_eef: th.Tensor, + ee_pos: th.Tensor, + ee_mat: th.Tensor, + goal_pos: th.Tensor, + goal_ori_mat: th.Tensor, + q_lower_limit: th.Tensor, + q_upper_limit: th.Tensor, +): + # Compute the pose error. Note that this is computed NOT in the EEF frame but still + # in the base frame. + pos_err = goal_pos - ee_pos + ori_err = TT.orientation_error(goal_ori_mat, ee_mat) + err = th.cat([pos_err, ori_err]) + + # Use the jacobian to compute a local approximation + j_eef_pinv = th.linalg.pinv(j_eef) + delta_j = j_eef_pinv @ err + target_joint_pos = q + delta_j + + # Clip values to be within the joint limits + return target_joint_pos.clip( + min=q_lower_limit, + max=q_upper_limit, + ) + + +import numpy as np +from numba import jit + +import omnigibson.utils.transform_utils_np as NT + + +# Use numba since faster +@jit(nopython=True) +def _compute_ik_qpos_numpy( + q, + j_eef, + ee_pos, + ee_mat, + goal_pos, + goal_ori_mat, + q_lower_limit, + q_upper_limit, +): + # Compute the pose error. Note that this is computed NOT in the EEF frame but still + # in the base frame. + pos_err = goal_pos - ee_pos + ori_err = NT.orientation_error(goal_ori_mat, ee_mat).astype(np.float32) + err = np.concatenate((pos_err, ori_err)) + + # Use the jacobian to compute a local approximation + j_eef_pinv = np.linalg.pinv(j_eef) + delta_j = j_eef_pinv @ err + target_joint_pos = q + delta_j + + # Clip values to be within the joint limits + return target_joint_pos.clip(q_lower_limit, q_upper_limit) + + +# Set these as part of the backend values +setattr(_ComputeBackend, "compute_ik_qpos", None) +setattr(_ComputeTorchBackend, "compute_ik_qpos", _compute_ik_qpos_torch) +setattr(_ComputeNumpyBackend, "compute_ik_qpos", _compute_ik_qpos_numpy) diff --git a/omnigibson/controllers/joint_controller.py b/omnigibson/controllers/joint_controller.py index 4ee046cd6..9dcb7ea33 100644 --- a/omnigibson/controllers/joint_controller.py +++ b/omnigibson/controllers/joint_controller.py @@ -1,8 +1,5 @@ import math -import torch as th - -import omnigibson.utils.transform_utils as T from omnigibson.controllers import ( ControlType, GripperController, @@ -11,6 +8,8 @@ ManipulationController, ) from omnigibson.macros import create_module_macros +from omnigibson.utils.backend_utils import _compute_backend as cb +from omnigibson.utils.backend_utils import _ComputeBackend, _ComputeNumpyBackend, _ComputeTorchBackend from omnigibson.utils.python_utils import assert_valid_key from omnigibson.utils.ui_utils import create_module_logger @@ -140,6 +139,14 @@ def __init__( command_output_limits=command_output_limits, ) + def _generate_default_command_output_limits(self): + # Use motor type instead of default control type, since, e.g, use_impedances is commanding joint positions + # but controls low-level efforts + return ( + self._control_limits[ControlType.get_type(self._motor_type)][0][self.dof_idx], + self._control_limits[ControlType.get_type(self._motor_type)][1][self.dof_idx], + ) + def _update_goal(self, command, control_dict): # If we're using delta commands, add this value if self._use_delta_commands: @@ -158,10 +165,10 @@ def _update_goal(self, command, control_dict): delta_rots = command[[rx_ind, ry_ind, 
rz_ind]] # Compute the final rotations in the quaternion space. - _, end_quat = T.pose_transform( - th.zeros(3), T.euler2quat(delta_rots), th.zeros(3), T.euler2quat(start_rots) + _, end_quat = cb.T.pose_transform( + cb.zeros(3), cb.T.euler2quat(delta_rots), cb.zeros(3), cb.T.euler2quat(start_rots) ) - end_rots = T.quat2euler(end_quat) + end_rots = cb.T.quat2euler(end_quat) # Update the command target[[rx_ind, ry_ind, rz_ind]] = end_rots @@ -212,9 +219,7 @@ def compute_control(self, goal_dict, control_dict): else: # effort u = target - dof_idxs_mat = th.meshgrid(self.dof_idx, self.dof_idx, indexing="xy") - mm = control_dict["mass_matrix"][dof_idxs_mat] - u = mm @ u + u = cb.compute_joint_torques(u, control_dict["mass_matrix"], self.dof_idx) # Add gravity compensation if self._use_gravity_compensation: @@ -238,21 +243,21 @@ def compute_no_op_goal(self, control_dict): target = control_dict[f"joint_{self._motor_type}"][self.dof_idx] else: # For velocity / effort, directly set to 0 - target = th.zeros(self.control_dim) + target = cb.zeros(self.control_dim) return dict(target=target) def _compute_no_op_action(self, control_dict): if self.motor_type == "position": if self._use_delta_commands: - return th.zeros(self.command_dim) + return cb.zeros(self.command_dim) else: return control_dict[f"joint_position"][self.dof_idx] elif self.motor_type == "velocity": if self._use_delta_commands: return -control_dict[f"joint_velocity"][self.dof_idx] else: - return th.zeros(self.command_dim) + return cb.zeros(self.command_dim) raise ValueError("Cannot compute noop action for effort motor type.") @@ -286,3 +291,59 @@ def control_type(self): @property def command_dim(self): return len(self.dof_idx) + + +import torch as th + + +@th.compile +def _compute_joint_torques_torch( + u: th.Tensor, + mm: th.Tensor, + dof_idx: th.Tensor, +): + dof_idxs_mat = th.meshgrid(dof_idx, dof_idx, indexing="xy") + return mm[dof_idxs_mat] @ u + + +import numpy as np +from numba import jit + + +# Use numba since faster +@jit(nopython=True) +def numba_ix(arr, rows, cols): + """ + Numba compatible implementation of arr[np.ix_(rows, cols)] for 2D arrays. 
+ + Implementation from: + https://github.com/numba/numba/issues/5894#issuecomment-974701551 + + :param arr: 2D array to be indexed + :param rows: Row indices + :param cols: Column indices + :return: 2D array with the given rows and columns of the input array + """ + one_d_index = np.zeros(len(rows) * len(cols), dtype=np.int32) + for i, r in enumerate(rows): + start = i * len(cols) + one_d_index[start : start + len(cols)] = cols + arr.shape[1] * r + + arr_1d = arr.reshape((arr.shape[0] * arr.shape[1], 1)) + slice_1d = np.take(arr_1d, one_d_index) + return slice_1d.reshape((len(rows), len(cols))) + + +@jit(nopython=True) +def _compute_joint_torques_numpy( + u, + mm, + dof_idx, +): + return numba_ix(mm, dof_idx, dof_idx) @ u + + +# Set these as part of the backend values +setattr(_ComputeBackend, "compute_joint_torques", None) +setattr(_ComputeTorchBackend, "compute_joint_torques", _compute_joint_torques_torch) +setattr(_ComputeNumpyBackend, "compute_joint_torques", _compute_joint_torques_numpy) diff --git a/omnigibson/controllers/multi_finger_gripper_controller.py b/omnigibson/controllers/multi_finger_gripper_controller.py index 70dbc9681..ab888f2fe 100644 --- a/omnigibson/controllers/multi_finger_gripper_controller.py +++ b/omnigibson/controllers/multi_finger_gripper_controller.py @@ -2,6 +2,7 @@ from omnigibson.controllers import ControlType, GripperController, IsGraspingState from omnigibson.macros import create_module_macros +from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.utils.processing_utils import MovingAverageFilter from omnigibson.utils.python_utils import assert_valid_key @@ -95,8 +96,8 @@ def __init__( self._inverted = inverted self._mode = mode self._limit_tolerance = limit_tolerance - self._open_qpos = open_qpos if open_qpos is None else th.tensor(open_qpos) - self._closed_qpos = closed_qpos if closed_qpos is None else th.tensor(closed_qpos) + self._open_qpos = open_qpos if open_qpos is None else cb.array(open_qpos) + self._closed_qpos = closed_qpos if closed_qpos is None else cb.array(closed_qpos) # Create other args to be filled in at runtime self._is_grasping = IsGraspingState.FALSE @@ -117,6 +118,11 @@ def __init__( command_output_limits=command_output_limits, ) + def _generate_default_command_output_limits(self): + command_output_limits = super()._generate_default_command_output_limits() + + return cb.mean(command_output_limits[0]), cb.mean(command_output_limits[1]) + def reset(self): # Call super first super().reset() @@ -136,9 +142,9 @@ def _preprocess_command(self, command): # We extend this method to make sure command is always n-dimensional if self._mode != "independent": command = ( - th.tensor([command] * self.command_dim) + cb.array([command] * self.command_dim) if type(command) in {int, float} - else th.tensor([command[0]] * self.command_dim) + else cb.array([command[0]] * self.command_dim) ) # Flip the command if the direction is inverted. @@ -189,7 +195,7 @@ def compute_control(self, goal_dict, control_dict): ) else: # Use continuous signal. Make sure to go from command to control dim. 
- u = th.full((self.control_dim,), target[0]) if len(target) == 1 else target + u = cb.full((self.control_dim,), target[0]) if len(target) == 1 else target # If we're near the joint limits and we're using velocity / torque control, we zero out the action if self._motor_type in {"velocity", "torque"}: @@ -199,7 +205,7 @@ def compute_control(self, goal_dict, control_dict): violate_lower_limit = ( joint_pos < self._control_limits[ControlType.POSITION][0][self.dof_idx] + self._limit_tolerance ) - violation = th.logical_or(violate_upper_limit * (u > 0), violate_lower_limit * (u < 0)) + violation = cb.logical_or(violate_upper_limit * (u > 0), violate_lower_limit * (u < 0)) u *= ~violation # Update whether we're grasping or not @@ -223,7 +229,6 @@ def _update_grasping_state(self, control_dict): finger_vel = self._vel_filter.estimate(control_dict["joint_velocity"][self.dof_idx]) # Calculate grasping state based on mode of this controller - # Independent mode of MultiFingerGripperController does not have any good heuristics to determine is_grasping if self._mode == "independent": is_grasping = IsGraspingState.UNKNOWN @@ -233,7 +238,7 @@ def _update_grasping_state(self, control_dict): is_grasping = IsGraspingState.FALSE # Different values in the command for non-independent mode - cannot use heuristics - elif not th.all(self._control == self._control[0]): + elif not cb.all(self._control == self._control[0]): is_grasping = IsGraspingState.UNKNOWN # Joint position tolerance for is_grasping heuristics checking is smaller than or equal to the gripper @@ -245,11 +250,11 @@ def _update_grasping_state(self, control_dict): finger_pos = control_dict["joint_position"][self.dof_idx] # For joint position control, if the desired positions are the same as the current positions, is_grasping unknown - if self._motor_type == "position" and th.mean(th.abs(finger_pos - self._control)) < m.POS_TOLERANCE: + if self._motor_type == "position" and cb.abs(finger_pos - self._control).mean() < m.POS_TOLERANCE: is_grasping = IsGraspingState.UNKNOWN # For joint velocity / torque control, if the desired velocities / torques are zeros, is_grasping unknown - elif self._motor_type in {"velocity", "torque"} and th.mean(th.abs(self._control)) < m.VEL_TOLERANCE: + elif self._motor_type in {"velocity", "torque"} and cb.abs(self._control).mean() < m.VEL_TOLERANCE: is_grasping = IsGraspingState.UNKNOWN # Otherwise, the last control signal intends to "move" the gripper @@ -258,7 +263,7 @@ def _update_grasping_state(self, control_dict): max_pos = self._control_limits[ControlType.POSITION][1][self.dof_idx] # Make sure we don't have any invalid values (i.e.: fingers should be within the limits) - finger_pos = th.clip(finger_pos, min_pos, max_pos) + finger_pos = finger_pos.clip(min_pos, max_pos) # Check distance from both ends of the joint limits dist_from_lower_limit = finger_pos - min_pos @@ -266,11 +271,11 @@ def _update_grasping_state(self, control_dict): # If either of the joint positions are not near the joint limits with some tolerance (m.POS_TOLERANCE) valid_grasp_pos = ( - th.mean(dist_from_lower_limit) > m.POS_TOLERANCE or th.mean(dist_from_upper_limit) > m.POS_TOLERANCE + dist_from_lower_limit.mean() > m.POS_TOLERANCE or dist_from_upper_limit.mean() > m.POS_TOLERANCE ) # And the joint velocities are close to zero with some tolerance (m.VEL_TOLERANCE) - valid_grasp_vel = th.all(th.abs(finger_vel) < m.VEL_TOLERANCE) + valid_grasp_vel = cb.all(cb.abs(finger_vel) < m.VEL_TOLERANCE) # Then the gripper is grasping something, which stops 
the gripper from reaching its desired state is_grasping = IsGraspingState.TRUE if valid_grasp_pos and valid_grasp_vel else IsGraspingState.FALSE @@ -280,7 +285,7 @@ def _update_grasping_state(self, control_dict): def compute_no_op_goal(self, control_dict): # Just use a zero vector - return dict(target=th.zeros(self.command_dim)) + return dict(target=cb.zeros(self.command_dim)) def _compute_no_op_action(self, control_dict): # Take care of the special case of binary control @@ -288,18 +293,18 @@ def _compute_no_op_action(self, control_dict): command_val = -1 if self.is_grasping() == IsGraspingState.TRUE else 1 if self._inverted: command_val = -1 * command_val - return th.tensor([command_val], dtype=th.float32) + return cb.array([command_val]) if self._motor_type == "position": command = control_dict[f"joint_position"][self.dof_idx] elif self._motor_type == "velocity": - command = th.zeros(self.command_dim) + command = cb.zeros(self.command_dim) else: raise ValueError("Cannot compute noop action for effort motor type.") # Convert to binary / smooth mode if necessary if self._mode == "smooth": - command = th.mean(command, dim=-1, keepdim=True) + command = cb.mean(command, dim=-1, keepdim=True) return command diff --git a/omnigibson/controllers/null_joint_controller.py b/omnigibson/controllers/null_joint_controller.py index 4c92cbf9a..e4afe1f29 100644 --- a/omnigibson/controllers/null_joint_controller.py +++ b/omnigibson/controllers/null_joint_controller.py @@ -1,6 +1,5 @@ -import torch as th - from omnigibson.controllers import JointController +from omnigibson.utils.backend_utils import _compute_backend as cb class NullJointController(JointController): @@ -58,7 +57,7 @@ def __init__( applied """ # Store values - self._default_command = th.zeros(len(dof_idx)) if default_command is None else default_command + self._default_command = cb.zeros(len(dof_idx)) if default_command is None else default_command # Run super init super().__init__( @@ -81,7 +80,7 @@ def compute_no_op_goal(self, control_dict): def _preprocess_command(self, command): # Override super and force the processed command to be internal stored default value - return th.tensor(self._default_command) + return cb.array(self._default_command) def update_default_goal(self, target): """ @@ -95,4 +94,13 @@ def update_default_goal(self, target): len(target) == self.control_dim ), f"Default control must be length: {self.control_dim}, got length: {len(target)}" - self._default_command = th.tensor(target) + self._default_command = cb.array(target) + + def _compute_no_op_action(self, control_dict): + # Empty tensor since no action should be received + return cb.array([]) + + @property + def command_dim(self): + # Auto-generates control, so no command should be received + return 0 diff --git a/omnigibson/controllers/osc_controller.py b/omnigibson/controllers/osc_controller.py index 01515fc3e..032e501f0 100644 --- a/omnigibson/controllers/osc_controller.py +++ b/omnigibson/controllers/osc_controller.py @@ -1,12 +1,10 @@ import math -import torch as th - -import omnigibson.utils.transform_utils as T from omnigibson.controllers import ControlType, ManipulationController -from omnigibson.utils.control_utils import orientation_error +from omnigibson.utils.backend_utils import _compute_backend as cb +from omnigibson.utils.backend_utils import _ComputeBackend, _ComputeNumpyBackend, _ComputeTorchBackend from omnigibson.utils.processing_utils import MovingAverageFilter -from omnigibson.utils.python_utils import assert_valid_key, nums2array +from 
omnigibson.utils.python_utils import assert_valid_key from omnigibson.utils.ui_utils import create_module_logger # Create module logger @@ -144,13 +142,13 @@ def limiter(target_pos: Array[float], target_quat: Array[float], control_dict: D ) # Store gains - self.kp = nums2array(nums=kp, dim=6, dtype=th.float32) if kp is not None else None + self.kp = self.nums2array(nums=kp, dim=6) if kp is not None else None self.damping_ratio = damping_ratio - self.kp_null = nums2array(nums=kp_null, dim=control_dim, dtype=th.float32) if kp_null is not None else None - self.kd_null = 2 * th.sqrt(self.kp_null) if kp_null is not None else None # critically damped - self.kp_limits = th.tensor(kp_limits, dtype=th.float32) - self.damping_ratio_limits = th.tensor(damping_ratio_limits, dtype=th.float32) - self.kp_null_limits = th.tensor(kp_null_limits, dtype=th.float32) + self.kp_null = self.nums2array(nums=kp_null, dim=control_dim) if kp_null is not None else None + self.kd_null = 2 * cb.sqrt(self.kp_null) if kp_null is not None else None # critically damped + self.kp_limits = cb.array(kp_limits) + self.damping_ratio_limits = cb.array(damping_ratio_limits) + self.kp_null_limits = cb.array(kp_null_limits) # Store settings for whether we're learning gains or not self.variable_kp = self.kp is None @@ -199,12 +197,12 @@ def limiter(target_pos: Array[float], target_quat: Array[float], control_dict: D is_input_limits_numeric = not (command_input_limits is None or isinstance(command_input_limits, str)) is_output_limits_numeric = not (command_output_limits is None or isinstance(command_output_limits, str)) command_input_limits = ( - [nums2array(lim, dim=6, dtype=th.float32) for lim in command_input_limits] + [self.nums2array(lim, dim=6) for lim in command_input_limits] if is_input_limits_numeric else command_input_limits ) command_output_limits = ( - [nums2array(lim, dim=6, dtype=th.float32) for lim in command_output_limits] + [self.nums2array(lim, dim=6) for lim in command_output_limits] if is_output_limits_numeric else command_output_limits ) @@ -220,12 +218,12 @@ def limiter(target_pos: Array[float], target_quat: Array[float], control_dict: D # Add this to input / output limits if is_input_limits_numeric: command_input_limits = [ - th.cat([lim, nums2array(nums=val, dim=dim, dtype=th.float32)]) + cb.cat([lim, self.nums2array(nums=val, dim=dim)]) for lim, val in zip(command_input_limits, (-1, 1)) ] if is_output_limits_numeric: command_output_limits = [ - th.cat([lim, nums2array(nums=val, dim=dim, dtype=th.float32)]) + cb.cat([lim, self.nums2array(nums=val, dim=dim)]) for lim, val in zip(command_output_limits, gain_limits) ] # Update command dim @@ -293,7 +291,7 @@ def _update_variable_gains(self, gains): idx += 6 if self.variable_kp_null: self.kp_null = gains[:, idx : idx + self.control_dim] - self.kd_null = 2 * th.sqrt(self.kp_null) # critically damped + self.kd_null = 2 * cb.sqrt(self.kp_null) # critically damped idx += self.control_dim def _update_goal(self, command, control_dict): @@ -315,8 +313,8 @@ def _update_goal(self, command, control_dict): frame to control, computed in its local frame (e.g.: robot base frame) """ # Grab important info from control dict - pos_relative = control_dict[f"{self.task_name}_pos_relative"].clone() - quat_relative = control_dict[f"{self.task_name}_quat_relative"].clone() + pos_relative = cb.copy(control_dict[f"{self.task_name}_pos_relative"]) + quat_relative = cb.copy(control_dict[f"{self.task_name}_quat_relative"]) # Convert position command to absolute values if needed if self.mode 
== "absolute_pose": @@ -336,11 +334,11 @@ def _update_goal(self, command, control_dict): target_quat = quat_relative elif self.mode == "pose_absolute_ori" or self.mode == "absolute_pose": # Received "delta" ori is in fact the desired absolute orientation - target_quat = T.axisangle2quat(command[3:6]) + target_quat = cb.T.axisangle2quat(command[3:6]) else: # pose_delta_ori control # Grab dori and compute target ori - dori = T.quat2mat(T.axisangle2quat(command[3:6])) - target_quat = T.mat2quat(dori @ T.quat2mat(quat_relative)) + dori = cb.T.quat2mat(cb.T.axisangle2quat(command[3:6])) + target_quat = cb.T.mat2quat(dori @ cb.T.quat2mat(quat_relative)) # Possibly limit to workspace if specified if self.workspace_pose_limiter is not None: @@ -352,8 +350,8 @@ def _update_goal(self, command, control_dict): # Set goals and return return dict( - target_pos=target_pos, - target_ori_mat=T.quat2mat(target_quat), + target_pos=cb.as_float32(target_pos), + target_ori_mat=cb.as_float32(cb.T.quat2mat(target_quat)), ) def compute_control(self, goal_dict, control_dict): @@ -364,7 +362,7 @@ def compute_control(self, goal_dict, control_dict): goal_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped goals necessary for controller computation. Must include the following keys: target_pos: robot-frame (x,y,z) desired end effector position - target_quat: robot-frame (x,y,z,w) desired end effector quaternion orientation + target_ori_mat: robot-frame desired end effector quaternion orientation matrix control_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped states necessary for controller computation. Must include the following keys: joint_position: Array of current joint positions @@ -389,31 +387,36 @@ def compute_control(self, goal_dict, control_dict): # For now, always use internal values kp = self.kp damping_ratio = self.damping_ratio - kd = 2 * th.sqrt(kp) * damping_ratio + kd = 2 * cb.sqrt(kp) * damping_ratio # Extract relevant values from the control dict - dof_idxs_mat = tuple(th.meshgrid(self.dof_idx, self.dof_idx)) + dof_idxs_mat = tuple(cb.meshgrid(self.dof_idx, self.dof_idx)) q = control_dict["joint_position"][self.dof_idx] qd = control_dict["joint_velocity"][self.dof_idx] mm = control_dict["mass_matrix"][dof_idxs_mat] j_eef = control_dict[f"{self.task_name}_jacobian_relative"][:, self.dof_idx] ee_pos = control_dict[f"{self.task_name}_pos_relative"] ee_quat = control_dict[f"{self.task_name}_quat_relative"] - ee_vel = th.cat( + ee_vel = cb.cat( [control_dict[f"{self.task_name}_lin_vel_relative"], control_dict[f"{self.task_name}_ang_vel_relative"]] ) base_lin_vel = control_dict["root_rel_lin_vel"] base_ang_vel = control_dict["root_rel_ang_vel"] # Calculate torques - u = _compute_osc_torques( + u = cb.compute_osc_torques( q=q, qd=qd, mm=mm, j_eef=j_eef, - ee_pos=ee_pos, - ee_mat=T.quat2mat(ee_quat), - ee_vel=ee_vel, + ee_pos=cb.as_float32(ee_pos), + ee_mat=cb.as_float32(cb.T.quat2mat(ee_quat)), + ee_lin_vel=cb.as_float32(ee_vel[:3]), + ee_ang_vel_err=cb.as_float32( + cb.T.quat2axisangle( + cb.T.quat_multiply(cb.T.axisangle2quat(-ee_vel[3:]), cb.T.axisangle2quat(base_ang_vel)) + ) + ), goal_pos=goal_dict["target_pos"], goal_ori_mat=goal_dict["target_ori_mat"], kp=kp, @@ -423,8 +426,8 @@ def compute_control(self, goal_dict, control_dict): rest_qpos=self.reset_joint_pos, control_dim=self.control_dim, decouple_pos_ori=self.decouple_pos_ori, - base_lin_vel=base_lin_vel, - base_ang_vel=base_ang_vel, + base_lin_vel=cb.as_float32(base_lin_vel), + 
base_ang_vel=cb.as_float32(base_ang_vel), ).flatten() # Add gravity compensation @@ -440,20 +443,20 @@ def compute_control(self, goal_dict, control_dict): def compute_no_op_goal(self, control_dict): # No-op is maintaining current pose - target_pos = control_dict[f"{self.task_name}_pos_relative"].clone() - target_quat = control_dict[f"{self.task_name}_quat_relative"].clone() + target_pos = cb.copy(control_dict[f"{self.task_name}_pos_relative"]) + target_quat = cb.copy(control_dict[f"{self.task_name}_quat_relative"]) # Convert quat into eef ori mat return dict( - target_pos=target_pos, - target_ori_mat=T.quat2mat(target_quat), + target_pos=cb.as_float32(target_pos), + target_ori_mat=cb.as_float32(cb.T.quat2mat(target_quat)), ) def _compute_no_op_action(self, control_dict): pos_relative = control_dict[f"{self.task_name}_pos_relative"] quat_relative = control_dict[f"{self.task_name}_quat_relative"] - command = th.zeros(6, dtype=th.float32, device=pos_relative.device) + command = cb.zeros(6) # Handle position if self.mode == "absolute_pose": @@ -464,7 +467,7 @@ def _compute_no_op_action(self, control_dict): # Handle orientation if self.mode in ("pose_absolute_ori", "absolute_pose"): - command[3:] = T.quat2axisangle(quat_relative) + command[3:] = cb.T.quat2axisangle(quat_relative) else: # For these modes, we don't need to add orientation to the command pass @@ -486,15 +489,21 @@ def command_dim(self): return self._command_dim +import torch as th + +import omnigibson.utils.transform_utils as TT + + @th.jit.script -def _compute_osc_torques( +def _compute_osc_torques_torch( q: th.Tensor, qd: th.Tensor, mm: th.Tensor, j_eef: th.Tensor, ee_pos: th.Tensor, ee_mat: th.Tensor, - ee_vel: th.Tensor, + ee_lin_vel: th.Tensor, + ee_ang_vel_err: th.Tensor, goal_pos: th.Tensor, goal_ori_mat: th.Tensor, kp: th.Tensor, @@ -512,7 +521,7 @@ def _compute_osc_torques( # Calculate error pos_err = goal_pos - ee_pos - ori_err = orientation_error(goal_ori_mat, ee_mat) + ori_err = TT.orientation_error(goal_ori_mat, ee_mat) err = th.cat((pos_err, ori_err)) # Vel target is the base velocity as experienced by the end effector @@ -521,9 +530,8 @@ def _compute_osc_torques( # due to the base linear velocity # For angular velocity, we need to make sure we compute the difference between the base and eef velocity # properly, not simply "subtraction" as in the linear case - lin_vel_err = base_lin_vel + th.linalg.cross(base_ang_vel, ee_pos) - ee_vel[:3] - ang_vel_err = T.quat2axisangle(T.quat_multiply(T.axisangle2quat(-ee_vel[3:]), T.axisangle2quat(base_ang_vel))) - vel_err = th.cat((lin_vel_err, ang_vel_err)) + lin_vel_err = base_lin_vel + th.linalg.cross(base_ang_vel, ee_pos) - ee_lin_vel + vel_err = th.cat((lin_vel_err, ee_ang_vel_err)) # Determine desired wrench err = th.unsqueeze(kp * err + kd * vel_err, dim=-1) @@ -559,3 +567,91 @@ def _compute_osc_torques( u += (th.eye(control_dim, dtype=th.float32) - j_eef.T @ j_eef_inv) @ u_null return u + + +import numpy as np +from numba import jit + +import omnigibson.utils.transform_utils_np as NT + + +# Use numba since faster +@jit(nopython=True) +def _compute_osc_torques_numpy( + q, + qd, + mm, + j_eef, + ee_pos, + ee_mat, + ee_lin_vel, + ee_ang_vel_err, + goal_pos, + goal_ori_mat, + kp, + kd, + kp_null, + kd_null, + rest_qpos, + control_dim, + decouple_pos_ori, + base_lin_vel, + base_ang_vel, +): + # Compute the inverse + mm_inv = np.linalg.inv(mm) + + # Calculate error + pos_err = goal_pos - ee_pos + ori_err = NT.orientation_error(goal_ori_mat, ee_mat).astype(np.float32) + err = 
np.concatenate((pos_err, ori_err)) + + # Vel target is the base velocity as experienced by the end effector + # For angular velocity, this is just the base angular velocity + # For linear velocity, this is the base linear velocity PLUS the net linear velocity experienced + # due to the base linear velocity + # For angular velocity, we need to make sure we compute the difference between the base and eef velocity + # properly, not simply "subtraction" as in the linear case + lin_vel_err = base_lin_vel + np.cross(base_ang_vel, ee_pos) - ee_lin_vel + vel_err = np.concatenate((lin_vel_err, ee_ang_vel_err)) + + # Determine desired wrench + err = np.expand_dims(kp * err + kd * vel_err, axis=-1) + m_eef_inv = j_eef @ mm_inv @ j_eef.T + m_eef = np.linalg.inv(m_eef_inv) + + if decouple_pos_ori: + # # More efficient, but numba doesn't support 3D tensor operations yet + # j_eef_batch = j_eef.reshape(2, 3, -1) + # m_eef_pose_inv = np.matmul(np.matmul(j_eef_batch, np.expand_dims(mm_inv, axis=0)), np.transpose(j_eef_batch, (0, 2, 1))) + # m_eef_pose = np.linalg.inv(m_eef_pose_inv) # Shape (2, 3, 3) + # wrench = np.matmul(m_eef_pose, err.reshape(2, 3, 1)).flatten() + m_eef_pos_inv = j_eef[:3, :] @ mm_inv @ j_eef[:3, :].T + m_eef_ori_inv = j_eef[3:, :] @ mm_inv @ j_eef[3:, :].T + m_eef_pos = np.linalg.inv(m_eef_pos_inv) + m_eef_ori = np.linalg.inv(m_eef_ori_inv) + wrench_pos = m_eef_pos @ err[:3, :] + wrench_ori = m_eef_ori @ err[3:, :] + wrench = np.concatenate((wrench_pos, wrench_ori)) + else: + wrench = m_eef @ err + + # Compute OSC torques + u = j_eef.T @ wrench + + # Nullspace control torques `u_null` prevents large changes in joint configuration + # They are added into the nullspace of OSC so that the end effector orientation remains constant + # roboticsproceedings.org/rss07/p31.pdf + if rest_qpos is not None: + j_eef_inv = m_eef @ j_eef @ mm_inv + u_null = kd_null * -qd + kp_null * ((rest_qpos - q + np.pi) % (2 * np.pi) - np.pi) + u_null = mm @ np.expand_dims(u_null, axis=-1).astype(np.float32) + u += (np.eye(control_dim, dtype=np.float32) - j_eef.T @ j_eef_inv) @ u_null + + return u + + +# Set these as part of the backend values +setattr(_ComputeBackend, "compute_osc_torques", None) +setattr(_ComputeTorchBackend, "compute_osc_torques", _compute_osc_torques_torch) +setattr(_ComputeNumpyBackend, "compute_osc_torques", _compute_osc_torques_numpy) diff --git a/omnigibson/envs/data_wrapper.py b/omnigibson/envs/data_wrapper.py index 8e3e19557..3c15ed57e 100644 --- a/omnigibson/envs/data_wrapper.py +++ b/omnigibson/envs/data_wrapper.py @@ -60,6 +60,8 @@ def __init__(self, env, output_path, only_successes=True): self.add_metadata(group=data_grp, name="config", data=config) self.add_metadata(group=data_grp, name="scene_file", data=scene_file) + self.is_recording = False + # Run super super().__init__(env=env) @@ -86,12 +88,13 @@ def step(self, action): next_obs, reward, terminated, truncated, info = self.env.step(action) self.step_count += 1 - # Aggregate step data - step_data = self._parse_step_data(action, next_obs, reward, terminated, truncated, info) + if self.is_recording: + # Aggregate step data + step_data = self._parse_step_data(action, next_obs, reward, terminated, truncated, info) - # Update obs and traj history - self.current_traj_history.append(step_data) - self.current_obs = next_obs + # Update obs and traj history + self.current_traj_history.append(step_data) + self.current_obs = next_obs return next_obs, reward, terminated, truncated, info @@ -290,6 +293,18 @@ def __init__(self, env, 
output_path, viewport_camera_path="/World/viewer_camera" # Configure the simulator to optimize for data collection self._optimize_sim_for_data_collection(viewport_camera_path=viewport_camera_path) + def start_recording(self): + """ + Start recording data + """ + self.is_recording = True + + def stop_recording(self): + """ + Stop recording data + """ + self.is_recording = False + def _optimize_sim_for_data_collection(self, viewport_camera_path): """ Configures the simulator to optimize for data collection @@ -310,12 +325,13 @@ def _optimize_sim_for_data_collection(self, viewport_camera_path): # toggling these settings to be True -> False -> True # Only setting it to True once will actually freeze the GUI for some reason! if not gm.HEADLESS: - lazy.carb.settings.get_settings().set_bool("/app/asyncRendering", True) - lazy.carb.settings.get_settings().set_bool("/app/asyncRenderingLowLatency", True) - lazy.carb.settings.get_settings().set_bool("/app/asyncRendering", False) - lazy.carb.settings.get_settings().set_bool("/app/asyncRenderingLowLatency", False) - lazy.carb.settings.get_settings().set_bool("/app/asyncRendering", True) - lazy.carb.settings.get_settings().set_bool("/app/asyncRenderingLowLatency", True) + # TODO: VR doesn't work with async rendering + # lazy.carb.settings.get_settings().set_bool("/app/asyncRendering", True) + # lazy.carb.settings.get_settings().set_bool("/app/asyncRenderingLowLatency", True) + # lazy.carb.settings.get_settings().set_bool("/app/asyncRendering", False) + # lazy.carb.settings.get_settings().set_bool("/app/asyncRenderingLowLatency", False) + # lazy.carb.settings.get_settings().set_bool("/app/asyncRendering", True) + # lazy.carb.settings.get_settings().set_bool("/app/asyncRenderingLowLatency", True) # Disable mouse grabbing since we're only using the UI passively lazy.carb.settings.get_settings().set_bool("/physics/mouseInteractionEnabled", False) diff --git a/omnigibson/envs/env_base.py b/omnigibson/envs/env_base.py index 6b1796160..2e578d60d 100644 --- a/omnigibson/envs/env_base.py +++ b/omnigibson/envs/env_base.py @@ -717,7 +717,7 @@ def reset(self, get_obs=True, **kwargs): log.error(f"Expected: {exp_obs[k]}") log.error(f"Received: {real_obs[k]}") - raise ValueError("Observation space does not match returned observations!") + # raise ValueError("Observation space does not match returned observations!") return obs, {} diff --git a/omnigibson/examples/objects/import_custom_object.py b/omnigibson/examples/objects/import_custom_object.py index 688b95d29..959d80281 100644 --- a/omnigibson/examples/objects/import_custom_object.py +++ b/omnigibson/examples/objects/import_custom_object.py @@ -61,8 +61,8 @@ def import_custom_object( overwrite: bool, ): """ - Imports an externally-defined object asset into an OmniGibson-compatible USD format and saves the imported asset - files to the external dataset directory (gm.CUSTOM_DATASET_PATH) + Imports a custom-defined object asset into an OmniGibson-compatible USD format and saves the imported asset + files to the custom dataset directory (gm.CUSTOM_DATASET_PATH) """ assert len(model) == 6 and model.isalpha(), "Model name must be 6 characters long and contain only letters." 
collision_method = None if collision_method == "none" else collision_method diff --git a/omnigibson/examples/teleoperation/vr_robot_control_demo.py b/omnigibson/examples/teleoperation/vr_robot_control_demo.py new file mode 100644 index 000000000..509720abe --- /dev/null +++ b/omnigibson/examples/teleoperation/vr_robot_control_demo.py @@ -0,0 +1,104 @@ +""" +Example script for interacting with OmniGibson scenes with VR and BehaviorRobot. +""" + +import torch as th + +import omnigibson as og +from omnigibson.macros import gm +from omnigibson.utils.teleop_utils import OVXRSystem + +gm.ENABLE_OBJECT_STATES = False +gm.ENABLE_TRANSITION_RULES = False +gm.ENABLE_FLATCACHE = True +gm.GUI_VIEWPORT_ONLY = True + +# import torch._dynamo +# torch._dynamo.config.suppress_errors = True + + +def main(): + """ + Spawn a BehaviorRobot in Rs_int and users can navigate around and interact with the scene using VR. + """ + # Create the config for generating the environment we want + scene_cfg = {"type": "InteractiveTraversableScene", "scene_model": "Rs_int"} + robot0_cfg = { + "type": "R1", + "obs_modalities": ["rgb"], + "controller_config": { + "arm_left": { + "name": "InverseKinematicsController", + "mode": "absolute_pose", + "command_input_limits": None, + "command_output_limits": None, + }, + "arm_right": { + "name": "InverseKinematicsController", + "mode": "absolute_pose", + "command_input_limits": None, + "command_output_limits": None, + }, + "gripper_left": {"name": "MultiFingerGripperController", "command_input_limits": "default"}, + "gripper_right": {"name": "MultiFingerGripperController", "command_input_limits": "default"}, + }, + "action_normalize": False, + "reset_joint_pos": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 1.0, + -1.8000, + -0.8000, + 0.0000, + -0.0068, + 0.0059, + 2.6054, + 2.5988, + -1.4515, + -1.4478, + -0.0065, + 0.0052, + 1.5670, + -1.5635, + -1.1428, + 1.1610, + 0.0087, + 0.0087, + 0.0087, + 0.0087, + ], + } + cfg = dict(scene=scene_cfg, robots=[robot0_cfg]) + + # Create the environment + env = og.Environment(configs=cfg) + env.reset() + # start vrsys + vrsys = OVXRSystem( + robot=env.robots[0], + show_control_marker=True, + system="SteamVR", + eef_tracking_mode="controller", + align_anchor_to="camera", + # roll, pitch, yaw + view_angle_limits=[180, 30, 30], + ) + vrsys.start() + + for _ in range(3000): + # update the VR system + vrsys.update() + # get the action from the VR system and step the environment + env.step(vrsys.get_robot_teleop_action()) + + print("Cleaning up...") + vrsys.stop() + og.clear() + + +if __name__ == "__main__": + main() diff --git a/omnigibson/examples/teleoperation/vr_scene_tour_demo.py b/omnigibson/examples/teleoperation/vr_scene_tour_demo.py new file mode 100644 index 000000000..f73169058 --- /dev/null +++ b/omnigibson/examples/teleoperation/vr_scene_tour_demo.py @@ -0,0 +1,71 @@ +""" +Example script for interacting with OmniGibson scenes with VR. +""" + +import torch as th +import torch._dynamo + +import omnigibson as og +from omnigibson.macros import gm +from omnigibson.utils.asset_utils import get_available_og_scenes +from omnigibson.utils.teleop_utils import OVXRSystem +from omnigibson.utils.ui_utils import choose_from_options + +torch._dynamo.config.suppress_errors = True + + +def main(): + """ + Users can navigate around and interact with a selected scene using VR. 
+ """ + + # Choose the scene model to load + scenes = get_available_og_scenes() + scene_model = choose_from_options(options=scenes, name="scene model") + + # Create the config for generating the environment we want + scene_cfg = {"type": "Scene"} + # scene_cfg = {"type": "InteractiveTraversableScene", "scene_model": scene_model} + robot0_cfg = { + "type": "Fetch", + "obs_modalities": [], + } + cfg = dict(scene=scene_cfg, robots=[robot0_cfg]) + + # Create the environment + env = og.Environment(configs=cfg) + env.reset() + # start vrsys + vrsys = OVXRSystem( + robot=env.robots[0], + show_control_marker=True, + system="SteamVR", + eef_tracking_mode="disabled", + align_anchor_to="touchpad", + ) + vrsys.start() + # set headset position to be 1m above ground and facing +x + # vrsys.set_initial_transform(pos=th.tensor([0.0, 0.2, 1.0]), orn=th.tensor([0.0, 0.0, 0.0, 1.0])) + vrsys.vr_profile.set_physical_world_to_world_anchor_transform_to_match_xr_device( + vrsys.og2xr(th.tensor([0.1, 0.2, 1.0]), th.tensor([0.0, 0.0, 0.0, 1.0])).numpy(), vrsys.hmd + ) + + # generate a list of pitch angles in radians + pitch_angles = th.linspace(-th.pi / 2, th.pi / 2, 10) + yaw_angles = th.linspace(-th.pi / 2, th.pi / 2, 10) + + # main simulation loop + while True: + # step the VR system to get the latest data from VR runtime + # for angle in yaw_angles: + vrsys.update() + # vrsys.vr_profile.add_rotate_physical_world_around_device(yaw = angle.item(), device=vrsys.hmd) + og.sim.render() + + # Shut down the environment cleanly at the end + vrsys.stop() + og.clear() + + +if __name__ == "__main__": + main() diff --git a/omnigibson/examples/teleoperation/vr_simple_demo.py b/omnigibson/examples/teleoperation/vr_simple_demo.py deleted file mode 100644 index fa4cad1d8..000000000 --- a/omnigibson/examples/teleoperation/vr_simple_demo.py +++ /dev/null @@ -1,49 +0,0 @@ -""" -Example script for interacting with OmniGibson scenes with VR and BehaviorRobot. -""" - -import omnigibson as og -from omnigibson.utils.teleop_utils import OVXRSystem - - -def main(): - """ - Spawn a BehaviorRobot in Rs_int and users can navigate around and interact with the scene using VR. 
- """ - # Create the config for generating the environment we want - scene_cfg = {"type": "Scene"} # "InteractiveTraversableScene", "scene_model": "Rs_int"} - robot0_cfg = { - "type": "Tiago", - "controller_config": { - "gripper_left": {"command_input_limits": "default"}, - "gripper_right": {"command_input_limits": "default"}, - }, - } - cfg = dict(scene=scene_cfg, robots=[robot0_cfg]) - - # Create the environment - env = og.Environment(configs=cfg) - env.reset() - # start vrsys - vrsys = OVXRSystem( - robot=env.robots[0], show_control_marker=False, system="SteamVR", align_anchor_to_robot_base=True - ) - vrsys.start() - # set headset position to be 1m above ground and facing +x - vrsys.set_initial_transform(pos=[0, 0, 1], orn=[0, 0, 0, 1]) - - # main simulation loop - for _ in range(10000): - # step the VR system to get the latest data from VR runtime - vrsys.update() - # generate robot action and step the environment - action = vrsys.teleop_data_to_action() - env.step(action) - - # Shut down the environment cleanly at the end - vrsys.stop() - og.clear() - - -if __name__ == "__main__": - main() diff --git a/omnigibson/install.py b/omnigibson/install.py index eec12a267..8156fe80b 100644 --- a/omnigibson/install.py +++ b/omnigibson/install.py @@ -14,32 +14,32 @@ # List of NVIDIA PyPI packages needed for OmniGibson ISAAC_SIM_PACKAGES = [ - "omniverse_kit-106.0.1.126909", - "isaacsim_kernel-4.1.0.0", - "isaacsim_app-4.1.0.0", - "isaacsim_core-4.1.0.0", - "isaacsim_gui-4.1.0.0", - "isaacsim_utils-4.1.0.0", - "isaacsim_storage-4.1.0.0", - "isaacsim_asset-4.1.0.0", - "isaacsim_sensor-4.1.0.0", - "isaacsim_robot_motion-4.1.0.0", - "isaacsim_robot-4.1.0.0", - "isaacsim_benchmark-4.1.0.0", - "isaacsim_code_editor-4.1.0.0", - "isaacsim_ros1-4.1.0.0", - "isaacsim_cortex-4.1.0.0", - "isaacsim_example-4.1.0.0", - "isaacsim_replicator-4.1.0.0", - "isaacsim_rl-4.1.0.0", - "isaacsim_robot_setup-4.1.0.0", - "isaacsim_ros2-4.1.0.0", - "isaacsim_template-4.1.0.0", - "isaacsim_test-4.1.0.0", - "isaacsim-4.1.0.0", - "isaacsim_extscache_physics-4.1.0.0", - "isaacsim_extscache_kit-4.1.0.0", - "isaacsim_extscache_kit_sdk-4.1.0.0", + "omniverse_kit-106.1.0.140981", + "isaacsim_kernel-4.2.0.2", + "isaacsim_app-4.2.0.2", + "isaacsim_core-4.2.0.2", + "isaacsim_gui-4.2.0.2", + "isaacsim_utils-4.2.0.2", + "isaacsim_storage-4.2.0.2", + "isaacsim_asset-4.2.0.2", + "isaacsim_sensor-4.2.0.2", + "isaacsim_robot_motion-4.2.0.2", + "isaacsim_robot-4.2.0.2", + "isaacsim_benchmark-4.2.0.2", + "isaacsim_code_editor-4.2.0.2", + "isaacsim_ros1-4.2.0.2", + "isaacsim_cortex-4.2.0.2", + "isaacsim_example-4.2.0.2", + "isaacsim_replicator-4.2.0.2", + "isaacsim_rl-4.2.0.2", + "isaacsim_robot_setup-4.2.0.2", + "isaacsim_ros2-4.2.0.2", + "isaacsim_template-4.2.0.2", + "isaacsim_test-4.2.0.2", + "isaacsim-4.2.0.2", + "isaacsim_extscache_physics-4.2.0.2", + "isaacsim_extscache_kit-4.2.0.2", + "isaacsim_extscache_kit_sdk-4.2.0.2", ] BASE_URL = "https://pypi.nvidia.com" @@ -216,7 +216,7 @@ def _launcher_based_install(isaac_sim_path: Optional[Path]): isaac_version_str = version_content.split("-")[0] isaac_version_tuple = tuple(map(int, isaac_version_str.split(".")[:3])) - if isaac_version_tuple not in ((4, 0, 0), (4, 1, 0)): + if isaac_version_tuple not in ((4, 0, 0), (4, 1, 0), (4, 2, 0)): click.echo(f"Isaac Sim version {isaac_version_str} is not supported by OmniGibson.") return False diff --git a/omnigibson/macros.py b/omnigibson/macros.py index 56073daff..46b03d09c 100644 --- a/omnigibson/macros.py +++ b/omnigibson/macros.py @@ -77,6 +77,10 
@@ def determine_gm_path(default_path, env_var_name): gm.HTTP_PORT = os.getenv("OMNIGIBSON_HTTP_PORT", 8211) gm.WEBRTC_PORT = os.getenv("OMNIGIBSON_WEBRTC_PORT", 49100) +# Whether to use numpy or torch controller backend. Numpy is significantly faster and should be used +# for single-threaded (i.e.: non-large scale parallelized env) purposes +gm.USE_NUMPY_CONTROLLER_BACKEND = True + # Whether only the viewport should be shown in the GUI or not (if not, other peripherals are additionally shown) # CANNOT be set at runtime gm.GUI_VIEWPORT_ONLY = False diff --git a/omnigibson/object_states/factory.py b/omnigibson/object_states/factory.py index c4d26c88c..9bbe27134 100644 --- a/omnigibson/object_states/factory.py +++ b/omnigibson/object_states/factory.py @@ -65,6 +65,12 @@ ] ) +_MIST_STATE_SET = frozenset( + [ + ParticleApplier, + ] +) + _TEXTURE_CHANGE_STATE_SET = frozenset( [ Frozen, @@ -84,7 +90,7 @@ ] ) -_VISUAL_STATE_SET = frozenset(_FIRE_STATE_SET | _STEAM_STATE_SET | _TEXTURE_CHANGE_STATE_SET) +_VISUAL_STATE_SET = frozenset(_FIRE_STATE_SET | _STEAM_STATE_SET | _TEXTURE_CHANGE_STATE_SET | _MIST_STATE_SET) _TEXTURE_CHANGE_PRIORITY = { Frozen: 4, @@ -107,6 +113,10 @@ def get_steam_states(): return _STEAM_STATE_SET +def get_mist_states(): + return _MIST_STATE_SET + + def get_texture_change_states(): return _TEXTURE_CHANGE_STATE_SET diff --git a/omnigibson/object_states/particle_modifier.py b/omnigibson/object_states/particle_modifier.py index a7f655eab..f1342548d 100644 --- a/omnigibson/object_states/particle_modifier.py +++ b/omnigibson/object_states/particle_modifier.py @@ -1012,7 +1012,6 @@ def __init__( self.projection_system = None self.projection_system_prim = None - self.projection_emitter = None # Run super super().__init__(obj=obj, method=method, conditions=conditions, projection_mesh_params=projection_mesh_params) @@ -1026,79 +1025,6 @@ def _initialize(self): # This will initialize the system if it's not initialized already. system = self.obj.scene.get_system(system_name) - if self.visualize: - assert self._projection_mesh_params["type"] in { - "Cylinder", - "Cone", - }, f"{self.__class__.__name__} visualization only supports Cylinder and Cone types!" 
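The new gm.USE_NUMPY_CONTROLLER_BACKEND macro above selects which array library backs the _compute_backend alias (cb) that the controller code throughout this patch routes through. The actual omnigibson.utils.backend_utils implementation is not shown in this diff; the following is only a rough sketch of such a dispatch layer, with hypothetical class names:

import numpy as np
import torch as th

class _NumpyBackendSketch:
    # thin wrappers so controller code can stay backend-agnostic
    array = staticmethod(lambda x: np.asarray(x))
    zeros = staticmethod(np.zeros)
    where = staticmethod(np.where)

    @staticmethod
    def to_torch(x):
        return th.as_tensor(x)

    @staticmethod
    def from_torch(x):
        return x.cpu().numpy() if isinstance(x, th.Tensor) else x

class _TorchBackendSketch:
    array = staticmethod(lambda x: th.as_tensor(x))
    zeros = staticmethod(th.zeros)
    where = staticmethod(th.where)

    @staticmethod
    def to_torch(x):
        return x

    @staticmethod
    def from_torch(x):
        return x

use_numpy = True  # mirrors gm.USE_NUMPY_CONTROLLER_BACKEND
cb = _NumpyBackendSketch if use_numpy else _TorchBackendSketch
assert cb.zeros(3).shape == (3,)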
- radius, height = ( - th.mean(self._projection_mesh_params["extents"][:2]) / 2.0, - self._projection_mesh_params["extents"][2], - ) - # Generate the projection visualization - particle_radius = ( - m.VISUAL_PARTICLE_PROJECTION_PARTICLE_RADIUS - if self.obj.scene.is_visual_particle_system(system_name=system.name) - else system.particle_radius - ) - - name_prefix = f"{self.obj.name}_{self.__class__.__name__}" - # Create the projection visualization if it doesn't already exist, otherwise we reference it directly - projection_name = f"{name_prefix}_projection_visualization" - projection_path = f"/OmniGraph/{projection_name}" - projection_visualization_path = f"{self.link.prim_path}/projection_visualization" - if lazy.omni.isaac.core.utils.prims.is_prim_path_valid(projection_path): - self.projection_system = lazy.omni.isaac.core.utils.prims.get_prim_at_path(projection_path) - self.projection_emitter = lazy.omni.isaac.core.utils.prims.get_prim_at_path( - f"{projection_path}/emitter" - ) - else: - self.projection_system, self.projection_emitter = create_projection_visualization( - scene=self.obj.scene, - prim_path=projection_visualization_path, - shape=self._projection_mesh_params["type"], - projection_name=projection_name, - projection_radius=radius, - projection_height=height, - particle_radius=particle_radius, - parent_scale=self.link.scale, - material=system.material, - ) - relative_projection_system_path = absolute_prim_path_to_scene_relative( - self.obj.scene, self.projection_system.GetPrimPath().pathString - ) - self.projection_system_prim = BasePrim( - relative_prim_path=relative_projection_system_path, name=projection_name - ) - self.projection_system_prim.load(self.obj.scene) - - # Create the visual geom instance referencing the generated source mesh prim, and then hide it - relative_projection_source_path = absolute_prim_path_to_scene_relative( - self.obj.scene, projection_visualization_path - ) - self.projection_source_sphere = VisualGeomPrim( - relative_prim_path=relative_projection_source_path, name=f"{name_prefix}_projection_source_sphere" - ) - self.projection_source_sphere.load(self.obj.scene) - self.projection_source_sphere.initialize() - self.projection_source_sphere.visible = False - # Rotate by 90 degrees in y-axis so that the projection visualization aligns with the projection mesh - self.projection_source_sphere.set_position_orientation( - orientation=T.euler2quat(th.tensor([0, math.pi / 2, 0], dtype=th.float32)), frame="parent" - ) - - # Make sure the meta mesh is aligned with the meta link if visualizing - # This corresponds to checking (a) position of tip of projection mesh should align with origin of - # metalink, and (b) zero relative orientation between the metalink and the projection mesh - local_pos, local_quat = self.projection_mesh.get_position_orientation(frame="parent") - assert th.all( - th.isclose(local_pos + th.tensor([0, 0, height / 2.0]), th.zeros_like(local_pos)) - ), "Projection mesh tip should align with metalink position!" - local_euler = T.quat2euler(local_quat) - assert th.all( - th.isclose(local_euler, th.zeros_like(local_euler)) - ), "Projection mesh orientation should align with metalink orientation!" 
- # Store which method to use for sampling particle locations if self._sample_with_raycast: if self.method == ParticleModifyMethod.PROJECTION: @@ -1193,16 +1119,6 @@ def _compute_particle_spawn_information(self, system): ) self._in_mesh_local_particle_directions = directions / th.norm(directions, dim=-1).reshape(-1, 1) - def _update(self): - # If we're about to check for modification, update whether it the visualization should be active or not - if self.visualize and self._current_step == 0: - # Only one system in our conditions, so next(iter()) suffices - is_active = all(condition(self.obj) for condition in next(iter(self.conditions.values()))) - self.projection_emitter.GetProperty("inputs:active").Set(is_active) - - # Run super - super()._update() - def remove(self): # We need to remove the projection visualization if it exists if self.projection_system_prim is not None: @@ -1489,8 +1405,8 @@ def systems_to_check(self): @property def projection_is_active(self): - # Only active if the projection mesh is enabled - return self.projection_emitter.GetProperty("inputs:active").Get() + # Only one system in our conditions, so next(iter()) suffices + return all(condition(self.obj) for condition in next(iter(self.conditions.values()))) @classproperty def metalink_prefix(cls): diff --git a/omnigibson/objects/controllable_object.py b/omnigibson/objects/controllable_object.py index 8feb77008..ad96d7f10 100644 --- a/omnigibson/objects/controllable_object.py +++ b/omnigibson/objects/controllable_object.py @@ -11,6 +11,7 @@ from omnigibson.controllers import create_controller from omnigibson.controllers.controller_base import ControlType from omnigibson.objects.object_base import BaseObject +from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.utils.constants import JointType, PrimType from omnigibson.utils.numpy_utils import NumpyTypes from omnigibson.utils.python_utils import CachedFunctions, assert_valid_key, merge_nested_dicts @@ -277,7 +278,7 @@ def _load_controllers(self): cfg["command_input_limits"] = "default" # default is normalized (-1, 1) # Create the controller - controller = create_controller(**cfg) + controller = create_controller(**cb.from_torch_recursive(cfg)) # Verify the controller's DOFs can all be driven for idx in controller.dof_idx: assert self._joints[ @@ -409,8 +410,8 @@ def _create_continuous_action_space(self): return gym.spaces.Box( shape=(self.action_dim,), - low=th.cat(low).cpu().numpy(), - high=th.cat(high).cpu().numpy(), + low=cb.to_numpy(cb.cat(low)), + high=cb.to_numpy(cb.cat(high)), dtype=NumpyTypes.FLOAT32, ) @@ -441,6 +442,9 @@ def apply_action(self, action): self.action_dim, len(action) ) + # Convert action from torch if necessary + action = cb.from_torch(action) + # First, loop over all controllers, and update the desired command idx = 0 @@ -488,9 +492,9 @@ def step(self): idx += controller.command_dim # Compose controls - u_vec = th.zeros(self.n_dof) - # By default, the control type is None and the control value is 0 (th.zeros) - i.e. no control applied - u_type_vec = th.tensor([ControlType.NONE] * self.n_dof) + u_vec = cb.zeros(self.n_dof) + # By default, the control type is Effort and the control value is 0 (th.zeros) - i.e. 
no control applied + u_type_vec = cb.array([ControlType.EFFORT] * self.n_dof) for group, ctrl in control.items(): idx = self._controllers[group].dof_idx u_vec[idx] = ctrl["value"] @@ -549,87 +553,31 @@ def deploy_control(self, control, control_type): """ # Run sanity check assert len(control) == len(control_type) == self.n_dof, ( - "Control signals, control types, and number of DOF should all be the same!" - "Got {}, {}, and {} respectively.".format(len(control), len(control_type), self.n_dof) + f"Control signals, control types, and number of DOF should all be the same!" + f"Got {len(control)}, {len(control_type)}, and {self.n_dof} respectively." ) - # Set indices manually so that we're standardized - indices = range(self.n_dof) - - # Standardize normalized input - n_indices = len(indices) - - # Loop through controls and deploy - # We have to use delicate logic to account for the edge cases where a single joint may contain > 1 DOF - # (e.g.: spherical joint) - pos_vec, pos_idxs, using_pos = [], [], False - vel_vec, vel_idxs, using_vel = [], [], False - eff_vec, eff_idxs, using_eff = [], [], False - cur_indices_idx = 0 - while cur_indices_idx != n_indices: - # Grab the current DOF index we're controlling and find the corresponding joint - joint = self._dof_to_joints[indices[cur_indices_idx]] - cur_ctrl_idx = indices[cur_indices_idx] - joint_dof = joint.n_dof - if joint_dof > 1: - # Run additional sanity checks since the joint has more than one DOF to make sure our controls, - # control types, and indices all match as expected - - # Make sure the indices are mapped correctly - assert ( - indices[cur_indices_idx + joint_dof] == cur_ctrl_idx + joint_dof - ), "Got mismatched control indices for a single joint!" - # Check to make sure all joints, control_types, and normalized as all the same over n-DOF for the joint - for group_name, group in zip( - ("joints", "control_types"), - (self._dof_to_joints, control_type), - ): - assert ( - len({group[indices[cur_indices_idx + i]] for i in range(joint_dof)}) == 1 - ), f"Not all {group_name} were the same when trying to deploy control for a single joint!" - # Assuming this all passes, we grab the control subvector, type, and normalized value accordingly - ctrl = control[cur_ctrl_idx : cur_ctrl_idx + joint_dof] - else: - # Grab specific control. 
No need to do checks since this is a single value - ctrl = control[cur_ctrl_idx] - - # Deploy control based on type - ctrl_type = control_type[ - cur_ctrl_idx - ] # In multi-DOF joint case all values were already checked to be the same - if ctrl_type == ControlType.EFFORT: - eff_vec.append(ctrl) - eff_idxs.append(cur_ctrl_idx) - using_eff = True - elif ctrl_type == ControlType.VELOCITY: - vel_vec.append(ctrl) - vel_idxs.append(cur_ctrl_idx) - using_vel = True - elif ctrl_type == ControlType.POSITION: - pos_vec.append(ctrl) - pos_idxs.append(cur_ctrl_idx) - using_pos = True - elif ctrl_type == ControlType.NONE: - # Set zero efforts - eff_vec.append(0) - eff_idxs.append(cur_ctrl_idx) - using_eff = True - else: - raise ValueError("Invalid control type specified: {}".format(ctrl_type)) - # Finally, increment the current index based on how many DOFs were just controlled - cur_indices_idx += joint_dof # set the targets for joints - if using_pos: + pos_idxs = cb.where(control_type == ControlType.POSITION)[0] + if len(pos_idxs) > 0: ControllableObjectViewAPI.set_joint_position_targets( - self.articulation_root_path, positions=th.tensor(pos_vec, dtype=th.float), indices=th.tensor(pos_idxs) + self.articulation_root_path, + positions=control[pos_idxs], + indices=pos_idxs, ) - if using_vel: + vel_idxs = cb.where(control_type == ControlType.VELOCITY)[0] + if len(vel_idxs) > 0: ControllableObjectViewAPI.set_joint_velocity_targets( - self.articulation_root_path, velocities=th.tensor(vel_vec, dtype=th.float), indices=th.tensor(vel_idxs) + self.articulation_root_path, + velocities=control[vel_idxs], + indices=vel_idxs, ) - if using_eff: + eff_idxs = cb.where(control_type == ControlType.EFFORT)[0] + if len(eff_idxs) > 0: ControllableObjectViewAPI.set_joint_efforts( - self.articulation_root_path, efforts=th.tensor(eff_vec, dtype=th.float), indices=th.tensor(eff_idxs) + self.articulation_root_path, + efforts=control[eff_idxs], + indices=eff_idxs, ) def get_control_dict(self): diff --git a/omnigibson/objects/dataset_object.py b/omnigibson/objects/dataset_object.py index 722ab2a06..11b525b83 100644 --- a/omnigibson/objects/dataset_object.py +++ b/omnigibson/objects/dataset_object.py @@ -244,7 +244,7 @@ def _post_load(self): scale = th.ones(3) if self._load_config["scale"] is None else self._load_config["scale"] # Assert that the scale does not have too small dimensions - assert th.all(scale > 1e-4), f"Scale of {self.name} is too small: {scale}" + assert th.all(th.tensor(scale) > 1e-4), f"Scale of {self.name} is too small: {scale}" # Set this scale in the load config -- it will automatically scale the object during self.initialize() self._load_config["scale"] = scale diff --git a/omnigibson/objects/object_base.py b/omnigibson/objects/object_base.py index c34231551..43e361a56 100644 --- a/omnigibson/objects/object_base.py +++ b/omnigibson/objects/object_base.py @@ -228,13 +228,16 @@ def _initialize(self): self._highlight_cached_values = dict() for material in self.materials: + if material.is_glass: + # Skip glass materials + continue self._highlight_cached_values[material] = { "enable_emission": material.enable_emission, "emissive_color": material.emissive_color, "emissive_intensity": material.emissive_intensity, } - @property + @cached_property def articulation_root_path(self): has_articulated_joints, has_fixed_joints = self.n_joints > 0, self.n_fixed_joints > 0 if self.kinematic_only or ((not has_articulated_joints) and (not has_fixed_joints)): @@ -334,6 +337,9 @@ def highlighted(self, enabled): return for 
material in self.materials: + if material.is_glass: + # Skip glass materials + continue if enabled: # Store values before swapping self._highlight_cached_values[material] = { diff --git a/omnigibson/objects/stateful_object.py b/omnigibson/objects/stateful_object.py index a1bd5fc1e..650f23014 100644 --- a/omnigibson/objects/stateful_object.py +++ b/omnigibson/objects/stateful_object.py @@ -12,6 +12,7 @@ from omnigibson.object_states.factory import ( get_default_states, get_fire_states, + get_mist_states, get_requirements_for_ability, get_state_name, get_states_by_dependency_order, @@ -24,7 +25,7 @@ from omnigibson.object_states.heat_source_or_sink import HeatSourceOrSink from omnigibson.object_states.object_state_base import REGISTERED_OBJECT_STATES from omnigibson.object_states.on_fire import OnFire -from omnigibson.object_states.particle_modifier import ParticleRemover +from omnigibson.object_states.particle_modifier import ParticleApplier, ParticleRemover from omnigibson.objects.object_base import BaseObject from omnigibson.renderer_settings.renderer_settings import RendererSettings from omnigibson.utils.constants import EmitterType, PrimType @@ -167,6 +168,9 @@ def _initialize(self): if len(states_set & get_fire_states()) > 0: self._create_emitter_apis(EmitterType.FIRE) + if len(states_set & get_mist_states()) > 0: + self._create_emitter_apis(EmitterType.MIST) + def add_state(self, state): """ Adds state @state with name @name to self.states. @@ -336,8 +340,20 @@ def _create_emitter_apis(self, emitter_type): emitter_config["gravity"] = (0, 0, -50.0) emitter_config["constantMask"] = 10.0 emitter_config["attenuation"] = 1.5 + elif emitter_type == EmitterType.MIST: + link = self.states[ParticleApplier].link + emitter_config["name"] = "flowEmitterMesh" + emitter_config["type"] = "FlowEmitterMesh" + emitter_config["position"] = (0.0, 0.0, 0.0) + emitter_config["fuel"] = 1.0 + emitter_config["coupleRateFuel"] = 0.5 + emitter_config["buoyancyPerTemp"] = 0.05 + emitter_config["burnPerTemp"] = 0.5 + emitter_config["gravity"] = (0, 0, 0.0) + emitter_config["constantMask"] = 10.0 + emitter_config["attenuation"] = 1.5 else: - raise ValueError("Currently, only EmitterTypes FIRE and STEAM are supported!") + raise ValueError("Currently, only EmitterTypes FIRE, STEAM, and MIST are supported!") # Define prim paths. 
# The flow system is created under the root link so that it automatically updates its pose as the object moves @@ -465,6 +481,8 @@ def update_visuals(self): emitter_enabled[EmitterType.STEAM] |= state.get_value() if state_type in get_fire_states(): emitter_enabled[EmitterType.FIRE] |= state.get_value() + if state_type in get_mist_states(): + emitter_enabled[EmitterType.MIST] |= state.projection_is_active for emitter_type in emitter_enabled: self.set_emitter_enabled(emitter_type, emitter_enabled[emitter_type]) diff --git a/omnigibson/omnigibson_4_2_0.kit b/omnigibson/omnigibson_4_2_0.kit new file mode 100644 index 000000000..517024b4e --- /dev/null +++ b/omnigibson/omnigibson_4_2_0.kit @@ -0,0 +1,367 @@ +[package] +title = "OmniGibson" +description = "A platform for accelerating Embodied AI research" +version = "4.2.0" + +# That makes it browsable in UI with "experience" filter +keywords = ["experience", "app", "usd"] + +[dependencies] +# Isaac Sim extensions +"omni.isaac.cloner" = {} +"omni.isaac.core" = {} +## "omni.isaac.core_archive" = {} +"omni.isaac.core_nodes" = {} +"omni.isaac.cortex" = {} +"omni.isaac.cortex.sample_behaviors" = {} +"omni.isaac.debug_draw" = {} +"omni.isaac.dynamic_control" = {} +"omni.isaac.franka" = {} +"omni.isaac.kit" = {} +"omni.isaac.lula" = {} +"omni.isaac.manipulators" = {} +## "omni.isaac.ml_archive" = {} +"omni.isaac.motion_generation" = {} +"omni.isaac.menu" = {} +"omni.isaac.nucleus" = {} +"omni.isaac.occupancy_map" = {} +"omni.isaac.quadruped" = {} +"omni.isaac.range_sensor" = {} +"omni.isaac.scene_blox" = {} +"omni.isaac.sensor" = {} +"omni.isaac.surface_gripper" = {} +"omni.isaac.universal_robots" = {} +"omni.isaac.utils" = {} +"omni.isaac.wheeled_robots" = {} +## "omni.kit.property.isaac" = {} +## "omni.pip.cloud" = {} +## "omni.pip.compute" = {} +"omni.replicator.isaac" = {} + +# Kit extensions +#"omni.activity.profiler" = {} +#"omni.activity.pump" = {} +"omni.anim.graph.schema" = {} +"omni.anim.navigation.schema" = {} +"omni.usd.schema.omniscripting" = {} +"omni.graph.bundle.action" = {} +"omni.graph.visualization.nodes" = {} +"omni.graph.window.action" = {} +"omni.graph.window.generic" = {} +"omni.hydra.engine.stats" = {} +"omni.importer.mjcf" = {} +"omni.importer.urdf" = {} +"omni.kit.collaboration.channel_manager" = {} +"omni.kit.context_menu" = {} +"omni.kit.hotkeys.window" = {} +"omni.kit.loop-isaac" = {} +"omni.kit.manipulator.prim" = {} +"omni.kit.manipulator.transform" = {} +"omni.kit.menu.common" = {} +"omni.kit.menu.create" = {} +"omni.kit.menu.edit" = {} +"omni.kit.menu.file" = {} +"omni.kit.menu.stage" = {} +"omni.kit.menu.utils" = {} +"omni.kit.primitive.mesh" = {} +"omni.kit.profiler.window" = {} +"omni.kit.property.bundle" = {} +"omni.kit.property.layer" = {} +"omni.kit.renderer.core" = {} +"omni.kit.selection" = {} +"omni.kit.stage_column.payload" = {} +"omni.kit.stage_column.variant" = {} +"omni.kit.stage_templates" = {} +"omni.kit.telemetry" = {} +"omni.kit.uiapp" = {} +"omni.kit.viewport.bundle" = {} +"omni.kit.viewport.menubar.lighting" = {} +# "omni.kit.viewport.ready" = {} +"omni.kit.viewport.rtx" = {} +"omni.kit.widget.cache_indicator" = {} +"omni.kit.widget.extended_searchfield" = {} +"omni.kit.widget.filebrowser" = {} +"omni.kit.widget.layers" = {} +"omni.kit.widget.live" = {} +"omni.kit.widget.timeline" = {} +"omni.kit.window.commands" = {} +"omni.kit.window.console" = {} +"omni.kit.window.content_browser" = {} +"omni.kit.window.cursor" = {} +"omni.kit.window.extensions" = {} +"omni.kit.window.file" = {} 
+"omni.kit.window.filepicker" = {} +"omni.kit.window.property" = {} +"omni.kit.window.script_editor" = {} +"omni.kit.window.stage" = {} +"omni.kit.window.stats" = {order = 1000} +"omni.kit.window.status_bar" = {} +"omni.kit.window.title" = {} +"omni.kit.window.toolbar" = {} +"omni.physx.bundle" = {} +# "omni.physx.fabric" = {} +"omni.physx.tensors" = {} +"omni.replicator.core" = {} +"omni.replicator.replicator_yaml" = {} +"omni.resourcemonitor" = {} +"omni.rtx.settings.core" = {} +"omni.stats" = {} +"omni.syntheticdata" = {} +"omni.usd.schema.scene.visualization" = {} +"omni.warp" = {} +"semantics.schema.editor" = {} +"semantics.schema.property" = {} +"omni.kit.xr.profile.vr" = { version = "106.0.71" } + +[settings] +renderer.active = "rtx" +exts."omni.kit.viewport.menubar.camera".expand = true # Expand the extra-camera settings by default +exts."omni.kit.window.file".useNewFilePicker = true +exts."omni.kit.tool.asset_importer".useNewFilePicker = true +exts."omni.kit.tool.collect".useNewFilePicker = true +exts."omni.kit.widget.layers".useNewFilePicker = true +exts."omni.kit.renderer.core".imgui.enableMips = true +exts."omni.kit.widget.cloud_share".require_access_code = false +exts."omni.kit.pipapi".installCheckIgnoreVersion = true +exts."omni.kit.viewport.window".startup.windowName="Viewport" # Rename from Viewport Next +exts."omni.kit.menu.utils".logDeprecated = false + +# app.content.emptyStageOnStart = false +app.file.ignoreUnsavedOnExit = true # prevents save dialog when exiting + +# deprecate support for old kit.ui.menu +app.menu.legacy_mode = false +# use omni.ui.Menu for the MenuBar +app.menu.compatibility_mode = false +# Setting the port for the embedded http server +exts."omni.services.transport.server.http".port = 8211 + +# default viewport is fill +app.runLoops.rendering_0.fillResolution = false + +exts."omni.kit.test".includeTests = [ "*isaac*" ] + + +[settings.app.python] +# These disable the kit app from also printing out python output, which gets confusing +interceptSysStdOutput = false +logSysStdOutput = false + +[settings.app.settings] +persistent = false +dev_build = false +fabricDefaultStageFrameHistoryCount = 3 # needed for omni.syntheticdata TODO105 Still True? + +[settings.app.window] +title = "OmniGibson" +hideUi = false +_iconSize = 256 +iconPath = "${omni.isaac.kit}/data/omni.isaac.sim.png" + +# width = 1700 +# height = 900 +# x = -1 +# y = -1 + +# Fonts +[setting.app.font] +file = "${fonts}/OpenSans-SemiBold.ttf" +size = 16 + +# [setting.app.runLoops] +# main.rateLimitEnabled = false +# main.rateLimitFrequency = 60 +# main.rateLimitUseBusyLoop = false +# rendering_0.rateLimitEnabled = false + +[settings.exts.'omni.kit.window.extensions'] +# List extensions here we want to show as featured when extension manager is opened +featuredExts = [] + + +[settings] +# MGPU is always on, you can turn it from the settings, and force this off to save even more resource if you +# only want to use a single GPU on your MGPU system +renderer.multiGpu.enabled = true +renderer.multiGpu.autoEnable = true +# This setting forces all GPUs to copy their render results to the main GPU. 
+# This legacy setting should not be needed anymore +app.gatherRenderResults = false +'rtx-transient'.resourcemanager.enableTextureStreaming = true +# app.hydra.aperture.conform = 4 # in 105.1 pixels are square by default +app.hydraEngine.waitIdle = false +rtx.newDenoiser.enabled = true + +# Enable Iray and pxr by setting this to "rtx,iray,pxr" +renderer.enabled = "rtx" + +physics.autoPopupSimulationOutputWindow=false + +### async rendering settings +omni.replicator.asyncRendering = false +app.asyncRendering = false +app.asyncRenderingLowLatency = false + +### Render thread settings +app.runLoops.main.rateLimitEnabled = false +app.runLoops.main.rateLimitFrequency = 120 +app.runLoops.main.rateLimitUsePrecisionSleep = true +app.runLoops.main.syncToPresent = false +app.runLoops.present.rateLimitFrequency = 60 +app.runLoops.present.rateLimitUsePrecisionSleep = true +app.runLoops.rendering_0.rateLimitFrequency = 120 +app.runLoops.rendering_0.rateLimitUsePrecisionSleep = true +app.runLoops.rendering_0.syncToPresent = false +app.runLoops.rendering_1.rateLimitFrequency = 120 +app.runLoops.rendering_1.rateLimitUsePrecisionSleep = true +app.runLoops.rendering_1.syncToPresent = false +app.runLoopsGlobal.syncToPresent = false +app.vsync = false +exts."omni.kit.renderer.core".present.enabled = false +exts."omni.kit.renderer.core".present.presentAfterRendering = false +persistent.app.viewport.defaults.tickRate = 120 +rtx-transient.dlssg.enabled = false + +app.audio.enabled = false + +privacy.externalBuild = true + +# Enable Vulkan +app.vulkan = true + +# Basic Kit App +################################ +app.versionFile = "${app}/../VERSION" +app.name = "Isaac-Sim" +app.version = "4.2.0" + +# hide NonToggleable Exts +exts."omni.kit.window.extensions".hideNonToggleableExts = true +exts."omni.kit.window.extensions".showFeatureOnly = false + +# Hang Detector +################################ +# app.hangDetector.enabled = false +# app.hangDetector.timeout = 120 + +# RTX Settings +############################### +[settings.rtx] +translucency.worldEps = 0.005 + +# Content Browser +############################### +[settings.exts."omni.kit.window.content_browser"] +enable_thumbnail_generation_images = false # temp fix to avoid leaking python processes + +# Extensions +############################### +[settings.exts."omni.kit.registry.nucleus"] +registries = [ + { name = "kit/default", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/106/shared" }, + { name = "kit/sdk", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/sdk/${kit_version_short}/${kit_git_hash}" }, + { name = "kit/community", url = "https://dw290v42wisod.cloudfront.net/exts/kit/community" }, +] + +[settings.app.extensions] +skipPublishVerification = false +registryEnabled = true + +[settings.exts."omni.kit.window.modifier.titlebar"] +titleFormatString = " Isaac Sim {version:${app}/../SHORT_VERSION,font_color=0x909090,font_size=16} {separator} {file, board=true}" +showFileFullPath = true +icon.file = "${app}/../exts/omni.isaac.app.setup/data/nvidia-omniverse-isaacsim.ico" +icon.size = 256 +defaultFont.name = "Arial" +defaultFont.size = 16 +defaultFont.color = 0xD0D0D0 +separator.color = 0x00B976 +separator.width = 1 +windowBorder.color = 0x0F0F0F +windowBorder.width = 2 +colors.caption = 0x0F0F0F +colors.client = 0x0F0F0F +respondOnMouseUp = true +changeWindowRegion = true + + +# Register extension folder from this repo in kit +[settings.app.exts] +folders = ["${app}/../exts", "${app}/../extscache", 
"${app}/../extsPhysics", "${app}/../extsUser"] + +[settings.crashreporter.data] +experience = "Isaac Sim Python" + +# Isaac Sim Settings +############################### +[settings.app.renderer] +skipWhileMinimized = false +sleepMsOnFocus = 0 +sleepMsOutOfFocus = 0 +resolution.width=1280 +resolution.height=720 + +[settings.app.livestream] +proto = "ws" +allowResize = true +outDirectory = "${data}" + +# default camera position in meters +[settings.app.viewport] +defaultCamPos.x = 5 +defaultCamPos.y = 5 +defaultCamPos.z = 5 + +[settings.rtx] +raytracing.fractionalCutoutOpacity = false +hydra.enableSemanticSchema = true +mdltranslator.mdlDistilling = false +# descriptorSets=60000 +# reservedDescriptors=500000 +# sceneDb.maxInstances=1000000 +# Enable this for static scenes, improves visual quality +# directLighting.sampledLighting.enabled = true + +[settings.persistent] +app.file.recentFiles = [] +app.stage.upAxis = "Z" +app.stage.timeCodeRange = [0, 1000000] +app.stage.movePrimInPlace = false +app.stage.instanceableOnCreatingReference = false +app.stage.materialStrength = "weakerThanDescendants" + +app.transform.gizmoUseSRT = true +app.viewport.grid.scale = 1.0 +app.viewport.pickingMode = "kind:model.ALL" +app.viewport.camMoveVelocity = 0.05 # 5 m/s +app.viewport.gizmo.scale = 0.01 # scaled to meters +app.viewport.previewOnPeek = false +app.viewport.snapToSurface = false +app.viewport.displayOptions = 31887 # Disable Frame Rate and Resolution by default +app.window.uiStyle = "NvidiaDark" +app.primCreation.DefaultXformOpType = "Scale, Orient, Translate" +app.primCreation.DefaultXformOpOrder="xformOp:translate, xformOp:orient, xformOp:scale" +app.primCreation.typedDefaults.camera.clippingRange = [0.01, 10000000.0] +simulation.minFrameRate = 15 +simulation.defaultMetersPerUnit = 1.0 +omnigraph.updateToUsd = false +omnigraph.useSchemaPrims = true +omnigraph.disablePrimNodes = true +physics.updateToUsd = true +physics.updateVelocitiesToUsd = true +physics.useFastCache = false +physics.visualizationDisplayJoints = false +physics.visualizationSimulationOutput = false +omni.replicator.captureOnPlay = true +exts."omni.anim.navigation.core".navMesh.viewNavMesh = false +exts."omni.anim.navigation.core".navMesh.config.autoRebakeOnChanges = false +resourcemonitor.timeBetweenQueries = 100 + +renderer.startupMessageDisplayed = true # hides the IOMMU popup window + +# Make Detail panel visible by default +app.omniverse.content_browser.options_menu.show_details = true +app.omniverse.filepicker.options_menu.show_details = true + +[settings.ngx] +enabled=true # Enable this for DLSS diff --git a/omnigibson/prims/cloth_prim.py b/omnigibson/prims/cloth_prim.py index 0177d8fab..9c416eaeb 100644 --- a/omnigibson/prims/cloth_prim.py +++ b/omnigibson/prims/cloth_prim.py @@ -9,6 +9,7 @@ import math from collections.abc import Iterable +from functools import cached_property import torch as th @@ -160,7 +161,7 @@ def n_particles(self): """ return self._n_particles - @property + @cached_property def kinematic_only(self): """ Returns: diff --git a/omnigibson/prims/entity_prim.py b/omnigibson/prims/entity_prim.py index 6676097fc..b1dc20cc1 100644 --- a/omnigibson/prims/entity_prim.py +++ b/omnigibson/prims/entity_prim.py @@ -1399,7 +1399,7 @@ def self_collisions(self, flag): self.articulation_root_path, "physxArticulation:enabledSelfCollisions", flag ) - @property + @cached_property def kinematic_only(self): """ Returns: diff --git a/omnigibson/prims/joint_prim.py b/omnigibson/prims/joint_prim.py index 18ac6a867..c3769231b 
100644 --- a/omnigibson/prims/joint_prim.py +++ b/omnigibson/prims/joint_prim.py @@ -75,6 +75,8 @@ def __init__( self._joint_type = None self._control_type = None self._driven = None + self._body0 = None + self._body1 = None # The following values will only be valid if this joint is part of an articulation self._n_dof = None # The number of degrees of freedom this joint provides @@ -232,8 +234,10 @@ def body0(self): None or str: Absolute prim path to the body prim to set as this joint's parent link, or None if there is no body0 specified. """ - targets = self._prim.GetRelationship("physics:body0").GetTargets() - return targets[0].__str__() if len(targets) > 0 else None + if self._body0 is None: + targets = self._prim.GetRelationship("physics:body0").GetTargets() + self._body0 = targets[0].__str__() if len(targets) > 0 else None + return self._body0 @body0.setter def body0(self, body0): @@ -246,6 +250,7 @@ def body0(self, body0): # Make sure prim path is valid assert lazy.omni.isaac.core.utils.prims.is_prim_path_valid(body0), f"Invalid body0 path specified: {body0}" self._prim.GetRelationship("physics:body0").SetTargets([lazy.pxr.Sdf.Path(body0)]) + self._body0 = None @property def body1(self): @@ -256,8 +261,10 @@ def body1(self): None or str: Absolute prim path to the body prim to set as this joint's child link, or None if there is no body1 specified. """ - targets = self._prim.GetRelationship("physics:body1").GetTargets() - return targets[0].__str__() + if self._body1 is None: + targets = self._prim.GetRelationship("physics:body1").GetTargets() + self._body1 = targets[0].__str__() if len(targets) > 0 else None + return self._body1 @body1.setter def body1(self, body1): @@ -270,6 +277,7 @@ def body1(self, body1): # Make sure prim path is valid assert lazy.omni.isaac.core.utils.prims.is_prim_path_valid(body1), f"Invalid body1 path specified: {body1}" self._prim.GetRelationship("physics:body1").SetTargets([lazy.pxr.Sdf.Path(body1)]) + self._body1 = None @property def local_orientation(self): diff --git a/omnigibson/prims/material_prim.py b/omnigibson/prims/material_prim.py index 3098658a3..fcc414acc 100644 --- a/omnigibson/prims/material_prim.py +++ b/omnigibson/prims/material_prim.py @@ -7,7 +7,7 @@ import omnigibson.lazy as lazy from omnigibson.prims.prim_base import BasePrim from omnigibson.utils.physx_utils import bind_material -from omnigibson.utils.usd_utils import absolute_prim_path_to_scene_relative +from omnigibson.utils.usd_utils import absolute_prim_path_to_scene_relative, get_sdf_value_type_name class MaterialPrim(BasePrim): @@ -70,6 +70,7 @@ def __init__( ): # Other values that will be filled in at runtime self._shader = None + self._shader_node = None # Users of this material: should be a set of BaseObject and BaseSystem self._users = set() @@ -148,6 +149,7 @@ def _post_load(self): # Generate shader reference self._shader = lazy.omni.usd.get_shader_from_material(self._prim) + self._shader_node = lazy.usd.mdl.RegistryUtils.GetShaderNodeForPrim(self._shader.GetPrim()) def bind(self, target_prim_path): """ @@ -194,7 +196,7 @@ def shader_update_asset_paths_with_root_path(self, root_path, relative=False): Otherwise, @root_path will be pre-appended to the original asset paths """ - for inp_name in self.shader_input_names_by_type("SdfAssetPath"): + for inp_name in self.get_shader_input_names_by_type("SdfAssetPath", include_default=True): inp = self.get_input(inp_name) # If the input doesn't have any path, skip if inp is None: @@ -220,7 +222,11 @@ def get_input(self, inp): Returns: any: value of the requested @inp """ - return 
self._shader.GetInput(inp).Get() + non_default_inp = self._shader.GetInput(inp).Get() + if non_default_inp is not None: + return non_default_inp + + return self._shader_node.GetInput(inp).GetDefaultValue() def set_input(self, inp, val): """ @@ -230,11 +236,14 @@ def set_input(self, inp, val): inp (str): Name of the shader input whose value will be set val (any): Value to set for the input. This should be the valid type for that attribute. """ - # Make sure the input exists first, so we avoid segfaults with "invalid null prim" - assert ( - inp in self.shader_input_names - ), f"Got invalid shader input to set! Current inputs are: {self.shader_input_names}. Got: {inp}" - self._shader.GetInput(inp).Set(val) + # # Make sure the input exists first, so we avoid segfaults with "invalid null prim" + if inp in self.shader_input_names: + self._shader.GetInput(inp).Set(val) + elif inp in self.shader_default_input_names: + input_type = get_sdf_value_type_name(val) + self._shader.CreateInput(inp, input_type).Set(val) + else: + raise ValueError(f"Got invalid shader input to set! Got: {inp}") @property def is_glass(self): @@ -242,7 +251,7 @@ def is_glass(self): Returns: bool: Whether this material is a glass material or not """ - return "glass_color" in self.shader_input_names + return "glass_color" in self.shader_input_names | self.shader_default_input_names @property def shader(self): @@ -260,15 +269,34 @@ def shader_input_names(self): """ return {inp.GetBaseName() for inp in self._shader.GetInputs()} - def shader_input_names_by_type(self, input_type): + @property + def shader_default_input_names(self): + """ + Returns: + set: All the shader input names associated with this material that have default values + """ + return set(self._shader_node.GetInputNames()) + + def get_shader_input_names_by_type(self, input_type, include_default=False): """ Args: input_type (str): input type + include_default (bool): whether to include default inputs Returns: set: All the shader input names associated with this material that match the given input type """ - return {inp.GetBaseName() for inp in self._shader.GetInputs() if inp.GetTypeName().cppTypeName == input_type} + shader_input_names = { + inp.GetBaseName() for inp in self._shader.GetInputs() if inp.GetTypeName().cppTypeName == input_type + } + if not include_default: + return shader_input_names + shader_default_input_names = { + inp_name + for inp_name in self.shader_default_input_names + if self._shader_node.GetInput(inp_name).GetType() == input_type + } + return shader_input_names | shader_default_input_names @property def diffuse_color_constant(self): diff --git a/omnigibson/robots/a1.py b/omnigibson/robots/a1.py index 4e5ba4e5d..17b9549fe 100644 --- a/omnigibson/robots/a1.py +++ b/omnigibson/robots/a1.py @@ -1,4 +1,5 @@ import os +from functools import cached_property import torch as th @@ -167,23 +168,23 @@ def _default_joint_pos(self): def finger_lengths(self): return {self.default_arm: 0.087} - @property + @cached_property def arm_link_names(self): return {self.default_arm: [f"arm_seg{i+1}" for i in range(5)]} - @property + @cached_property def arm_joint_names(self): return {self.default_arm: [f"arm_joint{i+1}" for i in range(6)]} - @property + @cached_property def eef_link_names(self): return {self.default_arm: self._eef_link_names} - @property + @cached_property def finger_link_names(self): return {self.default_arm: self._finger_link_names} - @property + @cached_property def finger_joint_names(self): return {self.default_arm: self._finger_joint_names} 
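get_input() above now prefers the value authored on the shader prim and only falls back to the MDL shader node's registered default, which is also why is_glass can detect glass materials whose glass_color was never explicitly authored. A toy sketch of the lookup order, with the USD/MDL objects stood in by plain dicts (illustrative only):

def get_input_sketch(authored, defaults, name):
    value = authored.get(name)  # value explicitly authored on the shader prim
    if value is not None:
        return value
    return defaults.get(name)  # fall back to the MDL definition's default

authored = {"glass_color": (1.0, 1.0, 1.0)}
defaults = {"glass_color": (0.5, 0.5, 0.5), "reflection_roughness_constant": 0.4}
assert get_input_sketch(authored, defaults, "glass_color") == (1.0, 1.0, 1.0)
assert get_input_sketch(authored, defaults, "reflection_roughness_constant") == 0.4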
diff --git a/omnigibson/robots/active_camera_robot.py b/omnigibson/robots/active_camera_robot.py index cb56e4c77..73347b797 100644 --- a/omnigibson/robots/active_camera_robot.py +++ b/omnigibson/robots/active_camera_robot.py @@ -1,4 +1,5 @@ from abc import abstractmethod +from functools import cached_property import torch as th @@ -37,8 +38,8 @@ def _get_proprioception_dict(self): dic = super()._get_proprioception_dict() # Add camera pos info - joint_positions = ControllableObjectViewAPI.get_joint_positions(self.articulation_root_path) - joint_velocities = ControllableObjectViewAPI.get_joint_velocities(self.articulation_root_path) + joint_positions = dic["joint_qpos"] + joint_velocities = dic["joint_qvel"] dic["camera_qpos"] = joint_positions[self.camera_control_idx] dic["camera_qpos_sin"] = th.sin(joint_positions[self.camera_control_idx]) dic["camera_qpos_cos"] = th.cos(joint_positions[self.camera_control_idx]) @@ -114,7 +115,7 @@ def _default_controller_config(self): return cfg - @property + @cached_property @abstractmethod def camera_joint_names(self): """ @@ -126,7 +127,7 @@ def camera_joint_names(self): """ raise NotImplementedError - @property + @cached_property def camera_control_idx(self): """ Returns: diff --git a/omnigibson/robots/articulated_trunk_robot.py b/omnigibson/robots/articulated_trunk_robot.py index 889d0e10d..20a9e9ec1 100644 --- a/omnigibson/robots/articulated_trunk_robot.py +++ b/omnigibson/robots/articulated_trunk_robot.py @@ -1,3 +1,5 @@ +from functools import cached_property + import torch as th from omnigibson.robots.manipulation_robot import ManipulationRobot @@ -33,19 +35,19 @@ def get_control_dict(self): return fcns - @property + @cached_property def trunk_links(self): return [self.links[name] for name in self.trunk_link_names] - @property + @cached_property def trunk_link_names(self): raise NotImplementedError - @property + @cached_property def trunk_joint_names(self): raise NotImplementedError("trunk_joint_names must be implemented in subclass") - @property + @cached_property def trunk_control_idx(self): """ Returns: @@ -164,8 +166,8 @@ def _get_proprioception_dict(self): dic = super()._get_proprioception_dict() # Add trunk info - joint_positions = ControllableObjectViewAPI.get_joint_positions(self.articulation_root_path) - joint_velocities = ControllableObjectViewAPI.get_joint_velocities(self.articulation_root_path) + joint_positions = dic["joint_qpos"] + joint_velocities = dic["joint_qvel"] dic["trunk_qpos"] = joint_positions[self.trunk_control_idx] dic["trunk_qvel"] = joint_velocities[self.trunk_control_idx] diff --git a/omnigibson/robots/behavior_robot.py b/omnigibson/robots/behavior_robot.py index b41aa0cf3..b1eebd3de 100644 --- a/omnigibson/robots/behavior_robot.py +++ b/omnigibson/robots/behavior_robot.py @@ -3,6 +3,7 @@ import os from abc import ABC from collections import OrderedDict +from functools import cached_property from typing import Iterable, List, Literal, Tuple import torch as th @@ -153,18 +154,18 @@ def n_arms(cls): def arm_names(cls): return ["left", "right"] - @property + @cached_property def eef_link_names(self): dic = {arm: f"{arm}_{m.PALM_LINK_NAME}" for arm in self.arm_names} dic["head"] = "head" return dic - @property + @cached_property def arm_link_names(self): """The head counts as a arm since it has the same 33 joint configuration""" return {arm: [f"{arm}_{component}" for component in m.COMPONENT_SUFFIXES] for arm in self.arm_names + ["head"]} - @property + @cached_property def finger_link_names(self): return { arm: [ @@ 
-173,22 +174,22 @@ def finger_link_names(self): for arm in self.arm_names } - @property + @cached_property def base_joint_names(self): return [f"base_{component}_joint" for component in m.COMPONENT_SUFFIXES] - @property + @cached_property def camera_joint_names(self): return [f"head_{component}_joint" for component in m.COMPONENT_SUFFIXES] - @property + @cached_property def arm_joint_names(self): """The head counts as a arm since it has the same 33 joint configuration""" return { eef: [f"{eef}_{component}_joint" for component in m.COMPONENT_SUFFIXES] for eef in self.arm_names + ["head"] } - @property + @cached_property def finger_joint_names(self): return { arm: ( @@ -352,7 +353,7 @@ def update_controller_mode(self): self.joints[joint_name].max_effort = m.FINGER_JOINT_MAX_EFFORT self.joints[joint_name].max_velocity = m.FINGER_JOINT_MAX_VELOCITY - @property + @cached_property def base_footprint_link_name(self): """ Name of the actual root link that we are interested in. diff --git a/omnigibson/robots/fetch.py b/omnigibson/robots/fetch.py index 3a789dfa9..47df3eea3 100644 --- a/omnigibson/robots/fetch.py +++ b/omnigibson/robots/fetch.py @@ -1,5 +1,6 @@ import math import os +from functools import cached_property import torch as th @@ -230,19 +231,19 @@ def assisted_grasp_end_points(self): ] } - @property + @cached_property def base_joint_names(self): return ["l_wheel_joint", "r_wheel_joint"] - @property + @cached_property def camera_joint_names(self): return ["head_pan_joint", "head_tilt_joint"] - @property + @cached_property def trunk_joint_names(self): return ["torso_lift_joint"] - @property + @cached_property def manipulation_link_names(self): return [ "torso_lift_link", @@ -260,7 +261,7 @@ def manipulation_link_names(self): "r_gripper_finger_link", ] - @property + @cached_property def arm_link_names(self): return { self.default_arm: [ @@ -274,7 +275,7 @@ def arm_link_names(self): ] } - @property + @cached_property def arm_joint_names(self): return { self.default_arm: [ @@ -288,15 +289,15 @@ def arm_joint_names(self): ] } - @property + @cached_property def eef_link_names(self): return {self.default_arm: "eef_link"} - @property + @cached_property def finger_link_names(self): return {self.default_arm: ["r_gripper_finger_link", "l_gripper_finger_link"]} - @property + @cached_property def finger_joint_names(self): return {self.default_arm: ["r_gripper_finger_joint", "l_gripper_finger_joint"]} diff --git a/omnigibson/robots/franka.py b/omnigibson/robots/franka.py index 25334de49..30619c71e 100644 --- a/omnigibson/robots/franka.py +++ b/omnigibson/robots/franka.py @@ -1,4 +1,5 @@ import os +from functools import cached_property import torch as th @@ -231,23 +232,23 @@ def _default_joint_pos(self): def finger_lengths(self): return {self.default_arm: 0.1} - @property + @cached_property def arm_link_names(self): return {self.default_arm: [f"panda_link{i}" for i in range(8)]} - @property + @cached_property def arm_joint_names(self): return {self.default_arm: [f"panda_joint{i+1}" for i in range(7)]} - @property + @cached_property def eef_link_names(self): return {self.default_arm: self._eef_link_names} - @property + @cached_property def finger_link_names(self): return {self.default_arm: self._finger_link_names} - @property + @cached_property def finger_joint_names(self): return {self.default_arm: self._finger_joint_names} @@ -267,7 +268,7 @@ def curobo_path(self): ), f"Only franka_panda is currently supported for curobo. 
Got: {self._model_name}" return os.path.join(gm.ASSET_PATH, f"models/franka/{self.model_name}_description_curobo.yaml") - @property + @cached_property def curobo_attached_object_link_names(self): return {self._eef_link_names: "attached_object"} diff --git a/omnigibson/robots/freight.py b/omnigibson/robots/freight.py index c88fbc814..9f579bb1e 100644 --- a/omnigibson/robots/freight.py +++ b/omnigibson/robots/freight.py @@ -1,4 +1,5 @@ import os +from functools import cached_property import torch as th @@ -24,7 +25,7 @@ def wheel_radius(self): def wheel_axle_length(self): return 0.372 - @property + @cached_property def base_joint_names(self): return ["l_wheel_joint", "r_wheel_joint"] diff --git a/omnigibson/robots/holonomic_base_robot.py b/omnigibson/robots/holonomic_base_robot.py index 7aa506108..c7aa370f4 100644 --- a/omnigibson/robots/holonomic_base_robot.py +++ b/omnigibson/robots/holonomic_base_robot.py @@ -6,10 +6,12 @@ import omnigibson as og import omnigibson.lazy as lazy -import omnigibson.utils.transform_utils as T from omnigibson.macros import create_module_macros from omnigibson.robots.locomotion_robot import LocomotionRobot +from omnigibson.robots.manipulation_robot import ManipulationRobot +from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.utils.python_utils import classproperty +from omnigibson.utils.usd_utils import ControllableObjectViewAPI m = create_module_macros(module_path=__file__) m.MAX_LINEAR_VELOCITY = 1.5 # linear velocity in meters/second @@ -173,7 +175,7 @@ def _initialize(self): # Reload the controllers to update their command_output_limits and control_limits self.reload_controllers(self._controller_config) - @property + @cached_property def base_idx(self): """ Returns: @@ -184,7 +186,7 @@ def base_idx(self): [joints.index(f"base_footprint_{component}_joint") for component in ["x", "y", "z", "rx", "ry", "rz"]] ) - @property + @cached_property def base_joint_names(self): return [f"base_footprint_{component}_joint" for component in ("x", "y", "rz")] @@ -249,10 +251,13 @@ def set_position_orientation(self, position=None, orientation=None, frame: Liter # ("base_footprint_x") frame. Assign it to the 6 1DoF joints that control the base. # Note that the 6 1DoF joints are originated from the root_link ("base_footprint_x") frame. joint_pos, joint_orn = self.root_link.get_position_orientation() - inv_joint_pos, inv_joint_orn = T.mat2pose(T.pose_inv(T.pose2mat((joint_pos, joint_orn)))) + joint_pos, joint_orn = cb.from_torch(joint_pos), cb.from_torch(joint_orn) + inv_joint_pos, inv_joint_orn = cb.T.mat2pose(cb.T.pose_inv(cb.T.pose2mat((joint_pos, joint_orn)))) - relative_pos, relative_orn = T.pose_transform(inv_joint_pos, inv_joint_orn, position, orientation) - relative_rpy = T.quat2euler(relative_orn) + relative_pos, relative_orn = cb.T.pose_transform( + inv_joint_pos, inv_joint_orn, cb.from_torch(position), cb.from_torch(orientation) + ) + relative_rpy = cb.T.quat2euler(relative_orn) self.joints["base_footprint_x_joint"].set_pos(relative_pos[0], drive=False) self.joints["base_footprint_y_joint"].set_pos(relative_pos[1], drive=False) self.joints["base_footprint_z_joint"].set_pos(relative_pos[2], drive=False) @@ -275,8 +280,8 @@ def set_linear_velocity(self, velocity: th.Tensor): # Transform the desired linear velocity from the world frame to the root_link ("base_footprint_x") frame # Note that this will also set the target to be the desired linear velocity (i.e. 
the robot will try to maintain # such velocity), which is different from the default behavior of set_linear_velocity for all other objects. - orn = self.root_link.get_position_orientation()[1] - velocity_in_root_link = T.quat2mat(orn).T @ velocity + orn = cb.from_torch(self.root_link.get_position_orientation()[1]) + velocity_in_root_link = cb.T.quat2mat(orn).T @ cb.from_torch(velocity) self.joints["base_footprint_x_joint"].set_vel(velocity_in_root_link[0], drive=False) self.joints["base_footprint_y_joint"].set_vel(velocity_in_root_link[1], drive=False) self.joints["base_footprint_z_joint"].set_vel(velocity_in_root_link[2], drive=False) @@ -287,8 +292,8 @@ def get_linear_velocity(self) -> th.Tensor: def set_angular_velocity(self, velocity: th.Tensor) -> None: # See comments of self.set_linear_velocity - orn = self.root_link.get_position_orientation()[1] - velocity_in_root_link = T.quat2mat(orn).T @ velocity + orn = cb.from_torch(self.root_link.get_position_orientation()[1]) + velocity_in_root_link = cb.T.quat2mat(orn).T @ cb.from_torch(velocity) self.joints["base_footprint_rx_joint"].set_vel(velocity_in_root_link[0], drive=False) self.joints["base_footprint_ry_joint"].set_vel(velocity_in_root_link[1], drive=False) self.joints["base_footprint_rz_joint"].set_vel(velocity_in_root_link[2], drive=False) @@ -302,32 +307,34 @@ def _postprocess_control(self, control, control_type): u_vec, u_type_vec = super()._postprocess_control(control=control, control_type=control_type) # Change the control from base_footprint_link ("base_footprint") frame to root_link ("base_footprint_x") frame - base_orn = self.base_footprint_link.get_position_orientation()[1] - root_link_orn = self.root_link.get_position_orientation()[1] + base_orn = ControllableObjectViewAPI.get_position_orientation(self.articulation_root_path)[1] + root_link_orn = ControllableObjectViewAPI.get_root_position_orientation(self.articulation_root_path)[1] - cur_orn_mat = T.quat2mat(root_link_orn).T @ T.quat2mat(base_orn) - cur_pose = th.zeros((2, 4, 4)) + cur_orn_mat = cb.T.quat2mat(root_link_orn).T @ cb.T.quat2mat(base_orn) + cur_pose = cb.zeros((2, 4, 4)) cur_pose[:, :3, :3] = cur_orn_mat cur_pose[:, 3, 3] = 1.0 - local_pose = th.zeros((2, 4, 4)) - local_pose[:] = th.eye(4) - local_pose[:, :3, 3] = u_vec[self.base_idx].view(2, 3) + local_pose = cb.zeros((2, 4, 4)) + local_pose[:] = cb.eye(4) + local_pose[:, :3, 3] = cb.view(u_vec[cb.from_torch(self.base_idx)], (2, 3)) # Rotate the linear and angular velocity to the desired frame global_pose = cur_pose @ local_pose lin_vel_global, ang_vel_global = global_pose[0, :3, 3], global_pose[1, :3, 3] - u_vec[self.base_control_idx] = th.tensor([lin_vel_global[0], lin_vel_global[1], ang_vel_global[2]]) + u_vec[cb.from_torch(self.base_control_idx)] = cb.array( + [lin_vel_global[0], lin_vel_global[1], ang_vel_global[2]] + ) return u_vec, u_type_vec def teleop_data_to_action(self, teleop_action) -> th.Tensor: action = ManipulationRobot.teleop_data_to_action(self, teleop_action) - action[self.base_action_idx] = th.tensor(teleop_action.base).float() * 0.1 + action[self.base_action_idx] = teleop_action.base.float() return action - @property + @cached_property def base_footprint_link_name(self): raise NotImplementedError("base_footprint_link_name is not implemented for HolonomicBaseRobot") diff --git a/omnigibson/robots/husky.py b/omnigibson/robots/husky.py index f04e5d9bf..a893f9d87 100644 --- a/omnigibson/robots/husky.py +++ b/omnigibson/robots/husky.py @@ -1,4 +1,5 @@ import os +from functools import 
cached_property import torch as th @@ -23,7 +24,7 @@ def wheel_radius(self): def wheel_axle_length(self): return 0.670 - @property + @cached_property def base_joint_names(self): return ["front_left_wheel", "front_right_wheel", "rear_left_wheel", "rear_right_wheel"] diff --git a/omnigibson/robots/locobot.py b/omnigibson/robots/locobot.py index d27655f33..846fffaae 100644 --- a/omnigibson/robots/locobot.py +++ b/omnigibson/robots/locobot.py @@ -1,4 +1,5 @@ import os +from functools import cached_property import torch as th @@ -20,7 +21,7 @@ def wheel_radius(self): def wheel_axle_length(self): return 0.230 - @property + @cached_property def base_joint_names(self): return ["wheel_left_joint", "wheel_right_joint"] diff --git a/omnigibson/robots/locomotion_robot.py b/omnigibson/robots/locomotion_robot.py index a723be8e8..9cf9d1a54 100644 --- a/omnigibson/robots/locomotion_robot.py +++ b/omnigibson/robots/locomotion_robot.py @@ -1,4 +1,5 @@ from abc import abstractmethod +from functools import cached_property import torch as th @@ -39,8 +40,8 @@ def _validate_configuration(self): def _get_proprioception_dict(self): dic = super()._get_proprioception_dict() - joint_positions = ControllableObjectViewAPI.get_joint_positions(self.articulation_root_path) - joint_velocities = ControllableObjectViewAPI.get_joint_velocities(self.articulation_root_path) + joint_positions = dic["joint_qpos"] + joint_velocities = dic["joint_qvel"] # Add base info dic["base_qpos"] = joint_positions[self.base_control_idx] @@ -184,19 +185,19 @@ def turn_right(self, delta=0.03): quat = quat_multiply((euler2quat(delta, 0, 0)), quat) self.set_position_orientation(orientation=quat) - @property + @cached_property def non_floor_touching_base_links(self): return [self.links[name] for name in self.non_floor_touching_base_link_names] - @property + @cached_property def non_floor_touching_base_link_names(self): return [self.base_footprint_link_name] - @property + @cached_property def floor_touching_base_links(self): return [self.links[name] for name in self.floor_touching_base_link_names] - @property + @cached_property def floor_touching_base_link_names(self): raise NotImplementedError @@ -218,7 +219,7 @@ def base_joint_names(self): """ raise NotImplementedError - @property + @cached_property def base_control_idx(self): """ Returns: diff --git a/omnigibson/robots/manipulation_robot.py b/omnigibson/robots/manipulation_robot.py index be725b24f..345878665 100644 --- a/omnigibson/robots/manipulation_robot.py +++ b/omnigibson/robots/manipulation_robot.py @@ -2,6 +2,7 @@ import os from abc import abstractmethod from collections import namedtuple +from functools import cached_property from typing import Literal import networkx as nx @@ -22,6 +23,7 @@ from omnigibson.macros import create_module_macros, gm from omnigibson.object_states import ContactBodies from omnigibson.robots.robot_base import BaseRobot +from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.utils.constants import JointType, PrimType from omnigibson.utils.geometry_utils import generate_points_in_volume_checker_function from omnigibson.utils.python_utils import assert_valid_key, classproperty @@ -413,8 +415,8 @@ def _get_proprioception_dict(self): dic = super()._get_proprioception_dict() # Loop over all arms to grab proprio info - joint_positions = ControllableObjectViewAPI.get_joint_positions(self.articulation_root_path) - joint_velocities = ControllableObjectViewAPI.get_joint_velocities(self.articulation_root_path) + joint_positions = 
dic["joint_qpos"] + joint_velocities = dic["joint_qvel"] for arm in self.arm_names: # Add arm info dic["arm_{}_qpos".format(arm)] = joint_positions[self.arm_control_idx[arm]] @@ -423,11 +425,10 @@ def _get_proprioception_dict(self): dic["arm_{}_qvel".format(arm)] = joint_velocities[self.arm_control_idx[arm]] # Add eef and grasping info - dic["eef_{}_pos".format(arm)], dic["eef_{}_quat".format(arm)] = ( - ControllableObjectViewAPI.get_link_relative_position_orientation( - self.articulation_root_path, self.eef_link_names[arm] - ) + eef_pos, eef_quat = ControllableObjectViewAPI.get_link_relative_position_orientation( + self.articulation_root_path, self.eef_link_names[arm] ) + dic["eef_{}_pos".format(arm)], dic["eef_{}_quat".format(arm)] = cb.to_torch(eef_pos), cb.to_torch(eef_quat) dic["grasp_{}".format(arm)] = th.tensor([self.is_grasping(arm)]) dic["gripper_{}_qpos".format(arm)] = joint_positions[self.gripper_control_idx[arm]] dic["gripper_{}_qvel".format(arm)] = joint_velocities[self.gripper_control_idx[arm]] @@ -531,7 +532,7 @@ def gripper_action_idx(self): ) return gripper_action_idx - @property + @cached_property @abstractmethod def arm_link_names(self): """ @@ -544,7 +545,7 @@ def arm_link_names(self): """ raise NotImplementedError - @property + @cached_property @abstractmethod def arm_joint_names(self): """ @@ -557,7 +558,7 @@ def arm_joint_names(self): """ raise NotImplementedError - @property + @cached_property @abstractmethod def eef_link_names(self): """ @@ -567,7 +568,7 @@ def eef_link_names(self): """ raise NotImplementedError - @property + @cached_property @abstractmethod def finger_link_names(self): """ @@ -580,7 +581,7 @@ def finger_link_names(self): """ raise NotImplementedError - @property + @cached_property @abstractmethod def finger_joint_names(self): """ @@ -593,7 +594,7 @@ def finger_joint_names(self): """ raise NotImplementedError - @property + @cached_property def arm_control_idx(self): """ Returns: @@ -605,7 +606,7 @@ def arm_control_idx(self): for arm in self.arm_names } - @property + @cached_property def gripper_control_idx(self): """ Returns: @@ -617,7 +618,7 @@ def gripper_control_idx(self): for arm in self.arm_names } - @property + @cached_property def arm_links(self): """ Returns: @@ -626,7 +627,7 @@ def arm_links(self): """ return {arm: [self._links[link] for link in self.arm_link_names[arm]] for arm in self.arm_names} - @property + @cached_property def eef_links(self): """ Returns: @@ -635,7 +636,7 @@ def eef_links(self): """ return {arm: self._links[self.eef_link_names[arm]] for arm in self.arm_names} - @property + @cached_property def finger_links(self): """ Returns: @@ -644,7 +645,7 @@ def finger_links(self): """ return {arm: [self._links[link] for link in self.finger_link_names[arm]] for arm in self.arm_names} - @property + @cached_property def finger_joints(self): """ Returns: @@ -1314,23 +1315,24 @@ def _handle_assisted_grasping(self): # a zero action will actually keep the AG setting where it already is. 
controller = self._controllers[f"gripper_{arm}"] controlled_joints = controller.dof_idx + control = cb.to_torch(controller.control) if controller.control is not None else None threshold = th.mean( th.stack([self.joint_lower_limits[controlled_joints], self.joint_upper_limits[controlled_joints]]), dim=0, ) - if controller.control is None: + if control is None: applying_grasp = False elif self._grasping_direction == "lower": applying_grasp = ( - th.any(controller.control < threshold) + th.any(control < threshold) if controller.control_type == ControlType.POSITION - else th.any(controller.control < 0) + else th.any(control < 0) ) else: applying_grasp = ( - th.any(controller.control > threshold) + th.any(control > threshold) if controller.control_type == ControlType.POSITION - else th.any(controller.control > 0) + else th.any(control > 0) ) # Execute gradual release of object if self._ag_obj_in_hand[arm]: @@ -1596,12 +1598,12 @@ def teleop_data_to_action(self, teleop_action) -> th.Tensor: hands = ["left", "right"] if self.n_arms == 2 else ["right"] for i, hand in enumerate(hands): arm_name = self.arm_names[i] - arm_action = th.tensor(teleop_action[hand]).float() + arm_action = teleop_action[hand] # arm action assert isinstance(self._controllers[f"arm_{arm_name}"], InverseKinematicsController) or isinstance( self._controllers[f"arm_{arm_name}"], OperationalSpaceController ), f"Only IK and OSC controllers are supported for arm {arm_name}!" - target_pos, target_orn = arm_action[:3], T.quat2axisangle(T.euler2quat(arm_action[3:6])) + target_pos, target_orn = arm_action[:3], arm_action[3:6] action[self.arm_action_idx[arm_name]] = th.cat((target_pos, target_orn)) # gripper action assert isinstance( diff --git a/omnigibson/robots/r1.py b/omnigibson/robots/r1.py index 6979026e0..3321192c4 100644 --- a/omnigibson/robots/r1.py +++ b/omnigibson/robots/r1.py @@ -1,4 +1,5 @@ import os +from functools import cached_property import torch as th @@ -196,15 +197,15 @@ def assisted_grasp_end_points(self): for arm in self.arm_names } - @property + @cached_property def floor_touching_base_link_names(self): return ["wheel_link1", "wheel_link2", "wheel_link3"] - @property + @cached_property def trunk_link_names(self): return ["torso_link1", "torso_link2", "torso_link3", "torso_link4"] - @property + @cached_property def trunk_joint_names(self): return [f"torso_joint{i}" for i in range(1, 5)] @@ -216,23 +217,23 @@ def n_arms(cls): def arm_names(cls): return ["left", "right"] - @property + @cached_property def arm_link_names(self): return {arm: [f"{arm}_arm_link{i}" for i in range(1, 7)] for arm in self.arm_names} - @property + @cached_property def arm_joint_names(self): return {arm: [f"{arm}_arm_joint{i}" for i in range(1, 7)] for arm in self.arm_names} - @property + @cached_property def eef_link_names(self): return {arm: f"{arm}_eef_link" for arm in self.arm_names} - @property + @cached_property def finger_link_names(self): return {arm: [f"{arm}_gripper_link{i}" for i in range(1, 3)] for arm in self.arm_names} - @property + @cached_property def finger_joint_names(self): return {arm: [f"{arm}_gripper_axis{i}" for i in range(1, 3)] for arm in self.arm_names} diff --git a/omnigibson/robots/robot_base.py b/omnigibson/robots/robot_base.py index 834b9bbb8..969a8d0b1 100644 --- a/omnigibson/robots/robot_base.py +++ b/omnigibson/robots/robot_base.py @@ -14,6 +14,7 @@ VisionSensor, create_sensor, ) +from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.utils.constants import PrimType from omnigibson.utils.gym_utils import GymObservable from
omnigibson.utils.numpy_utils import NumpyTypes @@ -295,10 +296,11 @@ def _get_proprioception_dict(self): dict: keyword-mapped proprioception observations available for this robot. Can be extended by subclasses """ - joint_positions = ControllableObjectViewAPI.get_joint_positions(self.articulation_root_path) - joint_velocities = ControllableObjectViewAPI.get_joint_velocities(self.articulation_root_path) - joint_efforts = ControllableObjectViewAPI.get_joint_efforts(self.articulation_root_path) + joint_positions = cb.to_torch(ControllableObjectViewAPI.get_joint_positions(self.articulation_root_path)) + joint_velocities = cb.to_torch(ControllableObjectViewAPI.get_joint_velocities(self.articulation_root_path)) + joint_efforts = cb.to_torch(ControllableObjectViewAPI.get_joint_efforts(self.articulation_root_path)) pos, quat = ControllableObjectViewAPI.get_position_orientation(self.articulation_root_path) + pos, quat = cb.to_torch(pos), cb.to_torch(quat) ori = T.quat2euler(quat) ori_2d = T.z_angle_from_quat(quat) @@ -316,8 +318,8 @@ def _get_proprioception_dict(self): robot_2d_ori=ori_2d, robot_2d_ori_cos=th.cos(ori_2d), robot_2d_ori_sin=th.sin(ori_2d), - robot_lin_vel=ControllableObjectViewAPI.get_linear_velocity(self.articulation_root_path), - robot_ang_vel=ControllableObjectViewAPI.get_angular_velocity(self.articulation_root_path), + robot_lin_vel=cb.to_torch(ControllableObjectViewAPI.get_linear_velocity(self.articulation_root_path)), + robot_ang_vel=cb.to_torch(ControllableObjectViewAPI.get_angular_velocity(self.articulation_root_path)), ) def _load_observation_space(self): diff --git a/omnigibson/robots/stretch.py b/omnigibson/robots/stretch.py index e8e88a3cb..87ae373e3 100644 --- a/omnigibson/robots/stretch.py +++ b/omnigibson/robots/stretch.py @@ -1,5 +1,6 @@ import math import os +from functools import cached_property import torch as th @@ -90,15 +91,15 @@ def disabled_collision_pairs(self): ["link_arm_l0", "link_arm_l3"], ] - @property + @cached_property def base_joint_names(self): return ["joint_left_wheel", "joint_right_wheel"] - @property + @cached_property def camera_joint_names(self): return ["joint_head_pan", "joint_head_tilt"] - @property + @cached_property def arm_link_names(self): return { self.default_arm: [ @@ -113,7 +114,7 @@ def arm_link_names(self): ] } - @property + @cached_property def arm_joint_names(self): return { self.default_arm: [ @@ -128,11 +129,11 @@ def arm_joint_names(self): ] } - @property + @cached_property def eef_link_names(self): return {self.default_arm: "eef_link"} - @property + @cached_property def finger_link_names(self): return { self.default_arm: [ @@ -141,6 +142,6 @@ def finger_link_names(self): ] } - @property + @cached_property def finger_joint_names(self): return {self.default_arm: ["joint_gripper_finger_right", "joint_gripper_finger_left"]} diff --git a/omnigibson/robots/tiago.py b/omnigibson/robots/tiago.py index 5ee13e1dc..2cfa8876a 100644 --- a/omnigibson/robots/tiago.py +++ b/omnigibson/robots/tiago.py @@ -1,5 +1,6 @@ import math import os +from functools import cached_property from typing import Literal import torch as th @@ -139,7 +140,7 @@ def __init__( **kwargs, ) - @property + @cached_property def arm_joint_names(self): names = dict() for arm in self.arm_names: @@ -208,7 +209,7 @@ def _post_load(self): def base_footprint_link_name(self): return "base_footprint" - @property + @cached_property def floor_touching_base_link_names(self): return [ "wheel_front_left_link", @@ -363,15 +364,15 @@ def disabled_collision_pairs(self): 
["arm_right_tool_link", "wrist_right_ft_tool_link"], ] - @property + @cached_property def camera_joint_names(self): return ["head_1_joint", "head_2_joint"] - @property + @cached_property def trunk_joint_names(self): return ["torso_lift_joint"] - @property + @cached_property def manipulation_link_names(self): return [ "torso_fixed_link", @@ -409,19 +410,19 @@ def manipulation_link_names(self): "xtion_link", ] - @property + @cached_property def arm_link_names(self): return {arm: [f"arm_{arm}_{i}_link" for i in range(1, 8)] for arm in self.arm_names} - @property + @cached_property def eef_link_names(self): return {arm: f"{arm}_eef_link" for arm in self.arm_names} - @property + @cached_property def finger_link_names(self): return {arm: [f"gripper_{arm}_right_finger_link", f"gripper_{arm}_left_finger_link"] for arm in self.arm_names} - @property + @cached_property def finger_joint_names(self): return { arm: [f"gripper_{arm}_right_finger_joint", f"gripper_{arm}_left_finger_joint"] for arm in self.arm_names diff --git a/omnigibson/robots/turtlebot.py b/omnigibson/robots/turtlebot.py index 6dc053ed9..b184147eb 100644 --- a/omnigibson/robots/turtlebot.py +++ b/omnigibson/robots/turtlebot.py @@ -1,4 +1,5 @@ import os +from functools import cached_property import torch as th @@ -21,7 +22,7 @@ def wheel_radius(self): def wheel_axle_length(self): return 0.23 - @property + @cached_property def base_joint_names(self): return ["wheel_left_joint", "wheel_right_joint"] diff --git a/omnigibson/robots/vx300s.py b/omnigibson/robots/vx300s.py index fccdf4234..624a4206c 100644 --- a/omnigibson/robots/vx300s.py +++ b/omnigibson/robots/vx300s.py @@ -1,5 +1,6 @@ import math import os +from functools import cached_property import torch as th @@ -133,7 +134,7 @@ def _default_joint_pos(self): def finger_lengths(self): return {self.default_arm: 0.1} - @property + @cached_property def arm_link_names(self): return { self.default_arm: [ @@ -147,7 +148,7 @@ def arm_link_names(self): ] } - @property + @cached_property def arm_joint_names(self): return { self.default_arm: [ @@ -160,15 +161,15 @@ def arm_joint_names(self): ] } - @property + @cached_property def eef_link_names(self): return {self.default_arm: "eef_link"} - @property + @cached_property def finger_link_names(self): return {self.default_arm: ["left_finger_link", "right_finger_link"]} - @property + @cached_property def finger_joint_names(self): return {self.default_arm: ["left_finger", "right_finger"]} diff --git a/omnigibson/sensors/vision_sensor.py b/omnigibson/sensors/vision_sensor.py index 1ebdc2729..e4902b68b 100644 --- a/omnigibson/sensors/vision_sensor.py +++ b/omnigibson/sensors/vision_sensor.py @@ -575,16 +575,28 @@ def _remove_modality_from_backend(self, modality): modality (str): Name of the modality to remove from the Replicator backend """ if self._annotators.get(modality, None) is not None: - self._annotators[modality].detach([self._render_product]) + # Passing an explicit list is bugged -- see omni source code + # So we only pass in the product directly, which gets post-processed correctly + self._annotators[modality].detach(self._render_product) self._annotators[modality] = None def remove(self): # Remove from global sensors dictionary self.SENSORS.pop(self.prim_path) + # Remove all modalities + for modality in tuple(self.modalities): + self.remove_modality(modality) + + # Destroy the render product + self._render_product.destroy() + # Remove the viewport if it's not the main viewport if self._viewport.name != "Viewport": self._viewport.destroy() + 
else: + # We're deleting our camera, so set the normal viewport camera to the default /Perspective camera + self.active_camera_path = "/OmniverseKit_Persp" # Run super super().remove() @@ -882,10 +894,9 @@ def clear(cls): """ Clear all the class-wide variables. """ - for sensor in cls.SENSORS.values(): - # Destroy any sensor that is not attached to the main viewport window - if sensor._viewport.name != "Viewport": - sensor._viewport.destroy() + # Remove all sensors + for sensor in tuple(cls.SENSORS.values()): + sensor.remove() # Render to update render() diff --git a/omnigibson/simulator.py b/omnigibson/simulator.py index ce06d7a21..96579f14c 100644 --- a/omnigibson/simulator.py +++ b/omnigibson/simulator.py @@ -73,6 +73,7 @@ m.KIT_FILES = { (4, 0, 0): "omnigibson_4_0_0.kit", (4, 1, 0): "omnigibson_4_1_0.kit", + (4, 2, 0): "omnigibson_4_2_0.kit", } @@ -158,7 +159,6 @@ def _launch_app(): # Enable additional extensions we need lazy.omni.isaac.core.utils.extensions.enable_extension("omni.flowusd") - lazy.omni.isaac.core.utils.extensions.enable_extension("omni.particle.system.bundle") # Additional import for windows if os.name == "nt": @@ -221,6 +221,7 @@ def _launch_app(): "Content", "Flow", "Semantics Schema Editor", + "VR", ] ) @@ -239,6 +240,13 @@ def _launch_app(): # Loading Isaac Sim disables Ctrl+C, so we need to re-enable it signal.signal(signal.SIGINT, og.shutdown_handler) + # Set compute backend + import omnigibson.utils.backend_utils as _backend_utils + + _backend_utils._compute_backend.set_methods( + _backend_utils._ComputeNumpyBackend if gm.USE_NUMPY_CONTROLLER_BACKEND else _backend_utils._ComputeTorchBackend + ) + return app diff --git a/omnigibson/systems/micro_particle_system.py b/omnigibson/systems/micro_particle_system.py index a9322638a..0f08befb2 100644 --- a/omnigibson/systems/micro_particle_system.py +++ b/omnigibson/systems/micro_particle_system.py @@ -494,9 +494,6 @@ def initialize(self, scene): self.system_prim = self._create_particle_system() # Get material material = self._get_particle_material_template() - # Load the material if it's newly created and has never been loaded before - if not material.loaded: - material.load() material.add_user(self) self._material = material # Bind the material to the particle system (for isosurface) and the prototypes (for non-isosurface) diff --git a/omnigibson/utils/asset_conversion_utils.py b/omnigibson/utils/asset_conversion_utils.py index 85a5bc2ef..ed49e3130 100644 --- a/omnigibson/utils/asset_conversion_utils.py +++ b/omnigibson/utils/asset_conversion_utils.py @@ -1914,6 +1914,7 @@ def get_collision_approximation_for_urdf( during collision generation process """ # Load URDF + urdf_dir = os.path.dirname(urdf_path) tree = ET.parse(urdf_path) root = tree.getroot() @@ -1923,7 +1924,8 @@ def get_collision_approximation_for_urdf( no_decompose_links = set() if no_decompose_links is None else set(no_decompose_links) visual_only_links = set() if visual_only_links is None else set(visual_only_links) ignore_links = set() if ignore_links is None else set(ignore_links) - col_mesh_folder = pathlib.Path(os.path.dirname(urdf_path)) / "meshes" / "collision" + col_mesh_rel_folder = "meshes/collision" + col_mesh_folder = pathlib.Path(urdf_dir) / col_mesh_rel_folder col_mesh_folder.mkdir(exist_ok=True, parents=True) for link in root.findall("link"): link_name = link.attrib["name"] @@ -1996,7 +1998,7 @@ def get_collision_approximation_for_urdf( collision_geometry_xml = ET.SubElement(collision_xml, "geometry") collision_mesh_xml = 
ET.SubElement(collision_geometry_xml, "mesh") collision_mesh_xml.attrib = { - "filename": str(col_mesh_folder / collision_filename), + "filename": os.path.join(col_mesh_rel_folder, collision_filename), "scale": " ".join([str(item) for item in collision_scale]), } @@ -2308,7 +2310,7 @@ def import_og_asset_from_urdf( decomposition applied. This will only use the convex hull visual_only_links (None or list of str): If specified, links that should have no colliders associated with it merge_fixed_joints (bool): Whether to merge fixed joints or not - dataset_root (str): Dataset root directory to use for writing imported USD file. Default is external dataset + dataset_root (str): Dataset root directory to use for writing imported USD file. Default is custom dataset path set from the global macros hull_count (int): Maximum number of convex hulls to decompose individual visual meshes into. Only relevant if @collision_method is "coacd" diff --git a/omnigibson/utils/backend_utils.py b/omnigibson/utils/backend_utils.py new file mode 100644 index 000000000..879d2771d --- /dev/null +++ b/omnigibson/utils/backend_utils.py @@ -0,0 +1,117 @@ +import numpy as np +import torch as th + +import omnigibson.utils.transform_utils as TT +import omnigibson.utils.transform_utils_np as NT +from omnigibson.utils.python_utils import recursively_convert_from_torch + + +class _ComputeBackend: + array = None + int_array = None + prod = None + cat = None + zeros = None + ones = None + to_numpy = None + from_numpy = None + to_torch = None + from_torch = None + from_torch_recursive = None + allclose = None + arr_type = None + as_int = None + as_float32 = None + pinv = None + meshgrid = None + full = None + logical_or = None + all = None + abs = None + sqrt = None + mean = None + copy = None + eye = None + view = None + arange = None + where = None + squeeze = None + T = None + + @classmethod + def set_methods(cls, backend): + for attr, fcn in backend.__dict__.items(): + # Do not override reserved functions + if attr.startswith("__"): + continue + # Set function to this backend + setattr(cls, attr, fcn) + + +class _ComputeTorchBackend(_ComputeBackend): + array = lambda *args: th.tensor(*args, dtype=th.float32) + int_array = lambda *args: th.tensor(*args, dtype=th.int32) + prod = th.prod + cat = th.cat + zeros = lambda *args: th.zeros(*args, dtype=th.float32) + ones = lambda *args: th.ones(*args, dtype=th.float32) + to_numpy = lambda x: x.numpy() + from_numpy = lambda x: th.from_numpy(x) + to_torch = lambda x: x + from_torch = lambda x: x + from_torch_recursive = lambda dic: dic + allclose = th.allclose + arr_type = th.Tensor + as_int = lambda arr: arr.int() + as_float32 = lambda arr: arr.float() + pinv = th.linalg.pinv + meshgrid = lambda idx_a, idx_b: th.meshgrid(idx_a, idx_b, indexing="xy") + full = lambda shape, fill_value: th.full(shape, fill_value, dtype=th.float32) + logical_or = th.logical_or + all = th.all + abs = th.abs + sqrt = th.sqrt + mean = lambda val, dim=None, keepdim=False: th.mean(val, dim=dim, keepdim=keepdim) + copy = lambda arr: arr.clone() + eye = th.eye + view = lambda arr, shape: arr.view(shape) + arange = th.arange + where = th.where + squeeze = lambda arr, dim=None: arr.squeeze(dim=dim) + T = TT + + +class _ComputeNumpyBackend(_ComputeBackend): + array = lambda *args: np.array(*args, dtype=np.float32) + int_array = lambda *args: np.array(*args, dtype=np.int32) + prod = np.prod + cat = np.concatenate + zeros = lambda *args: np.zeros(*args, dtype=np.float32) + ones = lambda *args: np.ones(*args,
dtype=np.float32) + to_numpy = lambda x: x + from_numpy = lambda x: x + to_torch = lambda x: th.from_numpy(x) + from_torch = lambda x: x.numpy() + from_torch_recursive = recursively_convert_from_torch + allclose = np.allclose + arr_type = np.ndarray + as_int = lambda arr: arr.astype(int) + as_float32 = lambda arr: arr.astype(np.float32) + pinv = np.linalg.pinv + meshgrid = lambda idx_a, idx_b: np.ix_(idx_a, idx_b) + full = lambda shape, fill_value: np.full(shape, fill_value, dtype=np.float32) + logical_or = np.logical_or + all = np.all + abs = np.abs + sqrt = np.sqrt + mean = lambda val, dim=None, keepdim=False: np.mean(val, axis=dim, keepdims=keepdim) + copy = lambda arr: np.array(arr) + eye = np.eye + view = lambda arr, shape: arr.reshape(shape) + arange = np.arange + where = np.where + squeeze = lambda arr, dim=None: arr.squeeze(axis=dim) + T = NT + + +_compute_backend = _ComputeBackend diff --git a/omnigibson/utils/config_utils.py b/omnigibson/utils/config_utils.py index dd467c382..8012da699 100644 --- a/omnigibson/utils/config_utils.py +++ b/omnigibson/utils/config_utils.py @@ -2,6 +2,7 @@ import json import os +import numpy as np import torch as th import yaml @@ -74,6 +75,6 @@ def load_default_config(): class TorchEncoder(json.JSONEncoder): def default(self, obj): - if isinstance(obj, th.Tensor): + if isinstance(obj, (th.Tensor, np.ndarray)): return obj.tolist() return json.JSONEncoder.default(self, obj) diff --git a/omnigibson/utils/constants.py b/omnigibson/utils/constants.py index 4cd7b8ab8..5680d54fc 100644 --- a/omnigibson/utils/constants.py +++ b/omnigibson/utils/constants.py @@ -76,6 +76,7 @@ class PrimType(IntEnum): class EmitterType(IntEnum): FIRE = 0 STEAM = 1 + MIST = 2 # Valid primitive mesh types diff --git a/omnigibson/utils/control_utils.py b/omnigibson/utils/control_utils.py index 7585bbbb1..00350688b 100644 --- a/omnigibson/utils/control_utils.py +++ b/omnigibson/utils/control_utils.py @@ -138,33 +138,3 @@ def solve( return th.tensor(ik_results.cspace_position, dtype=th.float32) else: return None - - -@th.jit.script -def orientation_error(desired, current): - """ - This function calculates a 3-dimensional orientation error vector for use in the - impedance controller. It does this by computing the delta rotation between the - inputs and converting that rotation to exponential coordinates (axis-angle - representation, where the 3d vector is axis * angle). - See https://en.wikipedia.org/wiki/Axis%E2%80%93angle_representation for more information. 
- Optimized function to determine orientation error from matrices - - Args: - desired (tensor): (..., 3, 3) where final two dims are 2d array representing target orientation matrix - current (tensor): (..., 3, 3) where final two dims are 2d array representing current orientation matrix - Returns: - tensor: (..., 3) where final dim is (ax, ay, az) axis-angle representing orientation error - """ - # Compute batch size - batch_size = desired.numel() // 9 # Each 3x3 matrix has 9 elements - - desired_flat = desired.reshape(batch_size, 3, 3) - current_flat = current.reshape(batch_size, 3, 3) - - rc1, rc2, rc3 = current_flat[:, :, 0], current_flat[:, :, 1], current_flat[:, :, 2] - rd1, rd2, rd3 = desired_flat[:, :, 0], desired_flat[:, :, 1], desired_flat[:, :, 2] - - error = 0.5 * (th.linalg.cross(rc1, rd1) + th.linalg.cross(rc2, rd2) + th.linalg.cross(rc3, rd3)) - - return error.reshape(desired.shape[:-2] + (3,)) diff --git a/omnigibson/utils/deprecated_utils.py b/omnigibson/utils/deprecated_utils.py index 740407476..a5933ac9e 100644 --- a/omnigibson/utils/deprecated_utils.py +++ b/omnigibson/utils/deprecated_utils.py @@ -20,8 +20,6 @@ from omni.isaac.core.utils.prims import get_prim_at_path, get_prim_parent from omni.kit.primitive.mesh.command import CreateMeshPrimWithDefaultXformCommand as CMPWDXC from omni.kit.primitive.mesh.command import _get_all_evaluators -from omni.particle.system.core.scripts.core import Core as OmniCore -from omni.particle.system.core.scripts.utils import Utils as OmniUtils from omni.replicator.core import random_colours from PIL import Image, ImageDraw from pxr import PhysxSchema, Sdf, Usd, UsdGeom, UsdPhysics, UsdShade @@ -82,96 +80,6 @@ def __init__(self, prim_type: str, **kwargs): assert isinstance(self._evaluator_class, type) -class Utils(OmniUtils): - def create_material(self, name): - material_url = carb.settings.get_settings().get("/exts/omni.particle.system.core/material") - - # TODO: THIS IS THE ONLY LINE WE CHANGE! "/" SHOULD BE "" - material_path = "" - default_prim = self.stage.GetDefaultPrim() - if default_prim: - material_path = default_prim.GetPath().pathString - - if not self.stage.GetPrimAtPath(material_path + "/Looks"): - self.stage.DefinePrim(material_path + "/Looks", "Scope") - material_path += "/Looks/" + name - material_path = ou.get_stage_next_free_path(self.stage, material_path, False) - prim = self.stage.DefinePrim(material_path, "Material") - if material_url: - prim.GetReferences().AddReference(material_url) - else: - carb.log_error("Failed to find material URL in settings") - - return [material_path] - - -class Core(OmniCore): - """ - Subclass that overrides a specific function within Omni's Core class to fix a bug - """ - - def __init__(self, popup_callback: Callable[[str], None], particle_system_name: str): - self._popup_callback = popup_callback - self.utils = Utils() - self.context = ou.get_context() - self.stage = self.context.get_stage() - self.selection = self.context.get_selection() - self.particle_system_name = particle_system_name - self.sub_stage_update = self.context.get_stage_event_stream().create_subscription_to_pop(self.on_stage_update) - self.on_stage_update() - - def get_compute_graph(self, selected_paths, create_new_graph=True, created_paths=None): - """ - Returns the first ComputeGraph found in selected_paths. - If no graph is found and create_new_graph is true, a new graph will be created and its - path appended to created_paths (if provided). 
- """ - graph = None - graph_paths = [ - path - for path in selected_paths - if self.stage.GetPrimAtPath(path).GetTypeName() in ["ComputeGraph", "OmniGraph"] - ] - - if len(graph_paths) > 0: - graph = ogc.get_graph_by_path(graph_paths[0]) - if len(graph_paths) > 1: - carb.log_warn( - f"Multiple ComputeGraph prims selected. Only the first will be used: {graph.get_path_to_graph()}" - ) - elif create_new_graph: - # If no graph was found in the selected prims, we'll make a new graph. - # TODO: THIS IS THE ONLY LINE THAT WE CHANGE! ONCE FIXED, REMOVE THIS - graph_path = Sdf.Path(f"/OmniGraph/{self.particle_system_name}").MakeAbsolutePath(Sdf.Path.absoluteRootPath) - graph_path = ou.get_stage_next_free_path(self.stage, graph_path, True) - - # prim = self.stage.GetDefaultPrim() - # path = str(prim.GetPath()) if prim else "" - self.stage.DefinePrim("/OmniGraph", "Scope") - - container_graphs = ogc.get_global_container_graphs() - # FIXME: container_graphs[0] should be the simulation orchestration graph, but this may change in the future. - container_graph = container_graphs[0] - result, wrapper_node = ogc.cmds.CreateGraphAsNode( - graph=container_graph, - node_name=Sdf.Path(graph_path).name, - graph_path=graph_path, - evaluator_name="push", - is_global_graph=True, - backed_by_usd=True, - fc_backing_type=ogc.GraphBackingType.GRAPH_BACKING_TYPE_FLATCACHE_SHARED, - pipeline_stage=ogc.GraphPipelineStage.GRAPH_PIPELINE_STAGE_SIMULATION, - ) - graph = wrapper_node.get_wrapped_graph() - - if created_paths is not None: - created_paths.append(graph.get_path_to_graph()) - - carb.log_info(f"No ComputeGraph selected. A new graph has been created at {graph.get_path_to_graph()}") - - return graph - - class ArticulationView(_ArticulationView): """ArticulationView with some additional functionality implemented.""" diff --git a/omnigibson/utils/processing_utils.py b/omnigibson/utils/processing_utils.py index 0f2527cd8..bd81ceba8 100644 --- a/omnigibson/utils/processing_utils.py +++ b/omnigibson/utils/processing_utils.py @@ -1,5 +1,6 @@ import torch as th +from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.utils.python_utils import Serializable @@ -66,7 +67,7 @@ def __init__(self, obs_dim, filter_width): self.obs_dim = obs_dim assert filter_width > 0, f"MovingAverageFilter must have a non-zero size! 
Got: {filter_width}" self.filter_width = filter_width - self.past_samples = th.zeros((filter_width, obs_dim)) + self.past_samples = cb.zeros((filter_width, obs_dim)) self.current_idx = 0 self.fully_filled = False # Whether the entire filter buffer is filled or not @@ -87,12 +88,12 @@ def estimate(self, observation): # Compute value based on whether we're fully filled or not if not self.fully_filled: - val = self.past_samples[: self.current_idx + 1, :].mean(dim=0) + val = cb.mean(self.past_samples[: self.current_idx + 1, :], dim=0) # Denote that we're fully filled if we're at the end of the buffer if self.current_idx == self.filter_width - 1: self.fully_filled = True else: - val = self.past_samples.mean(dim=0) + val = cb.mean(self.past_samples, dim=0) # Increment the index to write the next sample to self.current_idx = (self.current_idx + 1) % self.filter_width @@ -108,14 +109,14 @@ def reset(self): @property def state_size(self): # This is the size of the internal buffer plus the current index and fully filled single values - return th.prod(self.past_samples.shape) + 2 + return int(cb.prod(cb.array(self.past_samples.shape))) + 2 def _dump_state(self): # Run super init first state = super()._dump_state() # Add info from this filter - state["past_samples"] = self.past_samples + state["past_samples"] = cb.to_torch(self.past_samples) state["current_idx"] = self.current_idx state["fully_filled"] = self.fully_filled @@ -126,7 +127,7 @@ def _load_state(self, state): super()._load_state(state=state) # Load relevant info for this filter - self.past_samples = state["past_samples"] + self.past_samples = cb.from_torch(state["past_samples"]) self.current_idx = state["current_idx"] self.fully_filled = state["fully_filled"] @@ -171,7 +172,7 @@ def __init__(self, obs_dim, alpha=0.9): alpha (float): The relative weighting of new samples relative to older samples """ self.obs_dim = obs_dim - self.avg = th.zeros(obs_dim) + self.avg = cb.zeros(obs_dim) self.num_samples = 0 self.alpha = alpha @@ -190,7 +191,7 @@ def estimate(self, observation): self.avg = self.alpha * observation + (1.0 - self.alpha) * self.avg self.num_samples += 1 - return th.tensor(self.avg) + return cb.copy(self.avg) def reset(self): # Clear internal state @@ -207,7 +208,7 @@ def _dump_state(self): state = super()._dump_state() # Add info from this filter - state["avg"] = th.tensor(self.avg) + state["avg"] = cb.to_torch(self.avg) state["num_samples"] = self.num_samples return state @@ -217,7 +218,7 @@ def _load_state(self, state): super()._load_state(state=state) # Load relevant info for this filter - self.avg = th.tensor(state["avg"]) + self.avg = cb.from_torch(state["avg"]) self.num_samples = state["num_samples"] def serialize(self, state): diff --git a/omnigibson/utils/python_utils.py b/omnigibson/utils/python_utils.py index ad6ed3d7f..9f1e96bbe 100644 --- a/omnigibson/utils/python_utils.py +++ b/omnigibson/utils/python_utils.py @@ -783,6 +783,25 @@ def recursively_convert_to_torch(state): return state + +def recursively_convert_from_torch(state): + # For all the lists in state dict, convert from torch tensor -> numpy array + import numpy as np + + for key, value in state.items(): + if isinstance(value, dict): + state[key] = recursively_convert_from_torch(value) + elif isinstance(value, th.Tensor): + state[key] = value.cpu().numpy() + elif (isinstance(value, list) or isinstance(value, tuple)) and len(value) > 0: + if isinstance(value[0], dict): + state[key] = [recursively_convert_from_torch(val) for val in value] + elif isinstance(value[0], th.Tensor):
+ state[key] = [tensor.numpy() for tensor in value] + elif isinstance(value[0], int) or isinstance(value[0], float): + state[key] = np.array(value) + return state + + def h5py_group_to_torch(group): state = {} for key, value in group.items(): diff --git a/omnigibson/utils/teleop_utils.py b/omnigibson/utils/teleop_utils.py index 9a48eec31..a5fdffa8a 100644 --- a/omnigibson/utils/teleop_utils.py +++ b/omnigibson/utils/teleop_utils.py @@ -1,5 +1,7 @@ import time -from typing import Iterable, Optional, Tuple +from collections.abc import Iterable +from dataclasses import dataclass, field +from typing import Literal, Optional, Tuple import torch as th @@ -8,20 +10,48 @@ import omnigibson.utils.transform_utils as T from omnigibson.macros import create_module_macros from omnigibson.objects import USDObject +from omnigibson.prims.geom_prim import VisualGeomPrim +from omnigibson.prims.xform_prim import XFormPrim from omnigibson.robots.robot_base import BaseRobot +from omnigibson.sensors import VisionSensor +from omnigibson.utils.control_utils import IKSolver +from omnigibson.utils.ui_utils import KeyboardEventHandler, create_module_logger +from omnigibson.utils.usd_utils import scene_relative_prim_path_to_absolute try: from telemoma.configs.base_config import teleop_config + + # from telemoma.human_interface.teleop_core import TeleopAction, TeleopObservation from telemoma.human_interface.teleop_policy import TeleopPolicy from telemoma.utils.general_utils import AttrDict except ImportError as e: raise e from ValueError("For teleoperation, install telemoma by running 'pip install telemoma'") +# Create module logger +log = create_module_logger(module_name=__name__) + m = create_module_macros(module_path=__file__) m.movement_speed = 0.2 # the speed of the robot base movement +@dataclass +class TeleopAction(AttrDict): + left: th.Tensor = field(default_factory=lambda: th.cat((th.zeros(6), th.ones(1)))) + right: th.Tensor = field(default_factory=lambda: th.cat((th.zeros(6), th.ones(1)))) + base: th.Tensor = field(default_factory=lambda: th.zeros(3)) + torso: float = field(default_factory=lambda: 0.0) + extra: dict = field(default_factory=dict) + + +@dataclass +class TeleopObservation(AttrDict): + left: th.Tensor = field(default_factory=lambda: th.cat((th.zeros(6), th.ones(2)))) + right: th.Tensor = field(default_factory=lambda: th.cat((th.zeros(6), th.ones(2)))) + base: th.Tensor = field(default_factory=lambda: th.zeros(3)) + torso: float = field(default_factory=lambda: 0.0) + extra: dict = field(default_factory=dict) + + class TeleopSystem(TeleopPolicy): """ Base class for teleop policy """ @@ -36,8 +66,8 @@ def __init__(self, config: AttrDict, robot: BaseRobot, show_control_marker: bool show_control_marker (bool): whether to show a visual marker that indicates the target pose of the control.
""" super().__init__(config) - self.teleop_action: TeleopAction = TeleopAction() - self.robot_obs: TeleopObservation = TeleopObservation() + self.teleop_action = TeleopAction() + self.robot_obs = TeleopObservation() self.robot = robot self.robot_arms = ["left", "right"] if self.robot.n_arms == 2 else ["right"] # robot parameters @@ -119,9 +149,10 @@ def __init__( show_control_marker: bool = False, system: str = "SteamVR", disable_display_output: bool = False, - enable_touchpad_movement: bool = False, - align_anchor_to_robot_base: bool = False, - use_hand_tracking: bool = False, + eef_tracking_mode: Literal["controller", "hand", "disabled"] = "controller", + # TODO: fix this to only take something like a prim path + align_anchor_to: Literal["camera", "base", "touchpad"] = "camera", + view_angle_limits: Optional[Iterable[float]] = None, ) -> None: """ Initializes the VR system @@ -130,32 +161,34 @@ def __init__( show_control_marker (bool): whether to show a control marker system (str): the VR system to use, one of ["OpenXR", "SteamVR"], default is "SteamVR". disable_display_output (bool): whether we will not display output to the VR headset (only use controller tracking), default is False. - enable_touchpad_movement (bool): whether to enable VR system anchor movement by controller, default is False. - align_anchor_to_robot_base (bool): whether to align VR anchor to robot base, default is False. - use_hand_tracking (bool): whether to use hand tracking instead of controllers, default is False. - show_controller (bool): whether to show the controller model in the scene, default is False. - - NOTE: enable_touchpad_movement and align_anchor_to_robot_base cannot be enabled at the same time. - The former is to enable free movement of the VR system (i.e. the user), while the latter is constraining the VR system to the robot pose. - """ + eef_tracking_mode (Literal): whether to use controller tracking or hand tracking, one of ["controller", "hand", "disabled"], default is controller. + align_anchor_to (Literal): specify where the VR view aligns to, one of ["camera", "base", "touchpad"], defaults to robot camera. + The "touchpad" option enables free movement of the VR view (i.e. the user), while the other two constrain the VR view to the either the robot base or camera pose. + view_angle_limits (Iterable): the view angle limits for the VR system (roll, pitch, and yaw) in degrees, default is None. 
+ """ + align_to_prim = isinstance(align_anchor_to, XFormPrim) + assert ( + align_anchor_to + in [ + "camera", + "base", + "touchpad", + ] + or align_to_prim + ), "align_anchor_to must be one of ['camera', 'base', 'touchpad'] or a XFormPrim" + self.align_anchor_to = align_anchor_to + self.anchor_prim = None + if align_to_prim: + self.set_anchor_with_prim(self.align_anchor_to) self.raw_data = {} # enable xr extension lazy.omni.isaac.core.utils.extensions.enable_extension("omni.kit.xr.profile.vr") - self.xr_device_class = lazy.omni.kit.xr.core.XRDeviceClass # run super method super().__init__(teleop_config, robot, show_control_marker) - # we want to further slow down the movement speed if we are using touchpad movement - if enable_touchpad_movement: - self.movement_speed *= 0.3 # get xr core and profile self.xr_core = lazy.omni.kit.xr.core.XRCore.get_singleton() self.vr_profile = self.xr_core.get_profile("vr") self.disable_display_output = disable_display_output - self.enable_touchpad_movement = enable_touchpad_movement - self.align_anchor_to_robot_base = align_anchor_to_robot_base - assert not ( - self.enable_touchpad_movement and self.align_anchor_to_robot_base - ), "enable_touchpad_movement and align_anchor_to_robot_base cannot be True at the same time!" # set avatar if self.show_control_marker: self.vr_profile.set_avatar( @@ -166,9 +199,12 @@ def __init__( lazy.omni.kit.xr.ui.stage.common.XRAvatarManager.get_singleton().create_avatar("empty_avatar", {}) ) # set anchor mode to be custom anchor + # lazy.carb.settings.get_settings().set(self.vr_profile.get_scene_persistent_path() + "anchorMode", "scene_origin") lazy.carb.settings.get_settings().set( - self.vr_profile.get_scene_persistent_path() + "anchorMode", "scene origin" + self.vr_profile.get_scene_persistent_path() + "anchorMode", "custom_anchor" ) + # set override leveled basis to be true (if this is false, headset would not track anchor pitch orientation) + lazy.carb.settings.get_settings().set(self.vr_profile.get_persistent_path() + "overrideLeveledBasis", True) # set vr system lazy.carb.settings.get_settings().set(self.vr_profile.get_persistent_path() + "system/display", system) # set display mode @@ -180,17 +216,126 @@ def __init__( self.hmd = None self.controllers = {} self.trackers = {} - self.xr2og_orn_offset = th.tensor([0.5, -0.5, -0.5, -0.5]) - self.og2xr_orn_offset = th.tensor([-0.5, 0.5, 0.5, -0.5]) # setup event subscriptions self.reset() - self.use_hand_tracking = use_hand_tracking - if use_hand_tracking: + self.eef_tracking_mode = eef_tracking_mode + if eef_tracking_mode == "hand": self.raw_data["hand_data"] = {} self.teleop_action.hand_data = {} self._hand_tracking_subscription = self.xr_core.get_event_stream().create_subscription_to_pop_by_type( lazy.omni.kit.xr.core.XRCoreEventType.hand_joints, self._update_hand_tracking_data, name="hand tracking" ) + self.robot_cameras = [s for s in self.robot.sensors.values() if isinstance(s, VisionSensor)] + # TODO: this camera id is specific to R1, because it has 3 cameras, we need to make this more general + self.active_camera_id = 2 + if self.align_anchor_to == "camera" and len(self.robot_cameras) == 0: + raise ValueError("No camera found on robot, cannot align anchor to camera") + # we want to further slow down the movement speed if we are using touchpad movement + if self.align_anchor_to == "touchpad": + self.movement_speed *= 0.3 + # self.global_ik_solver = {} + # for arm in self.robot_arms: + # control_idx = self.robot.arm_control_idx[arm] + # self.global_ik_solver[arm] = 
IKSolver( + # robot_description_path=self.robot.robot_arm_descriptor_yamls[arm], + # robot_urdf_path=self.robot.urdf_path, + # reset_joint_pos=self.robot.get_joint_positions()[control_idx], + # eef_name=self.robot.eef_link_names[arm], + # ) + self.head_canonical_transformation = None + KeyboardEventHandler.add_keyboard_callback( + key=lazy.carb.input.KeyboardInput.ENTER, + callback_fn=self.register_head_canonical_transformation, + ) + KeyboardEventHandler.add_keyboard_callback( + key=lazy.carb.input.KeyboardInput.ESCAPE, + callback_fn=self.stop, + ) + self._update_camera_callback = self.xr_core.get_event_stream().create_subscription_to_pop_by_type( + lazy.omni.kit.xr.core.XRCoreEventType.pre_render_update, self._update_camera_pose, name="update camera" + ) + self._view_blackout_prim = None + self._view_angle_limits = ( + [T.deg2rad(limit) for limit in view_angle_limits] if view_angle_limits is not None else None + ) + if self._view_angle_limits is not None: + scene = self.robot.scene + blackout_relative_path = "/view_blackout" + blackout_prim_path = scene_relative_prim_path_to_absolute(scene, blackout_relative_path) + blackout_sphere = lazy.pxr.UsdGeom.Sphere.Define(og.sim.stage, blackout_prim_path) + blackout_sphere.CreateRadiusAttr().Set(0.1) + blackout_sphere.CreateDisplayColorAttr().Set(lazy.pxr.Vt.Vec3fArray([255, 255, 255])) + self._view_blackout_prim = VisualGeomPrim( + relative_prim_path=blackout_relative_path, + name="view_blackout", + ) + self._view_blackout_prim.load(scene) + self._view_blackout_prim.initialize() + self._view_blackout_prim.visible = False + + def _update_camera_pose(self, e) -> None: + if self.align_anchor_to == "touchpad": + # we use x, y from right controller for 2d movement and y from left controller for z movement + self._move_anchor( + pos_offset=th.cat((th.tensor([self.teleop_action.torso]), self.teleop_action.base[[0, 2]])) + ) + else: + if self.anchor_prim is not None: + reference_frame = self.anchor_prim + elif self.align_anchor_to == "camera": + reference_frame = self.robot_cameras[self.active_camera_id] + elif self.align_anchor_to == "base": + reference_frame = self.robot + else: + raise ValueError(f"Invalid anchor: {self.align_anchor_to}") + + anchor_pos, anchor_orn = reference_frame.get_position_orientation() + + if self.head_canonical_transformation is not None: + current_head_physical_world_pose = self.xr2og(self.hmd.get_physical_world_pose()) + # Find the orientation change from canonical to current physical orientation + _, relative_orientation = T.relative_pose_transform( + *current_head_physical_world_pose, *self.head_canonical_transformation + ) + anchor_orn = T.quat_multiply(anchor_orn, relative_orientation) + + if self._view_blackout_prim is not None: + relative_ori_in_euler = T.quat2euler(relative_orientation) + roll_limit, pitch_limit, yaw_limit = self._view_angle_limits + # OVXR has a different coordinate system than OmniGibson + if ( + abs(relative_ori_in_euler[0]) > pitch_limit + or abs(relative_ori_in_euler[1]) > yaw_limit + or abs(relative_ori_in_euler[2]) > roll_limit + ): + self._view_blackout_prim.set_position_orientation(anchor_pos, anchor_orn) + self._view_blackout_prim.visible = True + else: + self._view_blackout_prim.visible = False + anchor_pose = self.og2xr(anchor_pos, anchor_orn) + self.vr_profile.set_physical_world_to_world_anchor_transform_to_match_xr_device( + anchor_pose.numpy(), self.hmd + ) + + def register_head_canonical_transformation(self): + """ + Here's what we need to do: + 1) Let the user press a button to 
record head canonical orientation (GELLO and head facing forward) + 2) when the user turns their head, get orientation change from canonical to current physical orientation as R + 3) set the head in virtual world to robot head orientation + R, same position + """ + if self.hmd is None: + log.warning("No HMD found, cannot register head canonical orientation") + return + self.head_canonical_transformation = self.xr2og(self.hmd.get_physical_world_pose()) + + def set_anchor_with_prim(self, prim) -> None: + """ + Set the anchor to a prim + Args: + prim (BasePrim): the prim to set the anchor to + """ + self.anchor_prim = prim def xr2og(self, transform: th.tensor) -> Tuple[th.tensor, th.tensor]: """ @@ -203,7 +348,6 @@ def xr2og(self, transform: th.tensor) -> Tuple[th.tensor, th.tensor]: tuple(th.tensor, th.Tensor): the position and orientation in the OmniGibson coordinate system """ pos, orn = T.mat2pose(th.tensor(transform).T) - orn = T.quat_multiply(orn, self.xr2og_orn_offset) return pos, orn def og2xr(self, pos: th.tensor, orn: th.tensor) -> th.Tensor: """ @@ -215,7 +359,6 @@ def og2xr(self, pos: th.tensor, orn: th.tensor) -> th.Tensor: Returns: th.tensor: the transform matrix in the Omniverse XR coordinate system """ - orn = T.quat_multiply(self.og2xr_orn_offset, orn) return T.pose2mat((pos, orn)).T.double() def reset(self) -> None: @@ -249,6 +392,14 @@ def start(self) -> None: print("[VRSys] Waiting for VR headset to become active...") self._update_devices() if self.hmd is not None: + print("[VRSys] VR headset connected, put on the headset to start") + # Note that stepping the vr system multiple times is necessary here due to a bug in OVXR plugin + for _ in range(50): + self.update() + og.sim.step() + self.reset_head_transform() + print("[VRSys] VR system is ready") + self.register_head_canonical_transformation() break time.sleep(1) og.sim.step() @@ -269,23 +420,28 @@ def update(self) -> None: self._update_devices() self._update_device_transforms() self._update_button_data() - # Update teleop data based on controller input if not using hand tracking - if not self.use_hand_tracking: - self.teleop_action.base = th.zeros(3) - self.teleop_action.torso = 0.0 - # update right hand related info + # Update teleop data based on controller input + if self.eef_tracking_mode == "controller": + # update eef related info for arm_name, arm in zip(["left", "right"], self.robot_arms): if arm in self.controllers: + controller_pose_in_robot_frame = self._pose_in_robot_frame( + self.raw_data["transforms"]["controllers"][arm][0], + self.raw_data["transforms"]["controllers"][arm][1], + ) self.teleop_action[arm_name] = th.cat( ( - self.raw_data["transforms"]["controllers"][arm][0], - T.quat2euler( + controller_pose_in_robot_frame[0], + T.quat2axisangle( T.quat_multiply( - self.raw_data["transforms"]["controllers"][arm][1], - self.robot.teleop_rotation_offset[self.robot.arm_names[self.robot_arms.index(arm)]], + controller_pose_in_robot_frame[1], + self.robot.teleop_rotation_offset[arm_name], ) ), - [self.raw_data["button_data"][arm]["axis"]["trigger"]], + # When trigger is pressed, this value would be 1.0, otherwise 0.0 + # Our multi-finger gripper controller closes the gripper when the value is -1.0 and opens when > 0.0 + # So we need to negate the value here + th.tensor([-self.raw_data["button_data"][arm]["axis"]["trigger"]], dtype=th.float32), ) ) self.teleop_action.is_valid[arm_name] = self._is_valid_transform( @@ -293,44 +449,36 @@ def update(self) -> None: ) else: self.teleop_action.is_valid[arm_name] = False - #
update base and reset info - if "right" in self.controllers: - self.teleop_action.reset["right"] = self.raw_data["button_data"]["right"]["press"]["grip"] - right_axis = self.raw_data["button_data"]["right"]["axis"] - self.teleop_action.base[0] = right_axis["touchpad_y"] * self.movement_speed - self.teleop_action.torso = -right_axis["touchpad_x"] * self.movement_speed - if "left" in self.controllers: - self.teleop_action.reset["left"] = self.raw_data["button_data"]["left"]["press"]["grip"] - left_axis = self.raw_data["button_data"]["left"]["axis"] - self.teleop_action.base[1] = -left_axis["touchpad_x"] * self.movement_speed - self.teleop_action.base[2] = left_axis["touchpad_y"] * self.movement_speed - # update head related info - self.teleop_action.head = th.cat( - (self.raw_data["transforms"]["head"][0], th.tensor(T.quat2euler(self.raw_data["transforms"]["head"][1]))) - ) - self.teleop_action.is_valid["head"] = self._is_valid_transform(self.raw_data["transforms"]["head"]) - # Optionally move anchor - if self.enable_touchpad_movement: - # we use x, y from right controller for 2d movement and y from left controller for z movement - self._move_anchor( - pos_offset=th.cat((th.tensor([self.teleop_action.torso]), self.teleop_action.base[[0, 2]])) + + # update base, torso, and reset info + self.teleop_action.base = th.zeros(3) + self.teleop_action.torso = 0.0 + for controller_name in self.controllers.keys(): + self.teleop_action.reset[controller_name] = self.raw_data["button_data"][controller_name]["press"][ + "grip" + ] + axis = self.raw_data["button_data"][controller_name]["axis"] + if controller_name == "right": + self.teleop_action.base[0] = axis["touchpad_y"] * self.movement_speed + self.teleop_action.torso = -axis["touchpad_x"] * self.movement_speed + elif controller_name == "left": + self.teleop_action.base[1] = -axis["touchpad_x"] * self.movement_speed + self.teleop_action.base[2] = axis["touchpad_y"] * self.movement_speed + # update head related info + self.teleop_action.head = th.cat( + (self.raw_data["transforms"]["head"][0], T.quat2euler(self.raw_data["transforms"]["head"][1])) ) - if self.align_anchor_to_robot_base: - robot_base_pos, robot_base_orn = self.robot.get_position_orientation() - self.vr_profile.set_virtual_world_anchor_transform(self.og2xr(robot_base_pos, robot_base_orn[[0, 2, 1, 3]])) + self.teleop_action.is_valid["head"] = self._is_valid_transform(self.raw_data["transforms"]["head"]) - def teleop_data_to_action(self) -> th.Tensor: + def get_robot_teleop_action(self) -> th.Tensor: """ - Generate action data from VR input for robot teleoperation + Get the robot teleop action Returns: - th.tensor: array of action data + th.tensor: the robot teleop action """ - # optionally update control marker - if self.show_control_marker: - self.update_control_marker() return self.robot.teleop_data_to_action(self.teleop_action) - def reset_transform_mapping(self, arm: str = "right") -> None: + def snap_controller_to_eef(self, arm: str = "right") -> None: """ Snap device to the robot end effector (ManipulationRobot only) Args: @@ -342,21 +490,68 @@ def reset_transform_mapping(self, arm: str = "right") -> None: ].get_position_orientation()[0] target_transform = self.og2xr(pos=robot_eef_pos, orn=robot_base_orn) self.vr_profile.set_physical_world_to_world_anchor_transform_to_match_xr_device( - target_transform, self.controllers[arm] + target_transform.numpy(), self.controllers[arm] + ) + + def snap_eef_to_controller(self, arm: str = "right") -> None: + """ + Snap robot end effector to the 
device (ManipulationRobot only) + Args: + arm (str): name of the arm, one of "left" or "right". Default is "right". + """ + self.robot.keep_still() + pos, quat = self.raw_data["transforms"]["controllers"][arm] + # NOTE: requires self.global_ik_solver, whose initialization in __init__ is currently commented out + ik_solver = self.global_ik_solver[arm] + control_idx = self.robot.arm_control_idx[arm] + joint_pos = ik_solver.solve( + target_pos=pos, + target_quat=quat, + tolerance_pos=0.01, + tolerance_quat=0.01, + weight_pos=20.0, + weight_quat=0.05, + max_iterations=100, + initial_joint_pos=self.robot.get_joint_positions()[control_idx], + ) + if joint_pos is not None: + self.robot.set_joint_positions(joint_pos, indices=control_idx, drive=False) + print("Snap succeeded.") + else: + print("Snap failed. No IK solution found.") + + def reset_head_transform(self) -> None: + """ + Resets the transform of the VR system w.r.t. the head + """ + if self.align_anchor_to == "touchpad": + pos = th.tensor([0.0, 0.0, 1.0]) + orn = th.tensor([0.0, 0.0, 0.0, 1.0]) + else: + if self.anchor_prim is not None: + reference_frame = self.anchor_prim + elif self.align_anchor_to == "camera": + reference_frame = self.robot_cameras[self.active_camera_id] + elif self.align_anchor_to == "base": + reference_frame = self.robot + else: + raise ValueError(f"Invalid anchor: {self.align_anchor_to}") + pos, orn = reference_frame.get_position_orientation() + self.robot.keep_still() + self.vr_profile.set_physical_world_to_world_anchor_transform_to_match_xr_device( + self.og2xr(pos, orn).numpy(), self.hmd ) - def set_initial_transform(self, pos: Iterable[float], orn: Iterable[float] = [0, 0, 0, 1]) -> None: + def _pose_in_robot_frame(self, pos: th.tensor, orn: th.tensor) -> Tuple[th.tensor, th.tensor]: """ - Function that sets the initial transform of the VR system (w.r.t.) head - Note that stepping the vr system multiple times is necessary here due to a bug in OVXR plugin + Get the pose in the robot frame Args: - pos(Iterable[float]): initial position of the vr system - orn(Iterable[float]): initial orientation of the vr system + pos (th.tensor): the position in the world frame + orn (th.tensor): the orientation in the world frame + Returns: + tuple(th.tensor, th.tensor): the position and orientation in the robot frame """ - for _ in range(10): - self.update() - og.sim.step() - self.vr_profile.set_physical_world_to_world_anchor_transform_to_match_xr_device(self.og2xr(pos, orn), self.hmd) + robot_base_pos, robot_base_orn = self.robot.get_position_orientation() + return T.relative_pose_transform(pos, orn, robot_base_pos, robot_base_orn) def _move_anchor( self, pos_offset: Optional[Iterable[float]] = None, rot_offset: Optional[Iterable[float]] = None @@ -369,62 +564,62 @@ """ if pos_offset is not None: # note that x is forward, y is down, z is left for ovxr, but x is forward, y is left, z is up for og - pos_offset = th.tensor([-pos_offset[0], pos_offset[2], -pos_offset[1]]).double() + pos_offset = th.tensor([-pos_offset[0], pos_offset[2], -pos_offset[1]]).double().tolist() self.vr_profile.add_move_physical_world_relative_to_device(pos_offset) if rot_offset is not None: - rot_offset = th.tensor(rot_offset).double() + rot_offset = th.tensor(rot_offset).double().tolist() self.vr_profile.add_rotate_physical_world_around_device(rot_offset) + # TODO: check if this is necessary def _is_valid_transform(self, transform: Tuple[th.tensor, th.tensor]) -> bool: """ Determine whether the transform is valid (ovxr plugin will return a zero position and rotation if not valid) """ - return th.any(transform[0] !=
th.zeros(3)) and th.any(transform[1] != self.og2xr_orn_offset) + return th.any(transform[0] != th.zeros(3)) def _update_devices(self) -> None: """ Update the VR device list """ for device in self.vr_profile.get_device_list(): - if device.get_class() == self.xr_device_class.xrdisplaydevice: + device_class = device.get_class() + if device_class == lazy.omni.kit.xr.core.XRDeviceClass.xrdisplaydevice: self.hmd = device - elif device.get_class() == self.xr_device_class.xrcontroller: + elif device_class == lazy.omni.kit.xr.core.XRDeviceClass.xrcontroller: # we want the first 2 controllers to be corresponding to the left and right hand d_idx = device.get_index() controller_name = ["left", "right"][d_idx] if d_idx < 2 else f"controller_{d_idx+1}" self.controllers[controller_name] = device - elif device.get_class() == self.xr_device_class.xrtracker: + elif device_class == lazy.omni.kit.xr.core.XRDeviceClass.xrtracker: self.trackers[device.get_index()] = device def _update_device_transforms(self) -> None: """ Get the transform matrix of each VR device *in world frame* and store in self.raw_data """ - transforms = {} - transforms["head"] = self.xr2og(self.hmd.get_virtual_world_pose()) - transforms["controllers"] = {} - transforms["trackers"] = {} - for controller_name in self.controllers: - transforms["controllers"][controller_name] = self.xr2og( - self.controllers[controller_name].get_virtual_world_pose() - ) - for tracker_index in self.trackers: - transforms["trackers"][tracker_index] = self.xr2og(self.trackers[tracker_index].get_virtual_world_pose()) - self.raw_data["transforms"] = transforms + assert self.hmd is not None, "VR headset device not found" + self.raw_data["transforms"] = { + "head": self.xr2og(self.hmd.get_virtual_world_pose()), + "controllers": { + name: self.xr2og(controller.get_virtual_world_pose()) for name, controller in self.controllers.items() + }, + "trackers": { + index: self.xr2og(tracker.get_virtual_world_pose()) for index, tracker in self.trackers.items() + }, + } def _update_button_data(self): """ Get the button data for each controller and store in self.raw_data - Returns: - dict: a dictionary of whether each button is pressed or touched, and the axis state for touchpad and joysticks - """ - button_data = {} - for controller_name in self.controllers: - button_data[controller_name] = {} - button_data[controller_name]["press"] = self.controllers[controller_name].get_button_press_state() - button_data[controller_name]["touch"] = self.controllers[controller_name].get_button_touch_state() - button_data[controller_name]["axis"] = self.controllers[controller_name].get_axis_state() - self.raw_data["button_data"] = button_data + """ + self.raw_data["button_data"] = { + name: { + "press": controller.get_button_press_state(), + "touch": controller.get_button_touch_state(), + "axis": controller.get_axis_state(), + } + for name, controller in self.controllers.items() + } def _update_hand_tracking_data(self, e) -> None: """ diff --git a/omnigibson/utils/transform_utils.py b/omnigibson/utils/transform_utils.py index a7dfdda02..c2301bb61 100644 --- a/omnigibson/utils/transform_utils.py +++ b/omnigibson/utils/transform_utils.py @@ -1372,3 +1372,33 @@ def quaternions_close(q1: torch.Tensor, q2: torch.Tensor, atol: float = 1e-3) -> Whether the quaternions are close """ return torch.allclose(q1, q2, atol=atol) or torch.allclose(q1, -q2, atol=atol) + + +@torch.compile +def orientation_error(desired, current): + """ + This function calculates a 3-dimensional orientation error vector for use 
in the + impedance controller. It does this by computing the delta rotation between the + inputs and converting that rotation to exponential coordinates (axis-angle + representation, where the 3d vector is axis * angle). + See https://en.wikipedia.org/wiki/Axis%E2%80%93angle_representation for more information. + Optimized function to determine orientation error from matrices + + Args: + desired (tensor): (..., 3, 3) where final two dims are 2d array representing target orientation matrix + current (tensor): (..., 3, 3) where final two dims are 2d array representing current orientation matrix + Returns: + tensor: (..., 3) where final dim is (ax, ay, az) axis-angle representing orientation error + """ + # Compute batch size + batch_size = desired.numel() // 9 # Each 3x3 matrix has 9 elements + + desired_flat = desired.reshape(batch_size, 3, 3) + current_flat = current.reshape(batch_size, 3, 3) + + rc1, rc2, rc3 = current_flat[:, :, 0], current_flat[:, :, 1], current_flat[:, :, 2] + rd1, rd2, rd3 = desired_flat[:, :, 0], desired_flat[:, :, 1], desired_flat[:, :, 2] + + error = 0.5 * (torch.linalg.cross(rc1, rd1) + torch.linalg.cross(rc2, rd2) + torch.linalg.cross(rc3, rd3)) + + return error.reshape(desired.shape[:-2] + (3,)) diff --git a/omnigibson/utils/transform_utils_np.py b/omnigibson/utils/transform_utils_np.py new file mode 100644 index 000000000..5c4997374 --- /dev/null +++ b/omnigibson/utils/transform_utils_np.py @@ -0,0 +1,1345 @@ +""" +Utility functions of matrix and vector transformations. + +NOTE: convention for quaternions is (x, y, z, w) +""" + +import math + +import numpy as np +from numba import jit, prange +from scipy.spatial.transform import Rotation as R + +PI = np.pi +EPS = np.finfo(float).eps * 4.0 + +# axis sequences for Euler angles +_NEXT_AXIS = [1, 2, 0, 1] + +# map axes strings to/from tuples of inner axis, parity, repetition, frame +_AXES2TUPLE = { + "sxyz": (0, 0, 0, 0), + "sxyx": (0, 0, 1, 0), + "sxzy": (0, 1, 0, 0), + "sxzx": (0, 1, 1, 0), + "syzx": (1, 0, 0, 0), + "syzy": (1, 0, 1, 0), + "syxz": (1, 1, 0, 0), + "syxy": (1, 1, 1, 0), + "szxy": (2, 0, 0, 0), + "szxz": (2, 0, 1, 0), + "szyx": (2, 1, 0, 0), + "szyz": (2, 1, 1, 0), + "rzyx": (0, 0, 0, 1), + "rxyx": (0, 0, 1, 1), + "ryzx": (0, 1, 0, 1), + "rxzx": (0, 1, 1, 1), + "rxzy": (1, 0, 0, 1), + "ryzy": (1, 0, 1, 1), + "rzxy": (1, 1, 0, 1), + "ryxy": (1, 1, 1, 1), + "ryxz": (2, 0, 0, 1), + "rzxz": (2, 0, 1, 1), + "rxyz": (2, 1, 0, 1), + "rzyz": (2, 1, 1, 1), +} + +_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items()) + + +def ewma_vectorized(data, alpha, offset=None, dtype=None, order="C", out=None): + """ + Calculates the exponential moving average over a vector. + Will fail for large inputs. + + Args: + data (Iterable): Input data + alpha (float): scalar in range (0,1) + The alpha parameter for the moving average. + offset (None or float): If specified, the offset for the moving average. None defaults to data[0]. + dtype (None or type): Data type used for calculations. If None, defaults to float64 unless + data.dtype is float32, then it will use float32. + order (None or str): Order to use when flattening the data. Valid options are {'C', 'F', 'A'}. + None defaults to 'C'. + out (None or np.array): If specified, the location into which the result is stored. If provided, it must have + the same shape as the input. If not provided or `None`, + a freshly-allocated array is returned. 
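+
+    E.g. (illustrative sanity check -- the EWMA of a constant series should be
+    that same constant, regardless of alpha):
+        >>> out = ewma_vectorized([1.0, 1.0, 1.0], alpha=0.5)
+        >>> np.allclose(out, [1.0, 1.0, 1.0])
+        True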
+ + Returns: + np.array: Exponential moving average from @data + """ + data = np.array(data, copy=False) + + if dtype is None: + if data.dtype == np.float32: + dtype = np.float32 + else: + dtype = np.float64 + else: + dtype = np.dtype(dtype) + + if data.ndim > 1: + # flatten input + data = data.reshape(-1, order) + + if out is None: + out = np.empty_like(data, dtype=dtype) + else: + assert out.shape == data.shape + assert out.dtype == dtype + + if data.size < 1: + # empty input, return empty array + return out + + if offset is None: + offset = data[0] + + alpha = np.array(alpha, copy=False).astype(dtype, copy=False) + + # scaling_factors -> 0 as len(data) gets large + # this leads to divide-by-zeros below + scaling_factors = np.power(1.0 - alpha, np.arange(data.size + 1, dtype=dtype), dtype=dtype) + # create cumulative sum array + np.multiply(data, (alpha * scaling_factors[-2]) / scaling_factors[:-1], dtype=dtype, out=out) + np.cumsum(out, dtype=dtype, out=out) + + # cumsums / scaling + out /= scaling_factors[-2::-1] + + if offset != 0: + offset = np.array(offset, copy=False).astype(dtype, copy=False) + # add offsets + out += offset * scaling_factors[1:] + + return out + + +def convert_quat(q, to="xyzw"): + """ + Converts quaternion from one convention to another. + The convention to convert TO is specified as an optional argument. + If to == 'xyzw', then the input is in 'wxyz' format, and vice-versa. + + Args: + q (np.array): a 4-dim array corresponding to a quaternion + to (str): either 'xyzw' or 'wxyz', determining which convention to convert to. + """ + if to == "xyzw": + return q[[1, 2, 3, 0]] + if to == "wxyz": + return q[[3, 0, 1, 2]] + raise Exception("convert_quat: choose a valid `to` argument (xyzw or wxyz)") + + +@jit(nopython=True) +def quat_multiply(quaternion1, quaternion0): + """ + Return multiplication of two quaternions (q1 * q0). + + E.g.: + >>> q = quat_multiply([1, -2, 3, 4], [-5, 6, 7, 8]) + >>> np.allclose(q, [-44, -14, 48, 28]) + True + + Args: + quaternion1 (np.array): (x,y,z,w) quaternion + quaternion0 (np.array): (x,y,z,w) quaternion + + Returns: + np.array: (x,y,z,w) multiplied quaternion + """ + x0, y0, z0, w0 = quaternion0 + x1, y1, z1, w1 = quaternion1 + return np.array( + ( + x1 * w0 + y1 * z0 - z1 * y0 + w1 * x0, + -x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0, + x1 * y0 - y1 * x0 + z1 * w0 + w1 * z0, + -x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0, + ), + dtype=np.float32, + ) + + +def quat_conjugate(quaternion): + """ + Return conjugate of quaternion. + + E.g.: + >>> q0 = random_quaternion() + >>> q1 = quat_conjugate(q0) + >>> q1[3] == q0[3] and all(q1[:3] == -q0[:3]) + True + + Args: + quaternion (np.array): (x,y,z,w) quaternion + + Returns: + np.array: (x,y,z,w) quaternion conjugate + """ + return np.array( + (-quaternion[0], -quaternion[1], -quaternion[2], quaternion[3]), + dtype=np.float32, + ) + + +def quat_inverse(quaternion): + """ + Return inverse of quaternion. 
+
+    E.g.:
+    >>> q0 = random_quat()
+    >>> q1 = quat_inverse(q0)
+    >>> np.allclose(quat_multiply(q0, q1), [0, 0, 0, 1])
+    True
+
+    Args:
+        quaternion (np.array): (x,y,z,w) quaternion
+
+    Returns:
+        np.array: (x,y,z,w) quaternion inverse
+    """
+    return quat_conjugate(quaternion) / np.dot(quaternion, quaternion)
+
+
+def quat_distance(quaternion1, quaternion0):
+    """
+    Returns distance between two quaternions, such that distance * quaternion0 = quaternion1
+
+    Args:
+        quaternion1 (np.array): (x,y,z,w) quaternion
+        quaternion0 (np.array): (x,y,z,w) quaternion
+
+    Returns:
+        np.array: (x,y,z,w) quaternion distance
+    """
+    return quat_multiply(quaternion1, quat_inverse(quaternion0))
+
+
+def quat_slerp(quat0, quat1, fraction, shortestpath=True):
+    """
+    Return spherical linear interpolation between two quaternions.
+
+    E.g.:
+    >>> q0 = random_quat()
+    >>> q1 = random_quat()
+    >>> q = quat_slerp(q0, q1, 0.0)
+    >>> np.allclose(q, q0)
+    True
+
+    >>> q = quat_slerp(q0, q1, 1.0)
+    >>> np.allclose(q, q1)
+    True
+
+    >>> q = quat_slerp(q0, q1, 0.5)
+    >>> angle = math.acos(np.dot(q0, q))
+    >>> np.allclose(2.0, math.acos(np.dot(q0, q1)) / angle) or \
+        np.allclose(2.0, math.acos(-np.dot(q0, q1)) / angle)
+    True
+
+    Args:
+        quat0 (np.array): (x,y,z,w) quaternion startpoint
+        quat1 (np.array): (x,y,z,w) quaternion endpoint
+        fraction (float): fraction of interpolation to calculate
+        shortestpath (bool): If True, will calculate the shortest path
+
+    Returns:
+        np.array: (x,y,z,w) interpolated quaternion
+    """
+    q0 = unit_vector(quat0[:4])
+    q1 = unit_vector(quat1[:4])
+    if fraction == 0.0:
+        return q0
+    elif fraction == 1.0:
+        return q1
+    d = np.dot(q0, q1)
+    if abs(abs(d) - 1.0) < EPS:
+        return q0
+    if shortestpath and d < 0.0:
+        # invert rotation
+        d = -d
+        q1 *= -1.0
+    angle = math.acos(np.clip(d, -1, 1))
+    if abs(angle) < EPS:
+        return q0
+    isin = 1.0 / math.sin(angle)
+    q0 *= math.sin((1.0 - fraction) * angle) * isin
+    q1 *= math.sin(fraction * angle) * isin
+    q0 += q1
+    return q0
+
+
+def random_quat(rand=None):
+    """
+    Return uniform random unit quaternion.
+
+    E.g.:
+    >>> q = random_quat()
+    >>> np.allclose(1.0, np.linalg.norm(q))
+    True
+    >>> q = random_quat(np.random.random(3))
+    >>> q.shape
+    (4,)
+
+    Args:
+        rand (3-array or None): If specified, must be three independent random variables that are uniformly distributed
+            between 0 and 1.
+
+    Returns:
+        np.array: (x,y,z,w) random quaternion
+    """
+    if rand is None:
+        rand = np.random.rand(3)
+    else:
+        assert len(rand) == 3
+    r1 = np.sqrt(1.0 - rand[0])
+    r2 = np.sqrt(rand[0])
+    pi2 = math.pi * 2.0
+    t1 = pi2 * rand[1]
+    t2 = pi2 * rand[2]
+    return np.array(
+        (np.sin(t1) * r1, np.cos(t1) * r1, np.sin(t2) * r2, np.cos(t2) * r2),
+        dtype=np.float32,
+    )
+
+
+def random_axis_angle(angle_limit=None, random_state=None):
+    """
+    Samples an axis-angle rotation by first sampling a random axis
+    and then sampling an angle. If @angle_limit is provided, the size
+    of the rotation angle is constrained.
+
+    If @random_state is provided (instance of np.random.RandomState), it
+    will be used to generate random numbers.
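+
+    E.g. (illustrative sketch -- the sampled axis should be unit length, and
+    the sampled angle should respect @angle_limit):
+        >>> axis, angle = random_axis_angle(angle_limit=np.pi)
+        >>> np.allclose(1.0, np.linalg.norm(axis))
+        True
+        >>> bool(0.0 <= angle <= np.pi)
+        True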
+ + Args: + angle_limit (None or float): If set, determines magnitude limit of angles to generate + random_state (None or RandomState): RNG to use if specified + + Raises: + AssertionError: [Invalid RNG] + """ + if angle_limit is None: + angle_limit = 2.0 * np.pi + + if random_state is not None: + assert isinstance(random_state, np.random.RandomState) + npr = random_state + else: + npr = np.random + + # sample random axis using a normalized sample from spherical Gaussian. + # see (http://extremelearning.com.au/how-to-generate-uniformly-random-points-on-n-spheres-and-n-balls/) + # for why it works. + random_axis = npr.randn(3) + random_axis /= np.linalg.norm(random_axis) + random_angle = npr.uniform(low=0.0, high=angle_limit) + return random_axis, random_angle + + +def vec(values): + """ + Converts value tuple into a numpy vector. + + Args: + values (n-array): a tuple of numbers + + Returns: + np.array: vector of given values + """ + return np.array(values, dtype=np.float32) + + +def mat4(array): + """ + Converts an array to 4x4 matrix. + + Args: + array (n-array): the array in form of vec, list, or tuple + + Returns: + np.array: a 4x4 numpy matrix + """ + return np.array(array, dtype=np.float32).reshape((4, 4)) + + +def mat2pose(hmat): + """ + Converts a homogeneous 4x4 matrix into pose. + + Args: + hmat (np.array): a 4x4 homogeneous matrix + + Returns: + 2-tuple: + - (np.array) (x,y,z) position array in cartesian coordinates + - (np.array) (x,y,z,w) orientation array in quaternion form + """ + pos = hmat[:3, 3] + orn = mat2quat(hmat[:3, :3]) + return pos, orn + + +def mat2quat(rmat): + """ + Converts given rotation matrix to quaternion. + + Args: + rmat (np.array): (..., 3, 3) rotation matrix + + Returns: + np.array: (..., 4) (x,y,z,w) float quaternion angles + """ + return R.from_matrix(rmat).as_quat() + + +@jit(nopython=True, fastmath=True) +def _norm_2d_final_dim(mat): + n_elements = mat.shape[0] + out = np.zeros(n_elements, dtype=np.float32) + for i in prange(n_elements): + vec = mat[i] + out[i] = np.sqrt(np.sum(vec * vec)) + return out + + +@jit(nopython=True) +def mat2quat_batch(rmat): + """ + Converts given rotation matrix to quaternion. 
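+
+    E.g. (illustrative sanity check -- the identity rotation should map to the
+    identity quaternion (0, 0, 0, 1)):
+        >>> quat = mat2quat_batch(np.eye(3, dtype=np.float32).reshape(1, 3, 3))
+        >>> np.allclose(quat, [[0.0, 0.0, 0.0, 1.0]])
+        True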
+    Args:
+        rmat (np.array): (3, 3) or (..., 3, 3) rotation matrix
+    Returns:
+        np.array: (4,) or (..., 4) (x,y,z,w) float quaternion angles
+    """
+    batch_shape = rmat.shape[:-2]
+    mat_flat = rmat.reshape(-1, 3, 3)
+
+    m00, m01, m02 = mat_flat[:, 0, 0], mat_flat[:, 0, 1], mat_flat[:, 0, 2]
+    m10, m11, m12 = mat_flat[:, 1, 0], mat_flat[:, 1, 1], mat_flat[:, 1, 2]
+    m20, m21, m22 = mat_flat[:, 2, 0], mat_flat[:, 2, 1], mat_flat[:, 2, 2]
+
+    trace = m00 + m11 + m22
+
+    trace_positive = trace > 0
+    cond1 = (m00 > m11) & (m00 > m22) & ~trace_positive
+    cond2 = (m11 > m22) & ~(trace_positive | cond1)
+    cond3 = ~(trace_positive | cond1 | cond2)
+
+    # Trace positive condition
+    sq = np.where(trace_positive, np.sqrt(trace + 1.0) * 2.0, np.zeros_like(trace))
+    qw = np.where(trace_positive, 0.25 * sq, np.zeros_like(trace))
+    qx = np.where(trace_positive, (m21 - m12) / sq, np.zeros_like(trace))
+    qy = np.where(trace_positive, (m02 - m20) / sq, np.zeros_like(trace))
+    qz = np.where(trace_positive, (m10 - m01) / sq, np.zeros_like(trace))
+
+    # Condition 1
+    sq = np.where(cond1, np.sqrt(1.0 + m00 - m11 - m22) * 2.0, sq)
+    qw = np.where(cond1, (m21 - m12) / sq, qw)
+    qx = np.where(cond1, 0.25 * sq, qx)
+    qy = np.where(cond1, (m01 + m10) / sq, qy)
+    qz = np.where(cond1, (m02 + m20) / sq, qz)
+
+    # Condition 2
+    sq = np.where(cond2, np.sqrt(1.0 + m11 - m00 - m22) * 2.0, sq)
+    qw = np.where(cond2, (m02 - m20) / sq, qw)
+    qx = np.where(cond2, (m01 + m10) / sq, qx)
+    qy = np.where(cond2, 0.25 * sq, qy)
+    qz = np.where(cond2, (m12 + m21) / sq, qz)
+
+    # Condition 3
+    sq = np.where(cond3, np.sqrt(1.0 + m22 - m00 - m11) * 2.0, sq)
+    qw = np.where(cond3, (m10 - m01) / sq, qw)
+    qx = np.where(cond3, (m02 + m20) / sq, qx)
+    qy = np.where(cond3, (m12 + m21) / sq, qy)
+    qz = np.where(cond3, 0.25 * sq, qz)
+
+    quat = np.stack((qx, qy, qz, qw), axis=-1)
+
+    # Normalize the quaternion
+    quat = quat / _norm_2d_final_dim(quat)[..., np.newaxis]
+
+    # Reshape to match input batch shape
+    quat = quat.reshape(batch_shape + (4,))
+
+    return quat
+
+
+def vec2quat(vec, up=(0, 0, 1.0)):
+    """
+    Converts given 3d-direction vector @vec to quaternion orientation with respect to another direction vector @up
+
+    Args:
+        vec (3-array): (x,y,z) direction vector (possibly non-normalized)
+        up (3-array): (x,y,z) direction vector representing the canonical up direction (possibly non-normalized)
+    """
+    # See https://stackoverflow.com/questions/15873996/converting-a-direction-vector-to-a-quaternion-rotation
+    # Take cross product of @up and @vec to get @s_n, and then cross @vec and @s_n to get @u_n
+    # Then compose 3x3 rotation matrix and convert into quaternion
+    vec_n = vec / np.linalg.norm(vec)  # x
+    up_n = up / np.linalg.norm(up)
+    s_n = np.cross(up_n, vec_n)  # y
+    u_n = np.cross(vec_n, s_n)  # z
+    return mat2quat(np.array([vec_n, s_n, u_n]).T)
+
+
+def euler2mat(euler):
+    """
+    Converts euler angles into rotation matrix form
+
+    Args:
+        euler (np.array): (r,p,y) angles
+
+    Returns:
+        np.array: 3x3 rotation matrix
+
+    Raises:
+        AssertionError: [Invalid input shape]
+    """
+
+    euler = np.asarray(euler, dtype=np.float64)
+    assert euler.shape[-1] == 3, "Invalid shaped euler {}".format(euler)
+
+    return R.from_euler("xyz", euler).as_matrix()
+
+
+def mat2euler(rmat):
+    """
+    Converts given rotation matrix to euler angles in radian.
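+
+    E.g. (illustrative sanity check -- the identity rotation has zero
+    roll/pitch/yaw):
+        >>> np.allclose(mat2euler(np.eye(3)), [0.0, 0.0, 0.0])
+        True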
+
+    Args:
+        rmat (np.array): 3x3 rotation matrix
+
+    Returns:
+        np.array: (r,p,y) converted euler angles in radian vec3 float
+    """
+    M = np.array(rmat, dtype=np.float32, copy=False)[:3, :3]
+    return R.from_matrix(M).as_euler("xyz")
+
+
+def quat2mat(quaternion):
+    if quaternion.dtype != np.float32:
+        quaternion = quaternion.astype(np.float32)
+    return _quat2mat(quaternion)
+
+
+@jit(nopython=True)
+def _quat2mat(quaternion):
+    """
+    Convert quaternions into rotation matrices.
+
+    Args:
+        quaternion (np.array): An array of shape (..., 4) representing batches of quaternions (x, y, z, w).
+
+    Returns:
+        np.array: An array of shape (..., 3, 3) representing batches of rotation matrices.
+    """
+    # broadcast array is necessary to use numba parallel mode
+    q1, q2 = np.broadcast_arrays(quaternion[..., np.newaxis], quaternion[..., np.newaxis, :])
+    outer = q1 * q2
+
+    # Extract the necessary components
+    xx = outer[..., 0, 0]
+    yy = outer[..., 1, 1]
+    zz = outer[..., 2, 2]
+    xy = outer[..., 0, 1]
+    xz = outer[..., 0, 2]
+    yz = outer[..., 1, 2]
+    xw = outer[..., 0, 3]
+    yw = outer[..., 1, 3]
+    zw = outer[..., 2, 3]
+
+    rotation_matrix = np.empty(quaternion.shape[:-1] + (3, 3), dtype=np.float32)
+
+    rotation_matrix[..., 0, 0] = 1 - 2 * (yy + zz)
+    rotation_matrix[..., 0, 1] = 2 * (xy - zw)
+    rotation_matrix[..., 0, 2] = 2 * (xz + yw)
+
+    rotation_matrix[..., 1, 0] = 2 * (xy + zw)
+    rotation_matrix[..., 1, 1] = 1 - 2 * (xx + zz)
+    rotation_matrix[..., 1, 2] = 2 * (yz - xw)
+
+    rotation_matrix[..., 2, 0] = 2 * (xz - yw)
+    rotation_matrix[..., 2, 1] = 2 * (yz + xw)
+    rotation_matrix[..., 2, 2] = 1 - 2 * (xx + yy)
+
+    return rotation_matrix
+
+
+@jit(nopython=True)
+def pose2mat(pose):
+    """
+    Converts pose to homogeneous matrix.
+
+    Args:
+        pose (2-tuple): a (pos, orn) tuple where pos is vec3 float cartesian, and
+            orn is vec4 float quaternion.
+
+    Returns:
+        np.array: 4x4 homogeneous matrix
+    """
+    homo_pose_mat = np.zeros((4, 4), dtype=np.float32)
+    homo_pose_mat[:3, :3] = _quat2mat(pose[1])
+    homo_pose_mat[:3, 3] = pose[0]
+    homo_pose_mat[3, 3] = 1.0
+    return homo_pose_mat
+
+
+def quat2axisangle(quat):
+    """
+    Converts quaternion to axis-angle format.
+    Returns a unit vector direction scaled by its angle in radians.
+
+    Args:
+        quat (np.array): (x,y,z,w) vec4 float angles
+
+    Returns:
+        np.array: (ax,ay,az) axis-angle exponential coordinates
+    """
+    return R.from_quat(quat).as_rotvec()
+
+
+def axisangle2quat(vec):
+    """
+    Converts scaled axis-angle to quat.
+
+    Args:
+        vec (np.array): (ax,ay,az) axis-angle exponential coordinates
+
+    Returns:
+        np.array: (x,y,z,w) vec4 float angles
+    """
+    return R.from_rotvec(vec).as_quat()
+
+
+def euler2quat(euler):
+    """
+    Converts euler angles into quaternion form
+
+    Args:
+        euler (np.array): (r,p,y) angles
+
+    Returns:
+        np.array: (x,y,z,w) float quaternion angles
+
+    Raises:
+        AssertionError: [Invalid input shape]
+    """
+    return R.from_euler("xyz", euler).as_quat()
+
+
+def quat2euler(quat):
+    """
+    Converts given quaternion into euler angles in radian
+
+    Args:
+        quat (np.array): (x,y,z,w) float quaternion angles
+
+    Returns:
+        np.array: (r,p,y) angles
+
+    Raises:
+        AssertionError: [Invalid input shape]
+    """
+    return R.from_quat(quat).as_euler("xyz")
+
+
+def pose_in_A_to_pose_in_B(pose_A, pose_A_in_B):
+    """
+    Converts a homogeneous matrix corresponding to a point C in frame A
+    to a homogeneous matrix corresponding to the same point C in frame B.
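+
+    E.g. (illustrative sanity check -- composing with an identity frame
+    transform leaves the pose unchanged):
+        >>> pose_C_in_A = np.eye(4, dtype=np.float32)
+        >>> np.allclose(pose_in_A_to_pose_in_B(pose_C_in_A, np.eye(4, dtype=np.float32)), np.eye(4))
+        True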
+
+    Args:
+        pose_A (np.array): 4x4 matrix corresponding to the pose of C in frame A
+        pose_A_in_B (np.array): 4x4 matrix corresponding to the pose of A in frame B
+
+    Returns:
+        np.array: 4x4 matrix corresponding to the pose of C in frame B
+    """
+
+    # pose of A in B takes a point in A and transforms it to a point in B.
+
+    # pose of C in B = pose of A in B * pose of C in A
+    # take a point in C, transform it to A, then to B
+    # T_B^C = T_B^A * T_A^C
+    return pose_A_in_B.dot(pose_A)
+
+
+def pose_inv(pose_mat):
+    if pose_mat.dtype != np.float32:
+        pose_mat = pose_mat.astype(np.float32)
+    return _pose_inv(pose_mat)
+
+
+@jit(nopython=True)
+def _pose_inv(pose_mat):
+    """
+    Computes the inverse of a homogeneous matrix corresponding to the pose of some
+    frame B in frame A. The inverse is the pose of frame A in frame B.
+
+    Args:
+        pose_mat (np.array): 4x4 matrix for the pose to inverse
+
+    Returns:
+        np.array: 4x4 matrix for the inverse pose
+    """
+
+    # Note, the inverse of a pose matrix is the following
+    # [R t; 0 1]^-1 = [R.T -R.T*t; 0 1]
+
+    # Intuitively, this makes sense.
+    # The original pose matrix translates by t, then rotates by R.
+    # We just invert the rotation by applying R-1 = R.T, and also translate back.
+    # Since we apply translation first before rotation, we need to translate by
+    # -t in the original frame, which is -R-1*t in the new frame, and then rotate back by
+    # R-1 to align the axis again.
+
+    pose_inv = np.zeros((4, 4), dtype=np.float32)
+    pose_inv[:3, :3] = pose_mat[:3, :3].T
+    pose_inv[:3, 3] = -pose_inv[:3, :3].dot(pose_mat[:3, 3])
+    pose_inv[3, 3] = 1.0
+    return pose_inv
+
+
+def pose_transform(pos1, quat1, pos0, quat0):
+    """
+    Conducts forward transform from pose (pos0, quat0) to pose (pos1, quat1):
+
+    pose1 @ pose0, NOT pose0 @ pose1
+
+    Args:
+        pos1: (x,y,z) position to transform
+        quat1: (x,y,z,w) orientation to transform
+        pos0: (x,y,z) initial position
+        quat0: (x,y,z,w) initial orientation
+
+    Returns:
+        2-tuple:
+            - (np.array) (x,y,z) position array in cartesian coordinates
+            - (np.array) (x,y,z,w) orientation array in quaternion form
+    """
+    # Get poses
+    mat0 = pose2mat((pos0, quat0))
+    mat1 = pose2mat((pos1, quat1))
+
+    # Multiply and convert back to pos, quat
+    return mat2pose(mat1 @ mat0)
+
+
+def invert_pose_transform(pos, quat):
+    """
+    Inverts a pose transform
+
+    Args:
+        pos: (x,y,z) position to transform
+        quat: (x,y,z,w) orientation to transform
+
+    Returns:
+        2-tuple:
+            - (np.array) (x,y,z) position array in cartesian coordinates
+            - (np.array) (x,y,z,w) orientation array in quaternion form
+    """
+    # Get pose
+    mat = pose2mat((pos, quat))
+
+    # Invert pose and convert back to pos, quat
+    return mat2pose(pose_inv(mat))
+
+
+def relative_pose_transform(pos1, quat1, pos0, quat0):
+    """
+    Computes relative forward transform from pose (pos0, quat0) to pose (pos1, quat1), i.e.: solves:
+
+    pose1 = pose0 @ transform
+
+    Args:
+        pos1: (x,y,z) position to transform
+        quat1: (x,y,z,w) orientation to transform
+        pos0: (x,y,z) initial position
+        quat0: (x,y,z,w) initial orientation
+
+    Returns:
+        2-tuple:
+            - (np.array) (x,y,z) position array in cartesian coordinates
+            - (np.array) (x,y,z,w) orientation array in quaternion form
+    """
+    # Get poses
+    mat0 = pose2mat((pos0, quat0))
+    mat1 = pose2mat((pos1, quat1))
+
+    # Invert pose0 and calculate transform
+    return mat2pose(pose_inv(mat0) @ mat1)
+
+
+def _skew_symmetric_translation(pos_A_in_B):
+    """
+    Helper function to get a skew symmetric translation matrix for converting quantities
+    between
frames. + + Args: + pos_A_in_B (np.array): (x,y,z) position of A in frame B + + Returns: + np.array: 3x3 skew symmetric translation matrix + """ + return np.array( + [ + 0.0, + -pos_A_in_B[2], + pos_A_in_B[1], + pos_A_in_B[2], + 0.0, + -pos_A_in_B[0], + -pos_A_in_B[1], + pos_A_in_B[0], + 0.0, + ] + ).reshape((3, 3)) + + +def vel_in_A_to_vel_in_B(vel_A, ang_vel_A, pose_A_in_B): + """ + Converts linear and angular velocity of a point in frame A to the equivalent in frame B. + + Args: + vel_A (np.array): (vx,vy,vz) linear velocity in A + ang_vel_A (np.array): (wx,wy,wz) angular velocity in A + pose_A_in_B (np.array): 4x4 matrix corresponding to the pose of A in frame B + + Returns: + 2-tuple: + + - (np.array) (vx,vy,vz) linear velocities in frame B + - (np.array) (wx,wy,wz) angular velocities in frame B + """ + pos_A_in_B = pose_A_in_B[:3, 3] + rot_A_in_B = pose_A_in_B[:3, :3] + skew_symm = _skew_symmetric_translation(pos_A_in_B) + vel_B = rot_A_in_B.dot(vel_A) + skew_symm.dot(rot_A_in_B.dot(ang_vel_A)) + ang_vel_B = rot_A_in_B.dot(ang_vel_A) + return vel_B, ang_vel_B + + +def force_in_A_to_force_in_B(force_A, torque_A, pose_A_in_B): + """ + Converts linear and rotational force at a point in frame A to the equivalent in frame B. + + Args: + force_A (np.array): (fx,fy,fz) linear force in A + torque_A (np.array): (tx,ty,tz) rotational force (moment) in A + pose_A_in_B (np.array): 4x4 matrix corresponding to the pose of A in frame B + + Returns: + 2-tuple: + + - (np.array) (fx,fy,fz) linear forces in frame B + - (np.array) (tx,ty,tz) moments in frame B + """ + pos_A_in_B = pose_A_in_B[:3, 3] + rot_A_in_B = pose_A_in_B[:3, :3] + skew_symm = _skew_symmetric_translation(pos_A_in_B) + force_B = rot_A_in_B.T.dot(force_A) + torque_B = -rot_A_in_B.T.dot(skew_symm.dot(force_A)) + rot_A_in_B.T.dot(torque_A) + return force_B, torque_B + + +def rotation_matrix(angle, direction, point=None): + """ + Returns matrix to rotate about axis defined by point and direction. + + E.g.: + >>> angle = (random.random() - 0.5) * (2*math.pi) + >>> direc = numpy.random.random(3) - 0.5 + >>> point = numpy.random.random(3) - 0.5 + >>> R0 = rotation_matrix(angle, direc, point) + >>> R1 = rotation_matrix(angle-2*math.pi, direc, point) + >>> is_same_transform(R0, R1) + True + + >>> R0 = rotation_matrix(angle, direc, point) + >>> R1 = rotation_matrix(-angle, -direc, point) + >>> is_same_transform(R0, R1) + True + + >>> I = numpy.identity(4, numpy.float32) + >>> numpy.allclose(I, rotation_matrix(math.pi*2, direc)) + True + + >>> numpy.allclose(2., numpy.trace(rotation_matrix(math.pi/2, + ... 
direc, point)))
+    True
+
+    Args:
+        angle (float): Magnitude of rotation
+        direction (np.array): (ax,ay,az) axis about which to rotate
+        point (None or np.array): If specified, is the (x,y,z) point about which the rotation will occur
+
+    Returns:
+        np.array: 4x4 homogeneous matrix that includes the desired rotation
+    """
+    sina = math.sin(angle)
+    cosa = math.cos(angle)
+    direction = unit_vector(direction[:3])
+    # rotation matrix around unit vector
+    R = np.array(((cosa, 0.0, 0.0), (0.0, cosa, 0.0), (0.0, 0.0, cosa)), dtype=np.float32)
+    R += np.outer(direction, direction) * (1.0 - cosa)
+    direction *= sina
+    R += np.array(
+        (
+            (0.0, -direction[2], direction[1]),
+            (direction[2], 0.0, -direction[0]),
+            (-direction[1], direction[0], 0.0),
+        ),
+        dtype=np.float32,
+    )
+    M = np.identity(4)
+    M[:3, :3] = R
+    if point is not None:
+        # rotation not around origin
+        point = np.array(point[:3], dtype=np.float32, copy=False)
+        M[:3, 3] = point - np.dot(R, point)
+    return M
+
+
+def clip_translation(dpos, limit):
+    """
+    Limits a translation (delta position) to a specified limit
+
+    Scales down the norm of the dpos to 'limit' if norm(dpos) > limit, else returns immediately
+
+    Args:
+        dpos (n-array): n-dim Translation being clipped (e.g.: (x, y, z)) -- numpy array
+        limit (float): Value to limit translation by -- magnitude (scalar, in same units as input)
+
+    Returns:
+        2-tuple:
+
+            - (np.array) Clipped translation (same dimension as inputs)
+            - (bool) whether the value was clipped or not
+    """
+    input_norm = np.linalg.norm(dpos)
+    return (dpos * limit / input_norm, True) if input_norm > limit else (dpos, False)
+
+
+def clip_rotation(quat, limit):
+    """
+    Limits a (delta) rotation to a specified limit
+
+    Converts rotation to axis-angle, clips, then re-converts back into quaternion
+
+    Args:
+        quat (np.array): (x,y,z,w) rotation being clipped
+        limit (float): Value to limit rotation by -- magnitude (scalar, in radians)
+
+    Returns:
+        2-tuple:
+
+            - (np.array) Clipped rotation quaternion (x, y, z, w)
+            - (bool) whether the value was clipped or not
+    """
+    clipped = False
+
+    # First, normalize the quaternion
+    quat = quat / np.linalg.norm(quat)
+
+    den = np.sqrt(max(1 - quat[3] * quat[3], 0))
+    if den == 0:
+        # This is a zero degree rotation, immediately return
+        return quat, clipped
+    else:
+        # This is all other cases
+        x = quat[0] / den
+        y = quat[1] / den
+        z = quat[2] / den
+        a = 2 * math.acos(quat[3])
+
+    # Clip rotation if necessary and return clipped quat
+    if abs(a) > limit:
+        a = limit * np.sign(a) / 2
+        sa = math.sin(a)
+        ca = math.cos(a)
+        quat = np.array([x * sa, y * sa, z * sa, ca])
+        clipped = True
+
+    return quat, clipped
+
+
+def make_pose(translation, rotation):
+    """
+    Makes a homogeneous pose matrix from a translation vector and a rotation matrix.
+
+    Args:
+        translation (np.array): (x,y,z) translation value
+        rotation (np.array): a 3x3 matrix representing rotation
+
+    Returns:
+        pose (np.array): a 4x4 homogeneous matrix
+    """
+    pose = np.zeros((4, 4))
+    pose[:3, :3] = rotation
+    pose[:3, 3] = translation
+    pose[3, 3] = 1.0
+    return pose
+
+
+def unit_vector(data, axis=None, out=None):
+    """
+    Returns ndarray normalized by length, i.e. euclidean norm, along axis.
+ + E.g.: + >>> v0 = numpy.random.random(3) + >>> v1 = unit_vector(v0) + >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0)) + True + + >>> v0 = numpy.random.rand(5, 4, 3) + >>> v1 = unit_vector(v0, axis=-1) + >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2) + >>> numpy.allclose(v1, v2) + True + + >>> v1 = unit_vector(v0, axis=1) + >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1) + >>> numpy.allclose(v1, v2) + True + + >>> v1 = numpy.empty((5, 4, 3), dtype=numpy.float32) + >>> unit_vector(v0, axis=1, out=v1) + >>> numpy.allclose(v1, v2) + True + + >>> list(unit_vector([])) + [] + + >>> list(unit_vector([1.0])) + [1.0] + + Args: + data (np.array): data to normalize + axis (None or int): If specified, determines specific axis along data to normalize + out (None or np.array): If specified, will store computation in this variable + + Returns: + None or np.array: If @out is not specified, will return normalized vector. Otherwise, stores the output in @out + """ + if out is None: + data = np.array(data, dtype=np.float32, copy=True) + if data.ndim == 1: + data /= math.sqrt(np.dot(data, data)) + return data + else: + if out is not data: + out[:] = np.array(data, copy=False) + data = out + length = np.atleast_1d(np.sum(data * data, axis)) + np.sqrt(length, length) + if axis is not None: + length = np.expand_dims(length, axis) + data /= length + if out is None: + return data + + +def get_orientation_error(target_orn, current_orn): + """ + Returns the difference between two quaternion orientations as a 3 DOF numpy array. + For use in an impedance controller / task-space PD controller. + + Args: + target_orn (np.array): (x, y, z, w) desired quaternion orientation + current_orn (np.array): (x, y, z, w) current quaternion orientation + + Returns: + orn_error (np.array): (ax,ay,az) current orientation error, corresponds to + (target_orn - current_orn) + """ + current_orn = np.array([current_orn[3], current_orn[0], current_orn[1], current_orn[2]]) + target_orn = np.array([target_orn[3], target_orn[0], target_orn[1], target_orn[2]]) + + pinv = np.zeros((3, 4)) + pinv[0, :] = [-current_orn[1], current_orn[0], -current_orn[3], current_orn[2]] + pinv[1, :] = [-current_orn[2], current_orn[3], current_orn[0], -current_orn[1]] + pinv[2, :] = [-current_orn[3], -current_orn[2], current_orn[1], current_orn[0]] + orn_error = 2.0 * pinv.dot(np.array(target_orn)) + return orn_error + + +def get_orientation_diff_in_radian(orn0, orn1): + """ + Returns the difference between two quaternion orientations in radian + + Args: + orn0 (np.array): (x, y, z, w) + orn1 (np.array): (x, y, z, w) + + Returns: + orn_diff (float): orientation difference in radian + """ + vec0 = quat2axisangle(orn0) + vec0 /= np.linalg.norm(vec0) + vec1 = quat2axisangle(orn1) + vec1 /= np.linalg.norm(vec1) + return np.arccos(np.dot(vec0, vec1)) + + +def get_pose_error(target_pose, current_pose): + """ + Computes the error corresponding to target pose - current pose as a 6-dim vector. + The first 3 components correspond to translational error while the last 3 components + correspond to the rotational error. + + Args: + target_pose (np.array): a 4x4 homogenous matrix for the target pose + current_pose (np.array): a 4x4 homogenous matrix for the current pose + + Returns: + np.array: 6-dim pose error. 
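+
+    E.g. (illustrative sanity check -- identical poses should produce a zero
+    error vector):
+        >>> pose = np.eye(4)
+        >>> np.allclose(get_pose_error(pose, pose), np.zeros(6))
+        True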
+ """ + error = np.zeros(6) + + # compute translational error + target_pos = target_pose[:3, 3] + current_pos = current_pose[:3, 3] + pos_err = target_pos - current_pos + + # compute rotational error + r1 = current_pose[:3, 0] + r2 = current_pose[:3, 1] + r3 = current_pose[:3, 2] + r1d = target_pose[:3, 0] + r2d = target_pose[:3, 1] + r3d = target_pose[:3, 2] + rot_err = 0.5 * (np.cross(r1, r1d) + np.cross(r2, r2d) + np.cross(r3, r3d)) + + error[:3] = pos_err + error[3:] = rot_err + return error + + +def matrix_inverse(matrix): + """ + Helper function to have an efficient matrix inversion function. + + Args: + matrix (np.array): 2d-array representing a matrix + + Returns: + np.array: 2d-array representing the matrix inverse + """ + return np.linalg.inv(matrix) + + +def vecs2axisangle(vec0, vec1): + """ + Converts the angle from unnormalized 3D vectors @vec0 to @vec1 into an axis-angle representation of the angle + + Args: + vec0 (np.array): (..., 3) (x,y,z) 3D vector, possibly unnormalized + vec1 (np.array): (..., 3) (x,y,z) 3D vector, possibly unnormalized + """ + # Normalize vectors + vec0 = normalize(vec0, axis=-1) + vec1 = normalize(vec1, axis=-1) + + # Get cross product for direction of angle, and multiply by arcos of the dot product which is the angle + return np.cross(vec0, vec1) * np.arccos((vec0 * vec1).sum(-1, keepdims=True)) + + +def vecs2quat(vec0, vec1, normalized=False): + """ + Converts the angle from unnormalized 3D vectors @vec0 to @vec1 into a quaternion representation of the angle + + Args: + vec0 (np.array): (..., 3) (x,y,z) 3D vector, possibly unnormalized + vec1 (np.array): (..., 3) (x,y,z) 3D vector, possibly unnormalized + normalized (bool): If True, @vec0 and @vec1 are assumed to already be normalized and we will skip the + normalization step (more efficient) + """ + # Normalize vectors if requested + if not normalized: + vec0 = normalize(vec0, axis=-1) + vec1 = normalize(vec1, axis=-1) + + # Half-way Quaternion Solution -- see https://stackoverflow.com/a/11741520 + cos_theta = np.sum(vec0 * vec1, axis=-1, keepdims=True) + quat_unnormalized = np.where( + cos_theta == -1, np.array([1.0, 0, 0, 0]), np.concatenate([np.cross(vec0, vec1), 1 + cos_theta], axis=-1) + ) + return quat_unnormalized / np.linalg.norm(quat_unnormalized, axis=-1, keepdims=True) + + +def l2_distance(v1, v2): + """Returns the L2 distance between vector v1 and v2.""" + return np.linalg.norm(np.array(v1) - np.array(v2)) + + +def frustum(left, right, bottom, top, znear, zfar): + """Create view frustum matrix.""" + assert right != left + assert bottom != top + assert znear != zfar + + M = np.zeros((4, 4), dtype=np.float32) + M[0, 0] = +2.0 * znear / (right - left) + M[2, 0] = (right + left) / (right - left) + M[1, 1] = +2.0 * znear / (top - bottom) + # TODO: Put this back to 3,1 + # M[3, 1] = (top + bottom) / (top - bottom) + M[2, 1] = (top + bottom) / (top - bottom) + M[2, 2] = -(zfar + znear) / (zfar - znear) + M[3, 2] = -2.0 * znear * zfar / (zfar - znear) + M[2, 3] = -1.0 + return M + + +def ortho(left, right, bottom, top, znear, zfar): + """Create orthonormal projection matrix.""" + assert right != left + assert bottom != top + assert znear != zfar + + M = np.zeros((4, 4), dtype=np.float32) + M[0, 0] = 2.0 / (right - left) + M[1, 1] = 2.0 / (top - bottom) + M[2, 2] = -2.0 / (zfar - znear) + M[3, 0] = -(right + left) / (right - left) + M[3, 1] = -(top + bottom) / (top - bottom) + M[3, 2] = -(zfar + znear) / (zfar - znear) + M[3, 3] = 1.0 + return M + + +def perspective(fovy, aspect, znear, zfar): 
+ """Create perspective projection matrix.""" + # fovy is in degree + assert znear != zfar + h = np.tan(fovy / 360.0 * np.pi) * znear + w = h * aspect + return frustum(-w, w, -h, h, znear, zfar) + + +def anorm(x, axis=None, keepdims=False): + """Compute L2 norms alogn specified axes.""" + return np.linalg.norm(x, axis=axis, keepdims=keepdims) + + +def normalize(v, axis=None, eps=1e-10): + """L2 Normalize along specified axes.""" + norm = anorm(v, axis=axis, keepdims=True) + return v / np.where(norm < eps, eps, norm) + + +def cartesian_to_polar(x, y): + """Convert cartesian coordinate to polar coordinate""" + rho = np.sqrt(x**2 + y**2) + phi = np.arctan2(y, x) + return rho, phi + + +def deg2rad(deg): + return deg * np.pi / 180.0 + + +def rad2deg(rad): + return rad * 180.0 / np.pi + + +def check_quat_right_angle(quat, atol=5e-2): + """ + Check by making sure the quaternion is some permutation of +/- (1, 0, 0, 0), + +/- (0.707, 0.707, 0, 0), or +/- (0.5, 0.5, 0.5, 0.5) + Because orientations are all normalized (same L2-norm), every orientation should have a unique L1-norm + So we check the L1-norm of the absolute value of the orientation as a proxy for verifying these values + + Args: + quat (4-array): (x,y,z,w) quaternion orientation to check + atol (float): Absolute tolerance permitted + + Returns: + bool: Whether the quaternion is a right angle or not + """ + return np.any(np.isclose(np.abs(quat).sum(), np.array([1.0, 1.414, 2.0]), atol=atol)) + + +def z_angle_from_quat(quat): + """Get the angle around the Z axis produced by the quaternion.""" + rotated_X_axis = R.from_quat(quat).apply([1, 0, 0]) + return np.arctan2(rotated_X_axis[1], rotated_X_axis[0]) + + +def z_rotation_from_quat(quat): + """Get the quaternion for the rotation around the Z axis produced by the quaternion.""" + return R.from_euler("z", z_angle_from_quat(quat)).as_quat() + + +def integer_spiral_coordinates(n): + """A function to map integers to 2D coordinates in a spiral pattern around the origin.""" + # Map integers from Z to Z^2 in a spiral pattern around the origin. + # Sources: + # https://www.reddit.com/r/askmath/comments/18vqorf/find_the_nth_coordinate_of_a_square_spiral/ + # https://oeis.org/A174344 + m = np.floor(np.sqrt(n)) + x = ((-1) ** m) * ((n - m * (m + 1)) * (np.floor(2 * np.sqrt(n)) % 2) - np.ceil(m / 2)) + y = ((-1) ** (m + 1)) * ((n - m * (m + 1)) * (np.floor(2 * np.sqrt(n) + 1) % 2) + np.ceil(m / 2)) + return int(x), int(y) + + +def calculate_xy_plane_angle(quaternion): + """ + Compute the 2D orientation angle from a quaternion assuming the initial forward vector is along the x-axis. + + Parameters: + quaternion : array_like + The quaternion (w, x, y, z) representing the rotation. + + Returns: + float + The angle (in radians) of the projection of the forward vector onto the XY plane. + Returns 0.0 if the projected vector's magnitude is negligibly small. + """ + fwd = R.from_quat(quaternion).apply([1, 0, 0]) + fwd[2] = 0.0 + + if np.linalg.norm(fwd) < 1e-4: + return 0.0 + + fwd /= np.linalg.norm(fwd) + return np.arctan2(fwd[1], fwd[0]) + + +@jit(nopython=True) +def orientation_error(desired, current): + """ + This function calculates a 3-dimensional orientation error vector for use in the + impedance controller. It does this by computing the delta rotation between the + inputs and converting that rotation to exponential coordinates (axis-angle + representation, where the 3d vector is axis * angle). + See https://en.wikipedia.org/wiki/Axis%E2%80%93angle_representation for more information. 
+ Optimized function to determine orientation error from matrices + + Args: + desired (tensor): (..., 3, 3) where final two dims are 2d array representing target orientation matrix + current (tensor): (..., 3, 3) where final two dims are 2d array representing current orientation matrix + Returns: + tensor: (..., 3) where final dim is (ax, ay, az) axis-angle representing orientation error + """ + # convert input shapes + input_shape = desired.shape[:-2] + desired = desired.reshape(-1, 3, 3) + current = current.reshape(-1, 3, 3) + + # grab relevant info + rc1 = current[:, :, 0] + rc2 = current[:, :, 1] + rc3 = current[:, :, 2] + rd1 = desired[:, :, 0] + rd2 = desired[:, :, 1] + rd3 = desired[:, :, 2] + + error = 0.5 * (np.cross(rc1, rd1) + np.cross(rc2, rd2) + np.cross(rc3, rd3)) + + # Reshape + error = error.reshape(*input_shape, 3) + + return error diff --git a/omnigibson/utils/usd_utils.py b/omnigibson/utils/usd_utils.py index 10939d81e..0b5a1966f 100644 --- a/omnigibson/utils/usd_utils.py +++ b/omnigibson/utils/usd_utils.py @@ -8,11 +8,14 @@ import numpy as np import torch as th import trimesh +from numba import jit, prange import omnigibson as og import omnigibson.lazy as lazy import omnigibson.utils.transform_utils as T from omnigibson.macros import gm +from omnigibson.utils.backend_utils import _compute_backend as cb +from omnigibson.utils.backend_utils import _ComputeBackend, _ComputeNumpyBackend, _ComputeTorchBackend from omnigibson.utils.constants import PRIMITIVE_MESH_TYPES, JointType, PrimType from omnigibson.utils.numpy_utils import vtarray_to_torch from omnigibson.utils.python_utils import assert_valid_key @@ -849,21 +852,54 @@ def clear(self): self._read_cache = {} self._write_idx_cache = collections.defaultdict(set) + def _set_dof_position_targets(self, data, indices, cast=True): + # No casting results in better efficiency + if cast: + data = self._view._frontend.as_contiguous_float32(data) + indices = self._view._frontend.as_contiguous_uint32(indices) + data_desc = self._view._frontend.get_tensor_desc(data) + indices_desc = self._view._frontend.get_tensor_desc(indices) + + if not self._view._backend.set_dof_position_targets(data_desc, indices_desc): + raise Exception("Failed to set DOF positions in backend") + + def _set_dof_velocity_targets(self, data, indices, cast=True): + # No casting results in better efficiency + if cast: + data = self._view._frontend.as_contiguous_float32(data) + indices = self._view._frontend.as_contiguous_uint32(indices) + data_desc = self._view._frontend.get_tensor_desc(data) + indices_desc = self._view._frontend.get_tensor_desc(indices) + + if not self._view._backend.set_dof_velocity_targets(data_desc, indices_desc): + raise Exception("Failed to set DOF velocities in backend") + + def _set_dof_actuation_forces(self, data, indices, cast=True): + # No casting results in better efficiency + if cast: + data = self._view._frontend.as_contiguous_float32(data) + indices = self._view._frontend.as_contiguous_uint32(indices) + data_desc = self._view._frontend.get_tensor_desc(data) + indices_desc = self._view._frontend.get_tensor_desc(indices) + + if not self._view._backend.set_dof_actuation_forces(data_desc, indices_desc): + raise Exception("Failed to set DOF actuation forces in backend") + def flush_control(self): if "dof_position_targets" in self._write_idx_cache: - pos_indices = th.tensor(sorted(self._write_idx_cache["dof_position_targets"])) + pos_indices = cb.int_array(sorted(self._write_idx_cache["dof_position_targets"])) pos_targets = 
self._read_cache["dof_position_targets"] - self._view.set_dof_position_targets(pos_targets, pos_indices) + self._set_dof_position_targets(cb.to_torch(pos_targets), cb.to_torch(pos_indices), cast=False) if "dof_velocity_targets" in self._write_idx_cache: - vel_indices = th.tensor(sorted(self._write_idx_cache["dof_velocity_targets"])) + vel_indices = cb.int_array(sorted(self._write_idx_cache["dof_velocity_targets"])) vel_targets = self._read_cache["dof_velocity_targets"] - self._view.set_dof_velocity_targets(vel_targets, vel_indices) + self._set_dof_velocity_targets(cb.to_torch(vel_targets), cb.to_torch(vel_indices), cast=False) if "dof_actuation_forces" in self._write_idx_cache: - eff_indices = th.tensor(sorted(self._write_idx_cache["dof_actuation_forces"])) + eff_indices = cb.int_array(sorted(self._write_idx_cache["dof_actuation_forces"])) eff_targets = self._read_cache["dof_actuation_forces"] - self._view.set_dof_actuation_forces(eff_targets, eff_indices) + self._set_dof_actuation_forces(cb.to_torch(eff_targets), cb.to_torch(eff_indices), cast=False) def initialize_view(self): # First, get all of the controllable objects in the scene (avoiding circular import) @@ -913,10 +949,10 @@ def set_joint_position_targets(self, prim_path, positions, indices): # Load the current targets. if "dof_position_targets" not in self._read_cache: - self._read_cache["dof_position_targets"] = self._view.get_dof_position_targets() + self._read_cache["dof_position_targets"] = cb.from_torch(self._view.get_dof_position_targets()) # Update the target - self._read_cache["dof_position_targets"][idx][indices] = positions + self._read_cache["dof_position_targets"][idx, indices] = positions # Add this index to the write cache self._write_idx_cache["dof_position_targets"].add(idx) @@ -927,10 +963,10 @@ def set_joint_velocity_targets(self, prim_path, velocities, indices): # Load the current targets. if "dof_velocity_targets" not in self._read_cache: - self._read_cache["dof_velocity_targets"] = self._view.get_dof_velocity_targets() + self._read_cache["dof_velocity_targets"] = cb.from_torch(self._view.get_dof_velocity_targets()) # Update the target - self._read_cache["dof_velocity_targets"][idx][indices] = velocities + self._read_cache["dof_velocity_targets"][idx, indices] = velocities # Add this index to the write cache self._write_idx_cache["dof_velocity_targets"].add(idx) @@ -941,17 +977,17 @@ def set_joint_efforts(self, prim_path, efforts, indices): # Load the current targets. 
if "dof_actuation_forces" not in self._read_cache: - self._read_cache["dof_actuation_forces"] = self._view.get_dof_actuation_forces() + self._read_cache["dof_actuation_forces"] = cb.from_torch(self._view.get_dof_actuation_forces()) # Update the target - self._read_cache["dof_actuation_forces"][idx][indices] = efforts + self._read_cache["dof_actuation_forces"][idx, indices] = efforts # Add this index to the write cache self._write_idx_cache["dof_actuation_forces"].add(idx) def get_root_transform(self, prim_path): if "root_transforms" not in self._read_cache: - self._read_cache["root_transforms"] = self._view.get_root_transforms() + self._read_cache["root_transforms"] = cb.from_torch(self._view.get_root_transforms()) idx = self._idx[prim_path] pose = self._read_cache["root_transforms"][idx] @@ -966,160 +1002,186 @@ def get_position_orientation(self, prim_path): else: return self.get_root_transform(prim_path) - def get_linear_velocity(self, prim_path): + def _get_velocities(self, prim_path): if self._base_footprint_link_names[prim_path] is not None: link_name = self._base_footprint_link_names[prim_path] - return self.get_link_linear_velocity(prim_path, link_name) + return self._get_link_velocities(prim_path, link_name) else: - return self.get_root_linear_velocity(prim_path) + return self._get_root_velocities(prim_path) - def get_angular_velocity(self, prim_path): - if self._base_footprint_link_names[prim_path] is not None: - link_name = self._base_footprint_link_names[prim_path] - return self.get_link_angular_velocity(prim_path, link_name) - else: - return self.get_root_angular_velocity(prim_path) + def _get_relative_velocities(self, prim_path): + if "relative_velocities" not in self._read_cache: + self._read_cache["relative_velocities"] = {} - def get_root_linear_velocity(self, prim_path): - if "root_velocities" not in self._read_cache: - self._read_cache["root_velocities"] = self._view.get_root_velocities() + if prim_path not in self._read_cache["relative_velocities"]: + # Compute all tfs at once, including base as well as all links + if "link_velocities" not in self._read_cache: + self._read_cache["link_velocities"] = cb.from_torch(self._view.get_link_velocities()) - idx = self._idx[prim_path] - return self._read_cache["root_velocities"][idx][:3] + idx = self._idx[prim_path] + vels = cb.zeros((len(self._link_idx[idx]) + 1, 6, 1)) + # base vel is the final -1 index + vels[:-1, :, 0] = self._read_cache["link_velocities"][idx, :] + vels[-1, :, 0] = self._get_velocities(prim_path=prim_path) - def get_root_angular_velocity(self, prim_path): + tf = cb.zeros((1, 6, 6)) + orn_t = cb.T.quat2mat(self.get_position_orientation(prim_path)[1]).T + tf[0, :3, :3] = orn_t + tf[0, 3:, 3:] = orn_t + # x.T --> transpose (inverse) orientation + # (1, 6, 6) @ (n_links, 6, 1) -> (n_links, 6, 1) -> (n_links, 6) + self._read_cache["relative_velocities"][prim_path] = cb.squeeze(tf @ vels, dim=-1) + + return self._read_cache["relative_velocities"][prim_path] + + def get_linear_velocity(self, prim_path): + return self._get_velocities(prim_path)[:3] + + def get_angular_velocity(self, prim_path): + return self._get_velocities(prim_path)[3:] + + def _get_root_velocities(self, prim_path): if "root_velocities" not in self._read_cache: - self._read_cache["root_velocities"] = self._view.get_root_velocities() + self._read_cache["root_velocities"] = cb.from_torch(self._view.get_root_velocities()) idx = self._idx[prim_path] - return self._read_cache["root_velocities"][idx][3:] + return self._read_cache["root_velocities"][idx] 
def get_relative_linear_velocity(self, prim_path): - orn = self.get_position_orientation(prim_path)[1] - linvel = self.get_linear_velocity(prim_path) - # x.T --> transpose (inverse) orientation - return T.quat2mat(orn).T @ linvel + # base corresponds to final index + return self._get_relative_velocities(prim_path)[-1, :3] def get_relative_angular_velocity(self, prim_path): - orn = self.get_position_orientation(prim_path)[1] - angvel = self.get_angular_velocity(prim_path) - # x.T --> transpose (inverse) orientation - return T.quat2mat(orn).T @ angvel + # base corresponds to final index + return self._get_relative_velocities(prim_path)[-1, 3:] def get_joint_positions(self, prim_path): if "dof_positions" not in self._read_cache: - self._read_cache["dof_positions"] = self._view.get_dof_positions() + self._read_cache["dof_positions"] = cb.from_torch(self._view.get_dof_positions()) idx = self._idx[prim_path] return self._read_cache["dof_positions"][idx] def get_joint_velocities(self, prim_path): if "dof_velocities" not in self._read_cache: - self._read_cache["dof_velocities"] = self._view.get_dof_velocities() + self._read_cache["dof_velocities"] = cb.from_torch(self._view.get_dof_velocities()) idx = self._idx[prim_path] return self._read_cache["dof_velocities"][idx] def get_joint_efforts(self, prim_path): if "dof_projected_joint_forces" not in self._read_cache: - self._read_cache["dof_projected_joint_forces"] = self._view.get_dof_projected_joint_forces() + self._read_cache["dof_projected_joint_forces"] = cb.from_torch(self._view.get_dof_projected_joint_forces()) idx = self._idx[prim_path] return self._read_cache["dof_projected_joint_forces"][idx] def get_mass_matrix(self, prim_path): if "mass_matrices" not in self._read_cache: - self._read_cache["mass_matrices"] = self._view.get_mass_matrices() + self._read_cache["mass_matrices"] = cb.from_torch(self._view.get_mass_matrices()) idx = self._idx[prim_path] return self._read_cache["mass_matrices"][idx] def get_generalized_gravity_forces(self, prim_path): if "generalized_gravity_forces" not in self._read_cache: - self._read_cache["generalized_gravity_forces"] = self._view.get_generalized_gravity_forces() + self._read_cache["generalized_gravity_forces"] = cb.from_torch(self._view.get_generalized_gravity_forces()) idx = self._idx[prim_path] return self._read_cache["generalized_gravity_forces"][idx] def get_coriolis_and_centrifugal_forces(self, prim_path): if "coriolis_and_centrifugal_forces" not in self._read_cache: - self._read_cache["coriolis_and_centrifugal_forces"] = self._view.get_coriolis_and_centrifugal_forces() + self._read_cache["coriolis_and_centrifugal_forces"] = cb.from_torch( + self._view.get_coriolis_and_centrifugal_forces() + ) idx = self._idx[prim_path] return self._read_cache["coriolis_and_centrifugal_forces"][idx] def get_link_transform(self, prim_path, link_name): if "link_transforms" not in self._read_cache: - self._read_cache["link_transforms"] = self._view.get_link_transforms() + self._read_cache["link_transforms"] = cb.from_torch(self._view.get_link_transforms()) idx = self._idx[prim_path] link_idx = self._link_idx[idx][link_name] - pose = self._read_cache["link_transforms"][idx][link_idx] + pose = self._read_cache["link_transforms"][idx, link_idx] return pose[:3], pose[3:] - def get_link_relative_position_orientation(self, prim_path, link_name): - pos, orn = self.get_link_transform(prim_path, link_name) + def _get_relative_poses(self, prim_path): + if "relative_poses" not in self._read_cache: + self._read_cache["relative_poses"] 
= {} + + if prim_path not in self._read_cache["relative_poses"]: + # Compute all tfs at once, including base as well as all links + if "link_transforms" not in self._read_cache: + self._read_cache["link_transforms"] = cb.from_torch(self._view.get_link_transforms()) + + idx = self._idx[prim_path] + self._read_cache["relative_poses"][prim_path] = cb.compute_relative_poses( + idx, + len(self._link_idx[idx]), + self._read_cache["link_transforms"], + self.get_position_orientation(prim_path=prim_path), + ) - # Get the root world transform too - world_pos, world_orn = self.get_position_orientation(prim_path) + return self._read_cache["relative_poses"][prim_path] - # Compute the relative position and orientation - return T.relative_pose_transform(pos, orn, world_pos, world_orn) + def get_link_relative_position_orientation(self, prim_path, link_name): + idx = self._idx[prim_path] + link_idx = self._link_idx[idx][link_name] + rel_pose = self._get_relative_poses(prim_path)[link_idx] + return rel_pose[:3], rel_pose[3:] - def get_link_linear_velocity(self, prim_path, link_name): + def _get_link_velocities(self, prim_path, link_name): if "link_velocities" not in self._read_cache: - self._read_cache["link_velocities"] = self._view.get_link_velocities() + self._read_cache["link_velocities"] = cb.from_torch(self._view.get_link_velocities()) idx = self._idx[prim_path] link_idx = self._link_idx[idx][link_name] - vel = self._read_cache["link_velocities"][idx][link_idx] - linvel = vel[:3] - - return linvel + vel = self._read_cache["link_velocities"][idx, link_idx] - def get_link_relative_linear_velocity(self, prim_path, link_name): - linvel = self.get_link_linear_velocity(prim_path, link_name) - - # Get the root world transform too - _, world_orn = self.get_position_orientation(prim_path) + return vel - # Compute the relative position and orientation - return T.quat2mat(world_orn).T @ linvel - - def get_link_angular_velocity(self, prim_path, link_name): - if "link_velocities" not in self._read_cache: - self._read_cache["link_velocities"] = self._view.get_link_velocities() + def get_link_linear_velocity(self, prim_path, link_name): + return self._get_link_velocities(prim_path, link_name)[:3] + def get_link_relative_linear_velocity(self, prim_path, link_name): idx = self._idx[prim_path] link_idx = self._link_idx[idx][link_name] - vel = self._read_cache["link_velocities"][idx][link_idx] - angvel = vel[3:] + return self._get_relative_velocities(prim_path)[link_idx, :3] - return angvel + def get_link_angular_velocity(self, prim_path, link_name): + return self._get_link_velocities(prim_path, link_name)[3:] def get_link_relative_angular_velocity(self, prim_path, link_name): - angvel = self.get_link_angular_velocity(prim_path, link_name) - - # Get the root world transform too - _, world_orn = self.get_position_orientation(prim_path) - - # Compute the relative position and orientation - return T.quat2mat(world_orn).T @ angvel + idx = self._idx[prim_path] + link_idx = self._link_idx[idx][link_name] + return self._get_relative_velocities(prim_path)[link_idx, 3:] def get_jacobian(self, prim_path): if "jacobians" not in self._read_cache: - self._read_cache["jacobians"] = self._view.get_jacobians() + self._read_cache["jacobians"] = cb.from_torch(self._view.get_jacobians()) idx = self._idx[prim_path] return self._read_cache["jacobians"][idx] def get_relative_jacobian(self, prim_path): - jacobian = self.get_jacobian(prim_path) - ori_t = T.quat2mat(self.get_position_orientation(prim_path)[1]).T - tf = th.zeros((1, 6, 6), 
     def get_jacobian(self, prim_path):
         if "jacobians" not in self._read_cache:
-            self._read_cache["jacobians"] = self._view.get_jacobians()
+            self._read_cache["jacobians"] = cb.from_torch(self._view.get_jacobians())
 
         idx = self._idx[prim_path]
         return self._read_cache["jacobians"][idx]
 
     def get_relative_jacobian(self, prim_path):
-        jacobian = self.get_jacobian(prim_path)
-        ori_t = T.quat2mat(self.get_position_orientation(prim_path)[1]).T
-        tf = th.zeros((1, 6, 6), dtype=th.float32)
-        tf[:, :3, :3] = ori_t
-        tf[:, 3:, 3:] = ori_t
-        return tf @ jacobian
+        if "relative_jacobians" not in self._read_cache:
+            self._read_cache["relative_jacobians"] = {}
+
+        if prim_path not in self._read_cache["relative_jacobians"]:
+            jacobian = self.get_jacobian(prim_path)
+            ori_t = cb.T.quat2mat(self.get_position_orientation(prim_path)[1]).T
+            tf = cb.zeros((1, 6, 6))
+            tf[:, :3, :3] = ori_t
+            tf[:, 3:, 3:] = ori_t
+            # Run this explicitly in pytorch since it's an order of magnitude faster than numpy!
+            # e.g.: 2e-4 (numpy) vs. 3e-5 (pytorch) on R1
+            rel_jac = cb.from_torch(cb.to_torch(tf) @ cb.to_torch(jacobian))
+            self._read_cache["relative_jacobians"][prim_path] = rel_jac
+
+        return self._read_cache["relative_jacobians"][prim_path]
 
 
 class ControllableObjectViewAPI:
@@ -1233,6 +1295,10 @@ def set_joint_efforts(cls, prim_path, efforts, indices):
     def get_position_orientation(cls, prim_path):
         return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_position_orientation(prim_path)
 
+    @classmethod
+    def get_root_position_orientation(cls, prim_path):
+        return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_root_transform(prim_path)
+
     @classmethod
     def get_linear_velocity(cls, prim_path):
         return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_linear_velocity(prim_path)
@@ -1279,6 +1345,12 @@ def get_coriolis_and_centrifugal_forces(cls, prim_path):
             prim_path
         )
 
+    @classmethod
+    def get_link_position_orientation(cls, prim_path, link_name):
+        return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_link_transform(
+            prim_path, link_name
+        )
+
     @classmethod
     def get_link_relative_position_orientation(cls, prim_path, link_name):
         return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_link_relative_position_orientation(
             prim_path, link_name
         )
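get_relative_jacobian above rotates the cached world-frame jacobian into the robot base frame with a block-diagonal pair of inverse rotations. A sketch for a single 6 x N jacobian (torch; mirrors the tf construction in the patch):

import torch as th

import omnigibson.utils.transform_utils as T


def world_to_base_jacobian(jacobian, base_quat):
    # jacobian: (6, N) world-frame jacobian; base_quat: (4,) xyzw base orientation
    ori_t = T.quat2mat(base_quat).T  # transpose of a rotation matrix == its inverse
    tf = th.zeros(6, 6, dtype=jacobian.dtype)
    tf[:3, :3] = ori_t  # rotate the linear-velocity rows
    tf[3:, 3:] = ori_t  # rotate the angular-velocity rows
    return tf @ jacobian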
+ """ + SDF_TYPE_MAPPING = { + lazy.pxr.Gf.Vec3f: lazy.pxr.Sdf.ValueTypeNames.Float3, + lazy.pxr.Gf.Vec2f: lazy.pxr.Sdf.ValueTypeNames.Float2, + lazy.pxr.Sdf.AssetPath: lazy.pxr.Sdf.ValueTypeNames.Asset, + int: lazy.pxr.Sdf.ValueTypeNames.Int, + float: lazy.pxr.Sdf.ValueTypeNames.Float, + bool: lazy.pxr.Sdf.ValueTypeNames.Bool, + str: lazy.pxr.Sdf.ValueTypeNames.String, + } + for type_, usd_type in SDF_TYPE_MAPPING.items(): + if isinstance(val, type_): + return usd_type + raise ValueError(f"Unsupported input type: {type(val)}") + + +import omnigibson.utils.transform_utils as TT + + +@th.compile +def _compute_relative_poses_torch( + idx: int, + n_links: int, + all_tfs: th.Tensor, + base_pose: th.Tensor, +): + tfs = th.zeros((n_links, 4, 4), dtype=th.float32) + # base vel is the final -1 index + link_tfs = all_tfs[idx, :] + tfs[:, 3, 3] = 1.0 + tfs[:, :3, 3] = link_tfs[:, :3] + tfs[:, :3, :3] = TT.quat2mat(link_tfs[:, 3:]) + base_tf_inv = th.zeros((1, 4, 4), dtype=th.float32) + base_tf_inv[0, :, :] = TT.pose_inv(TT.pose2mat(base_pose)) + + # (1, 4, 4) @ (n_links, 4, 4) -> (n_links, 4, 4) + rel_tfs = base_tf_inv @ tfs + + # Re-convert to quat form + rel_poses = th.zeros((n_links, 7), dtype=th.float32) + rel_poses[:, :3] = rel_tfs[:, :3, 3] + rel_poses[:, 3:] = TT.mat2quat(rel_tfs[:, :3, :3]) + + return rel_poses + + +import omnigibson.utils.transform_utils_np as NT + + +@jit(nopython=True) +def _compute_relative_poses_numpy(idx, n_links, all_tfs, base_pose): + tfs = np.zeros((n_links, 4, 4), dtype=np.float32) + # base vel is the final -1 index + link_tfs = all_tfs[idx, :] + tfs[:, 3, 3] = 1.0 + tfs[:, :3, 3] = link_tfs[:, :3] + tfs[:, :3, :3] = NT._quat2mat(link_tfs[:, 3:]) + # base_tf_inv = np.zeros((1, 4, 4), dtype=np.float32) + # base_tf_inv[0, :, :] = NT._pose_inv(NT.pose2mat(base_pose)) + base_tf_inv = NT._pose_inv(NT.pose2mat(base_pose)) + + # (1, 4, 4) @ (n_links, 4, 4) -> (n_links, 4, 4) + rel_tfs = np.zeros((n_links, 4, 4), dtype=np.float32) + for i in prange(n_links): + rel_tfs[i, :, :] = base_tf_inv @ tfs[i, :, :] + # rel_tfs = base_tf_inv @ tfs + + # Re-convert to quat form + rel_poses = np.zeros((n_links, 7), dtype=np.float32) + rel_poses[:, :3] = rel_tfs[:, :3, 3] + rel_poses[:, 3:] = NT.mat2quat_batch(rel_tfs[:, :3, :3].copy()) + + return rel_poses + + +# Set these as part of the backend values +setattr(_ComputeBackend, "compute_relative_poses", None) +setattr(_ComputeTorchBackend, "compute_relative_poses", _compute_relative_poses_torch) +setattr(_ComputeNumpyBackend, "compute_relative_poses", _compute_relative_poses_numpy) diff --git a/setup.py b/setup.py index 381624d45..56564cb50 100644 --- a/setup.py +++ b/setup.py @@ -48,6 +48,8 @@ "graphviz>=0.20", "matplotlib>=3.0.0", "lxml>=5.3.0", + "numba>=0.60.0", + "cffi>=1.16.0", "pillow~=10.2.0", ], extras_require={ diff --git a/tests/test_controllers.py b/tests/test_controllers.py index 7b13674aa..a25c9d1ba 100644 --- a/tests/test_controllers.py +++ b/tests/test_controllers.py @@ -5,6 +5,7 @@ import omnigibson as og import omnigibson.utils.transform_utils as T from omnigibson.robots import LocomotionRobot +from omnigibson.utils.backend_utils import _compute_backend as cb def test_arm_control(): @@ -18,7 +19,7 @@ def test_arm_control(): { "type": "FrankaPanda", "name": "robot0", - "obs_modalities": "rgb", + "obs_modalities": [], "position": [150, 150, 100], "orientation": [0, 0, 0, 1], "action_normalize": False, @@ -26,7 +27,7 @@ def test_arm_control(): { "type": "Fetch", "name": "robot1", - "obs_modalities": "rgb", + 
"obs_modalities": [], "position": [150, 150, 105], "orientation": [0, 0, 0, 1], "action_normalize": False, @@ -34,7 +35,7 @@ def test_arm_control(): { "type": "Tiago", "name": "robot2", - "obs_modalities": "rgb", + "obs_modalities": [], "position": [150, 150, 110], "orientation": [0, 0, 0, 1], "action_normalize": False, @@ -42,7 +43,7 @@ def test_arm_control(): { "type": "A1", "name": "robot3", - "obs_modalities": "rgb", + "obs_modalities": [], "position": [150, 150, 115], "orientation": [0, 0, 0, 1], "action_normalize": False, @@ -50,7 +51,7 @@ def test_arm_control(): { "type": "R1", "name": "robot4", - "obs_modalities": "rgb", + "obs_modalities": [], "position": [150, 150, 120], "orientation": [0, 0, 0, 1], "action_normalize": False, @@ -296,12 +297,8 @@ def check_ori_error(curr_orientation, init_orientation, tol=0.1): curr_pos, curr_quat = robot.get_relative_eef_pose(arm=arm) arm_controller = robot.controllers[f"arm_{arm}"] arm_goal = arm_controller.goal - target_pos = arm_goal["target_pos"] - target_quat = ( - arm_goal["target_quat"] - if controller == "InverseKinematicsController" - else T.mat2quat(arm_goal["target_ori_mat"]) - ) + target_pos = cb.to_torch(arm_goal["target_pos"]) + target_quat = T.mat2quat(cb.to_torch(arm_goal["target_ori_mat"])) pos_check = err_checks[controller_mode][action_name]["pos"] if pos_check is not None: is_valid_pos = pos_check(target_pos, curr_pos, init_pos) diff --git a/tests/test_curobo.py b/tests/test_curobo.py index 4161a1e8a..caf230df4 100644 --- a/tests/test_curobo.py +++ b/tests/test_curobo.py @@ -236,6 +236,7 @@ def test_curobo(): batch_size=batch_size, debug=False, use_cuda_graph=True, + collision_activation_distance=0.01, # Use larger activation distance for better reproducibility use_default_embodiment_only=True, ) @@ -361,6 +362,10 @@ def test_curobo(): print(f"Planning for {len(target_pos[robot.eef_link_names[robot.default_arm]])} eef targets...") + # Make sure robot is kept still for better determinism before planning + robot.keep_still() + og.sim.step_physics() + # Generate collision-free trajectories to the sampled eef poses (including self-collisions) successes, traj_paths = cmg.compute_trajectories( target_pos=target_pos, diff --git a/tests/test_robot_teleoperation.py b/tests/test_robot_teleoperation.py index 1e922b997..e1e92fdf5 100644 --- a/tests/test_robot_teleoperation.py +++ b/tests/test_robot_teleoperation.py @@ -45,7 +45,7 @@ def test_teleop(): # test moving robot arm teleop_action.right = th.cat(([0.01], th.zeros(6))) for _ in range(50): - action = robot.teleop_data_to_action(teleop_action) + action = robot.get_robot_teleop_action(teleop_action) env.step(action) cur_eef_pose = robot.links[robot.eef_link_names[robot.default_arm]].get_position_orientation() assert cur_eef_pose[0][0] - start_eef_pose[0][0] > 0.02, "Robot arm not moving forward" @@ -54,7 +54,7 @@ def test_teleop(): teleop_action.right = th.zeros(7) teleop_action.base = th.tensor([0.1, 0, 0.1]) for _ in range(50): - action = robot.teleop_data_to_action(teleop_action) + action = robot.get_robot_teleop_action(teleop_action) env.step(action) cur_base_pose = robot.get_position_orientation() assert cur_base_pose[0][0] - start_base_pose[0][0] > 0.02, "robot base not moving forward" diff --git a/tests/test_transform_utils.py b/tests/test_transform_utils.py index 318dfae81..d7eaac50f 100644 --- a/tests/test_transform_utils.py +++ b/tests/test_transform_utils.py @@ -358,7 +358,7 @@ def test_quat_slerp(self): key_rots = R.from_quat(np.stack([q1.cpu().numpy(), q2.cpu().numpy()])) 
diff --git a/tests/test_transform_utils.py b/tests/test_transform_utils.py
index 318dfae81..d7eaac50f 100644
--- a/tests/test_transform_utils.py
+++ b/tests/test_transform_utils.py
@@ -358,7 +358,7 @@ def test_quat_slerp(self):
         key_rots = R.from_quat(np.stack([q1.cpu().numpy(), q2.cpu().numpy()]))
         key_times = [0, 1]
         slerp = Slerp(key_times, key_rots)
-        scipy_q_slerp = slerp([t.item()]).as_quat()[0].astype(NumpyTypes.FLOAT32)
+        scipy_q_slerp = slerp(t).as_quat()[0].astype(NumpyTypes.FLOAT32)
 
         assert quaternions_close(q_slerp, th.from_numpy(scipy_q_slerp))
         assert_close(th.norm(q_slerp), th.tensor(1.0))
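For reference, scipy's Slerp accepts array-valued times directly, which is what the simplified call above relies on; indexing [0] then recovers a single (4,) quaternion. A self-contained sketch of the cross-check pattern (assuming scipy.spatial.transform is available):

import numpy as np
from scipy.spatial.transform import Rotation as R, Slerp

key_rots = R.random(2, random_state=0)  # two keyframe rotations
slerp = Slerp([0, 1], key_rots)
t = np.array([0.25])                    # array-valued time, not a bare scalar
q_t = slerp(t).as_quat()[0]             # -> single (4,) xyzw quaternion
assert np.isclose(np.linalg.norm(q_t), 1.0)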