Optional Numpy Compute Backend #1051

Merged 49 commits (Dec 19, 2024)

Commits
06d1578 refactor backend to use np for better single-threaded efficiency (cremebrule, Dec 3, 2024)
38b2a47 fix small typo (cremebrule, Dec 3, 2024)
429cfeb update setup requirements (cremebrule, Dec 3, 2024)
8fa44a0 cache indexing values for better efficiency (cremebrule, Dec 4, 2024)
5d3bce5 improve control loop efficiency, cache relative tf computations (cremebrule, Dec 4, 2024)
6787df3 optimize controller computation functions (cremebrule, Dec 5, 2024)
692df78 cache properties that should not change during runtime (cremebrule, Dec 5, 2024)
d092a6f optimize np numba transform functions (cremebrule, Dec 5, 2024)
4ef6f4d Merge branch 'asset-conversion' into feat/np-opt (cremebrule, Dec 12, 2024)
69df439 refactor controller backend and general compute backend (cremebrule, Dec 12, 2024)
3b8d114 update filters to use compute backend (cremebrule, Dec 12, 2024)
e45e719 fix controller bugs (cremebrule, Dec 12, 2024)
e9219c2 fix proprioception dict generation (cremebrule, Dec 12, 2024)
50d390a fix holonomic robot pose setting (cremebrule, Dec 12, 2024)
fd07d8a fix controllable object and filters state de/serialization (cremebrule, Dec 12, 2024)
9401922 make np tf use numba (cremebrule, Dec 12, 2024)
920d161 [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Dec 12, 2024)
5121aa5 ensure actions and control dict is handled properly externally (cremebrule, Dec 12, 2024)
60d65ab Merge remote-tracking branch 'origin/feat/np-opt' into feat/np-opt (cremebrule, Dec 12, 2024)
8d6f25f fix control torch reference (cremebrule, Dec 12, 2024)
1dd3b28 Merge branch 'asset-conversion' into feat/np-opt (cremebrule, Dec 12, 2024)
d4553eb Merge branch 'asset-conversion' into feat/np-opt (cremebrule, Dec 12, 2024)
f73f9f0 Merge branch 'asset-conversion' into feat/np-opt (cremebrule, Dec 12, 2024)
64fab4f Merge branch 'asset-conversion' into feat/np-opt (cremebrule, Dec 12, 2024)
62d08b9 use compute backend for filter product (cremebrule, Dec 12, 2024)
5d672a6 use 0-dim command for null joint controller (cremebrule, Dec 12, 2024)
0fca8d6 improve curobo test determinism (cremebrule, Dec 12, 2024)
c83772d make collision activation settable for better reproducability in curo… (cremebrule, Dec 12, 2024)
11b9876 [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Dec 12, 2024)
8268247 Merge branch 'asset-conversion' into feat/np-opt (cremebrule, Dec 12, 2024)
a546d45 Merge remote-tracking branch 'origin/feat/np-opt' into feat/np-opt (cremebrule, Dec 12, 2024)
b606001 fix IK controller arg parsing (cremebrule, Dec 12, 2024)
474aa0d update comments external -> custom dataset (cremebrule, Dec 12, 2024)
ee78e94 Merge branch 'asset-conversion' into feat/np-opt (cremebrule, Dec 12, 2024)
04c37c5 update version and remove extraneous np import (cremebrule, Dec 12, 2024)
3e015f7 Merge branch 'og-develop' into feat/np-opt (cremebrule, Dec 12, 2024)
e31c12a make urdf paths relative during custom asset import (cremebrule, Dec 13, 2024)
6275ab9 handle vision sensor cleanup properly (cremebrule, Dec 13, 2024)
42e1d9d fix default command output behavior for joint controllers (cremebrule, Dec 14, 2024)
2f856aa refactor compute backend to allow for explicit externally-defined fun… (cremebrule, Dec 19, 2024)
a2d35c9 fix default command output generation for multi finger controller (cremebrule, Dec 19, 2024)
475cbc7 fix semantics for null joint controller (cremebrule, Dec 19, 2024)
8faa0a9 verify that th and np transform utils have 1-to-1 corresponding funct… (cremebrule, Dec 19, 2024)
1c67bba update null joint controller default args in robot classes (cremebrule, Dec 19, 2024)
66cfff7 [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Dec 19, 2024)
374513b re-add quaternions_close (cremebrule, Dec 19, 2024)
209a8b3 Merge remote-tracking branch 'origin/feat/np-opt' into feat/np-opt (cremebrule, Dec 19, 2024)
46c6ecb re-add transformation_matrix (cremebrule, Dec 19, 2024)
a1fbadd add copysign to np transform utils (cremebrule, Dec 19, 2024)
Files changed
6 changes: 5 additions & 1 deletion omnigibson/action_primitives/curobo.py

@@ -150,6 +150,7 @@ def __init__(
         motion_cfg_kwargs=None,
         batch_size=2,
         use_cuda_graph=True,
+        collision_activation_distance=0.005,
         debug=False,
         use_default_embodiment_only=False,
     ):
@@ -165,6 +166,9 @@
                 MotionGenConfig.load_from_robot_config(...)
             batch_size (int): Size of batches for computing trajectories. This must be FIXED
             use_cuda_graph (bool): Whether to use CUDA graph for motion generation or not
+            collision_activation_distance (float): Distance threshold at which a collision is detected.
+                Increasing this value will make the motion planner more conservative in its planning with respect
+                to the underlying sphere representation of the robot
             debug (bool): Whether to debug generation or not, setting this True will set use_cuda_graph to False implicitly
             use_default_embodiment_only (bool): Whether to use only the default embodiment for the robot or not
         """
@@ -227,7 +231,7 @@ def __init__(
             num_trajopt_seeds=4,
             num_graph_seeds=4,
             interpolation_dt=0.03,
-            collision_activation_distance=0.005,
+            collision_activation_distance=collision_activation_distance,
             self_collision_check=True,
             maximum_trajectory_dt=None,
             fixed_iters_trajopt=True,
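
A hedged usage sketch for the new kwarg: only the constructor arguments are taken from the diff above, while the wrapper class name (CuRoboMotionGenerator) and the robot handle are assumptions for illustration.

from omnigibson.action_primitives.curobo import CuRoboMotionGenerator  # assumed class name

planner = CuRoboMotionGenerator(
    robot=robot,                          # hypothetical robot instance
    batch_size=2,                         # must stay FIXED, per the docstring
    use_cuda_graph=True,
    collision_activation_distance=0.01,   # 2x the 0.005 default: more conservative plans
)
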
(second changed file; its path was not captured in this view)

@@ -36,7 +36,8 @@
 from omnigibson.robots.locomotion_robot import LocomotionRobot
 from omnigibson.robots.manipulation_robot import ManipulationRobot
 from omnigibson.tasks.behavior_task import BehaviorTask
-from omnigibson.utils.control_utils import FKSolver, IKSolver, orientation_error
+from omnigibson.utils.backend_utils import _compute_backend as cb
+from omnigibson.utils.control_utils import FKSolver, IKSolver
 from omnigibson.utils.grasping_planning_utils import get_grasp_poses_for_object_sticky, get_grasp_position_for_open
 from omnigibson.utils.motion_planning_utils import (
     detect_robot_collision_in_sim,
@@ -336,13 +337,13 @@ def __init__(
             arm = f"arm_{arm_name}"
             arm_ctrl = self.robot.controllers[arm]
             if isinstance(arm_ctrl, InverseKinematicsController):
-                pos_relative = control_dict[f"{eef}_pos_relative"]
-                quat_relative = control_dict[f"{eef}_quat_relative"]
+                pos_relative = cb.to_torch(control_dict[f"{eef}_pos_relative"])
+                quat_relative = cb.to_torch(control_dict[f"{eef}_quat_relative"])
                 quat_relative_axis_angle = T.quat2axisangle(quat_relative)
                 self._arm_targets[arm] = (pos_relative, quat_relative_axis_angle)
             else:
 
-                arm_target = control_dict["joint_position"][arm_ctrl.dof_idx]
+                arm_target = cb.to_torch(control_dict["joint_position"])[arm_ctrl.dof_idx]
                 self._arm_targets[arm] = arm_target
 
         self.robot_copy = self._load_robot_copy()
@@ -1472,7 +1473,7 @@ def _empty_action(self, follow_arm_targets=True):
             )
             delta_pos = target_pos - current_pos
             if controller.mode == "pose_delta_ori":
-                delta_orn = orientation_error(
+                delta_orn = T.orientation_error(
                     T.quat2mat(T.axisangle2quat(target_orn_axisangle)), T.quat2mat(current_orn)
                 )
                 partial_action = th.cat((delta_pos, delta_orn))
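
The cb.to_torch calls in these hunks follow one pattern: entries in control_dict now live in whatever backend is active (possibly numpy), while the transform utilities in omnigibson.utils.transform_utils (imported as T) operate on torch tensors. A small illustration of that bridge, assuming the numpy backend is active:

import numpy as np
import torch as th

import omnigibson.utils.transform_utils as T

quat_relative = np.array([0.0, 0.0, 0.0, 1.0], dtype=np.float32)  # stand-in backend array
quat_th = th.from_numpy(quat_relative)  # what cb.to_torch amounts to for numpy inputs
axis_angle = T.quat2axisangle(quat_th)  # torch-based util from the import block above
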
63 changes: 45 additions & 18 deletions omnigibson/controllers/controller_base.py

@@ -4,6 +4,7 @@
 
 import torch as th
 
+from omnigibson.utils.backend_utils import _compute_backend as cb
 from omnigibson.utils.python_utils import Recreatable, Registerable, Serializable, assert_valid_key, classproperty
 
 # Global dicts that will contain mappings
@@ -112,11 +113,11 @@ def __init__(
         ]
         assert "has_limit" in control_limits, "Expected has_limit specified in control_limits, but does not exist."
         self._dof_has_limits = control_limits["has_limit"]
-        self._dof_idx = dof_idx.int()
+        self._dof_idx = cb.as_int(dof_idx)
 
         # Generate goal information
         self._goal_shapes = self._get_goal_shapes()
-        self._goal_dim = int(sum(th.prod(th.tensor(shape)) for shape in self._goal_shapes.values()))
+        self._goal_dim = int(sum(cb.prod(cb.array(shape)) for shape in self._goal_shapes.values()))
 
         # Initialize some other variables that will be filled in during runtime
         self._control = None
@@ -127,15 +128,12 @@
 
         # Standardize command input / output limits to be (min_array, max_array)
         command_input_limits = (
-            (-1.0, 1.0)
+            self._generate_default_command_input_limits()
             if type(command_input_limits) == str and command_input_limits == "default"
             else command_input_limits
         )
         command_output_limits = (
-            (
-                self._control_limits[self.control_type][0][self.dof_idx],
-                self._control_limits[self.control_type][1][self.dof_idx],
-            )
+            self._generate_default_command_output_limits()
             if type(command_output_limits) == str and command_output_limits == "default"
             else command_output_limits
         )
@@ -156,6 +154,31 @@ def __init__(
             )
         )
 
+    def _generate_default_command_input_limits(self):
+        """
+        Generates default command input limits based on the control limits
+
+        Returns:
+            2-tuple:
+                - int or array: min command input limits
+                - int or array: max command input limits
+        """
+        return (-1.0, 1.0)
+
+    def _generate_default_command_output_limits(self):
+        """
+        Generates default command output limits based on the control limits
+
+        Returns:
+            2-tuple:
+                - int or array: min command output limits
+                - int or array: max command output limits
+        """
+        return (
+            self._control_limits[self.control_type][0][self.dof_idx],
+            self._control_limits[self.control_type][1][self.dof_idx],
+        )
+
     def _preprocess_command(self, command):
         """
         Clips + scales inputted @command according to self.command_input_limits and self.command_output_limits.
@@ -169,7 +192,7 @@
             Array[float]: Processed command vector
         """
         # Make sure command is a th.tensor
-        command = th.tensor([command], dtype=th.float32) if type(command) in {int, float} else command
+        command = cb.array([command]) if type(command) in {int, float} else command
         # We only clip and / or scale if self.command_input_limits exists
         if self._command_input_limits is not None:
             # Clip
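
The _preprocess_command docstring above describes a clip-then-rescale: commands are clipped to command_input_limits and then affinely mapped onto command_output_limits. A standalone sketch of that arithmetic (the helper name and the limit values here are invented for illustration):

import numpy as np

def preprocess(command, in_lim=(-1.0, 1.0), out_lim=(0.0, 10.0)):
    # Clip to the input range, then map [in_lim] -> [out_lim] linearly
    clipped = np.clip(command, in_lim[0], in_lim[1])
    scale = (out_lim[1] - out_lim[0]) / (in_lim[1] - in_lim[0])
    return out_lim[0] + (clipped - in_lim[0]) * scale

print(preprocess(np.array([0.5, -2.0])))  # [7.5 0.] -- the -2.0 is clipped to -1.0 first
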
@@ -342,7 +365,7 @@ def compute_no_op_action(self, control_dict):
         if self._goal is None:
             self._goal = self.compute_no_op_goal(control_dict=control_dict)
         command = self._compute_no_op_action(control_dict=control_dict)
-        return self._reverse_preprocess_command(command)
+        return cb.to_torch(self._reverse_preprocess_command(command))
 
     def _compute_no_op_action(self, control_dict):
         """
@@ -354,18 +377,26 @@ def _dump_state(self):
         # Default is just the command
         return dict(
             goal_is_valid=self._goal is not None,
-            goal=self._goal,
+            goal=None if self._goal is None else {k: cb.to_torch(v) for k, v in self._goal.items()},
         )
 
     def _load_state(self, state):
-        # Make sure every entry in goal is a numpy array
-        self._goal = None if state["goal"] is None else {name: goal_state for name, goal_state in state["goal"].items()}
+        # Load goal
+        if state["goal"] is None:
+            self._goal = None
+        else:
+            self._goal = dict()
+            for name, goal_state in state["goal"].items():
+                if isinstance(goal_state, th.Tensor):
+                    self._goal[name] = cb.from_torch(goal_state)
+                else:
+                    self._goal[name] = goal_state
 
     def serialize(self, state):
         # Make sure size of the state is consistent, even if we have no goal
         goal_state_flattened = (
-            th.cat([goal_state.flatten() for goal_state in self._goal.values()])
+            th.cat([goal_state.flatten() for goal_state in state["goal"].values()])
             if (state)["goal_is_valid"]
             else th.zeros(self.goal_dim)
         )
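
The convention this hunk encodes: goals are kept in the active backend at runtime, but _dump_state converts them to torch tensors so that serialized states look the same regardless of backend, and _load_state converts back. Roughly, for the numpy case (a sketch of the direction of each conversion, not the actual methods):

import numpy as np
import torch as th

goal = {"target": np.zeros(3, dtype=np.float32)}         # backend-native goal
dumped = {k: th.from_numpy(v) for k, v in goal.items()}  # _dump_state direction (cb.to_torch)
restored = {k: v.numpy() for k, v in dumped.items()}     # _load_state direction (cb.from_torch)
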
@@ -419,12 +450,8 @@ def nums2array(nums, dim):
         # Else, input is a single value, so we map to a numpy array of correct size and return
         return (
             nums
-            if isinstance(nums, th.Tensor)
-            else (
-                th.tensor(nums, dtype=th.float32)
-                if isinstance(nums, Iterable)
-                else th.ones(dim, dtype=th.float32) * nums
-            )
+            if isinstance(nums, cb.arr_type)
+            else cb.array(nums) if isinstance(nums, Iterable) else cb.ones(dim) * nums
         )
 
     @property
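
Factoring the old (-1.0, 1.0) literal and the control-limit lookup into _generate_default_command_input_limits / _generate_default_command_output_limits means a subclass can change its defaults without re-implementing __init__. A hypothetical override, assuming the base class in controller_base.py is named BaseController (the subclass and its range are invented, and BaseController's other abstract members are omitted):

from omnigibson.controllers.controller_base import BaseController


class WideInputController(BaseController):
    """Hypothetical controller that accepts raw commands in [-2, 2] by default."""

    def _generate_default_command_input_limits(self):
        # Replaces the base default of (-1.0, 1.0)
        return (-2.0, 2.0)
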
9 changes: 4 additions & 5 deletions omnigibson/controllers/dd_controller.py

@@ -1,6 +1,5 @@
-import torch as th
-
 from omnigibson.controllers import ControlType, LocomotionController
+from omnigibson.utils.backend_utils import _compute_backend as cb
 
 
 class DifferentialDriveController(LocomotionController):
@@ -107,14 +106,14 @@ def compute_control(self, goal_dict, control_dict):
         right_wheel_joint_vel = (lin_vel + ang_vel * self._wheel_axle_halflength) / self._wheel_radius
 
         # Return desired velocities
-        return th.tensor([left_wheel_joint_vel, right_wheel_joint_vel])
+        return cb.array([left_wheel_joint_vel, right_wheel_joint_vel])
 
     def compute_no_op_goal(self, control_dict):
         # This is zero-vector, since we want zero linear / angular velocity
         return dict(vel=cb.zeros(2))
 
     def _compute_no_op_action(self, control_dict):
-        return th.zeros(2, dtype=th.float32)
+        return cb.zeros(2)
 
     def _get_goal_shapes(self):
         # Add (2, )-array representing linear, angular velocity
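
For reference, a numeric check of the differential-drive mapping used in compute_control above. The left-wheel formula is inferred by symmetry (only the right-wheel line is visible in the hunk), and the dimensions are made up:

wheel_radius = 0.1            # m, hypothetical
wheel_axle_halflength = 0.2   # m, hypothetical
lin_vel, ang_vel = 0.5, 1.0   # desired base twist: m/s, rad/s

left_wheel_joint_vel = (lin_vel - ang_vel * wheel_axle_halflength) / wheel_radius   # 3.0 rad/s
right_wheel_joint_vel = (lin_vel + ang_vel * wheel_axle_halflength) / wheel_radius  # 7.0 rad/s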